Link debug libnx & remove outdated nxstl includes

This commit is contained in:
Luke Street 2020-10-21 00:35:07 -04:00
parent 7f63cabaea
commit 81fb4e4c2d
4 changed files with 5 additions and 565 deletions

View File

@ -15,14 +15,16 @@ add_library(logvisor
lib/logvisor.cpp
include/logvisor/logvisor.hpp)
# NOTE(review): this hunk appears to contain diff residue — both the
# pre-change and post-change lines are present.  The `if(NOT NX)` below is
# never closed and its fmt link line is duplicated further down; one of the
# two lines is presumably the removed pre-change line — verify against the
# actual commit.
if(NOT NX)
target_link_libraries(logvisor PUBLIC fmt)
# On Nintendo Switch (NX) link the debug libnx (nxd) in Debug configs and
# the optimized libnx (nx) otherwise; on all other platforms link the
# platform's dynamic-loader library instead.
if(NX)
target_link_libraries(logvisor PUBLIC debug nxd optimized nx)
else()
target_link_libraries(logvisor PUBLIC ${CMAKE_DL_LIBS})
endif()
# fmt is linked unconditionally on every platform.
target_link_libraries(logvisor PUBLIC fmt)
target_include_directories(logvisor PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>)
# NOTE(review): duplicated install line — presumably pre-/post-change pair
# from the diff; only one should survive in the real file.
install(DIRECTORY include/logvisor DESTINATION include)
install(DIRECTORY include/logvisor DESTINATION include)
install(DIRECTORY fmt/include/fmt DESTINATION include)
set(version_config_file "${PROJECT_BINARY_DIR}/logvisorConfigVersion.cmake")

View File

@ -1,155 +0,0 @@
#ifndef _NXSTL_CONDVAR
#define _NXSTL_CONDVAR 1

#ifdef __SWITCH__

extern "C" {
#include <switch/kernel/condvar.h>
#include <switch/result.h>
}

#include <chrono>
#include <bits/std_mutex.h>
#include <ext/concurrence.h>
#include <bits/alloc_traits.h>
#include <bits/allocator.h>
#include <bits/unique_ptr.h>
#include <bits/shared_ptr.h>
#include <bits/cxxabi_forced.h>

namespace std _GLIBCXX_VISIBILITY(default)
{

/// cv_status
enum class cv_status { no_timeout, timeout };

/// condition_variable
///
/// std::condition_variable replacement backed by the libnx CondVar
/// primitive.  Timed waits are measured against chrono::system_clock.
class condition_variable
{
  typedef chrono::system_clock __clock_t;
  typedef CondVar __native_type;

  __native_type _M_cond = {};

public:
  typedef __native_type* native_handle_type;

  constexpr condition_variable() noexcept = default;
  ~condition_variable() noexcept = default;

  condition_variable(const condition_variable&) = delete;
  condition_variable& operator=(const condition_variable&) = delete;

  /// Wakes one blocked waiter, if any.
  void
  notify_one() noexcept
  { condvarWakeOne(&_M_cond); }

  /// Wakes every blocked waiter.
  void
  notify_all() noexcept
  { condvarWakeAll(&_M_cond); }

  /// Atomically releases @p __lock and blocks until notified; the lock is
  /// re-acquired before returning.
  void
  wait(unique_lock<mutex>& __lock) noexcept
  { condvarWait(&_M_cond, __lock.mutex()->native_handle()); }

  /// Loops until @p __p returns true, waiting between checks; the loop
  /// makes the wait immune to spurious wakeups.
  template<typename _Predicate>
    void
    wait(unique_lock<mutex>& __lock, _Predicate __p)
    {
      while (!__p())
        wait(__lock);
    }

  /// Waits until the absolute system_clock time @p __atime.
  template<typename _Duration>
    cv_status
    wait_until(unique_lock<mutex>& __lock,
               const chrono::time_point<__clock_t, _Duration>& __atime)
    { return __wait_until_impl(__lock, __atime); }

  /// Waits until @p __atime expressed on an arbitrary clock.
  template<typename _Clock, typename _Duration>
    cv_status
    wait_until(unique_lock<mutex>& __lock,
               const chrono::time_point<_Clock, _Duration>& __atime)
    {
      // DR 887 - Sync unknown clock to known clock.
      const typename _Clock::time_point __c_entry = _Clock::now();
      const __clock_t::time_point __s_entry = __clock_t::now();
      const auto __delta = __atime - __c_entry;
      const auto __s_atime = __s_entry + __delta;
      return __wait_until_impl(__lock, __s_atime);
    }

  /// Predicate form of wait_until: returns the final value of @p __p.
  template<typename _Clock, typename _Duration, typename _Predicate>
    bool
    wait_until(unique_lock<mutex>& __lock,
               const chrono::time_point<_Clock, _Duration>& __atime,
               _Predicate __p)
    {
      while (!__p())
        if (wait_until(__lock, __atime) == cv_status::timeout)
          return __p();
      return true;
    }

  /// Waits for the relative duration @p __rtime.
  template<typename _Rep, typename _Period>
    cv_status
    wait_for(unique_lock<mutex>& __lock,
             const chrono::duration<_Rep, _Period>& __rtime)
    {
      using __dur = typename __clock_t::duration;
      auto __reltime = chrono::duration_cast<__dur>(__rtime);
      // Round up so a truncating cast cannot shorten the wait.
      if (__reltime < __rtime)
        ++__reltime;
      return wait_until(__lock, __clock_t::now() + __reltime);
    }

  /// Predicate form of wait_for.
  template<typename _Rep, typename _Period, typename _Predicate>
    bool
    wait_for(unique_lock<mutex>& __lock,
             const chrono::duration<_Rep, _Period>& __rtime,
             _Predicate __p)
    {
      using __dur = typename __clock_t::duration;
      auto __reltime = chrono::duration_cast<__dur>(__rtime);
      if (__reltime < __rtime)
        ++__reltime;
      return wait_until(__lock, __clock_t::now() + __reltime, std::move(__p));
    }

  /// Raw libnx CondVar handle.
  native_handle_type
  native_handle()
  { return &_M_cond; }

private:
  template<typename _Dur>
    cv_status
    __wait_until_impl(unique_lock<mutex>& __lock,
                      const chrono::time_point<__clock_t, _Dur>& __atime)
    {
      // condvarWaitTimeout() takes a RELATIVE timeout in nanoseconds,
      // whereas __atime is an absolute time point.  Convert before the
      // call; the previous code handed over the absolute epoch-based
      // value (and applied duration_cast directly to a time_point, which
      // is ill-formed), so timed waits would effectively never expire.
      auto __rel = __atime - __clock_t::now();
      if (__rel < __rel.zero())
        __rel = __rel.zero(); // deadline already passed: poll once
      const auto __ns = chrono::duration_cast<chrono::nanoseconds>(__rel);
      Result res = condvarWaitTimeout(&_M_cond,
                                      __lock.mutex()->native_handle(),
                                      __ns.count());
      if (R_DESCRIPTION(res) == KernelError_Timeout)
        return cv_status::timeout;
      return cv_status::no_timeout;
    }
};

void
notify_all_at_thread_exit(condition_variable&, unique_lock<mutex>);

struct __at_thread_exit_elt
{
  __at_thread_exit_elt* _M_next;
  void (*_M_cb)(void*);
};

}

#endif
#endif

View File

@ -1,112 +0,0 @@
#ifndef _NXSTL_MUTEX
#define _NXSTL_MUTEX 1

#ifdef __SWITCH__

extern "C" {
#include <switch/kernel/mutex.h>
}

namespace std _GLIBCXX_VISIBILITY(default)
{

// Non-copyable holder for the raw libnx Mutex used by std::mutex.
class __mutex_base
{
protected:
  using __native_type = Mutex;

  __native_type _M_mutex = {};

  constexpr __mutex_base() noexcept = default;
  __mutex_base(const __mutex_base&) = delete;
  __mutex_base& operator=(const __mutex_base&) = delete;
};

/// std::mutex replacement backed by the libnx Mutex primitive.
class mutex : private __mutex_base
{
public:
  typedef __native_type* native_handle_type;

  constexpr mutex() noexcept = default;
  ~mutex() = default;

  mutex(const mutex&) = delete;
  mutex& operator=(const mutex&) = delete;

  /// Blocks until the mutex is acquired.
  void lock() { mutexLock(&_M_mutex); }

  /// Attempts to acquire without blocking; true on success.
  bool try_lock() noexcept { return mutexTryLock(&_M_mutex); }

  /// Releases the mutex.
  void unlock() { mutexUnlock(&_M_mutex); }

  /// Raw libnx handle.
  native_handle_type native_handle() noexcept { return &_M_mutex; }
};

// Non-copyable holder for the raw libnx RMutex used by
// std::recursive_mutex.
class __recursive_mutex_base
{
protected:
  using __native_type = RMutex;

  __native_type _M_mutex = {};

  __recursive_mutex_base() = default;
  __recursive_mutex_base(const __recursive_mutex_base&) = delete;
  __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;
};

/// std::recursive_mutex replacement backed by the libnx RMutex primitive.
class recursive_mutex : private __recursive_mutex_base
{
public:
  typedef __native_type* native_handle_type;

  constexpr recursive_mutex() = default;
  ~recursive_mutex() = default;

  recursive_mutex(const recursive_mutex&) = delete;
  recursive_mutex& operator=(const recursive_mutex&) = delete;

  /// Blocks until acquired; re-entrant from the owning thread.
  void lock() { rmutexLock(&_M_mutex); }

  /// Attempts to acquire without blocking; true on success.
  bool try_lock() noexcept { return rmutexTryLock(&_M_mutex); }

  /// Releases one level of ownership.
  void unlock() { rmutexUnlock(&_M_mutex); }

  /// Raw libnx handle.
  native_handle_type native_handle() noexcept { return &_M_mutex; }
};

}

#endif
#endif

View File

@ -1,295 +0,0 @@
#ifndef _NXSTL_THREAD
#define _NXSTL_THREAD 1
#ifdef __SWITCH__
extern "C" {
#include <switch/kernel/thread.h>
#include <switch/arm/tls.h>
#include <switch/result.h>
}
#include <memory>
namespace std _GLIBCXX_VISIBILITY(default)
{
/// thread
///
/// std::thread replacement built on the libnx threading API.  The thread
/// identifier (thread::id) wraps the full libnx Thread descriptor rather
/// than an opaque handle.
class thread
{
public:
// Abstract base class for types that wrap arbitrary functors to be
// invoked in the new thread of execution.
struct _State
{
virtual ~_State() = default;
virtual void _M_run() = 0;
};
using _State_ptr = unique_ptr<_State>;
typedef Thread native_handle_type;
/// thread::id
///
/// Wraps the whole libnx Thread struct; comparisons and hashing below
/// operate on its fields directly.
class id
{
native_handle_type _M_thread;
public:
id() noexcept : _M_thread() { }
explicit
id(native_handle_type __id) : _M_thread(__id) { }
private:
friend class thread;
friend class hash<thread::id>;
friend bool
operator==(thread::id __x, thread::id __y) noexcept;
friend bool
operator<(thread::id __x, thread::id __y) noexcept;
template<class _CharT, class _Traits>
friend basic_ostream<_CharT, _Traits>&
operator<<(basic_ostream<_CharT, _Traits>& __out, thread::id __id);
};
private:
id _M_id;
public:
thread() noexcept = default;
// _GLIBCXX_RESOLVE_LIB_DEFECTS
// 2097. packaged_task constructors should be constrained
thread(thread&) = delete;
thread(const thread&) = delete;
thread(const thread&&) = delete;
thread(thread&& __t) noexcept
{ swap(__t); }
// Starts a new thread running INVOKE(__f, __args...).  The callable and
// arguments are decay-copied into a heap-allocated state object.
template<typename _Callable, typename... _Args>
explicit
thread(_Callable&& __f, _Args&&... __args)
{
_M_start_thread(_S_make_state(
__make_invoker(std::forward<_Callable>(__f),
std::forward<_Args>(__args)...)));
}
// Per the standard contract: destroying a joinable thread terminates.
~thread()
{
if (joinable())
std::terminate();
}
thread& operator=(const thread&) = delete;
thread& operator=(thread&& __t) noexcept
{
if (joinable())
std::terminate();
swap(__t);
return *this;
}
void
swap(thread& __t) noexcept
{ std::swap(_M_id, __t._M_id); }
bool
joinable() const noexcept
{ return !(_M_id == id()); }
// Blocks until the thread exits, then resets this object's id.
// NOTE(review): threadClose() is never called here, so the kernel handle
// and thread stack do not appear to be reclaimed after join — confirm
// against libnx thread lifetime rules.
void
join()
{
threadWaitForExit(&_M_id._M_thread);
_M_id = id();
}
// NOTE(review): this merely forgets the descriptor; no actual detach or
// threadClose() happens, so a "detached" thread's resources appear to
// leak — confirm intended.
void
detach()
{
_M_id = id();
}
thread::id
get_id() const noexcept
{ return _M_id; }
/** @pre thread is joinable
*/
native_handle_type
native_handle()
{ return _M_id._M_thread; }
// Returns a value that hints at the number of hardware thread contexts.
// Hard-coded; presumably the three CPU cores available to applications
// on the Switch — TODO confirm.
static unsigned int
hardware_concurrency() noexcept
{
return 3;
}
private:
// Concrete state holding the decay-copied invoker.
template<typename _Callable>
struct _State_impl : public _State
{
_Callable _M_func;
_State_impl(_Callable&& __f) : _M_func(std::forward<_Callable>(__f))
{ }
void
_M_run() { _M_func(); }
};
// Trampoline passed to threadCreate; arg is the _State released by
// _M_start_thread.
// NOTE(review): the state object is never deleted after _M_run()
// returns, so each thread's state leaks — the pthread-based libstdc++
// version reclaims it here; confirm.
static void
execute_native_thread_routine(void *arg)
{
reinterpret_cast<_State*>(arg)->_M_run();
}
// Creates and starts the native thread.
// threadCreate args: entry, arg, stack size 8192 bytes, priority 0x2B,
// cpuid -2 (presumably "default core" — TODO confirm against libnx).
// NOTE(review): __throw_system_error is given a libnx Result, which is
// not an errno value, so the resulting system_error code is bogus.
void
_M_start_thread(_State_ptr state)
{
Result res = threadCreate(&_M_id._M_thread, execute_native_thread_routine,
state.get(), 8192, 0x2B, -2);
if (R_FAILED(res))
__throw_system_error(res);
res = threadStart(&_M_id._M_thread);
if (R_FAILED(res))
__throw_system_error(res);
state.release();
}
// Wraps a callable into a heap-allocated _State.
template<typename _Callable>
static _State_ptr
_S_make_state(_Callable&& __f)
{
using _Impl = _State_impl<_Callable>;
return _State_ptr{new _Impl{std::forward<_Callable>(__f)}};
}
private:
// A call wrapper that does INVOKE(forwarded tuple elements...)
template<typename _Tuple>
struct _Invoker
{
_Tuple _M_t;
template<size_t _Index>
static __tuple_element_t<_Index, _Tuple>&&
_S_declval();
template<size_t... _Ind>
auto
_M_invoke(_Index_tuple<_Ind...>)
noexcept(noexcept(std::__invoke(_S_declval<_Ind>()...)))
-> decltype(std::__invoke(_S_declval<_Ind>()...))
{ return std::__invoke(std::get<_Ind>(std::move(_M_t))...); }
using _Indices
= typename _Build_index_tuple<tuple_size<_Tuple>::value>::__type;
auto
operator()()
noexcept(noexcept(std::declval<_Invoker&>()._M_invoke(_Indices())))
-> decltype(std::declval<_Invoker&>()._M_invoke(_Indices()))
{ return _M_invoke(_Indices()); }
};
template<typename... _Tp>
using __decayed_tuple = tuple<typename std::decay<_Tp>::type...>;
public:
// Returns a call wrapper that stores
// tuple{DECAY_COPY(__callable), DECAY_COPY(__args)...}.
template<typename _Callable, typename... _Args>
static _Invoker<__decayed_tuple<_Callable, _Args...>>
__make_invoker(_Callable&& __callable, _Args&&... __args)
{
return { __decayed_tuple<_Callable, _Args...>{
std::forward<_Callable>(__callable), std::forward<_Args>(__args)...
} };
}
};
inline void
swap(thread& __x, thread& __y) noexcept
{ __x.swap(__y); }
inline bool
operator==(thread::id __x, thread::id __y) noexcept
{
// pthread_equal is undefined if either thread ID is not valid, so we
// can't safely use __gthread_equal on default-constructed values (nor
// the non-zero value returned by this_thread::get_id() for
// single-threaded programs using GNU libc). Assume EqualityComparable.
return __x._M_thread.handle == __y._M_thread.handle;
}
inline bool
operator!=(thread::id __x, thread::id __y) noexcept
{ return !(__x == __y); }
inline bool
operator<(thread::id __x, thread::id __y) noexcept
{
// Pthreads doesn't define any way to do this, so we just have to
// assume native_handle_type is LessThanComparable.
return __x._M_thread.handle < __y._M_thread.handle;
}
inline bool
operator<=(thread::id __x, thread::id __y) noexcept
{ return !(__y < __x); }
inline bool
operator>(thread::id __x, thread::id __y) noexcept
{ return __y < __x; }
inline bool
operator>=(thread::id __x, thread::id __y) noexcept
{ return !(__x < __y); }
// DR 889.
/// std::hash specialization for thread::id.
template<>
struct hash<thread::id>
: public __hash_base<size_t, thread::id>
{
size_t
operator()(const thread::id& __id) const noexcept
{ return std::_Hash_impl::hash(__id._M_thread); }
};
template<class _CharT, class _Traits>
inline basic_ostream<_CharT, _Traits>&
operator<<(basic_ostream<_CharT, _Traits>& __out, thread::id __id)
{
if (__id == thread::id())
return __out << "thread::id of a non-executing thread";
else
return __out << __id._M_thread;
}
namespace this_thread
{
/// get_id
// Reconstructs a Thread descriptor for the calling thread by reading
// libnx-internal thread-local storage structures directly.
// NOTE(review): the offsets 0x1F8 / 0x1B8 / 0x48 / 0x50 / 0x58 are
// hard-coded copies of libnx's private TLS / thread-context layout and
// will silently break if libnx changes — verify against the libnx
// version actually linked.
inline thread::id
get_id() noexcept
{
Thread ret;
// Base of the calling thread's TLS block.
uint8_t* tls = (uint8_t*)armGetTls();
// Presumably a pointer to the libnx thread context stored near the end
// of the TLS block — TODO confirm against libnx ThreadVars.
uint8_t* threadCtx = *(uint8_t**)(tls + 0x1F8);
ret.handle = *(Handle*)(threadCtx + 0x1B8);
ret.stack_mem = *(void**)(threadCtx + 0x48);
ret.stack_mirror = *(void**)(threadCtx + 0x50);
ret.stack_sz = *(size_t*)(threadCtx + 0x58);
return thread::id(ret);
}
}
}
#endif
#endif