@@ -49,6 +49,11 @@
 #include <trace_helpers.h>
 #include <wx/log.h>
 
+#ifdef _WIN32
+#include <optional>
+#include <windows.h>
+#endif
+
 /**
  * Implement a coroutine.
  *
@@ -186,7 +191,10 @@ public:
     /**
      * Create a coroutine from a delegate object.
      */
-    COROUTINE( std::function<ReturnType(ArgType)> aEntry ) :
+    COROUTINE( std::function<ReturnType( ArgType )> aEntry ) :
+#ifdef _WIN32
+        m_stack( nullptr ),
+#endif
         m_func( std::move( aEntry ) ),
         m_running( false ),
         m_args( nullptr ),
@@ -215,6 +223,11 @@ public:
 
         if( m_callee.ctx )
             libcontext::release_fcontext( m_callee.ctx );
+
+#ifdef _WIN32
+        if( m_stack )
+            ::VirtualFree( m_stack, 0, MEM_RELEASE );
+#endif
     }
 
 public:
@@ -375,7 +388,46 @@ private:
         void* sp = nullptr;
 
 #ifndef LIBCONTEXT_HAS_OWN_STACK
+#ifdef _WIN32
+        // Cache the system page size so we don't call GetSystemInfo on every stack allocation.
+        static std::optional<std::size_t> system_page_size;
+        if( !system_page_size.has_value() )
+        {
+            // On Windows we use VirtualAlloc and VirtualProtect so that a guard page can trap
+            // stack overflows.  An overflow will still crash, but the stack then has a better
+            // chance of being preserved in crash dumps.
+            SYSTEM_INFO si;
+            ::GetSystemInfo( &si );
+
+            system_page_size = static_cast<std::size_t>( si.dwPageSize );
+        }
+
+        // This shouldn't happen, but free any previously allocated stack just in case.
+        if( m_stack )
+            ::VirtualFree( m_stack, 0, MEM_RELEASE );
+
+        // Calculate the number of pages needed for the requested stack size.
+        std::size_t pages = ( m_stacksize + system_page_size.value() - 1 ) / system_page_size.value();
+
+        // Allocate one extra page for the guard.
+        std::size_t alloc_size = ( pages + 1 ) * system_page_size.value();
+
+        m_stack = ::VirtualAlloc( 0, alloc_size, MEM_COMMIT, PAGE_READWRITE );
+        if( !m_stack )
+            throw std::bad_alloc();
+
+        // Protect only the first (lowest) page so that it acts as the guard page.  The stack
+        // grows down from the end of the allocation and should never reach this guarded
+        // region unless it overflows.
+        DWORD old_prot;     // dummy variable; the last argument of VirtualProtect cannot be NULL
+        const BOOL res = ::VirtualProtect( m_stack, system_page_size.value(),
+                                           PAGE_READWRITE | PAGE_GUARD, &old_prot );
+
+        assert( res != false );
+
+        stackSize = alloc_size;
+        sp = static_cast<char*>( m_stack ) + stackSize;
+#else
         // fixme: Clean up stack stuff. Add a guard
         m_stack.reset( new char[stackSize] );
 
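The hunk above combines page-size lookup, rounding, allocation, and guard-page protection inside the coroutine's stack setup. For reference, here is the same technique as a small standalone sketch. The GuardedStack type and the allocate_guarded_stack / release_guarded_stack helpers are hypothetical names introduced only for illustration, not part of the patch, and error handling is reduced to the same throw/assert pattern the patch uses.

#include <cassert>
#include <cstddef>
#include <new>          // std::bad_alloc
#include <windows.h>

struct GuardedStack
{
    void*       base;   // address returned by VirtualAlloc; the lowest page is the guard page
    void*       top;    // initial stack pointer, one past the highest usable byte
    std::size_t size;   // total committed size, including the guard page
};

inline GuardedStack allocate_guarded_stack( std::size_t aRequested )
{
    SYSTEM_INFO si;
    ::GetSystemInfo( &si );
    const std::size_t page = si.dwPageSize;

    // Round the request up to whole pages, then add one page for the guard.
    const std::size_t pages = ( aRequested + page - 1 ) / page;
    const std::size_t total = ( pages + 1 ) * page;

    void* base = ::VirtualAlloc( nullptr, total, MEM_COMMIT, PAGE_READWRITE );

    if( !base )
        throw std::bad_alloc();

    // Turn the lowest page into a guard page.  The stack grows downward toward it, so an
    // overflow trips the guard page instead of silently corrupting whatever lies below.
    DWORD oldProt = 0;
    const BOOL ok = ::VirtualProtect( base, page, PAGE_READWRITE | PAGE_GUARD, &oldProt );
    assert( ok );

    return { base, static_cast<char*>( base ) + total, total };
}

inline void release_guarded_stack( GuardedStack& aStack )
{
    // With MEM_RELEASE the size argument must be 0 and the pointer must be the base address
    // originally returned by VirtualAlloc.
    if( aStack.base )
        ::VirtualFree( aStack.base, 0, MEM_RELEASE );

    aStack = GuardedStack{};
}

As a worked example of the rounding arithmetic: with the common 4 KiB page size, a 200 KiB stack request rounds to 50 pages, and the extra guard page brings the commit to 51 pages (204 KiB). The initial stack pointer is placed at the very end of the region, matching sp = static_cast<char*>( m_stack ) + stackSize above, so touching the guard page raises a guard-page fault instead of silently overrunning adjacent memory; that fault is what gives crash dumps a chance to capture an intact stack.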
@@ -389,6 +441,7 @@ private:
         m_valgrind_stack = VALGRIND_STACK_REGISTER( sp, m_stack.get() );
 #endif
+#endif
 #endif
 
 #ifdef KICAD_SANITIZE_THREADS
         // Create a new fiber to go with the new context
@@ -475,7 +528,11 @@ private:
     }
 
     ///< coroutine stack
+#ifdef _WIN32
+    void* m_stack;
+#else
     std::unique_ptr<char[]> m_stack;
+#endif
 
     int m_stacksize;
 