@@ -25,6 +25,8 @@
 #include <threadpool.h>
 #include <sql_class.h>
 #include <sql_parse.h>
+#include "threadpool_generic.h"
+#include <my_global.h>
 
 #ifdef WITH_WSREP
 #include "wsrep_trans_observer.h"
@@ -56,7 +58,6 @@ static void threadpool_remove_connection(THD *thd);
 static dispatch_command_return threadpool_process_request(THD *thd);
 static THD* threadpool_add_connection(CONNECT *connect, TP_connection *c);
 
-extern bool do_command(THD*);
 
 static inline TP_connection *get_TP_connection(THD *thd)
 {
@@ -173,14 +174,9 @@ static TP_PRIORITY get_priority(TP_connection *c)
   return prio;
 }
 
 
-void tp_callback(TP_connection *c)
+static bool tp_callback_run(TP_connection *c)
 {
   DBUG_ASSERT(c);
-
-  Worker_thread_context worker_context;
-  worker_context.save();
-
   THD *thd= c->thd;
-
   c->state = TP_STATE_RUNNING;
@@ -193,12 +189,14 @@ void tp_callback(TP_connection *c)
     if (!thd)
     {
       /* Bail out on connect error.*/
-      goto error;
+      return false;
     }
     c->connect= 0;
+    thd->apc_target.epoch.fetch_add(1, std::memory_order_release);
   }
   else
   {
+    thd->apc_target.epoch.fetch_add(1, std::memory_order_release);
 retry:
     switch(threadpool_process_request(thd))
     {
@@ -212,11 +210,15 @@ retry:
         thd->async_state.m_state = thd_async_state::enum_async_state::RESUMED;
         goto retry;
       }
-      worker_context.restore();
-      return;
+      return true;
     case DISPATCH_COMMAND_CLOSE_CONNECTION:
-      /* QUIT or an error occurred. */
-      goto error;
+      /*
+        QUIT or an error occurred.
+
+        We can skip the epoch increase here: process_apc_requests() will be
+        called one last time after the THD is unlinked.
+      */
+      return false;
     case DISPATCH_COMMAND_SUCCESS:
       break;
     }
@@ -229,19 +231,33 @@
   /* Read next command from client. */
   c->set_io_timeout(thd->get_net_wait_timeout());
   c->state= TP_STATE_IDLE;
-  if (c->start_io())
-    goto error;
 
-  worker_context.restore();
-  return;
+  thd->apc_target.epoch.fetch_add(1, std::memory_order_acquire);
+  if (unlikely(thd->apc_target.have_apc_requests()))
+    thd->apc_target.process_apc_requests();
+
+  int error= c->start_io();
+
+  return error == 0;
+}
 
-error:
-  c->thd= 0;
-  if (thd)
+void tp_callback(TP_connection *c)
+{
+  Worker_thread_context worker_context;
+  worker_context.save();
+
+  bool success= tp_callback_run(c);
+
+  if (!success)
   {
-    threadpool_remove_connection(thd);
+    THD *thd= c->thd;
+    c->thd= 0;
+    if (thd)
+    {
+      threadpool_remove_connection(thd);
+    }
+    delete c;
   }
-  delete c;
   worker_context.restore();
 }
 
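
The two fetch_add calls above establish the invariant that tp_notify_apc (further below) relies on: the epoch is odd while a worker owns the connection and even while the connection waits for I/O, and any change in the value means the worker passed a point where process_apc_requests() runs. Below is a minimal standalone model of those states; the file name, comments, and assertions are illustrative only, not code from the tree:

// epoch_model.cc -- illustrative only; models the parity states used above.
#include <atomic>
#include <cassert>
#include <cstdio>

int main()
{
  std::atomic<long long> epoch{0};   // even: connection idle, odd: worker running

  long long first= epoch.load();     // a notifier samples the epoch here
  assert(!(first & 1));              // even and unchanged: worker must be woken

  epoch.fetch_add(1, std::memory_order_release);  // worker picks the work up
  assert(epoch.load() & 1);          // odd: the worker will serve pending APCs

  epoch.fetch_add(1, std::memory_order_acquire);  // APCs drained, going idle
  assert(epoch.load() != first);     // changed: the notifier's request was seen
  std::puts("epoch parity states modeled");
  return 0;
}
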
@@ -320,6 +336,10 @@ static void threadpool_remove_connection(THD *thd)
   end_connection(thd);
   close_connection(thd, 0);
   unlink_thd(thd);
+  // The rest of the APC requests should be processed after unlinking.
+  // This guarantees that no new APC requests can be added.
+  // TODO: better notify the requestor with some KILLED state here.
+  thd->apc_target.process_apc_requests();
   PSI_CALL_delete_current_thread(); // before THD is destroyed
   delete thd;
 
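
The hunk above drains the APC queue only after unlink_thd(); that ordering is what makes the final drain lose nothing, since a drain before the unlink could race with a late enqueue. A reduced model of this teardown contract follows; the Target type and its members are hypothetical stand-ins for THD/Apc_target, assuming requestors check linkage under the same lock they enqueue under (as the real code does via LOCK_thd_kill):

// teardown_model.cc -- illustrative only; names are not from the tree.
#include <functional>
#include <mutex>
#include <queue>

struct Target
{
  std::mutex lock;
  bool linked= true;                 // visible to would-be requestors
  std::queue<std::function<void()>> requests;

  bool try_enqueue(std::function<void()> f)
  {
    std::lock_guard<std::mutex> g(lock);
    if (!linked)
      return false;                  // too late: teardown already started
    requests.push(std::move(f));
    return true;
  }

  void teardown()
  {
    std::lock_guard<std::mutex> g(lock);
    linked= false;                   // step 1: unlink, no new requests now
    while (!requests.empty())        // step 2: final drain, nothing is lost
    {
      requests.front()();
      requests.pop();
    }
  }
};
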
@@ -367,7 +387,14 @@ static dispatch_command_return threadpool_process_request(THD *thd)
 
   thread_attach(thd);
   if(thd->async_state.m_state == thd_async_state::enum_async_state::RESUMED)
+  {
+    if (unlikely(thd->async_state.m_command == COM_SLEEP))
+      return DISPATCH_COMMAND_SUCCESS; // Special case for the thread pool APC
+
     goto resume;
+  }
+
+  thd->apc_target.process_apc_requests();
 
   if (thd->killed >= KILL_CONNECTION)
   {
@@ -573,10 +600,71 @@ static void tp_resume(THD* thd)
 {
   DBUG_ASSERT(thd->async_state.m_state == thd_async_state::enum_async_state::SUSPENDED);
   thd->async_state.m_state = thd_async_state::enum_async_state::RESUMED;
+  thd->apc_target.epoch.fetch_add(1, std::memory_order_acquire);
   TP_connection* c = get_TP_connection(thd);
   pool->resume(c);
 }
 
+static bool tp_notify_apc(THD *thd)
+{
+  mysql_mutex_assert_owner(&thd->LOCK_thd_kill);
+  TP_connection* c= get_TP_connection(thd);
+  longlong process_epoch= thd->apc_target.process_epoch;
+  longlong first_epoch= thd->apc_target.epoch;
+  while (1)
+  {
+    longlong epoch= thd->apc_target.epoch;
+    if (epoch & 1 || epoch != first_epoch)
+    {
+      // We are in the safe zone, where we can guarantee processing.
+      break;
+    }
+    else
+    {
+
+      /*
+        Continue with the retry loop.
+        We are either before the APC request check, or after the check and
+        before the run, or the run was skipped.
+      */
+
+      process_epoch= thd->apc_target.process_epoch;
+      if (process_epoch & 1)
+      {
+        // We are blocked on LOCK_thd_kill in process_apc_requests(), or about to be.
+        break;
+      }
+      else
+      {
+        // We are either before the APC requests are checked or processed, or
+        // after they were processed. We can't distinguish these states, so
+        // just wake the pool connection and retry on failure.
+        int status= pool->wake(c);
+        if (likely(status == 0))
+        {
+          break;
+        }
+        else if (unlikely(status < 0))
+        {
+          return false;
+        }
+        else
+        {
+          /*
+            If the run was skipped and somebody else took the connection out
+            of the poll, then we will wait until the epoch changes, that is,
+            until the task reaches a worker through the queue, which can
+            take long.
+            So sleep longer here (1ms).
+          */
+          my_sleep(1000);
+        }
+      }
+    }
+  }
+  return true;
+}
+
 static scheduler_functions tp_scheduler_functions=
 {
   0,                                  // max_threads
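
For readability, the decision the loop above makes on each iteration can be written as a pure function over the two counters; the enum and function names below are illustrative reductions, not part of the patch:

// notify_action.h -- illustrative reduction of the tp_notify_apc loop body.
enum class Action
{
  DONE,              // processing of the APC request is already guaranteed
  WAKE_AND_RETRY     // connection idle and unchanged: wake it, then re-check
};

inline Action notify_action(long long first_epoch, long long epoch,
                            long long process_epoch)
{
  if ((epoch & 1) || epoch != first_epoch)
    return Action::DONE;      // worker is running, or has run since we looked
  if (process_epoch & 1)
    return Action::DONE;      // process_apc_requests() already serializes on
                              // LOCK_thd_kill, which the caller holds
  return Action::WAKE_AND_RETRY;
}

In the WAKE_AND_RETRY case, the return value of pool->wake(c) then selects among the loop's exits: 0 breaks out, a negative status fails the notification, and a positive one sleeps 1ms before retrying.
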
@@ -588,7 +676,8 @@ static scheduler_functions tp_scheduler_functions=
   tp_wait_end,                        // thd_wait_end
   tp_post_kill_notification,          // post kill notification
   tp_end,                             // end
-  tp_resume
+  tp_resume,
+  tp_notify_apc,
 };
 
 void pool_of_threads_scheduler(struct scheduler_functions *func,