@@ -64,14 +64,13 @@ need to protect it by a mutex. It is only ever read by the thread
doing the shutdown */
UNIV_INTERN ibool buf_page_cleaner_is_active = FALSE;

/** LRU flush batch is further divided into this chunk size to
reduce the wait time for the threads waiting for a clean block */
#define PAGE_CLEANER_LRU_BATCH_CHUNK_SIZE	100

#ifdef UNIV_PFS_THREAD
UNIV_INTERN mysql_pfs_key_t	buf_page_cleaner_thread_key;
#endif /* UNIV_PFS_THREAD */

/** Event to synchronise with the flushing. */
os_event_t	buf_flush_event;

/** If LRU list of a buf_pool is less than this size then LRU eviction
should not happen. This is because when we do LRU flushing we also put
the blocks on free list. If LRU list is very small then we can end up
@@ -135,60 +134,6 @@ buf_flush_validate_skip(
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
/*******************************************************************//**
Sets hazard pointer during flush_list iteration. */
UNIV_INLINE
void
buf_flush_set_hp(
/*=============*/
	buf_pool_t*		buf_pool,/*!< in/out: buffer pool instance */
	const buf_page_t*	bpage)	/*!< in: buffer control block */
{
	ut_ad(buf_flush_list_mutex_own(buf_pool));
	ut_ad(buf_pool->flush_list_hp == NULL || bpage == NULL);
	ut_ad(!bpage || buf_page_in_file(bpage));
	ut_ad(!bpage || bpage->in_flush_list);
	ut_ad(!bpage || buf_pool_from_bpage(bpage) == buf_pool);

	buf_pool->flush_list_hp = bpage;
}

/*******************************************************************//**
Checks if the given block is a hazard pointer
@return true if bpage is hazard pointer */
UNIV_INLINE
bool
buf_flush_is_hp(
/*============*/
	buf_pool_t*		buf_pool,/*!< in: buffer pool instance */
	const buf_page_t*	bpage)	/*!< in: buffer control block */
{
	ut_ad(buf_flush_list_mutex_own(buf_pool));

	return(buf_pool->flush_list_hp == bpage);
}
/*******************************************************************//**
Whenever we move a block in flush_list (either to remove it or to
relocate it) we check the hazard pointer set by some other thread
doing the flush list scan. If the hazard pointer is the same as the
one we are about to move then we set it to NULL to force a rescan
in the thread doing the batch. */
UNIV_INLINE
void
buf_flush_update_hp(
/*================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	buf_page_t*	bpage)		/*!< in: buffer control block */
{
	ut_ad(buf_flush_list_mutex_own(buf_pool));

	if (buf_flush_is_hp(buf_pool, bpage)) {
		buf_flush_set_hp(buf_pool, NULL);
		MONITOR_INC(MONITOR_FLUSH_HP_RESCAN);
	}
}
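The three inline helpers above are removed in favour of a reusable hazard-pointer object kept inside buf_pool (flush_hp for flush-list scans, lru_hp and single_scan_itr for LRU scans), as the new call sites later in this patch show. A minimal sketch of the interface those call sites assume; the names and layout here are illustrative, not the actual class definition:

class HazardPointer {
public:
	HazardPointer() : m_hp(NULL) {}
	virtual ~HazardPointer() {}

	/** @return the current hazard pointer, or NULL */
	buf_page_t* get() const { return(m_hp); }

	/** Park the iterator at a new position in the list. */
	void set(buf_page_t* bpage) { m_hp = bpage; }

	/** @return true if bpage is the current hazard pointer */
	bool is_hp(const buf_page_t* bpage) const { return(m_hp == bpage); }

	/** Called by any thread about to remove or relocate bpage:
	if bpage is the hazard pointer, reposition the pointer (e.g.
	to the previous page) so the paused scan can resume. */
	virtual void adjust(const buf_page_t* bpage) = 0;

protected:
	buf_page_t*	m_hp;	/*!< protected by the same mutex that
				guards the list being scanned */
};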
/******************************************************************//**
Inserts a block in the flush_rbt and returns a pointer to its
predecessor or NULL if no predecessor. The ordering is maintained
@@ -587,6 +532,10 @@ buf_flush_remove(
	buf_flush_list_mutex_enter(buf_pool);

	/* Important that we adjust the hazard pointer before removing
	the bpage from flush list. */
	buf_pool->flush_hp.adjust(bpage);

	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_POOL_WATCH:
	case BUF_BLOCK_ZIP_PAGE:
@@ -627,7 +576,6 @@ buf_flush_remove(
	ut_a(buf_flush_validate_skip(buf_pool));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

	buf_flush_update_hp(buf_pool, bpage);
	buf_flush_list_mutex_exit(buf_pool);
}
@@ -678,6 +626,10 @@ buf_flush_relocate_on_flush_list(
		prev_b = buf_flush_insert_in_flush_rbt(dpage);
	}

	/* Important that we adjust the hazard pointer before removing
	the bpage from the flush list. */
	buf_pool->flush_hp.adjust(bpage);

	/* Must be done after we have removed it from the flush_rbt
	because we assert on in_flush_list in comparison function. */
	ut_d(bpage->in_flush_list = FALSE);
@@ -706,7 +658,6 @@ buf_flush_relocate_on_flush_list(
	ut_a(buf_flush_validate_low(buf_pool));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

	buf_flush_update_hp(buf_pool, bpage);
	buf_flush_list_mutex_exit(buf_pool);
}
@@ -997,7 +948,10 @@ buf_flush_write_block_low(
	if (sync) {
		ut_ad(flush_type == BUF_FLUSH_SINGLE_PAGE);
		fil_flush(buf_page_get_space(bpage));
		buf_page_io_complete(bpage);

		/* true means we want to evict this page from the
		LRU list as well. */
		buf_page_io_complete(bpage, true);
	}

	/* Increment the counter of I/O operations used
@@ -1091,10 +1045,10 @@ buf_flush_page(
		rw_lock_s_lock_gen(rw_lock, BUF_IO_WRITE);
	}

	/* Even though bpage is not protected by any mutex at this
	point, it is safe to access bpage, because it is io_fixed and
	oldest_modification != 0. Thus, it cannot be relocated in the
	buffer pool or removed from flush_list or LRU_list. */
	buf_flush_write_block_low(bpage, flush_type, sync);
}
@@ -1458,105 +1412,70 @@ This utility flushes dirty blocks from the end of the LRU list.
The calling thread is not allowed to own any latches on pages!
It attempts to make 'max' blocks available in the free list. Note that
it is a best effort attempt and it is not guaranteed that after a call
to this function there will be 'max' blocks in the free list.
@return number of blocks for which the write request was queued. */
static
ulint
to this function there will be 'max' blocks in the free list. */
__attribute__((nonnull))
void
buf_flush_LRU_list_batch(
/*=====================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		max)		/*!< in: desired number of
	ulint		max,		/*!< in: desired number of
					blocks in the free_list */
	flush_counters_t*	n)	/*!< out: flushed/evicted page
					counts */
{
	buf_page_t*	bpage;
	ulint		count = 0;
	ulint		scanned = 0;
	ulint		free_len = UT_LIST_GET_LEN(buf_pool->free);
	ulint		lru_len = UT_LIST_GET_LEN(buf_pool->LRU);

	n->flushed = 0;
	n->evicted = 0;
	n->unzip_LRU_evicted = 0;

	ut_ad(buf_pool_mutex_own(buf_pool));

	bpage = UT_LIST_GET_LAST(buf_pool->LRU);
	while (bpage != NULL && count < max
	       && free_len < srv_LRU_scan_depth
	       && lru_len > BUF_LRU_MIN_LEN) {
	for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
	     bpage != NULL && (n->evicted + n->flushed) < max
	     && free_len < srv_LRU_scan_depth
	     && lru_len > BUF_LRU_MIN_LEN;
	     ++scanned,
	     bpage = buf_pool->lru_hp.get()) {

		ib_mutex_t*	block_mutex = buf_page_get_mutex(bpage);
		ibool		evict;

		buf_page_t*	prev = UT_LIST_GET_PREV(LRU, bpage);
		buf_pool->lru_hp.set(prev);

		ib_mutex_t*	block_mutex = buf_page_get_mutex(bpage);

		mutex_enter(block_mutex);
		evict = buf_flush_ready_for_replace(bpage);
		bool	evict = buf_flush_ready_for_replace(bpage);
		mutex_exit(block_mutex);
		++scanned;
		/* If the block is ready to be replaced we try to
		free it i.e.: put it on the free list.
		Otherwise we try to flush the block and its
		neighbors. In this case we'll put it on the
		free list in the next pass. We do this extra work
		of putting blocks to the free list instead of
		just flushing them because after every flush
		we have to restart the scan from the tail of
		the LRU list and if we don't clear the tail
		of the flushed pages then the scan becomes
		O(n*n). */
		if (evict) {
			/* block is ready for eviction i.e., it is
			clean and is not IO-fixed or buffer fixed. */
			if (buf_LRU_free_page(bpage, true)) {
				/* buf_pool->mutex was potentially
				released and reacquired. */
				bpage = UT_LIST_GET_LAST(buf_pool->LRU);
			} else {
				bpage = UT_LIST_GET_PREV(LRU, bpage);
				n->evicted++;
			}
		} else {
			ulint		space;
			ulint		offset;
			buf_page_t*	prev_bpage;

			prev_bpage = UT_LIST_GET_PREV(LRU, bpage);

			/* Save the previous bpage */
			if (prev_bpage != NULL) {
				space = prev_bpage->space;
				offset = prev_bpage->offset;
			} else {
				space = ULINT_UNDEFINED;
				offset = ULINT_UNDEFINED;
			}

			if (!buf_flush_page_and_try_neighbors(
				bpage, BUF_FLUSH_LRU, max, &count)) {
				bpage = prev_bpage;
			} else {
				/* buf_pool->mutex was released.
				Reposition the iterator. Note: the
				prev block could have been repositioned
				too but that should be rare. */
				if (prev_bpage != NULL) {
					ut_ad(space != ULINT_UNDEFINED);
					ut_ad(offset != ULINT_UNDEFINED);
					prev_bpage = buf_page_hash_get(
						buf_pool, space, offset);
				}

				bpage = prev_bpage;
			}

			/* Block is ready for flush. Dispatch an IO
			request. The IO helper thread will put it on
			the free list in the IO completion routine. */
			buf_flush_page_and_try_neighbors(
				bpage, BUF_FLUSH_LRU, max, &n->flushed);
		}
		ut_ad(!mutex_own(block_mutex));
		ut_ad(buf_pool_mutex_own(buf_pool));

		free_len = UT_LIST_GET_LEN(buf_pool->free);
		lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
	}

	buf_pool->lru_hp.set(NULL);

	/* We keep track of all flushes happening as part of LRU
	flush. When estimating the desired rate at which flush_list
	should be flushed, we factor in this value. */
	buf_lru_flush_page_count += count;
	buf_lru_flush_page_count += n->flushed;

	ut_ad(buf_pool_mutex_own(buf_pool));

@@ -1567,35 +1486,38 @@ buf_flush_LRU_list_batch(
		MONITOR_LRU_BATCH_SCANNED_PER_CALL,
		scanned);
	}

	return(count);
}
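Compared to the removed save-prev-by-(space, offset) logic, the new loop keeps the scan O(n): the iterator is parked on the previous page before any mutex can be released, and writers reposition it via adjust() instead of forcing a restart from the tail. The idiom, condensed into a sketch (list_last, list_prev and process are placeholder names, not real InnoDB calls):

	for (page = list_last(lru_list); page != NULL; page = hp.get()) {

		hp.set(list_prev(page));	/* park before any unlock */

		/* process() may release and reacquire the list mutex;
		a concurrent thread that removes the parked page calls
		hp.adjust(page), so hp.get() stays valid afterwards. */
		process(page);
	}

	hp.set(NULL);	/* scan finished */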
/*******************************************************************//**
Flush and move pages from LRU or unzip_LRU list to the free list.
Whether LRU or unzip_LRU is used depends on the state of the system.
@return number of blocks for which either the write request was queued
or in case of unzip_LRU the number of blocks actually moved to the
free list */
Whether LRU or unzip_LRU is used depends on the state of the system. */
__attribute__((nonnull))
static
ulint
void
buf_do_LRU_batch(
/*=============*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		max)		/*!< in: desired number of
	ulint		max,		/*!< in: desired number of
					blocks in the free_list */
	flush_counters_t*	n)	/*!< out: flushed/evicted page
					counts */
{
	ulint	count = 0;

	if (buf_LRU_evict_from_unzip_LRU(buf_pool)) {
		count += buf_free_from_unzip_LRU_list_batch(buf_pool, max);
		n->unzip_LRU_evicted
			= buf_free_from_unzip_LRU_list_batch(buf_pool, max);
	} else {
		n->unzip_LRU_evicted = 0;
	}

	if (max > count) {
		count += buf_flush_LRU_list_batch(buf_pool, max - count);
	if (max > n->unzip_LRU_evicted) {
		buf_flush_LRU_list_batch(buf_pool,
					 max - n->unzip_LRU_evicted, n);
	} else {
		n->evicted = 0;
		n->flushed = 0;
	}

	return(count);

	/* Add evicted pages from unzip_LRU to the evicted pages from
	the simple LRU. */
	n->evicted += n->unzip_LRU_evicted;
}
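The new out-parameter replaces the single return count. A plausible shape for flush_counters_t, inferred from the fields this patch reads and writes; the actual definition belongs in the flush headers and may differ:

struct flush_counters_t {
	ulint	flushed;		/*!< number of dirty pages for which
					a write request was queued */
	ulint	evicted;		/*!< number of clean pages evicted
					from the LRU, including the
					unzip_LRU evictions below */
	ulint	unzip_LRU_evicted;	/*!< number of uncompressed page
					frames freed from the unzip_LRU */
};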
/*******************************************************************//**
@@ -1637,6 +1559,7 @@ buf_do_flush_list_batch(
	for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
	     count < min_n && bpage != NULL && len > 0
	     && bpage->oldest_modification < lsn_limit;
	     bpage = buf_pool->flush_hp.get(),
	     ++scanned) {

		buf_page_t*	prev;
@@ -1645,8 +1568,7 @@ buf_do_flush_list_batch(
		ut_ad(bpage->in_flush_list);

		prev = UT_LIST_GET_PREV(list, bpage);
		buf_flush_set_hp(buf_pool, prev);
		buf_pool->flush_hp.set(prev);
		buf_flush_list_mutex_exit(buf_pool);

#ifdef UNIV_DEBUG
@@ -1657,23 +1579,12 @@ buf_do_flush_list_batch(
		buf_flush_list_mutex_enter(buf_pool);

		ut_ad(flushed || buf_flush_is_hp(buf_pool, prev));
		ut_ad(flushed || buf_pool->flush_hp.is_hp(prev));

		if (!buf_flush_is_hp(buf_pool, prev)) {
			/* The hazard pointer was reset by some other
			thread. Restart the scan. */
			ut_ad(buf_flush_is_hp(buf_pool, NULL));
			bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
			len = UT_LIST_GET_LEN(buf_pool->flush_list);
		} else {
			bpage = prev;
			--len;
			buf_flush_set_hp(buf_pool, NULL);
		}

		ut_ad(!bpage || bpage->in_flush_list);

		--len;
	}

	buf_pool->flush_hp.set(NULL);
	buf_flush_list_mutex_exit(buf_pool);
	MONITOR_INC_VALUE_CUMULATIVE(MONITOR_FLUSH_BATCH_SCANNED,
@@ -1691,9 +1602,9 @@ This utility flushes dirty blocks from the end of the LRU list or flush_list.
NOTE 1: in the case of an LRU flush the calling thread may own latches to
pages: to avoid deadlocks, this function must be written so that it cannot
end up waiting for these latches! NOTE 2: in the case of a flush list flush,
the calling thread is not allowed to own any latches on pages!
@return number of blocks for which the write request was queued */
ulint
the calling thread is not allowed to own any latches on pages! */
__attribute__((nonnull))
void
buf_flush_batch(
/*============*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
@@ -1704,14 +1615,14 @@ buf_flush_batch(
	ulint		min_n,		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
	lsn_t		lsn_limit)	/*!< in: in the case of BUF_FLUSH_LIST
	lsn_t		lsn_limit,	/*!< in: in the case of BUF_FLUSH_LIST
					all blocks whose oldest_modification is
					smaller than this should be flushed
					(if their number does not exceed
					min_n), otherwise ignored */
	flush_counters_t*	n)	/*!< out: flushed/evicted page
					counts */
{
	ulint	count = 0;
	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
#ifdef UNIV_SYNC_DEBUG
	ut_ad((flush_type != BUF_FLUSH_LIST)
@@ -1724,10 +1635,11 @@ buf_flush_batch(
	the flush functions. */
	switch (flush_type) {
	case BUF_FLUSH_LRU:
		count = buf_do_LRU_batch(buf_pool, min_n);
		buf_do_LRU_batch(buf_pool, min_n, n);
		break;
	case BUF_FLUSH_LIST:
		count = buf_do_flush_list_batch(buf_pool, min_n, lsn_limit);
		n->flushed = buf_do_flush_list_batch(buf_pool, min_n,
						     lsn_limit);
		n->evicted = 0;
		break;
	default:
		ut_error;
@@ -1736,15 +1648,13 @@ buf_flush_batch(
	buf_pool_mutex_exit(buf_pool);

#ifdef UNIV_DEBUG
	if (buf_debug_prints && count > 0) {
	if (buf_debug_prints && n->flushed > 0) {
		fprintf(stderr, flush_type == BUF_FLUSH_LRU
			? "Flushed %lu pages in LRU flush\n"
			: "Flushed %lu pages in flush list flush\n",
			(ulong) count);
			(ulong) n->flushed);
	}
#endif /* UNIV_DEBUG */

	return(count);
}
/******************************************************************//**
@@ -1855,48 +1765,6 @@ buf_flush_wait_batch_end(
	}
}

/*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list and also
puts replaceable clean pages from the end of the LRU list to the free
list.
NOTE: The calling thread is not allowed to own any latches on pages!
@return true if a batch was queued successfully, false if another batch
of the same type was already running. */
static
bool
buf_flush_LRU(
/*==========*/
	buf_pool_t*	buf_pool,	/*!< in/out: buffer pool instance */
	ulint		min_n,		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
	ulint*		n_processed)	/*!< out: the number of pages
					which were processed is passed
					back to caller. Ignored if NULL */
{
	ulint	page_count;

	if (n_processed) {
		*n_processed = 0;
	}

	if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) {
		return(false);
	}

	page_count = buf_flush_batch(buf_pool, BUF_FLUSH_LRU, min_n, 0);

	buf_flush_end(buf_pool, BUF_FLUSH_LRU);

	buf_flush_common(BUF_FLUSH_LRU, page_count);

	if (n_processed) {
		*n_processed = page_count;
	}

	return(true);
}
/*******************************************************************//**
This utility flushes dirty blocks from the end of the flush list of
all buffer pool instances.
@@ -1943,8 +1811,8 @@ buf_flush_list(
	/* Flush to lsn_limit in all buffer pool instances */
	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;
		ulint		page_count = 0;
		buf_pool_t*		buf_pool;
		flush_counters_t	n;

		buf_pool = buf_pool_from_array(i);
@@ -1964,23 +1832,23 @@ buf_flush_list(
			continue;
		}

		page_count = buf_flush_batch(
			buf_pool, BUF_FLUSH_LIST, min_n, lsn_limit);
		buf_flush_batch(
			buf_pool, BUF_FLUSH_LIST, min_n, lsn_limit, &n);

		buf_flush_end(buf_pool, BUF_FLUSH_LIST);

		buf_flush_common(BUF_FLUSH_LIST, page_count);
		buf_flush_common(BUF_FLUSH_LIST, n.flushed);

		if (n_processed) {
			*n_processed += page_count;
			*n_processed += n.flushed;
		}

		if (page_count) {
		if (n.flushed) {
			MONITOR_INC_VALUE_CUMULATIVE(
				MONITOR_FLUSH_BATCH_TOTAL_PAGE,
				MONITOR_FLUSH_BATCH_COUNT,
				MONITOR_FLUSH_BATCH_PAGES,
				page_count);
				n.flushed);
		}
	}

@@ -1988,12 +1856,12 @@
}
/******************************************************************//**
This function picks up a single dirty page from the tail of the LRU
list, flushes it, removes it from page_hash and LRU list and puts
it on the free list. It is called from user threads when they are
unable to find a replaceable page at the tail of the LRU list i.e.:
when the background LRU flushing in the page_cleaner thread is not
fast enough to keep pace with the workload.
This function picks up a single page from the tail of the LRU
list, flushes it (if it is dirty), removes it from page_hash and LRU
list and puts it on the free list. It is called from user threads when
they are unable to find a replaceable page at the tail of the LRU
list i.e.: when the background LRU flushing in the page_cleaner thread
is not fast enough to keep pace with the workload.
@return TRUE if success. */
UNIV_INTERN
ibool
@@ -2003,84 +1871,67 @@ buf_flush_single_page_from_LRU(
{
	ulint		scanned;
	buf_page_t*	bpage;
	ibool		freed;

	buf_pool_mutex_enter(buf_pool);

	for (bpage = UT_LIST_GET_LAST(buf_pool->LRU), scanned = 1;
	for (bpage = buf_pool->single_scan_itr.start(),
	     scanned = 0, freed = FALSE;
	     bpage != NULL;
	     bpage = UT_LIST_GET_PREV(LRU, bpage), ++scanned) {

		ib_mutex_t*	block_mutex = buf_page_get_mutex(bpage);
	     ++scanned, bpage = buf_pool->single_scan_itr.get()) {

		mutex_enter(block_mutex);

		ut_ad(buf_pool_mutex_own(buf_pool));

		if (buf_flush_ready_for_flush(bpage, BUF_FLUSH_SINGLE_PAGE)) {
		buf_page_t*	prev = UT_LIST_GET_PREV(LRU, bpage);
		buf_pool->single_scan_itr.set(prev);

			/* The following call will release the buffer pool
			and block mutex. */
			ibool	flushed = buf_flush_page(
				buf_pool, bpage, BUF_FLUSH_SINGLE_PAGE, true);

		ib_mutex_t*	block_mutex = buf_page_get_mutex(bpage);

		mutex_enter(block_mutex);

			if (flushed) {
		/* buf_flush_page() will release the
		block mutex */

		if (buf_flush_ready_for_replace(bpage)) {
			/* block is ready for eviction i.e., it is
			clean and is not IO-fixed or buffer fixed. */
			mutex_exit(block_mutex);

			if (buf_LRU_free_page(bpage, true)) {
				buf_pool_mutex_exit(buf_pool);
				freed = TRUE;
				break;
			}
		} else if (buf_flush_ready_for_flush(
				bpage, BUF_FLUSH_SINGLE_PAGE)) {
			/* Block is ready for flush. Dispatch an IO
			request. We'll put it on the free list in the IO
			completion routine. The following call, if
			successful, will release the buffer pool and
			block mutex. */
			freed = buf_flush_page(buf_pool, bpage,
					       BUF_FLUSH_SINGLE_PAGE, true);

			if (freed) {
				/* block and buffer pool mutex have
				already been released. */
				break;
			}

			mutex_exit(block_mutex);
		} else {
			mutex_exit(block_mutex);
		}
			mutex_exit(block_mutex);
	}
	MONITOR_INC_VALUE_CUMULATIVE(
		MONITOR_LRU_SINGLE_FLUSH_SCANNED,
		MONITOR_LRU_SINGLE_FLUSH_SCANNED_NUM_CALL,
		MONITOR_LRU_SINGLE_FLUSH_SCANNED_PER_CALL,
		scanned);

	if (bpage == NULL) {
	if (!freed) {
		/* Can't find a single flushable page. */
		ut_ad(!bpage);
		buf_pool_mutex_exit(buf_pool);
		return(FALSE);
	}
	ibool	freed = FALSE;

	/* At this point the page has been written to the disk.
	Since we are not holding the buffer pool or block mutex,
	we cannot use the bpage safely. It may have been plucked out
	of the LRU list by some other thread or it may even have been
	relocated in case of a compressed page. We need to start
	the scan of the LRU list again to remove the block from the
	LRU list and put it on the free list. */

	buf_pool_mutex_enter(buf_pool);

	for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
	     bpage != NULL;
	     bpage = UT_LIST_GET_PREV(LRU, bpage)) {

		ib_mutex_t*	block_mutex = buf_page_get_mutex(bpage);

		mutex_enter(block_mutex);

		ibool	ready = buf_flush_ready_for_replace(bpage);

		mutex_exit(block_mutex);

		if (ready) {
			bool	evict_zip;

			evict_zip = !buf_LRU_evict_from_unzip_LRU(buf_pool);

			freed = buf_LRU_free_page(bpage, evict_zip);
			break;
		}
	if (scanned) {
		MONITOR_INC_VALUE_CUMULATIVE(
			MONITOR_LRU_SINGLE_FLUSH_SCANNED,
			MONITOR_LRU_SINGLE_FLUSH_SCANNED_NUM_CALL,
			MONITOR_LRU_SINGLE_FLUSH_SCANNED_PER_CALL,
			scanned);
	}

	buf_pool_mutex_exit(buf_pool);
	ut_ad(!buf_pool_mutex_own(buf_pool));

	return(freed);
}
@@ -2107,6 +1958,7 @@ buf_flush_LRU_tail(void)
		buf_pool_t*	buf_pool = buf_pool_from_array(i);
		ulint		scan_depth;
		flush_counters_t	n;

		/* srv_LRU_scan_depth can be an arbitrarily large value.
		We cap it with the current LRU size. */
@@ -2116,44 +1968,37 @@ buf_flush_LRU_tail(void)
		scan_depth = ut_min(srv_LRU_scan_depth, scan_depth);

		/* We divide LRU flush into smaller chunks because
		there may be user threads waiting for the flush to
		end in buf_LRU_get_free_block(). */
		for (ulint j = 0;
		     j < scan_depth;
		     j += PAGE_CLEANER_LRU_BATCH_CHUNK_SIZE) {

			ulint	n_flushed = 0;

			/* Currently page_cleaner is the only thread
			that can trigger an LRU flush. It is possible
			that a batch triggered during the last iteration
			is still running. */
			if (buf_flush_LRU(buf_pool,
					  PAGE_CLEANER_LRU_BATCH_CHUNK_SIZE,
					  &n_flushed)) {

				/* Allowed only one batch per
				buffer pool instance. */
				buf_flush_wait_batch_end(
					buf_pool, BUF_FLUSH_LRU);
			}

		/* Currently page_cleaner is the only thread
		that can trigger an LRU flush. It is possible
		that a batch triggered during the last iteration
		is still running. */
		if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) {
			continue;
		}
			if (n_flushed) {
				total_flushed += n_flushed;
			} else {
				/* Nothing to flush */
				break;
			}
		}

		buf_flush_batch(buf_pool, BUF_FLUSH_LRU, scan_depth, 0, &n);

		buf_flush_end(buf_pool, BUF_FLUSH_LRU);

		buf_flush_common(BUF_FLUSH_LRU, n.flushed);

		if (n.flushed) {
			MONITOR_INC_VALUE_CUMULATIVE(
				MONITOR_LRU_BATCH_FLUSH_TOTAL_PAGE,
				MONITOR_LRU_BATCH_FLUSH_COUNT,
				MONITOR_LRU_BATCH_FLUSH_PAGES,
				n.flushed);
		}
	}

	if (total_flushed) {
		MONITOR_INC_VALUE_CUMULATIVE(
			MONITOR_LRU_BATCH_TOTAL_PAGE,
			MONITOR_LRU_BATCH_COUNT,
			MONITOR_LRU_BATCH_PAGES,
			total_flushed);

		if (n.evicted) {
			MONITOR_INC_VALUE_CUMULATIVE(
				MONITOR_LRU_BATCH_EVICT_TOTAL_PAGE,
				MONITOR_LRU_BATCH_EVICT_COUNT,
				MONITOR_LRU_BATCH_EVICT_PAGES,
				n.evicted);
		}

		total_flushed += (n.flushed + n.evicted);
	}

	return(total_flushed);
@@ -2404,11 +2249,14 @@ page_cleaner_sleep_if_needed(
	if (next_loop_time > cur_time) {
		/* Get sleep interval in micro seconds. We use
		ut_min() to avoid long sleep in case of
		wrap around. */
		os_thread_sleep(ut_min(1000000,
				       (next_loop_time - cur_time)
				       * 1000));
		ut_min() to avoid long sleep in case of wrap around. */
		ulint	sleep_us;

		sleep_us = ut_min(1000000, (next_loop_time - cur_time) * 1000);

		ib_int64_t	sig_count = os_event_reset(buf_flush_event);

		os_event_wait_time_low(buf_flush_event, sleep_us, sig_count);
	}
}
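The reset()/wait pair replaces the fixed os_thread_sleep() so that another thread (for example at shutdown, or when more flushing is urgently needed) can signal buf_flush_event and wake the page_cleaner early. The signal count returned by os_event_reset() makes the wait immune to a wakeup posted between the reset and the wait. A self-contained sketch of that idiom in portable C++; event_t here is an illustrative stand-in, not the os0sync implementation:

#include <chrono>
#include <condition_variable>
#include <mutex>

struct event_t {
	std::mutex		m;
	std::condition_variable	cv;
	long long		sig_count = 0;	/* bumped by every signal() */

	/* Corresponds to os_event_reset(): note the current count. */
	long long reset() {
		std::lock_guard<std::mutex> g(m);
		return(sig_count);
	}

	/* Corresponds to os_event_set(): wake all waiters. */
	void signal() {
		{
			std::lock_guard<std::mutex> g(m);
			++sig_count;
		}
		cv.notify_all();
	}

	/* Corresponds to os_event_wait_time_low(): returns at once if
	a signal() has happened since reset_sig_count was taken, so a
	wakeup posted between reset() and the wait cannot be lost. */
	void wait_time_low(unsigned long usec, long long reset_sig_count) {
		std::unique_lock<std::mutex> g(m);
		cv.wait_for(g, std::chrono::microseconds(usec),
			    [&] { return(sig_count != reset_sig_count); });
	}
};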
@@ -2538,6 +2386,8 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)(
thread_exit:
	buf_page_cleaner_is_active = FALSE;

	os_event_free(buf_flush_event);

	/* We count the number of threads in os_thread_exit(). A created
	thread should always use that to exit and not use return() to exit. */
	os_thread_exit(NULL);