
Updated with changes from Percona Server 5.1.54-12.5, from lp:percona-server as of February 4, 2011.

Merged: revid:oleg.tsarev@percona.com-20110113143630-b9ojivymbiwe3y2i
Branch: pull/47/merge
Parent commit: 8e0ed9eeba
  1. ChangeLog (117)
  2. Makefile.in (4)
  3. btr/btr0cur.c (12)
  4. buf/buf0buf.c (481)
  5. buf/buf0flu.c (80)
  6. buf/buf0lru.c (2)
  7. dict/dict0crea.c (2)
  8. dict/dict0dict.c (170)
  9. dict/dict0load.c (9)
  10. fil/fil0fil.c (108)
  11. fsp/fsp0fsp.c (50)
  12. ha/hash0hash.c (64)
  13. handler/ha_innodb.cc (468)
  14. handler/ha_innodb.h (1)
  15. handler/handler0alter.cc (11)
  16. handler/innodb_patch_info.h (1)
  17. ibuf/ibuf0ibuf.c (197)
  18. include/btr0btr.h (49)
  19. include/btr0btr.ic (38)
  20. include/btr0cur.h (18)
  21. include/buf0buf.h (6)
  22. include/buf0flu.h (14)
  23. include/dict0dict.h (44)
  24. include/fil0fil.h (9)
  25. include/hash0hash.h (49)
  26. include/ibuf0ibuf.h (5)
  27. include/log0log.h (3)
  28. include/os0file.h (4)
  29. include/os0proc.h (28)
  30. include/os0sync.h (4)
  31. include/row0ins.h (5)
  32. include/row0mysql.h (4)
  33. include/row0upd.h (11)
  34. include/srv0srv.h (7)
  35. include/srv0start.h (3)
  36. include/trx0sys.h (2)
  37. include/trx0sys.ic (2)
  38. include/univ.i (7)
  39. include/ut0lst.h (43)
  40. log/log0log.c (8)
  41. log/log0recv.c (16)
  42. os/os0file.c (38)
  43. os/os0proc.c (170)
  44. os/os0sync.c (53)
  45. percona-suite/have_response_time_distribution.inc (4)
  46. percona-suite/have_response_time_distribution.require (2)
  47. percona-suite/log_connection_error.patch/percona_log_connection_error-master.opt (1)
  48. percona-suite/log_connection_error.patch/percona_log_connection_error.result (15)
  49. percona-suite/log_connection_error.patch/percona_log_connection_error.test (52)
  50. percona-suite/percona_innodb_buffer_pool_shm-master.opt (2)
  51. percona-suite/percona_innodb_buffer_pool_shm.result (8)
  52. percona-suite/percona_innodb_buffer_pool_shm.test (19)
  53. percona-suite/percona_log_connection_error-master.opt (1)
  54. percona-suite/percona_server_variables.result (6)
  55. percona-suite/percona_server_variables.test (1)
  56. percona-suite/profiling_slow.patch/percona_bug643149.result (16)
  57. percona-suite/profiling_slow.patch/percona_bug643149.test (50)
  58. percona-suite/query_cache_enhance.patch/percona_status_wait_query_cache_mutex.test (2)
  59. percona-suite/response-time-distribution.patch/percona_query_response_time-replication.result (70)
  60. percona-suite/response-time-distribution.patch/percona_query_response_time-replication.test (57)
  61. percona-suite/response-time-distribution.patch/percona_query_response_time-stored.result (313)
  62. percona-suite/response-time-distribution.patch/percona_query_response_time-stored.test (90)
  63. percona-suite/response-time-distribution.patch/percona_query_response_time.result (567)
  64. percona-suite/response-time-distribution.patch/percona_query_response_time.test (68)
  65. percona-suite/response-time-distribution.patch/percona_query_response_time_flush.inc (1)
  66. percona-suite/response-time-distribution.patch/percona_query_response_time_show.inc (8)
  67. percona-suite/response-time-distribution.patch/percona_query_response_time_sleep.inc (19)
  68. percona-suite/show_slave_status_nolock.patch/percona_show_slave_status_nolock.result (21)
  69. percona-suite/show_slave_status_nolock.patch/percona_show_slave_status_nolock.test (47)
  70. percona-suite/slow_extended.patch/grep.inc (16)
  71. percona-suite/slow_extended.patch/percona_slow_extended-combined-master.opt (1)
  72. percona-suite/slow_extended.patch/percona_slow_extended-combined.result (18)
  73. percona-suite/slow_extended.patch/percona_slow_extended-combined.test (6)
  74. percona-suite/slow_extended.patch/percona_slow_extended-combined2-master.opt (1)
  75. percona-suite/slow_extended.patch/percona_slow_extended-combined2.result (12)
  76. percona-suite/slow_extended.patch/percona_slow_extended-combined2.test (4)
  77. percona-suite/slow_extended.patch/percona_slow_extended-control_global_slow-master.opt (2)
  78. percona-suite/slow_extended.patch/percona_slow_extended-control_global_slow.result (2)
  79. percona-suite/slow_extended.patch/percona_slow_extended-control_global_slow.test (8)
  80. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_filter-master.opt (2)
  81. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_filter.result (2)
  82. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_filter.test (7)
  83. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_sp_statements-cl-master.opt (1)
  84. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_sp_statements-cl.result (3)
  85. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_sp_statements-cl.test (1)
  86. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_timestamp_every-cl-master.opt (1)
  87. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_timestamp_every-cl.result (3)
  88. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_timestamp_every-cl.test (1)
  89. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity-cl-master.opt (1)
  90. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity-cl.result (9)
  91. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity-cl.test (3)
  92. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity-master.opt (2)
  93. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity.result (2)
  94. percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity.test (7)
  95. percona-suite/slow_extended.patch/percona_slow_extended-long_query_time-master.opt (2)
  96. percona-suite/slow_extended.patch/percona_slow_extended-long_query_time.result (2)
  97. percona-suite/slow_extended.patch/percona_slow_extended-long_query_time.test (8)
  98. percona-suite/slow_extended.patch/percona_slow_extended-microseconds_in_slow_extended-master.opt (2)
  99. percona-suite/slow_extended.patch/percona_slow_extended-microseconds_in_slow_extended.result (3)
  100. percona-suite/slow_extended.patch/percona_slow_extended-microseconds_in_slow_extended.test (14)

ChangeLog (117)

@ -1,30 +1,121 @@
2010-11-11 The InnoDB Team
* thr/thr0loc.c, trx/trx0i_s.c:
Fix Bug#57802 Empty ASSERTION parameter passed to the HASH_SEARCH macro
2010-11-10 The InnoDB Team
* dict/dict0dict.c, handler/handler0alter.cc, include/dict0dict.h
row/row0merge.c:
Fix Bug#55084 InnoDB crash and corruption after ALTER TABLE
2010-11-10 The InnoDB Team
* srv/srv0start.c:
Fix Bug#48026 Log start and end of InnoDB buffer pool
initialization to the error log
2010-11-03 The InnoDB Team
* include/btr0btr.h, include/btr0btr.ic, dict/dict0crea.c:
Fix Bug#57947 InnoDB diagnostics shows btr_block_get calls
instead of real callers
2010-11-03 The InnoDB Team
* fil/fil0fil.c, fsp/fsp0fsp.c, handler/ha_innodb.cc,
include/fil0fil.h, include/univ.i:
Fix Bug #54538 - use of exclusive innodb dictionary lock limits
performance.
2010-11-02 The InnoDB Team
* btr/btr0cur.c, dict/dict0dict.c, dict/dict0load.c,
handler/ha_innodb.cc, include/dict0dict.h, row/row0mysql.c,
innodb_bug53046.result, innodb_bug53046.test:
Fix Bug#53046 dict_update_statistics_low can still be run
concurrently on same table
2010-11-02 The InnoDB Team
* row/row0sel.c:
Fix Bug#57799 READ UNCOMMITTED access failure of off-page
DYNAMIC or COMPRESSED columns again
2010-10-24 The InnoDB Team
* row/row0mysql.c
Fix Bug#57700 Latching order violation in
row_truncate_table_for_mysql()
2010-10-20 The InnoDB Team
* dict/dict0load.c
Fix Bug#57616 Sig 11 in dict_load_table() when failed to load
index or foreign key
2010-10-19 The InnoDB Team
* btr/btr0cur.c, buf/buf0buf.c, buf/buf0flu.c, handler/ha_innodb.cc,
ibuf/ibuf0ibuf.c, include/btr0cur.h, include/buf0flu.h,
include/ibuf0ibuf.h, include/row0mysql.h,
row/row0mysql.c, row/row0sel.c,
innodb_bug56680.test, innodb_bug56680.result:
Fix Bug#56680 InnoDB may return wrong results from a
case-insensitive covering index
2010-10-18 The InnoDB Team
* handler/ha_innodb.cc, handler/ha_innodb.h, innodb_bug57252.result,
innodb_bug57252.test:
Fix Bug#57252 disabling innobase_stats_on_metadata disables ANALYZE
2010-10-14 The InnoDB Team
* handler/ha_innodb.cc, innodb_bug56143.result, innodb_bug56143.test:
Fix Bug#56143 too many foreign keys causes output of show create
table to become invalid
2010-10-14 The InnoDB Team
* srv/srv0start.c:
Fix Bug#57397 io_handler_thread() will never cleanup
2010-10-11 The InnoDB Team
* row/row0sel.c
Fix Bug #57345 btr_pcur_store_position abort for load with
concurrent lock/unlock tables
2010-10-11 The InnoDB Team
* row/row0mysql.c, innodb_bug56947.result, innodb_bug56947.test:
Fix Bug #56947 InnoDB leaks memory when failing to create a table
2010-10-06 The InnoDB Team
* row/row0mysql.c, innodb_bug57255.result, innodb_bug57255.test
Fix Bug #Cascade Delete results in "Got error -1 from storage engine"
Fix Bug #57255 Cascade Delete results in "Got error -1 from
storage engine"
2010-09-27 The InnoDB Team
* row/row0sel.c, innodb_bug56716.result, innodb_bug56716.test:
Fix Bug #56716 InnoDB locks a record gap without locking the table
Fix Bug#56716 InnoDB locks a record gap without locking the table
2010-09-06 The InnoDB Team
* dict/dict0load.c, innodb_bug53756.test innodb_bug53756.result
Fix Bug #53756 ALTER TABLE ADD PRIMARY KEY affects crash recovery
* dict/dict0load.c, innodb_bug53756.test innodb_bug53756.result:
Fix Bug#53756 ALTER TABLE ADD PRIMARY KEY affects crash recovery
2010-08-24 The InnoDB Team
* handler/ha_innodb.c, dict/dict0dict.c:
Fix Bug #55832 selects crash too easily when innodb_force_recovery>3
Fix Bug#55832 selects crash too easily when innodb_force_recovery>3
2010-08-03 The InnoDB Team
* include/dict0dict.h, include/dict0dict.ic, row/row0mysql.c:
Fix bug #54678, InnoDB, TRUNCATE, ALTER, I_S SELECT, crash or deadlock
Fix Bug#54678 InnoDB, TRUNCATE, ALTER, I_S SELECT, crash or deadlock
2010-08-03 The InnoDB Team
@ -37,13 +128,13 @@
2010-08-03 The InnoDB Team
* include/ut0mem.h, ut/ut0mem.c:
Fix Bug #55627 segv in ut_free pars_lexer_close innobase_shutdown
Fix Bug#55627 segv in ut_free pars_lexer_close innobase_shutdown
innodb-use-sys-malloc=0
2010-08-01 The InnoDB Team
* handler/ha_innodb.cc
Fix Bug #55382 Assignment with SELECT expressions takes unexpected
* handler/ha_innodb.cc:
Fix Bug#55382 Assignment with SELECT expressions takes unexpected
S locks in READ COMMITTED
2010-07-27 The InnoDB Team
@ -65,8 +156,8 @@
2010-06-29 The InnoDB Team
* btr/btr0cur.c, include/btr0cur.h,
include/row0mysql.h, row/row0merge.c, row/row0sel.c:
* btr/btr0cur.c, include/btr0cur.h, include/row0mysql.h,
row/row0merge.c, row/row0sel.c:
Fix Bug#54358 READ UNCOMMITTED access failure of off-page DYNAMIC
or COMPRESSED columns
@ -98,7 +189,7 @@
* dict/dict0load.c, fil/fil0fil.c:
Fix Bug#54658: InnoDB: Warning: allocated tablespace %lu,
old maximum was 0 (introduced in Bug #53578 fix)
old maximum was 0 (introduced in Bug#53578 fix)
2010-06-16 The InnoDB Team

Makefile.in (4)

@ -529,8 +529,6 @@ plugin_ftexample_shared_target = @plugin_ftexample_shared_target@
plugin_ftexample_static_target = @plugin_ftexample_static_target@
plugin_heap_shared_target = @plugin_heap_shared_target@
plugin_heap_static_target = @plugin_heap_static_target@
plugin_ibmdb2i_shared_target = @plugin_ibmdb2i_shared_target@
plugin_ibmdb2i_static_target = @plugin_ibmdb2i_static_target@
plugin_innobase_shared_target = @plugin_innobase_shared_target@
plugin_innobase_static_target = @plugin_innobase_static_target@
plugin_innodb_plugin_shared_target = @plugin_innodb_plugin_shared_target@
@ -546,8 +544,6 @@ plugin_partition_static_target = @plugin_partition_static_target@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
pstack_dir = @pstack_dir@
pstack_libs = @pstack_libs@
readline_basedir = @readline_basedir@
readline_dir = @readline_dir@
readline_h_ln_cmd = @readline_h_ln_cmd@

btr/btr0cur.c (12)

@ -1124,7 +1124,7 @@ btr_cur_ins_lock_and_undo(
not zero, the parameters index and thr
should be specified */
btr_cur_t* cursor, /*!< in: cursor on page after which to insert */
const dtuple_t* entry, /*!< in: entry to insert */
dtuple_t* entry, /*!< in/out: entry to insert */
que_thr_t* thr, /*!< in: query thread or NULL */
mtr_t* mtr, /*!< in/out: mini-transaction */
ibool* inherit)/*!< out: TRUE if the inserted new record maybe
@ -1803,7 +1803,7 @@ func_exit:
See if there is enough place in the page modification log to log
an update-in-place.
@return TRUE if enough place */
static
UNIV_INTERN
ibool
btr_cur_update_alloc_zip(
/*=====================*/
@ -3582,11 +3582,9 @@ btr_estimate_number_of_different_key_vals(
effective_pages = btr_estimate_n_pages_not_null(index, 1 /*k*/, first_rec_path);
if (!effective_pages) {
dict_index_stat_mutex_enter(index);
for (j = 0; j <= n_cols; j++) {
index->stat_n_diff_key_vals[j] = (ib_int64_t)index->stat_n_leaf_pages;
}
dict_index_stat_mutex_exit(index);
return;
} else if (effective_pages > index->stat_n_leaf_pages) {
effective_pages = index->stat_n_leaf_pages;
@ -3728,8 +3726,6 @@ btr_estimate_number_of_different_key_vals(
also the pages used for external storage of fields (those pages are
included in index->stat_n_leaf_pages) */
dict_index_stat_mutex_enter(index);
for (j = 0; j <= n_cols; j++) {
index->stat_n_diff_key_vals[j]
= ((n_diff[j]
@ -3768,8 +3764,6 @@ btr_estimate_number_of_different_key_vals(
}
}
dict_index_stat_mutex_exit(index);
mem_free(n_diff);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
@ -4182,7 +4176,7 @@ Stores the fields in big_rec_vec to the tablespace and puts pointers to
them in rec. The extern flags in rec will have to be set beforehand.
The fields are stored on pages allocated from leaf node
file segment of the index tree.
@return DB_SUCCESS or error */
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
UNIV_INTERN
ulint
btr_store_big_rec_extern_fields(

buf/buf0buf.c (481)

@ -53,6 +53,10 @@ Created 11/5/1995 Heikki Tuuri
#include "page0zip.h"
#include "trx0trx.h"
#include "srv0start.h"
#include "que0que.h"
#include "read0read.h"
#include "row0row.h"
#include "ha_prototypes.h"
/* prototypes for new functions added to ha_innodb.cc */
trx_t* innobase_get_trx();
@ -310,6 +314,30 @@ read-ahead or flush occurs */
UNIV_INTERN ibool buf_debug_prints = FALSE;
#endif /* UNIV_DEBUG */
/* Buffer pool shared memory segment information */
typedef struct buf_shm_info_struct buf_shm_info_t;
struct buf_shm_info_struct {
char head_str[8];
ulint binary_id;
ibool is_new; /* during initializing */
ibool clean; /* cleanly shut down and freed */
ibool reusable; /* reusable */
ulint buf_pool_size; /* backup value */
ulint page_size; /* backup value */
ulint frame_offset; /* offset of the first frame based on chunk->mem */
ulint zip_hash_offset;
ulint zip_hash_n;
ulint checksum;
buf_pool_t buf_pool_backup;
buf_chunk_t chunk_backup;
ib_uint64_t dummy;
};
#define BUF_SHM_INFO_HEAD "XTRA_SHM"
#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
@ -756,6 +784,45 @@ buf_block_init(
#endif /* UNIV_SYNC_DEBUG */
}
static
void
buf_block_reuse(
/*============*/
buf_block_t* block,
ptrdiff_t frame_offset)
{
/* block_init */
block->frame += frame_offset;
UNIV_MEM_DESC(block->frame, UNIV_PAGE_SIZE, block);
block->index = NULL;
#ifdef UNIV_DEBUG
/* recreate later */
block->page.in_page_hash = FALSE;
block->page.in_zip_hash = FALSE;
#endif /* UNIV_DEBUG */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
block->n_pointers = 0;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
if (block->page.zip.data)
block->page.zip.data += frame_offset;
block->is_hashed = FALSE;
mutex_create(&block->mutex, SYNC_BUF_BLOCK);
rw_lock_create(&block->lock, SYNC_LEVEL_VARYING);
ut_ad(rw_lock_validate(&(block->lock)));
#ifdef UNIV_SYNC_DEBUG
rw_lock_create(&block->debug_latch, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
}
/********************************************************************//**
Allocates a chunk of buffer frames.
@return chunk, or NULL on failure */
@ -768,26 +835,188 @@ buf_chunk_init(
{
buf_block_t* block;
byte* frame;
ulint zip_hash_n = 0;
ulint zip_hash_mem_size = 0;
hash_table_t* zip_hash_tmp = NULL;
ulint i;
buf_shm_info_t* shm_info = NULL;
/* Round down to a multiple of page size,
although it already should be. */
mem_size = ut_2pow_round(mem_size, UNIV_PAGE_SIZE);
srv_buffer_pool_shm_is_reused = FALSE;
if (srv_buffer_pool_shm_key) {
/* zip_hash size */
zip_hash_n = (mem_size / UNIV_PAGE_SIZE) * 2;
zip_hash_mem_size = ut_2pow_round(hash_create_needed(zip_hash_n)
+ (UNIV_PAGE_SIZE - 1), UNIV_PAGE_SIZE);
}
/* Reserve space for the block descriptors. */
mem_size += ut_2pow_round((mem_size / UNIV_PAGE_SIZE) * (sizeof *block)
+ (UNIV_PAGE_SIZE - 1), UNIV_PAGE_SIZE);
if (srv_buffer_pool_shm_key) {
mem_size += ut_2pow_round(sizeof(buf_shm_info_t)
+ (UNIV_PAGE_SIZE - 1), UNIV_PAGE_SIZE);
mem_size += zip_hash_mem_size;
}
chunk->mem_size = mem_size;
if (srv_buffer_pool_shm_key) {
ulint binary_id;
ibool is_new;
ut_a(buf_pool->n_chunks == 1);
fprintf(stderr,
"InnoDB: Warning: The innodb_buffer_pool_shm_key option has been specified.\n"
"InnoDB: Do not change the following between restarts of the server while this option is being used:\n"
"InnoDB: * the mysqld executable between restarts of the server.\n"
"InnoDB: * the value of innodb_buffer_pool_size.\n"
"InnoDB: * the value of innodb_page_size.\n"
"InnoDB: * datafiles created by InnoDB during this session.\n"
"InnoDB: Otherwise, data corruption in datafiles may result.\n");
/* FIXME: This is vague id still */
binary_id = (ulint) ((byte*)mtr_commit - (byte*)btr_root_get)
+ (ulint) ((byte*)os_get_os_version - (byte*)buf_calc_page_new_checksum)
+ (ulint) ((byte*)page_dir_find_owner_slot - (byte*)dfield_data_is_binary_equal)
+ (ulint) ((byte*)que_graph_publish - (byte*)dict_casedn_str)
+ (ulint) ((byte*)read_view_oldest_copy_or_open_new - (byte*)fil_space_get_version)
+ (ulint) ((byte*)rec_get_n_extern_new - (byte*)fsp_get_size_low)
+ (ulint) ((byte*)row_get_trx_id_offset - (byte*)ha_create_func)
+ (ulint) ((byte*)srv_set_io_thread_op_info - (byte*)thd_is_replication_slave_thread)
+ (ulint) ((byte*)mutex_create_func - (byte*)ibuf_inside)
+ (ulint) ((byte*)trx_set_detailed_error - (byte*)lock_check_trx_id_sanity)
+ (ulint) ((byte*)ut_time - (byte*)mem_heap_strdup);
chunk->mem = os_shm_alloc(&chunk->mem_size, srv_buffer_pool_shm_key, &is_new);
if (UNIV_UNLIKELY(chunk->mem == NULL)) {
return(NULL);
}
init_again:
#ifdef UNIV_SET_MEM_TO_ZERO
if (is_new) {
memset(chunk->mem, '\0', chunk->mem_size);
}
#endif
/* for ut_fold_binary_32(), these values should be 32-bit aligned */
ut_a(sizeof(buf_shm_info_t) % 4 == 0);
ut_a((ulint)chunk->mem % 4 == 0);
ut_a(chunk->mem_size % 4 == 0);
shm_info = chunk->mem;
zip_hash_tmp = (hash_table_t*)((byte*)chunk->mem + chunk->mem_size - zip_hash_mem_size);
if (is_new) {
strncpy(shm_info->head_str, BUF_SHM_INFO_HEAD, 8);
shm_info->binary_id = binary_id;
shm_info->is_new = TRUE; /* changed to FALSE when the initialization is finished */
shm_info->clean = FALSE; /* changed to TRUE when free the segment. */
shm_info->reusable = FALSE; /* changed to TRUE when validation is finished. */
shm_info->buf_pool_size = srv_buf_pool_size;
shm_info->page_size = srv_page_size;
shm_info->zip_hash_offset = chunk->mem_size - zip_hash_mem_size;
shm_info->zip_hash_n = zip_hash_n;
} else {
ulint checksum;
if (strncmp(shm_info->head_str, BUF_SHM_INFO_HEAD, 8)) {
fprintf(stderr,
"InnoDB: Error: The shared memory segment seems not to be for buffer pool.\n");
return(NULL);
}
if (shm_info->binary_id != binary_id) {
fprintf(stderr,
"InnoDB: Error: The shared memory segment seems not to be for this binary.\n");
return(NULL);
}
if (shm_info->is_new) {
fprintf(stderr,
"InnoDB: Error: The shared memory was not initialized yet.\n");
return(NULL);
}
if (shm_info->buf_pool_size != srv_buf_pool_size) {
fprintf(stderr,
"InnoDB: Error: srv_buf_pool_size is different (shm=%lu current=%lu).\n",
shm_info->buf_pool_size, srv_buf_pool_size);
return(NULL);
}
if (shm_info->page_size != srv_page_size) {
fprintf(stderr,
"InnoDB: Error: srv_page_size is different (shm=%lu current=%lu).\n",
shm_info->page_size, srv_page_size);
return(NULL);
}
if (!shm_info->reusable) {
fprintf(stderr,
"InnoDB: Warning: The shared memory has unrecoverable contents.\n"
"InnoDB: The shared memory segment is initialized.\n");
is_new = TRUE;
goto init_again;
}
if (!shm_info->clean) {
fprintf(stderr,
"InnoDB: Warning: The shared memory was not shut down cleanly.\n"
"InnoDB: The shared memory segment is initialized.\n");
is_new = TRUE;
goto init_again;
}
ut_a(shm_info->zip_hash_offset == chunk->mem_size - zip_hash_mem_size);
ut_a(shm_info->zip_hash_n == zip_hash_n);
/* check checksum */
if (srv_buffer_pool_shm_checksum) {
checksum = ut_fold_binary_32((byte*)chunk->mem + sizeof(buf_shm_info_t),
chunk->mem_size - sizeof(buf_shm_info_t));
} else {
checksum = BUF_NO_CHECKSUM_MAGIC;
}
if (shm_info->checksum != BUF_NO_CHECKSUM_MAGIC
&& shm_info->checksum != checksum) {
fprintf(stderr,
"InnoDB: Error: checksum of the shared memory is not match. "
"(stored=%lu calculated=%lu)\n",
shm_info->checksum, checksum);
return(NULL);
}
/* flag to use the segment. */
shm_info->clean = FALSE; /* changed to TRUE when free the segment. */
}
/* init zip_hash contents */
if (is_new) {
hash_create_init(zip_hash_tmp, zip_hash_n);
} else {
/* adjust offset is done later */
hash_create_reuse(zip_hash_tmp);
srv_buffer_pool_shm_is_reused = TRUE;
}
} else {
chunk->mem = os_mem_alloc_large(&chunk->mem_size);
if (UNIV_UNLIKELY(chunk->mem == NULL)) {
return(NULL);
}
}
/* Allocate the block descriptors from
the start of the memory block. */
if (srv_buffer_pool_shm_key) {
chunk->blocks = (buf_block_t*)((byte*)chunk->mem + sizeof(buf_shm_info_t));
} else {
chunk->blocks = chunk->mem;
}
/* Align a pointer to the first frame. Note that when
os_large_page_size is smaller than UNIV_PAGE_SIZE,
@ -795,8 +1024,13 @@ buf_chunk_init(
it is bigger, we may allocate more blocks than requested. */
frame = ut_align(chunk->mem, UNIV_PAGE_SIZE);
if (srv_buffer_pool_shm_key) {
/* reserve zip_hash space and always -1 for reproducibility */
chunk->size = (chunk->mem_size - zip_hash_mem_size) / UNIV_PAGE_SIZE - 1;
} else {
chunk->size = chunk->mem_size / UNIV_PAGE_SIZE
- (frame != chunk->mem);
}
/* Subtract the space needed for block descriptors. */
{
@ -810,6 +1044,98 @@ buf_chunk_init(
chunk->size = size;
}
if (shm_info && !(shm_info->is_new)) {
/* convert the shared memory segment for reuse */
ptrdiff_t phys_offset;
ptrdiff_t logi_offset;
ptrdiff_t blocks_offset;
void* previous_frame_address;
if (chunk->size < shm_info->chunk_backup.size) {
fprintf(stderr,
"InnoDB: Error: The buffer pool became smaller because of allocated address.\n"
"InnoDB: Retrying may avoid this situation.\n");
shm_info->clean = TRUE; /* release the flag for retrying */
return(NULL);
}
chunk->size = shm_info->chunk_backup.size;
phys_offset = frame - ((byte*)chunk->mem + shm_info->frame_offset);
logi_offset = frame - chunk->blocks[0].frame;
previous_frame_address = chunk->blocks[0].frame;
blocks_offset = (byte*)chunk->blocks - (byte*)shm_info->chunk_backup.blocks;
if (phys_offset || logi_offset || blocks_offset) {
fprintf(stderr,
"InnoDB: Buffer pool in the shared memory segment should be converted.\n"
"InnoDB: Previous frames in address : %p\n"
"InnoDB: Previous frames were located : %p\n"
"InnoDB: Current frames should be located: %p\n"
"InnoDB: Pysical offset : %ld (%#lx)\n"
"InnoDB: Logical offset (frames) : %ld (%#lx)\n"
"InnoDB: Logical offset (blocks) : %ld (%#lx)\n",
(byte*)chunk->mem + shm_info->frame_offset,
chunk->blocks[0].frame, frame,
phys_offset, phys_offset, logi_offset, logi_offset,
blocks_offset, blocks_offset);
} else {
fprintf(stderr,
"InnoDB: Buffer pool in the shared memory segment can be used as it is.\n");
}
if (phys_offset) {
fprintf(stderr,
"InnoDB: Aligning physical offset...");
memmove(frame, (byte*)chunk->mem + shm_info->frame_offset,
chunk->size * UNIV_PAGE_SIZE);
fprintf(stderr,
" Done.\n");
}
/* buf_block_t */
block = chunk->blocks;
for (i = chunk->size; i--; ) {
buf_block_reuse(block, logi_offset);
block++;
}
if (logi_offset || blocks_offset) {
fprintf(stderr,
"InnoDB: Aligning logical offset...");
/* buf_pool_t buf_pool_backup */
UT_LIST_OFFSET(flush_list, buf_page_t, shm_info->buf_pool_backup.flush_list,
previous_frame_address, logi_offset, blocks_offset);
UT_LIST_OFFSET(free, buf_page_t, shm_info->buf_pool_backup.free,
previous_frame_address, logi_offset, blocks_offset);
UT_LIST_OFFSET(LRU, buf_page_t, shm_info->buf_pool_backup.LRU,
previous_frame_address, logi_offset, blocks_offset);
if (shm_info->buf_pool_backup.LRU_old)
shm_info->buf_pool_backup.LRU_old =
(buf_page_t*)((byte*)(shm_info->buf_pool_backup.LRU_old)
+ (((void*)shm_info->buf_pool_backup.LRU_old > previous_frame_address)
? logi_offset : blocks_offset));
UT_LIST_OFFSET(unzip_LRU, buf_block_t, shm_info->buf_pool_backup.unzip_LRU,
previous_frame_address, logi_offset, blocks_offset);
UT_LIST_OFFSET(zip_list, buf_page_t, shm_info->buf_pool_backup.zip_clean,
previous_frame_address, logi_offset, blocks_offset);
for (i = 0; i < BUF_BUDDY_SIZES_MAX; i++) {
UT_LIST_OFFSET(zip_list, buf_page_t, shm_info->buf_pool_backup.zip_free[i],
previous_frame_address, logi_offset, blocks_offset);
}
HASH_OFFSET(zip_hash_tmp, buf_page_t, hash,
previous_frame_address, logi_offset, blocks_offset);
fprintf(stderr,
" Done.\n");
}
} else {
/* Init block structs and assign frames for them. Then we
assign the frames to the first blocks (we already mapped the
memory above). */
@ -833,6 +1159,11 @@ buf_chunk_init(
block++;
frame += UNIV_PAGE_SIZE;
}
}
if (shm_info) {
shm_info->frame_offset = chunk->blocks[0].frame - (byte*)chunk->mem;
}
return(chunk);
}
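The reuse path above boils down to a handshake through a small header at the start of the segment: a magic string, a fingerprint of the running binary, the saved pool and page sizes, and a checksum over the rest of the segment; only if all of them agree is the old buffer pool picked up again. A minimal standalone C sketch of that validation under simplified assumptions follows; shm_header_t, shm_checksum() and the constants are illustrative stand-ins, not the InnoDB definitions.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define SHM_MAGIC       "XTRA_SHM"      /* same idea as BUF_SHM_INFO_HEAD        */
#define SHM_NO_CHECKSUM 0xDEADBEEFUL    /* stand-in for BUF_NO_CHECKSUM_MAGIC    */

typedef struct {                        /* hypothetical, simplified header       */
    char          magic[8];
    unsigned long binary_id;            /* fingerprint of the executable         */
    int           is_new;               /* still being initialized               */
    int           clean;                /* was shut down cleanly                 */
    unsigned long pool_size;            /* saved buffer pool size                */
    unsigned long page_size;            /* saved page size                       */
    unsigned long checksum;             /* checksum over the payload             */
} shm_header_t;

/* Toy checksum over the payload that follows the header. */
static unsigned long shm_checksum(const unsigned char* p, size_t len)
{
    unsigned long sum = 0;
    size_t        i;
    for (i = 0; i < len; i++) {
        sum = sum * 31 + p[i];
    }
    return sum;
}

/* Returns 1 if the segment can be reused, 0 if it must be re-initialized. */
static int shm_segment_reusable(const void* mem, size_t mem_size,
                                unsigned long binary_id,
                                unsigned long pool_size,
                                unsigned long page_size)
{
    const shm_header_t* h = (const shm_header_t*) mem;
    unsigned long       checksum;

    if (memcmp(h->magic, SHM_MAGIC, 8) != 0) {
        fprintf(stderr, "segment is not a buffer pool segment\n");
        return 0;
    }
    if (h->binary_id != binary_id || h->is_new || !h->clean) {
        return 0;   /* wrong binary, unfinished init, or dirty shutdown */
    }
    if (h->pool_size != pool_size || h->page_size != page_size) {
        return 0;   /* sizes changed between restarts */
    }
    checksum = shm_checksum((const unsigned char*) mem + sizeof(*h),
                            mem_size - sizeof(*h));
    return h->checksum == SHM_NO_CHECKSUM || h->checksum == checksum;
}

buf_chunk_init() additionally distinguishes a segment that belongs to a different binary or has the wrong sizes (a hard error) from one that is merely stale or dirty (re-initialized in place via the init_again path); the sketch collapses that into a single yes/no answer.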
@ -1014,6 +1345,8 @@ buf_chunk_free(
UNIV_MEM_UNDESC(block);
}
ut_a(!srv_buffer_pool_shm_key);
os_mem_free_large(chunk->mem, chunk->mem_size);
}
@ -1063,7 +1396,10 @@ buf_pool_init(void)
srv_buf_pool_curr_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
buf_pool->page_hash = hash_create(2 * buf_pool->curr_size);
/* zip_hash is allocated to shm when srv_buffer_pool_shm_key is enabled */
if (!srv_buffer_pool_shm_key) {
buf_pool->zip_hash = hash_create(2 * buf_pool->curr_size);
}
buf_pool->last_printout_time = time(NULL);
@ -1078,6 +1414,86 @@ buf_pool_init(void)
--------------------------- */
/* All fields are initialized by mem_zalloc(). */
if (srv_buffer_pool_shm_key) {
buf_shm_info_t* shm_info;
ut_a((byte*)chunk->blocks == (byte*)chunk->mem + sizeof(buf_shm_info_t));
shm_info = chunk->mem;
buf_pool->zip_hash = (hash_table_t*)((byte*)chunk->mem + shm_info->zip_hash_offset);
if(shm_info->is_new) {
shm_info->is_new = FALSE; /* initialization was finished */
} else {
buf_block_t* block = chunk->blocks;
buf_page_t* b;
/* shm_info->buf_pool_backup should be converted */
/* at buf_chunk_init(). So copy simply. */
buf_pool->flush_list = shm_info->buf_pool_backup.flush_list;
buf_pool->freed_page_clock = shm_info->buf_pool_backup.freed_page_clock;
buf_pool->free = shm_info->buf_pool_backup.free;
buf_pool->LRU = shm_info->buf_pool_backup.LRU;
buf_pool->LRU_old = shm_info->buf_pool_backup.LRU_old;
buf_pool->LRU_old_len = shm_info->buf_pool_backup.LRU_old_len;
buf_pool->unzip_LRU = shm_info->buf_pool_backup.unzip_LRU;
buf_pool->zip_clean = shm_info->buf_pool_backup.zip_clean;
for (i = 0; i < BUF_BUDDY_SIZES_MAX; i++) {
buf_pool->zip_free[i] = shm_info->buf_pool_backup.zip_free[i];
}
for (i = 0; i < chunk->size; i++, block++) {
if (buf_block_get_state(block)
== BUF_BLOCK_FILE_PAGE) {
ut_d(block->page.in_page_hash = TRUE);
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
buf_page_address_fold(
block->page.space,
block->page.offset),
&block->page);
}
}
for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
b = UT_LIST_GET_NEXT(zip_list, b)) {
ut_ad(!b->in_flush_list);
ut_ad(b->in_LRU_list);
ut_d(b->in_page_hash = TRUE);
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
buf_page_address_fold(b->space, b->offset), b);
}
for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
b = UT_LIST_GET_NEXT(flush_list, b)) {
ut_ad(b->in_flush_list);
ut_ad(b->in_LRU_list);
switch (buf_page_get_state(b)) {
case BUF_BLOCK_ZIP_DIRTY:
ut_d(b->in_page_hash = TRUE);
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
buf_page_address_fold(b->space,
b->offset), b);
break;
case BUF_BLOCK_FILE_PAGE:
/* uncompressed page */
break;
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
ut_error;
break;
}
}
}
}
mutex_exit(&LRU_list_mutex);
rw_lock_x_unlock(&page_hash_latch);
buf_pool_mutex_exit();
@ -1102,6 +1518,34 @@ buf_pool_free(void)
buf_chunk_t* chunk;
buf_chunk_t* chunks;
if (srv_buffer_pool_shm_key) {
buf_shm_info_t* shm_info;
ut_a(buf_pool->n_chunks == 1);
chunk = buf_pool->chunks;
shm_info = chunk->mem;
ut_a((byte*)chunk->blocks == (byte*)chunk->mem + sizeof(buf_shm_info_t));
/* Validate that the shared memory segment has no unrecoverable contents. */
/* Currently, this validation is not needed. */
shm_info->reusable = TRUE;
memcpy(&(shm_info->buf_pool_backup), buf_pool, sizeof(buf_pool_t));
memcpy(&(shm_info->chunk_backup), chunk, sizeof(buf_chunk_t));
if (srv_fast_shutdown < 2) {
if (srv_buffer_pool_shm_checksum) {
shm_info->checksum = ut_fold_binary_32((byte*)chunk->mem + sizeof(buf_shm_info_t),
chunk->mem_size - sizeof(buf_shm_info_t));
} else {
shm_info->checksum = BUF_NO_CHECKSUM_MAGIC;
}
shm_info->clean = TRUE;
}
os_shm_free(chunk->mem, chunk->mem_size);
} else {
chunks = buf_pool->chunks;
chunk = chunks + buf_pool->n_chunks;
@ -1110,10 +1554,13 @@ buf_pool_free(void)
would fail at shutdown. */
os_mem_free_large(chunk->mem, chunk->mem_size);
}
}
mem_free(buf_pool->chunks);
hash_table_free(buf_pool->page_hash);
if (!srv_buffer_pool_shm_key) {
hash_table_free(buf_pool->zip_hash);
}
mem_free(buf_pool);
buf_pool = NULL;
}
@ -1308,6 +1755,11 @@ try_again:
//buf_pool_mutex_enter();
mutex_enter(&LRU_list_mutex);
if (srv_buffer_pool_shm_key) {
/* Cannot support shrink */
goto func_done;
}
shrink_again:
if (buf_pool->n_chunks <= 1) {
@ -1551,6 +2003,11 @@ void
buf_pool_resize(void)
/*=================*/
{
if (srv_buffer_pool_shm_key) {
/* Cannot support resize */
return;
}
//buf_pool_mutex_enter();
mutex_enter(&LRU_list_mutex);
@ -2511,6 +2968,30 @@ wait_until_unfixed:
bytes. */
UNIV_MEM_ASSERT_RW(&block->page, sizeof block->page);
#endif
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
if (mode == BUF_GET_IF_IN_POOL && ibuf_debug) {
/* Try to evict the block from the buffer pool, to use the
insert buffer as much as possible. */
if (buf_LRU_free_block(&block->page, TRUE, NULL)
== BUF_LRU_FREED) {
buf_pool_mutex_exit();
mutex_exit(&block->mutex);
fprintf(stderr,
"innodb_change_buffering_debug evict %u %u\n",
(unsigned) space, (unsigned) offset);
return(NULL);
} else if (buf_flush_page_try(block)) {
fprintf(stderr,
"innodb_change_buffering_debug flush %u %u\n",
(unsigned) space, (unsigned) offset);
guess = block;
goto loop;
}
/* Failed to evict the page; change it directly */
}
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
buf_block_buf_fix_inc(block, file, line);
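One more note on the reuse path in buf_chunk_init() above: when the segment is mapped at a different address than before, the frames are moved with memmove and every pointer stored inside the segment (list links, hash chains, zip.data) is shifted by a constant offset via UT_LIST_OFFSET and HASH_OFFSET. The following standalone C sketch shows the same idea on a plain singly linked list; node_t and relocate_list() are illustrative names, and it assumes the node data has already been moved to the new base.

#include <stddef.h>

typedef struct node_struct {
    struct node_struct* next;   /* intrusive pointer stored inside the segment */
    int                 value;
} node_t;

/* After the segment moved from old_base to new_base, every pointer that
   used to point inside it must be shifted by the same delta. */
static void relocate_list(node_t** head, void* old_base, void* new_base)
{
    ptrdiff_t delta = (char*) new_base - (char*) old_base;
    node_t**  pp    = head;

    while (*pp != NULL) {
        *pp = (node_t*) ((char*) *pp + delta);  /* fix the stored pointer    */
        pp  = &(*pp)->next;                     /* then follow the fixed one */
    }
}

Applied to the backed-up lists in shm_info->buf_pool_backup, this is why the patch keeps a frame offset separate from a block-descriptor offset: pointers into the two regions move by different deltas, so each stored pointer is shifted by whichever delta matches the region it points into.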

buf/buf0flu.c (80)

@ -1064,6 +1064,82 @@ buf_flush_write_block_low(
}
}
# if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/********************************************************************//**
Writes a flushable page asynchronously from the buffer pool to a file.
NOTE: buf_pool_mutex and block->mutex must be held upon entering this
function, and they will be released by this function after flushing.
This is loosely based on buf_flush_batch() and buf_flush_page().
@return TRUE if the page was flushed and the mutexes released */
UNIV_INTERN
ibool
buf_flush_page_try(
/*===============*/
buf_block_t* block) /*!< in/out: buffer control block */
{
ut_ad(buf_pool_mutex_own());
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(mutex_own(&block->mutex));
if (!buf_flush_ready_for_flush(&block->page, BUF_FLUSH_LRU)) {
return(FALSE);
}
if (buf_pool->n_flush[BUF_FLUSH_LRU] > 0
|| buf_pool->init_flush[BUF_FLUSH_LRU]) {
/* There is already a flush batch of the same type running */
return(FALSE);
}
buf_pool->init_flush[BUF_FLUSH_LRU] = TRUE;
buf_page_set_io_fix(&block->page, BUF_IO_WRITE);
buf_page_set_flush_type(&block->page, BUF_FLUSH_LRU);
if (buf_pool->n_flush[BUF_FLUSH_LRU]++ == 0) {
os_event_reset(buf_pool->no_flush[BUF_FLUSH_LRU]);
}
/* VERY IMPORTANT:
Because any thread may call the LRU flush, even when owning
locks on pages, to avoid deadlocks, we must make sure that the
s-lock is acquired on the page without waiting: this is
accomplished because buf_flush_ready_for_flush() must hold,
and that requires the page not to be bufferfixed. */
rw_lock_s_lock_gen(&block->lock, BUF_IO_WRITE);
/* Note that the s-latch is acquired before releasing the
buf_pool mutex: this ensures that the latch is acquired
immediately. */
mutex_exit(&block->mutex);
buf_pool_mutex_exit();
/* Even though block is not protected by any mutex at this
point, it is safe to access block, because it is io_fixed and
oldest_modification != 0. Thus, it cannot be relocated in the
buffer pool or removed from flush_list or LRU_list. */
buf_flush_write_block_low(&block->page);
buf_pool_mutex_enter();
buf_pool->init_flush[BUF_FLUSH_LRU] = FALSE;
if (buf_pool->n_flush[BUF_FLUSH_LRU] == 0) {
/* The running flush batch has ended */
os_event_set(buf_pool->no_flush[BUF_FLUSH_LRU]);
}
buf_pool_mutex_exit();
buf_flush_buffered_writes();
return(TRUE);
}
# endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
/********************************************************************//**
Writes a flushable page asynchronously from the buffer pool to a file.
NOTE: in simulated aio we must call
@ -1735,9 +1811,9 @@ buf_flush_validate_low(void)
ut_a(om > 0);
if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
buf_page_t* rpage;
ut_a(rnode);
buf_page_t* rpage = *rbt_value(buf_page_t*,
rnode);
rpage = *rbt_value(buf_page_t*, rnode);
ut_a(rpage);
ut_a(rpage == bpage);
rnode = rbt_next(buf_pool->flush_rbt, rnode);
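Returning to buf_flush_page_try() above: it reuses the flush-batch bookkeeping that already exists for LRU flushes, namely a counter of in-flight writes plus an event that is reset when the first write of a batch starts and set again when the counter drops back to zero. A minimal sketch of that counter-plus-event pattern, using POSIX threads rather than InnoDB's os_event_t; all names here are illustrative, not the InnoDB API.

#include <pthread.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t  no_flush;   /* signalled when n_flush drops to zero */
    unsigned        n_flush;    /* pages currently being written        */
} flush_state_t;

/* Called just before a page write is issued. */
static void flush_begin(flush_state_t* fs)
{
    pthread_mutex_lock(&fs->mutex);
    fs->n_flush++;              /* corresponds to buf_pool->n_flush[type]++ */
    pthread_mutex_unlock(&fs->mutex);
}

/* Called from the write completion path. */
static void flush_end(flush_state_t* fs)
{
    pthread_mutex_lock(&fs->mutex);
    if (--fs->n_flush == 0) {
        /* the running flush batch has ended; wake anyone waiting for it */
        pthread_cond_broadcast(&fs->no_flush);
    }
    pthread_mutex_unlock(&fs->mutex);
}

/* Block until no write of this type is in progress. */
static void flush_wait_until_drained(flush_state_t* fs)
{
    pthread_mutex_lock(&fs->mutex);
    while (fs->n_flush > 0) {
        pthread_cond_wait(&fs->no_flush, &fs->mutex);
    }
    pthread_mutex_unlock(&fs->mutex);
}

With os_event_t the explicit os_event_reset() when the counter leaves zero matters because waiters block on the event itself; with a condition variable the while-loop predicate plays that role.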

buf/buf0lru.c (2)

@ -2265,7 +2265,7 @@ buf_LRU_file_restore(void)
ulint req = 0;
ibool terminated = FALSE;
ibool ret = FALSE;
dump_record_t* records;
dump_record_t* records = NULL;
ulint size;
ulint size_high;
ulint length;

dict/dict0crea.c (2)

@ -894,7 +894,7 @@ dict_truncate_index_tree(
appropriate field in the SYS_INDEXES record: this mini-transaction
marks the B-tree totally truncated */
btr_page_get(space, zip_size, root_page_no, RW_X_LATCH, mtr);
btr_block_get(space, zip_size, root_page_no, RW_X_LATCH, mtr);
btr_free_root(space, zip_size, root_page_no, mtr);
create:

dict/dict0dict.c (170)

@ -81,9 +81,18 @@ UNIV_INTERN rw_lock_t dict_operation_lock;
/** Identifies generated InnoDB foreign key names */
static char dict_ibfk[] = "_ibfk_";
/** array of mutexes protecting dict_index_t::stat_n_diff_key_vals[] */
#define DICT_INDEX_STAT_MUTEX_SIZE 32
static mutex_t dict_index_stat_mutex[DICT_INDEX_STAT_MUTEX_SIZE];
/** array of rw locks protecting
dict_table_t::stat_initialized
dict_table_t::stat_n_rows (*)
dict_table_t::stat_clustered_index_size
dict_table_t::stat_sum_of_other_index_sizes
dict_table_t::stat_modified_counter (*)
dict_table_t::indexes*::stat_n_diff_key_vals[]
dict_table_t::indexes*::stat_index_size
dict_table_t::indexes*::stat_n_leaf_pages
(*) those are not always protected for performance reasons */
#define DICT_TABLE_STATS_LATCHES_SIZE 64
static rw_lock_t dict_table_stats_latches[DICT_TABLE_STATS_LATCHES_SIZE];
/*******************************************************************//**
Tries to find column names for the index and sets the col field of the
@ -244,43 +253,65 @@ dict_mutex_exit_for_mysql(void)
mutex_exit(&(dict_sys->mutex));
}
/** Get the mutex that protects index->stat_n_diff_key_vals[] */
#define GET_INDEX_STAT_MUTEX(index) \
(&dict_index_stat_mutex[ut_fold_dulint(index->id) \
% DICT_INDEX_STAT_MUTEX_SIZE])
/** Get the latch that protects the stats of a given table */
#define GET_TABLE_STATS_LATCH(table) \
(&dict_table_stats_latches[ut_fold_dulint(table->id) \
% DICT_TABLE_STATS_LATCHES_SIZE])
/**********************************************************************//**
Lock the appropriate mutex to protect index->stat_n_diff_key_vals[].
index->id is used to pick the right mutex and it should not change
before dict_index_stat_mutex_exit() is called on this index. */
Lock the appropriate latch to protect a given table's statistics.
table->id is used to pick the corresponding latch from a global array of
latches. */
UNIV_INTERN
void
dict_index_stat_mutex_enter(
/*========================*/
const dict_index_t* index) /*!< in: index */
dict_table_stats_lock(
/*==================*/
const dict_table_t* table, /*!< in: table */
ulint latch_mode) /*!< in: RW_S_LATCH or
RW_X_LATCH */
{
ut_ad(index != NULL);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
ut_ad(index->cached);
ut_ad(!index->to_be_dropped);
ut_ad(table != NULL);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
mutex_enter(GET_INDEX_STAT_MUTEX(index));
switch (latch_mode) {
case RW_S_LATCH:
rw_lock_s_lock(GET_TABLE_STATS_LATCH(table));
break;
case RW_X_LATCH:
rw_lock_x_lock(GET_TABLE_STATS_LATCH(table));
break;
case RW_NO_LATCH:
/* fall through */
default:
ut_error;
}
}
/**********************************************************************//**
Unlock the appropriate mutex that protects index->stat_n_diff_key_vals[]. */
Unlock the latch that has been locked by dict_table_stats_lock() */
UNIV_INTERN
void
dict_index_stat_mutex_exit(
/*=======================*/
const dict_index_t* index) /*!< in: index */
dict_table_stats_unlock(
/*====================*/
const dict_table_t* table, /*!< in: table */
ulint latch_mode) /*!< in: RW_S_LATCH or
RW_X_LATCH */
{
ut_ad(index != NULL);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
ut_ad(index->cached);
ut_ad(!index->to_be_dropped);
ut_ad(table != NULL);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
mutex_exit(GET_INDEX_STAT_MUTEX(index));
switch (latch_mode) {
case RW_S_LATCH:
rw_lock_s_unlock(GET_TABLE_STATS_LATCH(table));
break;
case RW_X_LATCH:
rw_lock_x_unlock(GET_TABLE_STATS_LATCH(table));
break;
case RW_NO_LATCH:
/* fall through */
default:
ut_error;
}
}
/********************************************************************//**
@ -671,8 +702,8 @@ dict_init(void)
mutex_create(&dict_foreign_err_mutex, SYNC_ANY_LATCH);
for (i = 0; i < DICT_INDEX_STAT_MUTEX_SIZE; i++) {
mutex_create(&dict_index_stat_mutex[i], SYNC_INDEX_TREE);
for (i = 0; i < DICT_TABLE_STATS_LATCHES_SIZE; i++) {
rw_lock_create(&dict_table_stats_latches[i], SYNC_INDEX_TREE);
}
}
@ -704,13 +735,12 @@ dict_table_get(
mutex_exit(&(dict_sys->mutex));
if (table != NULL) {
if (!table->stat_initialized && !table->is_corrupt) {
/* If table->ibd_file_missing == TRUE, this will
print an error message and return without doing
anything. */
dict_update_statistics(table, FALSE);
}
if (table != NULL && !table->is_corrupt) {
/* If table->ibd_file_missing == TRUE, this will
print an error message and return without doing
anything. */
dict_update_statistics(table, TRUE /* only update stats
if they have not been initialized */, FALSE);
}
return(table);
@ -4366,11 +4396,9 @@ next_rec:
btr_pcur_close(&pcur);
mtr_commit(&mtr);
dict_index_stat_mutex_enter(index);
for (i = 0; i <= n_cols; i++) {
index->stat_n_diff_key_vals[i] = stat_n_diff_key_vals_tmp[i];
}
dict_index_stat_mutex_exit(index);
}
/*===========================================*/
@ -4424,11 +4452,9 @@ dict_store_statistics(
n_cols = dict_index_get_n_unique(index);
stat_n_diff_key_vals_tmp = mem_heap_zalloc(heap, (n_cols + 1) * sizeof(ib_int64_t));
dict_index_stat_mutex_enter(index);
for (i = 0; i <= n_cols; i++) {
stat_n_diff_key_vals_tmp[i] = index->stat_n_diff_key_vals[i];
}
dict_index_stat_mutex_exit(index);
sys_stats = dict_sys->sys_stats;
sys_index = UT_LIST_GET_FIRST(sys_stats->indexes);
@ -4504,12 +4530,13 @@ Calculates new estimates for table and index statistics. The statistics
are used in query optimization. */
UNIV_INTERN
void
dict_update_statistics_low(
/*=======================*/
dict_update_statistics(
/*===================*/
dict_table_t* table, /*!< in/out: table */
ibool has_dict_mutex __attribute__((unused)),
/*!< in: TRUE if the caller has the
dictionary mutex */
ibool only_calc_if_missing_stats, /*!< in: only
update/recalc the stats if they have
not been initialized yet, otherwise
do nothing */
ibool sync) /*!< in: TRUE if must update SYS_STATS */
{
dict_index_t* index;
@ -4528,6 +4555,8 @@ dict_update_statistics_low(
}
if (srv_use_sys_stats_table && !((table->flags >> DICT_TF2_SHIFT) & DICT_TF2_TEMPORARY) && !sync) {
dict_table_stats_lock(table, RW_X_LATCH);
/* reload statistics from SYS_STATS table */
if (dict_reload_statistics(table, &sum_of_index_sizes)) {
/* success */
@ -4537,6 +4566,8 @@ dict_update_statistics_low(
#endif
goto end;
}
dict_table_stats_unlock(table, RW_X_LATCH);
}
#ifdef UNIV_DEBUG
fprintf(stderr, "InnoDB: DEBUG: update_statistics for %s.\n",
@ -4555,6 +4586,12 @@ dict_update_statistics_low(
return;
}
dict_table_stats_lock(table, RW_X_LATCH);
if (only_calc_if_missing_stats && table->stat_initialized) {
dict_table_stats_unlock(table, RW_X_LATCH);
return;
}
do {
if (table->is_corrupt) {
@ -4610,13 +4647,9 @@ dict_update_statistics_low(
end:
index = dict_table_get_first_index(table);
dict_index_stat_mutex_enter(index);
table->stat_n_rows = index->stat_n_diff_key_vals[
dict_index_get_n_unique(index)];
dict_index_stat_mutex_exit(index);
table->stat_clustered_index_size = index->stat_index_size;
table->stat_sum_of_other_index_sizes = sum_of_index_sizes
@ -4625,19 +4658,8 @@ end:
table->stat_initialized = TRUE;
table->stat_modified_counter = 0;
}
/*********************************************************************//**
Calculates new estimates for table and index statistics. The statistics
are used in query optimization. */
UNIV_INTERN
void
dict_update_statistics(
/*===================*/
dict_table_t* table, /*!< in/out: table */
ibool sync)
{
dict_update_statistics_low(table, FALSE, sync);
dict_table_stats_unlock(table, RW_X_LATCH);
}
/**********************************************************************//**
@ -4717,8 +4739,9 @@ dict_table_print_low(
ut_ad(mutex_own(&(dict_sys->mutex)));
if (srv_stats_auto_update)
dict_update_statistics_low(table, TRUE, FALSE);
dict_update_statistics(table, FALSE /* update even if initialized */, FALSE);
dict_table_stats_lock(table, RW_S_LATCH);
fprintf(stderr,
"--------------------------------------\n"
@ -4747,6 +4770,8 @@ dict_table_print_low(
index = UT_LIST_GET_NEXT(indexes, index);
}
dict_table_stats_unlock(table, RW_S_LATCH);
foreign = UT_LIST_GET_FIRST(table->foreign_list);
while (foreign != NULL) {
@ -4795,8 +4820,6 @@ dict_index_print_low(
ut_ad(mutex_own(&(dict_sys->mutex)));
dict_index_stat_mutex_enter(index);
if (index->n_user_defined_cols > 0) {
n_vals = index->stat_n_diff_key_vals[
index->n_user_defined_cols];
@ -4804,8 +4827,6 @@ dict_index_print_low(
n_vals = index->stat_n_diff_key_vals[1];
}
dict_index_stat_mutex_exit(index);
fprintf(stderr,
" INDEX: name %s, id %lu %lu, fields %lu/%lu,"
" uniq %lu, type %lu\n"
@ -5149,7 +5170,8 @@ void
dict_table_replace_index_in_foreign_list(
/*=====================================*/
dict_table_t* table, /*!< in/out: table */
dict_index_t* index) /*!< in: index to be replaced */
dict_index_t* index, /*!< in: index to be replaced */
const trx_t* trx) /*!< in: transaction handle */
{
dict_foreign_t* foreign;
@ -5160,7 +5182,13 @@ dict_table_replace_index_in_foreign_list(
if (foreign->foreign_index == index) {
dict_index_t* new_index
= dict_foreign_find_equiv_index(foreign);
ut_a(new_index);
/* There must exist an alternative index if
check_foreigns (FOREIGN_KEY_CHECKS) is on,
since ha_innobase::prepare_drop_index had done
the check before we reach here. */
ut_a(new_index || !trx->check_foreigns);
foreign->foreign_index = new_index;
}
@ -5294,8 +5322,8 @@ dict_close(void)
mem_free(dict_sys);
dict_sys = NULL;
for (i = 0; i < DICT_INDEX_STAT_MUTEX_SIZE; i++) {
mutex_free(&dict_index_stat_mutex[i]);
for (i = 0; i < DICT_TABLE_STATS_LATCHES_SIZE; i++) {
rw_lock_free(&dict_table_stats_latches[i]);
}
}
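The central change in dict0dict.c above is the switch from per-call index-stat mutexes to a fixed array of rw-locks chosen by hashing the table id: readers such as dict_table_print_low() take the latch shared, dict_update_statistics() recalculates under an exclusive latch, and two tables only contend when their ids land in the same slot. A minimal sketch of that partitioned-latch pattern with POSIX rw-locks; the 64-slot size mirrors the patch, but the code is an illustration, not the InnoDB implementation.

#include <pthread.h>
#include <stdint.h>

#define STATS_LATCHES 64                  /* like DICT_TABLE_STATS_LATCHES_SIZE */

static pthread_rwlock_t stats_latches[STATS_LATCHES];

static void stats_latches_init(void)
{
    int i;
    for (i = 0; i < STATS_LATCHES; i++) {
        pthread_rwlock_init(&stats_latches[i], NULL);
    }
}

/* Pick the latch that protects the statistics of a given table id. */
static pthread_rwlock_t* stats_latch_for(uint64_t table_id)
{
    return &stats_latches[table_id % STATS_LATCHES];
}

/* Readers (e.g. printing stats) take the latch shared ... */
static void table_stats_read_lock(uint64_t table_id)
{
    pthread_rwlock_rdlock(stats_latch_for(table_id));
}

/* ... while recalculation takes it exclusive, as RW_X_LATCH does above. */
static void table_stats_write_lock(uint64_t table_id)
{
    pthread_rwlock_wrlock(stats_latch_for(table_id));
}

static void table_stats_unlock(uint64_t table_id)
{
    pthread_rwlock_unlock(stats_latch_for(table_id));
}

The patch folds the dulint table id with ut_fold_dulint(); the sketch uses a plain modulo, which is the same partitioning idea.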

dict/dict0load.c (9)

@ -221,8 +221,9 @@ loop:
/* The table definition was corrupt if there
is no index */
if (srv_stats_auto_update && dict_table_get_first_index(table)) {
dict_update_statistics_low(table, TRUE, FALSE);
if (dict_table_get_first_index(table)) {
dict_update_statistics(table, FALSE /* update
even if initialized */, FALSE);
}
dict_table_print_low(table);
@ -1023,13 +1024,13 @@ err_exit:
if (err != DB_SUCCESS) {
dict_table_remove_from_cache(table);
table = NULL;
} else {
table->fk_max_recusive_level = 0;
}
} else if (!srv_force_recovery) {
dict_table_remove_from_cache(table);
table = NULL;
}
table->fk_max_recusive_level = 0;
#if 0
if (err != DB_SUCCESS && table != NULL) {

fil/fil0fil.c (108)

@ -336,14 +336,15 @@ fil_get_space_id_for_table(
/*******************************************************************//**
Frees a space object from the tablespace memory cache. Closes the files in
the chain but does not delete them. There must not be any pending i/o's or
flushes on the files. */
flushes on the files.
@return TRUE on success */
static
ibool
fil_space_free(
/*===========*/
/* out: TRUE if success */
ulint id, /* in: space id */
ibool own_mutex);/* in: TRUE if own system->mutex */
ulint id, /* in: space id */
ibool x_latched); /* in: TRUE if caller has space->latch
in X mode */
/********************************************************************//**
Reads data from a space to a buffer. Remember that the possible incomplete
blocks at the end of file are ignored: they are not taken into account when
@ -617,7 +618,7 @@ fil_node_create(
UT_LIST_ADD_LAST(chain, space->chain, node);
if (id < SRV_LOG_SPACE_FIRST_ID && fil_system->max_assigned_id < id) {
if (id < SRV_EXTRA_SYS_SPACE_FIRST_ID && fil_system->max_assigned_id < id) {
fil_system->max_assigned_id = id;
}
@ -1130,6 +1131,7 @@ try_again:
space = fil_space_get_by_name(name);
if (UNIV_LIKELY_NULL(space)) {
ibool success;
ulint namesake_id;
ut_print_timestamp(stderr);
@ -1168,9 +1170,10 @@ try_again:
namesake_id = space->id;
mutex_exit(&fil_system->mutex);
success = fil_space_free(namesake_id, FALSE);
ut_a(success);
fil_space_free(namesake_id, FALSE);
mutex_exit(&fil_system->mutex);
goto try_again;
}
@ -1205,6 +1208,7 @@ try_again:
space->mark = FALSE;
if (UNIV_LIKELY(purpose == FIL_TABLESPACE && !recv_recovery_on)
&& UNIV_UNLIKELY(id < SRV_EXTRA_SYS_SPACE_FIRST_ID)
&& UNIV_UNLIKELY(id > fil_system->max_assigned_id)) {
if (!fil_system->space_id_reuse_warned) {
fil_system->space_id_reuse_warned = TRUE;
@ -1290,7 +1294,7 @@ fil_assign_new_space_id(
(ulong) SRV_LOG_SPACE_FIRST_ID);
}
success = (id < SRV_LOG_SPACE_FIRST_ID);
success = (id < SRV_EXTRA_SYS_SPACE_FIRST_ID);
if (success) {
*space_id = fil_system->max_assigned_id = id;
@ -1323,15 +1327,14 @@ fil_space_free(
/*===========*/
/* out: TRUE if success */
ulint id, /* in: space id */
ibool own_mutex) /* in: TRUE if own system->mutex */
ibool x_latched) /* in: TRUE if caller has space->latch
in X mode */
{
fil_space_t* space;
fil_space_t* namespace;
fil_node_t* fil_node;
if (!own_mutex) {
mutex_enter(&fil_system->mutex);
}
ut_ad(mutex_own(&fil_system->mutex));
space = fil_space_get_by_id(id);
@ -1342,8 +1345,6 @@ fil_space_free(
" from the cache but\n"
"InnoDB: it is not there.\n", (ulong) id);
mutex_exit(&fil_system->mutex);
return(FALSE);
}
@ -1378,8 +1379,8 @@ fil_space_free(
ut_a(0 == UT_LIST_GET_LEN(space->chain));
if (!own_mutex) {
mutex_exit(&fil_system->mutex);
if (x_latched) {
rw_lock_x_unlock(&space->latch);
}
rw_lock_free(&(space->latch));
@ -1626,25 +1627,27 @@ fil_close_all_files(void)
/*=====================*/
{
fil_space_t* space;
fil_node_t* node;
mutex_enter(&fil_system->mutex);
space = UT_LIST_GET_FIRST(fil_system->space_list);
while (space != NULL) {
fil_node_t* node;
fil_space_t* prev_space = space;
node = UT_LIST_GET_FIRST(space->chain);
for (node = UT_LIST_GET_FIRST(space->chain);
node != NULL;
node = UT_LIST_GET_NEXT(chain, node)) {
while (node != NULL) {
if (node->open) {
fil_node_close_file(node, fil_system);
}
node = UT_LIST_GET_NEXT(chain, node);
}
space = UT_LIST_GET_NEXT(space_list, space);
fil_space_free(prev_space->id, TRUE);
fil_space_free(prev_space->id, FALSE);
}
mutex_exit(&fil_system->mutex);
@ -1666,6 +1669,10 @@ fil_set_max_space_id_if_bigger(
ut_error;
}
if (max_id >= SRV_EXTRA_SYS_SPACE_FIRST_ID) {
return;
}
mutex_enter(&fil_system->mutex);
if (fil_system->max_assigned_id < max_id) {
@ -1684,6 +1691,7 @@ static
ulint
fil_write_lsn_and_arch_no_to_file(
/*==============================*/
ulint space_id,
ulint sum_of_sizes, /*!< in: combined size of previous files
in space, in database pages */
ib_uint64_t lsn, /*!< in: lsn to write */
@ -1693,14 +1701,16 @@ fil_write_lsn_and_arch_no_to_file(
byte* buf1;
byte* buf;
ut_a(trx_sys_sys_space(space_id));
buf1 = mem_alloc(2 * UNIV_PAGE_SIZE);
buf = ut_align(buf1, UNIV_PAGE_SIZE);
fil_read(TRUE, 0, 0, sum_of_sizes, 0, UNIV_PAGE_SIZE, buf, NULL);
fil_read(TRUE, space_id, 0, sum_of_sizes, 0, UNIV_PAGE_SIZE, buf, NULL);
mach_write_ull(buf + FIL_PAGE_FILE_FLUSH_LSN, lsn);
fil_write(TRUE, 0, 0, sum_of_sizes, 0, UNIV_PAGE_SIZE, buf, NULL);
fil_write(TRUE, space_id, 0, sum_of_sizes, 0, UNIV_PAGE_SIZE, buf, NULL);
mem_free(buf1);
@ -1736,7 +1746,7 @@ fil_write_flushed_lsn_to_data_files(
always open. */
if (space->purpose == FIL_TABLESPACE
&& space->id == 0) {
&& trx_sys_sys_space(space->id)) {
sum_of_sizes = 0;
node = UT_LIST_GET_FIRST(space->chain);
@ -1744,7 +1754,7 @@ fil_write_flushed_lsn_to_data_files(
mutex_exit(&fil_system->mutex);
err = fil_write_lsn_and_arch_no_to_file(
sum_of_sizes, lsn, arch_log_no);
space->id, sum_of_sizes, lsn, arch_log_no);
if (err != DB_SUCCESS) {
return(err);
@ -2264,6 +2274,19 @@ try_again:
path = mem_strdup(space->name);
mutex_exit(&fil_system->mutex);
/* Important: We rely on the data dictionary mutex to ensure
that a race is not possible here. It should serialize the tablespace
drop/free. We acquire an X latch only to avoid a race condition
when accessing the tablespace instance via:
fsp_get_available_space_in_free_extents().
There our main motivation is to reduce the contention on the
dictionary mutex. */
rw_lock_x_lock(&space->latch);
#ifndef UNIV_HOTBACKUP
/* Invalidate in the buffer pool all pages belonging to the
tablespace. Since we have set space->is_being_deleted = TRUE, readahead
@ -2276,7 +2299,11 @@ try_again:
#endif
/* printf("Deleting tablespace %s id %lu\n", space->name, id); */
success = fil_space_free(id, FALSE);
mutex_enter(&fil_system->mutex);
success = fil_space_free(id, TRUE);
mutex_exit(&fil_system->mutex);
if (success) {
success = os_file_delete(path);
@ -2284,6 +2311,8 @@ try_again:
if (!success) {
success = os_file_delete_if_exists(path);
}
} else {
rw_lock_x_unlock(&space->latch);
}
if (success) {
@ -2311,6 +2340,31 @@ try_again:
return(FALSE);
}
/*******************************************************************//**
Returns TRUE if a single-table tablespace is being deleted.
@return TRUE if being deleted */
UNIV_INTERN
ibool
fil_tablespace_is_being_deleted(
/*============================*/
ulint id) /*!< in: space id */
{
fil_space_t* space;
ibool is_being_deleted;
mutex_enter(&fil_system->mutex);
space = fil_space_get_by_id(id);
ut_a(space != NULL);
is_being_deleted = space->is_being_deleted;
mutex_exit(&fil_system->mutex);
return(is_being_deleted);
}
#ifndef UNIV_HOTBACKUP
/*******************************************************************//**
Discards a single-table tablespace. The tablespace must be cached in the
@ -5339,7 +5393,7 @@ fil_page_get_type(
return(mach_read_from_2(page + FIL_PAGE_TYPE));
}
/********************************************************************
/****************************************************************//**
Initializes the tablespace memory cache. */
UNIV_INTERN
void
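The fil0fil.c changes above rearrange who holds what while a tablespace is dropped: fil_space_free() now expects the caller to hold fil_system->mutex, and fil_delete_tablespace() first X-latches space->latch so that a concurrent reader (see fsp_get_available_space_in_free_extents() in the next file) cannot have the instance freed underneath it. Below is a generic sketch of that ordering: latch the object, unlink it from the cache under the cache mutex, and release the latch only on the failure path or once the object is gone. All types and names are illustrative, not the InnoDB ones.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct space_struct {
    unsigned long        id;
    pthread_rwlock_t     latch;     /* like fil_space_t::latch, assumed initialized */
    struct space_struct* next;
} space_t;

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER; /* like fil_system->mutex */
static space_t*        cache_head  = NULL;

/* Caller must hold cache_mutex; returns 1 if the space was found and unlinked. */
static int space_unlink_locked(space_t* space)
{
    space_t** pp = &cache_head;
    while (*pp != NULL && *pp != space) {
        pp = &(*pp)->next;
    }
    if (*pp == NULL) {
        return 0;                    /* already removed by someone else */
    }
    *pp = space->next;
    return 1;
}

static int delete_tablespace(space_t* space)
{
    int found;

    /* 1. Latch the instance exclusively so concurrent readers of its
          contents are blocked until the delete is complete. */
    pthread_rwlock_wrlock(&space->latch);

    /* 2. Unlink it from the cache while holding the cache mutex, mirroring
          fil_space_free() being called under fil_system->mutex. */
    pthread_mutex_lock(&cache_mutex);
    found = space_unlink_locked(space);
    pthread_mutex_unlock(&cache_mutex);

    if (!found) {
        pthread_rwlock_unlock(&space->latch);   /* failure path: just let go */
        return -1;
    }

    /* 3. The object left the cache while we held its latch, so no new reader
          can find it; now the datafile and the latch itself can go away. */
    pthread_rwlock_unlock(&space->latch);
    pthread_rwlock_destroy(&space->latch);
    printf("deleting files of space %lu\n", space->id);
    free(space);
    return 0;
}

In the patch the failure path is fil_space_free() not finding the id, in which case fil_delete_tablespace() releases the X latch it took, exactly as the sketch does.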

fsp/fsp0fsp.c (50)

@ -3130,13 +3130,63 @@ fsp_get_available_space_in_free_extents(
ut_ad(!mutex_own(&kernel_mutex));
/* The convoluted mutex acquire is to overcome latching order
issues: The problem is that the fil_mutex is at a lower level
than the tablespace latch and the buffer pool mutex. We have to
first prevent any operations on the file system by acquiring the
dictionary mutex. Then acquire the tablespace latch to obey the
latching order and then release the dictionary mutex. That way we
ensure that the tablespace instance can't be freed while we are
examining its contents (see fil_space_free()).
However, there is one further complication, we release the fil_mutex
when we need to invalidate the pages in the buffer pool and we
reacquire the fil_mutex when deleting and freeing the tablespace
instance in fil0fil.c. Here we need to account for that situation
too. */
mutex_enter(&dict_sys->mutex);
/* At this stage there is no guarantee that the tablespace even
exists in the cache. */
if (fil_tablespace_deleted_or_being_deleted_in_mem(space, -1)) {
mutex_exit(&dict_sys->mutex);
return(ULLINT_UNDEFINED);
}
mtr_start(&mtr);
latch = fil_space_get_latch(space, &flags);
/* This should ensure that the tablespace instance can't be freed
by another thread. However, the tablespace pages can still be freed
from the buffer pool. We need to check for that again. */
zip_size = dict_table_flags_to_zip_size(flags);
mtr_x_lock(latch, &mtr);
mutex_exit(&dict_sys->mutex);
/* At this point it is possible for the tablespace to be deleted and
its pages removed from the buffer pool. We need to check for that
situation. However, the tablespace instance can't be deleted because
our latching above should ensure that. */
if (fil_tablespace_is_being_deleted(space)) {
mtr_commit(&mtr);
return(ULLINT_UNDEFINED);
}
/* From here on even if the user has dropped the tablespace, the
pages _must_ still exist in the buffer pool and the tablespace
instance _must_ be in the file system hash table. */
space_header = fsp_get_space_header(space, zip_size, &mtr);
size = mtr_read_ulint(space_header + FSP_SIZE, MLOG_4BYTES, &mtr);
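The comment added to fsp_get_available_space_in_free_extents() above describes a hand-over: the dictionary mutex pins the tablespace so it cannot be freed, the tablespace latch is then taken in the proper latching order, the mutex is dropped, and a deletion flag is rechecked afterwards because pages may still have been evicted in the meantime. A compact standalone sketch of that pin-then-latch pattern; pin_mutex, object_t and the flag are illustrative names, not InnoDB's.

#include <pthread.h>

typedef struct {
    pthread_rwlock_t latch;            /* like the tablespace latch           */
    int              is_being_deleted; /* like fil_space_t::is_being_deleted  */
} object_t;

static pthread_mutex_t pin_mutex = PTHREAD_MUTEX_INITIALIZER; /* like dict_sys->mutex */

/* Returns 0 with obj->latch held in X mode on success, -1 if deleted. */
static int pin_then_latch(object_t* obj)
{
    /* 1. The pin mutex guarantees nobody frees obj while we latch it. */
    pthread_mutex_lock(&pin_mutex);
    if (obj->is_being_deleted) {
        pthread_mutex_unlock(&pin_mutex);
        return -1;
    }

    /* 2. Take the lower-level latch in the proper latching order ... */
    pthread_rwlock_wrlock(&obj->latch);

    /* 3. ... then drop the pin. obj can no longer be freed because we hold
          its latch, but its deletion flag may have flipped meanwhile. */
    pthread_mutex_unlock(&pin_mutex);

    if (obj->is_being_deleted) {
        pthread_rwlock_unlock(&obj->latch);
        return -1;
    }
    return 0;
}

In the patch the recheck is fil_tablespace_is_being_deleted(space), and returning ULLINT_UNDEFINED there corresponds to the early-out in the sketch.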

ha/hash0hash.c (64)

@ -127,6 +127,70 @@ hash_create(
return(table);
}
/*************************************************************//**
Calculates the memory needed to create a hash table with approximately n cells.
@return number of bytes needed */
UNIV_INTERN
ulint
hash_create_needed(
/*===============*/
ulint n)
{
ulint prime;
ulint offset;
prime = ut_find_prime(n);
offset = (sizeof(hash_table_t) + 7) / 8;
offset *= 8;
return(offset + sizeof(hash_cell_t) * prime);
}
UNIV_INTERN
void
hash_create_init(
/*=============*/
hash_table_t* table,
ulint n)
{
ulint prime;
ulint offset;
prime = ut_find_prime(n);
offset = (sizeof(hash_table_t) + 7) / 8;
offset *= 8;
table->array = (hash_cell_t*)(((byte*)table) + offset);
table->n_cells = prime;
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
table->adaptive = FALSE;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
table->n_mutexes = 0;
table->mutexes = NULL;
table->heaps = NULL;
table->heap = NULL;
ut_d(table->magic_n = HASH_TABLE_MAGIC_N);
/* Initialize the cell array */
hash_table_clear(table);
}
UNIV_INTERN
void
hash_create_reuse(
/*==============*/
hash_table_t* table)
{
ulint offset;
offset = (sizeof(hash_table_t) + 7) / 8;
offset *= 8;
table->array = (hash_cell_t*)(((byte*)table) + offset);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
}
/*************************************************************//**
Frees a hash table. */
UNIV_INTERN
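The three functions added to hash0hash.c let a caller place a hash table inside memory it already owns, which is how the zip_hash ends up in the shared memory segment: hash_create_needed() sizes the region, hash_create_init() lays the header and the cell array out with the array starting on an 8-byte boundary, and hash_create_reuse() only re-derives the array pointer after a restart, since the cells themselves survive in the segment. A small standalone C sketch of the same layout arithmetic; the structs and the prime helper are simplified stand-ins for hash_table_t, hash_cell_t and ut_find_prime().

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { void* node; } cell_t;        /* stand-in for hash_cell_t  */
typedef struct {                              /* stand-in for hash_table_t */
    cell_t* array;
    size_t  n_cells;
} table_t;

/* Toy replacement for ut_find_prime(): next odd number >= n. */
static size_t next_odd(size_t n) { return n | 1; }

/* Bytes needed for a table with about n cells, header rounded up to 8. */
static size_t table_bytes_needed(size_t n)
{
    size_t offset = (sizeof(table_t) + 7) / 8 * 8;  /* 8-byte align the array */
    return offset + sizeof(cell_t) * next_odd(n);
}

/* First boot: lay out header + cell array inside caller-provided memory. */
static void table_init_in_place(table_t* t, size_t n)
{
    size_t offset = (sizeof(table_t) + 7) / 8 * 8;
    t->array   = (cell_t*) ((char*) t + offset);
    t->n_cells = next_odd(n);
    memset(t->array, 0, t->n_cells * sizeof(cell_t));
}

/* Restart: the contents survived; only the array pointer must be rebuilt
   relative to wherever the segment is mapped now. */
static void table_reuse_in_place(table_t* t)
{
    size_t offset = (sizeof(table_t) + 7) / 8 * 8;
    t->array = (cell_t*) ((char*) t + offset);
}

int main(void)
{
    size_t   n   = 1000;
    void*    mem = malloc(table_bytes_needed(n));  /* would be the shm region */
    table_t* t   = (table_t*) mem;
    table_init_in_place(t, n);
    table_reuse_in_place(t);                       /* what a restart would do */
    free(mem);
    return 0;
}

In the patch, buf_chunk_init() calls hash_create_needed() to reserve zip_hash_mem_size at the tail of the segment and then picks hash_create_init() or hash_create_reuse() depending on whether the segment is new.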

handler/ha_innodb.cc (468)

@ -152,6 +152,7 @@ static ulong innobase_read_io_threads;
static ulong innobase_write_io_threads;
static ulong innobase_page_size;
static ulong innobase_log_block_size;
static my_bool innobase_thread_concurrency_timer_based;
static long long innobase_buffer_pool_size, innobase_log_file_size;
@ -197,6 +198,7 @@ static my_bool innobase_rollback_on_timeout = FALSE;
static my_bool innobase_create_status_file = FALSE;
static my_bool innobase_stats_on_metadata = TRUE;
static my_bool innobase_use_sys_stats_table = FALSE;
static my_bool innobase_buffer_pool_shm_checksum = TRUE;
static char* internal_innobase_data_file_path = NULL;
@ -2118,6 +2120,32 @@ innobase_init(
goto error;
}
srv_log_block_size = 0;
if (innobase_log_block_size != (1 << 9)) { /*!=512*/
uint n_shift;
fprintf(stderr,
"InnoDB: Warning: innodb_log_block_size has been changed from default value 512. (###EXPERIMENTAL### operation)\n");
for (n_shift = 9; n_shift <= UNIV_PAGE_SIZE_SHIFT_MAX; n_shift++) {
if (innobase_log_block_size == ((ulong)1 << n_shift)) {
srv_log_block_size = (1 << n_shift);
fprintf(stderr,
"InnoDB: The log block size is set to %lu.\n",
srv_log_block_size);
break;
}
}
} else {
srv_log_block_size = 512;
}
if (!srv_log_block_size) {
fprintf(stderr,
"InnoDB: Error: %lu is not valid value for innodb_log_block_size.\n",
innobase_log_block_size);
goto error;
}
#ifndef MYSQL_SERVER
innodb_overwrite_relay_log_info = FALSE;
#endif
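The loop above accepts only exact powers of two between 512 and the maximum page size. The same predicate can be written without a loop; a standalone sketch (UNIV_PAGE_SIZE_SHIFT_MAX is taken as 14 here purely for illustration):

#include <stdio.h>

/* 1 if v is a power of two in [512, 1 << max_shift], else 0 */
static int
valid_log_block_size(unsigned long v, unsigned max_shift)
{
	return (v >= 512
		&& v <= (1UL << max_shift)
		&& (v & (v - 1)) == 0);
}

int
main(void)
{
	printf("%d %d %d\n",
	       valid_log_block_size(512, 14),	/* 1 */
	       valid_log_block_size(4096, 14),	/* 1 */
	       valid_log_block_size(1000, 14));	/* 0 */
	return (0);
}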
@ -2421,7 +2449,7 @@ innobase_change_buffering_inited_ok:
srv_n_write_io_threads = (ulint) innobase_write_io_threads;
srv_read_ahead &= 3;
srv_adaptive_checkpoint %= 3;
srv_adaptive_checkpoint %= 4;
srv_force_recovery = (ulint) innobase_force_recovery;
@ -2430,6 +2458,7 @@ innobase_change_buffering_inited_ok:
srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite;
srv_use_checksums = (ibool) innobase_use_checksums;
srv_fast_checksum = (ibool) innobase_fast_checksum;
srv_buffer_pool_shm_checksum = (ibool) innobase_buffer_pool_shm_checksum;
#ifdef HAVE_LARGE_PAGES
if ((os_use_large_pages = (ibool) my_use_large_pages))
@ -4754,17 +4783,18 @@ include_field:
n_requested_fields++;
templ->col_no = i;
templ->clust_rec_field_no = dict_col_get_clust_pos(
&index->table->cols[i], clust_index);
ut_ad(templ->clust_rec_field_no != ULINT_UNDEFINED);
if (index == clust_index) {
templ->rec_field_no = dict_col_get_clust_pos(
&index->table->cols[i], index);
templ->rec_field_no = templ->clust_rec_field_no;
} else {
templ->rec_field_no = dict_index_get_nth_col_pos(
index, i);
}
if (templ->rec_field_no == ULINT_UNDEFINED) {
prebuilt->need_to_access_clustered = TRUE;
if (templ->rec_field_no == ULINT_UNDEFINED) {
prebuilt->need_to_access_clustered = TRUE;
}
}
if (field->null_ptr) {
@ -4816,9 +4846,7 @@ skip_field:
for (i = 0; i < n_requested_fields; i++) {
templ = prebuilt->mysql_template + i;
templ->rec_field_no = dict_col_get_clust_pos(
&index->table->cols[templ->col_no],
clust_index);
templ->rec_field_no = templ->clust_rec_field_no;
}
}
}
@ -6674,6 +6702,33 @@ create_clustered_index_when_no_primary(
return(error);
}
/*****************************************************************//**
Return a display name for the row format
@return row format name */
const char *get_row_format_name(
/*============================*/
enum row_type row_format) /*!< in: Row Format */
{
switch (row_format) {
case ROW_TYPE_COMPACT:
return("COMPACT");
case ROW_TYPE_COMPRESSED:
return("COMPRESSED");
case ROW_TYPE_DYNAMIC:
return("DYNAMIC");
case ROW_TYPE_REDUNDANT:
return("REDUNDANT");
case ROW_TYPE_DEFAULT:
return("DEFAULT");
case ROW_TYPE_FIXED:
return("FIXED");
default:
break;
}
return("NOT USED");
}
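A couple of example return values, just to make the mapping concrete (the enum members come from the SQL layer, not from this patch):

	const char*	n1 = get_row_format_name(ROW_TYPE_COMPRESSED);	/* "COMPRESSED" */
	const char*	n2 = get_row_format_name(ROW_TYPE_FIXED);	/* "FIXED" */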
/*****************************************************************//**
Validates the create options. We may build on this function
in future. For now, it checks two specifiers:
@ -6689,9 +6744,9 @@ create_options_are_valid(
columns and indexes */
HA_CREATE_INFO* create_info) /*!< in: create info. */
{
ibool kbs_specified = FALSE;
ibool kbs_specified = FALSE;
ibool ret = TRUE;
enum row_type row_type = form->s->row_type;
ut_ad(thd != NULL);
@ -6700,13 +6755,28 @@ create_options_are_valid(
return(TRUE);
}
/* Check for a valid Innodb ROW_FORMAT specifier. For example,
ROW_TYPE_FIXED can be sent to Innodb */
switch (row_type) {
case ROW_TYPE_COMPACT:
case ROW_TYPE_COMPRESSED:
case ROW_TYPE_DYNAMIC:
case ROW_TYPE_REDUNDANT:
case ROW_TYPE_DEFAULT:
break;
default:
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: invalid ROW_FORMAT specifier.");
ret = FALSE;
}
ut_ad(form != NULL);
ut_ad(create_info != NULL);
/* First check if KEY_BLOCK_SIZE was specified. */
if (create_info->key_block_size
|| (create_info->used_fields & HA_CREATE_USED_KEY_BLOCK_SIZE)) {
/* First check if a non-zero KEY_BLOCK_SIZE was specified. */
if (create_info->key_block_size) {
kbs_specified = TRUE;
switch (create_info->key_block_size) {
case 1:
@ -6717,13 +6787,12 @@ create_options_are_valid(
/* Valid value. */
break;
default:
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: invalid"
" KEY_BLOCK_SIZE = %lu."
" Valid values are"
" [1, 2, 4, 8, 16]",
create_info->key_block_size);
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: invalid KEY_BLOCK_SIZE = %lu."
" Valid values are [1, 2, 4, 8, 16]",
create_info->key_block_size);
ret = FALSE;
}
}
@ -6731,110 +6800,67 @@ create_options_are_valid(
/* If KEY_BLOCK_SIZE was specified, check for its
dependencies. */
if (kbs_specified && !srv_file_per_table) {
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE"
" requires innodb_file_per_table.");
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE"
" requires innodb_file_per_table.");
ret = FALSE;
}
if (kbs_specified && srv_file_format < DICT_TF_FORMAT_ZIP) {
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE"
" requires innodb_file_format >"
" Antelope.");
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_format > Antelope.");
ret = FALSE;
}
/* Now check for ROW_FORMAT specifier. */
if (create_info->used_fields & HA_CREATE_USED_ROW_FORMAT) {
switch (form->s->row_type) {
const char* row_format_name;
case ROW_TYPE_COMPRESSED:
case ROW_TYPE_DYNAMIC:
row_format_name
= form->s->row_type == ROW_TYPE_COMPRESSED
? "COMPRESSED"
: "DYNAMIC";
/* These two ROW_FORMATs require
srv_file_per_table and srv_file_format */
if (!srv_file_per_table) {
push_warning_printf(
thd,
MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s"
" requires innodb_file_per_table.",
row_format_name);
ret = FALSE;
}
if (srv_file_format < DICT_TF_FORMAT_ZIP) {
push_warning_printf(
thd,
MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s"
" requires innodb_file_format >"
" Antelope.",
row_format_name);
ret = FALSE;
}
/* Cannot specify KEY_BLOCK_SIZE with
ROW_FORMAT = DYNAMIC.
However, we do allow COMPRESSED to be
specified with KEY_BLOCK_SIZE. */
if (kbs_specified
&& form->s->row_type == ROW_TYPE_DYNAMIC) {
push_warning_printf(
thd,
MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: cannot specify"
" ROW_FORMAT = DYNAMIC with"
" KEY_BLOCK_SIZE.");
ret = FALSE;
}
break;
case ROW_TYPE_REDUNDANT:
case ROW_TYPE_COMPACT:
case ROW_TYPE_DEFAULT:
/* Default is COMPACT. */
row_format_name
= form->s->row_type == ROW_TYPE_REDUNDANT
? "REDUNDANT"
: "COMPACT";
/* Cannot specify KEY_BLOCK_SIZE with these
format specifiers. */
if (kbs_specified) {
push_warning_printf(
thd,
MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: cannot specify"
" ROW_FORMAT = %s with"
" KEY_BLOCK_SIZE.",
row_format_name);
ret = FALSE;
}
break;
default:
push_warning(thd,
MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: invalid ROW_FORMAT specifier.");
ret = FALSE;
switch (row_type) {
case ROW_TYPE_COMPRESSED:
case ROW_TYPE_DYNAMIC:
/* These two ROW_FORMATs require srv_file_per_table
and srv_file_format > Antelope */
if (!srv_file_per_table) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s"
" requires innodb_file_per_table.",
get_row_format_name(row_type));
ret = FALSE;
}
if (srv_file_format < DICT_TF_FORMAT_ZIP) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_format > Antelope.",
get_row_format_name(row_type));
ret = FALSE;
}
default:
break;
}
switch (row_type) {
case ROW_TYPE_REDUNDANT:
case ROW_TYPE_COMPACT:
case ROW_TYPE_DYNAMIC:
/* KEY_BLOCK_SIZE is only allowed with Compressed or Default */
if (kbs_specified) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: cannot specify ROW_FORMAT = %s"
" with KEY_BLOCK_SIZE.",
get_row_format_name(row_type));
ret = FALSE;
}
default:
break;
}
return(ret);
@ -6960,14 +6986,15 @@ ha_innobase::create(
goto cleanup;
}
if (create_info->key_block_size
|| (create_info->used_fields & HA_CREATE_USED_KEY_BLOCK_SIZE)) {
if (create_info->key_block_size) {
/* Determine the page_zip.ssize corresponding to the
requested page size (key_block_size) in kilobytes. */
ulint ssize, ksize;
ulint key_block_size = create_info->key_block_size;
/* Set 'flags' to the correct key_block_size.
It will be zero if key_block_size is an invalid number.*/
for (ssize = ksize = 1; ssize <= DICT_TF_ZSSIZE_MAX;
ssize++, ksize <<= 1) {
if (key_block_size == ksize) {
@ -6980,38 +7007,39 @@ ha_innobase::create(
}
if (!srv_file_per_table) {
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE"
" requires innodb_file_per_table.");
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE"
" requires innodb_file_per_table.");
flags = 0;
}
if (file_format < DICT_TF_FORMAT_ZIP) {
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE"
" requires innodb_file_format >"
" Antelope.");
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_format > Antelope.");
flags = 0;
}
if (!flags) {
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ignoring"
" KEY_BLOCK_SIZE=%lu.",
create_info->key_block_size);
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ignoring"
" KEY_BLOCK_SIZE=%lu.",
create_info->key_block_size);
}
}
row_type = form->s->row_type;
if (flags) {
/* KEY_BLOCK_SIZE was specified. */
if (!(create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)) {
/* ROW_FORMAT was not specified;
default to ROW_FORMAT=COMPRESSED */
/* if ROW_FORMAT is set to default,
automatically change it to COMPRESSED.*/
if (row_type == ROW_TYPE_DEFAULT) {
row_type = ROW_TYPE_COMPRESSED;
} else if (row_type != ROW_TYPE_COMPRESSED) {
/* ROW_FORMAT other than COMPRESSED
@ -7021,8 +7049,7 @@ ha_innobase::create(
such combinations can be obtained
with ALTER TABLE anyway. */
push_warning_printf(
thd,
MYSQL_ERROR::WARN_LEVEL_WARN,
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ignoring KEY_BLOCK_SIZE=%lu"
" unless ROW_FORMAT=COMPRESSED.",
@ -7030,7 +7057,7 @@ ha_innobase::create(
flags = 0;
}
} else {
/* No KEY_BLOCK_SIZE */
/* flags == 0 means no KEY_BLOCK_SIZE.*/
if (row_type == ROW_TYPE_COMPRESSED) {
/* ROW_FORMAT=COMPRESSED without
KEY_BLOCK_SIZE implies half the
@ -7047,33 +7074,24 @@ ha_innobase::create(
}
switch (row_type) {
const char* row_format_name;
case ROW_TYPE_REDUNDANT:
break;
case ROW_TYPE_COMPRESSED:
case ROW_TYPE_DYNAMIC:
row_format_name
= row_type == ROW_TYPE_COMPRESSED
? "COMPRESSED"
: "DYNAMIC";
if (!srv_file_per_table) {
push_warning_printf(
thd,
MYSQL_ERROR::WARN_LEVEL_WARN,
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s"
" requires innodb_file_per_table.",
row_format_name);
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_per_table.",
get_row_format_name(row_type));
} else if (file_format < DICT_TF_FORMAT_ZIP) {
push_warning_printf(
thd,
MYSQL_ERROR::WARN_LEVEL_WARN,
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s"
" requires innodb_file_format >"
" Antelope.",
row_format_name);
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_format > Antelope.",
get_row_format_name(row_type));
} else {
flags |= DICT_TF_COMPACT
| (DICT_TF_FORMAT_ZIP
@ -7085,10 +7103,10 @@ ha_innobase::create(
case ROW_TYPE_NOT_USED:
case ROW_TYPE_FIXED:
default:
push_warning(thd,
MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: assuming ROW_FORMAT=COMPACT.");
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: assuming ROW_FORMAT=COMPACT.");
case ROW_TYPE_DEFAULT:
case ROW_TYPE_COMPACT:
flags = DICT_TF_COMPACT;
@ -7271,6 +7289,14 @@ ha_innobase::discard_or_import_tablespace(
err = row_discard_tablespace_for_mysql(dict_table->name, trx);
} else {
err = row_import_tablespace_for_mysql(dict_table->name, trx);
/* in expanded import mode re-initialize auto_increment again */
if ((err == DB_SUCCESS) && srv_expand_import &&
(table->found_next_number_field != NULL)) {
dict_table_autoinc_lock(dict_table);
innobase_initialize_autoinc();
dict_table_autoinc_unlock(dict_table);
}
}
err = convert_error_code_to_mysql(err, dict_table->flags, NULL);
@ -7734,6 +7760,7 @@ ha_innobase::estimate_rows_upper_bound(void)
dict_index_t* index;
ulonglong estimate;
ulonglong local_data_file_length;
ulint stat_n_leaf_pages;
DBUG_ENTER("estimate_rows_upper_bound");
@ -7753,10 +7780,12 @@ ha_innobase::estimate_rows_upper_bound(void)
index = dict_table_get_first_index(prebuilt->table);
ut_a(index->stat_n_leaf_pages > 0);
stat_n_leaf_pages = index->stat_n_leaf_pages;
ut_a(stat_n_leaf_pages > 0);
local_data_file_length =
((ulonglong) index->stat_n_leaf_pages) * UNIV_PAGE_SIZE;
((ulonglong) stat_n_leaf_pages) * UNIV_PAGE_SIZE;
/* Calculate a minimum length for a clustered index record and from
@ -7923,9 +7952,12 @@ Returns statistics information of the table to the MySQL interpreter,
in various fields of the handle object. */
UNIV_INTERN
int
ha_innobase::info(
/*==============*/
uint flag) /*!< in: what information MySQL requests */
ha_innobase::info_low(
/*==================*/
uint flag, /*!< in: what information MySQL
requests */
bool called_from_analyze) /* in: TRUE if called from
::analyze() */
{
dict_table_t* ib_table;
dict_index_t* index;
@ -7956,14 +7988,12 @@ ha_innobase::info(
ib_table = prebuilt->table;
if (flag & HA_STATUS_TIME) {
if ((innobase_stats_on_metadata
|| thd_sql_command(user_thd) == SQLCOM_ANALYZE)
&& !share->ib_table->is_corrupt) {
if ((called_from_analyze || innobase_stats_on_metadata) && !share->ib_table->is_corrupt) {
/* In sql_show we call with this flag: update
the statistics so that they are up-to-date */
if (srv_use_sys_stats_table && !((ib_table->flags >> DICT_TF2_SHIFT) & DICT_TF2_TEMPORARY)
&& thd_sql_command(user_thd) == SQLCOM_ANALYZE) {
&& called_from_analyze) {
/* If the indexes on the table don't have enough rows in SYS_STATS system table, */
/* they need to be created. */
dict_index_t* index;
@ -7985,7 +8015,8 @@ ha_innobase::info(
prebuilt->trx->op_info = "updating table statistics";
dict_update_statistics(ib_table,
(thd_sql_command(user_thd) == SQLCOM_ANALYZE)?TRUE:FALSE);
FALSE /* update even if stats
are initialized */, called_from_analyze);
prebuilt->trx->op_info = "returning various info to MySQL";
}
@ -8004,6 +8035,9 @@ ha_innobase::info(
}
if (flag & HA_STATUS_VARIABLE) {
dict_table_stats_lock(ib_table, RW_S_LATCH);
n_rows = ib_table->stat_n_rows;
/* Because we do not protect stat_n_rows by any mutex in a
@ -8053,6 +8087,8 @@ ha_innobase::info(
ib_table->stat_sum_of_other_index_sizes)
* UNIV_PAGE_SIZE;
dict_table_stats_unlock(ib_table, RW_S_LATCH);
/* Since fsp_get_available_space_in_free_extents() is
acquiring latches inside InnoDB, we do not call it if we
are asked by MySQL to avoid locking. Another reason to
@ -8069,19 +8105,12 @@ ha_innobase::info(
innodb_crash_recovery is set to a high value. */
stats.delete_length = 0;
} else {
/* lock the data dictionary to avoid races with
ibd_file_missing and tablespace_discarded */
row_mysql_lock_data_dictionary(prebuilt->trx);
ullint avail_space;
/* ib_table->space must be an existent tablespace */
if (!ib_table->ibd_file_missing
&& !ib_table->tablespace_discarded) {
stats.delete_length =
fsp_get_available_space_in_free_extents(
ib_table->space) * 1024;
} else {
avail_space = fsp_get_available_space_in_free_extents(
ib_table->space);
if (avail_space == ULLINT_UNDEFINED) {
THD* thd;
thd = ha_thd();
@ -8098,9 +8127,9 @@ ha_innobase::info(
ib_table->name);
stats.delete_length = 0;
} else {
stats.delete_length = avail_space * 1024;
}
row_mysql_unlock_data_dictionary(prebuilt->trx);
}
stats.check_time = 0;
@ -8129,6 +8158,8 @@ ha_innobase::info(
table->s->keys);
}
dict_table_stats_lock(ib_table, RW_S_LATCH);
for (i = 0; i < table->s->keys; i++) {
ulong j;
/* We could get index quickly through internal
@ -8166,8 +8197,6 @@ ha_innobase::info(
break;
}
dict_index_stat_mutex_enter(index);
if (index->stat_n_diff_key_vals[j + 1] == 0) {
rec_per_key = stats.records;
@ -8176,8 +8205,6 @@ ha_innobase::info(
index->stat_n_diff_key_vals[j + 1]);
}
dict_index_stat_mutex_exit(index);
/* Since MySQL seems to favor table scans
too much over index searches, we pretend
index selectivity is 2 times better than
@ -8194,6 +8221,8 @@ ha_innobase::info(
(ulong) rec_per_key;
}
}
dict_table_stats_unlock(ib_table, RW_S_LATCH);
}
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
@ -8227,6 +8256,18 @@ func_exit:
DBUG_RETURN(0);
}
/*********************************************************************//**
Returns statistics information of the table to the MySQL interpreter,
in various fields of the handle object. */
UNIV_INTERN
int
ha_innobase::info(
/*==============*/
uint flag) /*!< in: what information MySQL requests */
{
return(info_low(flag, false /* not called from analyze */));
}
/**********************************************************************//**
Updates index cardinalities of the table, based on 8 random dives into
each index tree. This does NOT calculate exact statistics on the table.
@ -8243,7 +8284,8 @@ ha_innobase::analyze(
}
/* Simply call ::info() with all the flags */
info(HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE);
info_low(HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE,
true /* called from analyze */);
if (share->ib_table->is_corrupt) {
return(HA_ADMIN_CORRUPT);
@ -8552,8 +8594,6 @@ ha_innobase::get_foreign_key_create_info(void)
flen = ftell(srv_dict_tmpfile);
if (flen < 0) {
flen = 0;
} else if (flen > 64000 - 1) {
flen = 64000 - 1;
}
/* allocate buffer for the string, and
@ -9866,7 +9906,11 @@ ha_innobase::innobase_peek_autoinc(void)
auto_inc = dict_table_autoinc_read(innodb_table);
ut_a(auto_inc > 0);
if (auto_inc == 0) {
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: AUTOINC next value generation "
"is disabled for '%s'\n", innodb_table->name);
}
dict_table_autoinc_unlock(innodb_table);
@ -11140,6 +11184,11 @@ static MYSQL_SYSVAR_ULONG(page_size, innobase_page_size,
"###EXPERIMENTAL###: The universal page size of the database. Changing for created database is not supported. Use on your own risk!",
NULL, NULL, (1 << 14), (1 << 12), (1 << UNIV_PAGE_SIZE_SHIFT_MAX), 0);
static MYSQL_SYSVAR_ULONG(log_block_size, innobase_log_block_size,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"###EXPERIMENTAL###: The log block size of the transaction log file. Changing for created log file is not supported. Use on your own risk!",
NULL, NULL, (1 << 9)/*512*/, (1 << 9)/*512*/, (1 << UNIV_PAGE_SIZE_SHIFT_MAX), 0);
static MYSQL_SYSVAR_STR(data_home_dir, innobase_data_home_dir,
PLUGIN_VAR_READONLY,
"The common part for InnoDB table spaces.",
@ -11358,6 +11407,16 @@ static MYSQL_SYSVAR_LONGLONG(buffer_pool_size, innobase_buffer_pool_size,
"The size of the memory buffer InnoDB uses to cache data and indexes of its tables.",
NULL, NULL, 128*1024*1024L, 32*1024*1024L, LONGLONG_MAX, 1024*1024L);
static MYSQL_SYSVAR_UINT(buffer_pool_shm_key, srv_buffer_pool_shm_key,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"[experimental] The key value of shared memory segment for the buffer pool. 0 (default) disables the feature.",
NULL, NULL, 0, 0, INT_MAX32, 0);
static MYSQL_SYSVAR_BOOL(buffer_pool_shm_checksum, innobase_buffer_pool_shm_checksum,
PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
"Enable buffer_pool_shm checksum validation (enabled by default).",
NULL, NULL, TRUE);
static MYSQL_SYSVAR_ULONG(commit_concurrency, innobase_commit_concurrency,
PLUGIN_VAR_RQCMDARG,
"Helps in performance tuning in heavily concurrent environments.",
@ -11489,6 +11548,13 @@ static MYSQL_SYSVAR_STR(change_buffering, innobase_change_buffering,
innodb_change_buffering_validate,
innodb_change_buffering_update, "inserts");
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
static MYSQL_SYSVAR_UINT(change_buffering_debug, ibuf_debug,
PLUGIN_VAR_RQCMDARG,
"Debug flags for InnoDB change buffering (0=none)",
NULL, NULL, 0, 0, 1, 0);
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
static MYSQL_SYSVAR_ULONG(read_ahead_threshold, srv_read_ahead_threshold,
PLUGIN_VAR_RQCMDARG,
"Number of pages that must be accessed sequentially for InnoDB to "
@ -11561,17 +11627,19 @@ innodb_adaptive_checkpoint_update(
void* var_ptr,
const void* save)
{
*(long *)var_ptr= (*(long *)save) % 3;
*(long *)var_ptr= (*(long *)save) % 4;
}
const char *adaptive_checkpoint_names[]=
{
"none", /* 0 */
"reflex", /* 1 */
"estimate", /* 2 */
"keep_average", /* 3 */
/* For compatibility of the older patch */
"0", /* 3 ("none" + 3) */
"1", /* 4 ("reflex" + 3) */
"2", /* 5 ("estimate" + 3) */
"0", /* 4 ("none" + 3) */
"1", /* 5 ("reflex" + 3) */
"2", /* 6 ("estimate" + 3) */
"3", /* 7 ("keep_average" + 4) */
NullS
};
TYPELIB adaptive_checkpoint_typelib=
@ -11581,7 +11649,7 @@ TYPELIB adaptive_checkpoint_typelib=
};
static MYSQL_SYSVAR_ENUM(adaptive_checkpoint, srv_adaptive_checkpoint,
PLUGIN_VAR_RQCMDARG,
"Enable/Disable flushing along modified age. (none, reflex, [estimate])",
"Enable/Disable flushing along modified age. (none, reflex, [estimate], keep_average)",
NULL, innodb_adaptive_checkpoint_update, 2, &adaptive_checkpoint_typelib);
static MYSQL_SYSVAR_ULONG(enable_unsafe_group_commit, srv_enable_unsafe_group_commit,
@ -11620,9 +11688,12 @@ static MYSQL_SYSVAR_ULONG(pass_corrupt_table, srv_pass_corrupt_table,
static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(page_size),
MYSQL_SYSVAR(log_block_size),
MYSQL_SYSVAR(additional_mem_pool_size),
MYSQL_SYSVAR(autoextend_increment),
MYSQL_SYSVAR(buffer_pool_size),
MYSQL_SYSVAR(buffer_pool_shm_key),
MYSQL_SYSVAR(buffer_pool_shm_checksum),
MYSQL_SYSVAR(checksums),
MYSQL_SYSVAR(fast_checksum),
MYSQL_SYSVAR(commit_concurrency),
@ -11698,6 +11769,9 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(dict_size_limit),
MYSQL_SYSVAR(use_sys_malloc),
MYSQL_SYSVAR(change_buffering),
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
MYSQL_SYSVAR(change_buffering_debug),
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
MYSQL_SYSVAR(read_ahead_threshold),
MYSQL_SYSVAR(io_capacity),
MYSQL_SYSVAR(auto_lru_dump),

1
handler/ha_innodb.h

@ -110,6 +110,7 @@ class ha_innobase: public handler
ulint innobase_update_autoinc(ulonglong auto_inc);
void innobase_initialize_autoinc();
dict_index_t* innobase_get_index(uint keynr);
int info_low(uint flag, bool called_from_analyze);
/* Init values for the class: */
public:

11
handler/handler0alter.cc

@ -1012,12 +1012,13 @@ ha_innobase::prepare_drop_index(
index->to_be_dropped = TRUE;
}
/* If FOREIGN_KEY_CHECK = 1 you may not drop an index defined
/* If FOREIGN_KEY_CHECKS = 1 you may not drop an index defined
for a foreign key constraint because InnoDB requires that both
tables contain indexes for the constraint. Note that CREATE
INDEX id ON table does a CREATE INDEX and DROP INDEX, and we
can ignore here foreign keys because a new index for the
foreign key has already been created.
tables contain indexes for the constraint. Such an index can
be dropped only if FOREIGN_KEY_CHECKS is set to 0.
Note that CREATE INDEX id ON table does a CREATE INDEX and
DROP INDEX, and we can ignore here foreign keys because a
new index for the foreign key has already been created.
We check for the foreign key constraints after marking the
candidate indexes for deletion, because when we check for an

1
handler/innodb_patch_info.h

@ -47,5 +47,6 @@ struct innodb_enhancement {
{"innodb_fast_checksum","Using the checksum on 32bit-unit calculation","incompatible for unpatched ver.","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_files_extend","allow >4GB transaction log files, and can vary universal page size of datafiles","incompatible for unpatched ver.","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_sys_tables_sys_indexes","Expose InnoDB SYS_TABLES and SYS_INDEXES schema tables","","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_buffer_pool_shm","Put buffer pool contents to shared memory segment and reuse it at clean restart [experimental]","","http://www.percona.com/docs/wiki/percona-xtradb"},
{NULL, NULL, NULL, NULL}
};

197
ibuf/ibuf0ibuf.c

@ -49,6 +49,7 @@ Created 7/19/1997 Heikki Tuuri
#include "btr0cur.h"
#include "btr0pcur.h"
#include "btr0btr.h"
#include "row0upd.h"
#include "sync0sync.h"
#include "dict0boot.h"
#include "fut0lst.h"
@ -170,6 +171,11 @@ access order rules. */
/** Operations that can currently be buffered. */
UNIV_INTERN ibuf_use_t ibuf_use = IBUF_USE_INSERT;
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/** Flag to control insert buffer debugging. */
UNIV_INTERN uint ibuf_debug;
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
/** The insert buffer control structure */
UNIV_INTERN ibuf_t* ibuf = NULL;
@ -2886,9 +2892,80 @@ During merge, inserts to an index page a secondary index entry extracted
from the insert buffer. */
static
void
ibuf_insert_to_index_page_low(
/*==========================*/
const dtuple_t* entry, /*!< in: buffered entry to insert */
buf_block_t* block, /*!< in/out: index page where the buffered
entry should be placed */
dict_index_t* index, /*!< in: record descriptor */
mtr_t* mtr, /*!< in/out: mtr */
page_cur_t* page_cur)/*!< in/out: cursor positioned on the record
after which to insert the buffered entry */
{
const page_t* page;
ulint space;
ulint page_no;
ulint zip_size;
const page_t* bitmap_page;
ulint old_bits;
if (UNIV_LIKELY
(page_cur_tuple_insert(page_cur, entry, index, 0, mtr) != NULL)) {
return;
}
/* If the record did not fit, reorganize */
btr_page_reorganize(block, index, mtr);
page_cur_search(block, index, entry, PAGE_CUR_LE, page_cur);
/* This time the record must fit */
if (UNIV_LIKELY
(page_cur_tuple_insert(page_cur, entry, index, 0, mtr) != NULL)) {
return;
}
page = buf_block_get_frame(block);
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: Insert buffer insert fails;"
" page free %lu, dtuple size %lu\n",
(ulong) page_get_max_insert_size(page, 1),
(ulong) rec_get_converted_size(index, entry, 0));
fputs("InnoDB: Cannot insert index record ", stderr);
dtuple_print(stderr, entry);
fputs("\nInnoDB: The table where this index record belongs\n"
"InnoDB: is now probably corrupt. Please run CHECK TABLE on\n"
"InnoDB: that table.\n", stderr);
space = page_get_space_id(page);
zip_size = buf_block_get_zip_size(block);
page_no = page_get_page_no(page);
bitmap_page = ibuf_bitmap_get_map_page(space, page_no, zip_size, mtr);
old_bits = ibuf_bitmap_page_get_bits(bitmap_page, page_no, zip_size,
IBUF_BITMAP_FREE, mtr);
fprintf(stderr,
"InnoDB: space %lu, page %lu, zip_size %lu, bitmap bits %lu\n",
(ulong) space, (ulong) page_no,
(ulong) zip_size, (ulong) old_bits);
fputs("InnoDB: Submit a detailed bug report"
" to http://bugs.mysql.com\n", stderr);
}
/************************************************************************
During merge, inserts to an index page a secondary index entry extracted
from the insert buffer. */
static
void
ibuf_insert_to_index_page(
/*======================*/
dtuple_t* entry, /*!< in: buffered entry to insert */
const dtuple_t* entry, /*!< in: buffered entry to insert */
buf_block_t* block, /*!< in/out: index page where the buffered entry
should be placed */
dict_index_t* index, /*!< in: record descriptor */
@ -2898,11 +2975,10 @@ ibuf_insert_to_index_page(
ulint low_match;
page_t* page = buf_block_get_frame(block);
rec_t* rec;
page_t* bitmap_page;
ulint old_bits;
ut_ad(ibuf_inside());
ut_ad(dtuple_check_typed(entry));
ut_ad(!buf_block_align(page)->is_hashed);
if (UNIV_UNLIKELY(dict_table_is_comp(index->table)
!= (ibool)!!page_is_comp(page))) {
@ -2940,71 +3016,86 @@ dump:
low_match = page_cur_search(block, index, entry,
PAGE_CUR_LE, &page_cur);
if (low_match == dtuple_get_n_fields(entry)) {
if (UNIV_UNLIKELY(low_match == dtuple_get_n_fields(entry))) {
mem_heap_t* heap;
upd_t* update;
ulint* offsets;
page_zip_des_t* page_zip;
rec = page_cur_get_rec(&page_cur);
page_zip = buf_block_get_page_zip(block);
btr_cur_del_unmark_for_ibuf(rec, page_zip, mtr);
} else {
rec = page_cur_tuple_insert(&page_cur, entry, index, 0, mtr);
/* This is based on
row_ins_sec_index_entry_by_modify(BTR_MODIFY_LEAF). */
ut_ad(rec_get_deleted_flag(rec, page_is_comp(page)));
if (UNIV_LIKELY(rec != NULL)) {
heap = mem_heap_create(1024);
offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED,
&heap);
update = row_upd_build_sec_rec_difference_binary(
index, entry, rec, NULL, heap);
page_zip = buf_block_get_page_zip(block);
if (update->n_fields == 0) {
/* The records only differ in the delete-mark.
Clear the delete-mark, like we did before
Bug #56680 was fixed. */
btr_cur_del_unmark_for_ibuf(rec, page_zip, mtr);
updated_in_place:
mem_heap_free(heap);
return;
}
/* If the record did not fit, reorganize */
btr_page_reorganize(block, index, mtr);
page_cur_search(block, index, entry, PAGE_CUR_LE, &page_cur);
/* Copy the info bits. Clear the delete-mark. */
update->info_bits = rec_get_info_bits(rec, page_is_comp(page));
update->info_bits &= ~REC_INFO_DELETED_FLAG;
/* We cannot invoke btr_cur_optimistic_update() here,
because we do not have a btr_cur_t or que_thr_t,
as the insert buffer merge occurs at a very low level. */
if (!row_upd_changes_field_size_or_external(index, offsets,
update)
&& (!page_zip || btr_cur_update_alloc_zip(
page_zip, block, index,
rec_offs_size(offsets), FALSE, mtr))) {
/* This is the easy case. Do something similar
to btr_cur_update_in_place(). */
row_upd_rec_in_place(rec, index, offsets,
update, page_zip);
goto updated_in_place;
}
/* This time the record must fit */
if (UNIV_UNLIKELY
(!page_cur_tuple_insert(&page_cur, entry, index,
0, mtr))) {
ulint space;
ulint page_no;
ulint zip_size;
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: Insert buffer insert"
" fails; page free %lu,"
" dtuple size %lu\n",
(ulong) page_get_max_insert_size(
page, 1),
(ulong) rec_get_converted_size(
index, entry, 0));
fputs("InnoDB: Cannot insert index record ",
stderr);
dtuple_print(stderr, entry);
fputs("\nInnoDB: The table where"
" this index record belongs\n"
"InnoDB: is now probably corrupt."
" Please run CHECK TABLE on\n"
"InnoDB: that table.\n", stderr);
space = page_get_space_id(page);
zip_size = buf_block_get_zip_size(block);
page_no = page_get_page_no(page);
bitmap_page = ibuf_bitmap_get_map_page(
space, page_no, zip_size, mtr);
old_bits = ibuf_bitmap_page_get_bits(
bitmap_page, page_no, zip_size,
IBUF_BITMAP_FREE, mtr);
fprintf(stderr,
"InnoDB: space %lu, page %lu,"
" zip_size %lu, bitmap bits %lu\n",
(ulong) space, (ulong) page_no,
(ulong) zip_size, (ulong) old_bits);
fputs("InnoDB: Submit a detailed bug report"
" to http://bugs.mysql.com\n", stderr);
/* A collation may identify values that differ in
storage length.
Some examples (1 or 2 bytes):
utf8_turkish_ci: I = U+0131 LATIN SMALL LETTER DOTLESS I
utf8_general_ci: S = U+00DF LATIN SMALL LETTER SHARP S
utf8_general_ci: A = U+00E4 LATIN SMALL LETTER A WITH DIAERESIS
latin1_german2_ci: SS = U+00DF LATIN SMALL LETTER SHARP S
Examples of a character (3-byte UTF-8 sequence)
identified with 2 or 4 characters (1-byte UTF-8 sequences):
utf8_unicode_ci: 'II' = U+2171 SMALL ROMAN NUMERAL TWO
utf8_unicode_ci: '(10)' = U+247D PARENTHESIZED NUMBER TEN
*/
/* Delete the different-length record, and insert the
buffered one. */
lock_rec_store_on_page_infimum(block, rec);
page_cur_delete_rec(&page_cur, index, offsets, mtr);
page_cur_move_to_prev(&page_cur);
mem_heap_free(heap);
}
ibuf_insert_to_index_page_low(entry, block, index, mtr,
&page_cur);
lock_rec_restore_from_page_infimum(block, rec, block);
} else {
ibuf_insert_to_index_page_low(entry, block, index, mtr,
&page_cur);
}
}
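The collation comment above is the heart of this change: two values a collation considers equal can occupy a different number of bytes, so a buffered entry cannot always be applied as an update-in-place. A tiny standalone illustration of the length part only (the equality comparison itself lives in the server's collation routines, not here):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* 'a' vs U+00E4 (a with diaeresis): equal under utf8_general_ci,
	but 1 byte vs 2 bytes when stored as UTF-8. */
	const char*	plain    = "a";
	const char*	umlauted = "\xC3\xA4";

	printf("%lu byte(s) vs %lu byte(s)\n",
	       (unsigned long) strlen(plain),
	       (unsigned long) strlen(umlauted));
	return (0);
}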

49
include/btr0btr.h

@ -94,26 +94,35 @@ btr_root_get(
Gets a buffer page and declares its latching order level. */
UNIV_INLINE
buf_block_t*
btr_block_get(
/*==========*/
ulint space, /*!< in: space id */
ulint zip_size, /*!< in: compressed page size in bytes
or 0 for uncompressed pages */
ulint page_no, /*!< in: page number */
ulint mode, /*!< in: latch mode */
mtr_t* mtr); /*!< in: mtr */
/**************************************************************//**
Gets a buffer page and declares its latching order level. */
UNIV_INLINE
page_t*
btr_page_get(
/*=========*/
ulint space, /*!< in: space id */
ulint zip_size, /*!< in: compressed page size in bytes
or 0 for uncompressed pages */
ulint page_no, /*!< in: page number */
ulint mode, /*!< in: latch mode */
mtr_t* mtr); /*!< in: mtr */
btr_block_get_func(
/*===============*/
ulint space, /*!< in: space id */
ulint zip_size, /*!< in: compressed page size in bytes
or 0 for uncompressed pages */
ulint page_no, /*!< in: page number */
ulint mode, /*!< in: latch mode */
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
mtr_t* mtr) /*!< in/out: mtr */
__attribute__((nonnull));
/** Gets a buffer page and declares its latching order level.
@param space tablespace identifier
@param zip_size compressed page size in bytes or 0 for uncompressed pages
@param page_no page number
@param mode latch mode
@param mtr mini-transaction handle
@return the block descriptor */
# define btr_block_get(space,zip_size,page_no,mode,mtr) \
btr_block_get_func(space,zip_size,page_no,mode,__FILE__,__LINE__,mtr)
/** Gets a buffer page and declares its latching order level.
@param space tablespace identifier
@param zip_size compressed page size in bytes or 0 for uncompressed pages
@param page_no page number
@param mode latch mode
@param mtr mini-transaction handle
@return the uncompressed page frame */
# define btr_page_get(space,zip_size,page_no,mode,mtr) \
buf_block_get_frame(btr_block_get(space,zip_size,page_no,mode,mtr))
#endif /* !UNIV_HOTBACKUP */
/**************************************************************//**
Gets the index id field of a page.

38
include/btr0btr.ic

@ -39,18 +39,21 @@ Created 6/2/1994 Heikki Tuuri
Gets a buffer page and declares its latching order level. */
UNIV_INLINE
buf_block_t*
btr_block_get(
/*==========*/
ulint space, /*!< in: space id */
ulint zip_size, /*!< in: compressed page size in bytes
or 0 for uncompressed pages */
ulint page_no, /*!< in: page number */
ulint mode, /*!< in: latch mode */
mtr_t* mtr) /*!< in: mtr */
btr_block_get_func(
/*===============*/
ulint space, /*!< in: space id */
ulint zip_size, /*!< in: compressed page size in bytes
or 0 for uncompressed pages */
ulint page_no, /*!< in: page number */
ulint mode, /*!< in: latch mode */
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
mtr_t* mtr) /*!< in/out: mtr */
{
buf_block_t* block;
block = buf_page_get(space, zip_size, page_no, mode, mtr);
block = buf_page_get_gen(space, zip_size, page_no, mode,
NULL, BUF_GET, file, line, mtr);
ut_a(srv_pass_corrupt_table || block);
@ -62,23 +65,6 @@ btr_block_get(
return(block);
}
/**************************************************************//**
Gets a buffer page and declares its latching order level. */
UNIV_INLINE
page_t*
btr_page_get(
/*=========*/
ulint space, /*!< in: space id */
ulint zip_size, /*!< in: compressed page size in bytes
or 0 for uncompressed pages */
ulint page_no, /*!< in: page number */
ulint mode, /*!< in: latch mode */
mtr_t* mtr) /*!< in: mtr */
{
return(buf_block_get_frame(btr_block_get(space, zip_size, page_no,
mode, mtr)));
}
/**************************************************************//**
Sets the index id field of a page. */
UNIV_INLINE

18
include/btr0cur.h

@ -242,6 +242,22 @@ btr_cur_pessimistic_insert(
que_thr_t* thr, /*!< in: query thread or NULL */
mtr_t* mtr); /*!< in: mtr */
/*************************************************************//**
See if there is enough place in the page modification log to log
an update-in-place.
@return TRUE if enough place */
UNIV_INTERN
ibool
btr_cur_update_alloc_zip(
/*=====================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page */
buf_block_t* block, /*!< in/out: buffer page */
dict_index_t* index, /*!< in: the index corresponding to the block */
ulint length, /*!< in: size needed */
ibool create, /*!< in: TRUE=delete-and-insert,
FALSE=update-in-place */
mtr_t* mtr) /*!< in: mini-transaction */
__attribute__((nonnull, warn_unused_result));
/*************************************************************//**
Updates a record when the update causes no size changes in its fields.
@return DB_SUCCESS or error number */
UNIV_INTERN
@ -504,7 +520,7 @@ Stores the fields in big_rec_vec to the tablespace and puts pointers to
them in rec. The extern flags in rec will have to be set beforehand.
The fields are stored on pages allocated from leaf node
file segment of the index tree.
@return DB_SUCCESS or error */
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
UNIV_INTERN
ulint
btr_store_big_rec_extern_fields(

6
include/buf0buf.h

@ -36,6 +36,7 @@ Created 11/5/1995 Heikki Tuuri
#include "ut0rbt.h"
#ifndef UNIV_HOTBACKUP
#include "os0proc.h"
#include "srv0srv.h"
/** @name Modes for buf_page_get_gen */
/* @{ */
@ -1301,7 +1302,10 @@ struct buf_block_struct{
/**********************************************************************//**
Compute the hash fold value for blocks in buf_pool->zip_hash. */
/* @{ */
#define BUF_POOL_ZIP_FOLD_PTR(ptr) ((ulint) (ptr) / UNIV_PAGE_SIZE)
/* the fold should be relative when srv_buffer_pool_shm_key is enabled */
#define BUF_POOL_ZIP_FOLD_PTR(ptr) (!srv_buffer_pool_shm_key\
?((ulint) (ptr) / UNIV_PAGE_SIZE)\
:((ulint) ((byte*)ptr - (byte*)(buf_pool->chunks->blocks->frame)) / UNIV_PAGE_SIZE))
#define BUF_POOL_ZIP_FOLD(b) BUF_POOL_ZIP_FOLD_PTR((b)->frame)
#define BUF_POOL_ZIP_FOLD_BPAGE(b) BUF_POOL_ZIP_FOLD((buf_block_t*) (b))
/* @} */
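The second branch exists because, when the buffer pool lives in a SysV shared memory segment, the same frames can be re-attached at a different virtual address on the next start; the fold must therefore be computed relative to the first chunk's first frame rather than from the raw pointer. A standalone sketch of the difference (the attach address and the 16 KiB page size are made up):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	16384UL

int
main(void)
{
	uintptr_t	base  = 0x40000000UL;		/* attach address, run 1 */
	uintptr_t	frame = base + 5 * PAGE_SIZE;	/* 6th block frame */

	/* absolute fold: changes if the segment is attached elsewhere */
	printf("absolute fold: %lu\n", (unsigned long) (frame / PAGE_SIZE));

	/* relative fold: identical on every run */
	printf("relative fold: %lu\n", (unsigned long) ((frame - base) / PAGE_SIZE));
	return (0);
}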

14
include/buf0flu.h

@ -76,6 +76,20 @@ buf_flush_init_for_writing(
ib_uint64_t newest_lsn); /*!< in: newest modification lsn
to the page */
#ifndef UNIV_HOTBACKUP
# if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/********************************************************************//**
Writes a flushable page asynchronously from the buffer pool to a file.
NOTE: buf_pool_mutex and block->mutex must be held upon entering this
function, and they will be released by this function after flushing.
This is loosely based on buf_flush_batch() and buf_flush_page().
@return TRUE if the page was flushed and the mutexes released */
UNIV_INTERN
ibool
buf_flush_page_try(
/*===============*/
buf_block_t* block) /*!< in/out: buffer control block */
__attribute__((nonnull, warn_unused_result));
# endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
/*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list or flush_list.
NOTE 1: in the case of an LRU flush the calling thread may own latches to

44
include/dict0dict.h

@ -318,7 +318,8 @@ void
dict_table_replace_index_in_foreign_list(
/*=====================================*/
dict_table_t* table, /*!< in/out: table */
dict_index_t* index); /*!< in: index to be replaced */
dict_index_t* index, /*!< in: index to be replaced */
const trx_t* trx); /*!< in: transaction handle */
/*********************************************************************//**
Checks if a index is defined for a foreign key constraint. Index is a part
of a foreign key constraint if the index is referenced by foreign key
@ -1053,20 +1054,13 @@ Calculates new estimates for table and index statistics. The statistics
are used in query optimization. */
UNIV_INTERN
void
dict_update_statistics_low(
/*=======================*/
dict_table_t* table, /*!< in/out: table */
ibool has_dict_mutex, /*!< in: TRUE if the caller has the
dictionary mutex */
ibool sync);
/*********************************************************************//**
Calculates new estimates for table and index statistics. The statistics
are used in query optimization. */
UNIV_INTERN
void
dict_update_statistics(
/*===================*/
dict_table_t* table, /*!< in/out: table */
dict_table_t* table, /*!< in/out: table */
ibool only_calc_if_missing_stats, /*!< in: only
update/recalc the stats if they have
not been initialized yet, otherwise
do nothing */
ibool sync);
/********************************************************************//**
Reserves the dictionary system mutex for MySQL. */
@ -1081,21 +1075,25 @@ void
dict_mutex_exit_for_mysql(void);
/*===========================*/
/**********************************************************************//**
Lock the appropriate mutex to protect index->stat_n_diff_key_vals[].
index->id is used to pick the right mutex and it should not change
before dict_index_stat_mutex_exit() is called on this index. */
Lock the appropriate latch to protect a given table's statistics.
table->id is used to pick the corresponding latch from a global array of
latches. */
UNIV_INTERN
void
dict_index_stat_mutex_enter(
/*========================*/
const dict_index_t* index); /*!< in: index */
dict_table_stats_lock(
/*==================*/
const dict_table_t* table, /*!< in: table */
ulint latch_mode); /*!< in: RW_S_LATCH or
RW_X_LATCH */
/**********************************************************************//**
Unlock the appropriate mutex that protects index->stat_n_diff_key_vals[]. */
Unlock the latch that has been locked by dict_table_stats_lock() */
UNIV_INTERN
void
dict_index_stat_mutex_exit(
/*=======================*/
const dict_index_t* index); /*!< in: index */
dict_table_stats_unlock(
/*====================*/
const dict_table_t* table, /*!< in: table */
ulint latch_mode); /*!< in: RW_S_LATCH or
RW_X_LATCH */
/********************************************************************//**
Checks if the database name in two table names is the same.
@return TRUE if same db name */

9
include/fil0fil.h

@ -737,6 +737,15 @@ fil_page_get_type(
/*==============*/
const byte* page); /*!< in: file page */
/*******************************************************************//**
Returns TRUE if a single-table tablespace is being deleted.
@return TRUE if being deleted */
UNIV_INTERN
ibool
fil_tablespace_is_being_deleted(
/*============================*/
ulint id); /*!< in: space id */
/*************************************************************************
Return local hash table informations. */

49
include/hash0hash.h

@ -49,6 +49,28 @@ hash_table_t*
hash_create(
/*========*/
ulint n); /*!< in: number of array cells */
/*************************************************************//**
Calculates the size needed to store a hash table in one contiguous
memory block.
@return size in bytes */
UNIV_INTERN
ulint
hash_create_needed(
/*===============*/
ulint n);
UNIV_INTERN
void
hash_create_init(
/*=============*/
hash_table_t* table,
ulint n);
UNIV_INTERN
void
hash_create_reuse(
/*==============*/
hash_table_t* table);
#ifndef UNIV_HOTBACKUP
/*************************************************************//**
Creates a mutex array to protect a hash table. */
@ -328,6 +350,33 @@ do {\
}\
} while (0)
/********************************************************************//**
Align nodes with moving location.*/
#define HASH_OFFSET(TABLE, NODE_TYPE, PTR_NAME, FADDR, FOFFSET, BOFFSET) \
do {\
ulint i2222;\
ulint cell_count2222;\
\
cell_count2222 = hash_get_n_cells(TABLE);\
\
for (i2222 = 0; i2222 < cell_count2222; i2222++) {\
NODE_TYPE* node2222;\
\
if ((TABLE)->array[i2222].node) \
(TABLE)->array[i2222].node = (void*)((byte*)(TABLE)->array[i2222].node \
+ (((TABLE)->array[i2222].node > (void*)FADDR)?FOFFSET:BOFFSET));\
node2222 = HASH_GET_FIRST((TABLE), i2222);\
\
while (node2222) {\
if (node2222->PTR_NAME) \
node2222->PTR_NAME = (void*)((byte*)(node2222->PTR_NAME) \
+ ((((void*)node2222->PTR_NAME) > (void*)FADDR)?FOFFSET:BOFFSET));\
\
node2222 = node2222->PTR_NAME;\
}\
}\
} while (0)
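HASH_OFFSET walks every cell and every chained node and rebases each stored pointer: pointers above FADDR are shifted by FOFFSET, the rest by BOFFSET. A standalone sketch of just that rebasing rule, with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uintptr_t	faddr   = 0x1000;	/* split address (illustrative) */
	intptr_t	foffset = 0x100;	/* delta for pointers above faddr */
	intptr_t	boffset = 0x200;	/* delta for the rest */

	uintptr_t	p = 0x1040;		/* a stored pointer above faddr */
	uintptr_t	q = 0x0F80;		/* a stored pointer below faddr */

	p += (p > faddr) ? foffset : boffset;
	q += (q > faddr) ? foffset : boffset;

	printf("p -> %#lx, q -> %#lx\n", (unsigned long) p, (unsigned long) q);
	return (0);
}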
/************************************************************//**
Gets the mutex index for a fold value in a hash table.
@return mutex number */

5
include/ibuf0ibuf.h

@ -48,6 +48,11 @@ typedef enum {
/** Operations that can currently be buffered. */
extern ibuf_use_t ibuf_use;
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/** Flag to control insert buffer debugging. */
extern uint ibuf_debug;
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
/** The insert buffer control structure */
extern ibuf_t* ibuf;

3
include/log0log.h

@ -672,6 +672,9 @@ extern log_t* log_sys;
when mysqld is first time started
on the restored database, it can
print helpful info for the user */
#define LOG_FILE_OS_FILE_LOG_BLOCK_SIZE 64
/* extend to record log_block_size
of XtraDB. 0 means default 512 */
#define LOG_FILE_ARCH_COMPLETED OS_FILE_LOG_BLOCK_SIZE
/* this 4-byte field is TRUE when
the writing of an archived log file

4
include/os0file.h

@ -107,7 +107,7 @@ whole block gets written. This should be true even in most cases of a crash:
if this fails for a log block, then it is equivalent to a media failure in the
log. */
#define OS_FILE_LOG_BLOCK_SIZE 512
#define OS_FILE_LOG_BLOCK_SIZE srv_log_block_size
/** Options for file_create @{ */
#define OS_FILE_OPEN 51
@ -188,6 +188,8 @@ extern ulint os_n_file_reads;
extern ulint os_n_file_writes;
extern ulint os_n_fsyncs;
extern ulint srv_log_block_size;
/* File types for directory entry data type */
enum os_file_type_enum{

28
include/os0proc.h

@ -32,6 +32,11 @@ Created 9/30/1995 Heikki Tuuri
#ifdef UNIV_LINUX
#include <sys/ipc.h>
#include <sys/shm.h>
#else
# if defined HAVE_SYS_IPC_H && HAVE_SYS_SHM_H
#include <sys/ipc.h>
#include <sys/shm.h>
# endif
#endif
typedef void* os_process_t;
@ -70,6 +75,29 @@ os_mem_free_large(
ulint size); /*!< in: size returned by
os_mem_alloc_large() */
/****************************************************************//**
Allocates or attaches and reuses shared memory segment.
The content is not cleared automatically.
@return allocated memory */
UNIV_INTERN
void*
os_shm_alloc(
/*=========*/
ulint* n, /*!< in/out: number of bytes */
uint key,
ibool* is_new);
/****************************************************************//**
Detach shared memory segment. */
UNIV_INTERN
void
os_shm_free(
/*========*/
void *ptr, /*!< in: pointer returned by
os_shm_alloc() */
ulint size); /*!< in: size returned by
os_shm_alloc() */
#ifndef UNIV_NONINL
#include "os0proc.ic"
#endif
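A hedged sketch of the calling convention these declarations imply; the variable names are illustrative, and the only real caller added by this patch is the buffer-pool setup:

	ulint	size = requested_bytes;	/* os_shm_alloc() rounds this up */
	ibool	is_new;
	void*	mem;

	mem = os_shm_alloc(&size, srv_buffer_pool_shm_key, &is_new);

	if (mem == NULL) {
		/* allocation/attach failed: fall back or abort startup */
	} else if (!is_new) {
		/* the segment already existed: its contents may be reusable */
	}

	/* ... on shutdown ... */
	os_shm_free(mem, size);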

4
include/os0sync.h

@ -189,14 +189,14 @@ os_event_wait_low(
/**********************************************************//**
Waits for an event object until it is in the signaled state or
a timeout is exceeded. In Unix the timeout is always infinite.
a timeout is exceeded.
@return 0 if success, OS_SYNC_TIME_EXCEEDED if timeout was exceeded */
UNIV_INTERN
ulint
os_event_wait_time(
/*===============*/
os_event_t event, /*!< in: event to wait */
ulint time); /*!< in: timeout in microseconds, or
ulint wtime); /*!< in: timeout in microseconds, or
OS_SYNC_INFINITE_TIME */
#ifdef __WIN__
/**********************************************************//**

5
include/row0ins.h

@ -84,9 +84,10 @@ ulint
row_ins_index_entry(
/*================*/
dict_index_t* index, /*!< in: index */
dtuple_t* entry, /*!< in: index entry to insert */
dtuple_t* entry, /*!< in/out: index entry to insert */
ulint n_ext, /*!< in: number of externally stored columns */
ibool foreign,/*!< in: TRUE=check foreign key constraints */
ibool foreign,/*!< in: TRUE=check foreign key constraints
(foreign=FALSE only during CREATE INDEX) */
que_thr_t* thr); /*!< in: query thread */
/***********************************************************//**
Inserts a row to a table. This is a high-level function used in

4
include/row0mysql.h

@ -535,6 +535,10 @@ struct mysql_row_templ_struct {
Innobase record in the current index;
not defined if template_type is
ROW_MYSQL_WHOLE_ROW */
ulint clust_rec_field_no; /*!< field number of the column in an
Innobase record in the clustered index;
not defined if template_type is
ROW_MYSQL_WHOLE_ROW */
ulint mysql_col_offset; /*!< offset of the column in the MySQL
row format */
ulint mysql_col_len; /*!< length of the column in the MySQL

11
include/row0upd.h

@ -126,8 +126,8 @@ UNIV_INTERN
void
row_upd_index_entry_sys_field(
/*==========================*/
const dtuple_t* entry, /*!< in: index entry, where the memory buffers
for sys fields are already allocated:
dtuple_t* entry, /*!< in/out: index entry, where the memory
buffers for sys fields are already allocated:
the function just copies the new values to
them */
dict_index_t* index, /*!< in: clustered index */
@ -167,8 +167,11 @@ row_upd_changes_field_size_or_external(
const upd_t* update);/*!< in: update vector */
#endif /* !UNIV_HOTBACKUP */
/***********************************************************//**
Replaces the new column values stored in the update vector to the record
given. No field size changes are allowed. */
Replaces the new column values stored in the update vector to the
record given. No field size changes are allowed. This function is
usually invoked on a clustered index. The only use case for a
secondary index is row_ins_sec_index_entry_by_modify() or its
counterpart in ibuf_insert_to_index_page(). */
UNIV_INTERN
void
row_upd_rec_in_place(

7
include/srv0srv.h

@ -57,6 +57,9 @@ extern const char srv_mysql50_table_name_prefix[9];
thread starts running */
extern os_event_t srv_lock_timeout_thread_event;
/* This event is set at shutdown to wakeup threads from sleep */
extern os_event_t srv_shutdown_event;
/* If the last data file is auto-extended, we add this many pages to it
at a time */
#define SRV_AUTO_EXTEND_INCREMENT \
@ -156,6 +159,10 @@ extern ulint srv_buf_pool_curr_size; /*!< current size in bytes */
extern ulint srv_mem_pool_size;
extern ulint srv_lock_table_size;
extern uint srv_buffer_pool_shm_key;
extern ibool srv_buffer_pool_shm_is_reused;
extern ibool srv_buffer_pool_shm_checksum;
extern ibool srv_thread_concurrency_timer_based;
extern ulint srv_n_file_io_threads;

3
include/srv0start.h

@ -131,4 +131,7 @@ extern enum srv_shutdown_state srv_shutdown_state;
/** Log 'spaces' have id's >= this */
#define SRV_LOG_SPACE_FIRST_ID 0xFFFFFFF0UL
/** reserved for extra system tables */
#define SRV_EXTRA_SYS_SPACE_FIRST_ID 0xFFFFFFE0UL
#endif

2
include/trx0sys.h

@ -470,7 +470,7 @@ trx_sys_file_format_id_to_name(
/* Space id and page no where the trx system file copy resides */
#define TRX_SYS_SPACE 0 /* the SYSTEM tablespace */
#define TRX_DOUBLEWRITE_SPACE 1 /* the doublewrite buffer tablespace if used */
#define TRX_DOUBLEWRITE_SPACE 0xFFFFFFE0UL /* the doublewrite buffer tablespace if used */
#define TRX_SYS_SPACE_MAX 9 /* reserved max space id for system tablespaces */
#include "fsp0fsp.h"
#define TRX_SYS_PAGE_NO FSP_TRX_SYS_PAGE_NO

2
include/trx0sys.ic

@ -81,7 +81,7 @@ trx_sys_sys_space(
{
if (srv_doublewrite_file) {
/* several spaces are reserved */
return((ibool)(space <= TRX_SYS_SPACE_MAX));
return((ibool)(space == TRX_SYS_SPACE || space == TRX_DOUBLEWRITE_SPACE));
} else {
return((ibool)(space == TRX_SYS_SPACE));
}

7
include/univ.i

@ -46,10 +46,10 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 1
#define INNODB_VERSION_MINOR 0
#define INNODB_VERSION_BUGFIX 13
#define INNODB_VERSION_BUGFIX 14
#ifndef PERCONA_INNODB_VERSION
#define PERCONA_INNODB_VERSION 11.6
#define PERCONA_INNODB_VERSION 12.5
#endif
@ -371,6 +371,9 @@ typedef unsigned long long int ullint;
/* Maximum value for ib_uint64_t */
#define IB_ULONGLONG_MAX ((ib_uint64_t) (~0ULL))
/* The 'undefined' value for ullint */
#define ULLINT_UNDEFINED ((ullint)(-1))
/* This 'ibool' type is used within Innobase. Remember that different included
headers may define 'bool' differently. Do not assume that 'bool' is a ulint! */
#define ibool ulint

43
include/ut0lst.h

@ -257,5 +257,48 @@ do { \
ut_a(ut_list_node_313 == NULL); \
} while (0)
/********************************************************************//**
Align nodes with moving location.
@param NAME the name of the list
@param TYPE node type
@param BASE base node (not a pointer to it)
@param OFFSET offset moved */
#define UT_LIST_OFFSET(NAME, TYPE, BASE, FADDR, FOFFSET, BOFFSET) \
do { \
ulint ut_list_i_313; \
TYPE* ut_list_node_313; \
\
if ((BASE).start) \
(BASE).start = (void*)((byte*)((BASE).start) \
+ (((void*)((BASE).start) > (void*)FADDR)?FOFFSET:BOFFSET));\
if ((BASE).end) \
(BASE).end = (void*)((byte*)((BASE).end) \
+ (((void*)((BASE).end) > (void*)FADDR)?FOFFSET:BOFFSET));\
\
ut_list_node_313 = (BASE).start; \
\
for (ut_list_i_313 = (BASE).count; ut_list_i_313--; ) { \
ut_a(ut_list_node_313); \
if ((ut_list_node_313->NAME).prev) \
(ut_list_node_313->NAME).prev = (void*)((byte*)((ut_list_node_313->NAME).prev)\
+ (((void*)((ut_list_node_313->NAME).prev) > (void*)FADDR)?FOFFSET:BOFFSET));\
if ((ut_list_node_313->NAME).next) \
(ut_list_node_313->NAME).next = (void*)((byte*)((ut_list_node_313->NAME).next)\
+ (((void*)((ut_list_node_313->NAME).next)> (void*)FADDR)?FOFFSET:BOFFSET));\
ut_list_node_313 = (ut_list_node_313->NAME).next; \
} \
\
ut_a(ut_list_node_313 == NULL); \
\
ut_list_node_313 = (BASE).end; \
\
for (ut_list_i_313 = (BASE).count; ut_list_i_313--; ) { \
ut_a(ut_list_node_313); \
ut_list_node_313 = (ut_list_node_313->NAME).prev; \
} \
\
ut_a(ut_list_node_313 == NULL); \
} while (0)
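This is the list-level counterpart of HASH_OFFSET above: after the shared memory segment is re-attached at a different address, every prev/next pointer stored inside it is shifted by FOFFSET or BOFFSET depending on which side of FADDR it falls, and the two trailing loops merely re-verify that the rebased list is still consistent with its node count.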
#endif

8
log/log0log.c

@ -1184,6 +1184,9 @@ log_group_file_header_flush(
/* Wipe over possible label of ibbackup --restore */
memcpy(buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP, " ", 4);
mach_write_to_4(buf + LOG_FILE_OS_FILE_LOG_BLOCK_SIZE,
srv_log_block_size);
dest_offset = nth_file * group->file_size;
#ifdef UNIV_DEBUG
@ -1777,9 +1780,7 @@ log_group_checkpoint(
ulint i;
ut_ad(mutex_own(&(log_sys->mutex)));
#if LOG_CHECKPOINT_SIZE > OS_FILE_LOG_BLOCK_SIZE
# error "LOG_CHECKPOINT_SIZE > OS_FILE_LOG_BLOCK_SIZE"
#endif
ut_a(LOG_CHECKPOINT_SIZE <= OS_FILE_LOG_BLOCK_SIZE);
buf = group->checkpoint_buf;
@ -3102,6 +3103,7 @@ logs_empty_and_mark_files_at_shutdown(void)
algorithm only works if the server is idle at shutdown */
srv_shutdown_state = SRV_SHUTDOWN_CLEANUP;
os_event_set(srv_shutdown_event);
loop:
os_thread_sleep(100000);

16
log/log0recv.c

@ -2899,6 +2899,7 @@ recv_init_crash_recovery(void)
/*==========================*/
{
ut_a(!recv_needed_recovery);
ut_a(!srv_buffer_pool_shm_is_reused);
recv_needed_recovery = TRUE;
@ -2956,6 +2957,7 @@ recv_recovery_from_checkpoint_start_func(
log_group_t* max_cp_group;
log_group_t* up_to_date_group;
ulint max_cp_field;
ulint log_hdr_log_block_size;
ib_uint64_t checkpoint_lsn;
ib_uint64_t checkpoint_no;
ib_uint64_t old_scanned_lsn;
@ -3057,6 +3059,20 @@ recv_recovery_from_checkpoint_start_func(
log_hdr_buf, max_cp_group);
}
log_hdr_log_block_size
= mach_read_from_4(log_hdr_buf + LOG_FILE_OS_FILE_LOG_BLOCK_SIZE);
if (log_hdr_log_block_size == 0) {
/* 0 means default value */
log_hdr_log_block_size = 512;
}
if (log_hdr_log_block_size != srv_log_block_size) {
fprintf(stderr,
"InnoDB: Error: The block size of ib_logfile (%lu) "
"is not equal to innodb_log_block_size.\n",
log_hdr_log_block_size);
return(DB_ERROR);
}
#ifdef UNIV_LOG_ARCHIVE
group = UT_LIST_GET_FIRST(log_sys->log_groups);

38
os/os0file.c

@ -1222,10 +1222,12 @@ UNIV_INTERN
void
os_file_set_nocache(
/*================*/
int fd, /*!< in: file descriptor to alter */
const char* file_name, /*!< in: file name, used in the
diagnostic message */
const char* operation_name) /*!< in: "open" or "create"; used in the
int fd /*!< in: file descriptor to alter */
__attribute__((unused)),
const char* file_name /*!< in: used in the diagnostic message */
__attribute__((unused)),
const char* operation_name __attribute__((unused)))
/*!< in: "open" or "create"; used in the
diagnostic message */
{
/* some versions of Solaris may not have DIRECTIO_ON */
@ -2355,7 +2357,10 @@ _os_file_read(
ulint i;
#endif /* !UNIV_HOTBACKUP */
/* On 64-bit Windows, ulint is 64 bits. But offset and n should be
no more than 32 bits. */
ut_a((offset & 0xFFFFFFFFUL) == offset);
ut_a((n & 0xFFFFFFFFUL) == n);
os_n_file_reads++;
os_bytes_read_since_printout += n;
@ -2479,7 +2484,10 @@ os_file_read_no_error_handling(
ulint i;
#endif /* !UNIV_HOTBACKUP */
/* On 64-bit Windows, ulint is 64 bits. But offset and n should be
no more than 32 bits. */
ut_a((offset & 0xFFFFFFFFUL) == offset);
ut_a((n & 0xFFFFFFFFUL) == n);
os_n_file_reads++;
os_bytes_read_since_printout += n;
@ -2609,7 +2617,10 @@ os_file_write(
ulint i;
#endif /* !UNIV_HOTBACKUP */
ut_a((offset & 0xFFFFFFFF) == offset);
/* On 64-bit Windows, ulint is 64 bits. But offset and n should be
no more than 32 bits. */
ut_a((offset & 0xFFFFFFFFUL) == offset);
ut_a((n & 0xFFFFFFFFUL) == n);
os_n_file_writes++;
@ -3383,12 +3394,14 @@ os_aio_array_reserve_slot(
trx_t* trx)
{
os_aio_slot_t* slot;
#ifdef WIN_ASYNC_IO
OVERLAPPED* control;
#endif
ulint i;
ulint slots_per_seg;
ulint local_seg;
#ifdef WIN_ASYNC_IO
OVERLAPPED* control;
ut_a((len & 0xFFFFFFFFUL) == len);
#endif
/* No need of a mutex. Only reading constant fields */
slots_per_seg = array->n_slots / array->n_segments;
@ -3690,6 +3703,9 @@ os_aio(
ut_ad(n % OS_FILE_LOG_BLOCK_SIZE == 0);
ut_ad(offset % OS_FILE_LOG_BLOCK_SIZE == 0);
ut_ad(os_aio_validate());
#ifdef WIN_ASYNC_IO
ut_ad((n & 0xFFFFFFFFUL) == n);
#endif
wake_later = mode & OS_AIO_SIMULATED_WAKE_LATER;
mode = mode & (~OS_AIO_SIMULATED_WAKE_LATER);
@ -3935,16 +3951,18 @@ os_aio_windows_handle(
/* retry failed read/write operation synchronously.
No need to hold array->mutex. */
ut_a((slot->len & 0xFFFFFFFFUL) == slot->len);
switch (slot->type) {
case OS_FILE_WRITE:
ret = WriteFile(slot->file, slot->buf,
slot->len, &len,
(DWORD) slot->len, &len,
&(slot->control));
break;
case OS_FILE_READ:
ret = ReadFile(slot->file, slot->buf,
slot->len, &len,
(DWORD) slot->len, &len,
&(slot->control));
break;

170
os/os0proc.c

@ -229,3 +229,173 @@ os_mem_free_large(
}
#endif
}
/****************************************************************//**
Allocates a new shared memory segment, or attaches to and reuses an existing one.
The content is not cleared automatically.
@return allocated memory */
UNIV_INTERN
void*
os_shm_alloc(
/*=========*/
ulint* n, /*!< in/out: number of bytes */
uint key,
ibool* is_new)
{
void* ptr;
#if defined HAVE_SYS_IPC_H && HAVE_SYS_SHM_H
ulint size;
int shmid;
*is_new = FALSE;
fprintf(stderr,
"InnoDB: The shared memory segment containing the buffer pool is: key %#x (%d).\n",
key, key);
# if defined HAVE_LARGE_PAGES && defined UNIV_LINUX
if (!os_use_large_pages || !os_large_page_size) {
goto skip;
}
/* Align block size to os_large_page_size */
ut_ad(ut_is_2pow(os_large_page_size));
size = ut_2pow_round(*n + (os_large_page_size - 1),
os_large_page_size);
shmid = shmget((key_t)key, (size_t)size,
IPC_CREAT | IPC_EXCL | SHM_HUGETLB | SHM_R | SHM_W);
if (shmid < 0) {
if (errno == EEXIST) {
fprintf(stderr,
"InnoDB: HugeTLB: The shared memory segment exists.\n");
shmid = shmget((key_t)key, (size_t)size,
SHM_HUGETLB | SHM_R | SHM_W);
if (shmid < 0) {
fprintf(stderr,
"InnoDB: HugeTLB: Warning: Failed to allocate %lu bytes. (reuse) errno %d\n",
size, errno);
goto skip;
} else {
fprintf(stderr,
"InnoDB: HugeTLB: The existent shared memory segment is used.\n");
}
} else {
fprintf(stderr,
"InnoDB: HugeTLB: Warning: Failed to allocate %lu bytes. (new) errno %d\n",
size, errno);
goto skip;
}
} else {
*is_new = TRUE;
fprintf(stderr,
"InnoDB: HugeTLB: A new shared memory segment has been created .\n");
}
ptr = shmat(shmid, NULL, 0);
if (ptr == (void *)-1) {
fprintf(stderr,
"InnoDB: HugeTLB: Warning: Failed to attach shared memory segment, errno %d\n",
errno);
ptr = NULL;
}
if (ptr) {
*n = size;
os_fast_mutex_lock(&ut_list_mutex);
ut_total_allocated_memory += size;
os_fast_mutex_unlock(&ut_list_mutex);
UNIV_MEM_ALLOC(ptr, size);
return(ptr);
}
skip:
*is_new = FALSE;
# endif /* HAVE_LARGE_PAGES && defined UNIV_LINUX */
# ifdef HAVE_GETPAGESIZE
size = getpagesize();
# else
size = UNIV_PAGE_SIZE;
# endif
/* Align block size to system page size */
ut_ad(ut_is_2pow(size));
size = *n = ut_2pow_round(*n + (size - 1), size);
shmid = shmget((key_t)key, (size_t)size,
IPC_CREAT | IPC_EXCL | SHM_R | SHM_W);
if (shmid < 0) {
if (errno == EEXIST) {
fprintf(stderr,
"InnoDB: A shared memory segment containing the buffer pool seems to already exist.\n");
shmid = shmget((key_t)key, (size_t)size,
SHM_R | SHM_W);
if (shmid < 0) {
fprintf(stderr,
"InnoDB: Warning: Failed to allocate %lu bytes. (reuse) errno %d\n",
size, errno);
ptr = NULL;
goto end;
} else {
fprintf(stderr,
"InnoDB: The existent shared memory segment is used.\n");
}
} else {
fprintf(stderr,
"InnoDB: Warning: Failed to allocate %lu bytes. (new) errno %d\n",
size, errno);
ptr = NULL;
goto end;
}
} else {
*is_new = TRUE;
fprintf(stderr,
"InnoDB: A new shared memory segment has been created.\n");
}
ptr = shmat(shmid, NULL, 0);
if (ptr == (void *)-1) {
fprintf(stderr,
"InnoDB: Warning: Failed to attach shared memory segment, errno %d\n",
errno);
ptr = NULL;
}
if (ptr) {
*n = size;
os_fast_mutex_lock(&ut_list_mutex);
ut_total_allocated_memory += size;
os_fast_mutex_unlock(&ut_list_mutex);
UNIV_MEM_ALLOC(ptr, size);
}
end:
#else /* HAVE_SYS_IPC_H && HAVE_SYS_SHM_H */
fprintf(stderr, "InnoDB: shared memory segment is not supported.\n");
ptr = NULL;
#endif /* HAVE_SYS_IPC_H && HAVE_SYS_SHM_H */
return(ptr);
}
/****************************************************************//**
Detaches a shared memory segment. */
UNIV_INTERN
void
os_shm_free(
/*========*/
void *ptr, /*!< in: pointer returned by
os_shm_alloc() */
ulint size) /*!< in: size returned by
os_shm_alloc() */
{
os_fast_mutex_lock(&ut_list_mutex);
ut_a(ut_total_allocated_memory >= size);
os_fast_mutex_unlock(&ut_list_mutex);
#if defined HAVE_SYS_IPC_H && HAVE_SYS_SHM_H
if (!shmdt(ptr)) {
os_fast_mutex_lock(&ut_list_mutex);
ut_a(ut_total_allocated_memory >= size);
ut_total_allocated_memory -= size;
os_fast_mutex_unlock(&ut_list_mutex);
UNIV_MEM_FREE(ptr, size);
}
#else /* HAVE_SYS_IPC_H && HAVE_SYS_SHM_H */
fprintf(stderr, "InnoDB: shared memory segment is not supported.\n");
#endif /* HAVE_SYS_IPC_H && HAVE_SYS_SHM_H */
}
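
os_shm_alloc() and os_shm_free() above follow the usual System V shared-memory pattern: try to create the segment with IPC_CREAT | IPC_EXCL, fall back to attaching the existing segment when shmget() fails with EEXIST, map it with shmat(), and later detach with shmdt(). A minimal standalone sketch of that pattern, without the HugeTLB branch and the ut_total_allocated_memory accounting (names are illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

/* Create the segment if it does not exist yet, otherwise attach to it. */
void *shm_attach(key_t key, size_t size, int *is_new)
{
    void *ptr;
    int shmid;

    *is_new = 1;
    shmid = shmget(key, size, IPC_CREAT | IPC_EXCL | 0600);
    if (shmid < 0 && errno == EEXIST) {
        /* The segment already exists: reuse it, as the patch does. */
        *is_new = 0;
        shmid = shmget(key, size, 0600);
    }
    if (shmid < 0) {
        fprintf(stderr, "shmget failed: %s\n", strerror(errno));
        return NULL;
    }

    ptr = shmat(shmid, NULL, 0);
    if (ptr == (void *) -1) {
        fprintf(stderr, "shmat failed: %s\n", strerror(errno));
        return NULL;
    }
    return ptr;
}

/* Detach the mapping; the segment itself persists until removed
   explicitly (e.g. with ipcrm). */
void shm_detach(void *ptr)
{
    if (shmdt(ptr) != 0) {
        fprintf(stderr, "shmdt failed: %s\n", strerror(errno));
    }
}

As in the patch, detaching does not free the memory; the percona_innodb_buffer_pool_shm test added below cleans up with ipcrm -M 123456 after the server restart.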

53
os/os0sync.c

@ -31,6 +31,9 @@ Created 9/6/1995 Heikki Tuuri
#ifdef __WIN__
#include <windows.h>
#else
#include <sys/time.h>
#include <time.h>
#endif
#include "ut0mem.h"
@ -407,14 +410,14 @@ os_event_wait_low(
/**********************************************************//**
Waits for an event object until it is in the signaled state or
a timeout is exceeded. In Unix the timeout is always infinite.
a timeout is exceeded.
@return 0 if success, OS_SYNC_TIME_EXCEEDED if timeout was exceeded */
UNIV_INTERN
ulint
os_event_wait_time(
/*===============*/
os_event_t event, /*!< in: event to wait */
ulint time) /*!< in: timeout in microseconds, or
ulint wtime) /*!< in: timeout in microseconds, or
OS_SYNC_INFINITE_TIME */
{
#ifdef __WIN__
@ -422,8 +425,8 @@ os_event_wait_time(
ut_a(event);
if (time != OS_SYNC_INFINITE_TIME) {
err = WaitForSingleObject(event->handle, (DWORD) time / 1000);
if (wtime != OS_SYNC_INFINITE_TIME) {
err = WaitForSingleObject(event->handle, (DWORD) wtime / 1000);
} else {
err = WaitForSingleObject(event->handle, INFINITE);
}
@ -439,13 +442,47 @@ os_event_wait_time(
return(1000000); /* dummy value to eliminate compiler warn. */
}
#else
UT_NOT_USED(time);
int err;
int ret = 0;
ulint tmp;
ib_int64_t old_count;
struct timeval tv_start;
struct timespec timeout;
if (wtime == OS_SYNC_INFINITE_TIME) {
os_event_wait(event);
return 0;
}
/* Compute the absolute point in time at which to time out. */
gettimeofday(&tv_start, NULL);
tmp = tv_start.tv_usec + wtime;
timeout.tv_sec = tv_start.tv_sec + (tmp / 1000000);
timeout.tv_nsec = (tmp % 1000000) * 1000;
os_fast_mutex_lock(&(event->os_mutex));
old_count = event->signal_count;
for (;;) {
if (event->is_set == TRUE || event->signal_count != old_count)
break;
err = pthread_cond_timedwait(&(event->cond_var),
&(event->os_mutex), &timeout);
if (err == ETIMEDOUT) {
ret = OS_SYNC_TIME_EXCEEDED;
break;
}
}
/* In Posix this is just an ordinary, infinite wait */
os_fast_mutex_unlock(&(event->os_mutex));
os_event_wait(event);
if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) {
os_thread_exit(NULL);
}
return(0);
return ret;
#endif
}
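
The POSIX branch added above turns the relative timeout in microseconds into the absolute deadline that pthread_cond_timedwait() expects, then loops until the event is set, the signal count changes, or ETIMEDOUT comes back; looping and re-checking the predicate is what guards against spurious wakeups. A small standalone sketch of just the deadline computation, assuming gettimeofday() as the clock source like the patch (the carry from tv_usec into tv_sec is the easy part to get wrong):

#include <stdio.h>
#include <sys/time.h>
#include <time.h>

/* Turn "now + usec_from_now" into the absolute timespec consumed by
   pthread_cond_timedwait(). */
struct timespec deadline_from_now(unsigned long usec_from_now)
{
    struct timeval  now;
    struct timespec deadline;
    unsigned long   usec;

    gettimeofday(&now, NULL);

    usec = (unsigned long) now.tv_usec + usec_from_now;
    deadline.tv_sec  = now.tv_sec + (time_t) (usec / 1000000);
    deadline.tv_nsec = (long) (usec % 1000000) * 1000;

    return deadline;
}

int main(void)
{
    struct timespec t = deadline_from_now(500000); /* 0.5 s from now */

    printf("deadline: %ld s, %ld ns\n", (long) t.tv_sec, t.tv_nsec);
    return 0;
}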

4
percona-suite/have_response_time_distribution.inc

@ -0,0 +1,4 @@
-- require r/have_response_time_distribution.require
disable_query_log;
show variables like 'have_response_time_distribution';
enable_query_log;

2
percona-suite/have_response_time_distribution.require

@ -0,0 +1,2 @@
Variable_name Value
have_response_time_distribution YES

1
percona-suite/log_connection_error.patch/percona_log_connection_error-master.opt

@ -0,0 +1 @@
--log-error

15
percona-suite/log_connection_error.patch/percona_log_connection_error.result

@ -0,0 +1,15 @@
SET @old_max_connections = @@max_connections;
SET @old_log_warnings = @@log_warnings;
SET GLOBAL max_connections=2;
SET GLOBAL LOG_WARNINGS = 0;
connect(localhost,root,,test,port,socket);
ERROR HY000: Too many connections
SET GLOBAL LOG_WARNINGS = 1;
connect(localhost,root,,test,port,socket);
ERROR HY000: Too many connections
SET GLOBAL LOG_WARNINGS = 0;
connect(localhost,root,,test,port,socket);
ERROR HY000: Too many connections
SET GLOBAL max_connections = @old_max_connections;
SET GLOBAL log_warnings = @old_log_warnings;
1

52
percona-suite/log_connection_error.patch/percona_log_connection_error.test

@ -0,0 +1,52 @@
--source include/not_embedded.inc
connect (main,localhost,root,,);
connection main;
SET @old_max_connections = @@max_connections;
SET @old_log_warnings = @@log_warnings;
SET GLOBAL max_connections=2;
let $port=`SELECT Variable_value FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE Variable_name LIKE 'port'`;
let $socket=`SELECT Variable_value FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE Variable_name LIKE 'socket'`;
SET GLOBAL LOG_WARNINGS = 0;
--connect (conn0,localhost,root,,)
connection conn0;
replace_result $port port $socket socket;
--error 1040
--connect(conn1,localhost,root,,)
disconnect conn0;
SLEEP 0.1; # tsarev: hack, but I don't know (and didn't find) a better way
connection main;
SET GLOBAL LOG_WARNINGS = 1;
--connect (conn1,localhost,root,,)
replace_result $port port $socket socket;
--error 1040
--connect (conn0,localhost,root,,)
disconnect conn1;
SLEEP 0.1; # tsarev: hack, but I don't know (and didn't find) a better way
connection main;
SET GLOBAL LOG_WARNINGS = 0;
--connect (conn0,localhost,root,,)
replace_result $port port $socket socket;
--error 1040
--connect(conn1,localhost,root,,)
disconnect conn0;
SLEEP 0.1; # tsarev: hack, but I don't know (and didn't find) a better way
connection main;
SET GLOBAL max_connections = @old_max_connections;
SET GLOBAL log_warnings = @old_log_warnings;
let $log_error_= `SELECT @@GLOBAL.log_error`;
if(!`select LENGTH('$log_error_')`)
{
# MySQL Server on windows is started with --console and thus
# does not know the location of its .err log, use default location
let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.1.err;
}
# Assign env variable LOG_ERROR
let LOG_ERROR=$log_error_;
let cmd=cat $log_error | grep "Too many connections" | wc -l;
exec $cmd;

2
percona-suite/percona_innodb_buffer_pool_shm-master.opt

@ -0,0 +1,2 @@
--innodb_buffer_pool_shm_key=123456
--innodb=FORCE

8
percona-suite/percona_innodb_buffer_pool_shm.result

@ -0,0 +1,8 @@
show variables like 'innodb_buffer_pool_shm%';
Variable_name Value
innodb_buffer_pool_shm_checksum ON
innodb_buffer_pool_shm_key 123456
show variables like 'innodb_buffer_pool_shm%';
Variable_name Value
innodb_buffer_pool_shm_checksum ON
innodb_buffer_pool_shm_key 123456

19
percona-suite/percona_innodb_buffer_pool_shm.test

@ -0,0 +1,19 @@
--source include/big_test.inc
--source include/have_innodb.inc
show variables like 'innodb_buffer_pool_shm%';
# clean shutdown (restart_mysqld.inc is not clean if shutdown takes over 10 sec...)
--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
wait
EOF
shutdown_server 120;
--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
restart
EOF
--enable_reconnect
--source include/wait_until_connected_again.inc
--disable_reconnect
show variables like 'innodb_buffer_pool_shm%';
--sleep 1
--system ipcrm -M 123456

1
percona-suite/percona_log_connection_error-master.opt

@ -1 +0,0 @@
--log-error

6
percona-suite/percona_server_variables.result

@ -36,10 +36,12 @@ delayed_insert_limit Value
delayed_insert_timeout Value
delayed_queue_size Value
div_precision_increment Value
enable_query_response_time_stats Value
engine_condition_pushdown Value
error_count Value
event_scheduler Value
expire_logs_days Value
fast_index_creation Value
flush Value
flush_time Value
foreign_key_checks Value
@ -62,6 +64,7 @@ have_ndbcluster Value
have_openssl Value
have_partitioning Value
have_query_cache Value
have_response_time_distribution Value
have_rtree_keys Value
have_ssl Value
have_symlink Value
@ -78,6 +81,8 @@ innodb_additional_mem_pool_size Value
innodb_auto_lru_dump Value
innodb_autoextend_increment Value
innodb_autoinc_lock_mode Value
innodb_buffer_pool_shm_checksum Value
innodb_buffer_pool_shm_key Value
innodb_buffer_pool_size Value
innodb_change_buffering Value
innodb_checkpoint_age_target Value
@ -246,6 +251,7 @@ query_cache_strip_comments Value
query_cache_type Value
query_cache_wlock_invalidate Value
query_prealloc_size Value
query_response_time_range_base Value
rand_seed1 Value
rand_seed2 Value
range_alloc_block_size Value

1
percona-suite/percona_server_variables.test

@ -1,3 +1,4 @@
--source include/have_response_time_distribution.inc
--source include/have_innodb.inc
--source include/have_debug.inc

16
percona-suite/profiling_slow.patch/percona_bug643149.result

@ -0,0 +1,16 @@
SET @old_slow_query_log_file=@@global.slow_query_log_file;
SET GLOBAL slow_query_log=on;
SET LOCAL profiling_server=on;
SET LOCAL long_query_time=1;
SET GLOBAL slow_query_log_file='MYSQLTEST_VARDIR/percona_bug643149_slow.log';;
SELECT SLEEP(2);
SLEEP(2)
0
# Time: X X:X:X
# User@Host: root[root] @ localhost []
# Thread_id: X Schema: test Last_errno: X Killed: X
# Query_time: X.X Lock_time: X.X Rows_sent: X Rows_examined: X Rows_affected: X Rows_read: X
# Bytes_sent: X Tmp_tables: X Tmp_disk_tables: X Tmp_table_sizes: X
# Profile_starting: X.X Profile_starting_cpu: X.X Profile_checking_permissions: X.X Profile_checking_permissions_cpu: X.X Profile_Opening_tables: X.X Profile_Opening_tables_cpu: X.X Profile_init: X.X Profile_init_cpu: X.X Profile_optimizing: X.X Profile_optimizing_cpu: X.X Profile_executing: X.X Profile_executing_cpu: X.X Profile_User_sleep: X.X Profile_User_sleep_cpu: X.X Profile_end: X.X Profile_end_cpu: X.X Profile_query_end: X.X Profile_query_end_cpu: X.X Profile_freeing_items: X.X Profile_freeing_items_cpu: X.X Profile_logging_slow_query: X.X Profile_logging_slow_query_cpu: X.X
# Profile_total: X.X Profile_total_cpu: X.X
SET GLOBAL slow_query_log_file=@old_slow_query_log_file;

50
percona-suite/profiling_slow.patch/percona_bug643149.test

@ -0,0 +1,50 @@
#
# This test suffers from server
# Bug#38124 "general_log_file" variable silently unset when using expression
# In short:
# SET GLOBAL general_log_file = @<whatever>
# SET GLOBAL slow_query_log = @<whatever>
# cause the value of these server system variables to be reset to the default
# instead of the assigned values, with no error message or warning.
# If this bug is fixed please
# 1. try this test with "let $fixed_bug38124 = 0;"
# 2. remove all workarounds if 1. was successful.
let $fixed_bug38124 = 0;
SET @old_slow_query_log_file=@@global.slow_query_log_file;
SET GLOBAL slow_query_log=on;
SET LOCAL profiling_server=on;
SET LOCAL long_query_time=1;
let slogfile=$MYSQLTEST_VARDIR/percona_bug643149_slow.log;
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--eval SET GLOBAL slow_query_log_file='$slogfile';
--disable_ps_protocol
SELECT SLEEP(2);
--enable_ps_protocol
perl;
$slogfile= $ENV{'slogfile'};
open(FILE, "$slogfile") or
die("Unable to read slow query log file $slogfile: $!\n");
while(<FILE>) {
next if (!/^#/);
s/[0-9]+/X/g;
s/ +/ /g;
print;
}
close(FILE);
EOF
SET GLOBAL slow_query_log_file=@old_slow_query_log_file;
if(!$fixed_bug38124)
{
--disable_query_log
let $my_var = `SELECT @old_slow_query_log_file`;
eval SET @@global.slow_query_log_file = '$my_var';
--enable_query_log
}

2
percona-suite/query_cache_enhance.patch/percona_status_wait_query_cache_mutex.test

@ -34,4 +34,4 @@ SET SESSION debug="+d,status_wait_query_cache_mutex_sleep";
SHOW PROCESSLIST;
DROP TABLE t;
set GLOBAL query_cache_size=0;
set GLOBAL query_cache_size=0;

70
percona-suite/response-time-distribution.patch/percona_query_response_time-replication.result

@ -0,0 +1,70 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
SET SESSION debug="+d,response_time_distribution_log_only_more_300_milliseconds";
DROP TABLE IF EXISTS t;
CREATE TABLE t(id INT);
SELECT * from t;
id
SELECT * from t;
id
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1;
Warnings:
Warning 1292 Truncated incorrect query_response_time_range_base value: '1'
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 2
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 10;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 10
FLUSH QUERY_RESPONSE_TIME;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=ON;
INSERT INTO t SELECT SLEEP(0.4);
Warnings:
Note 1592 Statement may not be safe to log in statement format.
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count)
0
INSERT INTO t SELECT SLEEP(0.4);
Warnings:
Note 1592 Statement may not be safe to log in statement format.
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count)
0
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count)
2
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count)
3
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 2;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 2
FLUSH QUERY_RESPONSE_TIME;
INSERT INTO t SELECT SLEEP(0.4);
Warnings:
Note 1592 Statement may not be safe to log in statement format.
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count)
0
INSERT INTO t SELECT SLEEP(0.4);
Warnings:
Note 1592 Statement may not be safe to log in statement format.
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count)
0
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count)
2
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count)
3
DROP TABLE IF EXISTS t;
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 10;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=OFF;
SET SESSION debug="-d,response_time_distribution_log_only_more_300_milliseconds";

57
percona-suite/response-time-distribution.patch/percona_query_response_time-replication.test

@ -0,0 +1,57 @@
--source include/have_response_time_distribution.inc
--source include/master-slave.inc
--source include/have_binlog_format_statement.inc
--source include/have_debug.inc
SET SESSION debug="+d,response_time_distribution_log_only_more_300_milliseconds";
connection master;
-- disable_warnings
DROP TABLE IF EXISTS t;
-- enable_warnings
CREATE TABLE t(id INT);
SELECT * from t;
sync_slave_with_master;
connection slave;
SELECT * from t;
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 10;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=ON;
connection master;
INSERT INTO t SELECT SLEEP(0.4);
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
INSERT INTO t SELECT SLEEP(0.4);
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
sync_slave_with_master;
connection slave;
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 2;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
connection master;
INSERT INTO t SELECT SLEEP(0.4);
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
INSERT INTO t SELECT SLEEP(0.4);
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
sync_slave_with_master;
connection slave;
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SELECT SUM(INFORMATION_SCHEMA.QUERY_RESPONSE_TIME.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
connection master;
DROP TABLE IF EXISTS t;
sync_slave_with_master;
connection slave;
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 10;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=OFF;
SET SESSION debug="-d,response_time_distribution_log_only_more_300_milliseconds";

313
percona-suite/response-time-distribution.patch/percona_query_response_time-stored.result

@ -0,0 +1,313 @@
SET SESSION debug="+d,response_time_distribution_log_only_more_300_milliseconds";
CREATE FUNCTION test_f()
RETURNS CHAR(30) DETERMINISTIC
BEGIN
DECLARE first VARCHAR(5);
DECLARE second VARCHAR(5);
DECLARE result VARCHAR(20);
SELECT SLEEP(1.11) INTO first;
SET first= 'Hello';
SET second=', ';
SET result= CONCAT(first,second);
SET result= CONCAT(result,'world!');
RETURN result;
END/
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1;
Warnings:
Warning 1292 Truncated incorrect query_response_time_range_base value: '1'
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 2
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 2;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 2
FLUSH QUERY_RESPONSE_TIME;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
44
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000001
0.000003
0.000007
0.000015
0.000030
0.000061
0.000122
0.000244
0.000488
0.000976
0.001953
0.003906
0.007812
0.015625
0.031250
0.062500
0.125000
0.250000
0.500000
1.000000
2.000000
4.000000
8.000000
16.000000
32.000000
64.000000
128.000000
256.000000
512.000000
1024.000000
2048.000000
4096.000000
8192.000000
16384.000000
32768.000000
65536.000000
131072.000000
262144.000000
524288.000000
1048576.00000
2097152.00000
4194304.00000
8388608.00000
TOO LONG
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT test_f();
test_f()
Hello, world!
SELECT test_f();
test_f()
Hello, world!
SELECT test_f();
test_f()
Hello, world!
SELECT test_f();
test_f()
Hello, world!
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
4 4 4 1 44
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
44
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000001
0.000003
0.000007
0.000015
0.000030
0.000061
0.000122
0.000244
0.000488
0.000976
0.001953
0.003906
0.007812
0.015625
0.031250
0.062500
0.125000
0.250000
0.500000
1.000000
2.000000
4.000000
8.000000
16.000000
32.000000
64.000000
128.000000
256.000000
512.000000
1024.000000
2048.000000
4096.000000
8192.000000
16384.000000
32768.000000
65536.000000
131072.000000
262144.000000
524288.000000
1048576.00000
2097152.00000
4194304.00000
8388608.00000
TOO LONG
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 2
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 10;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 10
FLUSH QUERY_RESPONSE_TIME;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT test_f();
test_f()
Hello, world!
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
1 1 1 1 14
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
14
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000001
0.000010
0.000100
0.001000
0.010000
0.100000
1.000000
10.000000
100.000000
1000.000000
10000.000000
100000.000000
1000000.00000
TOO LONG
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 10
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 7;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 7
FLUSH QUERY_RESPONSE_TIME;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT test_f();
test_f()
Hello, world!
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
1 1 1 1 17
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
17
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000001
0.000008
0.000059
0.000416
0.002915
0.020408
0.142857
1.000000
7.000000
49.000000
343.000000
2401.000000
16807.000000
117649.000000
823543.000000
5764801.00000
TOO LONG
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 7
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 156;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 156
FLUSH QUERY_RESPONSE_TIME;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT test_f();
test_f()
Hello, world!
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
1 1 1 1 7
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
7
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000041
0.006410
1.000000
156.000000
24336.000000
3796416.00000
TOO LONG
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 156
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1000;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 1000
FLUSH QUERY_RESPONSE_TIME;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT test_f();
test_f()
Hello, world!
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
1 1 1 1 6
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
6
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000001
0.001000
1.000000
1000.000000
1000000.00000
TOO LONG
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 1000
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1001;
Warnings:
Warning 1292 Truncated incorrect query_response_time_range_base value: '1001'
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 1000
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE =10;
DROP FUNCTION test_f;
SET SESSION debug="-d,response_time_distribution_log_only_more_300_milliseconds";

90
percona-suite/response-time-distribution.patch/percona_query_response_time-stored.test

@ -0,0 +1,90 @@
--source include/have_response_time_distribution.inc
--source include/have_debug.inc
SET SESSION debug="+d,response_time_distribution_log_only_more_300_milliseconds";
delimiter /;
CREATE FUNCTION test_f()
RETURNS CHAR(30) DETERMINISTIC
BEGIN
DECLARE first VARCHAR(5);
DECLARE second VARCHAR(5);
DECLARE result VARCHAR(20);
SELECT SLEEP(1.11) INTO first;
SET first= 'Hello';
SET second=', ';
SET result= CONCAT(first,second);
SET result= CONCAT(result,'world!');
RETURN result;
END/
delimiter ;/
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 2;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
source include/percona_query_response_time_show.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT test_f();
SELECT test_f();
SELECT test_f();
SELECT test_f();
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
source include/percona_query_response_time_show.inc;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 10;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT test_f();
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
source include/percona_query_response_time_show.inc;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 7;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT test_f();
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
source include/percona_query_response_time_show.inc;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 156;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT test_f();
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
source include/percona_query_response_time_show.inc;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1000;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT test_f();
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
source include/percona_query_response_time_show.inc;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1001;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE =10;
DROP FUNCTION test_f;
SET SESSION debug="-d,response_time_distribution_log_only_more_300_milliseconds";

567
percona-suite/response-time-distribution.patch/percona_query_response_time.result

@ -0,0 +1,567 @@
SET SESSION debug="+d,response_time_distribution_log_only_more_300_milliseconds";
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1;
Warnings:
Warning 1292 Truncated incorrect query_response_time_range_base value: '1'
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 2
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 2;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 2
FLUSH QUERY_RESPONSE_TIME;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
44
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000001
0.000003
0.000007
0.000015
0.000030
0.000061
0.000122
0.000244
0.000488
0.000976
0.001953
0.003906
0.007812
0.015625
0.031250
0.062500
0.125000
0.250000
0.500000
1.000000
2.000000
4.000000
8.000000
16.000000
32.000000
64.000000
128.000000
256.000000
512.000000
1024.000000
2048.000000
4096.000000
8192.000000
16384.000000
32768.000000
65536.000000
131072.000000
262144.000000
524288.000000
1048576.00000
2097152.00000
4194304.00000
8388608.00000
TOO LONG
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT SLEEP(0.31);
SLEEP(0.31)
0
SELECT SLEEP(0.32);
SLEEP(0.32)
0
SELECT SLEEP(0.33);
SLEEP(0.33)
0
SELECT SLEEP(0.34);
SLEEP(0.34)
0
SELECT SLEEP(0.35);
SLEEP(0.35)
0
SELECT SLEEP(0.36);
SLEEP(0.36)
0
SELECT SLEEP(0.37);
SLEEP(0.37)
0
SELECT SLEEP(0.38);
SLEEP(0.38)
0
SELECT SLEEP(0.39);
SLEEP(0.39)
0
SELECT SLEEP(0.40);
SLEEP(0.40)
0
SELECT SLEEP(1.1);
SLEEP(1.1)
0
SELECT SLEEP(1.2);
SLEEP(1.2)
0
SELECT SLEEP(1.3);
SLEEP(1.3)
0
SELECT SLEEP(1.5);
SLEEP(1.5)
0
SELECT SLEEP(1.4);
SLEEP(1.4)
0
SELECT SLEEP(0.5);
SLEEP(0.5)
0
SELECT SLEEP(2.1);
SLEEP(2.1)
0
SELECT SLEEP(2.3);
SLEEP(2.3)
0
SELECT SLEEP(2.5);
SLEEP(2.5)
0
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
10 19 15 4 44
1 19 15 4 44
5 19 15 4 44
3 19 15 4 44
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
44
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000001
0.000003
0.000007
0.000015
0.000030
0.000061
0.000122
0.000244
0.000488
0.000976
0.001953
0.003906
0.007812
0.015625
0.031250
0.062500
0.125000
0.250000
0.500000
1.000000
2.000000
4.000000
8.000000
16.000000
32.000000
64.000000
128.000000
256.000000
512.000000
1024.000000
2048.000000
4096.000000
8192.000000
16384.000000
32768.000000
65536.000000
131072.000000
262144.000000
524288.000000
1048576.00000
2097152.00000
4194304.00000
8388608.00000
TOO LONG
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 2
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 10;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 10
FLUSH QUERY_RESPONSE_TIME;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT SLEEP(0.31);
SLEEP(0.31)
0
SELECT SLEEP(0.32);
SLEEP(0.32)
0
SELECT SLEEP(0.33);
SLEEP(0.33)
0
SELECT SLEEP(0.34);
SLEEP(0.34)
0
SELECT SLEEP(0.35);
SLEEP(0.35)
0
SELECT SLEEP(0.36);
SLEEP(0.36)
0
SELECT SLEEP(0.37);
SLEEP(0.37)
0
SELECT SLEEP(0.38);
SLEEP(0.38)
0
SELECT SLEEP(0.39);
SLEEP(0.39)
0
SELECT SLEEP(0.40);
SLEEP(0.40)
0
SELECT SLEEP(1.1);
SLEEP(1.1)
0
SELECT SLEEP(1.2);
SLEEP(1.2)
0
SELECT SLEEP(1.3);
SLEEP(1.3)
0
SELECT SLEEP(1.5);
SLEEP(1.5)
0
SELECT SLEEP(1.4);
SLEEP(1.4)
0
SELECT SLEEP(0.5);
SLEEP(0.5)
0
SELECT SLEEP(2.1);
SLEEP(2.1)
0
SELECT SLEEP(2.3);
SLEEP(2.3)
0
SELECT SLEEP(2.5);
SLEEP(2.5)
0
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
11 19 17 2 14
8 19 17 2 14
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
14
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000001
0.000010
0.000100
0.001000
0.010000
0.100000
1.000000
10.000000
100.000000
1000.000000
10000.000000
100000.000000
1000000.00000
TOO LONG
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 10
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 7;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 7
FLUSH QUERY_RESPONSE_TIME;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT SLEEP(0.31);
SLEEP(0.31)
0
SELECT SLEEP(0.32);
SLEEP(0.32)
0
SELECT SLEEP(0.33);
SLEEP(0.33)
0
SELECT SLEEP(0.34);
SLEEP(0.34)
0
SELECT SLEEP(0.35);
SLEEP(0.35)
0
SELECT SLEEP(0.36);
SLEEP(0.36)
0
SELECT SLEEP(0.37);
SLEEP(0.37)
0
SELECT SLEEP(0.38);
SLEEP(0.38)
0
SELECT SLEEP(0.39);
SLEEP(0.39)
0
SELECT SLEEP(0.40);
SLEEP(0.40)
0
SELECT SLEEP(1.1);
SLEEP(1.1)
0
SELECT SLEEP(1.2);
SLEEP(1.2)
0
SELECT SLEEP(1.3);
SLEEP(1.3)
0
SELECT SLEEP(1.5);
SLEEP(1.5)
0
SELECT SLEEP(1.4);
SLEEP(1.4)
0
SELECT SLEEP(0.5);
SLEEP(0.5)
0
SELECT SLEEP(2.1);
SLEEP(2.1)
0
SELECT SLEEP(2.3);
SLEEP(2.3)
0
SELECT SLEEP(2.5);
SLEEP(2.5)
0
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
11 19 17 2 17
8 19 17 2 17
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
17
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000001
0.000008
0.000059
0.000416
0.002915
0.020408
0.142857
1.000000
7.000000
49.000000
343.000000
2401.000000
16807.000000
117649.000000
823543.000000
5764801.00000
TOO LONG
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 7
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 156;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 156
FLUSH QUERY_RESPONSE_TIME;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT SLEEP(0.31);
SLEEP(0.31)
0
SELECT SLEEP(0.32);
SLEEP(0.32)
0
SELECT SLEEP(0.33);
SLEEP(0.33)
0
SELECT SLEEP(0.34);
SLEEP(0.34)
0
SELECT SLEEP(0.35);
SLEEP(0.35)
0
SELECT SLEEP(0.36);
SLEEP(0.36)
0
SELECT SLEEP(0.37);
SLEEP(0.37)
0
SELECT SLEEP(0.38);
SLEEP(0.38)
0
SELECT SLEEP(0.39);
SLEEP(0.39)
0
SELECT SLEEP(0.40);
SLEEP(0.40)
0
SELECT SLEEP(1.1);
SLEEP(1.1)
0
SELECT SLEEP(1.2);
SLEEP(1.2)
0
SELECT SLEEP(1.3);
SLEEP(1.3)
0
SELECT SLEEP(1.5);
SLEEP(1.5)
0
SELECT SLEEP(1.4);
SLEEP(1.4)
0
SELECT SLEEP(0.5);
SLEEP(0.5)
0
SELECT SLEEP(2.1);
SLEEP(2.1)
0
SELECT SLEEP(2.3);
SLEEP(2.3)
0
SELECT SLEEP(2.5);
SLEEP(2.5)
0
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
11 19 17 2 7
8 19 17 2 7
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
7
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000041
0.006410
1.000000
156.000000
24336.000000
3796416.00000
TOO LONG
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 156
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1000;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 1000
FLUSH QUERY_RESPONSE_TIME;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
SELECT SLEEP(0.31);
SLEEP(0.31)
0
SELECT SLEEP(0.32);
SLEEP(0.32)
0
SELECT SLEEP(0.33);
SLEEP(0.33)
0
SELECT SLEEP(0.34);
SLEEP(0.34)
0
SELECT SLEEP(0.35);
SLEEP(0.35)
0
SELECT SLEEP(0.36);
SLEEP(0.36)
0
SELECT SLEEP(0.37);
SLEEP(0.37)
0
SELECT SLEEP(0.38);
SLEEP(0.38)
0
SELECT SLEEP(0.39);
SLEEP(0.39)
0
SELECT SLEEP(0.40);
SLEEP(0.40)
0
SELECT SLEEP(1.1);
SLEEP(1.1)
0
SELECT SLEEP(1.2);
SLEEP(1.2)
0
SELECT SLEEP(1.3);
SLEEP(1.3)
0
SELECT SLEEP(1.5);
SLEEP(1.5)
0
SELECT SLEEP(1.4);
SLEEP(1.4)
0
SELECT SLEEP(0.5);
SLEEP(0.5)
0
SELECT SLEEP(2.1);
SLEEP(2.1)
0
SELECT SLEEP(2.3);
SLEEP(2.3)
0
SELECT SLEEP(2.5);
SLEEP(2.5)
0
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
count query_count query_total not_zero_region_count region_count
11 19 17 2 6
8 19 17 2 6
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
region_count
6
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
time
0.000001
0.001000
1.000000
1000.000000
1000000.00000
TOO LONG
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 1000
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1001;
Warnings:
Warning 1292 Truncated incorrect query_response_time_range_base value: '1001'
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
Variable_name Value
query_response_time_range_base 1000
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE =10;
SET SESSION debug="-d,response_time_distribution_log_only_more_300_milliseconds";

68
percona-suite/response-time-distribution.patch/percona_query_response_time.test

@ -0,0 +1,68 @@
--source include/have_response_time_distribution.inc
--source include/have_debug.inc
SET SESSION debug="+d,response_time_distribution_log_only_more_300_milliseconds";
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 2;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
source include/percona_query_response_time_show.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
source include/percona_query_response_time_sleep.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
source include/percona_query_response_time_show.inc;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 10;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
source include/percona_query_response_time_sleep.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
source include/percona_query_response_time_show.inc;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 7;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
source include/percona_query_response_time_sleep.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
source include/percona_query_response_time_show.inc;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 156;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
source include/percona_query_response_time_sleep.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
source include/percona_query_response_time_show.inc;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1000;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
source include/percona_query_response_time_flush.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=1;
source include/percona_query_response_time_sleep.inc;
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
source include/percona_query_response_time_show.inc;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE = 1001;
SHOW GLOBAL VARIABLES where Variable_name like 'QUERY_RESPONSE_TIME_RANGE_BASE';
SET GLOBAL ENABLE_QUERY_RESPONSE_TIME_STATS=0;
SET GLOBAL QUERY_RESPONSE_TIME_RANGE_BASE =10;
SET SESSION debug="-d,response_time_distribution_log_only_more_300_milliseconds";

1
percona-suite/response-time-distribution.patch/percona_query_response_time_flush.inc

@ -0,0 +1 @@
FLUSH QUERY_RESPONSE_TIME;

8
percona-suite/response-time-distribution.patch/percona_query_response_time_show.inc

@ -0,0 +1,8 @@
SELECT d.count,
(SELECT SUM(a.count) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as a WHERE a.count != 0) as query_count,
(SELECT SUM((b.total * 1000000) DIV 1000000) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as b WHERE b.count != 0) as query_total,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as c WHERE c.count != 0) as not_zero_region_count,
(SELECT COUNT(*) FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME) as region_count
FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME as d WHERE d.count > 0;
SELECT COUNT(*) as region_count FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
SELECT time FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;

19
percona-suite/response-time-distribution.patch/percona_query_response_time_sleep.inc

@ -0,0 +1,19 @@
SELECT SLEEP(0.31);
SELECT SLEEP(0.32);
SELECT SLEEP(0.33);
SELECT SLEEP(0.34);
SELECT SLEEP(0.35);
SELECT SLEEP(0.36);
SELECT SLEEP(0.37);
SELECT SLEEP(0.38);
SELECT SLEEP(0.39);
SELECT SLEEP(0.40);
SELECT SLEEP(1.1);
SELECT SLEEP(1.2);
SELECT SLEEP(1.3);
SELECT SLEEP(1.5);
SELECT SLEEP(1.4);
SELECT SLEEP(0.5);
SELECT SLEEP(2.1);
SELECT SLEEP(2.3);
SELECT SLEEP(2.5);

21
percona-suite/show_slave_status_nolock.patch/percona_show_slave_status_nolock.result

@ -0,0 +1,21 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
DROP TABLE IF EXISTS t;
CREATE TABLE t(id INT);
INSERT INTO t SELECT SLEEP(10);
STOP SLAVE;
Warnings:
Note 1592 Statement may not be safe to log in statement format.
master count(*)
master 1
slave count(*)
slave 0
SHOW SLAVE STATUS NOLOCK;
START SLAVE;
slave count(*)
slave 1
DROP TABLE t;

47
percona-suite/show_slave_status_nolock.patch/percona_show_slave_status_nolock.test

@ -0,0 +1,47 @@
--source include/master-slave.inc
--source include/have_binlog_format_statement.inc
connection master;
--disable_warnings
DROP TABLE IF EXISTS t;
--enable_warnings
CREATE TABLE t(id INT);
sync_slave_with_master;
connection master;
send INSERT INTO t SELECT SLEEP(10);
connection slave;
sleep 15;
send STOP SLAVE;
connection master;
reap;
--disable_query_log
select "master",count(*) from t;
--enable_query_log
connection slave1;
--disable_query_log
select "slave",count(*) from t;
--enable_query_log
--disable_result_log
SHOW SLAVE STATUS NOLOCK;
--enable_result_log
connection slave;
reap;
--source include/wait_for_slave_to_stop.inc
START SLAVE;
--source include/wait_for_slave_to_start.inc
--disable_query_log
select "slave",count(*) from t;
--enable_query_log
connection master;
DROP TABLE t;
sync_slave_with_master;

16
percona-suite/slow_extended.patch/grep.inc

@ -0,0 +1,16 @@
perl;
$file = $ENV{'grep_file'};
$pattern = $ENV{'grep_pattern'};
open(FILE, "$file")
or die("Cannot open file $file: $!\n");
$lines = 0;
while(<FILE>) {
$lines++ if (/$pattern/);
}
print "$lines\n";
close(FILE);
EOF

1
percona-suite/slow_extended.patch/percona_slow_extended-combined-master.opt

@ -0,0 +1 @@
--use_global_long_query_time --log_slow_verbosity="full"

18
percona-suite/slow_extended.patch/percona_slow_extended-combined.result

@ -0,0 +1,18 @@
show variables like 'use_global_long_query_time';
Variable_name Value
use_global_long_query_time ON
show variables like 'use_global_log_slow_control';
Variable_name Value
use_global_log_slow_control long_query_time
show variables like 'log_slow_verbosity';
Variable_name Value
log_slow_verbosity microtime,query_plan,innodb
show global variables like 'use_global_long_query_time';
Variable_name Value
use_global_long_query_time ON
show global variables like 'log_slow_verbosity';
Variable_name Value
log_slow_verbosity microtime,query_plan,innodb
show global variables like 'use_global_log_slow_control';
Variable_name Value
use_global_log_slow_control long_query_time

6
percona-suite/slow_extended.patch/percona_slow_extended-combined.test

@ -0,0 +1,6 @@
show variables like 'use_global_long_query_time';
show variables like 'use_global_log_slow_control';
show variables like 'log_slow_verbosity';
show global variables like 'use_global_long_query_time';
show global variables like 'log_slow_verbosity';
show global variables like 'use_global_log_slow_control';

1
percona-suite/slow_extended.patch/percona_slow_extended-combined2-master.opt

@ -0,0 +1 @@
--use_global_log_slow_control="long_query_time"

12
percona-suite/slow_extended.patch/percona_slow_extended-combined2.result

@ -0,0 +1,12 @@
show variables like 'use_global_long_query_time';
Variable_name Value
use_global_long_query_time ON
show variables like 'use_global_log_slow_control';
Variable_name Value
use_global_log_slow_control long_query_time
show global variables like 'use_global_long_query_time';
Variable_name Value
use_global_long_query_time ON
show global variables like 'use_global_log_slow_control';
Variable_name Value
use_global_log_slow_control long_query_time

4
percona-suite/slow_extended.patch/percona_slow_extended-combined2.test

@ -0,0 +1,4 @@
show variables like 'use_global_long_query_time';
show variables like 'use_global_log_slow_control';
show global variables like 'use_global_long_query_time';
show global variables like 'use_global_log_slow_control';

2
percona-suite/slow_extended.patch/percona_slow_extended-control_global_slow-master.opt

@ -1 +1 @@
--slow-query-log-file=percona_slow_query_log-control_global_slow.log --long-query-time=1
--slow-query-log-file=percona_slow_query_log-control_global_slow.log --long-query-time=1

2
percona-suite/slow_extended.patch/percona_slow_extended-control_global_slow.result

@ -8,5 +8,5 @@ sleep(2)
0
set global use_global_log_slow_control=none;
set global log_slow_verbosity=microtime;
cat var/mysqld.1/data/percona_slow_query_log-control_global_slow.log | grep "No InnoDB statistics available for this query" | wc -l
FLUSH LOGS;
1

8
percona-suite/slow_extended.patch/percona_slow_extended-control_global_slow.test

@ -5,6 +5,8 @@ set global use_global_log_slow_control="log_slow_verbosity,long_query_time";
SELECT sleep(2);
set global use_global_log_slow_control=none;
set global log_slow_verbosity=microtime;
let $cmd = cat var/mysqld.1/data/percona_slow_query_log-control_global_slow.log | grep "No InnoDB statistics available for this query" | wc -l;
echo $cmd;
exec $cmd;
FLUSH LOGS;
--let grep_file = $MYSQLTEST_VARDIR/mysqld.1/data/percona_slow_query_log-control_global_slow.log
--let grep_pattern = No InnoDB statistics available for this query
--source include/grep.inc

2
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_filter-master.opt

@ -1 +1 @@
--slow-query-log-file=percona_slow_query_log-log_slow_filter.log --long-query-time=1
--slow-query-log-file=percona_slow_query_log-log_slow_filter.log --long-query-time=1

2
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_filter.result

@ -21,5 +21,5 @@ SELECT sleep(2);
sleep(2)
0
drop table if exists t;
cat var/mysqld.1/data/percona_slow_query_log-log_slow_filter.log | grep Query_time | wc -l
FLUSH LOGS;
2

7
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_filter.test

@ -26,6 +26,7 @@ SELECT sleep(2);
drop table if exists t;
--enable_warnings
let $cmd = cat var/mysqld.1/data/percona_slow_query_log-log_slow_filter.log | grep Query_time | wc -l;
echo $cmd;
exec $cmd;
FLUSH LOGS;
--let grep_file = $MYSQLTEST_VARDIR/mysqld.1/data/percona_slow_query_log-log_slow_filter.log
--let grep_pattern = Query_time
--source include/grep.inc

1
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_sp_statements-cl-master.opt

@@ -0,0 +1 @@
--log_slow_sp_statements

3
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_sp_statements-cl.result

@@ -0,0 +1,3 @@
show global variables like 'log_slow_sp_statements';
Variable_name Value
log_slow_sp_statements ON

1
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_sp_statements-cl.test

@@ -0,0 +1 @@
show global variables like 'log_slow_sp_statements';

1
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_timestamp_every-cl-master.opt

@@ -0,0 +1 @@
--log_slow_timestamp_every

3
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_timestamp_every-cl.result

@@ -0,0 +1,3 @@
show global variables like 'log_slow_timestamp_every';
Variable_name Value
log_slow_timestamp_every ON

1
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_timestamp_every-cl.test

@@ -0,0 +1 @@
show global variables like 'log_slow_timestamp_every';

1
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity-cl-master.opt

@@ -0,0 +1 @@
--log_slow_verbosity="full"

9
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity-cl.result

@@ -0,0 +1,9 @@
show global variables like 'log_slow_verbosity';
Variable_name Value
log_slow_verbosity microtime,query_plan,innodb
show variables like 'log_slow_verbosity';
Variable_name Value
log_slow_verbosity microtime,query_plan,innodb
select @@log_slow_verbosity;
@@log_slow_verbosity
microtime,query_plan,innodb

3
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity-cl.test

@@ -0,0 +1,3 @@
show global variables like 'log_slow_verbosity';
show variables like 'log_slow_verbosity';
select @@log_slow_verbosity;
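The -cl result above records that the --log_slow_verbosity="full" shorthand from the .opt file is stored as the expanded list microtime,query_plan,innodb. A quick interactive check of the same expansion follows; this is only a sketch, assuming a session-level SET of the shorthand behaves like the command-line option (the hunks in this diff only set individual flags such as innodb and microtime at runtime):

# Sketch (assumption: 'full' is accepted by SET as well as on the command line).
set log_slow_verbosity='full';
select @@log_slow_verbosity;
# Expected, by analogy with the result above: microtime,query_plan,innodb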

2
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity-master.opt

@@ -1 +1 @@
--slow-query-log-file=percona_slow_query_log-log_slow_verbosity.log --long-query-time=1
--slow-query-log-file=percona_slow_query_log-log_slow_verbosity.log --long-query-time=1

2
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity.result

@@ -5,5 +5,5 @@ set log_slow_verbosity=innodb;
SELECT sleep(2);
sleep(2)
0
cat var/mysqld.1/data/percona_slow_query_log-log_slow_verbosity.log | grep "No InnoDB statistics available for this query" | wc -l
FLUSH LOGS;
1

7
percona-suite/slow_extended.patch/percona_slow_extended-log_slow_verbosity.test

@@ -6,6 +6,7 @@ set log_slow_verbosity=innodb;
SELECT sleep(2);
let $cmd = cat var/mysqld.1/data/percona_slow_query_log-log_slow_verbosity.log | grep "No InnoDB statistics available for this query" | wc -l;
echo $cmd;
exec $cmd;
FLUSH LOGS;
--let grep_file = $MYSQLTEST_VARDIR/mysqld.1/data/percona_slow_query_log-log_slow_verbosity.log
--let grep_pattern = No InnoDB statistics available for this query
--source include/grep.inc

2
percona-suite/slow_extended.patch/percona_slow_extended-long_query_time-master.opt

@@ -1 +1 @@
--slow-query-log-file=percona_slow_query_log-long_query_time.log --long-query-time=2
--slow-query-log-file=percona_slow_query_log-long_query_time.log --long-query-time=2

2
percona-suite/slow_extended.patch/percona_slow_extended-long_query_time.result

@@ -18,5 +18,5 @@ SELECT sleep(5);
sleep(5)
0
set long_query_time=2;
cat var/mysqld.1/data/percona_slow_query_log-long_query_time.log | grep Query_time | wc -l
FLUSH LOGS;
3

8
percona-suite/slow_extended.patch/percona_slow_extended-long_query_time.test

@@ -12,6 +12,8 @@ SELECT sleep(5);
set long_query_time=2;
let $cmd = cat var/mysqld.1/data/percona_slow_query_log-long_query_time.log | grep Query_time | wc -l;
echo $cmd;
exec $cmd;
FLUSH LOGS;
--let grep_file = $MYSQLTEST_VARDIR/mysqld.1/data/percona_slow_query_log-long_query_time.log
--let grep_pattern = Query_time
--source include/grep.inc

2
percona-suite/slow_extended.patch/percona_slow_extended-microseconds_in_slow_extended-master.opt

@@ -1 +1 @@
--slow-query-log-file=percona_slow_query_log-microseconds_in_slow_query_log.log --long-query-time=1
--slow-query-log-file=percona_slow_query_log-microseconds_in_slow_query_log.log --long-query-time=1

3
percona-suite/slow_extended.patch/percona_slow_extended-microseconds_in_slow_extended.result

@@ -6,7 +6,6 @@ SELECT sleep(2);
sleep(2)
0
set global slow_query_log_microseconds_timestamp=OFF;
cat var/mysqld.1/data/percona_slow_query_log-microseconds_in_slow_query_log.log | grep -E '# Time: [0-9]+[ ]+[0-9]+:[0-\9]+:[0-9]+.[0-9]+' | wc -l
FLUSH LOGS;
1
cat var/mysqld.1/data/percona_slow_query_log-microseconds_in_slow_query_log.log | grep -E '# Time: [0-9]+[ ]+[0-9]+:[0-\9]+:[0-9]+' | wc -l
2

14
percona-suite/slow_extended.patch/percona_slow_extended-microseconds_in_slow_extended.test

@@ -8,10 +8,12 @@ SELECT sleep(2);
set global slow_query_log_microseconds_timestamp=OFF;
let $cmd = cat var/mysqld.1/data/percona_slow_query_log-microseconds_in_slow_query_log.log | grep -E '# Time: [0-9]+[ ]+[0-9]+:[0-\9]+:[0-9]+.[0-9]+' | wc -l;
echo $cmd;
exec $cmd;
FLUSH LOGS;
--let grep_file = $MYSQLTEST_VARDIR/mysqld.1/data/percona_slow_query_log-microseconds_in_slow_query_log.log
--let grep_pattern = # Time: [0-9]+[ ]+[0-9]+:[0-9]+:[0-9]+.[0-9]+
--source include/grep.inc
--let grep_file = $MYSQLTEST_VARDIR/mysqld.1/data/percona_slow_query_log-microseconds_in_slow_query_log.log
--let grep_pattern = # Time: [0-9]+[ ]+[0-9]+:[0-9]+:[0-9]+
--source include/grep.inc
let $cmd = cat var/mysqld.1/data/percona_slow_query_log-microseconds_in_slow_query_log.log | grep -E '# Time: [0-9]+[ ]+[0-9]+:[0-\9]+:[0-9]+' | wc -l;
echo $cmd;
exec $cmd;
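The two grep_pattern values above differ only in the trailing .[0-9]+: the first effectively matches only # Time: lines that carry a fractional-second part, while the second matches both forms, which is why the result file records counts of 1 and 2 respectively. Illustrative slow-log header lines of each kind (invented for clarity, not taken from this diff):

# Time: 110204  1:23:45.678901
#   (matched by both patterns: fractional seconds present)
# Time: 110204  1:23:46
#   (matched only by the second, coarser pattern)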

Some files were not shown because too many files changed in this diff
