Browse Source

5.6.36-82.1

pull/863/head
Vicențiu Ciorbaru 8 years ago
parent
commit
b1a2031ff9
  1. 2
      storage/tokudb/CMakeLists.txt
  2. 10
      storage/tokudb/PerconaFT/CMakeLists.txt
  3. 5
      storage/tokudb/PerconaFT/buildheader/make_tdb.cc
  4. 3
      storage/tokudb/PerconaFT/cmake_modules/TokuThirdParty.cmake
  5. 9
      storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc
  6. 3
      storage/tokudb/PerconaFT/ft/cachetable/cachetable.h
  7. 97
      storage/tokudb/PerconaFT/ft/ft-ops.cc
  8. 3
      storage/tokudb/PerconaFT/ft/ft.cc
  9. 19
      storage/tokudb/PerconaFT/ft/node.cc
  10. 20
      storage/tokudb/PerconaFT/ft/node.h
  11. 43
      storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc
  12. 23
      storage/tokudb/PerconaFT/ft/serialize/ft-serialize.h
  13. 689
      storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
  14. 69
      storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.h
  15. 2
      storage/tokudb/PerconaFT/ft/txn/roll.cc
  16. 9
      storage/tokudb/PerconaFT/ft/txn/txn.cc
  17. 5
      storage/tokudb/PerconaFT/ft/txn/txn.h
  18. 150
      storage/tokudb/PerconaFT/locktree/lock_request.cc
  19. 23
      storage/tokudb/PerconaFT/locktree/lock_request.h
  20. 44
      storage/tokudb/PerconaFT/locktree/locktree.cc
  21. 33
      storage/tokudb/PerconaFT/locktree/locktree.h
  22. 13
      storage/tokudb/PerconaFT/locktree/manager.cc
  23. 100
      storage/tokudb/PerconaFT/locktree/tests/kill_waiter.cc
  24. 3
      storage/tokudb/PerconaFT/locktree/tests/lock_request_killed.cc
  25. 1
      storage/tokudb/PerconaFT/locktree/tests/lock_request_not_killed.cc
  26. 91
      storage/tokudb/PerconaFT/locktree/tests/lock_request_start_release_wait.cc
  27. 121
      storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race.cc
  28. 133
      storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc
  29. 135
      storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_wait_race_2.cc
  30. 77
      storage/tokudb/PerconaFT/portability/toku_debug_sync.h
  31. 3
      storage/tokudb/PerconaFT/portability/toku_portability.h
  32. 9
      storage/tokudb/PerconaFT/src/tests/test_iterate_live_transactions.cc
  33. 3
      storage/tokudb/PerconaFT/src/tests/test_stress0.cc
  34. 9
      storage/tokudb/PerconaFT/src/tests/transactional_fileops.cc
  35. 38
      storage/tokudb/PerconaFT/src/ydb.cc
  36. 22
      storage/tokudb/PerconaFT/src/ydb_row_lock.cc
  37. 8
      storage/tokudb/PerconaFT/src/ydb_txn.cc
  38. 8
      storage/tokudb/PerconaFT/tools/CMakeLists.txt
  39. 2
      storage/tokudb/PerconaFT/tools/ftverify.cc
  40. 3
      storage/tokudb/PerconaFT/tools/tokuftdump.cc
  41. 124
      storage/tokudb/ha_tokudb.cc
  42. 41
      storage/tokudb/ha_tokudb.h
  43. 8
      storage/tokudb/hatoku_hton.cc
  44. 26
      storage/tokudb/mysql-test/tokudb/r/kill_query_blocked_in_lt.result
  45. 5
      storage/tokudb/mysql-test/tokudb/r/locks-select-update-3.result
  46. 56
      storage/tokudb/mysql-test/tokudb/t/kill_query_blocked_in_lt.test
  47. 10
      storage/tokudb/mysql-test/tokudb/t/locks-select-update-3.test
  48. 2
      storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result
  49. 3
      storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc
  50. 42
      storage/tokudb/mysql-test/tokudb_rpl/include/rpl_tokudb_row_img_general_loop.inc
  51. 32
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_not_null_tokudb.result
  52. 40
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_row_basic_3tokudb.result
  53. 15
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_commit_after_flush.result
  54. 554
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_insert_id.result
  55. 82
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_insert_id_pk.result
  56. 29
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_multi_update.result
  57. 60
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_multi_update2.result
  58. 202
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_multi_update3.result
  59. 2183
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_crash_safe.result
  60. 4739
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_blobs.result
  61. 3681
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_eng_full.result
  62. 3522
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_eng_min.result
  63. 3522
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_eng_noblob.result
  64. 3505
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_idx_full.result
  65. 3530
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_idx_min.result
  66. 3530
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_idx_noblob.result
  67. 275
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_log.result
  68. 51
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_lower_case_table_names.result
  69. 60
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_sp003.result
  70. 47
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_sp006.result
  71. 32
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_trig004.result
  72. 274
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_stm_log.result
  73. 1773
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_stm_mixed_crash_safe.result
  74. 48
      storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_stm_mixed_lower_case_table_names.result
  75. 4
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_not_null_tokudb.test
  76. 12
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_row_basic_3tokudb.test
  77. 6
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_commit_after_flush.test
  78. 1
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_insert_id-master.opt
  79. 1
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_insert_id-slave.opt
  80. 7
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_insert_id.test
  81. 7
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_insert_id_pk.test
  82. 4
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_multi_update.test
  83. 1
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_multi_update2-slave.opt
  84. 14
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_multi_update2.test
  85. 13
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_multi_update3.test
  86. 1
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_crash_safe-master.opt
  87. 1
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_crash_safe-slave.opt
  88. 19
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_crash_safe.test
  89. 1
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_blobs.cnf
  90. 53
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_blobs.test
  91. 1
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_full.cnf
  92. 50
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_full.test
  93. 1
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_min.cnf
  94. 42
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_min.test
  95. 1
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_noblob.cnf
  96. 42
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_noblob.test
  97. 1
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_idx_full.cnf
  98. 38
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_idx_full.test
  99. 1
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_idx_min.cnf
  100. 41
      storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_idx_min.test

2
storage/tokudb/CMakeLists.txt

@ -1,4 +1,4 @@
SET(TOKUDB_VERSION 5.6.36-82.0)
SET(TOKUDB_VERSION 5.6.36-82.1)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
NOT CMAKE_VERSION VERSION_LESS "2.8.9")

10
storage/tokudb/PerconaFT/CMakeLists.txt

@ -9,6 +9,16 @@ project(TokuDB)
set(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
set(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
# detect when we are being built as a subproject
if (DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
add_definitions( -DMYSQL_TOKUDB_ENGINE=1)
if ((CMAKE_BUILD_TYPE MATCHES "Debug") AND
(CMAKE_CXX_FLAGS_DEBUG MATCHES " -DENABLED_DEBUG_SYNC"))
include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${CMAKE_SOURCE_DIR}/sql)
endif ()
endif ()
## Versions of gcc >= 4.9.0 require special version of 'ar' and 'ranlib' for
## link-time optimizations to work properly.
##

5
storage/tokudb/PerconaFT/buildheader/make_tdb.cc

@ -428,6 +428,7 @@ static void print_db_env_struct (void) {
"int (*dirtool_attach)(DB_ENV *, DB_TXN *, const char *, const char *)",
"int (*dirtool_detach)(DB_ENV *, DB_TXN *, const char *)",
"int (*dirtool_move)(DB_ENV *, DB_TXN *, const char *, const char *)",
"void (*kill_waiter)(DB_ENV *, void *extra)",
NULL};
sort_and_dump_fields("db_env", true, extra);
@ -548,8 +549,8 @@ static void print_db_txn_struct (void) {
"int (*abort_with_progress)(DB_TXN*, TXN_PROGRESS_POLL_FUNCTION, void*)",
"int (*xa_prepare) (DB_TXN*, TOKU_XA_XID *, uint32_t flags)",
"uint64_t (*id64) (DB_TXN*)",
"void (*set_client_id)(DB_TXN *, uint64_t client_id)",
"uint64_t (*get_client_id)(DB_TXN *)",
"void (*set_client_id)(DB_TXN *, uint64_t client_id, void *client_extra)",
"void (*get_client_id)(DB_TXN *, uint64_t *client_id, void **client_extra)",
"bool (*is_prepared)(DB_TXN *)",
"DB_TXN *(*get_child)(DB_TXN *)",
"uint64_t (*get_start_time)(DB_TXN *)",

3
storage/tokudb/PerconaFT/cmake_modules/TokuThirdParty.cmake

@ -123,6 +123,9 @@ ExternalProject_Add(build_snappy
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_AR=${CMAKE_AR}
-DCMAKE_NM=${CMAKE_NM}
-DCMAKE_RANLIB=${CMAKE_RANLIB}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
${USE_PROJECT_CMAKE_MODULE_PATH}

9
storage/tokudb/PerconaFT/ft/cachetable/cachetable.cc

@ -464,7 +464,10 @@ int toku_cachetable_openf (CACHEFILE *cfptr, CACHETABLE ct, const char *fname_in
char *
toku_cachefile_fname_in_env (CACHEFILE cf) {
return cf->fname_in_env;
if (cf) {
return cf->fname_in_env;
}
return nullptr;
}
void toku_cachefile_set_fname_in_env(CACHEFILE cf, char *new_fname_in_env) {
@ -2890,6 +2893,10 @@ toku_cachefile_get_cachetable(CACHEFILE cf) {
return cf->cachetable;
}
CACHEFILE toku_pair_get_cachefile(PAIR pair) {
return pair->cachefile;
}
//Only called by ft_end_checkpoint
//Must have access to cf->fd (must be protected)
void toku_cachefile_fsync(CACHEFILE cf) {

3
storage/tokudb/PerconaFT/ft/cachetable/cachetable.h

@ -297,6 +297,9 @@ void *toku_cachefile_get_userdata(CACHEFILE);
CACHETABLE toku_cachefile_get_cachetable(CACHEFILE cf);
// Effect: Get the cachetable.
CACHEFILE toku_pair_get_cachefile(PAIR);
// Effect: Get the cachefile of the pair
void toku_cachetable_swap_pair_values(PAIR old_pair, PAIR new_pair);
// Effect: Swaps the value_data of old_pair and new_pair.
// Requires: both old_pair and new_pair to be pinned with write locks.

97
storage/tokudb/PerconaFT/ft/ft-ops.cc

@ -651,8 +651,12 @@ void toku_ftnode_clone_callback(void *value_data,
// set new pair attr if necessary
if (node->height == 0) {
*new_attr = make_ftnode_pair_attr(node);
node->logical_rows_delta = 0;
cloned_node->logical_rows_delta = 0;
for (int i = 0; i < node->n_children; i++) {
if (BP_STATE(node, i) == PT_AVAIL) {
BLB_LRD(node, i) = 0;
BLB_LRD(cloned_node, i) = 0;
}
}
} else {
new_attr->is_valid = false;
}
@ -700,9 +704,26 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
if (ftnode->height == 0) {
FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF, 1);
FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF_BYTES, node_size);
if (!ftnode->dirty) {
toku_ft_adjust_logical_row_count(
ft, -ftnode->logical_rows_delta);
// A leaf node (height == 0) is being evicted (!keep_me) and is
// not a checkpoint clone (!is_clone). This leaf node may have
// had messages applied to satisfy a query, but was never
// actually dirtied (!ftnode->dirty && !write_me). **Note that
// if (write_me) would persist the node and clear the dirty
// flag **. This message application may have updated the tree's
// logical row count. Since these message applications are not
// persisted, we need to undo the logical row count adjustments as
// they may occur again in the future if/when the node is
// re-read from disk for another query or change.
if (!ftnode->dirty && !write_me) {
int64_t lrc_delta = 0;
for (int i = 0; i < ftnode->n_children; i++) {
if (BP_STATE(ftnode, i) == PT_AVAIL) {
lrc_delta -= BLB_LRD(ftnode, i);
BLB_LRD(ftnode, i) = 0;
}
}
toku_ft_adjust_logical_row_count(ft, lrc_delta);
}
} else {
FT_STATUS_INC(FT_FULL_EVICTIONS_NONLEAF, 1);
@ -711,6 +732,11 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
toku_free(*disk_data);
} else {
if (ftnode->height == 0) {
// No need to adjust logical row counts when flushing a clone
// as they should have been zeroed out anyway when cloned.
// Clones are 'copies' of work already done so doing it again
// (adjusting row counts) would be redundant and leads to
// inaccurate counts.
for (int i = 0; i < ftnode->n_children; i++) {
if (BP_STATE(ftnode, i) == PT_AVAIL) {
BASEMENTNODE bn = BLB(ftnode, i);
@ -718,10 +744,6 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
bn->stat64_delta);
}
}
if (!ftnode->dirty) {
toku_ft_adjust_logical_row_count(
ft, -ftnode->logical_rows_delta);
}
}
}
toku_ftnode_free(&ftnode);
@ -748,24 +770,48 @@ toku_ft_status_update_pivot_fetch_reason(ftnode_fetch_extra *bfe)
}
}
int toku_ftnode_fetch_callback (CACHEFILE UU(cachefile), PAIR p, int fd, BLOCKNUM blocknum, uint32_t fullhash,
void **ftnode_pv, void** disk_data, PAIR_ATTR *sizep, int *dirtyp, void *extraargs) {
int toku_ftnode_fetch_callback(CACHEFILE UU(cachefile),
PAIR p,
int fd,
BLOCKNUM blocknum,
uint32_t fullhash,
void **ftnode_pv,
void **disk_data,
PAIR_ATTR *sizep,
int *dirtyp,
void *extraargs) {
assert(extraargs);
assert(*ftnode_pv == NULL);
FTNODE_DISK_DATA* ndd = (FTNODE_DISK_DATA*)disk_data;
assert(*ftnode_pv == nullptr);
FTNODE_DISK_DATA *ndd = (FTNODE_DISK_DATA *)disk_data;
ftnode_fetch_extra *bfe = (ftnode_fetch_extra *)extraargs;
FTNODE *node=(FTNODE*)ftnode_pv;
FTNODE *node = (FTNODE *)ftnode_pv;
// deserialize the node, must pass the bfe in because we cannot
// evaluate what piece of the node is necessary until we get it at
// least partially into memory
int r = toku_deserialize_ftnode_from(fd, blocknum, fullhash, node, ndd, bfe);
int r =
toku_deserialize_ftnode_from(fd, blocknum, fullhash, node, ndd, bfe);
if (r != 0) {
if (r == TOKUDB_BAD_CHECKSUM) {
fprintf(stderr,
"Checksum failure while reading node in file %s.\n",
toku_cachefile_fname_in_env(cachefile));
fprintf(
stderr,
"%s:%d:toku_ftnode_fetch_callback - "
"file[%s], blocknum[%ld], toku_deserialize_ftnode_from "
"failed with a checksum error.\n",
__FILE__,
__LINE__,
toku_cachefile_fname_in_env(cachefile),
blocknum.b);
} else {
fprintf(stderr, "Error deserializing node, errno = %d", r);
fprintf(
stderr,
"%s:%d:toku_ftnode_fetch_callback - "
"file[%s], blocknum[%ld], toku_deserialize_ftnode_from "
"failed with %d.\n",
__FILE__,
__LINE__,
toku_cachefile_fname_in_env(cachefile),
blocknum.b,
r);
}
// make absolutely sure we crash before doing anything else.
abort();
@ -774,7 +820,8 @@ int toku_ftnode_fetch_callback (CACHEFILE UU(cachefile), PAIR p, int fd, BLOCKNU
if (r == 0) {
*sizep = make_ftnode_pair_attr(*node);
(*node)->ct_pair = p;
*dirtyp = (*node)->dirty; // deserialize could mark the node as dirty (presumably for upgrade)
*dirtyp = (*node)->dirty; // deserialize could mark the node as dirty
// (presumably for upgrade)
}
return r;
}
@ -947,6 +994,16 @@ int toku_ftnode_pe_callback(void *ftnode_pv,
basements_to_destroy[num_basements_to_destroy++] = bn;
toku_ft_decrease_stats(&ft->in_memory_stats,
bn->stat64_delta);
// A basement node is being partially evicted.
// This basement node may have had messages applied to it to
// satisfy a query, but was never actually dirtied.
// This message application may have updated the tree's
// logical row count. Since these message applications are
// not being persisted, we need to undo the logical row count
// adjustments as they may occur again in the future if/when
// the node is re-read from disk for another query or change.
toku_ft_adjust_logical_row_count(ft,
-bn->logical_rows_delta);
set_BNULL(node, i);
BP_STATE(node, i) = PT_ON_DISK;
num_partial_evictions++;

3
storage/tokudb/PerconaFT/ft/ft.cc

@ -435,7 +435,8 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN
}
int fd = toku_cachefile_get_fd(cf);
int r = toku_deserialize_ft_from(fd, max_acceptable_lsn, &ft);
const char *fn = toku_cachefile_fname_in_env(cf);
int r = toku_deserialize_ft_from(fd, fn, max_acceptable_lsn, &ft);
if (r == TOKUDB_BAD_CHECKSUM) {
fprintf(stderr, "Checksum failure while reading header in file %s.\n", toku_cachefile_fname_in_env(cf));
assert(false); // make absolutely sure we crash before doing anything else

19
storage/tokudb/PerconaFT/ft/node.cc

@ -93,6 +93,7 @@ void toku_destroy_ftnode_internals(FTNODE node) {
if (node->height > 0) {
destroy_nonleaf_childinfo(BNC(node,i));
} else {
paranoid_invariant(BLB_LRD(node, i) == 0);
destroy_basement_node(BLB(node, i));
}
} else if (BP_STATE(node,i) == PT_COMPRESSED) {
@ -386,8 +387,7 @@ static void bnc_apply_messages_to_basement_node(
const pivot_bounds &
bounds, // contains pivot key bounds of this basement node
txn_gc_info *gc_info,
bool *msgs_applied,
int64_t* logical_rows_delta) {
bool *msgs_applied) {
int r;
NONLEAF_CHILDINFO bnc = BNC(ancestor, childnum);
@ -395,6 +395,7 @@ static void bnc_apply_messages_to_basement_node(
// apply messages from this buffer
STAT64INFO_S stats_delta = {0, 0};
uint64_t workdone_this_ancestor = 0;
int64_t logical_rows_delta = 0;
uint32_t stale_lbi, stale_ube;
if (!bn->stale_ancestor_messages_applied) {
@ -470,7 +471,7 @@ static void bnc_apply_messages_to_basement_node(
gc_info,
&workdone_this_ancestor,
&stats_delta,
logical_rows_delta);
&logical_rows_delta);
}
} else if (stale_lbi == stale_ube) {
// No stale messages to apply, we just apply fresh messages, and mark
@ -482,7 +483,7 @@ static void bnc_apply_messages_to_basement_node(
.gc_info = gc_info,
.workdone = &workdone_this_ancestor,
.stats_to_update = &stats_delta,
.logical_rows_delta = logical_rows_delta};
.logical_rows_delta = &logical_rows_delta};
if (fresh_ube - fresh_lbi > 0)
*msgs_applied = true;
r = bnc->fresh_message_tree
@ -503,7 +504,7 @@ static void bnc_apply_messages_to_basement_node(
.gc_info = gc_info,
.workdone = &workdone_this_ancestor,
.stats_to_update = &stats_delta,
.logical_rows_delta = logical_rows_delta};
.logical_rows_delta = &logical_rows_delta};
r = bnc->stale_message_tree
.iterate_on_range<struct iterate_do_bn_apply_msg_extra,
@ -521,6 +522,8 @@ static void bnc_apply_messages_to_basement_node(
if (stats_delta.numbytes || stats_delta.numrows) {
toku_ft_update_stats(&t->ft->in_memory_stats, stats_delta);
}
toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
bn->logical_rows_delta += logical_rows_delta;
}
static void
@ -534,7 +537,6 @@ apply_ancestors_messages_to_bn(
bool* msgs_applied
)
{
int64_t logical_rows_delta = 0;
BASEMENTNODE curr_bn = BLB(node, childnum);
const pivot_bounds curr_bounds = bounds.next_bounds(node, childnum);
for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) {
@ -547,16 +549,13 @@ apply_ancestors_messages_to_bn(
curr_ancestors->childnum,
curr_bounds,
gc_info,
msgs_applied,
&logical_rows_delta
msgs_applied
);
// We don't want to check this ancestor node again if the
// next time we query it, the msn hasn't changed.
curr_bn->max_msn_applied = curr_ancestors->node->max_msn_applied_to_node_on_disk;
}
}
toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
node->logical_rows_delta += logical_rows_delta;
// At this point, we know all the stale messages above this
// basement node have been applied, and any new messages will be
// fresh, so we don't need to look at stale messages for this

20
storage/tokudb/PerconaFT/ft/node.h

@ -175,11 +175,6 @@ struct ftnode {
int height;
int dirty;
uint32_t fullhash;
// current count of rows added or removed as a result of message application
// to this node as a basement, irrelevant for internal nodes, gets reset
// when node is undirtied. Used to back out tree scoped LRC if node is
// evicted but not persisted
int64_t logical_rows_delta;
// for internal nodes, if n_children==fanout+1 then the tree needs to be
// rebalanced. for leaf nodes, represents number of basement nodes
@ -211,6 +206,10 @@ struct ftnode_leaf_basement_node {
unsigned int seqinsert; // number of sequential inserts to this leaf
MSN max_msn_applied; // max message sequence number applied
bool stale_ancestor_messages_applied;
// current count of rows added or removed as a result of message application
// to this basement node, gets reset when node is undirtied.
// Used to back out tree scoped LRC if node is evicted but not persisted
int64_t logical_rows_delta;
STAT64INFO_S stat64_delta; // change in stat64 counters since basement was last written to disk
};
typedef struct ftnode_leaf_basement_node *BASEMENTNODE;
@ -385,6 +384,16 @@ enum reactivity toku_ftnode_get_reactivity(FT ft, FTNODE node);
enum reactivity toku_ftnode_get_nonleaf_reactivity(FTNODE node, unsigned int fanout);
enum reactivity toku_ftnode_get_leaf_reactivity(FTNODE node, uint32_t nodesize);
inline const char* toku_ftnode_get_cachefile_fname_in_env(FTNODE node) {
if (node->ct_pair) {
CACHEFILE cf = toku_pair_get_cachefile(node->ct_pair);
if (cf) {
return toku_cachefile_fname_in_env(cf);
}
}
return nullptr;
}
/**
* Finds the next child for HOT to flush to, given that everything up to
* and including k has been flattened.
@ -577,3 +586,4 @@ static inline void set_BSB(FTNODE node, int i, struct sub_block *sb) {
#define BLB_DATA(node,i) (&(BLB(node,i)->data_buffer))
#define BLB_NBYTESINDATA(node,i) (BLB_DATA(node,i)->get_disk_size())
#define BLB_SEQINSERT(node,i) (BLB(node,i)->seqinsert)
#define BLB_LRD(node, i) (BLB(node,i)->logical_rows_delta)

43
storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc

@ -644,7 +644,29 @@ exit:
// Read ft from file into struct. Read both headers and use one.
// We want the latest acceptable header whose checkpoint_lsn is no later
// than max_acceptable_lsn.
int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
#define dump_state_of_toku_deserialize_ft_from() \
fprintf(stderr, \
"%s:%d toku_deserialize_ft_from: " \
"filename[%s] " \
"r[%d] max_acceptable_lsn[%lu]" \
"r0[%d] checkpoint_lsn_0[%lu] checkpoint_count_0[%lu] " \
"r1[%d] checkpoint_lsn_1[%lu] checkpoint_count_1[%lu]\n", \
__FILE__, \
__LINE__, \
fn, \
r, \
max_acceptable_lsn.lsn, \
r0, \
checkpoint_lsn_0.lsn, \
checkpoint_count_0, \
r1, \
checkpoint_lsn_1.lsn, \
checkpoint_count_1);
int toku_deserialize_ft_from(int fd,
const char *fn,
LSN max_acceptable_lsn,
FT *ft) {
struct rbuf rb_0;
struct rbuf rb_1;
uint64_t checkpoint_count_0 = 0;
@ -655,7 +677,7 @@ int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
bool h0_acceptable = false;
bool h1_acceptable = false;
struct rbuf *rb = NULL;
int r0, r1, r;
int r0, r1, r = 0;
toku_off_t header_0_off = 0;
r0 = deserialize_ft_from_fd_into_rbuf(fd,
@ -702,6 +724,10 @@ int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
// first header, unless it's readable
}
if (r != TOKUDB_DICTIONARY_NO_HEADER) {
dump_state_of_toku_deserialize_ft_from();
}
// it should not be possible for both headers to be later than the
// max_acceptable_lsn
invariant(
@ -713,11 +739,19 @@ int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
if (h0_acceptable && h1_acceptable) {
if (checkpoint_count_0 > checkpoint_count_1) {
if (!(checkpoint_count_0 == checkpoint_count_1 + 1) ||
!(version_0 >= version_1)) {
dump_state_of_toku_deserialize_ft_from();
}
invariant(checkpoint_count_0 == checkpoint_count_1 + 1);
invariant(version_0 >= version_1);
rb = &rb_0;
version = version_0;
} else {
if (!(checkpoint_count_1 == checkpoint_count_0 + 1) ||
!(version_1 >= version_0)) {
dump_state_of_toku_deserialize_ft_from();
}
invariant(checkpoint_count_1 == checkpoint_count_0 + 1);
invariant(version_1 >= version_0);
rb = &rb_1;
@ -729,6 +763,7 @@ int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
fprintf(
stderr,
"Header 2 checksum failed, but header 1 ok. Proceeding.\n");
dump_state_of_toku_deserialize_ft_from();
}
rb = &rb_0;
version = version_0;
@ -738,11 +773,15 @@ int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
fprintf(
stderr,
"Header 1 checksum failed, but header 2 ok. Proceeding.\n");
dump_state_of_toku_deserialize_ft_from();
}
rb = &rb_1;
version = version_1;
}
if (!rb) {
dump_state_of_toku_deserialize_ft_from();
}
paranoid_invariant(rb);
r = deserialize_ft_versioned(fd, rb, ft, version);

23
storage/tokudb/PerconaFT/ft/serialize/ft-serialize.h

@ -42,12 +42,23 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ft/serialize/block_table.h"
size_t toku_serialize_ft_size(struct ft_header *h);
void toku_serialize_ft_to(int fd, struct ft_header *h, block_table *bt, CACHEFILE cf);
void toku_serialize_ft_to_wbuf(struct wbuf *wbuf, struct ft_header *h, DISKOFF translation_location_on_disk, DISKOFF translation_size_on_disk);
void toku_serialize_descriptor_contents_to_fd(int fd, DESCRIPTOR desc, DISKOFF offset);
void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, DESCRIPTOR desc);
int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft);
void toku_serialize_ft_to(int fd,
struct ft_header *h,
block_table *bt,
CACHEFILE cf);
void toku_serialize_ft_to_wbuf(struct wbuf *wbuf,
struct ft_header *h,
DISKOFF translation_location_on_disk,
DISKOFF translation_size_on_disk);
void toku_serialize_descriptor_contents_to_fd(int fd,
DESCRIPTOR desc,
DISKOFF offset);
void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb,
DESCRIPTOR desc);
int toku_deserialize_ft_from(int fd,
const char *fn,
LSN max_acceptable_lsn,
FT *ft);
// TODO rename
int deserialize_ft_from_fd_into_rbuf(int fd,

689
storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
File diff suppressed because it is too large
View File

69
storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.h

@ -46,21 +46,51 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ft/serialize/block_table.h"
unsigned int toku_serialize_ftnode_size(FTNODE node);
int toku_serialize_ftnode_to_memory(FTNODE node, FTNODE_DISK_DATA *ndd,
unsigned int basementnodesize,
enum toku_compression_method compression_method,
bool do_rebalancing, bool in_parallel,
size_t *n_bytes_to_write, size_t *n_uncompressed_bytes,
char **bytes_to_write);
int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA *ndd, bool do_rebalancing, FT ft, bool for_checkpoint);
int toku_serialize_rollback_log_to(int fd, ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized_log, bool is_serialized,
FT ft, bool for_checkpoint);
void toku_serialize_rollback_log_to_memory_uncompressed(ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized);
int toku_deserialize_rollback_log_from(int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT ft);
int toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, ftnode_fetch_extra *bfe);
int toku_deserialize_bp_from_compressed(FTNODE node, int childnum, ftnode_fetch_extra *bfe);
int toku_deserialize_ftnode_from(int fd, BLOCKNUM off, uint32_t fullhash, FTNODE *node, FTNODE_DISK_DATA *ndd, ftnode_fetch_extra *bfe);
int toku_serialize_ftnode_to_memory(
FTNODE node,
FTNODE_DISK_DATA *ndd,
unsigned int basementnodesize,
enum toku_compression_method compression_method,
bool do_rebalancing,
bool in_parallel,
size_t *n_bytes_to_write,
size_t *n_uncompressed_bytes,
char **bytes_to_write);
int toku_serialize_ftnode_to(int fd,
BLOCKNUM,
FTNODE node,
FTNODE_DISK_DATA *ndd,
bool do_rebalancing,
FT ft,
bool for_checkpoint);
int toku_serialize_rollback_log_to(int fd,
ROLLBACK_LOG_NODE log,
SERIALIZED_ROLLBACK_LOG_NODE serialized_log,
bool is_serialized,
FT ft,
bool for_checkpoint);
void toku_serialize_rollback_log_to_memory_uncompressed(
ROLLBACK_LOG_NODE log,
SERIALIZED_ROLLBACK_LOG_NODE serialized);
int toku_deserialize_rollback_log_from(int fd,
BLOCKNUM blocknum,
ROLLBACK_LOG_NODE *logp,
FT ft);
int toku_deserialize_bp_from_disk(FTNODE node,
FTNODE_DISK_DATA ndd,
int childnum,
int fd,
ftnode_fetch_extra *bfe);
int toku_deserialize_bp_from_compressed(FTNODE node,
int childnum,
ftnode_fetch_extra *bfe);
int toku_deserialize_ftnode_from(int fd,
BLOCKNUM off,
uint32_t fullhash,
FTNODE *node,
FTNODE_DISK_DATA *ndd,
ftnode_fetch_extra *bfe);
void toku_serialize_set_parallel(bool);
@ -73,9 +103,14 @@ int decompress_from_raw_block_into_rbuf(uint8_t *raw_block, size_t raw_block_siz
// used by verify
int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ft, uint32_t version);
void read_block_from_fd_into_rbuf(int fd, BLOCKNUM blocknum, FT ft, struct rbuf *rb);
void read_block_from_fd_into_rbuf(int fd,
BLOCKNUM blocknum,
FT ft,
struct rbuf *rb);
int read_compressed_sub_block(struct rbuf *rb, struct sub_block *sb);
int verify_ftnode_sub_block(struct sub_block *sb);
int verify_ftnode_sub_block(struct sub_block *sb,
const char *fname,
BLOCKNUM blocknum);
void just_decompress_sub_block(struct sub_block *sb);
// used by ft-node-deserialize.cc

2
storage/tokudb/PerconaFT/ft/txn/roll.cc

@ -203,7 +203,7 @@ int toku_rollback_frename(BYTESTRING old_iname,
}
if (toku_stat(new_iname_full.get(), &stat) == -1) {
if (ENOENT == errno)
if (ENOENT == errno || ENAMETOOLONG == errno)
new_exist = false;
else
return 1;

9
storage/tokudb/PerconaFT/ft/txn/txn.cc

@ -269,6 +269,7 @@ static txn_child_manager tcm;
.state = TOKUTXN_LIVE,
.num_pin = 0,
.client_id = 0,
.client_extra = nullptr,
.start_time = time(NULL),
};
@ -705,12 +706,14 @@ bool toku_txn_has_spilled_rollback(TOKUTXN txn) {
return txn_has_spilled_rollback_logs(txn);
}
uint64_t toku_txn_get_client_id(TOKUTXN txn) {
return txn->client_id;
void toku_txn_get_client_id(TOKUTXN txn, uint64_t *client_id, void **client_extra) {
if (client_id) *client_id = txn->client_id;
if (client_extra) *client_extra = txn->client_extra;
}
void toku_txn_set_client_id(TOKUTXN txn, uint64_t client_id) {
void toku_txn_set_client_id(TOKUTXN txn, uint64_t client_id, void *client_extra) {
txn->client_id = client_id;
txn->client_extra = client_extra;
}
time_t toku_txn_get_start_time(struct tokutxn *txn) {

5
storage/tokudb/PerconaFT/ft/txn/txn.h

@ -193,6 +193,7 @@ struct tokutxn {
uint32_t num_pin; // number of threads (all hot indexes) that want this
// txn to not transition to commit or abort
uint64_t client_id;
void *client_extra;
time_t start_time;
};
typedef struct tokutxn *TOKUTXN;
@ -293,8 +294,8 @@ void toku_txn_unpin_live_txn(struct tokutxn *txn);
bool toku_txn_has_spilled_rollback(struct tokutxn *txn);
uint64_t toku_txn_get_client_id(struct tokutxn *txn);
void toku_txn_set_client_id(struct tokutxn *txn, uint64_t client_id);
void toku_txn_get_client_id(struct tokutxn *txn, uint64_t *client_id, void **client_extra);
void toku_txn_set_client_id(struct tokutxn *txn, uint64_t client_id, void *client_extra);
time_t toku_txn_get_start_time(struct tokutxn *txn);

150
storage/tokudb/PerconaFT/locktree/lock_request.cc

@ -65,6 +65,7 @@ void lock_request::create(void) {
toku_cond_init(&m_wait_cond, nullptr);
m_start_test_callback = nullptr;
m_start_before_pending_test_callback = nullptr;
m_retry_test_callback = nullptr;
}
@ -79,7 +80,7 @@ void lock_request::destroy(void) {
}
// set the lock request parameters. this API allows a lock request to be reused.
void lock_request::set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, lock_request::type lock_type, bool big_txn) {
void lock_request::set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, lock_request::type lock_type, bool big_txn, void *extra) {
invariant(m_state != state::PENDING);
m_lt = lt;
m_txnid = txnid;
@ -91,6 +92,7 @@ void lock_request::set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT
m_state = state::INITIALIZED;
m_info = lt ? lt->get_lock_request_info() : nullptr;
m_big_txn = big_txn;
m_extra = extra;
}
// get rid of any stored left and right key copies and
@ -173,6 +175,8 @@ int lock_request::start(void) {
m_state = state::PENDING;
m_start_time = toku_current_time_microsec() / 1000;
m_conflicting_txnid = conflicts.get(0);
if (m_start_before_pending_test_callback)
m_start_before_pending_test_callback();
toku_mutex_lock(&m_info->mutex);
insert_into_lock_requests();
if (deadlock_exists(conflicts)) {
@ -180,7 +184,8 @@ int lock_request::start(void) {
r = DB_LOCK_DEADLOCK;
}
toku_mutex_unlock(&m_info->mutex);
if (m_start_test_callback) m_start_test_callback(); // test callback
if (m_start_test_callback)
m_start_test_callback(); // test callback
}
if (r != DB_LOCK_NOTGRANTED) {
@ -203,7 +208,18 @@ int lock_request::wait(uint64_t wait_time_ms, uint64_t killed_time_ms, int (*kil
toku_mutex_lock(&m_info->mutex);
// check again, this time locking out other retry calls
if (m_state == state::PENDING) {
retry();
}
while (m_state == state::PENDING) {
// check if this thread is killed
if (killed_callback && killed_callback()) {
remove_from_lock_requests();
complete(DB_LOCK_NOTGRANTED);
continue;
}
// compute next wait time
uint64_t t_wait;
@ -221,13 +237,13 @@ int lock_request::wait(uint64_t wait_time_ms, uint64_t killed_time_ms, int (*kil
invariant(r == 0 || r == ETIMEDOUT);
t_now = toku_current_time_microsec();
if (m_state == state::PENDING && (t_now >= t_end || (killed_callback && killed_callback()))) {
if (m_state == state::PENDING && (t_now >= t_end)) {
m_info->counters.timeout_count += 1;
// if we're still pending and we timed out, then remove our
// request from the set of lock requests and fail.
remove_from_lock_requests();
// complete sets m_state to COMPLETE, breaking us out of the loop
complete(DB_LOCK_NOTGRANTED);
}
@ -274,13 +290,17 @@ TXNID lock_request::get_conflicting_txnid(void) const {
}
int lock_request::retry(void) {
invariant(m_state == state::PENDING);
int r;
txnid_set conflicts;
conflicts.create();
invariant(m_state == state::PENDING);
if (m_type == type::WRITE) {
r = m_lt->acquire_write_lock(m_txnid, m_left_key, m_right_key, nullptr, m_big_txn);
r = m_lt->acquire_write_lock(
m_txnid, m_left_key, m_right_key, &conflicts, m_big_txn);
} else {
r = m_lt->acquire_read_lock(m_txnid, m_left_key, m_right_key, nullptr, m_big_txn);
r = m_lt->acquire_read_lock(
m_txnid, m_left_key, m_right_key, &conflicts, m_big_txn);
}
// if the acquisition succeeded then remove ourselves from the
@ -288,44 +308,63 @@ int lock_request::retry(void) {
if (r == 0) {
remove_from_lock_requests();
complete(r);
if (m_retry_test_callback) m_retry_test_callback(); // test callback
if (m_retry_test_callback)
m_retry_test_callback(); // test callback
toku_cond_broadcast(&m_wait_cond);
} else {
m_conflicting_txnid = conflicts.get(0);
}
conflicts.destroy();
return r;
}
void lock_request::retry_all_lock_requests(locktree *lt) {
void lock_request::retry_all_lock_requests(
locktree *lt,
void (*after_retry_all_test_callback)(void)) {
lt_lock_request_info *info = lt->get_lock_request_info();
// if a thread reads this bit to be true, then it should go ahead and
// take the locktree mutex and retry lock requests. we use this bit
// to prevent every single thread from waiting on the locktree mutex
// in order to retry requests, especially when no requests actually exist.
//
// it is important to note that this bit only provides an optimization.
// it is not problematic for it to be true when it should be false,
// but it can be problematic for it to be false when it should be true.
// therefore, the lock request code must ensure that when lock requests
// are added to this locktree, the bit is set.
// see lock_request::insert_into_lock_requests()
if (!info->should_retry_lock_requests) {
// if there are no pending lock requests then there is nothing to do
// the unlocked data race on pending_is_empty is OK since lock requests
// are retried after added to the pending set.
if (info->pending_is_empty)
return;
// get my retry generation (post increment of retry_want)
unsigned long long my_retry_want = (info->retry_want += 1);
toku_mutex_lock(&info->retry_mutex);
// here is the group retry algorithm.
// get the latest retry_want count and use it as the generation number of
// this retry operation. if this retry generation is > the last retry
// generation, then do the lock retries. otherwise, no lock retries
// are needed.
if ((my_retry_want - 1) == info->retry_done) {
for (;;) {
if (!info->running_retry) {
info->running_retry = true;
info->retry_done = info->retry_want;
toku_mutex_unlock(&info->retry_mutex);
retry_all_lock_requests_info(info);
if (after_retry_all_test_callback)
after_retry_all_test_callback();
toku_mutex_lock(&info->retry_mutex);
info->running_retry = false;
toku_cond_broadcast(&info->retry_cv);
break;
} else {
toku_cond_wait(&info->retry_cv, &info->retry_mutex);
}
}
}
toku_mutex_unlock(&info->retry_mutex);
}
void lock_request::retry_all_lock_requests_info(lt_lock_request_info *info) {
toku_mutex_lock(&info->mutex);
// let other threads know that they need not retry lock requests at this time.
//
// the motivation here is that if a bunch of threads have already released
// their locks in the rangetree, then it's probably okay for only one thread
// to iterate over the list of requests and retry them. otherwise, at high
// thread counts and a large number of pending lock requests, you could
// end up wasting a lot of cycles.
info->should_retry_lock_requests = false;
size_t i = 0;
while (i < info->pending_lock_requests.size()) {
// retry all of the pending lock requests.
for (size_t i = 0; i < info->pending_lock_requests.size();) {
lock_request *request;
int r = info->pending_lock_requests.fetch(i, &request);
invariant_zero(r);
@ -346,6 +385,30 @@ void lock_request::retry_all_lock_requests(locktree *lt) {
toku_mutex_unlock(&info->mutex);
}
// effect: Returns the opaque client context that was stashed in this lock
//         request via set(). Used to match a waiter in kill_waiter().
void *lock_request::get_extra(void) const {
    return m_extra;
}
// effect: Forcibly fails this pending lock request with DB_LOCK_NOTGRANTED:
//         removes it from the pending set, marks it complete, and wakes the
//         thread sleeping in wait().
// note: the broadcast must come after complete() so the woken waiter
//       observes the COMPLETE state.
void lock_request::kill_waiter(void) {
    remove_from_lock_requests();
    complete(DB_LOCK_NOTGRANTED);
    toku_cond_broadcast(&m_wait_cond);
}
// effect: Scans the locktree's pending lock requests under the info mutex
//         and kills the first request whose client context matches 'extra',
//         if any such request exists.
void lock_request::kill_waiter(locktree *lt, void *extra) {
    lt_lock_request_info *info = lt->get_lock_request_info();
    toku_mutex_lock(&info->mutex);
    const size_t num_pending = info->pending_lock_requests.size();
    for (size_t idx = 0; idx < num_pending; idx++) {
        lock_request *req;
        int r = info->pending_lock_requests.fetch(idx, &req);
        if (r == 0 && req->get_extra() == extra) {
            // at most one waiter is killed per call
            req->kill_waiter();
            break;
        }
    }
    toku_mutex_unlock(&info->mutex);
}
// find another lock request by txnid. must hold the mutex.
lock_request *lock_request::find_lock_request(const TXNID &txnid) {
lock_request *request;
@ -360,27 +423,30 @@ lock_request *lock_request::find_lock_request(const TXNID &txnid) {
void lock_request::insert_into_lock_requests(void) {
uint32_t idx;
lock_request *request;
int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(m_txnid, &request, &idx);
int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(
m_txnid, &request, &idx);
invariant(r == DB_NOTFOUND);
r = m_info->pending_lock_requests.insert_at(this, idx);
invariant_zero(r);
// ensure that this bit is true, now that at least one lock request is in the set
m_info->should_retry_lock_requests = true;
m_info->pending_is_empty = false;
}
// remove this lock request from the locktree's set. must hold the mutex.
void lock_request::remove_from_lock_requests(void) {
uint32_t idx;
lock_request *request;
int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(m_txnid, &request, &idx);
int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(
m_txnid, &request, &idx);
invariant_zero(r);
invariant(request == this);
r = m_info->pending_lock_requests.delete_at(idx);
invariant_zero(r);
if (m_info->pending_lock_requests.size() == 0)
m_info->pending_is_empty = true;
}
int lock_request::find_by_txnid(lock_request * const &request, const TXNID &txnid) {
int lock_request::find_by_txnid(lock_request *const &request,
const TXNID &txnid) {
TXNID request_txnid = request->m_txnid;
if (request_txnid < txnid) {
return -1;
@ -395,6 +461,10 @@ void lock_request::set_start_test_callback(void (*f)(void)) {
m_start_test_callback = f;
}
// Test hook: the callback is invoked in start(), after a conflict is found
// but before this request is inserted into the pending set, so tests can
// widen the start/release race window.
void lock_request::set_start_before_pending_test_callback(void (*f)(void)) {
    m_start_before_pending_test_callback = f;
}
// Test hook: the callback is invoked in retry() right after a pending
// request is granted and completed, before the waiter is woken.
void lock_request::set_retry_test_callback(void (*f)(void)) {
    m_retry_test_callback = f;
}

23
storage/tokudb/PerconaFT/locktree/lock_request.h

@ -78,7 +78,7 @@ public:
// effect: Resets the lock request parameters, allowing it to be reused.
// requires: Lock request was already created at some point
void set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, type lock_type, bool big_txn);
void set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, type lock_type, bool big_txn, void *extra = nullptr);
// effect: Tries to acquire a lock described by this lock request.
// returns: The return code of locktree::acquire_[write,read]_lock()
@ -107,14 +107,24 @@ public:
TXNID get_conflicting_txnid(void) const;
// effect: Retries all of the lock requests for the given locktree.
// Any lock requests successfully restarted is completed and woken up.
// Any lock requests successfully restarted is completed and woken
// up.
// The rest remain pending.
static void retry_all_lock_requests(locktree *lt);
static void retry_all_lock_requests(
locktree *lt,
void (*after_retry_test_callback)(void) = nullptr);
static void retry_all_lock_requests_info(lt_lock_request_info *info);
void set_start_test_callback(void (*f)(void));
void set_start_before_pending_test_callback(void (*f)(void));
void set_retry_test_callback(void (*f)(void));
private:
void *get_extra(void) const;
void kill_waiter(void);
static void kill_waiter(locktree *lt, void *extra);
private:
enum state {
UNINITIALIZED,
INITIALIZED,
@ -152,6 +162,8 @@ private:
// locktree that this lock request is for.
struct lt_lock_request_info *m_info;
void *m_extra;
// effect: tries again to acquire the lock described by this lock request
// returns: 0 if retrying the request succeeded and is now complete
int retry(void);
@ -184,9 +196,10 @@ private:
void copy_keys(void);
static int find_by_txnid(lock_request * const &request, const TXNID &txnid);
static int find_by_txnid(lock_request *const &request, const TXNID &txnid);
void (*m_start_test_callback)(void);
void (*m_start_before_pending_test_callback)(void);
void (*m_retry_test_callback)(void);
friend class lock_request_unit_test;

44
storage/tokudb/PerconaFT/locktree/locktree.cc

@ -80,21 +80,24 @@ void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id, const compar
m_sto_end_early_count = 0;
m_sto_end_early_time = 0;
m_lock_request_info.pending_lock_requests.create();
ZERO_STRUCT(m_lock_request_info.mutex);
toku_mutex_init(&m_lock_request_info.mutex, nullptr);
m_lock_request_info.should_retry_lock_requests = false;
ZERO_STRUCT(m_lock_request_info.counters);
// Threads read the should retry bit without a lock
// for performance. It's ok to read the wrong value.
// - If you think you should but you shouldn't, you waste a little time.
// - If you think you shouldn't but you should, then some other thread
// will come around to do the work of retrying requests instead of you.
TOKU_VALGRIND_HG_DISABLE_CHECKING(
&m_lock_request_info.should_retry_lock_requests,
sizeof(m_lock_request_info.should_retry_lock_requests));
TOKU_DRD_IGNORE_VAR(m_lock_request_info.should_retry_lock_requests);
m_lock_request_info.init();
}
// effect: Initializes all lock-request state for a locktree: the pending
//         request set and its mutex, the per-locktree counters, and the
//         group-retry state (generation counters, mutex, condvar).
void lt_lock_request_info::init(void) {
    pending_lock_requests.create();
    pending_is_empty = true;
    ZERO_STRUCT(mutex);
    toku_mutex_init(&mutex, nullptr);
    // generation counters for the group retry algorithm; see
    // lock_request::retry_all_lock_requests
    retry_want = retry_done = 0;
    ZERO_STRUCT(counters);
    ZERO_STRUCT(retry_mutex);
    toku_mutex_init(&retry_mutex, nullptr);
    toku_cond_init(&retry_cv, nullptr);
    running_retry = false;
    // pending_is_empty is deliberately read without the mutex for
    // performance; tell the race detectors this data race is benign.
    TOKU_VALGRIND_HG_DISABLE_CHECKING(&pending_is_empty,
                                      sizeof(pending_is_empty));
    TOKU_DRD_IGNORE_VAR(pending_is_empty);
}
void locktree::destroy(void) {
@ -104,11 +107,18 @@ void locktree::destroy(void) {
m_rangetree->destroy();
toku_free(m_rangetree);
m_sto_buffer.destroy();
m_lock_request_info.pending_lock_requests.destroy();
m_lock_request_info.destroy();
}
// effect: Releases everything allocated by init(): the pending request
//         set, both mutexes, and the retry condition variable.
void lt_lock_request_info::destroy(void) {
    pending_lock_requests.destroy();
    toku_mutex_destroy(&mutex);
    toku_mutex_destroy(&retry_mutex);
    toku_cond_destroy(&retry_cv);
}
void locktree::add_reference(void) {
(void) toku_sync_add_and_fetch(&m_reference_count, 1);
(void)toku_sync_add_and_fetch(&m_reference_count, 1);
}
uint32_t locktree::release_reference(void) {

33
storage/tokudb/PerconaFT/locktree/locktree.h

@ -38,12 +38,14 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#pragma once
#include <atomic>
#include <db.h>
#include <toku_time.h>
#include <toku_pthread.h>
#include <toku_time.h>
#include <ft/ft-ops.h> // just for DICTIONARY_ID..
#include <ft/comparator.h>
#include <ft/ft-ops.h> // just for DICTIONARY_ID..
#include <util/omt.h>
@ -80,20 +82,33 @@ namespace toku {
// Lock request state for some locktree
struct lt_lock_request_info {
omt<lock_request *> pending_lock_requests;
std::atomic_bool pending_is_empty;
toku_mutex_t mutex;
bool should_retry_lock_requests;
lt_counters counters;
std::atomic_ullong retry_want;
unsigned long long retry_done;
toku_mutex_t retry_mutex;
toku_cond_t retry_cv;
bool running_retry;
void init(void);
void destroy(void);
};
// The locktree manager manages a set of locktrees, one for each open dictionary.
// Locktrees are retrieved from the manager. When they are no longer needed, they
// are released by the user.
// The locktree manager manages a set of locktrees, one for each open
// dictionary. Locktrees are retrieved from the manager. When they are no
// longer needed, they are released by the user.
class locktree_manager {
public:
public:
// param: create_cb, called just after a locktree is first created.
// destroy_cb, called just before a locktree is destroyed.
// escalate_cb, called after a locktree is escalated (with extra param)
void create(lt_create_cb create_cb, lt_destroy_cb destroy_cb, lt_escalate_cb escalate_cb, void *extra);
// escalate_cb, called after a locktree is escalated (with extra
// param)
void create(lt_create_cb create_cb,
lt_destroy_cb destroy_cb,
lt_escalate_cb escalate_cb,
void *extra);
void destroy(void);
@ -159,6 +174,8 @@ namespace toku {
// Add time t to the escalator's wait time statistics
void add_escalator_wait_time(uint64_t t);
void kill_waiter(void *extra);
private:
static const uint64_t DEFAULT_MAX_LOCK_MEMORY = 64L * 1024 * 1024;

13
storage/tokudb/PerconaFT/locktree/manager.cc

@ -483,4 +483,17 @@ void locktree_manager::get_status(LTM_STATUS statp) {
*statp = ltm_status;
}
void locktree_manager::kill_waiter(void *extra) {
mutex_lock();
int r = 0;
size_t num_locktrees = m_locktree_map.size();
for (size_t i = 0; i < num_locktrees; i++) {
locktree *lt;
r = m_locktree_map.fetch(i, &lt);
invariant_zero(r);
lock_request::kill_waiter(lt, extra);
}
mutex_unlock();
}
} /* namespace toku */

100
storage/tokudb/PerconaFT/locktree/tests/kill_waiter.cc

@ -0,0 +1,100 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
// test the lock manager kill waiter function
#include "locktree.h"
#include "lock_request.h"
#include "test.h"
#include "locktree_unit_test.h"
#include <thread>
#include <atomic>
namespace toku {
const uint64_t my_lock_wait_time = 1000 * 1000;
const uint64_t my_killed_time = 500 * 1000;
const int n_locks = 4;
// killed_callback passed to wait(): logs that it was polled and reports
// "not killed" (0), so a waiter keeps waiting until kill_waiter fires.
static int my_killed_callback(void) {
    fprintf(stderr, "%s:%u %s\n", __FILE__, __LINE__, __FUNCTION__);
    return 0;
}
// Release txn_id's lock on the range [left, right] by handing the
// locktree a one-entry range buffer.
static void locktree_release_lock(locktree *lt, TXNID txn_id, const DBT *left, const DBT *right) {
    range_buffer released;
    released.create();
    released.append(left, right);
    lt->release_locks(txn_id, &released);
    released.destroy();
}
// Thread body: block on the lock request until it is killed, then flag
// completion. The wait must fail, since the lock is never granted.
static void wait_lock(lock_request *lr, std::atomic_int *done) {
    const int r = lr->wait(my_lock_wait_time, my_killed_time, my_killed_callback);
    assert(r == DB_LOCK_NOTGRANTED);
    done->store(1);
}
// Exercise locktree_manager::kill_waiter():
// - txn 'n_locks' grabs the write lock on key 'one'
// - n_locks-1 other txns block on it, each tagged with the address of its
//   waiter thread object as the 'extra' client context
// - kill each waiter in turn and verify that exactly that waiter wakes up
static void test_kill_waiter(void) {
    int r;
    locktree_manager mgr;
    mgr.create(nullptr, nullptr, nullptr, nullptr);
    DICTIONARY_ID dict_id = { 1 };
    locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
    const DBT *one = get_dbt(1);
    lock_request locks[n_locks];
    std::thread waiters[n_locks-1];
    for (int i = 0; i < n_locks; i++) {
        locks[i].create();
        // the last request (the lock holder) has no waiter thread; give it
        // a null context instead of forming the out-of-bounds expression
        // &waiters[n_locks-1] on an array of n_locks-1 elements
        void *extra = (i < n_locks-1) ? &waiters[i] : nullptr;
        locks[i].set(lt, i+1, one, one, lock_request::type::WRITE, false, extra);
    }
    // txn 'n_locks' grabs the lock
    r = locks[n_locks-1].start();
    assert_zero(r);
    // all other txns conflict with the holder and go pending
    for (int i = 0; i < n_locks-1; i++) {
        r = locks[i].start();
        assert(r == DB_LOCK_NOTGRANTED);
    }
    std::atomic_int done[n_locks-1];
    for (int i = 0; i < n_locks-1; i++) {
        done[i] = 0;
        waiters[i] = std::thread(wait_lock, &locks[i], &done[i]);
    }
    // nobody should have woken up yet
    for (int i = 0; i < n_locks-1; i++) {
        assert(!done[i]);
    }
    sleep(1);
    // kill waiters one at a time; each kill wakes only its own waiter
    for (int i = 0; i < n_locks-1; i++) {
        mgr.kill_waiter(&waiters[i]);
        while (!done[i]) sleep(1);
        waiters[i].join();
        for (int j = i+1; j < n_locks-1; j++)
            assert(!done[j]);
    }
    locktree_release_lock(lt, n_locks, one, one);
    for (int i = 0; i < n_locks; i++) {
        locks[i].destroy();
    }
    mgr.release_lt(lt);
    mgr.destroy();
}
} /* namespace toku */
// Entry point: run the kill-waiter scenario once; the asserts inside
// test_kill_waiter() do all of the checking.
int main(void) {
    toku::test_kill_waiter();
    return 0;
}

3
storage/tokudb/PerconaFT/locktree/tests/lock_request_killed.cc

@ -51,8 +51,9 @@ static uint64_t t_do_kill;
static int my_killed_callback(void) {
uint64_t t_now = toku_current_time_microsec();
if (t_now == t_last_kill)
return 0;
assert(t_now >= t_last_kill);
assert(t_now - t_last_kill >= my_killed_time * 1000 / 2); // div by 2 for valgrind which is not very accurate
t_last_kill = t_now;
killed_calls++;
if (t_now >= t_do_kill)

1
storage/tokudb/PerconaFT/locktree/tests/lock_request_not_killed.cc

@ -52,7 +52,6 @@ static uint64_t t_last_kill;
static int my_killed_callback(void) {
uint64_t t_now = toku_current_time_microsec();
assert(t_now >= t_last_kill);
assert(t_now - t_last_kill >= my_killed_time * 1000 / 2); // div by 2 for valgrind which is not very accurate
t_last_kill = t_now;
killed_calls++;
return 0;

91
storage/tokudb/PerconaFT/locktree/tests/lock_request_start_release_wait.cc

@ -0,0 +1,91 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
// test the race between start, release, and wait. since start does not put
// its lock request into the pending set, the blocking txn could release its
// lock before the first txn waits. this will block the first txn because its
// lock request is not known when the lock is released. the bug fix is to try
// again when lock retries are locked out.
#include "lock_request.h"
#include <atomic>
#include <thread>
#include "locktree.h"
#include "locktree_unit_test.h"
#include "test.h"
namespace toku {
const uint64_t my_lock_wait_time = 1000 * 1000; // ms
const uint64_t my_killed_time = 1 * 1000; // ms
static uint64_t t_wait;
// killed_callback for wait(): aborts the test if the wait has lasted a
// full killed-time interval -- the contended lock was already released,
// so b's wait must be granted quickly rather than hang.
static int my_killed_callback(void) {
    const uint64_t now = toku_current_time_microsec();
    assert(now >= t_wait);
    const uint64_t elapsed_usec = now - t_wait;
    if (elapsed_usec >= my_killed_time * 1000)
        abort();
    return 0;  // never ask for a kill
}
// Release txn_id's lock on [left, right] via a one-entry range buffer.
static void locktree_release_lock(locktree *lt,
                                  TXNID txn_id,
                                  const DBT *left,
                                  const DBT *right) {
    range_buffer released;
    released.create();
    released.append(left, right);
    lt->release_locks(txn_id, &released);
    released.destroy();
}
// Reproduce the start/release/wait race: txn 'a' releases the contended
// lock after txn 'b' failed start() but before b calls wait(). b's wait()
// must still be granted (via its own retry) instead of hanging until the
// lock timer pops; my_killed_callback aborts the test if it hangs.
static void test_start_release_wait(void) {
    int r;
    locktree_manager mgr;
    mgr.create(nullptr, nullptr, nullptr, nullptr);
    DICTIONARY_ID dict_id = {1};
    locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
    const DBT *one = get_dbt(1);
    // a locks one
    lock_request a;
    a.create();
    a.set(lt, 1, one, one, lock_request::type::WRITE, false);
    r = a.start();
    assert(r == 0);
    // b tries to lock one, fails
    lock_request b;
    b.create();
    b.set(lt, 2, one, one, lock_request::type::WRITE, false);
    r = b.start();
    assert(r == DB_LOCK_NOTGRANTED);
    // a releases its lock -- note b has not called wait() yet
    locktree_release_lock(lt, 1, one, one);
    // b waits for one, gets locks immediately
    t_wait = toku_current_time_microsec();
    r = b.wait(my_lock_wait_time, my_killed_time, my_killed_callback);
    assert(r == 0);
    // b releases its lock so we can exit cleanly
    locktree_release_lock(lt, 2, one, one);
    a.destroy();
    b.destroy();
    mgr.release_lt(lt);
    mgr.destroy();
}
} /* namespace toku */
// Entry point: run the start/release/wait scenario once; the asserts
// inside test_start_release_wait() do all of the checking.
int main(void) {
    toku::test_start_release_wait();
    return 0;
}

121
storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race.cc

@ -34,12 +34,14 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#ident \
"Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#include "lock_request.h"
#include <iostream>
#include "test.h"
#include <thread>
#include "locktree.h"
#include "lock_request.h"
#include "test.h"
// Test FT-633, the data race on the lock request between ::start and ::retry
// This test is non-deterministic. It uses sleeps at 2 critical places to
@ -47,90 +49,65 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
namespace toku {
struct locker_arg {
locktree *_lt;
TXNID _id;
const DBT *_key;
locker_arg(locktree *lt, TXNID id, const DBT *key) : _lt(lt), _id(id), _key(key) {
}
};
static void locker_callback(void) {
usleep(10000);
}
static void run_locker(locktree *lt, TXNID txnid, const DBT *key) {
int i;
for (i = 0; i < 1000; i++) {
lock_request request;
request.create();
request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
// set the test callbacks
request.set_start_test_callback(locker_callback);
request.set_retry_test_callback(locker_callback);
// try to acquire the lock
int r = request.start();
if (r == DB_LOCK_NOTGRANTED) {
// wait for the lock to be granted
r = request.wait(10 * 1000);
}
if (r == 0) {
// release the lock
range_buffer buffer;
buffer.create();
buffer.append(key, key);
lt->release_locks(txnid, &buffer);
buffer.destroy();
// retry pending lock requests
lock_request::retry_all_lock_requests(lt);
static void locker_callback(void) { usleep(10000); }
static void run_locker(locktree *lt, TXNID txnid, const DBT *key) {
int i;
for (i = 0; i < 1000; i++) {
lock_request request;
request.create();
request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
// set the test callbacks
request.set_start_test_callback(locker_callback);
request.set_retry_test_callback(locker_callback);
// try to acquire the lock
int r = request.start();
if (r == DB_LOCK_NOTGRANTED) {
// wait for the lock to be granted
r = request.wait(10 * 1000);
}
if (r == 0) {
// release the lock
range_buffer buffer;
buffer.create();
buffer.append(key, key);
lt->release_locks(txnid, &buffer);
buffer.destroy();
// retry pending lock requests
lock_request::retry_all_lock_requests(lt);
}
request.destroy();
memset(&request, 0xab, sizeof request);
toku_pthread_yield();
if ((i % 10) == 0)
std::cerr << std::this_thread::get_id() << " " << i
<< std::endl;
}
request.destroy();
memset(&request, 0xab, sizeof request);
toku_pthread_yield();
if ((i % 10) == 0)
std::cout << toku_pthread_self() << " " << i << std::endl;
}
}
static void *locker(void *v_arg) {
locker_arg *arg = static_cast<locker_arg *>(v_arg);
run_locker(arg->_lt, arg->_id, arg->_key);
return arg;
}
} /* namespace toku */
int main(void) {
int r;
toku::locktree lt;
DICTIONARY_ID dict_id = { 1 };
DICTIONARY_ID dict_id = {1};
lt.create(nullptr, dict_id, toku::dbt_comparator);
const DBT *one = toku::get_dbt(1);
const int n_workers = 2;
toku_pthread_t ids[n_workers];
std::thread worker[n_workers];
for (int i = 0; i < n_workers; i++) {
toku::locker_arg *arg = new toku::locker_arg(&lt, i, one);
r = toku_pthread_create(&ids[i], nullptr, toku::locker, arg);
assert_zero(r);
worker[i] = std::thread(toku::run_locker, &lt, i, one);
}
for (int i = 0; i < n_workers; i++) {
void *ret;
r = toku_pthread_join(ids[i], &ret);
assert_zero(r);
toku::locker_arg *arg = static_cast<toku::locker_arg *>(ret);
delete arg;
worker[i].join();
}
lt.release_reference();

133
storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc

@ -0,0 +1,133 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.
Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
----------------------------------------
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident \
"Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#include "lock_request.h"
#include <pthread.h>
#include <iostream>
#include <thread>
#include "locktree.h"
#include "test.h"
// Suppose that 3 threads are running a lock acquire, release, retry sequence.
// There is a race in the retry algorithm with 2 threads running lock retry
// simultaneously. The first thread to run retry sets a flag that will cause
// the second thread to skip the lock retries. If the first thread progressed
past the contended lock, then the second thread will HANG until its lock timer
// pops, even when the contended lock is no longer held.
// This test exposes this problem as a test hang. The group retry algorithm
fixes the race in the lock request retry algorithm and this test should no
// longer hang.
namespace toku {
// use 1000 when after_retry_all is implemented, otherwise use 100000
static const int n_tests = 1000; // 100000;
// Test hook passed to retry_all_lock_requests(): sleeps 10 ms after the
// retry pass so concurrent retry callers are likely to collide.
static void after_retry_all(void) {
    usleep(10000);
}
// Worker body: repeatedly acquire, release, and group-retry a WRITE lock
// on 'key'. All workers rendezvous at the barrier each iteration so their
// lock requests collide; a hang here means the retry race regressed.
static void run_locker(locktree *lt,
                       TXNID txnid,
                       const DBT *key,
                       pthread_barrier_t *b) {
    for (int i = 0; i < n_tests; i++) {
        int r;
        // line up all workers so each iteration races start/release/retry
        r = pthread_barrier_wait(b);
        assert(r == 0 || r == PTHREAD_BARRIER_SERIAL_THREAD);
        lock_request request;
        request.create();
        request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
        // try to acquire the lock
        r = request.start();
        if (r == DB_LOCK_NOTGRANTED) {
            // wait for the lock to be granted
            r = request.wait(1000 * 1000);
        }
        if (r == 0) {
            // release the lock
            range_buffer buffer;
            buffer.create();
            buffer.append(key, key);
            lt->release_locks(txnid, &buffer);
            buffer.destroy();
            // retry pending lock requests
            lock_request::retry_all_lock_requests(lt, after_retry_all);
        }
        request.destroy();
        // poison the destroyed request to catch use-after-destroy
        memset(&request, 0xab, sizeof request);
        toku_pthread_yield();
        // progress trace every 10 iterations
        if ((i % 10) == 0)
            std::cerr << std::this_thread::get_id() << " " << i
                      << std::endl;
    }
}
} /* namespace toku */
// Spin up three workers that hammer the same key through the
// acquire/release/retry cycle; the test passes iff no worker hangs.
int main(void) {
    toku::locktree lt;
    DICTIONARY_ID dict_id = {1};
    lt.create(nullptr, dict_id, toku::dbt_comparator);
    const DBT *one = toku::get_dbt(1);

    const int n_workers = 3;
    pthread_barrier_t barrier;
    int r = pthread_barrier_init(&barrier, nullptr, n_workers);
    assert(r == 0);

    std::thread workers[n_workers];
    for (int id = 0; id < n_workers; id++)
        workers[id] = std::thread(toku::run_locker, &lt, id, one, &barrier);
    for (auto &w : workers)
        w.join();

    r = pthread_barrier_destroy(&barrier);
    assert(r == 0);
    lt.release_reference();
    lt.destroy();
    return 0;
}

135
storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_wait_race_2.cc

@ -0,0 +1,135 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.
Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
----------------------------------------
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident \
"Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#include "lock_request.h"
#include <pthread.h>
#include <iostream>
#include <thread>
#include "locktree.h"
#include "test.h"
// Suppose that 2 threads are running a lock acquire, release, retry sequence.
// There is a race between the acquire and the release with 2 threads.
// If thread 1 acquires a lock, and thread 2 tries to acquire the same lock and
// fails, thread 1 may release its lock and retry pending lock requests BEFORE
// thread 2 adds itself to the pending lock requests. If this happens, then
// thread 2 will HANG until its lock timer expires even when the lock it is
// waiting for is FREE.
// This test exposes this problem as a test hang. If the race is fixed, then
// the test runs to completion.
namespace toku {
// Test hook: sleeps 10 ms inside start(), before the request is added to
// the pending set, making the start/release race easy to reproduce.
static void start_before_pending(void) {
    usleep(10000);
}
// Worker body: repeatedly acquire, release, and retry a WRITE lock on
// 'key', with both workers rendezvousing at the barrier each iteration.
// The start_before_pending hook widens the window in which one worker can
// release and retry before the other's request is pending; a hang here
// means the race regressed.
static void run_locker(locktree *lt,
                       TXNID txnid,
                       const DBT *key,
                       pthread_barrier_t *b) {
    for (int i = 0; i < 100000; i++) {
        int r;
        // line up both workers so their lock requests collide
        r = pthread_barrier_wait(b);
        assert(r == 0 || r == PTHREAD_BARRIER_SERIAL_THREAD);
        lock_request request;
        request.create();
        request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
        // if the callback is included, then the race is easy to reproduce.
        // Otherwise, several test runs may be required before the race
        // happens.
        request.set_start_before_pending_test_callback(
            start_before_pending);
        // try to acquire the lock
        r = request.start();
        if (r == DB_LOCK_NOTGRANTED) {
            // wait for the lock to be granted
            r = request.wait(1000 * 1000);
        }
        if (r == 0) {
            // release the lock
            range_buffer buffer;
            buffer.create();
            buffer.append(key, key);
            lt->release_locks(txnid, &buffer);
            buffer.destroy();
            // retry pending lock requests
            lock_request::retry_all_lock_requests(lt);
        }
        request.destroy();
        // poison the destroyed request to catch use-after-destroy
        memset(&request, 0xab, sizeof request);
        toku_pthread_yield();
        // progress trace every 10 iterations
        if ((i % 10) == 0)
            std::cerr << std::this_thread::get_id() << " " << i
                      << std::endl;
    }
}
} /* namespace toku */
// Spin up two workers that hammer the same key through the
// acquire/release/retry cycle; the test passes iff neither worker hangs.
int main(void) {
    toku::locktree lt;
    DICTIONARY_ID dict_id = {1};
    lt.create(nullptr, dict_id, toku::dbt_comparator);
    const DBT *one = toku::get_dbt(1);

    const int n_workers = 2;
    pthread_barrier_t barrier;
    int r = pthread_barrier_init(&barrier, nullptr, n_workers);
    assert(r == 0);

    std::thread workers[n_workers];
    for (int id = 0; id < n_workers; id++)
        workers[id] = std::thread(toku::run_locker, &lt, id, one, &barrier);
    for (auto &w : workers)
        w.join();

    r = pthread_barrier_destroy(&barrier);
    assert(r == 0);
    lt.release_reference();
    lt.destroy();
    return 0;
}

77
storage/tokudb/PerconaFT/portability/toku_debug_sync.h

@ -0,0 +1,77 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.
Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
----------------------------------------
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#pragma once
struct tokutxn;
#if defined(ENABLED_DEBUG_SYNC)
/*
the below macros are defined in my_global.h, which is included in m_string.h,
the same macros are defined in TokuSetupCompiler.cmake as compiler options,
undefine them here to avoid build errors
*/
#undef __STDC_FORMAT_MACROS
#undef __STDC_LIMIT_MACROS
#include "m_string.h"
#include "debug_sync.h"
// Fetch the client id / opaque client pointer stored on the txn.
// Here the client_extra pointer is assumed to be the server THD
// (see the cast below) — set by the handlerton via set_client_id.
void toku_txn_get_client_id(struct tokutxn *txn,
                            uint64_t *client_id,
                            void **client_extra);
// Fire the named MySQL DEBUG_SYNC point on behalf of the server
// connection (THD) that owns `txn`.  No-op when debug sync is
// disabled at runtime (opt_debug_sync_timeout == 0).
inline void toku_debug_sync(struct tokutxn *txn, const char *sync_point_name) {
    uint64_t client_id;
    void *client_extra;
    THD *thd;
    // fast path: debug sync not active in this server run
    if (likely(!opt_debug_sync_timeout))
        return;
    toku_txn_get_client_id(txn, &client_id, &client_extra);
    // client_extra was registered as the connection's THD
    thd = reinterpret_cast<THD *>(client_extra);
    debug_sync(thd, sync_point_name, strlen(sync_point_name));
}
#else // defined(ENABLED_DEBUG_SYNC)
// Stub for builds without DEBUG_SYNC support: do nothing.
inline void toku_debug_sync(struct tokutxn *, const char *) {};
#endif // defined(ENABLED_DEBUG_SYNC)

3
storage/tokudb/PerconaFT/portability/toku_portability.h

@ -121,6 +121,7 @@ typedef int64_t toku_off_t;
#include "toku_htod.h"
#include "toku_assert.h"
#include "toku_crash.h"
#include "toku_debug_sync.h"
#define UU(x) x __attribute__((__unused__))
@ -183,8 +184,10 @@ extern void *realloc(void*, size_t) __THROW __attribute__((__deprecat
# pragma GCC poison u_int32_t
# pragma GCC poison u_int64_t
# pragma GCC poison BOOL
#if !defined(MYSQL_TOKUDB_ENGINE)
# pragma GCC poison FALSE
# pragma GCC poison TRUE
#endif // MYSQL_TOKUDB_ENGINE
#endif
#pragma GCC poison __sync_fetch_and_add
#pragma GCC poison __sync_fetch_and_sub

9
storage/tokudb/PerconaFT/src/tests/test_iterate_live_transactions.cc

@ -55,7 +55,8 @@ static int iterate_callback(DB_TXN *txn,
iterate_row_locks_callback iterate_locks,
void *locks_extra, void *extra) {
uint64_t txnid = txn->id64(txn);
uint64_t client_id = txn->get_client_id(txn);
uint64_t client_id; void *client_extra;
txn->get_client_id(txn, &client_id, &client_extra);
iterate_extra *info = reinterpret_cast<iterate_extra *>(extra);
DB *db;
DBT left_key, right_key;
@ -93,13 +94,13 @@ int test_main(int UU(argc), char *const UU(argv[])) {
r = env->open(env, TOKU_TEST_FILENAME, env_flags, 0755); CKERR(r);
r = env->txn_begin(env, NULL, &txn1, 0); CKERR(r);
txn1->set_client_id(txn1, 0);
txn1->set_client_id(txn1, 0, nullptr);
txnid1 = txn1->id64(txn1);
r = env->txn_begin(env, NULL, &txn2, 0); CKERR(r);
txn2->set_client_id(txn2, 1);
txn2->set_client_id(txn2, 1, nullptr);
txnid2 = txn2->id64(txn2);
r = env->txn_begin(env, NULL, &txn3, 0); CKERR(r);
txn3->set_client_id(txn3, 2);
txn3->set_client_id(txn3, 2, nullptr);
txnid3 = txn3->id64(txn3);
{

3
storage/tokudb/PerconaFT/src/tests/test_stress0.cc

@ -93,7 +93,8 @@ static int iterate_txns(DB_TXN *txn,
iterate_row_locks_callback iterate_locks,
void *locks_extra, void *extra) {
uint64_t txnid = txn->id64(txn);
uint64_t client_id = txn->get_client_id(txn);
uint64_t client_id; void *client_extra;
txn->get_client_id(txn, &client_id, &client_extra);
invariant_null(extra);
invariant(txnid > 0);
invariant(client_id == 0);

9
storage/tokudb/PerconaFT/src/tests/transactional_fileops.cc

@ -87,6 +87,7 @@ setup (void) {
else error_file = stderr;
r=db_env_create(&env, 0); CKERR(r);
env->set_dir_per_db(env, true);
env->set_errfile(env, error_file ? error_file : stderr);
r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
}
@ -431,6 +432,14 @@ test_fileops_3(void) {
r = env->dbrename(env, txn_a, "a.db", NULL, "d.db", 0);
CKERR2(r, EEXIST);
// verify correct error return code when trying to
// rename a dictionary to a name that is beyond the limit
// of the operating system.
char longname[FILENAME_MAX+11];
memset(longname, 'b', FILENAME_MAX+7);
memcpy(longname+FILENAME_MAX+7, ".db", 4);
r = env->dbrename(env, txn_a, "a.db", NULL, longname, 0);
CKERR2(r, ENAMETOOLONG);
r=txn_a->abort(txn_a); CKERR(r);
}

38
storage/tokudb/PerconaFT/src/ydb.cc

@ -2775,6 +2775,10 @@ static void env_set_killed_callback(DB_ENV *env, uint64_t default_killed_time_ms
env->i->killed_callback = killed_callback;
}
// DB_ENV::kill_waiter entry point: forward to the locktree manager so
// any pending lock request whose client handle matches `extra`
// (e.g. a THD being killed) can be woken/aborted.
static void env_kill_waiter(DB_ENV *env, void *extra) {
    env->i->ltm.kill_waiter(extra);
}
static void env_do_backtrace(DB_ENV *env) {
if (env->i->errcall) {
db_env_do_backtrace_errfunc((toku_env_err_func) toku_env_err, (const void *) env);
@ -2877,6 +2881,7 @@ toku_env_create(DB_ENV ** envp, uint32_t flags) {
USENV(set_dir_per_db);
USENV(get_dir_per_db);
USENV(get_data_dir);
USENV(kill_waiter);
#undef USENV
// unlocked methods
@ -3061,28 +3066,31 @@ env_dbremove_subdb(DB_ENV * env, DB_TXN * txn, const char *fname, const char *db
// see if we can acquire a table lock for the given dname.
// requires: write lock on dname in the directory. dictionary
// open, close, and begin checkpoint cannot occur.
// returns: true if we could open, lock, and close a dictionary
// with the given dname, false otherwise.
static bool
// returns: zero if we could open, lock, and close a dictionary
// with the given dname, errno otherwise.
static int
can_acquire_table_lock(DB_ENV *env, DB_TXN *txn, const char *iname_in_env) {
int r;
bool got_lock = false;
DB *db;
r = toku_db_create(&db, env, 0);
assert_zero(r);
r = toku_db_open_iname(db, txn, iname_in_env, 0, 0);
assert_zero(r);
if(r) {
if (r == ENAMETOOLONG)
toku_ydb_do_error(env, r, "File name too long!\n");
goto exit;
}
r = toku_db_pre_acquire_table_lock(db, txn);
if (r == 0) {
got_lock = true;
} else {
got_lock = false;
if (r) {
r = DB_LOCK_NOTGRANTED;
}
r = toku_db_close(db);
assert_zero(r);
return got_lock;
exit:
if(db) {
int r2 = toku_db_close(db);
assert_zero(r2);
}
return r;
}
static int
@ -3295,8 +3303,8 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co
// otherwise, we're okay in marking this ft as remove on
// commit. no new handles can open for this dictionary
// because the txn has directory write locks on the dname
if (txn && !can_acquire_table_lock(env, txn, new_iname.get())) {
r = DB_LOCK_NOTGRANTED;
if (txn) {
r = can_acquire_table_lock(env, txn, new_iname.get());
}
// We don't do anything at the ft or cachetable layer for rename.
// We just update entries in the environment's directory.

22
storage/tokudb/PerconaFT/src/ydb_row_lock.cc

@ -181,7 +181,16 @@ int toku_db_get_range_lock(DB *db, DB_TXN *txn, const DBT *left_key, const DBT *
request.create();
int r = toku_db_start_range_lock(db, txn, left_key, right_key, lock_type, &request);
if (r == DB_LOCK_NOTGRANTED) {
toku_debug_sync(db_txn_struct_i(txn)->tokutxn,
"toku_range_lock_before_wait");
r = toku_db_wait_range_lock(db, txn, &request);
if (r == DB_LOCK_NOTGRANTED)
toku_debug_sync(db_txn_struct_i(txn)->tokutxn,
"toku_range_lock_not_granted_after_wait");
}
else if (r == 0) {
toku_debug_sync(db_txn_struct_i(txn)->tokutxn,
"toku_range_lock_granted_immediately");
}
request.destroy();
@ -191,9 +200,13 @@ int toku_db_get_range_lock(DB *db, DB_TXN *txn, const DBT *left_key, const DBT *
// Setup and start an asynchronous lock request.
int toku_db_start_range_lock(DB *db, DB_TXN *txn, const DBT *left_key, const DBT *right_key,
toku::lock_request::type lock_type, toku::lock_request *request) {
uint64_t client_id;
void *client_extra;
DB_TXN *txn_anc = txn_oldest_ancester(txn);
TXNID txn_anc_id = txn_anc->id64(txn_anc);
request->set(db->i->lt, txn_anc_id, left_key, right_key, lock_type, toku_is_big_txn(txn_anc));
txn->get_client_id(txn, &client_id, &client_extra);
request->set(db->i->lt, txn_anc_id, left_key, right_key, lock_type,
toku_is_big_txn(txn_anc), client_extra);
const int r = request->start();
if (r == 0) {
@ -241,6 +254,8 @@ int toku_db_get_point_write_lock(DB *db, DB_TXN *txn, const DBT *key) {
// acquire a point write lock on the key for a given txn.
// this does not block the calling thread.
void toku_db_grab_write_lock (DB *db, DBT *key, TOKUTXN tokutxn) {
uint64_t client_id;
void *client_extra;
DB_TXN *txn = toku_txn_get_container_db_txn(tokutxn);
DB_TXN *txn_anc = txn_oldest_ancester(txn);
TXNID txn_anc_id = txn_anc->id64(txn_anc);
@ -248,7 +263,10 @@ void toku_db_grab_write_lock (DB *db, DBT *key, TOKUTXN tokutxn) {
// This lock request must succeed, so we do not want to wait
toku::lock_request request;
request.create();
request.set(db->i->lt, txn_anc_id, key, key, toku::lock_request::type::WRITE, toku_is_big_txn(txn_anc));
txn->get_client_id(txn, &client_id, &client_extra);
request.set(db->i->lt, txn_anc_id, key, key,
toku::lock_request::type::WRITE, toku_is_big_txn(txn_anc),
client_extra);
int r = request.start();
invariant_zero(r);
db_txn_note_row_lock(db, txn_anc, key, key);

8
storage/tokudb/PerconaFT/src/ydb_txn.cc

@ -323,12 +323,12 @@ int locked_txn_abort(DB_TXN *txn) {
return r;
}
static void locked_txn_set_client_id(DB_TXN *txn, uint64_t client_id) {
toku_txn_set_client_id(db_txn_struct_i(txn)->tokutxn, client_id);
static void locked_txn_set_client_id(DB_TXN *txn, uint64_t client_id, void *client_extra) {
toku_txn_set_client_id(db_txn_struct_i(txn)->tokutxn, client_id, client_extra);
}
static uint64_t locked_txn_get_client_id(DB_TXN *txn) {
return toku_txn_get_client_id(db_txn_struct_i(txn)->tokutxn);
static void locked_txn_get_client_id(DB_TXN *txn, uint64_t *client_id, void **client_extra) {
toku_txn_get_client_id(db_txn_struct_i(txn)->tokutxn, client_id, client_extra);
}
static int toku_txn_discard(DB_TXN *txn, uint32_t flags) {

8
storage/tokudb/PerconaFT/tools/CMakeLists.txt

@ -6,6 +6,14 @@ foreach(tool ${tools})
add_dependencies(${tool} install_tdb_h)
target_link_libraries(${tool} ${LIBTOKUDB}_static ft_static z lzma snappy ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS})
# detect when we are being built as a subproject
if (DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
if ((CMAKE_BUILD_TYPE MATCHES "Debug") AND
(CMAKE_CXX_FLAGS_DEBUG MATCHES " -DENABLED_DEBUG_SYNC"))
target_link_libraries(${tool} sql binlog rpl master slave)
endif()
endif ()
add_space_separated_property(TARGET ${tool} COMPILE_FLAGS -fvisibility=hidden)
endforeach(tool)

2
storage/tokudb/PerconaFT/tools/ftverify.cc

@ -325,7 +325,7 @@ check_block(BLOCKNUM blocknum, int64_t UU(blocksize), int64_t UU(address), void
}
just_decompress_sub_block(&sb);
r = verify_ftnode_sub_block(&sb);
r = verify_ftnode_sub_block(&sb, nullptr, blocknum);
if (r != 0) {
printf(" Uncompressed child partition %d checksum failed.\n", i);
failure++;

3
storage/tokudb/PerconaFT/tools/tokuftdump.cc

@ -158,7 +158,8 @@ static void dump_descriptor(DESCRIPTOR d) {
static void open_header(int fd, FT *header, CACHEFILE cf) {
FT ft = NULL;
int r;
r = toku_deserialize_ft_from (fd, MAX_LSN, &ft);
const char *fn = toku_cachefile_fname_in_env(cf);
r = toku_deserialize_ft_from (fd, fn, MAX_LSN, &ft);
if (r != 0) {
fprintf(stderr, "%s: can not deserialize from %s error %d\n", arg0, fname, r);
exit(1);

124
storage/tokudb/ha_tokudb.cc

@ -532,51 +532,6 @@ typedef struct index_read_info {
DBT* orig_key;
} *INDEX_READ_INFO;
static int ai_poll_fun(void *extra, float progress) {
LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
if (thd_killed(context->thd)) {
sprintf(context->write_status_msg, "The process has been killed, aborting add index.");
return ER_ABORTING_CONNECTION;
}
float percentage = progress * 100;
sprintf(context->write_status_msg, "Adding of indexes about %.1f%% done", percentage);
thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
thd_progress_report(context->thd, (unsigned long long) percentage, 100);
#endif
return 0;
}
static int loader_poll_fun(void *extra, float progress) {
LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
if (thd_killed(context->thd)) {
sprintf(context->write_status_msg, "The process has been killed, aborting bulk load.");
return ER_ABORTING_CONNECTION;
}
float percentage = progress * 100;
sprintf(context->write_status_msg, "Loading of data about %.1f%% done", percentage);
thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
thd_progress_report(context->thd, (unsigned long long) percentage, 100);
#endif
return 0;
}
static void loader_ai_err_fun(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra) {
LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
assert_always(context->ha);
context->ha->set_loader_error(err);
}
static void loader_dup_fun(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra) {
LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
assert_always(context->ha);
context->ha->set_loader_error(err);
if (err == DB_KEYEXIST) {
context->ha->set_dup_value_for_pk(key);
}
}
//
// smart DBT callback function for optimize
// in optimize, we want to flatten DB by doing
@ -3396,11 +3351,13 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) {
lc.thd = thd;
lc.ha = this;
error = loader->set_poll_function(loader, loader_poll_fun, &lc);
error = loader->set_poll_function(
loader, ha_tokudb::bulk_insert_poll, &lc);
assert_always(!error);
error = loader->set_error_callback(loader, loader_dup_fun, &lc);
error = loader->set_error_callback(
loader, ha_tokudb::loader_dup, &lc);
assert_always(!error);
trx->stmt_progress.using_loader = true;
@ -3413,6 +3370,47 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) {
}
TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
// Poll callback used by the bulk loader.  `extra` is the LOADER_CONTEXT
// set up by start_bulk_insert()/tokudb_add_index().
// Returns ER_ABORTING_CONNECTION if the client was killed (which aborts
// the load), 0 to continue.  As a side effect it publishes a progress
// message through thd_proc_info (and thd_progress_report when available).
int ha_tokudb::bulk_insert_poll(void* extra, float progress) {
    LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
    if (thd_killed(context->thd)) {
        sprintf(context->write_status_msg,
                "The process has been killed, aborting bulk load.");
        return ER_ABORTING_CONNECTION;
    }
    float percentage = progress * 100;
    // Fixed message typo: "data t %s" -> "data to %s"
    sprintf(context->write_status_msg,
            "Loading of data to %s about %.1f%% done",
            context->ha->share->full_table_name(),
            percentage);
    thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
    thd_progress_report(context->thd, (unsigned long long)percentage, 100);
#endif
    return 0;
}
// Error callback for the add-index loader/indexer: record the error on
// the owning handler so the foreground thread can report it.
void ha_tokudb::loader_add_index_err(DB* db,
                                     int i,
                                     int err,
                                     DBT* key,
                                     DBT* val,
                                     void* error_extra) {
    LOADER_CONTEXT ctx = static_cast<LOADER_CONTEXT>(error_extra);
    assert_always(ctx->ha);
    ctx->ha->set_loader_error(err);
}
// Duplicate-key callback for the bulk loader: record the error on the
// owning handler; on DB_KEYEXIST also stash the offending primary key so
// the duplicate can be reported to the client.
void ha_tokudb::loader_dup(DB* db,
                           int i,
                           int err,
                           DBT* key,
                           DBT* val,
                           void* error_extra) {
    LOADER_CONTEXT ctx = static_cast<LOADER_CONTEXT>(error_extra);
    assert_always(ctx->ha);
    ctx->ha->set_loader_error(err);
    if (err == DB_KEYEXIST) {
        ctx->ha->set_dup_value_for_pk(key);
    }
}
//
// Method that is called at the end of many calls to insert rows
@ -8179,12 +8177,14 @@ int ha_tokudb::tokudb_add_index(
goto cleanup;
}
error = indexer->set_poll_function(indexer, ai_poll_fun, &lc);
error = indexer->set_poll_function(
indexer, ha_tokudb::tokudb_add_index_poll, &lc);
if (error) {
goto cleanup;
}
error = indexer->set_error_callback(indexer, loader_ai_err_fun, &lc);
error = indexer->set_error_callback(
indexer, ha_tokudb::loader_add_index_err, &lc);
if (error) {
goto cleanup;
}
@ -8239,12 +8239,14 @@ int ha_tokudb::tokudb_add_index(
goto cleanup;
}
error = loader->set_poll_function(loader, loader_poll_fun, &lc);
error =
loader->set_poll_function(loader, ha_tokudb::bulk_insert_poll, &lc);
if (error) {
goto cleanup;
}
error = loader->set_error_callback(loader, loader_ai_err_fun, &lc);
error = loader->set_error_callback(
loader, ha_tokudb::loader_add_index_err, &lc);
if (error) {
goto cleanup;
}
@ -8451,6 +8453,24 @@ cleanup:
thd_proc_info(thd, orig_proc_info);
TOKUDB_HANDLER_DBUG_RETURN(error ? error : loader_error);
}
int ha_tokudb::tokudb_add_index_poll(void* extra, float progress) {
LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
if (thd_killed(context->thd)) {
sprintf(context->write_status_msg,
"The process has been killed, aborting add index.");
return ER_ABORTING_CONNECTION;
}
float percentage = progress * 100;
sprintf(context->write_status_msg,
"Adding of indexes to %s about %.1f%% done",
context->ha->share->full_table_name(),
percentage);
thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
thd_progress_report(context->thd, (unsigned long long)percentage, 100);
#endif
return 0;
}
//
// Internal function called by ha_tokudb::add_index and ha_tokudb::alter_table_phase2

41
storage/tokudb/ha_tokudb.h

@ -799,6 +799,19 @@ public:
#else
void start_bulk_insert(ha_rows rows);
#endif
static int bulk_insert_poll(void* extra, float progress);
static void loader_add_index_err(DB* db,
int i,
int err,
DBT* key,
DBT* val,
void* error_extra);
static void loader_dup(DB* db,
int i,
int err,
DBT* key,
DBT* val,
void* error_extra);
int end_bulk_insert();
int end_bulk_insert(bool abort);
@ -938,17 +951,23 @@ public:
#endif
private:
int tokudb_add_index(
TABLE *table_arg,
KEY *key_info,
uint num_of_keys,
DB_TXN* txn,
bool* inc_num_DBs,
bool* modified_DB
);
void restore_add_index(TABLE* table_arg, uint num_of_keys, bool incremented_numDBs, bool modified_DBs);
int drop_indexes(TABLE *table_arg, uint *key_num, uint num_of_keys, KEY *key_info, DB_TXN* txn);
void restore_drop_indexes(TABLE *table_arg, uint *key_num, uint num_of_keys);
int tokudb_add_index(TABLE* table_arg,
KEY* key_info,
uint num_of_keys,
DB_TXN* txn,
bool* inc_num_DBs,
bool* modified_DB);
static int tokudb_add_index_poll(void *extra, float progress);
void restore_add_index(TABLE* table_arg,
uint num_of_keys,
bool incremented_numDBs,
bool modified_DBs);
int drop_indexes(TABLE* table_arg,
uint* key_num,
uint num_of_keys,
KEY* key_info,
DB_TXN* txn);
void restore_drop_indexes(TABLE* table_arg, uint* key_num, uint num_of_keys);
public:
// delete all rows from the table

8
storage/tokudb/hatoku_hton.cc

@ -67,6 +67,7 @@ static bool tokudb_show_status(
static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig);
#endif
static int tokudb_close_connection(handlerton* hton, THD* thd);
static void tokudb_kill_connection(handlerton *hton, THD *thd);
static int tokudb_commit(handlerton* hton, THD* thd, bool all);
static int tokudb_rollback(handlerton* hton, THD* thd, bool all);
#if TOKU_INCLUDE_XA
@ -343,6 +344,7 @@ static int tokudb_init_func(void *p) {
tokudb_hton->create = tokudb_create_handler;
tokudb_hton->close_connection = tokudb_close_connection;
tokudb_hton->kill_connection = tokudb_kill_connection;
tokudb_hton->savepoint_offset = sizeof(SP_INFO_T);
tokudb_hton->savepoint_set = tokudb_savepoint;
@ -766,6 +768,12 @@ static int tokudb_close_connection(handlerton* hton, THD* thd) {
return error;
}
// handlerton::kill_connection hook: wake any lock-request waiter owned
// by the THD being killed so it fails fast instead of waiting out its
// lock timeout.
// Marked `static` to match the forward declaration of this function
// earlier in the file (the definition previously omitted the keyword,
// which draws -Wmissing-declarations-style warnings on some compilers).
static void tokudb_kill_connection(handlerton *hton, THD *thd) {
    TOKUDB_DBUG_ENTER("");
    db_env->kill_waiter(db_env, thd);
    DBUG_VOID_RETURN;
}
bool tokudb_flush_logs(handlerton * hton) {
TOKUDB_DBUG_ENTER("");
int error;

26
storage/tokudb/mysql-test/tokudb/r/kill_query_blocked_in_lt.result

@ -0,0 +1,26 @@
### connection default
CREATE TABLE t (a INT PRIMARY KEY, b INT) ENGINE=TokuDB;
INSERT INTO t (a, b) VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5),
(6, 6), (7, 7), (8, 8), (9, 9), (10, 10),
(11, 11), (12, 12), (13, 13), (14, 14), (15, 15),
(16, 16), (17, 17), (18, 18), (19, 19), (20, 20);
### connection con1
SET DEBUG_SYNC= 'toku_range_lock_granted_immediately SIGNAL lock_granted WAIT_FOR lock_granted_continue';
UPDATE t SET b=1 WHERE a BETWEEN 5 AND 10;
### connection default
SET DEBUG_SYNC= 'now WAIT_FOR lock_granted';
### connection con2
SET DEBUG_SYNC= 'toku_range_lock_before_wait SIGNAL lock_not_granted WAIT_FOR lock_not_granted_continue';
SET DEBUG_SYNC= 'toku_range_lock_not_granted_after_wait SIGNAL lock_not_granted_after_wait';
UPDATE t SET b=1 WHERE a BETWEEN 5 AND 10;
### connection default
SET DEBUG_SYNC= 'now SIGNAL lock_not_granted_continue WAIT_FOR lock_not_granted';
KILL QUERY con2_id
SET DEBUG_SYNC= 'now SIGNAL lock_granted_continue WAIT_FOR lock_not_granted_after_wait';
### connection con1
### reap
### connection con2
### reap
ERROR 70100: Query execution was interrupted
### connection default
DROP TABLE t;

5
storage/tokudb/mysql-test/tokudb/r/locks-select-update-3.result

@ -1,6 +1,4 @@
SET DEFAULT_STORAGE_ENGINE='tokudb';
drop table if exists t;
create table t (a int primary key, b int);
create table t (a int primary key, b int) engine=tokudb;
insert into t values (1,0);
set session transaction isolation level read committed;
begin;
@ -8,6 +6,7 @@ select * from t where a=1 for update;
a b
1 0
update t set b=b+1 where a=1;
set session tokudb_lock_timeout=60000;
set session transaction isolation level read committed;
begin;
select * from t where a=1 for update;

56
storage/tokudb/mysql-test/tokudb/t/kill_query_blocked_in_lt.test

@ -0,0 +1,56 @@
--source include/have_tokudb.inc
--source include/have_debug_sync.inc
--echo ### connection default
CREATE TABLE t (a INT PRIMARY KEY, b INT) ENGINE=TokuDB;
INSERT INTO t (a, b) VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5),
(6, 6), (7, 7), (8, 8), (9, 9), (10, 10),
(11, 11), (12, 12), (13, 13), (14, 14), (15, 15),
(16, 16), (17, 17), (18, 18), (19, 19), (20, 20);
--connect(con1, localhost, root)
--connect(con2, localhost, root)
--connection con1
--echo ### connection con1
SET DEBUG_SYNC= 'toku_range_lock_granted_immediately SIGNAL lock_granted WAIT_FOR lock_granted_continue';
--send UPDATE t SET b=1 WHERE a BETWEEN 5 AND 10
--connection default
--echo ### connection default
SET DEBUG_SYNC= 'now WAIT_FOR lock_granted';
--connection con2
--echo ### connection con2
--let $con2_id= `SELECT CONNECTION_ID()`
SET DEBUG_SYNC= 'toku_range_lock_before_wait SIGNAL lock_not_granted WAIT_FOR lock_not_granted_continue';
SET DEBUG_SYNC= 'toku_range_lock_not_granted_after_wait SIGNAL lock_not_granted_after_wait';
--send UPDATE t SET b=1 WHERE a BETWEEN 5 AND 10
--connection default
--echo ### connection default
SET DEBUG_SYNC= 'now SIGNAL lock_not_granted_continue WAIT_FOR lock_not_granted';
--disable_query_log
--eval KILL QUERY $con2_id
--enable_query_log
--echo KILL QUERY con2_id
SET DEBUG_SYNC= 'now SIGNAL lock_granted_continue WAIT_FOR lock_not_granted_after_wait';
--connection con1
--echo ### connection con1
--echo ### reap
--reap
--connection con2
--echo ### connection con2
--echo ### reap
--error ER_QUERY_INTERRUPTED
--reap
--connection default
--echo ### connection default
DROP TABLE t;

10
storage/tokudb/mysql-test/tokudb/t/locks-select-update-3.test

@ -1,12 +1,8 @@
source include/have_tokudb.inc;
source include/count_sessions.inc;
# test that select for update is executed with serializable isolation
SET DEFAULT_STORAGE_ENGINE='tokudb';
--disable_warnings
drop table if exists t;
--enable_warnings
create table t (a int primary key, b int);
create table t (a int primary key, b int) engine=tokudb;
insert into t values (1,0);
set session transaction isolation level read committed;
begin;
@ -15,6 +11,7 @@ select * from t where a=1 for update;
# t2 update
update t set b=b+1 where a=1;
connect(conn1,localhost,root);
set session tokudb_lock_timeout=60000;
set session transaction isolation level read committed;
begin;
# t2 select for update, should hang until t1 commits
@ -33,3 +30,4 @@ connection default;
disconnect conn1;
drop table t;
source include/wait_until_count_sessions.inc;

2
storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result

@ -53,8 +53,6 @@ Variable_name Value
Slave_open_temp_tables 0
### Continue backup
SET DEBUG_SYNC = "now SIGNAL ttlss_continue";
## Reset debug_sync points
SET DEBUG_SYNC = "RESET";
### Wait for backup finish
include/filter_file.inc
### Slave tokubackup_slave_info content:

3
storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc

@ -59,9 +59,6 @@ SHOW STATUS LIKE 'Slave_open_temp_tables';
--echo ### Continue backup
SET DEBUG_SYNC = "now SIGNAL ttlss_continue";
--echo ## Reset debug_sync points
SET DEBUG_SYNC = "RESET";
--connection slave_2
--echo ### Wait for backup finish
--reap

42
storage/tokudb/mysql-test/tokudb_rpl/include/rpl_tokudb_row_img_general_loop.inc

@ -0,0 +1,42 @@
#
# This is a helper script for rpl_row_img.test. It creates
# all combinations TokuDB / InnoDB in a three server replication
# chain. Each engine combination is tested against the current
# settings for binlog_row_image (on each server).
#
# The test script that is executed on every combination is the
# only argument to this wrapper script. See below.
#
# This script takes one parameter:
# - $row_img_test_script
# the name of the test script to include in every combination
#
# Sample usage:
# -- let $row_img_test_script= extra/rpl_tests/rpl_row_img.test
-- source include/rpl_tokudb_row_img_general_loop.inc
-- let $engine_type_a= 2
-- let $server_1_engine= TokuDB
while($engine_type_a)
{
-- let $engine_type_b= 2
-- let $server_2_engine= TokuDB
while($engine_type_b)
{
-- let $engine_type_c= 2
-- let $server_3_engine= TokuDB
while($engine_type_c)
{
-- echo ### engines: $server_1_engine, $server_2_engine, $server_3_engine
-- source $row_img_test_script
-- let $server_3_engine= InnoDB
-- dec $engine_type_c
}
-- let $server_2_engine= InnoDB
-- dec $engine_type_b
}
-- let $server_1_engine= InnoDB
-- dec $engine_type_a
}

32
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_not_null_tokudb.result

@ -6,25 +6,25 @@ Note #### Storing MySQL user name or password information in the master info rep
SET SQL_LOG_BIN= 0;
CREATE TABLE t1(`a` INT, `b` DATE DEFAULT NULL,
`c` INT DEFAULT NULL,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
CREATE TABLE t2(`a` INT, `b` DATE DEFAULT NULL,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
CREATE TABLE t3(`a` INT, `b` DATE DEFAULT NULL,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
CREATE TABLE t4(`a` INT, `b` DATE DEFAULT NULL,
`c` INT DEFAULT NULL,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
SET SQL_LOG_BIN= 1;
CREATE TABLE t1(`a` INT, `b` DATE DEFAULT NULL,
`c` INT DEFAULT NULL,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
CREATE TABLE t2(`a` INT, `b` DATE DEFAULT NULL,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
CREATE TABLE t3(`a` INT, `b` DATE DEFAULT '0000-00-00',
`c` INT DEFAULT 500,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
CREATE TABLE t4(`a` INT, `b` DATE DEFAULT '0000-00-00',
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
************* EXECUTION WITH INSERTS *************
INSERT INTO t1(a,b,c) VALUES (1, null, 1);
INSERT INTO t1(a,b,c) VALUES (2,'1111-11-11', 2);
@ -90,10 +90,10 @@ DROP TABLE t4;
include/sync_slave_sql_with_master.inc
SET SQL_LOG_BIN= 0;
CREATE TABLE t1 (`a` INT, `b` BIT DEFAULT NULL, `c` BIT DEFAULT NULL,
PRIMARY KEY (`a`)) ENGINE= Innodb;
PRIMARY KEY (`a`)) ENGINE= TokuDB;
SET SQL_LOG_BIN= 1;
CREATE TABLE t1 (`a` INT, `b` BIT DEFAULT b'01', `c` BIT DEFAULT NULL,
PRIMARY KEY (`a`)) ENGINE= Innodb;
PRIMARY KEY (`a`)) ENGINE= TokuDB;
************* EXECUTION WITH INSERTS *************
INSERT INTO t1(a,b,c) VALUES (1, null, b'01');
INSERT INTO t1(a,b,c) VALUES (2,b'00', b'01');
@ -138,21 +138,21 @@ include/sync_slave_sql_with_master.inc
################################################################################
SET SQL_LOG_BIN= 0;
CREATE TABLE t1(`a` INT NOT NULL, `b` INT,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
CREATE TABLE t2(`a` INT NOT NULL, `b` INT,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
CREATE TABLE t3(`a` INT NOT NULL, `b` INT,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
SET SQL_LOG_BIN= 1;
CREATE TABLE t1(`a` INT NOT NULL, `b` INT NOT NULL,
`c` INT NOT NULL,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
CREATE TABLE t2(`a` INT NOT NULL, `b` INT NOT NULL,
`c` INT,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
CREATE TABLE t3(`a` INT NOT NULL, `b` INT NOT NULL,
`c` INT DEFAULT 500,
PRIMARY KEY(`a`)) ENGINE=Innodb DEFAULT CHARSET=LATIN1;
PRIMARY KEY(`a`)) ENGINE=TokuDB DEFAULT CHARSET=LATIN1;
************* EXECUTION WITH INSERTS *************
INSERT INTO t1(a) VALUES (1);
INSERT INTO t1(a, b) VALUES (2, NULL);

40
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_row_basic_3tokudb.result

@ -3,7 +3,7 @@ Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
CREATE TABLE t1 (C1 CHAR(1), C2 CHAR(1), INDEX (C1)) ENGINE = 'INNODB' ;
CREATE TABLE t1 (C1 CHAR(1), C2 CHAR(1), INDEX (C1)) ENGINE = 'TokuDB' ;
SELECT * FROM t1;
C1 C2
include/sync_slave_sql_with_master.inc
@ -74,7 +74,7 @@ A B
A I
X Y
X Z
CREATE TABLE t2 (c1 INT, c12 char(1), c2 INT, PRIMARY KEY (c1)) ENGINE = 'INNODB' ;
CREATE TABLE t2 (c1 INT, c12 char(1), c2 INT, PRIMARY KEY (c1)) ENGINE = 'TokuDB' ;
INSERT INTO t2
VALUES (1,'A',2), (2,'A',4), (3,'A',9), (4,'A',15), (5,'A',25),
(6,'A',35), (7,'A',50), (8,'A',64), (9,'A',81);
@ -184,7 +184,7 @@ c1 c12 c2
7 A 49
9 A 81
UPDATE t2 SET c12='X';
CREATE TABLE t3 (C1 CHAR(1), C2 CHAR(1), pk1 INT, C3 CHAR(1), pk2 INT, PRIMARY KEY (pk1,pk2)) ENGINE = 'INNODB' ;
CREATE TABLE t3 (C1 CHAR(1), C2 CHAR(1), pk1 INT, C3 CHAR(1), pk2 INT, PRIMARY KEY (pk1,pk2)) ENGINE = 'TokuDB' ;
INSERT INTO t3 VALUES ('A','B',1,'B',1), ('X','Y',2,'B',1), ('X','X',3,'B',1);
INSERT INTO t3 VALUES ('A','C',1,'B',2), ('X','Z',2,'B',2), ('A','A',3,'B',2);
SELECT * FROM t3 ORDER BY C1,C2;
@ -232,7 +232,7 @@ A B 1 B 1
A I 1 B 2
X Y 2 B 1
X Z 2 B 2
CREATE TABLE t6 (C1 CHAR(1), C2 CHAR(1), C3 INT) ENGINE = 'INNODB' ;
CREATE TABLE t6 (C1 CHAR(1), C2 CHAR(1), C3 INT) ENGINE = 'TokuDB';
INSERT INTO t6 VALUES ('A','B',1), ('X','Y',2), ('X','X',3);
INSERT INTO t6 VALUES ('A','C',4), ('X','Z',5), ('A','A',6);
SELECT * FROM t6 ORDER BY C3;
@ -280,7 +280,7 @@ A B 1
X Y 2
A I 4
X Z 5
CREATE TABLE t5 (C1 CHAR(1), C2 CHAR(1), C3 INT PRIMARY KEY) ENGINE = 'INNODB' ;
CREATE TABLE t5 (C1 CHAR(1), C2 CHAR(1), C3 INT PRIMARY KEY) ENGINE = 'TokuDB' ;
INSERT INTO t5 VALUES ('A','B',1), ('X','Y',2), ('X','X',3);
INSERT INTO t5 VALUES ('A','C',4), ('X','Z',5), ('A','A',6);
UPDATE t5,t2,t3 SET t5.C2='Q', t2.c12='R', t3.C3 ='S' WHERE t5.C1 = t2.c12 AND t5.C1 = t3.C1;
@ -375,7 +375,7 @@ X Q 5 9 R 81 X Y 2 S 1
X Q 5 9 R 81 X Z 2 S 2
SET @saved_slave_type_conversions = @@SLAVE_TYPE_CONVERSIONS;
SET GLOBAL SLAVE_TYPE_CONVERSIONS = 'ALL_LOSSY';
CREATE TABLE t4 (C1 CHAR(1) PRIMARY KEY, B1 BIT(1), B2 BIT(1) NOT NULL DEFAULT 0, C2 CHAR(1) NOT NULL DEFAULT 'A') ENGINE = 'INNODB' ;
CREATE TABLE t4 (C1 CHAR(1) PRIMARY KEY, B1 BIT(1), B2 BIT(1) NOT NULL DEFAULT 0, C2 CHAR(1) NOT NULL DEFAULT 'A') ENGINE = 'TokuDB' ;
INSERT INTO t4 SET C1 = 1;
SELECT C1,HEX(B1),HEX(B2) FROM t4 ORDER BY C1;
C1 HEX(B1) HEX(B2)
@ -385,7 +385,7 @@ SELECT C1,HEX(B1),HEX(B2) FROM t4 ORDER BY C1;
C1 HEX(B1) HEX(B2)
1 NULL 0
SET GLOBAL SLAVE_TYPE_CONVERSIONS = @saved_slave_type_conversions;
CREATE TABLE t7 (C1 INT PRIMARY KEY, C2 INT) ENGINE = 'INNODB' ;
CREATE TABLE t7 (C1 INT PRIMARY KEY, C2 INT) ENGINE = 'TokuDB' ;
include/sync_slave_sql_with_master.inc
--- on slave: original values ---
INSERT INTO t7 VALUES (1,3), (2,6), (3,9);
@ -411,7 +411,7 @@ C1 C2
2 4
3 6
--- on master ---
CREATE TABLE t8 (a INT PRIMARY KEY, b INT UNIQUE, c INT UNIQUE) ENGINE = 'INNODB' ;
CREATE TABLE t8 (a INT PRIMARY KEY, b INT UNIQUE, c INT UNIQUE) ENGINE = 'TokuDB' ;
INSERT INTO t8 VALUES (99,99,99);
INSERT INTO t8 VALUES (99,22,33);
ERROR 23000: Duplicate entry '99' for key 'PRIMARY'
@ -480,33 +480,33 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7,t8;
include/sync_slave_sql_with_master.inc
CREATE TABLE t1 (i INT NOT NULL,
c CHAR(16) CHARACTER SET utf8 NOT NULL,
j INT NOT NULL) ENGINE = 'INNODB' ;
j INT NOT NULL) ENGINE = 'TokuDB' ;
CREATE TABLE t2 (i INT NOT NULL,
c CHAR(16) CHARACTER SET utf8 NOT NULL,
j INT NOT NULL) ENGINE = 'INNODB' ;
j INT NOT NULL) ENGINE = 'TokuDB' ;
include/sync_slave_sql_with_master.inc
ALTER TABLE t2 MODIFY c CHAR(128) CHARACTER SET utf8 NOT NULL;
CREATE TABLE t3 (i INT NOT NULL,
c CHAR(128) CHARACTER SET utf8 NOT NULL,
j INT NOT NULL) ENGINE = 'INNODB' ;
j INT NOT NULL) ENGINE = 'TokuDB' ;
include/sync_slave_sql_with_master.inc
ALTER TABLE t3 MODIFY c CHAR(16) CHARACTER SET utf8 NOT NULL;
CREATE TABLE t4 (i INT NOT NULL,
c CHAR(128) CHARACTER SET utf8 NOT NULL,
j INT NOT NULL) ENGINE = 'INNODB' ;
j INT NOT NULL) ENGINE = 'TokuDB' ;
CREATE TABLE t5 (i INT NOT NULL,
c CHAR(255) CHARACTER SET utf8 NOT NULL,
j INT NOT NULL) ENGINE = 'INNODB' ;
j INT NOT NULL) ENGINE = 'TokuDB' ;
include/sync_slave_sql_with_master.inc
ALTER TABLE t5 MODIFY c CHAR(16) CHARACTER SET utf8 NOT NULL;
CREATE TABLE t6 (i INT NOT NULL,
c CHAR(255) CHARACTER SET utf8 NOT NULL,
j INT NOT NULL) ENGINE = 'INNODB' ;
j INT NOT NULL) ENGINE = 'TokuDB' ;
include/sync_slave_sql_with_master.inc
ALTER TABLE t6 MODIFY c CHAR(128) CHARACTER SET utf8 NOT NULL;
CREATE TABLE t7 (i INT NOT NULL,
c CHAR(255) CHARACTER SET utf8 NOT NULL,
j INT NOT NULL) ENGINE = 'INNODB' ;
j INT NOT NULL) ENGINE = 'TokuDB' ;
SET @saved_slave_type_conversions = @@slave_type_conversions;
SET GLOBAL SLAVE_TYPE_CONVERSIONS = 'ALL_NON_LOSSY';
[expecting slave to replicate correctly]
@ -547,7 +547,7 @@ include/sync_slave_sql_with_master.inc
include/diff_tables.inc [master:t7, slave:t7]
drop table t1, t2, t3, t4, t5, t6, t7;
include/sync_slave_sql_with_master.inc
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE='INNODB' ;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE='TokuDB';
INSERT INTO t1 VALUES (1), (2), (3);
UPDATE t1 SET a = 10;
ERROR 23000: Duplicate entry '10' for key 'PRIMARY'
@ -575,7 +575,7 @@ KEY `date_key` (`date_key`),
KEY `time_key` (`time_key`),
KEY `datetime_key` (`datetime_key`),
KEY `varchar_key` (`varchar_key`)
) ENGINE='INNODB' ;
) ENGINE='TokuDB';
INSERT INTO t1 VALUES (1,8,5,'0000-00-00','0000-00-00','10:37:38','10:37:38','0000-00-00 00:00:00','0000-00-00 00:00:00','p','p'),(2,0,9,'0000-00-00','0000-00-00','00:00:00','00:00:00','2007-10-14 00:00:00','2007-10-14 00:00:00','d','d');
CREATE TABLE t2 (
`pk` int(11) NOT NULL AUTO_INCREMENT,
@ -595,7 +595,7 @@ KEY `date_key` (`date_key`),
KEY `time_key` (`time_key`),
KEY `datetime_key` (`datetime_key`),
KEY `varchar_key` (`varchar_key`)
) ENGINE='INNODB' ;
) ENGINE='TokuDB';
INSERT INTO t2 VALUES (1,1,6,'2005-12-23','2005-12-23','02:24:28','02:24:28','0000-00-00 00:00:00','0000-00-00 00:00:00','g','g'),(2,0,3,'2009-09-14','2009-09-14','00:00:00','00:00:00','2000-01-30 16:39:40','2000-01-30 16:39:40','q','q'),(3,0,3,'0000-00-00','0000-00-00','00:00:00','00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','c','c'),(4,1,6,'2007-03-29','2007-03-29','15:49:00','15:49:00','0000-00-00 00:00:00','0000-00-00 00:00:00','m','m'),(5,4,0,'2002-12-04','2002-12-04','00:00:00','00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','o','o'),(6,9,0,'2005-01-28','2005-01-28','00:00:00','00:00:00','2001-05-18 00:00:00','2001-05-18 00:00:00','w','w'),(7,6,0,'0000-00-00','0000-00-00','06:57:25','06:57:25','0000-00-00 00:00:00','0000-00-00 00:00:00','m','m'),(8,0,0,'0000-00-00','0000-00-00','00:00:00','00:00:00','0000-00-00 00:00:00','0000-00-00 00:00:00','z','z'),(9,4,6,'2006-08-15','2006-08-15','00:00:00','00:00:00','2002-04-12 14:44:25','2002-04-12 14:44:25','j','j'),(10,0,5,'2006-12-20','2006-12-20','10:13:53','10:13:53','2008-07-22 00:00:00','2008-07-22 00:00:00','y','y'),(11,9,7,'0000-00-00','0000-00-00','00:00:00','00:00:00','2004-07-05 00:00:00','2004-07-05 00:00:00','{','{'),(12,4,3,'2007-01-26','2007-01-26','23:00:51','23:00:51','2001-05-16 00:00:00','2001-05-16 00:00:00','f','f'),(13,7,0,'2004-03-27','2004-03-27','00:00:00','00:00:00','2005-01-24 03:30:37','2005-01-24 03:30:37','',''),(14,6,0,'2006-07-26','2006-07-26','18:43:57','18:43:57','0000-00-00 00:00:00','0000-00-00 00:00:00','{','{'),(15,0,6,'2000-01-14','2000-01-14','00:00:00','00:00:00','2000-09-21 00:00:00','2000-09-21 00:00:00','o','o'),(16,9,8,'0000-00-00','0000-00-00','21:15:08','21:15:08','0000-00-00 00:00:00','0000-00-00 00:00:00','a','a'),(17,2,0,'2004-10-27','2004-10-27','00:00:00','00:00:00','2004-03-24 22:13:43','2004-03-24 22:13:43','',''),(18,7,4,'0000-00-00','0000-00-00','08:38:27','08:38:27','2002-03-18 19:51:44','2002-03-18 
19:51:44','t','t'),(19,5,3,'2008-03-07','2008-03-07','03:29:07','03:29:07','2007-12-01 18:44:44','2007-12-01 18:44:44','t','t'),(20,0,0,'2002-04-09','2002-04-09','16:06:03','16:06:03','2009-04-22 00:00:00','2009-04-22 00:00:00','n','n');
DELETE FROM t2 WHERE `int_key` < 3 LIMIT 1;
UPDATE t1 SET `int_key` = 3 ORDER BY `pk` LIMIT 4;
@ -612,7 +612,7 @@ include/sync_slave_sql_with_master.inc
include/diff_tables.inc [master:t2, slave:t2]
DROP TABLE t1, t2;
EOF OF TESTS
CREATE TABLE t1 (a int) ENGINE='INNODB' ;
CREATE TABLE t1 (a int) ENGINE='TokuDB';
INSERT IGNORE INTO t1 VALUES (NULL);
INSERT INTO t1 ( a ) VALUES ( 0 );
INSERT INTO t1 ( a ) VALUES ( 9 );
@ -631,7 +631,7 @@ drop table t1;
include/sync_slave_sql_with_master.inc
SET @saved_slave_type_conversions = @@SLAVE_TYPE_CONVERSIONS;
SET GLOBAL SLAVE_TYPE_CONVERSIONS = 'ALL_LOSSY';
CREATE TABLE t1 (a bit) ENGINE='INNODB' ;
CREATE TABLE t1 (a bit) ENGINE='TokuDB';
INSERT IGNORE INTO t1 VALUES (NULL);
INSERT INTO t1 ( a ) VALUES ( 0 );
UPDATE t1 SET a = 0 WHERE a = 1 LIMIT 3;

15
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_commit_after_flush.result

@ -0,0 +1,15 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
CREATE TABLE t1 (a INT) ENGINE=tokudb;
begin;
insert into t1 values(1);
flush tables with read lock;
commit;
include/sync_slave_sql_with_master.inc
unlock tables;
drop table t1;
include/sync_slave_sql_with_master.inc
include/rpl_end.inc

554
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_insert_id.result

@ -0,0 +1,554 @@
#
# Setup
#
#
# See if queries that use both auto_increment and LAST_INSERT_ID()
# are replicated well
#
# We also check how the foreign_key_check variable is replicated
#
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
SET @old_concurrent_insert= @@global.concurrent_insert;
SET @@global.concurrent_insert= 0;
create table t1(a int auto_increment, key(a)) engine=tokudb;
create table t2(b int auto_increment, c int, key(b)) engine=tokudb;
insert into t1 values (1),(2),(3);
insert into t1 values (null);
insert into t2 values (null,last_insert_id());
include/sync_slave_sql_with_master.inc
select * from t1 ORDER BY a;
a
1
2
3
4
select * from t2 ORDER BY b;
b c
1 4
drop table t1;
drop table t2;
create table t1(a int auto_increment, key(a)) engine=tokudb;
create table t2(b int auto_increment, c int, key(b), foreign key(b) references t1(a)) engine=tokudb;
SET FOREIGN_KEY_CHECKS=0;
insert into t1 values (10);
insert into t1 values (null),(null),(null);
insert into t2 values (5,0);
insert into t2 values (null,last_insert_id());
SET FOREIGN_KEY_CHECKS=1;
include/sync_slave_sql_with_master.inc
select * from t1;
a
10
11
12
13
select * from t2;
b c
5 0
6 11
#
# check if INSERT SELECT in auto_increment is well replicated (bug #490)
#
drop table t2;
drop table t1;
create table t1(a int auto_increment, key(a)) engine=tokudb;
create table t2(b int auto_increment, c int, key(b)) engine=tokudb;
insert into t1 values (10);
insert into t1 values (null),(null),(null);
insert into t2 values (5,0);
insert into t2 (c) select * from t1 ORDER BY a;
select * from t2 ORDER BY b;
b c
5 0
6 10
7 11
8 12
9 13
include/sync_slave_sql_with_master.inc
select * from t1 ORDER BY a;
a
10
11
12
13
select * from t2 ORDER BY b;
b c
5 0
6 10
7 11
8 12
9 13
drop table t1;
drop table t2;
include/sync_slave_sql_with_master.inc
#
# Bug#8412: Error codes reported in binary log for CHARACTER SET,
# FOREIGN_KEY_CHECKS
#
SET TIMESTAMP=1000000000;
CREATE TABLE t1 ( a INT UNIQUE ) engine=tokudb;
SET FOREIGN_KEY_CHECKS=0;
INSERT INTO t1 VALUES (1),(1);
Got one of the listed errors
include/sync_slave_sql_with_master.inc
drop table t1;
include/sync_slave_sql_with_master.inc
#
# Bug#14553: NULL in WHERE resets LAST_INSERT_ID
#
set @@session.sql_auto_is_null=1;
create table t1(a int auto_increment, key(a)) engine=tokudb;
create table t2(a int) engine=tokudb;
insert into t1 (a) values (null);
insert into t2 (a) select a from t1 where a is null;
insert into t2 (a) select a from t1 where a is null;
select * from t2;
a
1
include/sync_slave_sql_with_master.inc
select * from t2;
a
1
drop table t1;
drop table t2;
#
# End of 4.1 tests
#
#
# BUG#15728: LAST_INSERT_ID function inside a stored function returns 0
#
# The solution is not to reset last_insert_id on enter to sub-statement.
#
drop function if exists bug15728;
drop function if exists bug15728_insert;
drop table if exists t1, t2;
create table t1 (
id int not null auto_increment,
last_id int,
primary key (id)
) engine=tokudb;
create function bug15728() returns int(11)
return last_insert_id();
insert into t1 (last_id) values (0);
insert into t1 (last_id) values (last_insert_id());
insert into t1 (last_id) values (bug15728());
create table t2 (
id int not null auto_increment,
last_id int,
primary key (id)
) engine=tokudb;
create function bug15728_insert() returns int(11) modifies sql data
begin
insert into t2 (last_id) values (bug15728());
return bug15728();
end|
create trigger t1_bi before insert on t1 for each row
begin
declare res int;
select bug15728_insert() into res;
set NEW.last_id = res;
end|
insert into t1 (last_id) values (0);
drop trigger t1_bi;
select last_insert_id();
last_insert_id()
4
select bug15728_insert();
bug15728_insert()
2
select last_insert_id();
last_insert_id()
4
insert into t1 (last_id) values (bug15728());
select last_insert_id();
last_insert_id()
5
drop procedure if exists foo;
create procedure foo()
begin
declare res int;
insert into t2 (last_id) values (bug15728());
insert into t1 (last_id) values (bug15728());
end|
call foo();
select * from t1;
id last_id
1 0
2 1
3 2
4 1
5 4
6 3
select * from t2;
id last_id
1 3
2 4
3 5
include/sync_slave_sql_with_master.inc
select * from t1;
id last_id
1 0
2 1
3 2
4 1
5 4
6 3
select * from t2;
id last_id
1 3
2 4
3 5
drop function bug15728;
drop function bug15728_insert;
drop table t1,t2;
drop procedure foo;
create table t1 (n int primary key auto_increment not null,
b int, unique(b)) engine=tokudb;
set sql_log_bin=0;
insert into t1 values(null,100);
replace into t1 values(null,50),(null,100),(null,150);
select * from t1 order by n;
n b
2 50
3 100
4 150
truncate table t1;
set sql_log_bin=1;
insert into t1 values(null,100);
select * from t1 order by n;
n b
1 100
include/sync_slave_sql_with_master.inc
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
select * from t1 order by n;
n b
1 100
replace into t1 values(null,100),(null,350);
select * from t1 order by n;
n b
2 100
3 350
include/sync_slave_sql_with_master.inc
select * from t1 order by n;
n b
2 100
3 350
insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
select * from t1 order by n;
n b
2 100
4 400
5 600
1000 350
include/sync_slave_sql_with_master.inc
select * from t1 order by n;
n b
2 100
4 400
5 600
1000 350
drop table t1;
create table t1 (n int primary key auto_increment not null,
b int, unique(b)) engine=tokudb;
insert into t1 values(null,100);
select * from t1 order by n;
n b
1 100
include/sync_slave_sql_with_master.inc
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
select * from t1 order by n;
n b
1 100
insert into t1 values(null,100),(null,350) on duplicate key update n=2;
select * from t1 order by n;
n b
2 100
3 350
include/sync_slave_sql_with_master.inc
select * from t1 order by n;
n b
2 100
3 350
drop table t1;
include/sync_slave_sql_with_master.inc
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b INT,
UNIQUE(b)) ENGINE=tokudb;
INSERT INTO t1(b) VALUES(1),(1),(2) ON DUPLICATE KEY UPDATE t1.b=10;
SELECT * FROM t1 ORDER BY a;
a b
1 10
2 2
include/sync_slave_sql_with_master.inc
SELECT * FROM t1 ORDER BY a;
a b
1 10
2 2
drop table t1;
CREATE TABLE t1 (
id bigint(20) unsigned NOT NULL auto_increment,
field_1 int(10) unsigned NOT NULL,
field_2 varchar(255) NOT NULL,
field_3 varchar(255) NOT NULL,
PRIMARY KEY (id),
UNIQUE KEY field_1 (field_1, field_2)
) ENGINE=tokudb;
CREATE TABLE t2 (
field_a int(10) unsigned NOT NULL,
field_b varchar(255) NOT NULL,
field_c varchar(255) NOT NULL
) ENGINE=tokudb;
INSERT INTO t2 (field_a, field_b, field_c) VALUES (1, 'a', '1a');
INSERT INTO t2 (field_a, field_b, field_c) VALUES (2, 'b', '2b');
INSERT INTO t2 (field_a, field_b, field_c) VALUES (3, 'c', '3c');
INSERT INTO t2 (field_a, field_b, field_c) VALUES (4, 'd', '4d');
INSERT INTO t2 (field_a, field_b, field_c) VALUES (5, 'e', '5e');
INSERT INTO t1 (field_1, field_2, field_3)
SELECT t2.field_a, t2.field_b, t2.field_c
FROM t2
ON DUPLICATE KEY UPDATE
t1.field_3 = t2.field_c;
INSERT INTO t2 (field_a, field_b, field_c) VALUES (6, 'f', '6f');
INSERT INTO t1 (field_1, field_2, field_3)
SELECT t2.field_a, t2.field_b, t2.field_c
FROM t2
ON DUPLICATE KEY UPDATE
t1.field_3 = t2.field_c;
SELECT * FROM t1 ORDER BY id;
id field_1 field_2 field_3
1 1 a 1a
2 2 b 2b
3 3 c 3c
4 4 d 4d
5 5 e 5e
8 6 f 6f
include/sync_slave_sql_with_master.inc
SELECT * FROM t1 ORDER BY id;
id field_1 field_2 field_3
1 1 a 1a
2 2 b 2b
3 3 c 3c
4 4 d 4d
5 5 e 5e
8 6 f 6f
drop table t1, t2;
DROP PROCEDURE IF EXISTS p1;
DROP TABLE IF EXISTS t1, t2;
SELECT LAST_INSERT_ID(0);
LAST_INSERT_ID(0)
0
CREATE TABLE t1 (
id INT NOT NULL DEFAULT 0,
last_id INT,
PRIMARY KEY (id)
) ENGINE=tokudb;
CREATE TABLE t2 (
id INT NOT NULL AUTO_INCREMENT,
last_id INT,
PRIMARY KEY (id)
) ENGINE=tokudb;
CREATE PROCEDURE p1()
BEGIN
INSERT INTO t2 (last_id) VALUES (LAST_INSERT_ID());
INSERT INTO t1 (last_id) VALUES (LAST_INSERT_ID());
END|
CALL p1();
SELECT * FROM t1 ORDER BY id;
id last_id
0 1
SELECT * FROM t2 ORDER BY id;
id last_id
1 0
include/sync_slave_sql_with_master.inc
SELECT * FROM t1 ORDER BY id;
id last_id
0 1
SELECT * FROM t2 ORDER BY id;
id last_id
1 0
DROP PROCEDURE p1;
DROP TABLE t1, t2;
DROP PROCEDURE IF EXISTS p1;
DROP FUNCTION IF EXISTS f1;
DROP FUNCTION IF EXISTS f2;
DROP FUNCTION IF EXISTS f3;
DROP TABLE IF EXISTS t1, t2;
CREATE TABLE t1 (
i INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
j INT DEFAULT 0
) ENGINE=tokudb;
CREATE TABLE t2 (i INT) ENGINE=tokudb;
CREATE PROCEDURE p1()
BEGIN
INSERT INTO t1 (i) VALUES (NULL);
INSERT INTO t2 (i) VALUES (LAST_INSERT_ID());
INSERT INTO t1 (i) VALUES (NULL), (NULL);
INSERT INTO t2 (i) VALUES (LAST_INSERT_ID());
END |
CREATE FUNCTION f1() RETURNS INT MODIFIES SQL DATA
BEGIN
INSERT INTO t1 (i) VALUES (NULL);
INSERT INTO t2 (i) VALUES (LAST_INSERT_ID());
INSERT INTO t1 (i) VALUES (NULL), (NULL);
INSERT INTO t2 (i) VALUES (LAST_INSERT_ID());
RETURN 0;
END |
CREATE FUNCTION f2() RETURNS INT NOT DETERMINISTIC
RETURN LAST_INSERT_ID() |
CREATE FUNCTION f3() RETURNS INT MODIFIES SQL DATA
BEGIN
INSERT INTO t2 (i) VALUES (LAST_INSERT_ID());
RETURN 0;
END |
INSERT INTO t1 VALUES (NULL, -1);
CALL p1();
SELECT f1();
f1()
0
INSERT INTO t1 VALUES (NULL, f2()), (NULL, LAST_INSERT_ID()),
(NULL, LAST_INSERT_ID()), (NULL, f2()), (NULL, f2());
INSERT INTO t1 VALUES (NULL, f2());
INSERT INTO t1 VALUES (NULL, 0), (NULL, LAST_INSERT_ID());
UPDATE t1 SET j= -1 WHERE i IS NULL;
INSERT INTO t1 (i) VALUES (NULL);
INSERT INTO t1 (i) VALUES (NULL);
SELECT f3();
f3()
0
SELECT * FROM t1 ORDER BY i;
i j
1 -1
2 0
3 0
4 0
5 0
6 0
7 0
8 3
9 3
10 3
11 3
12 3
13 8
14 -1
15 13
16 0
17 0
SELECT * FROM t2 ORDER BY i;
i
2
3
5
6
16
include/sync_slave_sql_with_master.inc
SELECT * FROM t1;
i j
1 -1
2 0
3 0
4 0
5 0
6 0
7 0
8 3
9 3
10 3
11 3
12 3
13 8
14 -1
15 13
16 0
17 0
SELECT * FROM t2;
i
2
3
5
6
16
DROP PROCEDURE p1;
DROP FUNCTION f1;
DROP FUNCTION f2;
DROP FUNCTION f3;
DROP TABLE t1, t2;
include/sync_slave_sql_with_master.inc
#
# End of 5.0 tests
#
create table t2 (
id int not null auto_increment,
last_id int,
primary key (id)
) engine=tokudb;
truncate table t2;
create table t1 (id tinyint primary key) engine=tokudb;
create function insid() returns int
begin
insert into t2 (last_id) values (0);
return 0;
end|
set sql_log_bin=0;
insert into t2 (id) values(1),(2),(3);
delete from t2;
set sql_log_bin=1;
select insid();
insid()
0
set sql_log_bin=0;
insert into t2 (id) values(5),(6),(7);
delete from t2 where id>=5;
set sql_log_bin=1;
insert into t1 select insid();
select * from t1 order by id;
id
0
select * from t2 order by id;
id last_id
4 0
8 0
include/sync_slave_sql_with_master.inc
select * from t1 order by id;
id
0
select * from t2 order by id;
id last_id
4 0
8 0
drop table t1;
drop function insid;
truncate table t2;
create table t1 (n int primary key auto_increment not null,
b int, unique(b)) engine=tokudb;
create procedure foo()
begin
insert into t1 values(null,10);
insert ignore into t1 values(null,10);
insert ignore into t1 values(null,10);
insert into t2 values(null,3);
end|
call foo();
select * from t1 order by n;
n b
1 10
select * from t2 order by id;
id last_id
1 3
include/sync_slave_sql_with_master.inc
select * from t1 order by n;
n b
1 10
select * from t2 order by id;
id last_id
1 3
drop table t1, t2;
drop procedure foo;
SET @@global.concurrent_insert= @old_concurrent_insert;
set @@session.sql_auto_is_null=default;
include/rpl_end.inc

82
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_insert_id_pk.result

@ -0,0 +1,82 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.");
create table t1(a int auto_increment, primary key(a));
create table t2(b int auto_increment, c int, primary key(b));
insert into t1 values (1),(2),(3);
insert into t1 values (null);
insert into t2 values (null,last_insert_id());
include/sync_slave_sql_with_master.inc
select * from t1 ORDER BY a;
a
1
2
3
4
select * from t2 ORDER BY b;
b c
1 4
drop table t1;
drop table t2;
create table t1(a int auto_increment, key(a)) engine=tokudb;
create table t2(b int auto_increment, c int, key(b), foreign key(b) references t1(a)) engine=tokudb;
SET FOREIGN_KEY_CHECKS=0;
insert into t1 values (10);
insert into t1 values (null),(null),(null);
insert into t2 values (5,0);
insert into t2 values (null,last_insert_id());
SET FOREIGN_KEY_CHECKS=1;
include/sync_slave_sql_with_master.inc
select * from t1;
a
10
11
12
13
select * from t2;
b c
5 0
6 11
drop table t2;
drop table t1;
create table t1(a int auto_increment, primary key(a));
create table t2(b int auto_increment, c int, primary key(b));
insert into t1 values (10);
insert into t1 values (null),(null),(null);
insert into t2 values (5,0);
insert into t2 (c) select * from t1 ORDER BY a;
select * from t2 ORDER BY b;
b c
5 0
6 10
7 11
8 12
9 13
include/sync_slave_sql_with_master.inc
select * from t1 ORDER BY a;
a
10
11
12
13
select * from t2 ORDER BY b;
b c
5 0
6 10
7 11
8 12
9 13
drop table t1;
drop table t2;
include/sync_slave_sql_with_master.inc
SET TIMESTAMP=1000000000;
CREATE TABLE t1 ( a INT UNIQUE );
SET FOREIGN_KEY_CHECKS=0;
INSERT INTO t1 VALUES (1),(1);
Got one of the listed errors
include/sync_slave_sql_with_master.inc
drop table t1;
include/rpl_end.inc

29
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_multi_update.result

@ -0,0 +1,29 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.');
CREATE TABLE t1 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
CREATE TABLE t2 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
INSERT INTO t1 VALUES (NULL, 0);
INSERT INTO t1 SELECT NULL, 0 FROM t1;
INSERT INTO t2 VALUES (NULL, 0), (NULL,1);
SELECT * FROM t1 ORDER BY a;
a b
1 0
2 0
SELECT * FROM t2 ORDER BY a;
a b
1 0
2 1
UPDATE t1, t2 SET t1.b = t2.b WHERE t1.a = t2.a;
include/sync_slave_sql_with_master.inc
drop table t1, t2;
include/rpl_end.inc

60
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_multi_update2.result

@ -0,0 +1,60 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT');
drop table if exists t1,t2;
CREATE TABLE t1 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
CREATE TABLE t2 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
INSERT INTO t1 VALUES (NULL, 0);
INSERT INTO t1 SELECT NULL, 0 FROM t1;
INSERT INTO t2 VALUES (NULL, 0), (NULL,1);
SELECT * FROM t1 ORDER BY a;
a b
1 0
2 0
SELECT * FROM t2 ORDER BY a;
a b
1 0
2 1
UPDATE t1, t2 SET t1.b = (t2.b+4) WHERE t1.a = t2.a;
SELECT * FROM t1 ORDER BY a;
a b
1 4
2 5
SELECT * FROM t2 ORDER BY a;
a b
1 0
2 1
SELECT * FROM t1 ORDER BY a;
a b
1 4
2 5
SELECT * FROM t2 ORDER BY a;
a b
1 0
2 1
drop table t1,t2;
include/sync_slave_sql_with_master.inc
reset master;
RESET MASTER;
CREATE TABLE t1 ( a INT );
INSERT INTO t1 VALUES (0);
UPDATE t1, (SELECT 3 as b) AS x SET t1.a = x.b;
select * from t1;
a
3
include/sync_slave_sql_with_master.inc
select * from t1;
a
3
drop table t1;
include/sync_slave_sql_with_master.inc
include/rpl_end.inc

202
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_multi_update3.result

@ -0,0 +1,202 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.');
-------- Test for BUG#9361 --------
CREATE TABLE t1 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
CREATE TABLE t2 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
INSERT INTO t1 VALUES (NULL, 0);
INSERT INTO t1 SELECT NULL, 0 FROM t1;
INSERT INTO t2 VALUES (NULL, 0), (NULL,1);
SELECT * FROM t1 ORDER BY a;
a b
1 0
2 0
SELECT * FROM t2 ORDER BY a;
a b
1 0
2 1
UPDATE t2, (SELECT a FROM t1 ORDER BY a) AS t SET t2.b = t.a+5 ;
SELECT * FROM t1 ORDER BY a;
a b
1 0
2 0
SELECT * FROM t2 ORDER BY a;
a b
1 6
2 6
include/sync_slave_sql_with_master.inc
SELECT * FROM t1 ORDER BY a;
a b
1 0
2 0
SELECT * FROM t2 ORDER BY a;
a b
1 6
2 6
drop table t1,t2;
-------- Test 1 for BUG#9361 --------
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
CREATE TABLE t1 (
a1 char(30),
a2 int,
a3 int,
a4 char(30),
a5 char(30)
);
CREATE TABLE t2 (
b1 int,
b2 char(30)
);
INSERT INTO t1 VALUES ('Yes', 1, NULL, 'foo', 'bar');
INSERT INTO t2 VALUES (1, 'baz');
UPDATE t1 a, t2
SET a.a1 = 'No'
WHERE a.a2 =
(SELECT b1
FROM t2
WHERE b2 = 'baz')
AND a.a3 IS NULL
AND a.a4 = 'foo'
AND a.a5 = 'bar';
include/sync_slave_sql_with_master.inc
SELECT * FROM t1;
a1 a2 a3 a4 a5
No 1 NULL foo bar
SELECT * FROM t2;
b1 b2
1 baz
DROP TABLE t1, t2;
-------- Test 2 for BUG#9361 --------
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
DROP TABLE IF EXISTS t3;
CREATE TABLE t1 (
i INT,
j INT,
x INT,
y INT,
z INT
);
CREATE TABLE t2 (
i INT,
k INT,
x INT,
y INT,
z INT
);
CREATE TABLE t3 (
j INT,
k INT,
x INT,
y INT,
z INT
);
INSERT INTO t1 VALUES ( 1, 2,13,14,15);
INSERT INTO t2 VALUES ( 1, 3,23,24,25);
INSERT INTO t3 VALUES ( 2, 3, 1,34,35), ( 2, 3, 1,34,36);
UPDATE t1 AS a
INNER JOIN t2 AS b
ON a.i = b.i
INNER JOIN t3 AS c
ON a.j = c.j AND b.k = c.k
SET a.x = b.x,
a.y = b.y,
a.z = (
SELECT sum(z)
FROM t3
WHERE y = 34
)
WHERE b.x = 23;
include/sync_slave_sql_with_master.inc
SELECT * FROM t1;
i j x y z
1 2 23 24 71
DROP TABLE t1, t2, t3;
DROP TABLE IF EXISTS t1;
Warnings:
Note 1051 Unknown table 'test.t1'
DROP TABLE IF EXISTS t2;
Warnings:
Note 1051 Unknown table 'test.t2'
CREATE TABLE t1 (
idp int(11) NOT NULL default '0',
idpro int(11) default NULL,
price decimal(19,4) default NULL,
PRIMARY KEY (idp)
);
CREATE TABLE t2 (
idpro int(11) NOT NULL default '0',
price decimal(19,4) default NULL,
nbprice int(11) default NULL,
PRIMARY KEY (idpro)
);
INSERT INTO t1 VALUES
(1,1,'3.0000'),
(2,2,'1.0000'),
(3,1,'1.0000'),
(4,1,'4.0000'),
(5,3,'2.0000'),
(6,2,'4.0000');
INSERT INTO t2 VALUES
(1,'0.0000',0),
(2,'0.0000',0),
(3,'0.0000',0);
update
t2
join
( select idpro, min(price) as min_price, count(*) as nbr_price
from t1
where idpro>0 and price>0
group by idpro
) as table_price
on t2.idpro = table_price.idpro
set t2.price = table_price.min_price,
t2.nbprice = table_price.nbr_price;
select "-- MASTER AFTER JOIN --" as "";
-- MASTER AFTER JOIN --
select * from t1;
idp idpro price
1 1 3.0000
2 2 1.0000
3 1 1.0000
4 1 4.0000
5 3 2.0000
6 2 4.0000
select * from t2;
idpro price nbprice
1 1.0000 3
2 1.0000 2
3 2.0000 1
include/sync_slave_sql_with_master.inc
select "-- SLAVE AFTER JOIN --" as "";
-- SLAVE AFTER JOIN --
select * from t1;
idp idpro price
1 1 3.0000
2 2 1.0000
3 1 1.0000
4 1 4.0000
5 3 2.0000
6 2 4.0000
select * from t2;
idpro price nbprice
1 1.0000 3
2 1.0000 2
3 2.0000 1
DROP TABLE t1, t2;
include/rpl_end.inc

2183
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_crash_safe.result
File diff suppressed because it is too large
View File

4739
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_blobs.result
File diff suppressed because it is too large
View File

3681
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_eng_full.result
File diff suppressed because it is too large
View File

3522
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_eng_min.result
File diff suppressed because it is too large
View File

3522
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_eng_noblob.result
File diff suppressed because it is too large
View File

3505
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_idx_full.result
File diff suppressed because it is too large
View File

3530
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_idx_min.result
File diff suppressed because it is too large
View File

3530
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_img_idx_noblob.result
File diff suppressed because it is too large
View File

275
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_log.result

@ -0,0 +1,275 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
include/sync_slave_sql_with_master.inc
include/stop_slave.inc
include/wait_for_slave_to_stop.inc
reset master;
reset slave;
start slave;
include/wait_for_slave_to_start.inc
create table t1(n int not null auto_increment primary key)ENGINE=TokuDB;
insert into t1 values (NULL);
drop table t1;
create table t1 (word char(20) not null)ENGINE=TokuDB;
load data infile 'LOAD_FILE' into table t1 ignore 1 lines;
select count(*) from t1;
count(*)
69
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
flush logs;
create table t3 (a int)ENGINE=TokuDB;
select * from t1 order by 1 asc;
word
Aarhus
Aaron
Aaron
Ababa
Ababa
aback
aback
abaft
abaft
abandon
abandon
abandoned
abandoned
abandoning
abandoning
abandonment
abandonment
abandons
abandons
abase
abased
abasement
abasements
abases
abash
abashed
abashes
abashing
abasing
abate
abated
abatement
abatements
abater
abates
abating
Abba
abbe
abbey
abbeys
abbot
abbots
Abbott
abbreviate
abbreviated
abbreviates
abbreviating
abbreviation
abbreviations
Abby
abdomen
abdomens
abdominal
abduct
abducted
abduction
abductions
abductor
abductors
abducts
Abe
abed
Abel
Abelian
Abelson
Aberdeen
Abernathy
aberrant
aberration
include/sync_slave_sql_with_master.inc
select * from t1 order by 1 asc;
word
Aarhus
Aaron
Aaron
Ababa
Ababa
aback
aback
abaft
abaft
abandon
abandon
abandoned
abandoned
abandoning
abandoning
abandonment
abandonment
abandons
abandons
abase
abased
abasement
abasements
abases
abash
abashed
abashes
abashing
abasing
abate
abated
abatement
abatements
abater
abates
abating
Abba
abbe
abbey
abbeys
abbot
abbots
Abbott
abbreviate
abbreviated
abbreviates
abbreviating
abbreviation
abbreviations
Abby
abdomen
abdomens
abdominal
abduct
abducted
abduction
abductions
abductor
abductors
abducts
Abe
abed
Abel
Abelian
Abelson
Aberdeen
Abernathy
aberrant
aberration
flush logs;
include/stop_slave.inc
include/start_slave.inc
create table t2 (n int)ENGINE=TokuDB;
insert into t2 values (1);
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Rotate # # master-bin.000002;pos=POS
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000002 # Query # # use `test`; create table t3 (a int)ENGINE=TokuDB
master-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=TokuDB
master-bin.000002 # Query # # BEGIN
master-bin.000002 # Table_map # # table_id: # (test.t2)
master-bin.000002 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000002 # Xid # # COMMIT /* XID */
show binary logs;
Log_name File_size
master-bin.000001 #
master-bin.000002 #
include/sync_slave_sql_with_master.inc
show binary logs;
Log_name File_size
slave-bin.000001 #
slave-bin.000002 #
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
slave-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
slave-bin.000001 # Query # # BEGIN
slave-bin.000001 # Table_map # # table_id: # (test.t1)
slave-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
slave-bin.000001 # Xid # # COMMIT /* XID */
slave-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
slave-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
slave-bin.000001 # Query # # BEGIN
slave-bin.000001 # Table_map # # table_id: # (test.t1)
slave-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
slave-bin.000001 # Xid # # COMMIT /* XID */
slave-bin.000001 # Query # # use `test`; create table t3 (a int)ENGINE=TokuDB
slave-bin.000001 # Rotate # # slave-bin.000002;pos=POS
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
slave-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=TokuDB
slave-bin.000002 # Query # # BEGIN
slave-bin.000002 # Table_map # # table_id: # (test.t2)
slave-bin.000002 # Write_rows # # table_id: # flags: STMT_END_F
slave-bin.000002 # Xid # # COMMIT /* XID */
include/check_slave_is_running.inc
show binlog events in 'slave-bin.000005' from 4;
ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Could not find target log
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
include/rpl_reset.inc
create table t1(a int auto_increment primary key, b int);
insert into t1 values (NULL, 1);
set insert_id=5;
insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id());
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; create table t1(a int auto_increment primary key, b int)
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
select * from t1;
a b
1 1
5 1
6 1
drop table t1;
include/sync_slave_sql_with_master.inc
include/rpl_end.inc

51
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_lower_case_table_names.result

@ -0,0 +1,51 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
******** [ MASTER ] ********
CREATE DATABASE BUG_37656;
use BUG_37656;
show databases like 'BUG_37656';
Database (BUG_37656)
BUG_37656
******** [ SLAVE ] ********
show databases like 'bug_37656';
Database (bug_37656)
bug_37656
******** [ MASTER ] ********
CREATE TABLE T1 (a int);
CREATE TABLE T2 (b int) ENGINE=TokuDB;
CREATE TABLE T3 (txt TEXT);
show tables;
Tables_in_BUG_37656
T1
T2
T3
******** [ SLAVE ] ********
use bug_37656;
show tables;
Tables_in_bug_37656
t2
t3
CREATE TABLE t1 (a INT);
******** [ MASTER ] ********
use BUG_37656;
INSERT INTO T1 VALUES (1);
INSERT INTO T2 VALUES (1);
use test;
INSERT INTO BUG_37656.T1 VALUES (2);
INSERT INTO BUG_37656.T2 VALUES (2);
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE BUG_37656.T3;
******** [ SLAVE ] ********
include/diff_tables.inc [master:BUG_37656.T2, slave:bug_37656.t2]
include/diff_tables.inc [master:BUG_37656.T3, slave:bug_37656.t3]
******** [ MASTER ] ********
DROP DATABASE BUG_37656;
include/rpl_reset.inc
CREATE DATABASE B50653;
USE B50653;
CREATE PROCEDURE b50653_proc() BEGIN SELECT 1; END;
DROP PROCEDURE b50653_proc;
DROP DATABASE B50653;
include/rpl_end.inc

60
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_sp003.result

@ -0,0 +1,60 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP TABLE IF EXISTS test.t1;
CREATE TABLE test.t1(a INT,PRIMARY KEY(a))ENGINE=TOKUDB;
CREATE PROCEDURE test.p1()
BEGIN
INSERT INTO test.t1 VALUES (4);
SELECT get_lock("test", 100);
UPDATE test.t1 set a=a+4 WHERE a=4;
END|
CREATE PROCEDURE test.p2()
BEGIN
UPDATE test.t1 SET a=a+1;
END|
SELECT get_lock("test", 200);
get_lock("test", 200)
1
CALL test.p1();
CALL test.p2();
SELECT release_lock("test");
release_lock("test")
1
get_lock("test", 100)
1
SELECT release_lock("test");
release_lock("test")
1
SELECT * FROM test.t1;
a
5
include/sync_slave_sql_with_master.inc
SELECT * FROM test.t1;
a
5
DROP TABLE IF EXISTS test.t1;
CREATE TABLE test.t1(a INT,PRIMARY KEY(a))ENGINE=TOKUDB;
CALL test.p2();
CALL test.p1();
get_lock("test", 100)
1
SELECT release_lock("test");
release_lock("test")
1
SELECT * FROM test.t1;
a
8
include/sync_slave_sql_with_master.inc
SELECT * FROM test.t1;
a
8
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP TABLE IF EXISTS test.t1;
include/sync_slave_sql_with_master.inc
include/rpl_end.inc

47
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_sp006.result

@ -0,0 +1,47 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
DROP PROCEDURE IF EXISTS p1;
DROP PROCEDURE IF EXISTS p2;
CREATE TABLE IF NOT EXISTS t1(name CHAR(16), birth DATE,PRIMARY KEY(name))ENGINE=TokuDB;
CREATE TABLE IF NOT EXISTS t2(name CHAR(16), age INT ,PRIMARY KEY(name))ENGINE=TokuDB;
CREATE PROCEDURE p1()
BEGIN
DECLARE done INT DEFAULT 0;
DECLARE spa CHAR(16);
DECLARE spb INT;
DECLARE cur1 CURSOR FOR SELECT name,
(YEAR(CURDATE())-YEAR(birth))-(RIGHT(CURDATE(),5)<RIGHT(birth,5))
FROM t1;
DECLARE CONTINUE HANDLER FOR SQLSTATE '02000' SET done = 1;
OPEN cur1;
SET AUTOCOMMIT=0;
REPEAT
FETCH cur1 INTO spa, spb;
IF NOT done THEN
START TRANSACTION;
INSERT INTO t2 VALUES (spa,spb);
COMMIT;
END IF;
UNTIL done END REPEAT;
SET AUTOCOMMIT=1;
CLOSE cur1;
END|
CREATE PROCEDURE p2()
BEGIN
INSERT INTO t1 VALUES ('MySQL','1993-02-04'),('ROCKS', '1990-08-27'),('Texas', '1999-03-30'),('kyle','2005-1-1');
END|
CALL p2();
include/sync_slave_sql_with_master.inc
CALL p1();
include/sync_slave_sql_with_master.inc
DROP TABLE t1;
DROP TABLE t2;
DROP PROCEDURE p1;
DROP PROCEDURE p2;
include/sync_slave_sql_with_master.inc
include/rpl_end.inc

32
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_row_trig004.result

@ -0,0 +1,32 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
DROP TRIGGER test.t1_bi_t2;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
CREATE TABLE test.t1 (n MEDIUMINT NOT NULL AUTO_INCREMENT, d FLOAT, PRIMARY KEY(n))ENGINE=TOKUDB;
CREATE TABLE test.t2 (n MEDIUMINT NOT NULL, f FLOAT, PRIMARY KEY(n))ENGINE=TOKUDB;
CREATE TRIGGER test.t1_bi_t2 BEFORE INSERT ON test.t2 FOR EACH ROW INSERT INTO test.t1 VALUES (NULL, 1.234)//
INSERT INTO test.t2 VALUES (1, 0.0);
INSERT INTO test.t2 VALUES (1, 0.0);
Got one of the listed errors
select * from test.t1;
n d
1 1.234
select * from test.t2;
n f
1 0
include/sync_slave_sql_with_master.inc
select * from test.t1;
n d
1 1.234
select * from test.t2;
n f
1 0
DROP TRIGGER test.t1_bi_t2;
DROP TABLE test.t1;
DROP TABLE test.t2;
include/sync_slave_sql_with_master.inc
include/rpl_end.inc

274
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_stm_log.result

@ -0,0 +1,274 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
include/sync_slave_sql_with_master.inc
include/stop_slave.inc
include/wait_for_slave_to_stop.inc
reset master;
reset slave;
start slave;
include/wait_for_slave_to_start.inc
create table t1(n int not null auto_increment primary key)ENGINE=TokuDB;
insert into t1 values (NULL);
drop table t1;
create table t1 (word char(20) not null)ENGINE=TokuDB;
load data infile 'LOAD_FILE' into table t1 ignore 1 lines;
select count(*) from t1;
count(*)
69
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Intvar # # INSERT_ID=1
master-bin.000001 # Query # # use `test`; insert into t1 values (NULL)
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Begin_load_query # # ;file_id=#;block_len=#
master-bin.000001 # Execute_load_query # # use `test`; LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE `t1` FIELDS TERMINATED BY '\t' ENCLOSED BY '' ESCAPED BY '\\' LINES TERMINATED BY '\n' IGNORE 1 LINES (`word`) ;file_id=#
master-bin.000001 # Xid # # COMMIT /* XID */
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Intvar # # INSERT_ID=1
master-bin.000001 # Query # # use `test`; insert into t1 values (NULL)
master-bin.000001 # Xid # # COMMIT /* XID */
flush logs;
create table t3 (a int)ENGINE=TokuDB;
select * from t1 order by 1 asc;
word
Aarhus
Aaron
Aaron
Ababa
Ababa
aback
aback
abaft
abaft
abandon
abandon
abandoned
abandoned
abandoning
abandoning
abandonment
abandonment
abandons
abandons
abase
abased
abasement
abasements
abases
abash
abashed
abashes
abashing
abasing
abate
abated
abatement
abatements
abater
abates
abating
Abba
abbe
abbey
abbeys
abbot
abbots
Abbott
abbreviate
abbreviated
abbreviates
abbreviating
abbreviation
abbreviations
Abby
abdomen
abdomens
abdominal
abduct
abducted
abduction
abductions
abductor
abductors
abducts
Abe
abed
Abel
Abelian
Abelson
Aberdeen
Abernathy
aberrant
aberration
include/sync_slave_sql_with_master.inc
select * from t1 order by 1 asc;
word
Aarhus
Aaron
Aaron
Ababa
Ababa
aback
aback
abaft
abaft
abandon
abandon
abandoned
abandoned
abandoning
abandoning
abandonment
abandonment
abandons
abandons
abase
abased
abasement
abasements
abases
abash
abashed
abashes
abashing
abasing
abate
abated
abatement
abatements
abater
abates
abating
Abba
abbe
abbey
abbeys
abbot
abbots
Abbott
abbreviate
abbreviated
abbreviates
abbreviating
abbreviation
abbreviations
Abby
abdomen
abdomens
abdominal
abduct
abducted
abduction
abductions
abductor
abductors
abducts
Abe
abed
Abel
Abelian
Abelson
Aberdeen
Abernathy
aberrant
aberration
flush logs;
include/stop_slave.inc
include/start_slave.inc
create table t2 (n int)ENGINE=TokuDB;
insert into t2 values (1);
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Intvar # # INSERT_ID=1
master-bin.000001 # Query # # use `test`; insert into t1 values (NULL)
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Begin_load_query # # ;file_id=#;block_len=#
master-bin.000001 # Execute_load_query # # use `test`; LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE `t1` FIELDS TERMINATED BY '\t' ENCLOSED BY '' ESCAPED BY '\\' LINES TERMINATED BY '\n' IGNORE 1 LINES (`word`) ;file_id=#
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Rotate # # master-bin.000002;pos=POS
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000002 # Query # # use `test`; create table t3 (a int)ENGINE=TokuDB
master-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=TokuDB
master-bin.000002 # Query # # BEGIN
master-bin.000002 # Query # # use `test`; insert into t2 values (1)
master-bin.000002 # Xid # # COMMIT /* XID */
show binary logs;
Log_name File_size
master-bin.000001 #
master-bin.000002 #
include/sync_slave_sql_with_master.inc
show binary logs;
Log_name File_size
slave-bin.000001 #
slave-bin.000002 #
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
slave-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
slave-bin.000001 # Query # # BEGIN
slave-bin.000001 # Intvar # # INSERT_ID=1
slave-bin.000001 # Query # # use `test`; insert into t1 values (NULL)
slave-bin.000001 # Xid # # COMMIT /* XID */
slave-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
slave-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
slave-bin.000001 # Query # # BEGIN
slave-bin.000001 # Begin_load_query # # ;file_id=#;block_len=#
slave-bin.000001 # Execute_load_query # # use `test`; LOAD DATA INFILE '../../tmp/SQL_LOAD-<SERVER UUID>-<MASTER server-id>-<file-id>.<extension>' INTO TABLE `t1` FIELDS TERMINATED BY '\t' ENCLOSED BY '' ESCAPED BY '\\' LINES TERMINATED BY '\n' IGNORE 1 LINES (`word`) ;file_id=#
slave-bin.000001 # Xid # # COMMIT /* XID */
slave-bin.000001 # Query # # use `test`; create table t3 (a int)ENGINE=TokuDB
slave-bin.000001 # Rotate # # slave-bin.000002;pos=POS
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
slave-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=TokuDB
slave-bin.000002 # Query # # BEGIN
slave-bin.000002 # Query # # use `test`; insert into t2 values (1)
slave-bin.000002 # Xid # # COMMIT /* XID */
include/check_slave_is_running.inc
show binlog events in 'slave-bin.000005' from 4;
ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Could not find target log
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
include/rpl_reset.inc
create table t1(a int auto_increment primary key, b int);
insert into t1 values (NULL, 1);
set insert_id=5;
insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id());
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # use `test`; create table t1(a int auto_increment primary key, b int)
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Intvar # # INSERT_ID=1
master-bin.000001 # Query # # use `test`; insert into t1 values (NULL, 1)
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Intvar # # LAST_INSERT_ID=1
master-bin.000001 # Intvar # # INSERT_ID=5
master-bin.000001 # Query # # use `test`; insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id())
master-bin.000001 # Query # # COMMIT
select * from t1;
a b
1 1
5 1
6 1
drop table t1;
include/sync_slave_sql_with_master.inc
include/rpl_end.inc

1773
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_stm_mixed_crash_safe.result
File diff suppressed because it is too large
View File

48
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_tokudb_stm_mixed_lower_case_table_names.result

@ -0,0 +1,48 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
******** [ MASTER ] ********
CREATE DATABASE BUG_37656;
use BUG_37656;
show databases like 'BUG_37656';
Database (BUG_37656)
BUG_37656
******** [ SLAVE ] ********
show databases like 'bug_37656';
Database (bug_37656)
bug_37656
******** [ MASTER ] ********
CREATE TABLE T1 (a int);
CREATE TABLE T2 (b int) ENGINE=TokuDB;
CREATE TABLE T3 (txt TEXT);
show tables;
Tables_in_BUG_37656
T1
T2
T3
******** [ SLAVE ] ********
use bug_37656;
show tables;
Tables_in_bug_37656
t2
t3
CREATE TABLE t1 (a INT);
******** [ MASTER ] ********
use BUG_37656;
INSERT INTO T1 VALUES (1);
INSERT INTO T2 VALUES (1);
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE BUG_37656.T3;
******** [ SLAVE ] ********
include/diff_tables.inc [master:BUG_37656.T2, slave:bug_37656.t2]
include/diff_tables.inc [master:BUG_37656.T3, slave:bug_37656.t3]
******** [ MASTER ] ********
DROP DATABASE BUG_37656;
include/rpl_reset.inc
CREATE DATABASE B50653;
USE B50653;
CREATE PROCEDURE b50653_proc() BEGIN SELECT 1; END;
DROP PROCEDURE b50653_proc;
DROP DATABASE B50653;
include/rpl_end.inc

4
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_not_null_tokudb.test

@ -11,10 +11,10 @@
# 3 - NULL --> NOT NULL ( sql-mode != STRICT and no failures)
#
#################################################################################
--source include/master-slave.inc
--source include/have_tokudb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
let $engine=Innodb;
let $engine=TokuDB;
--source extra/rpl_tests/rpl_not_null.test
--source include/rpl_end.inc

12
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_row_basic_3tokudb.test

@ -1,11 +1,11 @@
-- source include/have_tokudb.inc
-- source include/have_binlog_format_row.inc
-- source include/master-slave.inc
--source include/have_tokudb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
connection slave;
let $bit_field_special = ALL_LOSSY;
let $type= 'INNODB' ;
let $extra_index= ;
-- source extra/rpl_tests/rpl_row_basic.test
let $type = 'TokuDB';
let $extra_index = ;
--source extra/rpl_tests/rpl_row_basic.test
--source include/rpl_end.inc

6
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_commit_after_flush.test

@ -0,0 +1,6 @@
-- source include/not_ndb_default.inc
-- source include/have_tokudb.inc
-- source include/master-slave.inc
let $engine_type=tokudb;
-- source extra/rpl_tests/rpl_commit_after_flush.test
--source include/rpl_end.inc

1
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_insert_id-master.opt

@ -0,0 +1 @@
${?PB_HOST_SPECIFIC_MYSQLD_ARGS}

1
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_insert_id-slave.opt

@ -0,0 +1 @@
${?PB_HOST_SPECIFIC_MYSQLD_ARGS}

7
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_insert_id.test

@ -0,0 +1,7 @@
#################################
# Wrapper for rpl_insert_id.test#
#################################
-- source include/not_ndb_default.inc
-- source include/have_tokudb.inc
let $engine_type=tokudb;
-- source extra/rpl_tests/rpl_insert_id.test

7
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_insert_id_pk.test

@ -0,0 +1,7 @@
#################################
# Wrapper for rpl_insert_id.test#
#################################
-- source include/not_ndb_default.inc
-- source include/have_tokudb.inc
let $engine_type=tokudb;
-- source extra/rpl_tests/rpl_insert_id_pk.test

4
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_multi_update.test

@ -0,0 +1,4 @@
-- source include/not_ndb_default.inc
-- source include/have_tokudb.inc
let $engine_type=TokuDB;
-- source extra/rpl_tests/rpl_multi_update.test

1
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_multi_update2-slave.opt

@ -0,0 +1 @@
--replicate-ignore-table=nothing.sensible

14
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_multi_update2.test

@ -0,0 +1,14 @@
#######################################################
# Wrapper for rpl_multi_update2.test to allow multi #
# Engines to reuse test code. By JBM 2006-02-15 #
# Added comments section and to skip when ndb is #
# Default engine. #
#######################################################
--source include/not_gtid_enabled.inc
--source include/not_ndb_default.inc
--source include/have_tokudb.inc
--source include/master-slave.inc
call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT');
let $engine_type=TokuDB;
--source extra/rpl_tests/rpl_multi_update2.test
--source include/rpl_end.inc

13
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_multi_update3.test

@ -0,0 +1,13 @@
#######################################################
# Wrapper for rpl_multi_update3.test to allow multi #
# Engines to reuse test code. By JBM 2006-02-15 #
# Added comments section and to skip when ndb is #
# Default engine. #
#######################################################
--source include/have_tokudb.inc
--source include/not_ndb_default.inc
--source include/master-slave.inc
call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.');
let $engine_type=TokuDB;
--source extra/rpl_tests/rpl_multi_update3.test
--source include/rpl_end.inc

1
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_crash_safe-master.opt

@ -0,0 +1 @@
--transaction_isolation=READ-COMMITTED

1
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_crash_safe-slave.opt

@ -0,0 +1 @@
--skip-slave-start --relay-log-info-repository=TABLE --relay-log-recovery=1 --transaction_isolation=READ-COMMITTED

19
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_crash_safe.test

@ -0,0 +1,19 @@
# This test takes long time, so only run it with the --big-test mtr-flag.
--source include/big_test.inc
--source include/not_embedded.inc
--source include/not_valgrind.inc
--source include/have_debug.inc
--source include/have_tokudb.inc
--source include/have_binlog_format_row.inc
--source include/not_mts_slave_parallel_workers.inc
--source include/master-slave.inc
call mtr.add_suppression('Attempting backtrace');
call mtr.add_suppression("Recovery from master pos .* and file master-bin.000001");
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
call mtr.add_suppression(".* InnoDB: Warning: allocated tablespace .*, old maximum was .*");
let $engine_type=TokuDB;
let $database_name=test;
--source extra/rpl_tests/rpl_crash_safe.test
--source include/rpl_end.inc

1
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_blobs.cnf

@ -0,0 +1 @@
!include suite/rpl/t/rpl_row_img.cnf

53
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_blobs.test

@ -0,0 +1,53 @@
#Want to skip this test from daily Valgrind execution
--source include/no_valgrind_without_big.inc
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
#
# This file contains tests for WL#5096.
#
-- let $rpl_topology= 1->2->3
-- source include/rpl_init.inc
-- source include/have_binlog_format_row.inc
#
# WL#5096 Tests.
#
#
# Tests combinations of binlog-row-image against mixes of MyISAM and InnoDB
# storage engines on all three servers.
#
# All the combinarions need not to be separated into their own files as
# the tests for indexes and engines mixes are, because noblobs test script
# does not take too long time, thence we do not risk triggering PB2 timeout
# on valgrind runs.
#
## NOBLOB
-- let $row_img_set=server_1:NOBLOB:N,server_2:NOBLOB:Y,server_3:NOBLOB:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img_blobs.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
## MINIMAL
-- let $row_img_set=server_1:MINIMAL:N,server_2:MINIMAL:Y,server_3:MINIMAL:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img_blobs.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
## FULL
-- let $row_img_set=server_1:FULL:N,server_2:FULL:Y,server_3:FULL:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img_blobs.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
-- source include/rpl_end.inc

1
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_full.cnf

@ -0,0 +1 @@
!include suite/rpl/t/rpl_row_img.cnf

50
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_full.test

@ -0,0 +1,50 @@
#Want to skip this test from daily Valgrind execution
-- source include/no_valgrind_without_big.inc
#
# This file contains tests for WL#5096 and bug fixes.
#
-- source include/have_binlog_format_row.inc
-- source include/not_gtid_enabled.inc
-- let $rpl_topology= 1->2->3
-- source include/rpl_init.inc
-- connection server_1
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_2
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_3
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_1
#
# WL#5096
#
#
# Tests for different storage engines on each server,
# but same index structure on tables. The tests are conducted
# using FULL binlog-row-image on all servers.
#
-- let $row_img_set=server_1:FULL:N,server_2:FULL:Y,server_3:FULL:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
#
# BUG#49100
#
-- echo ### Testing with TokuDB storage engine
-- let $engine=TokuDB
-- source extra/rpl_tests/rpl_row_empty_imgs.test
-- source include/rpl_end.inc

1
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_min.cnf

@ -0,0 +1 @@
!include suite/rpl/t/rpl_row_img.cnf

42
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_min.test

@@ -0,0 +1,42 @@
#Want to skip this test from daily Valgrind execution
-- source include/no_valgrind_without_big.inc
#
# This file contains tests for WL#5096 and bug fixes.
#
-- let $rpl_topology= 1->2->3
-- source include/rpl_init.inc
-- source include/have_binlog_format_row.inc
-- connection server_1
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_2
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_3
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_1
#
# WL#5096
#
#
# Tests for different storage engines on each server,
# but same index structure on tables. The tests are conducted
# using MINIMAL binlog-row-image on all servers.
#
-- let $row_img_set=server_1:MINIMAL:N,server_2:MINIMAL:Y,server_3:MINIMAL:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
-- let $row_img_set=server_1:FULL:N,server_2:FULL:Y,server_3:FULL:Y
-- source include/rpl_row_img_set.inc
-- source include/rpl_end.inc

1
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_noblob.cnf

@@ -0,0 +1 @@
!include suite/rpl/t/rpl_row_img.cnf

42
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_eng_noblob.test

@@ -0,0 +1,42 @@
#Want to skip this test from daily Valgrind execution
-- source include/no_valgrind_without_big.inc
#
# This file contains tests for WL#5096 and bug fixes.
#
-- let $rpl_topology= 1->2->3
-- source include/rpl_init.inc
-- source include/have_binlog_format_row.inc
-- connection server_1
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_2
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_3
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_1
#
# WL#5096
#
#
# Tests for different storage engines on each server,
# but same index structure on tables. The tests are conducted
# using NOBLOB binlog-row-image on all servers.
#
-- let $row_img_set=server_1:NOBLOB:N,server_2:NOBLOB:Y,server_3:NOBLOB:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
-- let $row_img_set=server_1:FULL:N,server_2:FULL:Y,server_3:FULL:Y
-- source include/rpl_row_img_set.inc
-- source include/rpl_end.inc

1
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_idx_full.cnf

@@ -0,0 +1 @@
!include suite/rpl/t/rpl_row_img.cnf

38
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_idx_full.test

@@ -0,0 +1,38 @@
#Want to skip this test from daily Valgrind execution
-- source include/no_valgrind_without_big.inc
#
# This file contains tests for WL#5096.
#
-- let $rpl_topology= 1->2->3
-- source include/rpl_init.inc
-- source include/have_binlog_format_row.inc
-- connection server_1
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_2
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_3
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_1
#
# WL#5096 Tests.
#
#
# Tests FULL image against a mix of InnoDB and TokuDB engines on
# each of the three servers.
#
-- let $row_img_set=server_1:FULL:N,server_2:FULL:Y,server_3:FULL:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img_diff_indexes.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
-- source include/rpl_end.inc

1
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_idx_min.cnf

@@ -0,0 +1 @@
!include suite/rpl/t/rpl_row_img.cnf

41
storage/tokudb/mysql-test/tokudb_rpl/t/rpl_tokudb_row_img_idx_min.test

@@ -0,0 +1,41 @@
#Want to skip this test from daily Valgrind execution
--source include/no_valgrind_without_big.inc
#
# This file contains tests for WL#5096.
#
--let $rpl_topology= 1->2->3
--source include/rpl_init.inc
-- source include/have_binlog_format_row.inc
-- connection server_1
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_2
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_3
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_1
#
# WL#5096 Tests.
#
#
# Tests MINIMAL image against a mix of InnoDB and TokuDB engines on
# each of the three servers.
#
-- let $row_img_set=server_1:MINIMAL:N,server_2:MINIMAL:Y,server_3:MINIMAL:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img_diff_indexes.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
-- let $row_img_set=server_1:FULL:N,server_2:FULL:Y,server_3:FULL:Y
-- source include/rpl_row_img_set.inc
-- source include/rpl_end.inc

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save