From a85f32968ce3272b816756177b25316df1868d20 Mon Sep 17 00:00:00 2001 From: Zardosht Kasheff Date: Wed, 17 Apr 2013 00:01:30 -0400 Subject: [PATCH] refs #6058, merge to main! git-svn-id: file:///svn/toku/tokudb@54234 c7de825b-a66e-492c-adef-691d508d4ae1 --- buildheader/make_tdb.cc | 1 + ft/ft-ops.cc | 5 +- ft/fttypes.h | 1 + ft/log-internal.h | 1 + ft/logformat.cc | 2 + ft/recover.cc | 11 +- ft/tests/cachetable-rwlock-test.cc | 2 + ft/tests/is_empty.cc | 12 +- ft/tests/le-cursor-provdel.cc | 10 +- ft/tests/le-cursor-right.cc | 4 +- ft/tests/le-cursor-walk.cc | 4 +- ft/tests/test-txn-child-manager.cc | 21 +- ft/tests/xid_lsn_independent.cc | 12 +- ft/txn.cc | 103 +- ft/txn.h | 10 +- ft/txn_manager.cc | 111 +- ft/txn_manager.h | 3 +- ft/xids.cc | 36 +- ft/xids.h | 2 + src/indexer.cc | 1 + src/loader.cc | 1 + src/tests/CMakeLists.txt | 1701 +++++++++++----------- src/tests/perf_child_txn.cc | 2 +- src/tests/perf_iibench.cc | 1 + src/tests/perf_ptquery.cc | 1 + src/tests/perf_ptquery2.cc | 1 + src/tests/perf_rangequery.cc | 1 + src/tests/perf_read_txn.cc | 50 + src/tests/perf_read_txn_single_thread.cc | 76 + src/tests/perf_txn_single_thread.cc | 2 +- src/tests/test_cursor_with_read_txn.cc | 92 ++ src/tests/test_locking_with_read_txn.cc | 57 + src/tests/test_read_txn_invalid_ops.cc | 163 +++ src/tests/test_simple_read_txn.cc | 64 + src/tests/test_stress1.cc | 2 + src/tests/test_stress2.cc | 2 + src/tests/test_stress3.cc | 2 + src/tests/test_stress5.cc | 1 + src/tests/threaded_stress_test_helpers.h | 10 +- src/ydb-internal.h | 10 + src/ydb.cc | 4 + src/ydb_db.cc | 5 + src/ydb_txn.cc | 33 +- src/ydb_write.cc | 7 + 44 files changed, 1663 insertions(+), 977 deletions(-) create mode 100644 src/tests/perf_read_txn.cc create mode 100644 src/tests/perf_read_txn_single_thread.cc create mode 100644 src/tests/test_cursor_with_read_txn.cc create mode 100644 src/tests/test_locking_with_read_txn.cc create mode 100644 src/tests/test_read_txn_invalid_ops.cc create mode 100644 src/tests/test_simple_read_txn.cc diff --git a/buildheader/make_tdb.cc b/buildheader/make_tdb.cc index e97c593e881..3ee9a70e7f9 100644 --- a/buildheader/make_tdb.cc +++ b/buildheader/make_tdb.cc @@ -249,6 +249,7 @@ static void print_defines (void) { #endif dodefine_from_track(txn_flags, DB_INHERIT_ISOLATION); dodefine_from_track(txn_flags, DB_SERIALIZABLE); + dodefine_from_track(txn_flags, DB_TXN_READ_ONLY); } /* TOKUDB specific error codes*/ diff --git a/ft/ft-ops.cc b/ft/ft-ops.cc index ffb17ab29ad..73823ea95f8 100644 --- a/ft/ft-ops.cc +++ b/ft/ft-ops.cc @@ -3935,7 +3935,10 @@ static int does_txn_read_entry(TXNID id, TOKUTXN context) { int rval; TXNID oldest_live_in_snapshot = toku_get_oldest_in_live_root_txn_list(context); - if (id < oldest_live_in_snapshot || id == context->txnid.parent_id64) { + if (oldest_live_in_snapshot == TXNID_NONE && id < context->snapshot_txnid64) { + rval = TOKUDB_ACCEPT; + } + else if (id < oldest_live_in_snapshot || id == context->txnid.parent_id64) { rval = TOKUDB_ACCEPT; } else if (id > context->snapshot_txnid64 || toku_is_txn_in_live_root_txn_list(*context->live_root_txn_list, id)) { diff --git a/ft/fttypes.h b/ft/fttypes.h index 62fe3f6114e..f328bec6716 100644 --- a/ft/fttypes.h +++ b/ft/fttypes.h @@ -47,6 +47,7 @@ typedef struct txnid_pair_s { #define TXNID_NONE_LIVING ((TXNID)0) #define TXNID_NONE ((TXNID)0) +#define TXNID_MAX ((TXNID)-1) static const TXNID_PAIR TXNID_PAIR_NONE = { .parent_id64 = TXNID_NONE, .child_id64 = TXNID_NONE }; diff --git a/ft/log-internal.h b/ft/log-internal.h index 
806764a464f..454bb9bf809 100644 --- a/ft/log-internal.h +++ b/ft/log-internal.h @@ -170,6 +170,7 @@ struct tokutxn { TXNID oldest_referenced_xid; bool begin_was_logged; + bool declared_read_only; // true if the txn was declared read only when it began // These are not read until a commit, prepare, or abort starts, and // they're "monotonic" (only go false->true) during operation: bool do_fsync; diff --git a/ft/logformat.cc b/ft/logformat.cc index 4926fe6db7d..0e14ef97585 100644 --- a/ft/logformat.cc +++ b/ft/logformat.cc @@ -412,6 +412,7 @@ generate_log_writer (void) { fprintf(cf, " //txn can be NULL during tests\n"); fprintf(cf, " //never null when not checkpoint.\n"); fprintf(cf, " if (txn && !txn->begin_was_logged) {\n"); + fprintf(cf, " invariant(!txn_declared_read_only(txn));\n"); fprintf(cf, " toku_maybe_log_begin_txn_for_write_operation(txn);\n"); fprintf(cf, " }\n"); break; @@ -419,6 +420,7 @@ generate_log_writer (void) { case ASSERT_BEGIN_WAS_LOGGED: { fprintf(cf, " //txn can be NULL during tests\n"); fprintf(cf, " invariant(!txn || txn->begin_was_logged);\n"); + fprintf(cf, " invariant(!txn || !txn_declared_read_only(txn));\n"); break; } case IGNORE_LOG_BEGIN: break; diff --git a/ft/recover.cc b/ft/recover.cc index 14bdc508cbe..f2992cbad61 100644 --- a/ft/recover.cc +++ b/ft/recover.cc @@ -480,7 +480,16 @@ recover_transaction(TOKUTXN *txnp, TXNID_PAIR xid, TXNID_PAIR parentxid, TOKULOG toku_txnid2txn(logger, xid, &txn); assert(txn==NULL); } - r = toku_txn_begin_with_xid(parent, &txn, logger, xid, TXN_SNAPSHOT_NONE, NULL, true); + r = toku_txn_begin_with_xid( + parent, + &txn, + logger, + xid, + TXN_SNAPSHOT_NONE, + NULL, + true, // for_recovery + false // read_only + ); assert(r == 0); // We only know about it because it was logged. Restore the log bit. // Logging is 'off' but it will still set the bit. 
diff --git a/ft/tests/cachetable-rwlock-test.cc b/ft/tests/cachetable-rwlock-test.cc index 4bbb102decb..ea002e8359e 100644 --- a/ft/tests/cachetable-rwlock-test.cc +++ b/ft/tests/cachetable-rwlock-test.cc @@ -97,6 +97,7 @@ test_writer_priority_thread (void *arg) { static void test_writer_priority (void) { struct rw_event rw_event, *rwe = &rw_event; + ZERO_STRUCT(rw_event); int r; rw_event_init(rwe); @@ -152,6 +153,7 @@ test_single_writer_thread (void *arg) { static void test_single_writer (void) { struct rw_event rw_event, *rwe = &rw_event; + ZERO_STRUCT(rw_event); int r; rw_event_init(rwe); diff --git a/ft/tests/is_empty.cc b/ft/tests/is_empty.cc index bae126456df..95ce27c5fa1 100644 --- a/ft/tests/is_empty.cc +++ b/ft/tests/is_empty.cc @@ -32,7 +32,7 @@ static void test_it (int N) { r = toku_logger_open_rollback(logger, ct, true); CKERR(r); TOKUTXN txn; - r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r); + r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r); r = toku_open_ft_handle(FILENAME, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r); @@ -44,12 +44,12 @@ static void test_it (int N) { unsigned int rands[N]; for (int i=0; ichild_manager; @@ -108,7 +111,8 @@ void txn_child_manager_unit_test::run_test() { root_txn, &child_txn, logger, - TXN_SNAPSHOT_ROOT + TXN_SNAPSHOT_ROOT, + false ); CKERR(r); assert(child_txn->child_manager == cm); @@ -128,7 +132,8 @@ void txn_child_manager_unit_test::run_test() { child_txn, &grandchild_txn, logger, - TXN_SNAPSHOT_ROOT + TXN_SNAPSHOT_ROOT, + false ); CKERR(r); assert(grandchild_txn->child_manager == cm); @@ -153,7 +158,8 @@ void txn_child_manager_unit_test::run_test() { child_txn, &grandchild_txn, logger, - TXN_SNAPSHOT_ROOT + TXN_SNAPSHOT_ROOT, + false ); CKERR(r); assert(grandchild_txn->child_manager == cm); @@ -177,7 +183,8 @@ void txn_child_manager_unit_test::run_test() { xid, TXN_SNAPSHOT_NONE, NULL, - true // for recovery + true, // for recovery + false // read_only ); assert(recovery_txn->child_manager == cm); diff --git a/ft/tests/xid_lsn_independent.cc b/ft/tests/xid_lsn_independent.cc index a1aa4bed48f..f3fbca07541 100644 --- a/ft/tests/xid_lsn_independent.cc +++ b/ft/tests/xid_lsn_independent.cc @@ -15,7 +15,7 @@ static void do_txn(TOKULOGGER logger, bool readonly) { int r; TOKUTXN txn; - r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE); + r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false); CKERR(r); if (!readonly) { @@ -37,7 +37,7 @@ static void test_xid_lsn_independent(int N) { FT_HANDLE brt; TOKUTXN txn; - r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE); + r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false); CKERR(r); r = toku_open_ft_handle("ftfile", 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); @@ -47,7 +47,7 @@ static void test_xid_lsn_independent(int N) { CKERR(r); toku_txn_close_txn(txn); - r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE); + r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false); CKERR(r); TXNID xid_first = txn->txnid.parent_id64; unsigned int rands[N]; @@ -62,7 +62,7 @@ static void test_xid_lsn_independent(int N) { } { TOKUTXN txn2; - r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn2, logger, TXN_SNAPSHOT_NONE); + r 
= toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn2, logger, TXN_SNAPSHOT_NONE, false); CKERR(r); // Verify the txnid has gone up only by one (even though many log entries were done) invariant(txn2->txnid.parent_id64 == xid_first + 1); @@ -77,7 +77,7 @@ static void test_xid_lsn_independent(int N) { //TODO(yoni) #5067 will break this portion of the test. (End ids are also assigned, so it would increase by 4 instead of 2.) // Verify the txnid has gone up only by two (even though many log entries were done) TOKUTXN txn3; - r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn3, logger, TXN_SNAPSHOT_NONE); + r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn3, logger, TXN_SNAPSHOT_NONE, false); CKERR(r); invariant(txn3->txnid.parent_id64 == xid_first + 2); r = toku_txn_commit_txn(txn3, false, NULL, NULL); @@ -173,7 +173,7 @@ static void test_xid_lsn_independent_parents(int N) { ZERO_ARRAY(txns_hack); for (int i = 0; i < N; i++) { - r = toku_txn_begin_txn((DB_TXN*)NULL, txns[i-1], &txns[i], logger, TXN_SNAPSHOT_NONE); + r = toku_txn_begin_txn((DB_TXN*)NULL, txns[i-1], &txns[i], logger, TXN_SNAPSHOT_NONE, false); CKERR(r); if (i < num_non_cascade) { diff --git a/ft/txn.cc b/ft/txn.cc index 24f87a90170..f02a36b2b9b 100644 --- a/ft/txn.cc +++ b/ft/txn.cc @@ -37,6 +37,7 @@ txn_status_init(void) { // Note, this function initializes the keyname, type, and legend fields. // Value fields are initialized to zero by compiler. STATUS_INIT(TXN_BEGIN, PARCOUNT, "begin"); + STATUS_INIT(TXN_READ_BEGIN, PARCOUNT, "begin read only"); STATUS_INIT(TXN_COMMIT, PARCOUNT, "successful commits"); STATUS_INIT(TXN_ABORT, PARCOUNT, "aborts"); txn_status.initialized = true; @@ -77,19 +78,52 @@ toku_txn_get_root_id(TOKUTXN txn) return txn->txnid.parent_id64; } +bool txn_declared_read_only(TOKUTXN txn) { + return txn->declared_read_only; +} + int toku_txn_begin_txn ( DB_TXN *container_db_txn, TOKUTXN parent_tokutxn, TOKUTXN *tokutxn, TOKULOGGER logger, - TXN_SNAPSHOT_TYPE snapshot_type + TXN_SNAPSHOT_TYPE snapshot_type, + bool read_only ) { - int r = toku_txn_begin_with_xid(parent_tokutxn, tokutxn, logger, TXNID_PAIR_NONE, snapshot_type, container_db_txn, false); + int r = toku_txn_begin_with_xid( + parent_tokutxn, + tokutxn, + logger, + TXNID_PAIR_NONE, + snapshot_type, + container_db_txn, + false, // for_recovery + read_only + ); return r; } + +static void +txn_create_xids(TOKUTXN txn, TOKUTXN parent) { + XIDS xids; + XIDS parent_xids; + if (parent == NULL) { + parent_xids = xids_get_root_xids(); + } else { + parent_xids = parent->xids; + } + xids_create_unknown_child(parent_xids, &xids); + TXNID finalized_xid = (parent == NULL) ? txn->txnid.parent_id64 : txn->txnid.child_id64; + xids_finalize_with_child(xids, finalized_xid); + txn->xids = xids; +} + +// Allocate and initialize a txn +static void toku_txn_create_txn(TOKUTXN *txn_ptr, TOKUTXN parent, TOKULOGGER logger, TXN_SNAPSHOT_TYPE snapshot_type, DB_TXN *container_db_txn, bool for_checkpoint, bool read_only); + int toku_txn_begin_with_xid ( TOKUTXN parent, @@ -98,24 +132,22 @@ toku_txn_begin_with_xid ( TXNID_PAIR xid, TXN_SNAPSHOT_TYPE snapshot_type, DB_TXN *container_db_txn, - bool for_recovery + bool for_recovery, + bool read_only ) { int r = 0; - TOKUTXN txn; - XIDS xids; - // Do as much (safe) work as possible before serializing on the txn_manager lock. 
- XIDS parent_xids; - if (parent == NULL) { - parent_xids = xids_get_root_xids(); - } else { - parent_xids = parent->xids; + TOKUTXN txn; + // check for case where we are trying to + // create too many nested transactions + if (!read_only && parent && !xids_can_create_child(parent->xids)) { + r = EINVAL; + goto exit; } - r = xids_create_unknown_child(parent_xids, &xids); - if (r != 0) { - return r; + if (read_only && parent) { + invariant(txn_declared_read_only(parent)); } - toku_txn_create_txn(&txn, parent, logger, snapshot_type, container_db_txn, xids, for_recovery); + toku_txn_create_txn(&txn, parent, logger, snapshot_type, container_db_txn, for_recovery, read_only); // txnid64, snapshot_txnid64 // will be set in here. if (for_recovery) { @@ -139,7 +171,8 @@ toku_txn_begin_with_xid ( toku_txn_manager_start_txn( txn, logger->txn_manager, - snapshot_type + snapshot_type, + read_only ); } else { @@ -152,10 +185,12 @@ toku_txn_begin_with_xid ( ); } } - TXNID finalized_xid = (parent == NULL) ? txn->txnid.parent_id64 : txn->txnid.child_id64; - xids_finalize_with_child(txn->xids, finalized_xid); + if (!read_only) { + // this call will set txn->xids + txn_create_xids(txn, parent); + } *txnp = txn; - +exit: return r; } @@ -174,14 +209,14 @@ static void invalidate_xa_xid (TOKU_XA_XID *xid) { xid->formatID = -1; // According to the XA spec, -1 means "invalid data" } -void toku_txn_create_txn ( +static void toku_txn_create_txn ( TOKUTXN *tokutxn, TOKUTXN parent_tokutxn, TOKULOGGER logger, TXN_SNAPSHOT_TYPE snapshot_type, DB_TXN *container_db_txn, - XIDS xids, - bool for_recovery + bool for_recovery, + bool read_only ) { assert(logger->rollback_cachefile); @@ -216,9 +251,10 @@ static txn_child_manager tcm; .child_manager = NULL, .container_db_txn = container_db_txn, .live_root_txn_list = nullptr, - .xids = xids, + .xids = NULL, .oldest_referenced_xid = TXNID_NONE, .begin_was_logged = false, + .declared_read_only = read_only, .do_fsync = false, .force_fsync_on_commit = false, .do_fsync_lsn = ZERO_LSN, @@ -257,7 +293,12 @@ static txn_child_manager tcm; *tokutxn = result; - STATUS_INC(TXN_BEGIN, 1); + if (read_only) { + STATUS_INC(TXN_READ_BEGIN, 1); + } + else { + STATUS_INC(TXN_BEGIN, 1); + } } void @@ -540,7 +581,9 @@ void toku_txn_complete_txn(TOKUTXN txn) { void toku_txn_destroy_txn(TOKUTXN txn) { txn->open_fts.destroy(); - xids_destroy(&txn->xids); + if (txn->xids) { + xids_destroy(&txn->xids); + } toku_mutex_destroy(&txn->txn_lock); toku_mutex_destroy(&txn->state_lock); toku_cond_destroy(&txn->state_cond); @@ -557,10 +600,14 @@ void toku_txn_force_fsync_on_commit(TOKUTXN txn) { } TXNID toku_get_oldest_in_live_root_txn_list(TOKUTXN txn) { - invariant(txn->live_root_txn_list->size()>0); TXNID xid; - int r = txn->live_root_txn_list->fetch(0, &xid); - assert_zero(r); + if (txn->live_root_txn_list->size()>0) { + int r = txn->live_root_txn_list->fetch(0, &xid); + assert_zero(r); + } + else { + xid = TXNID_NONE; + } return xid; } diff --git a/ft/txn.h b/ft/txn.h index fc7d62301d1..39ab10d29a9 100644 --- a/ft/txn.h +++ b/ft/txn.h @@ -29,13 +29,15 @@ void toku_txn_lock(TOKUTXN txn); void toku_txn_unlock(TOKUTXN txn); uint64_t toku_txn_get_root_id(TOKUTXN txn); +bool txn_declared_read_only(TOKUTXN txn); int toku_txn_begin_txn ( DB_TXN *container_db_txn, TOKUTXN parent_tokutxn, TOKUTXN *tokutxn, TOKULOGGER logger, - TXN_SNAPSHOT_TYPE snapshot_type + TXN_SNAPSHOT_TYPE snapshot_type, + bool read_only ); DB_TXN * toku_txn_get_container_db_txn (TOKUTXN tokutxn); @@ -49,11 +51,10 @@ int toku_txn_begin_with_xid 
( TXNID_PAIR xid, TXN_SNAPSHOT_TYPE snapshot_type, DB_TXN *container_db_txn, - bool for_recovery + bool for_recovery, + bool read_only ); -// Allocate and initialize a txn -void toku_txn_create_txn(TOKUTXN *txn_ptr, TOKUTXN parent, TOKULOGGER logger, TXN_SNAPSHOT_TYPE snapshot_type, DB_TXN *container_db_txn, XIDS xids, bool for_checkpoint); void toku_txn_update_xids_in_txn(TOKUTXN txn, TXNID xid); int toku_txn_load_txninfo (TOKUTXN txn, TXNINFO info); @@ -94,6 +95,7 @@ void toku_txn_force_fsync_on_commit(TOKUTXN txn); typedef enum { TXN_BEGIN, // total number of transactions begun (does not include recovered txns) + TXN_READ_BEGIN, // total number of read only transactions begun (does not include recovered txns) TXN_COMMIT, // successful commits TXN_ABORT, TXN_STATUS_NUM_ROWS diff --git a/ft/txn_manager.cc b/ft/txn_manager.cc index 5b60b44c651..44bb922a012 100644 --- a/ft/txn_manager.cc +++ b/ft/txn_manager.cc @@ -192,9 +192,13 @@ void toku_txn_manager_init(TXN_MANAGER* txn_managerp) { void toku_txn_manager_destroy(TXN_MANAGER txn_manager) { toku_mutex_destroy(&txn_manager->txn_manager_lock); + invariant(txn_manager->live_root_txns.size() == 0); txn_manager->live_root_txns.destroy(); + invariant(txn_manager->live_root_ids.size() == 0); txn_manager->live_root_ids.destroy(); + invariant(txn_manager->snapshot_txnids.size() == 0); txn_manager->snapshot_txnids.destroy(); + invariant(txn_manager->referenced_xids.size() == 0); txn_manager->referenced_xids.destroy(); toku_free(txn_manager); } @@ -264,19 +268,33 @@ max_xid(TXNID a, TXNID b) { } static TXNID get_oldest_referenced_xid_unlocked(TXN_MANAGER txn_manager) { - TXNID oldest_referenced_xid = TXNID_NONE_LIVING; - int r = txn_manager->live_root_ids.fetch(0, &oldest_referenced_xid); - // this function should only be called when we know there is at least - // one live transaction - invariant_zero(r); - - struct referenced_xid_tuple* tuple; + TXNID oldest_referenced_xid = TXNID_MAX; + int r; + if (txn_manager->live_root_ids.size() > 0) { + r = txn_manager->live_root_ids.fetch(0, &oldest_referenced_xid); + // this function should only be called when we know there is at least + // one live transaction + invariant_zero(r); + } + if (txn_manager->referenced_xids.size() > 0) { + struct referenced_xid_tuple* tuple; r = txn_manager->referenced_xids.fetch(0, &tuple); if (r == 0 && tuple->begin_id < oldest_referenced_xid) { oldest_referenced_xid = tuple->begin_id; } } + if (txn_manager->snapshot_txnids.size() > 0) { + TXNID id; + r = txn_manager->snapshot_txnids.fetch(0, &id); + if (r == 0 && id < oldest_referenced_xid) { + oldest_referenced_xid = id; + } + } + if (txn_manager->last_xid < oldest_referenced_xid) { + oldest_referenced_xid = txn_manager->last_xid; + } + paranoid_invariant(oldest_referenced_xid != TXNID_MAX); return oldest_referenced_xid; } @@ -492,7 +510,8 @@ void toku_txn_manager_start_txn_for_recovery( void toku_txn_manager_start_txn( TOKUTXN txn, TXN_MANAGER txn_manager, - TXN_SNAPSHOT_TYPE snapshot_type + TXN_SNAPSHOT_TYPE snapshot_type, + bool read_only ) { int r; @@ -528,13 +547,15 @@ void toku_txn_manager_start_txn( // is taken into account when the transaction is closed. 
// add ancestor information, and maintain global live root txn list - xid = ++txn_manager->last_xid; + xid = ++txn_manager->last_xid; // we always need an ID, needed for lock tree toku_txn_update_xids_in_txn(txn, xid); - uint32_t idx = txn_manager->live_root_txns.size(); - r = txn_manager->live_root_txns.insert_at(txn, idx); - invariant_zero(r); - r = txn_manager->live_root_ids.insert_at(txn->txnid.parent_id64, idx); - invariant_zero(r); + if (!read_only) { + uint32_t idx = txn_manager->live_root_txns.size(); + r = txn_manager->live_root_txns.insert_at(txn, idx); + invariant_zero(r); + r = txn_manager->live_root_ids.insert_at(txn->txnid.parent_id64, idx); + invariant_zero(r); + } txn->oldest_referenced_xid = get_oldest_referenced_xid_unlocked(txn_manager); if (needs_snapshot) { @@ -548,6 +569,7 @@ void toku_txn_manager_start_txn( verify_snapshot_system(txn_manager); } txn_manager_unlock(txn_manager); + return; } TXNID @@ -593,37 +615,39 @@ void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) { ); } - uint32_t idx; - //Remove txn from list of live root txns - TOKUTXN txnagain; - r = txn_manager->live_root_txns.find_zero(txn, &txnagain, &idx); - invariant_zero(r); - invariant(txn==txnagain); + if (!txn_declared_read_only(txn)) { + uint32_t idx; + //Remove txn from list of live root txns + TOKUTXN txnagain; + r = txn_manager->live_root_txns.find_zero(txn, &txnagain, &idx); + invariant_zero(r); + invariant(txn==txnagain); - r = txn_manager->live_root_txns.delete_at(idx); - invariant_zero(r); - r = txn_manager->live_root_ids.delete_at(idx); - invariant_zero(r); + r = txn_manager->live_root_txns.delete_at(idx); + invariant_zero(r); + r = txn_manager->live_root_ids.delete_at(idx); + invariant_zero(r); - if (!toku_txn_is_read_only(txn) || garbage_collection_debug) { - if (!is_snapshot) { - // - // If it's a snapshot, we already calculated index_in_snapshot_txnids. - // Otherwise, calculate it now. - // - r = txn_manager->snapshot_txnids.find_zero(txn->txnid.parent_id64, nullptr, &index_in_snapshot_txnids); - invariant(r == DB_NOTFOUND); - } - uint32_t num_references = txn_manager->snapshot_txnids.size() - index_in_snapshot_txnids; - if (num_references > 0) { - // This transaction exists in a live list of another transaction. - struct referenced_xid_tuple tuple = { - .begin_id = txn->txnid.parent_id64, - .end_id = ++txn_manager->last_xid, - .references = num_references - }; - r = txn_manager->referenced_xids.insert(tuple, txn->txnid.parent_id64, nullptr); - lazy_assert_zero(r); + if (!toku_txn_is_read_only(txn) || garbage_collection_debug) { + if (!is_snapshot) { + // + // If it's a snapshot, we already calculated index_in_snapshot_txnids. + // Otherwise, calculate it now. + // + r = txn_manager->snapshot_txnids.find_zero(txn->txnid.parent_id64, nullptr, &index_in_snapshot_txnids); + invariant(r == DB_NOTFOUND); + } + uint32_t num_references = txn_manager->snapshot_txnids.size() - index_in_snapshot_txnids; + if (num_references > 0) { + // This transaction exists in a live list of another transaction. 
+ struct referenced_xid_tuple tuple = { + .begin_id = txn->txnid.parent_id64, + .end_id = ++txn_manager->last_xid, + .references = num_references + }; + r = txn_manager->referenced_xids.insert(tuple, txn->txnid.parent_id64, nullptr); + lazy_assert_zero(r); + } } } @@ -638,6 +662,7 @@ void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) { txn->live_root_txn_list->destroy(); toku_free(txn->live_root_txn_list); } + return; } void toku_txn_manager_clone_state_for_gc( diff --git a/ft/txn_manager.h b/ft/txn_manager.h index 1fdd404f6e4..2393c9ad09d 100644 --- a/ft/txn_manager.h +++ b/ft/txn_manager.h @@ -58,7 +58,8 @@ void toku_txn_manager_handle_snapshot_destroy_for_child_txn( void toku_txn_manager_start_txn( TOKUTXN txn, TXN_MANAGER txn_manager, - TXN_SNAPSHOT_TYPE snapshot_type + TXN_SNAPSHOT_TYPE snapshot_type, + bool read_only ); void toku_txn_manager_start_txn_for_recovery( diff --git a/ft/xids.cc b/ft/xids.cc index c7d2b94818d..da0b020ff71 100644 --- a/ft/xids.cc +++ b/ft/xids.cc @@ -62,6 +62,12 @@ xids_get_root_xids(void) { return rval; } +bool +xids_can_create_child(XIDS xids) { + invariant(xids->num_xids < MAX_TRANSACTION_RECORDS); + return (xids->num_xids + 1) != MAX_TRANSACTION_RECORDS; +} + int xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) { @@ -70,17 +76,15 @@ xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) { int rval; invariant(parent_xids); uint32_t num_child_xids = parent_xids->num_xids + 1; - invariant(num_child_xids > 0); - invariant(num_child_xids <= MAX_TRANSACTION_RECORDS); - if (num_child_xids == MAX_TRANSACTION_RECORDS) rval = EINVAL; - else { - size_t new_size = sizeof(*parent_xids) + num_child_xids*sizeof(parent_xids->ids[0]); - XIDS CAST_FROM_VOIDP(xids, toku_xmalloc(new_size)); - // Clone everything (parent does not have the newest xid). - memcpy(xids, parent_xids, new_size - sizeof(xids->ids[0])); - *xids_p = xids; - rval = 0; - } + // assumes that caller has verified that num_child_xids will + // be less than MAX_TRANSACTION_RECORDS + invariant(num_child_xids < MAX_TRANSACTION_RECORDS); + size_t new_size = sizeof(*parent_xids) + num_child_xids*sizeof(parent_xids->ids[0]); + XIDS CAST_FROM_VOIDP(xids, toku_xmalloc(new_size)); + // Clone everything (parent does not have the newest xid). + memcpy(xids, parent_xids, new_size - sizeof(xids->ids[0])); + *xids_p = xids; + rval = 0; return rval; } @@ -99,11 +103,13 @@ int xids_create_child(XIDS parent_xids, // xids list for parent transaction XIDS * xids_p, // xids list created TXNID this_xid) { // xid of this transaction (new innermost) - int rval = xids_create_unknown_child(parent_xids, xids_p); - if (rval == 0) { - xids_finalize_with_child(*xids_p, this_xid); + bool can_create_child = xids_can_create_child(parent_xids); + if (!can_create_child) { + return EINVAL; } - return rval; + xids_create_unknown_child(parent_xids, xids_p); + xids_finalize_with_child(*xids_p, this_xid); + return 0; } void diff --git a/ft/xids.h b/ft/xids.h index fa6e51e2c7c..c6778c9dc1a 100644 --- a/ft/xids.h +++ b/ft/xids.h @@ -28,6 +28,8 @@ //Retrieve an XIDS representing the root transaction. XIDS xids_get_root_xids(void); +bool xids_can_create_child(XIDS xids); + void xids_cpy(XIDS target, XIDS source); //Creates an XIDS representing this transaction. 
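For reviewers, a minimal usage sketch of the DB_TXN_READ_ONLY flag introduced above in make_tdb.cc. This is illustrative only and not part of the patch: the function name read_only_query and the env/db handles are hypothetical, error handling is reduced to asserts, and the exact error returned to a write attempt under a read-only txn is whatever the HANDLE_READ_ONLY_TXN checks (ydb_write.cc, loader.cc, indexer.cc) produce, which is not shown here.

/* Sketch, assuming an already-open DB_ENV *env and DB *db. */
#include <assert.h>
#include <string.h>
#include <db.h>

static void read_only_query(DB_ENV *env, DB *db) {
    DB_TXN *txn = NULL;
    /* Begin a transaction declared read only via the new txn flag. */
    int r = env->txn_begin(env, NULL, &txn, DB_TXN_READ_ONLY);
    assert(r == 0);

    DBT key, val;
    memset(&key, 0, sizeof key);
    memset(&val, 0, sizeof val);
    key.data = (void *) "hello";
    key.size = sizeof "hello";

    /* Reads are allowed under a read-only transaction. */
    r = db->get(db, txn, &key, &val, 0);
    assert(r == 0 || r == DB_NOTFOUND);

    /* A write such as db->put(db, txn, &key, &val, 0) would be rejected
     * by the HANDLE_READ_ONLY_TXN checks added in this patch. */

    r = txn->commit(txn, 0);
    assert(r == 0);
}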
diff --git a/src/indexer.cc b/src/indexer.cc index 98bb28511c7..893a06d5202 100644 --- a/src/indexer.cc +++ b/src/indexer.cc @@ -161,6 +161,7 @@ toku_indexer_create_indexer(DB_ENV *env, { int rval; DB_INDEXER *indexer = 0; // set later when created + HANDLE_READ_ONLY_TXN(txn); *indexerp = NULL; diff --git a/src/loader.cc b/src/loader.cc index 7953d330b5a..3a49e622d04 100644 --- a/src/loader.cc +++ b/src/loader.cc @@ -169,6 +169,7 @@ toku_loader_create_loader(DB_ENV *env, uint32_t loader_flags, bool check_empty) { int rval; + HANDLE_READ_ONLY_TXN(txn); *blp = NULL; // set later when created diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index 1f214594590..6a93fb53989 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -1,848 +1,853 @@ -set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE DONT_DEPRECATE_ERRNO) - -if(BUILD_TESTING OR BUILD_SRC_TESTS) - function(add_ydb_test bin) - add_toku_test(ydb ${bin} ${ARGN}) - endfunction(add_ydb_test) - function(add_ydb_test_aux name bin) - add_toku_test_aux(ydb ${name} ${bin} ${ARGN}) - endfunction(add_ydb_test_aux) - - function(add_ydb_helgrind_test bin) - add_helgrind_test(ydb helgrind_${bin} $ ${ARGN}) - endfunction(add_ydb_helgrind_test) - function(add_ydb_drd_test_aux name bin) - add_drd_test(ydb ${name} $ ${ARGN}) - endfunction(add_ydb_drd_test_aux) - function(add_ydb_drd_test bin) - add_ydb_drd_test_aux(drd_${bin} ${bin} ${ARGN}) - endfunction(add_ydb_drd_test) - - file(GLOB transparent_upgrade_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" upgrade*.cc) - - file(GLOB tdb_dontrun_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" bdb-simple-deadlock*.cc) - string(REGEX REPLACE "\\.cc(;|$)" "\\1" tdb_dontrun_tests "${tdb_dontrun_srcs}") - - file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc) - list(REMOVE_ITEM srcs ${transparent_upgrade_srcs}) - - set(recover_srcs test_log2.cc test_log3.cc test_log4.cc test_log5.cc test_log6.cc test_log7.cc test_log8.cc test_log9.cc test_log10.cc) - file(GLOB abortrecover_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" recover-*.cc) - file(GLOB loader_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" loader-*.cc) - file(GLOB stress_test_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" test_stress*.cc) - string(REGEX REPLACE "\\.cc(;|$)" ".recover\\1" recover_tests "${recover_srcs}") - string(REGEX REPLACE "\\.cc(;|$)" ".abortrecover\\1" abortrecover_tests "${abortrecover_srcs}") - string(REGEX REPLACE "\\.cc(;|$)" ".loader\\1" loader_tests "${loader_srcs}") - string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" stress_tests "${stress_test_srcs}") - - set(tdb_srcs ${srcs}) - list(REMOVE_ITEM tdb_srcs ${tdb_dontrun_srcs}) - string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_bins "${tdb_srcs}") - list(REMOVE_ITEM tdb_srcs ${abortrecover_srcs} ${loader_srcs}) - string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_tests "${tdb_srcs}") - - if(BDB_FOUND) - set(bdb_dontrun_srcs - backwards_10_each_le_and_msg - blackhole - blocking-prelock-range - blocking-set-range-reverse-0 - blocking-table-lock - bug1381 - bug627 - cachetable-race - checkpoint_1 - checkpoint_callback - checkpoint_stress - cursor-isolation - cursor-set-del-rmw - cursor-set-range-rmw - db-put-simple-deadlock - del-simple - del-multiple - del-multiple-huge-primary-row - del-multiple-srcdb - directory_lock - diskfull - dump-env - env-put-multiple - env_startup - execute-updates - filesize - helgrind1 - helgrind2 - helgrind3 - hotindexer-bw - hotindexer-error-callback - hotindexer-insert-committed-optimized - 
hotindexer-insert-committed - hotindexer-insert-provisional - hotindexer-lock-test - hotindexer-multiclient - hotindexer-nested-insert-committed - hotindexer-put-abort - hotindexer-put-commit - hotindexer-put-multiple - hotindexer-simple-abort - hotindexer-simple-abort-put - hotindexer-undo-do-test - hotindexer-with-queries - hot-optimize-table-tests - insert-dup-prelock - isolation - isolation-read-committed - keyrange - keyrange-merge - last-verify-time - loader-cleanup-test - loader-create-abort - loader-create-close - loader-dup-test - loader-no-puts - loader-reference-test - loader-stress-del - loader-stress-test - loader-tpch-load - lock-pressure - manyfiles - maxsize-for-loader - multiprocess - mvcc-create-table - mvcc-many-committed - mvcc-read-committed - perf_checkpoint_var - perf_child_txn - perf_cursor_nop - perf_iibench - perf_insert - perf_insert_multiple - perf_malloc_free - perf_nop - perf_ptquery - perf_ptquery2 - perf_rangequery - perf_read_write - perf_txn_single_thread - perf_xmalloc_free - prelock-read-read - prelock-read-write - prelock-write-read - prelock-write-write - print_engine_status - powerfail - preload-db - preload-db-nested - progress - put-multiple - queries_with_deletes - recover-2483 - recover-3113 - recover-5146 - recover-compare-db - recover-compare-db-descriptor - recover-del-multiple - recover-del-multiple-abort - recover-del-multiple-srcdb-fdelete-all - recover-delboth-after-checkpoint - recover-delboth-checkpoint - recover-descriptor - recover-descriptor2 - recover-descriptor3 - recover-descriptor4 - recover-descriptor5 - recover-descriptor6 - recover-descriptor7 - recover-descriptor8 - recover-descriptor9 - recover-descriptor10 - recover-descriptor11 - recover-descriptor12 - recover-fclose-in-checkpoint - recover-fcreate-basementnodesize - recover-flt1 - recover-flt2 - recover-flt3 - recover-flt4 - recover-flt5 - recover-flt6 - recover-flt7 - recover-flt8 - recover-flt9 - recover-flt10 - recover-hotindexer-simple-abort-put - recover-loader-test - recover-lsn-filter-multiple - recover-put-multiple - recover-put-multiple-abort - recover-put-multiple-fdelete-all - recover-put-multiple-fdelete-some - recover-put-multiple-srcdb-fdelete-all - recover-split-checkpoint - recover-tablelock - recover-test-logsuppress - recover-test-logsuppress-put - recover-test_stress1 - recover-test_stress2 - recover-test_stress3 - recover-test_stress_openclose - recover-upgrade-db-descriptor-multihandle - recover-upgrade-db-descriptor - recover-update-multiple - recover-update-multiple-abort - recover-update_aborts - recover-update_aborts_before_checkpoint - recover-update_aborts_before_close - recover-update_changes_values - recover-update_changes_values_before_checkpoint - recover-update_changes_values_before_close - recover-update_broadcast_aborts - recover-update_broadcast_aborts2 - recover-update_broadcast_aborts3 - recover-update_broadcast_aborts_before_checkpoint - recover-update_broadcast_aborts_before_close - recover-update_broadcast_changes_values - recover-update_broadcast_changes_values2 - recover-update_broadcast_changes_values3 - recover-update_broadcast_changes_values_before_checkpoint - recover-update_broadcast_changes_values_before_close - recover-update_changes_values_before_close - recovery_fileops_stress - recovery_fileops_unit - recovery_stress - redirect - replace-into-write-lock - root_fifo_2 - root_fifo_32 - root_fifo_41 - seqinsert - shutdown-3344 - stat64 - stat64-create-modify-times - stat64_flatten - stat64-null-txn - stat64-root-changes - 
stress-gc - stress-gc2 - test-xa-prepare - test1324 - test1572 - test3219 - test3522 - test3522b - test3529 - test_3645 - test_3529_insert_2 - test_3529_table_lock - test_3755 - test_4015 - test_4368 - test_4657 - test_5015 - test_5469 - test-5138 - test938c - test_abort1 - test_abort4 - test_abort5 - test_blobs_leaf_split - test_bulk_fetch - test_compression_methods - test_cmp_descriptor - test_db_change_pagesize - test_db_change_xxx - test_cursor_delete_2119 - test_db_descriptor - test_db_descriptor_named_db - test_db_txn_locks_read_uncommitted - test_get_max_row_size - test_large_update_broadcast_small_cachetable - test_locktree_close - test_logflush - test_multiple_checkpoints_block_commit - test_query - test_redirect_func - test_row_size_supported - test_stress0 - test_stress1 - test_stress2 - test_stress3 - test_stress4 - test_stress5 - test_stress6 - test_stress7 - test_stress_openclose - test_stress_with_verify - test_stress_hot_indexing - test_transactional_descriptor - test_trans_desc_during_chkpt - test_trans_desc_during_chkpt2 - test_trans_desc_during_chkpt3 - test_trans_desc_during_chkpt4 - test_txn_abort6 - test_txn_abort8 - test_txn_abort9 - test_txn_close_open_commit - test_txn_commit8 - test_txn_nested1 - test_txn_nested2 - test_txn_nested3 - test_txn_nested4 - test_txn_nested5 - test_update_abort_works - test_update_calls_back - test_update_can_delete_elements - test_update_changes_values - test_update_nonexistent_keys - test_update_previously_deleted - test_update_stress - test_update_txn_snapshot_works_concurrently - test_update_txn_snapshot_works_correctly_with_deletes - test_update_broadcast_abort_works - test_update_broadcast_calls_back - test_update_broadcast_can_delete_elements - test_update_broadcast_changes_values - test_update_broadcast_previously_deleted - test_update_broadcast_stress - test_update_broadcast_update_fun_has_choices - test_update_broadcast_with_empty_table - test_update_broadcast_indexer - test_update_broadcast_loader - test_update_broadcast_nested_updates - test_update_nested_updates - test_update_with_empty_table - test_updates_single_key - txn-ignore - transactional_fileops - update-multiple-data-diagonal - update-multiple-key0 - update-multiple-nochange - update-multiple-with-indexer - update - upgrade_simple - upgrade-test-1 - upgrade-test-2 - upgrade-test-3 - upgrade-test-4 - upgrade-test-5 - upgrade-test-6 - upgrade-test-7 - zombie_db - ) - set(bdb_srcs ${srcs}) - string(REGEX REPLACE "\\.cc(;|$)" "\\1" bdb_testbases "${bdb_srcs}") - list(REMOVE_ITEM bdb_testbases ${bdb_dontrun_srcs}) - string(REGEX REPLACE "(.)(;|$)" "\\1.bdb\\2" bdb_tests "${bdb_testbases}") - set(bdb_bins ${bdb_tests}) - endif() - - set(tdb_tests_that_should_fail - test_db_no_env.tdb - test_log8.recover - test_log9.recover - test_log10.recover - recover-missing-dbfile.abortrecover - recover-missing-dbfile-2.abortrecover - loader-tpch-load.loader - ) - - ## #5138 only reproduces when using the static library. 
- list(REMOVE_ITEM tdb_bins test-5138.tdb) - add_executable(test-5138.tdb test-5138) - target_link_libraries(test-5138.tdb ${LIBTOKUDB}_static z ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS}) - set_property(TARGET test-5138.tdb APPEND PROPERTY - COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1") - add_space_separated_property(TARGET test-5138.tdb COMPILE_FLAGS -fvisibility=hidden) - add_ydb_test(test-5138.tdb) - - foreach(bin ${tdb_bins}) - get_filename_component(base ${bin} NAME_WE) - - add_executable(${base}.tdb ${base}) - # Some of the symbols in util may not be exported properly by - # libtokudb.so. - # We link the test with util directly so that the test code itself can use - # some of those things (i.e. kibbutz in the threaded tests). - target_link_libraries(${base}.tdb util ${LIBTOKUDB} ${LIBTOKUPORTABILITY}) - set_property(TARGET ${base}.tdb APPEND PROPERTY - COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1") - add_space_separated_property(TARGET ${base}.tdb COMPILE_FLAGS -fvisibility=hidden) - endforeach(bin) - - if(BDB_FOUND) - foreach(bin ${bdb_bins}) - get_filename_component(base ${bin} NAME_WE) - - add_executable(${base}.bdb ${base}) - set_property(TARGET ${base}.bdb APPEND PROPERTY - COMPILE_DEFINITIONS "USE_BDB;IS_TDB=0;TOKU_ALLOW_DEPRECATED") - set_target_properties(${base}.bdb PROPERTIES - INCLUDE_DIRECTORIES "${BDB_INCLUDE_DIR};${CMAKE_CURRENT_BINARY_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../portability;${CMAKE_CURRENT_SOURCE_DIR}/../..") - target_link_libraries(${base}.bdb ${LIBTOKUPORTABILITY} ${BDB_LIBRARIES}) - add_space_separated_property(TARGET ${base}.bdb COMPILE_FLAGS -fvisibility=hidden) - endforeach(bin) - endif() - - foreach(bin loader-cleanup-test.tdb diskfull.tdb) - set_property(TARGET ${bin} APPEND PROPERTY - COMPILE_DEFINITIONS DONT_DEPRECATE_WRITES) - endforeach(bin) - - macro(declare_custom_tests) - foreach(test ${ARGN}) - list(REMOVE_ITEM tdb_tests ${test}) - endforeach(test) - endmacro(declare_custom_tests) - - declare_custom_tests(test1426.tdb) - if(BDB_FOUND) - macro(declare_custom_bdb_tests) - foreach(test ${ARGN}) - list(REMOVE_ITEM bdb_tests ${test}) - endforeach(test) - endmacro(declare_custom_bdb_tests) - - declare_custom_bdb_tests(test1426.bdb) - configure_file(run_test1426.sh . COPYONLY) - add_test(NAME ydb/test1426.tdb - COMMAND run_test1426.sh - $ $ - "test1426.tdb.ctest-data" "test1426.bdb.ctest-data" - $ "${BDB_INCLUDE_DIR}/../bin/db_dump") - add_dependencies(test1426.tdb tokudb_dump) - endif() - - string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" recover_would_be_tdb_tests "${recover_srcs}") - declare_custom_tests(${recover_would_be_tdb_tests}) - - declare_custom_tests(powerfail.tdb) - add_test(ydb/powerfail.tdb echo must run powerfail by hand) - - declare_custom_tests(checkpoint_stress.tdb) - configure_file(run_checkpoint_stress_test.sh . COPYONLY) - add_test(NAME ydb/checkpoint_stress.tdb - COMMAND run_checkpoint_stress_test.sh $ 5 5001 137) - setup_toku_test_properties(ydb/checkpoint_stress.tdb checkpoint_stress.tdb) - - configure_file(run_recover_stress_test.sh . COPYONLY) - add_test(NAME ydb/recover_stress.tdb - COMMAND run_recover_stress_test.sh $ 5 5001 137) - setup_toku_test_properties(ydb/recover_stress.tdb recover_stress.tdb) - - declare_custom_tests(diskfull.tdb) - configure_file(run_diskfull_test.sh . 
COPYONLY) - add_test(NAME ydb/diskfull.tdb - COMMAND run_diskfull_test.sh $ 134) - setup_toku_test_properties(ydb/diskfull.tdb diskfull.tdb) - - declare_custom_tests(recovery_fileops_unit.tdb) - configure_file(run_recovery_fileops_unit.sh . COPYONLY) - file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/recovery_fileops_unit_dir") - foreach(ov c d r) - - if (ov STREQUAL c) - set(gset 0) - set(hset 0) - else () - set(gset 0 1 2 3 4 5) - set(hset 0 1) - endif () - - foreach(av 0 1) - foreach(bv 0 1) - - if (bv) - set(dset 0 1) - set(eset 0 1) - else () - set(dset 0) - set(eset 0) - endif () - - foreach(cv 0 1 2) - foreach(dv ${dset}) - foreach(ev ${eset}) - foreach(fv 0 1) - foreach(gv ${gset}) - foreach(hv ${hset}) - - if ((NOT ov STREQUAL c) AND (NOT cv) AND ((NOT bv) OR (NOT ev) OR (dv))) - set(iset 0 1) - else () - set(iset 0) - endif () - - foreach(iv ${iset}) - set(testname "ydb/recovery_fileops_unit.${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}") - set(envdir "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}") - set(errfile "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}.ctest-errors") - add_test(NAME ${testname} - COMMAND run_recovery_fileops_unit.sh $ ${errfile} 137 - -O ${ov} -A ${av} -B ${bv} -C ${cv} -D ${dv} -E ${ev} -F ${fv} -G ${gv} -H ${hv} -I ${iv} - ) - setup_toku_test_properties(${testname} ${envdir}) - set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${errfile}") - endforeach(iv) - endforeach(hv) - endforeach(gv) - endforeach(fv) - endforeach(ev) - endforeach(dv) - endforeach(cv) - endforeach(bv) - endforeach(av) - endforeach(ov) - - if (NOT (CMAKE_SYSTEM_NAME MATCHES Darwin OR - (CMAKE_CXX_COMPILER_ID STREQUAL Intel AND - CMAKE_BUILD_TYPE STREQUAL Release) - OR USE_GCOV)) - declare_custom_tests(helgrind1.tdb) - add_test(NAME ydb/helgrind_helgrind1.tdb - COMMAND valgrind --quiet --tool=helgrind --error-exitcode=1 --log-file=helgrind1.tdb.deleteme $) - setup_toku_test_properties(ydb/helgrind_helgrind1.tdb helgrind_helgrind1.tdb) - set_tests_properties(ydb/helgrind_helgrind1.tdb PROPERTIES WILL_FAIL TRUE) - endif() - declare_custom_tests(helgrind2.tdb) - declare_custom_tests(helgrind3.tdb) - add_ydb_helgrind_test(helgrind2.tdb) - add_ydb_helgrind_test(helgrind3.tdb) - - declare_custom_tests(test_groupcommit_count.tdb) - add_ydb_test(test_groupcommit_count.tdb -n 1) - add_ydb_helgrind_test(test_groupcommit_count.tdb -n 1) - add_ydb_drd_test(test_groupcommit_count.tdb -n 1) - - add_ydb_drd_test(test_4015.tdb) - - # We link the locktree so that stress test 0 can call some - # functions (ie: lock escalation) directly. - target_link_libraries(test_stress0.tdb locktree) - - # Set up default stress tests and drd tests. Exclude hot_index. 
- foreach(src ${stress_test_srcs}) - if(NOT ${src} MATCHES hot_index) - get_filename_component(base ${src} NAME_WE) - set(test ${base}.tdb) - - if (${src} MATCHES test_stress0) - add_ydb_test(${test} --num_elements 512 --num_seconds 1000 --join_timeout 600) - else () - add_ydb_test(${test} --num_elements 150000 --num_seconds 1000 --join_timeout 600) - endif () - - add_ydb_drd_test_aux(drd_tiny_${test} ${test} --num_seconds 5 --num_elements 150 --join_timeout 3000) - set_tests_properties(ydb/drd_tiny_${test} PROPERTIES TIMEOUT 3600) - - add_test(ydb/drd_mid_${test}/prepare ${test} --only_create --num_elements 10000) - setup_toku_test_properties(ydb/drd_mid_${test}/prepare drd_mid_${test}) - add_ydb_drd_test_aux(drd_mid_${test} ${test} --only_stress --num_elements 10000 --num_seconds 100 --join_timeout 14400) - set_tests_properties(ydb/drd_mid_${test} PROPERTIES - DEPENDS ydb/drd_mid_${test}/prepare - REQUIRED_FILES "drd_mid_${test}.ctest-data" - TIMEOUT 15000 - ) - - add_test(ydb/drd_large_${test}/prepare ${test} --only_create --num_elements 150000) - setup_toku_test_properties(ydb/drd_large_${test}/prepare drd_large_${test}) - add_ydb_drd_test_aux(drd_large_${test} ${test} --only_stress --num_elements 150000 --num_seconds 1000 --join_timeout 28800) - set_tests_properties(ydb/drd_large_${test} PROPERTIES - DEPENDS ydb/drd_large_${test}/prepare - REQUIRED_FILES "drd_large_${test}.ctest-data" - TIMEOUT 30000 - ) - endif() - endforeach(src) - - # Set up upgrade tests. Exclude test_stress_openclose - foreach(src ${stress_test_srcs}) - if (NOT ${src} MATCHES test_stress_openclose) - get_filename_component(base ${src} NAME_WE) - set(test ${base}.tdb) - - foreach(oldver 4.2.0 5.0.8 5.2.7 6.0.0 6.1.0 6.5.1 6.6.3) - set(versiondir ${TOKU_SVNROOT}/tokudb.data/old-stress-test-envs/${oldver}) - if (NOT EXISTS "${versiondir}/") - message(WARNING "Test data for upgrade tests for version ${oldver} doesn't exist, check out ${versiondir}/*-2000-dir first or upgrade stress tests may fail.") - endif () - foreach(p_or_s pristine stressed) - if (NOT (${base} MATCHES test_stress4 AND ${p_or_s} MATCHES stressed)) - foreach(size 2000) - set(oldenvdir "${versiondir}/saved${p_or_s}-${size}-dir") - set(envdirbase "${upgrade}_${oldver}_${p_or_s}_${size}_${test}") - set(envdir "${envdirbase}.ctest-data") - set(testnamebase ydb/${test}/upgrade/${oldver}/${p_or_s}/${size}) - - add_test(NAME ${testnamebase}/remove - COMMAND ${CMAKE_COMMAND} -E remove_directory "${envdir}") - add_test(NAME ${testnamebase}/copy - COMMAND ${CMAKE_COMMAND} -E copy_directory "${oldenvdir}" "${envdir}") - set_tests_properties(${testnamebase}/copy PROPERTIES - DEPENDS ${testnamebase}/remove - REQUIRED_FILES "${oldenvdir}") - - add_test(NAME ${testnamebase} - COMMAND ${test} --only_stress --num_elements ${size} --num_seconds 600 --join_timeout 7200) - setup_toku_test_properties(${testnamebase} "${envdirbase}") - set_tests_properties(${testnamebase} PROPERTIES - DEPENDS ${testnamebase}/copy - REQUIRED_FILES "${envdir}" - TIMEOUT 10800) - endforeach(size) - endif () - endforeach(p_or_s) - endforeach(oldver) - endif () - endforeach(src) - - if (NOT EXISTS "${TOKU_SVNROOT}/tokudb.data/test_5902/") - message(WARNING "Test data for dump-env.tdb doesn't exist, check out ${TOKU_SVNROOT}/tokudb.data/test_5902 first or dump-env.tdb may fail.") - endif () - declare_custom_tests(dump-env.tdb) - add_test(NAME ydb/dump-env.tdb/remove - COMMAND ${CMAKE_COMMAND} -E remove_directory "dump-env.tdb.ctest-data") - add_test(NAME ydb/dump-env.tdb/copy - COMMAND 
${CMAKE_COMMAND} -E copy_directory "${TOKU_SVNROOT}/tokudb.data/test_5902" "dump-env.tdb.ctest-data") - set_tests_properties(ydb/dump-env.tdb/copy PROPERTIES - DEPENDS ydb/dump-env.tdb/remove - REQUIRED_FILES "${TOKU_SVNROOT}/tokudb.data/test_5902") - add_ydb_test(dump-env.tdb) - set_tests_properties(ydb/dump-env.tdb PROPERTIES - DEPENDS ydb/dump-env.tdb/copy - REQUIRED_FILES "dump-env.tdb.ctest-data") - - ## for some reason this rule doesn't run with the makefile and it crashes with this rule, so I'm disabling this special case - #declare_custom_tests(test_thread_stack.tdb) - #add_custom_command(OUTPUT run_test_thread_stack.sh - # COMMAND install "${CMAKE_CURRENT_SOURCE_DIR}/run_test_thread_stack.sh" "${CMAKE_CFG_INTDIR}" - # MAIN_DEPENDENCY run_test_thread_stack.sh - # VERBATIM) - #add_custom_target(install_run_test_thread_stack.sh ALL DEPENDS run_test_thread_stack.sh) - #add_test(ydb/test_thread_stack.tdb run_test_thread_stack.sh "${CMAKE_CFG_INTDIR}/test_thread_stack.tdb") - - declare_custom_tests(root_fifo_41.tdb) - foreach(num RANGE 1 100) - add_ydb_test_aux(root_fifo_41_${num}_populate.tdb root_fifo_41.tdb -n ${num} -populate) - add_ydb_test_aux(root_fifo_41_${num}_nopopulate.tdb root_fifo_41.tdb -n ${num}) - endforeach(num) - - add_ydb_test_aux(test3039_small.tdb test3039.tdb -n 1000) - - declare_custom_tests(test_abort4.tdb) - foreach(num RANGE -1 19) - add_ydb_test_aux(test_abort4_${num}_0.tdb test_abort4.tdb -c 0 -l ${num}) - add_ydb_test_aux(test_abort4_${num}_1.tdb test_abort4.tdb -c 1 -l ${num}) - endforeach(num) - - set(old_loader_upgrade_data "${TOKU_SVNROOT}/tokudb.data/env_preload.4.2.0.emptydictionaries.cleanshutdown") - if (NOT EXISTS "${old_loader_upgrade_data}/") - message(WARNING "Test data for loader upgrade tests doesn't exist, check out ${old_loader_upgrade_data} first, or loader-stress-test3.tdb may fail.") - endif () - function(add_loader_upgrade_test name bin) - add_test(NAME ydb/${name}/remove - COMMAND ${CMAKE_COMMAND} -E remove_directory "${name}.ctest-data") - add_test(NAME ydb/${name}/copy - COMMAND ${CMAKE_COMMAND} -E copy_directory "${old_loader_upgrade_data}" "${name}.ctest-data") - set_tests_properties(ydb/${name}/copy PROPERTIES - DEPENDS ydb/${name}/remove - REQUIRED_FILES "${old_loader_upgrade_data}") - add_ydb_test_aux(${name} ${bin} -u ${ARGN}) - set_tests_properties(ydb/${name} PROPERTIES - DEPENDS ydb/${name}/copy - REQUIRED_FILES "${name}.ctest-data") - endfunction(add_loader_upgrade_test) - - list(REMOVE_ITEM loader_tests loader-stress-test.loader) - add_ydb_test_aux(loader-stress-test0.tdb loader-stress-test.tdb -c) - add_ydb_test_aux(loader-stress-test1.tdb loader-stress-test.tdb -c -p) - add_ydb_test_aux(loader-stress-test2.tdb loader-stress-test.tdb -r 5000 -s) - add_loader_upgrade_test(loader-stress-test3.tdb loader-stress-test.tdb -c) - add_ydb_test_aux(loader-stress-test4.tdb loader-stress-test.tdb -r 10000000 -c) - add_ydb_test_aux(loader-stress-test0z.tdb loader-stress-test.tdb -c -z) - add_ydb_test_aux(loader-stress-test1z.tdb loader-stress-test.tdb -c -p -z) - add_ydb_test_aux(loader-stress-test2z.tdb loader-stress-test.tdb -r 5000 -s -z) - add_loader_upgrade_test(loader-stress-test3z.tdb loader-stress-test.tdb -c -z) - add_ydb_test_aux(loader-stress-test4z.tdb loader-stress-test.tdb -r 500000 -c -z --valsize 28) - - list(REMOVE_ITEM loader_tests loader-dup-test.loader) - add_ydb_test_aux(loader-dup-test0.tdb loader-dup-test.tdb) - add_ydb_test_aux(loader-dup-test1.tdb loader-dup-test.tdb -d 1 -r 500000) - 
add_ydb_test_aux(loader-dup-test2.tdb loader-dup-test.tdb -d 1 -r 1000000) - add_ydb_test_aux(loader-dup-test3.tdb loader-dup-test.tdb -d 1 -s -r 100) - add_ydb_test_aux(loader-dup-test4.tdb loader-dup-test.tdb -d 1 -s -r 1000) - add_ydb_test_aux(loader-dup-test5.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E) - add_ydb_test_aux(loader-dup-test0z.tdb loader-dup-test.tdb -z) - add_ydb_test_aux(loader-dup-test1z.tdb loader-dup-test.tdb -d 1 -r 500000 -z) - add_ydb_test_aux(loader-dup-test2z.tdb loader-dup-test.tdb -d 1 -r 1000000 -z) - add_ydb_test_aux(loader-dup-test3z.tdb loader-dup-test.tdb -d 1 -s -r 100 -z) - add_ydb_test_aux(loader-dup-test4z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -z) - add_ydb_test_aux(loader-dup-test5z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E -z) - - ## as part of #4503, we took out test 1 and 3 - list(REMOVE_ITEM loader_tests loader-cleanup-test.loader) - add_ydb_test_aux(loader-cleanup-test0.tdb loader-cleanup-test.tdb -s -r 800) - #add_ydb_test_aux(loader-cleanup-test1.tdb loader-cleanup-test.tdb -s -r 800 -p) - add_ydb_test_aux(loader-cleanup-test2.tdb loader-cleanup-test.tdb -s -r 8000) - #add_ydb_test_aux(loader-cleanup-test3.tdb loader-cleanup-test.tdb -s -r 8000 -p) - add_ydb_test_aux(loader-cleanup-test0z.tdb loader-cleanup-test.tdb -s -r 800 -z) - add_ydb_test_aux(loader-cleanup-test2z.tdb loader-cleanup-test.tdb -s -r 8000 -z) - - declare_custom_tests(keyrange.tdb) - add_ydb_test_aux(keyrange-get0.tdb keyrange.tdb --get 0) - add_ydb_test_aux(keyrange-get1.tdb keyrange.tdb --get 1) - if (0) - add_ydb_test_aux(keyrange-random-get0.tdb keyrange.tdb --get 0 --random_keys 1) - add_ydb_test_aux(keyrange-random-get1.tdb keyrange.tdb --get 1 --random_keys 1) - else () - message(WARNING "TODO(leif): re-enable keyrange tests, see #5666") - endif () - add_ydb_test_aux(keyrange-loader-get0.tdb keyrange.tdb --get 0 --loader 1) - add_ydb_test_aux(keyrange-loader-get1.tdb keyrange.tdb --get 1 --loader 1) - - declare_custom_tests(maxsize-for-loader.tdb) - add_ydb_test_aux(maxsize-for-loader-A.tdb maxsize-for-loader.tdb -f -c) - add_ydb_test_aux(maxsize-for-loader-B.tdb maxsize-for-loader.tdb -c) - add_ydb_test_aux(maxsize-for-loader-Az.tdb maxsize-for-loader.tdb -f -z -c) - add_ydb_test_aux(maxsize-for-loader-Bz.tdb maxsize-for-loader.tdb -z -c) - - declare_custom_tests(hotindexer-undo-do-test.tdb) - file(GLOB hotindexer_tests RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.test") - file(GLOB hotindexer_results RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.result") - configure_file(run-hotindexer-undo-do-tests.bash . COPYONLY) - foreach(result ${hotindexer_results}) - configure_file(${result} ${result} COPYONLY) - endforeach(result) - foreach(test ${hotindexer_tests}) - configure_file(${test} ${test} COPYONLY) - add_test(NAME ydb/${test} COMMAND run-hotindexer-undo-do-tests.bash ${test}) - setup_toku_test_properties(ydb/${test} ${test}) - endforeach() - - foreach(test ${tdb_tests} ${bdb_tests}) - add_ydb_test(${test}) - endforeach(test) - - configure_file(run_recover_test.sh . COPYONLY) - foreach(recover_test ${recover_tests}) - get_filename_component(base ${recover_test} NAME_WE) - add_test(NAME ydb/${recover_test} - COMMAND run_recover_test.sh $ "${recover_test}.ctest-data" $ $) - setup_toku_test_properties(ydb/${recover_test} ${recover_test}) - endforeach(recover_test) - - configure_file(run_abortrecover_test.sh . 
COPYONLY) - foreach(abortrecover_test ${abortrecover_tests}) - get_filename_component(base ${abortrecover_test} NAME_WE) - add_test(NAME ydb/${abortrecover_test} - COMMAND run_abortrecover_test.sh $) - setup_toku_test_properties(ydb/${abortrecover_test} ${abortrecover_test}) - endforeach(abortrecover_test) - ## alternate implementation, doesn't work because the abort phase crashes and we can't tell cmake that's expected - # foreach(abortrecover_test ${abortrecover_tests}) - # get_filename_component(base ${abortrecover_test} NAME_WE) - # set(test ${base}.tdb) - # add_test(NAME ydb/${test}/abort - # COMMAND ${test} --test) - # setup_toku_test_properties(ydb/${test}/abort ${abortrecover_test}) - # set_tests_properties(ydb/${test}/abort PROPERTIES WILL_FAIL TRUE) - - # add_test(NAME ydb/${test}/recover - # COMMAND ${test} --recover) - # setup_toku_test_properties(ydb/${test}/recover ${abortrecover_test}) - # set_tests_properties(ydb/${test}/recover PROPERTIES - # DEPENDS ydb/${test}/abort - # REQUIRED_FILES "${abortrecover_test}.ctest-data") - # endforeach(abortrecover_test) - - foreach(loader_test ${loader_tests}) - get_filename_component(base ${loader_test} NAME_WE) - add_ydb_test_aux(${base}.nop.loader ${base}.tdb) - add_ydb_test_aux(${base}.p.loader ${base}.tdb -p) - add_ydb_test_aux(${base}.comp.loader ${base}.tdb -z) - if("${tdb_tests_that_should_fail}" MATCHES "${base}.loader") - list(REMOVE_ITEM tdb_tests_that_should_fail ${base}.loader) - list(APPEND tdb_tests_that_should_fail ${base}.nop.loader ${base}.p.loader ${base}.comp.loader) - endif() - endforeach(loader_test) - - set(tdb_tests_that_should_fail "ydb/${tdb_tests_that_should_fail}") - string(REGEX REPLACE ";" ";ydb/" tdb_tests_that_should_fail "${tdb_tests_that_should_fail}") - set_tests_properties(${tdb_tests_that_should_fail} PROPERTIES WILL_FAIL TRUE) - - ## give some tests, that time out normally, 1 hour to complete - set(long_tests - ydb/checkpoint_1.tdb - ydb/drd_test_groupcommit_count.tdb - ydb/env-put-multiple.tdb - ydb/filesize.tdb - ydb/loader-cleanup-test0.tdb - ydb/loader-cleanup-test0z.tdb - ydb/manyfiles.tdb - ydb/recover-loader-test.abortrecover - ydb/recovery_fileops_stress.tdb - ydb/root_fifo_1.tdb - ydb/root_fifo_2.tdb - ydb/root_fifo_31.tdb - ydb/root_fifo_32.tdb - ydb/shutdown-3344.tdb - ydb/stat64-create-modify-times.tdb - ydb/test1572.tdb - ydb/test_abort4_19_0.tdb - ydb/test_abort4_19_1.tdb - ydb/test_abort5.tdb - ydb/test_archive1.tdb - ydb/test_logmax.tdb - ydb/test_query.tdb - ydb/test_txn_abort5.tdb - ydb/test_txn_abort5a.tdb - ydb/test_txn_abort6.tdb - ydb/test_txn_nested2.tdb - ydb/test_txn_nested4.tdb - ydb/test_txn_nested5.tdb - ydb/test_update_broadcast_stress.tdb - ) - if (BDB_FOUND) - list(APPEND long_tests - ydb/root_fifo_1.bdb - ydb/root_fifo_31.bdb - ydb/rowsize.bdb - ydb/test_log10.bdb - ydb/test_log7.bdb - ydb/test_logmax.bdb - ) - endif (BDB_FOUND) - set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600) - ## some take even longer, with valgrind - set(extra_long_tests - ydb/drd_test_4015.tdb - ydb/hotindexer-with-queries.tdb - ydb/hot-optimize-table-tests.tdb - ydb/loader-cleanup-test2.tdb - ydb/loader-cleanup-test2z.tdb - ydb/loader-dup-test0.tdb - ydb/loader-stress-del.nop.loader - ydb/loader-stress-del.p.loader - ydb/loader-stress-del.comp.loader - ydb/test3039.tdb - ydb/test_update_stress.tdb - ) - if (BDB_FOUND) - list(APPEND extra_long_tests - ydb/test_groupcommit_count.bdb - ) - endif (BDB_FOUND) - set_tests_properties(${extra_long_tests} PROPERTIES TIMEOUT 7200) - ## these 
really take a long time with valgrind - set(phenomenally_long_tests - ydb/checkpoint_stress.tdb - ydb/loader-stress-test4.tdb - ydb/loader-stress-test4z.tdb - ydb/recover_stress.tdb - ydb/test3529.tdb - ) - if (BDB_FOUND) - list(APPEND phenomenally_long_tests - ydb/test1426.tdb - ) - endif (BDB_FOUND) - set_tests_properties(${phenomenally_long_tests} PROPERTIES TIMEOUT 14400) -endif(BUILD_TESTING OR BUILD_SRC_TESTS) +set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE DONT_DEPRECATE_ERRNO) + +if(BUILD_TESTING OR BUILD_SRC_TESTS) + function(add_ydb_test bin) + add_toku_test(ydb ${bin} ${ARGN}) + endfunction(add_ydb_test) + function(add_ydb_test_aux name bin) + add_toku_test_aux(ydb ${name} ${bin} ${ARGN}) + endfunction(add_ydb_test_aux) + + function(add_ydb_helgrind_test bin) + add_helgrind_test(ydb helgrind_${bin} $ ${ARGN}) + endfunction(add_ydb_helgrind_test) + function(add_ydb_drd_test_aux name bin) + add_drd_test(ydb ${name} $ ${ARGN}) + endfunction(add_ydb_drd_test_aux) + function(add_ydb_drd_test bin) + add_ydb_drd_test_aux(drd_${bin} ${bin} ${ARGN}) + endfunction(add_ydb_drd_test) + + file(GLOB transparent_upgrade_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" upgrade*.cc) + + file(GLOB tdb_dontrun_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" bdb-simple-deadlock*.cc) + string(REGEX REPLACE "\\.cc(;|$)" "\\1" tdb_dontrun_tests "${tdb_dontrun_srcs}") + + file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc) + list(REMOVE_ITEM srcs ${transparent_upgrade_srcs}) + + set(recover_srcs test_log2.cc test_log3.cc test_log4.cc test_log5.cc test_log6.cc test_log7.cc test_log8.cc test_log9.cc test_log10.cc) + file(GLOB abortrecover_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" recover-*.cc) + file(GLOB loader_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" loader-*.cc) + file(GLOB stress_test_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" test_stress*.cc) + string(REGEX REPLACE "\\.cc(;|$)" ".recover\\1" recover_tests "${recover_srcs}") + string(REGEX REPLACE "\\.cc(;|$)" ".abortrecover\\1" abortrecover_tests "${abortrecover_srcs}") + string(REGEX REPLACE "\\.cc(;|$)" ".loader\\1" loader_tests "${loader_srcs}") + string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" stress_tests "${stress_test_srcs}") + + set(tdb_srcs ${srcs}) + list(REMOVE_ITEM tdb_srcs ${tdb_dontrun_srcs}) + string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_bins "${tdb_srcs}") + list(REMOVE_ITEM tdb_srcs ${abortrecover_srcs} ${loader_srcs}) + string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_tests "${tdb_srcs}") + + if(BDB_FOUND) + set(bdb_dontrun_srcs + backwards_10_each_le_and_msg + blackhole + blocking-prelock-range + blocking-set-range-reverse-0 + blocking-table-lock + bug1381 + bug627 + cachetable-race + checkpoint_1 + checkpoint_callback + checkpoint_stress + cursor-isolation + cursor-set-del-rmw + cursor-set-range-rmw + db-put-simple-deadlock + del-simple + del-multiple + del-multiple-huge-primary-row + del-multiple-srcdb + directory_lock + diskfull + dump-env + env-put-multiple + env_startup + execute-updates + filesize + helgrind1 + helgrind2 + helgrind3 + hotindexer-bw + hotindexer-error-callback + hotindexer-insert-committed-optimized + hotindexer-insert-committed + hotindexer-insert-provisional + hotindexer-lock-test + hotindexer-multiclient + hotindexer-nested-insert-committed + hotindexer-put-abort + hotindexer-put-commit + hotindexer-put-multiple + hotindexer-simple-abort + hotindexer-simple-abort-put + hotindexer-undo-do-test + hotindexer-with-queries + hot-optimize-table-tests + insert-dup-prelock + isolation + 
isolation-read-committed + keyrange + keyrange-merge + last-verify-time + loader-cleanup-test + loader-create-abort + loader-create-close + loader-dup-test + loader-no-puts + loader-reference-test + loader-stress-del + loader-stress-test + loader-tpch-load + lock-pressure + manyfiles + maxsize-for-loader + multiprocess + mvcc-create-table + mvcc-many-committed + mvcc-read-committed + perf_checkpoint_var + perf_child_txn + perf_cursor_nop + perf_iibench + perf_insert + perf_insert_multiple + perf_malloc_free + perf_nop + perf_ptquery + perf_ptquery2 + perf_rangequery + perf_read_txn + perf_read_txn_single_thread + perf_read_write + perf_txn_single_thread + perf_xmalloc_free + prelock-read-read + prelock-read-write + prelock-write-read + prelock-write-write + print_engine_status + powerfail + preload-db + preload-db-nested + progress + put-multiple + queries_with_deletes + recover-2483 + recover-3113 + recover-5146 + recover-compare-db + recover-compare-db-descriptor + recover-del-multiple + recover-del-multiple-abort + recover-del-multiple-srcdb-fdelete-all + recover-delboth-after-checkpoint + recover-delboth-checkpoint + recover-descriptor + recover-descriptor2 + recover-descriptor3 + recover-descriptor4 + recover-descriptor5 + recover-descriptor6 + recover-descriptor7 + recover-descriptor8 + recover-descriptor9 + recover-descriptor10 + recover-descriptor11 + recover-descriptor12 + recover-fclose-in-checkpoint + recover-fcreate-basementnodesize + recover-flt1 + recover-flt2 + recover-flt3 + recover-flt4 + recover-flt5 + recover-flt6 + recover-flt7 + recover-flt8 + recover-flt9 + recover-flt10 + recover-hotindexer-simple-abort-put + recover-loader-test + recover-lsn-filter-multiple + recover-put-multiple + recover-put-multiple-abort + recover-put-multiple-fdelete-all + recover-put-multiple-fdelete-some + recover-put-multiple-srcdb-fdelete-all + recover-split-checkpoint + recover-tablelock + recover-test-logsuppress + recover-test-logsuppress-put + recover-test_stress1 + recover-test_stress2 + recover-test_stress3 + recover-test_stress_openclose + recover-upgrade-db-descriptor-multihandle + recover-upgrade-db-descriptor + recover-update-multiple + recover-update-multiple-abort + recover-update_aborts + recover-update_aborts_before_checkpoint + recover-update_aborts_before_close + recover-update_changes_values + recover-update_changes_values_before_checkpoint + recover-update_changes_values_before_close + recover-update_broadcast_aborts + recover-update_broadcast_aborts2 + recover-update_broadcast_aborts3 + recover-update_broadcast_aborts_before_checkpoint + recover-update_broadcast_aborts_before_close + recover-update_broadcast_changes_values + recover-update_broadcast_changes_values2 + recover-update_broadcast_changes_values3 + recover-update_broadcast_changes_values_before_checkpoint + recover-update_broadcast_changes_values_before_close + recover-update_changes_values_before_close + recovery_fileops_stress + recovery_fileops_unit + recovery_stress + redirect + replace-into-write-lock + root_fifo_2 + root_fifo_32 + root_fifo_41 + seqinsert + shutdown-3344 + stat64 + stat64-create-modify-times + stat64_flatten + stat64-null-txn + stat64-root-changes + stress-gc + stress-gc2 + test-xa-prepare + test1324 + test1572 + test3219 + test3522 + test3522b + test3529 + test_3645 + test_3529_insert_2 + test_3529_table_lock + test_3755 + test_4015 + test_4368 + test_4657 + test_5015 + test_5469 + test-5138 + test938c + test_abort1 + test_abort4 + test_abort5 + test_blobs_leaf_split + test_bulk_fetch + 
test_compression_methods + test_cmp_descriptor + test_cursor_with_read_txn + test_db_change_pagesize + test_db_change_xxx + test_cursor_delete_2119 + test_db_descriptor + test_db_descriptor_named_db + test_db_txn_locks_read_uncommitted + test_get_max_row_size + test_large_update_broadcast_small_cachetable + test_locktree_close + test_logflush + test_multiple_checkpoints_block_commit + test_query + test_read_txn_invalid_ops + test_redirect_func + test_row_size_supported + test_simple_read_txn + test_stress0 + test_stress1 + test_stress2 + test_stress3 + test_stress4 + test_stress5 + test_stress6 + test_stress7 + test_stress_openclose + test_stress_with_verify + test_stress_hot_indexing + test_transactional_descriptor + test_trans_desc_during_chkpt + test_trans_desc_during_chkpt2 + test_trans_desc_during_chkpt3 + test_trans_desc_during_chkpt4 + test_txn_abort6 + test_txn_abort8 + test_txn_abort9 + test_txn_close_open_commit + test_txn_commit8 + test_txn_nested1 + test_txn_nested2 + test_txn_nested3 + test_txn_nested4 + test_txn_nested5 + test_update_abort_works + test_update_calls_back + test_update_can_delete_elements + test_update_changes_values + test_update_nonexistent_keys + test_update_previously_deleted + test_update_stress + test_update_txn_snapshot_works_concurrently + test_update_txn_snapshot_works_correctly_with_deletes + test_update_broadcast_abort_works + test_update_broadcast_calls_back + test_update_broadcast_can_delete_elements + test_update_broadcast_changes_values + test_update_broadcast_previously_deleted + test_update_broadcast_stress + test_update_broadcast_update_fun_has_choices + test_update_broadcast_with_empty_table + test_update_broadcast_indexer + test_update_broadcast_loader + test_update_broadcast_nested_updates + test_update_nested_updates + test_update_with_empty_table + test_updates_single_key + txn-ignore + transactional_fileops + update-multiple-data-diagonal + update-multiple-key0 + update-multiple-nochange + update-multiple-with-indexer + update + upgrade_simple + upgrade-test-1 + upgrade-test-2 + upgrade-test-3 + upgrade-test-4 + upgrade-test-5 + upgrade-test-6 + upgrade-test-7 + zombie_db + ) + set(bdb_srcs ${srcs}) + string(REGEX REPLACE "\\.cc(;|$)" "\\1" bdb_testbases "${bdb_srcs}") + list(REMOVE_ITEM bdb_testbases ${bdb_dontrun_srcs}) + string(REGEX REPLACE "(.)(;|$)" "\\1.bdb\\2" bdb_tests "${bdb_testbases}") + set(bdb_bins ${bdb_tests}) + endif() + + set(tdb_tests_that_should_fail + test_db_no_env.tdb + test_log8.recover + test_log9.recover + test_log10.recover + recover-missing-dbfile.abortrecover + recover-missing-dbfile-2.abortrecover + loader-tpch-load.loader + ) + + ## #5138 only reproduces when using the static library. + list(REMOVE_ITEM tdb_bins test-5138.tdb) + add_executable(test-5138.tdb test-5138) + target_link_libraries(test-5138.tdb ${LIBTOKUDB}_static z ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS}) + set_property(TARGET test-5138.tdb APPEND PROPERTY + COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1") + add_space_separated_property(TARGET test-5138.tdb COMPILE_FLAGS -fvisibility=hidden) + add_ydb_test(test-5138.tdb) + + foreach(bin ${tdb_bins}) + get_filename_component(base ${bin} NAME_WE) + + add_executable(${base}.tdb ${base}) + # Some of the symbols in util may not be exported properly by + # libtokudb.so. + # We link the test with util directly so that the test code itself can use + # some of those things (i.e. kibbutz in the threaded tests). 
+ target_link_libraries(${base}.tdb util ${LIBTOKUDB} ${LIBTOKUPORTABILITY}) + set_property(TARGET ${base}.tdb APPEND PROPERTY + COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1") + add_space_separated_property(TARGET ${base}.tdb COMPILE_FLAGS -fvisibility=hidden) + endforeach(bin) + + if(BDB_FOUND) + foreach(bin ${bdb_bins}) + get_filename_component(base ${bin} NAME_WE) + + add_executable(${base}.bdb ${base}) + set_property(TARGET ${base}.bdb APPEND PROPERTY + COMPILE_DEFINITIONS "USE_BDB;IS_TDB=0;TOKU_ALLOW_DEPRECATED") + set_target_properties(${base}.bdb PROPERTIES + INCLUDE_DIRECTORIES "${BDB_INCLUDE_DIR};${CMAKE_CURRENT_BINARY_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../portability;${CMAKE_CURRENT_SOURCE_DIR}/../..") + target_link_libraries(${base}.bdb ${LIBTOKUPORTABILITY} ${BDB_LIBRARIES}) + add_space_separated_property(TARGET ${base}.bdb COMPILE_FLAGS -fvisibility=hidden) + endforeach(bin) + endif() + + foreach(bin loader-cleanup-test.tdb diskfull.tdb) + set_property(TARGET ${bin} APPEND PROPERTY + COMPILE_DEFINITIONS DONT_DEPRECATE_WRITES) + endforeach(bin) + + macro(declare_custom_tests) + foreach(test ${ARGN}) + list(REMOVE_ITEM tdb_tests ${test}) + endforeach(test) + endmacro(declare_custom_tests) + + declare_custom_tests(test1426.tdb) + if(BDB_FOUND) + macro(declare_custom_bdb_tests) + foreach(test ${ARGN}) + list(REMOVE_ITEM bdb_tests ${test}) + endforeach(test) + endmacro(declare_custom_bdb_tests) + + declare_custom_bdb_tests(test1426.bdb) + configure_file(run_test1426.sh . COPYONLY) + add_test(NAME ydb/test1426.tdb + COMMAND run_test1426.sh + $ $ + "test1426.tdb.ctest-data" "test1426.bdb.ctest-data" + $ "${BDB_INCLUDE_DIR}/../bin/db_dump") + add_dependencies(test1426.tdb tokudb_dump) + endif() + + string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" recover_would_be_tdb_tests "${recover_srcs}") + declare_custom_tests(${recover_would_be_tdb_tests}) + + declare_custom_tests(powerfail.tdb) + add_test(ydb/powerfail.tdb echo must run powerfail by hand) + + declare_custom_tests(checkpoint_stress.tdb) + configure_file(run_checkpoint_stress_test.sh . COPYONLY) + add_test(NAME ydb/checkpoint_stress.tdb + COMMAND run_checkpoint_stress_test.sh $ 5 5001 137) + setup_toku_test_properties(ydb/checkpoint_stress.tdb checkpoint_stress.tdb) + + configure_file(run_recover_stress_test.sh . COPYONLY) + add_test(NAME ydb/recover_stress.tdb + COMMAND run_recover_stress_test.sh $ 5 5001 137) + setup_toku_test_properties(ydb/recover_stress.tdb recover_stress.tdb) + + declare_custom_tests(diskfull.tdb) + configure_file(run_diskfull_test.sh . COPYONLY) + add_test(NAME ydb/diskfull.tdb + COMMAND run_diskfull_test.sh $ 134) + setup_toku_test_properties(ydb/diskfull.tdb diskfull.tdb) + + declare_custom_tests(recovery_fileops_unit.tdb) + configure_file(run_recovery_fileops_unit.sh . 
COPYONLY) + file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/recovery_fileops_unit_dir") + foreach(ov c d r) + + if (ov STREQUAL c) + set(gset 0) + set(hset 0) + else () + set(gset 0 1 2 3 4 5) + set(hset 0 1) + endif () + + foreach(av 0 1) + foreach(bv 0 1) + + if (bv) + set(dset 0 1) + set(eset 0 1) + else () + set(dset 0) + set(eset 0) + endif () + + foreach(cv 0 1 2) + foreach(dv ${dset}) + foreach(ev ${eset}) + foreach(fv 0 1) + foreach(gv ${gset}) + foreach(hv ${hset}) + + if ((NOT ov STREQUAL c) AND (NOT cv) AND ((NOT bv) OR (NOT ev) OR (dv))) + set(iset 0 1) + else () + set(iset 0) + endif () + + foreach(iv ${iset}) + set(testname "ydb/recovery_fileops_unit.${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}") + set(envdir "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}") + set(errfile "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}.ctest-errors") + add_test(NAME ${testname} + COMMAND run_recovery_fileops_unit.sh $ ${errfile} 137 + -O ${ov} -A ${av} -B ${bv} -C ${cv} -D ${dv} -E ${ev} -F ${fv} -G ${gv} -H ${hv} -I ${iv} + ) + setup_toku_test_properties(${testname} ${envdir}) + set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${errfile}") + endforeach(iv) + endforeach(hv) + endforeach(gv) + endforeach(fv) + endforeach(ev) + endforeach(dv) + endforeach(cv) + endforeach(bv) + endforeach(av) + endforeach(ov) + + if (NOT (CMAKE_SYSTEM_NAME MATCHES Darwin OR + (CMAKE_CXX_COMPILER_ID STREQUAL Intel AND + CMAKE_BUILD_TYPE STREQUAL Release) + OR USE_GCOV)) + declare_custom_tests(helgrind1.tdb) + add_test(NAME ydb/helgrind_helgrind1.tdb + COMMAND valgrind --quiet --tool=helgrind --error-exitcode=1 --log-file=helgrind1.tdb.deleteme $) + setup_toku_test_properties(ydb/helgrind_helgrind1.tdb helgrind_helgrind1.tdb) + set_tests_properties(ydb/helgrind_helgrind1.tdb PROPERTIES WILL_FAIL TRUE) + endif() + declare_custom_tests(helgrind2.tdb) + declare_custom_tests(helgrind3.tdb) + add_ydb_helgrind_test(helgrind2.tdb) + add_ydb_helgrind_test(helgrind3.tdb) + + declare_custom_tests(test_groupcommit_count.tdb) + add_ydb_test(test_groupcommit_count.tdb -n 1) + add_ydb_helgrind_test(test_groupcommit_count.tdb -n 1) + add_ydb_drd_test(test_groupcommit_count.tdb -n 1) + + add_ydb_drd_test(test_4015.tdb) + + # We link the locktree so that stress test 0 can call some + # functions (ie: lock escalation) directly. + target_link_libraries(test_stress0.tdb locktree) + + # Set up default stress tests and drd tests. Exclude hot_index. 
+ foreach(src ${stress_test_srcs}) + if(NOT ${src} MATCHES hot_index) + get_filename_component(base ${src} NAME_WE) + set(test ${base}.tdb) + + if (${src} MATCHES test_stress0) + add_ydb_test(${test} --num_elements 512 --num_seconds 1000 --join_timeout 600) + else () + add_ydb_test(${test} --num_elements 150000 --num_seconds 1000 --join_timeout 600) + endif () + + add_ydb_drd_test_aux(drd_tiny_${test} ${test} --num_seconds 5 --num_elements 150 --join_timeout 3000) + set_tests_properties(ydb/drd_tiny_${test} PROPERTIES TIMEOUT 3600) + + add_test(ydb/drd_mid_${test}/prepare ${test} --only_create --num_elements 10000) + setup_toku_test_properties(ydb/drd_mid_${test}/prepare drd_mid_${test}) + add_ydb_drd_test_aux(drd_mid_${test} ${test} --only_stress --num_elements 10000 --num_seconds 100 --join_timeout 14400) + set_tests_properties(ydb/drd_mid_${test} PROPERTIES + DEPENDS ydb/drd_mid_${test}/prepare + REQUIRED_FILES "drd_mid_${test}.ctest-data" + TIMEOUT 15000 + ) + + add_test(ydb/drd_large_${test}/prepare ${test} --only_create --num_elements 150000) + setup_toku_test_properties(ydb/drd_large_${test}/prepare drd_large_${test}) + add_ydb_drd_test_aux(drd_large_${test} ${test} --only_stress --num_elements 150000 --num_seconds 1000 --join_timeout 28800) + set_tests_properties(ydb/drd_large_${test} PROPERTIES + DEPENDS ydb/drd_large_${test}/prepare + REQUIRED_FILES "drd_large_${test}.ctest-data" + TIMEOUT 30000 + ) + endif() + endforeach(src) + + # Set up upgrade tests. Exclude test_stress_openclose + foreach(src ${stress_test_srcs}) + if (NOT ${src} MATCHES test_stress_openclose) + get_filename_component(base ${src} NAME_WE) + set(test ${base}.tdb) + + foreach(oldver 4.2.0 5.0.8 5.2.7 6.0.0 6.1.0 6.5.1 6.6.3) + set(versiondir ${TOKU_SVNROOT}/tokudb.data/old-stress-test-envs/${oldver}) + if (NOT EXISTS "${versiondir}/") + message(WARNING "Test data for upgrade tests for version ${oldver} doesn't exist, check out ${versiondir}/*-2000-dir first or upgrade stress tests may fail.") + endif () + foreach(p_or_s pristine stressed) + if (NOT (${base} MATCHES test_stress4 AND ${p_or_s} MATCHES stressed)) + foreach(size 2000) + set(oldenvdir "${versiondir}/saved${p_or_s}-${size}-dir") + set(envdirbase "${upgrade}_${oldver}_${p_or_s}_${size}_${test}") + set(envdir "${envdirbase}.ctest-data") + set(testnamebase ydb/${test}/upgrade/${oldver}/${p_or_s}/${size}) + + add_test(NAME ${testnamebase}/remove + COMMAND ${CMAKE_COMMAND} -E remove_directory "${envdir}") + add_test(NAME ${testnamebase}/copy + COMMAND ${CMAKE_COMMAND} -E copy_directory "${oldenvdir}" "${envdir}") + set_tests_properties(${testnamebase}/copy PROPERTIES + DEPENDS ${testnamebase}/remove + REQUIRED_FILES "${oldenvdir}") + + add_test(NAME ${testnamebase} + COMMAND ${test} --only_stress --num_elements ${size} --num_seconds 600 --join_timeout 7200) + setup_toku_test_properties(${testnamebase} "${envdirbase}") + set_tests_properties(${testnamebase} PROPERTIES + DEPENDS ${testnamebase}/copy + REQUIRED_FILES "${envdir}" + TIMEOUT 10800) + endforeach(size) + endif () + endforeach(p_or_s) + endforeach(oldver) + endif () + endforeach(src) + + if (NOT EXISTS "${TOKU_SVNROOT}/tokudb.data/test_5902/") + message(WARNING "Test data for dump-env.tdb doesn't exist, check out ${TOKU_SVNROOT}/tokudb.data/test_5902 first or dump-env.tdb may fail.") + endif () + declare_custom_tests(dump-env.tdb) + add_test(NAME ydb/dump-env.tdb/remove + COMMAND ${CMAKE_COMMAND} -E remove_directory "dump-env.tdb.ctest-data") + add_test(NAME ydb/dump-env.tdb/copy + COMMAND 
${CMAKE_COMMAND} -E copy_directory "${TOKU_SVNROOT}/tokudb.data/test_5902" "dump-env.tdb.ctest-data") + set_tests_properties(ydb/dump-env.tdb/copy PROPERTIES + DEPENDS ydb/dump-env.tdb/remove + REQUIRED_FILES "${TOKU_SVNROOT}/tokudb.data/test_5902") + add_ydb_test(dump-env.tdb) + set_tests_properties(ydb/dump-env.tdb PROPERTIES + DEPENDS ydb/dump-env.tdb/copy + REQUIRED_FILES "dump-env.tdb.ctest-data") + + ## for some reason this rule doesn't run with the makefile and it crashes with this rule, so I'm disabling this special case + #declare_custom_tests(test_thread_stack.tdb) + #add_custom_command(OUTPUT run_test_thread_stack.sh + # COMMAND install "${CMAKE_CURRENT_SOURCE_DIR}/run_test_thread_stack.sh" "${CMAKE_CFG_INTDIR}" + # MAIN_DEPENDENCY run_test_thread_stack.sh + # VERBATIM) + #add_custom_target(install_run_test_thread_stack.sh ALL DEPENDS run_test_thread_stack.sh) + #add_test(ydb/test_thread_stack.tdb run_test_thread_stack.sh "${CMAKE_CFG_INTDIR}/test_thread_stack.tdb") + + declare_custom_tests(root_fifo_41.tdb) + foreach(num RANGE 1 100) + add_ydb_test_aux(root_fifo_41_${num}_populate.tdb root_fifo_41.tdb -n ${num} -populate) + add_ydb_test_aux(root_fifo_41_${num}_nopopulate.tdb root_fifo_41.tdb -n ${num}) + endforeach(num) + + add_ydb_test_aux(test3039_small.tdb test3039.tdb -n 1000) + + declare_custom_tests(test_abort4.tdb) + foreach(num RANGE -1 19) + add_ydb_test_aux(test_abort4_${num}_0.tdb test_abort4.tdb -c 0 -l ${num}) + add_ydb_test_aux(test_abort4_${num}_1.tdb test_abort4.tdb -c 1 -l ${num}) + endforeach(num) + + set(old_loader_upgrade_data "${TOKU_SVNROOT}/tokudb.data/env_preload.4.2.0.emptydictionaries.cleanshutdown") + if (NOT EXISTS "${old_loader_upgrade_data}/") + message(WARNING "Test data for loader upgrade tests doesn't exist, check out ${old_loader_upgrade_data} first, or loader-stress-test3.tdb may fail.") + endif () + function(add_loader_upgrade_test name bin) + add_test(NAME ydb/${name}/remove + COMMAND ${CMAKE_COMMAND} -E remove_directory "${name}.ctest-data") + add_test(NAME ydb/${name}/copy + COMMAND ${CMAKE_COMMAND} -E copy_directory "${old_loader_upgrade_data}" "${name}.ctest-data") + set_tests_properties(ydb/${name}/copy PROPERTIES + DEPENDS ydb/${name}/remove + REQUIRED_FILES "${old_loader_upgrade_data}") + add_ydb_test_aux(${name} ${bin} -u ${ARGN}) + set_tests_properties(ydb/${name} PROPERTIES + DEPENDS ydb/${name}/copy + REQUIRED_FILES "${name}.ctest-data") + endfunction(add_loader_upgrade_test) + + list(REMOVE_ITEM loader_tests loader-stress-test.loader) + add_ydb_test_aux(loader-stress-test0.tdb loader-stress-test.tdb -c) + add_ydb_test_aux(loader-stress-test1.tdb loader-stress-test.tdb -c -p) + add_ydb_test_aux(loader-stress-test2.tdb loader-stress-test.tdb -r 5000 -s) + add_loader_upgrade_test(loader-stress-test3.tdb loader-stress-test.tdb -c) + add_ydb_test_aux(loader-stress-test4.tdb loader-stress-test.tdb -r 10000000 -c) + add_ydb_test_aux(loader-stress-test0z.tdb loader-stress-test.tdb -c -z) + add_ydb_test_aux(loader-stress-test1z.tdb loader-stress-test.tdb -c -p -z) + add_ydb_test_aux(loader-stress-test2z.tdb loader-stress-test.tdb -r 5000 -s -z) + add_loader_upgrade_test(loader-stress-test3z.tdb loader-stress-test.tdb -c -z) + add_ydb_test_aux(loader-stress-test4z.tdb loader-stress-test.tdb -r 500000 -c -z --valsize 28) + + list(REMOVE_ITEM loader_tests loader-dup-test.loader) + add_ydb_test_aux(loader-dup-test0.tdb loader-dup-test.tdb) + add_ydb_test_aux(loader-dup-test1.tdb loader-dup-test.tdb -d 1 -r 500000) + 
add_ydb_test_aux(loader-dup-test2.tdb loader-dup-test.tdb -d 1 -r 1000000) + add_ydb_test_aux(loader-dup-test3.tdb loader-dup-test.tdb -d 1 -s -r 100) + add_ydb_test_aux(loader-dup-test4.tdb loader-dup-test.tdb -d 1 -s -r 1000) + add_ydb_test_aux(loader-dup-test5.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E) + add_ydb_test_aux(loader-dup-test0z.tdb loader-dup-test.tdb -z) + add_ydb_test_aux(loader-dup-test1z.tdb loader-dup-test.tdb -d 1 -r 500000 -z) + add_ydb_test_aux(loader-dup-test2z.tdb loader-dup-test.tdb -d 1 -r 1000000 -z) + add_ydb_test_aux(loader-dup-test3z.tdb loader-dup-test.tdb -d 1 -s -r 100 -z) + add_ydb_test_aux(loader-dup-test4z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -z) + add_ydb_test_aux(loader-dup-test5z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E -z) + + ## as part of #4503, we took out test 1 and 3 + list(REMOVE_ITEM loader_tests loader-cleanup-test.loader) + add_ydb_test_aux(loader-cleanup-test0.tdb loader-cleanup-test.tdb -s -r 800) + #add_ydb_test_aux(loader-cleanup-test1.tdb loader-cleanup-test.tdb -s -r 800 -p) + add_ydb_test_aux(loader-cleanup-test2.tdb loader-cleanup-test.tdb -s -r 8000) + #add_ydb_test_aux(loader-cleanup-test3.tdb loader-cleanup-test.tdb -s -r 8000 -p) + add_ydb_test_aux(loader-cleanup-test0z.tdb loader-cleanup-test.tdb -s -r 800 -z) + add_ydb_test_aux(loader-cleanup-test2z.tdb loader-cleanup-test.tdb -s -r 8000 -z) + + declare_custom_tests(keyrange.tdb) + add_ydb_test_aux(keyrange-get0.tdb keyrange.tdb --get 0) + add_ydb_test_aux(keyrange-get1.tdb keyrange.tdb --get 1) + if (0) + add_ydb_test_aux(keyrange-random-get0.tdb keyrange.tdb --get 0 --random_keys 1) + add_ydb_test_aux(keyrange-random-get1.tdb keyrange.tdb --get 1 --random_keys 1) + else () + message(WARNING "TODO(leif): re-enable keyrange tests, see #5666") + endif () + add_ydb_test_aux(keyrange-loader-get0.tdb keyrange.tdb --get 0 --loader 1) + add_ydb_test_aux(keyrange-loader-get1.tdb keyrange.tdb --get 1 --loader 1) + + declare_custom_tests(maxsize-for-loader.tdb) + add_ydb_test_aux(maxsize-for-loader-A.tdb maxsize-for-loader.tdb -f -c) + add_ydb_test_aux(maxsize-for-loader-B.tdb maxsize-for-loader.tdb -c) + add_ydb_test_aux(maxsize-for-loader-Az.tdb maxsize-for-loader.tdb -f -z -c) + add_ydb_test_aux(maxsize-for-loader-Bz.tdb maxsize-for-loader.tdb -z -c) + + declare_custom_tests(hotindexer-undo-do-test.tdb) + file(GLOB hotindexer_tests RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.test") + file(GLOB hotindexer_results RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.result") + configure_file(run-hotindexer-undo-do-tests.bash . COPYONLY) + foreach(result ${hotindexer_results}) + configure_file(${result} ${result} COPYONLY) + endforeach(result) + foreach(test ${hotindexer_tests}) + configure_file(${test} ${test} COPYONLY) + add_test(NAME ydb/${test} COMMAND run-hotindexer-undo-do-tests.bash ${test}) + setup_toku_test_properties(ydb/${test} ${test}) + endforeach() + + foreach(test ${tdb_tests} ${bdb_tests}) + add_ydb_test(${test}) + endforeach(test) + + configure_file(run_recover_test.sh . COPYONLY) + foreach(recover_test ${recover_tests}) + get_filename_component(base ${recover_test} NAME_WE) + add_test(NAME ydb/${recover_test} + COMMAND run_recover_test.sh $ "${recover_test}.ctest-data" $ $) + setup_toku_test_properties(ydb/${recover_test} ${recover_test}) + endforeach(recover_test) + + configure_file(run_abortrecover_test.sh . 
COPYONLY) + foreach(abortrecover_test ${abortrecover_tests}) + get_filename_component(base ${abortrecover_test} NAME_WE) + add_test(NAME ydb/${abortrecover_test} + COMMAND run_abortrecover_test.sh $) + setup_toku_test_properties(ydb/${abortrecover_test} ${abortrecover_test}) + endforeach(abortrecover_test) + ## alternate implementation, doesn't work because the abort phase crashes and we can't tell cmake that's expected + # foreach(abortrecover_test ${abortrecover_tests}) + # get_filename_component(base ${abortrecover_test} NAME_WE) + # set(test ${base}.tdb) + # add_test(NAME ydb/${test}/abort + # COMMAND ${test} --test) + # setup_toku_test_properties(ydb/${test}/abort ${abortrecover_test}) + # set_tests_properties(ydb/${test}/abort PROPERTIES WILL_FAIL TRUE) + + # add_test(NAME ydb/${test}/recover + # COMMAND ${test} --recover) + # setup_toku_test_properties(ydb/${test}/recover ${abortrecover_test}) + # set_tests_properties(ydb/${test}/recover PROPERTIES + # DEPENDS ydb/${test}/abort + # REQUIRED_FILES "${abortrecover_test}.ctest-data") + # endforeach(abortrecover_test) + + foreach(loader_test ${loader_tests}) + get_filename_component(base ${loader_test} NAME_WE) + add_ydb_test_aux(${base}.nop.loader ${base}.tdb) + add_ydb_test_aux(${base}.p.loader ${base}.tdb -p) + add_ydb_test_aux(${base}.comp.loader ${base}.tdb -z) + if("${tdb_tests_that_should_fail}" MATCHES "${base}.loader") + list(REMOVE_ITEM tdb_tests_that_should_fail ${base}.loader) + list(APPEND tdb_tests_that_should_fail ${base}.nop.loader ${base}.p.loader ${base}.comp.loader) + endif() + endforeach(loader_test) + + set(tdb_tests_that_should_fail "ydb/${tdb_tests_that_should_fail}") + string(REGEX REPLACE ";" ";ydb/" tdb_tests_that_should_fail "${tdb_tests_that_should_fail}") + set_tests_properties(${tdb_tests_that_should_fail} PROPERTIES WILL_FAIL TRUE) + + ## give some tests, that time out normally, 1 hour to complete + set(long_tests + ydb/checkpoint_1.tdb + ydb/drd_test_groupcommit_count.tdb + ydb/env-put-multiple.tdb + ydb/filesize.tdb + ydb/loader-cleanup-test0.tdb + ydb/loader-cleanup-test0z.tdb + ydb/manyfiles.tdb + ydb/recover-loader-test.abortrecover + ydb/recovery_fileops_stress.tdb + ydb/root_fifo_1.tdb + ydb/root_fifo_2.tdb + ydb/root_fifo_31.tdb + ydb/root_fifo_32.tdb + ydb/shutdown-3344.tdb + ydb/stat64-create-modify-times.tdb + ydb/test1572.tdb + ydb/test_abort4_19_0.tdb + ydb/test_abort4_19_1.tdb + ydb/test_abort5.tdb + ydb/test_archive1.tdb + ydb/test_logmax.tdb + ydb/test_query.tdb + ydb/test_txn_abort5.tdb + ydb/test_txn_abort5a.tdb + ydb/test_txn_abort6.tdb + ydb/test_txn_nested2.tdb + ydb/test_txn_nested4.tdb + ydb/test_txn_nested5.tdb + ydb/test_update_broadcast_stress.tdb + ) + if (BDB_FOUND) + list(APPEND long_tests + ydb/root_fifo_1.bdb + ydb/root_fifo_31.bdb + ydb/rowsize.bdb + ydb/test_log10.bdb + ydb/test_log7.bdb + ydb/test_logmax.bdb + ) + endif (BDB_FOUND) + set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600) + ## some take even longer, with valgrind + set(extra_long_tests + ydb/drd_test_4015.tdb + ydb/hotindexer-with-queries.tdb + ydb/hot-optimize-table-tests.tdb + ydb/loader-cleanup-test2.tdb + ydb/loader-cleanup-test2z.tdb + ydb/loader-dup-test0.tdb + ydb/loader-stress-del.nop.loader + ydb/loader-stress-del.p.loader + ydb/loader-stress-del.comp.loader + ydb/test3039.tdb + ydb/test_update_stress.tdb + ) + if (BDB_FOUND) + list(APPEND extra_long_tests + ydb/test_groupcommit_count.bdb + ) + endif (BDB_FOUND) + set_tests_properties(${extra_long_tests} PROPERTIES TIMEOUT 7200) + ## these 
really take a long time with valgrind + set(phenomenally_long_tests + ydb/checkpoint_stress.tdb + ydb/loader-stress-test4.tdb + ydb/loader-stress-test4z.tdb + ydb/recover_stress.tdb + ydb/test3529.tdb + ) + if (BDB_FOUND) + list(APPEND phenomenally_long_tests + ydb/test1426.tdb + ) + endif (BDB_FOUND) + set_tests_properties(${phenomenally_long_tests} PROPERTIES TIMEOUT 14400) +endif(BUILD_TESTING OR BUILD_SRC_TESTS) diff --git a/src/tests/perf_child_txn.cc b/src/tests/perf_child_txn.cc index bff33e8a9ba..4e099c3e270 100644 --- a/src/tests/perf_child_txn.cc +++ b/src/tests/perf_child_txn.cc @@ -24,7 +24,7 @@ static int create_child_txn(DB_TXN* txn, ARG arg, void* UU(operation_extra), void *UU(stats_extra)) { DB_TXN* child_txn = NULL; DB_ENV* env = arg->env; - int r = env->txn_begin(env, txn, &child_txn, arg->txn_type); + int r = env->txn_begin(env, txn, &child_txn, arg->txn_flags); CKERR(r); r = child_txn->commit(child_txn, 0); CKERR(r); diff --git a/src/tests/perf_iibench.cc b/src/tests/perf_iibench.cc index 97f793f6a63..17e6d2b4cd2 100644 --- a/src/tests/perf_iibench.cc +++ b/src/tests/perf_iibench.cc @@ -376,6 +376,7 @@ stress_table(DB_ENV* env, DB **dbs, struct cli_args *cli_args) { } else { myargs[i].operation = iibench_rangequery_op; myargs[i].operation_extra = &put_extra; + myargs[i].txn_flags |= DB_TXN_READ_ONLY; myargs[i].sleep_ms = 1000; // 1 second between range queries } } diff --git a/src/tests/perf_ptquery.cc b/src/tests/perf_ptquery.cc index f88f7588400..dfd25d8e1c2 100644 --- a/src/tests/perf_ptquery.cc +++ b/src/tests/perf_ptquery.cc @@ -54,6 +54,7 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) { for (int i = 0; i < num_threads; i++) { arg_init(&myargs[i], dbp, env, cli_args); myargs[i].operation = ptquery_op; + myargs[i].txn_flags |= DB_TXN_READ_ONLY; } run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args); } diff --git a/src/tests/perf_ptquery2.cc b/src/tests/perf_ptquery2.cc index 116ae435746..66af246a837 100644 --- a/src/tests/perf_ptquery2.cc +++ b/src/tests/perf_ptquery2.cc @@ -67,6 +67,7 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) { thread_ids[i] = i % cli_args->num_DBs; myargs[i].operation = ptquery_op2; myargs[i].operation_extra = &thread_ids[i]; + myargs[i].txn_flags |= DB_TXN_READ_ONLY; } run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args); } diff --git a/src/tests/perf_rangequery.cc b/src/tests/perf_rangequery.cc index f08ec0e4a07..0aa3f337201 100644 --- a/src/tests/perf_rangequery.cc +++ b/src/tests/perf_rangequery.cc @@ -23,6 +23,7 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) { for (int i = 0; i < num_threads; i++) { arg_init(&myargs[i], dbp, env, cli_args); myargs[i].operation = rangequery_op; + myargs[i].txn_flags |= DB_TXN_READ_ONLY; } run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args); } diff --git a/src/tests/perf_read_txn.cc b/src/tests/perf_read_txn.cc new file mode 100644 index 00000000000..e5f07a60360 --- /dev/null +++ b/src/tests/perf_read_txn.cc @@ -0,0 +1,50 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." 
+#ident "$Id: perf_nop.cc 45903 2012-07-19 13:06:39Z leifwalsh $" +#include "test.h" + +#include +#include + +#include +#include +#include +#include +#include + +#include "threaded_stress_test_helpers.h" + +// The intent of this test is to measure the throughput of creating and destroying +// root read-only transactions that create snapshots + +static int UU() nop(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) { + return 0; +} + + +static void +stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) { + if (verbose) printf("starting creation of pthreads\n"); + const int num_threads = cli_args->num_ptquery_threads; + struct arg myargs[num_threads]; + for (int i = 0; i < num_threads; i++) { + arg_init(&myargs[i], dbp, env, cli_args); + myargs[i].txn_flags |= DB_TXN_READ_ONLY; + myargs[i].operation = nop; + } + run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args); +} + +int +test_main(int argc, char *const argv[]) { + struct cli_args args = get_default_args_for_perf(); + parse_stress_test_args(argc, argv, &args); + args.single_txn = false; + args.num_elements = 0; + args.num_DBs = 0; + args.num_put_threads = 0; + args.num_update_threads = 0; + stress_test_main(&args); + return 0; +} diff --git a/src/tests/perf_read_txn_single_thread.cc b/src/tests/perf_read_txn_single_thread.cc new file mode 100644 index 00000000000..c2384076761 --- /dev/null +++ b/src/tests/perf_read_txn_single_thread.cc @@ -0,0 +1,76 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." +#ident "$Id: perf_txn_single_thread.cc 51911 2013-01-10 18:21:29Z zardosht $" +#include "test.h" + +#include +#include + +#include +#include +#include +#include +#include + +#include "threaded_stress_test_helpers.h" + +// The intent of this test is to measure how fast a single thread can +// commit and create transactions when there exist N transactions. 
+ +DB_TXN** txns; +int num_txns; + +static int commit_and_create_txn( + DB_TXN* UU(txn), + ARG arg, + void* UU(operation_extra), + void* UU(stats_extra) + ) +{ + int rand_txn_id = random() % num_txns; + int r = txns[rand_txn_id]->commit(txns[rand_txn_id], 0); + CKERR(r); + r = arg->env->txn_begin(arg->env, 0, &txns[rand_txn_id], arg->txn_flags | DB_TXN_READ_ONLY); + CKERR(r); + return 0; +} + +static void +stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) { + if (verbose) printf("starting running of stress\n"); + + num_txns = cli_args->txn_size; + XCALLOC_N(num_txns, txns); + for (int i = 0; i < num_txns; i++) { + int r = env->txn_begin(env, 0, &txns[i], DB_TXN_SNAPSHOT); + CKERR(r); + } + + struct arg myarg; + arg_init(&myarg, dbp, env, cli_args); + myarg.operation = commit_and_create_txn; + + run_workers(&myarg, 1, cli_args->num_seconds, false, cli_args); + + for (int i = 0; i < num_txns; i++) { + int chk_r = txns[i]->commit(txns[i], 0); + CKERR(chk_r); + } + toku_free(txns); + num_txns = 0; +} + +int +test_main(int argc, char *const argv[]) { + num_txns = 0; + txns = NULL; + struct cli_args args = get_default_args_for_perf(); + parse_stress_test_args(argc, argv, &args); + args.single_txn = true; + // this test is all about transactions, make the DB small + args.num_elements = 1; + args.num_DBs= 1; + perf_test_main(&args); + return 0; +} diff --git a/src/tests/perf_txn_single_thread.cc b/src/tests/perf_txn_single_thread.cc index b58e2923795..fd440fcff30 100644 --- a/src/tests/perf_txn_single_thread.cc +++ b/src/tests/perf_txn_single_thread.cc @@ -31,7 +31,7 @@ static int commit_and_create_txn( int rand_txn_id = random() % num_txns; int r = txns[rand_txn_id]->commit(txns[rand_txn_id], 0); CKERR(r); - r = arg->env->txn_begin(arg->env, 0, &txns[rand_txn_id], arg->txn_type); + r = arg->env->txn_begin(arg->env, 0, &txns[rand_txn_id], arg->txn_flags); CKERR(r); return 0; } diff --git a/src/tests/test_cursor_with_read_txn.cc b/src/tests/test_cursor_with_read_txn.cc new file mode 100644 index 00000000000..5ba5a9b8433 --- /dev/null +++ b/src/tests/test_cursor_with_read_txn.cc @@ -0,0 +1,92 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id: test_get_max_row_size.cc 45903 2012-07-19 13:06:39Z leifwalsh $" +#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
+#include "test.h" + +int test_main(int argc, char * const argv[]) +{ + int r; + DB * db; + DB_ENV * env; + (void) argc; + (void) argv; + + toku_os_recursive_delete(TOKU_TEST_FILENAME); + r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); } + + // set things up + r = db_env_create(&env, 0); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755); + CKERR(r); + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644); + CKERR(r); + + + DB_TXN* txn = NULL; + r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT); + CKERR(r); + + int k = 1; + int v = 10; + DBT key, val; + r = db->put( + db, + txn, + dbt_init(&key, &k, sizeof k), + dbt_init(&val, &v, sizeof v), + 0 + ); + CKERR(r); + k = 2; + v = 20; + r = db->put( + db, + txn, + dbt_init(&key, &k, sizeof k), + dbt_init(&val, &v, sizeof v), + 0 + ); + CKERR(r); + r = txn->commit(txn, 0); + CKERR(r); + + r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY); + CKERR(r); + DBC* cursor = NULL; + r = db->cursor(db, txn, &cursor, 0); + CKERR(r); + DBT key1, val1; + memset(&key1, 0, sizeof key1); + memset(&val1, 0, sizeof val1); + r = cursor->c_get(cursor, &key1, &val1, DB_FIRST); + CKERR(r); + invariant(key1.size == sizeof(int)); + invariant(*(int *)key1.data == 1); + invariant(val1.size == sizeof(int)); + invariant(*(int *)val1.data == 10); + + r = cursor->c_get(cursor, &key1, &val1, DB_NEXT); + CKERR(r); + invariant(key1.size == sizeof(int)); + invariant(*(int *)key1.data == 2); + invariant(val1.size == sizeof(int)); + invariant(*(int *)val1.data == 20); + + r = cursor->c_close(cursor); + CKERR(r); + r = txn->commit(txn, 0); + CKERR(r); + + // clean things up + r = db->close(db, 0); + CKERR(r); + r = env->close(env, 0); + CKERR(r); + + return 0; +} diff --git a/src/tests/test_locking_with_read_txn.cc b/src/tests/test_locking_with_read_txn.cc new file mode 100644 index 00000000000..e6b3b2d1060 --- /dev/null +++ b/src/tests/test_locking_with_read_txn.cc @@ -0,0 +1,57 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id: test_get_max_row_size.cc 45903 2012-07-19 13:06:39Z leifwalsh $" +#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
+#include "test.h" + +int test_main(int argc, char * const argv[]) +{ + int r; + DB * db; + DB_ENV * env; + (void) argc; + (void) argv; + + const char *db_env_dir = TOKU_TEST_FILENAME; + char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1]; + snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir); + + r = system(rm_cmd); { int chk_r = r; CKERR(chk_r); } + r = toku_os_mkdir(db_env_dir, 0755); { int chk_r = r; CKERR(chk_r); } + + // set things up + r = db_env_create(&env, 0); + CKERR(r); + r = env->open(env, db_env_dir, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755); + CKERR(r); + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644); + CKERR(r); + + + DB_TXN* txn1 = NULL; + DB_TXN* txn2 = NULL; + r = env->txn_begin(env, 0, &txn1, DB_TXN_READ_ONLY); + CKERR(r); + r = env->txn_begin(env, 0, &txn2, DB_TXN_READ_ONLY); + CKERR(r); + + + r=db->pre_acquire_table_lock(db, txn1); CKERR(r); + r=db->pre_acquire_table_lock(db, txn2); CKERR2(r, DB_LOCK_NOTGRANTED); + + r = txn1->commit(txn1, 0); + CKERR(r); + r = txn2->commit(txn2, 0); + CKERR(r); + + // clean things up + r = db->close(db, 0); + CKERR(r); + r = env->close(env, 0); + CKERR(r); + + return 0; +} diff --git a/src/tests/test_read_txn_invalid_ops.cc b/src/tests/test_read_txn_invalid_ops.cc new file mode 100644 index 00000000000..72cebfa932d --- /dev/null +++ b/src/tests/test_read_txn_invalid_ops.cc @@ -0,0 +1,163 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id: test_get_max_row_size.cc 45903 2012-07-19 13:06:39Z leifwalsh $" +#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
+#include "test.h" + + +static int update_fun(DB *UU(db), + const DBT *UU(key), + const DBT *UU(old_val), const DBT *UU(extra), + void (*set_val)(const DBT *new_val, + void *set_extra), + void *UU(set_extra)) +{ + abort(); + assert(set_val != NULL); + return 0; +} + +static int generate_row_for_put( + DB *UU(dest_db), + DB *UU(src_db), + DBT *UU(dest_key), + DBT *UU(dest_val), + const DBT *UU(src_key), + const DBT *UU(src_val) + ) +{ + abort(); + return 0; +} + +static int generate_row_for_del( + DB *UU(dest_db), + DB *UU(src_db), + DBT *UU(dest_key), + const DBT *UU(src_key), + const DBT *UU(src_val) + ) +{ + abort(); + return 0; +} + +static void test_invalid_ops(uint32_t iso_flags) { + int r; + DB * db; + DB_ENV * env; + + toku_os_recursive_delete(TOKU_TEST_FILENAME); + r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); } + + // set things up + r = db_env_create(&env, 0); + CKERR(r); + r = env->set_generate_row_callback_for_put(env,generate_row_for_put); + CKERR(r); + r = env->set_generate_row_callback_for_del(env,generate_row_for_del); + CKERR(r); + env->set_update(env, update_fun); + r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755); + CKERR(r); + r = db_create(&db, env, 0); + CKERR(r); + + DB_TXN* txn = NULL; + r = env->txn_begin(env, 0, &txn, iso_flags | DB_TXN_READ_ONLY); + CKERR(r); + + r = db->open(db, txn, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644); + CKERR2(r, EINVAL); + r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644); + CKERR(r); + + int k = 1; + int v = 10; + DBT key, val; + dbt_init(&key, &k, sizeof k); + dbt_init(&val, &v, sizeof v); + + uint32_t db_flags = 0; + uint32_t indexer_flags = 0; + DB_INDEXER* indexer; + r = env->create_indexer( + env, + txn, + &indexer, + db, + 1, + &db, + &db_flags, + indexer_flags + ); + CKERR2(r, EINVAL); + + + // test invalid operations of ydb_db.cc, + // db->open tested above + DB_LOADER* loader; + uint32_t put_flags = 0; + uint32_t dbt_flags = 0; + r = env->create_loader(env, txn, &loader, NULL, 1, &db, &put_flags, &dbt_flags, 0); + CKERR2(r, EINVAL); + + r = db->change_descriptor(db, txn, &key, 0); + CKERR2(r, EINVAL); + + // + // test invalid operations return EINVAL from ydb_write.cc + // + r = db->put(db, txn, &key, &val,0); + CKERR2(r, EINVAL); + r = db->del(db, txn, &key, DB_DELETE_ANY); + CKERR2(r, EINVAL); + r = db->update(db, txn, &key, &val, 0); + CKERR2(r, EINVAL); + r = db->update_broadcast(db, txn, &val, 0); + CKERR2(r, EINVAL); + + r = env->put_multiple(env, NULL, txn, &key, &val, 1, &db, &key, &val, 0); + CKERR2(r, EINVAL); + r = env->del_multiple(env, NULL, txn, &key, &val, 1, &db, &key, 0); + CKERR2(r, EINVAL); + uint32_t flags; + r = env->update_multiple( + env, NULL, txn, + &key, &val, + &key, &val, + 1, &db, &flags, + 1, &key, + 1, &val + ); + CKERR2(r, EINVAL); + + r = db->close(db, 0); + CKERR(r); + + // test invalid operations of ydb.cc, dbrename and dbremove + r = env->dbremove(env, txn, "foo.db", NULL, 0); + CKERR2(r, EINVAL); + // test invalid operations of ydb.cc, dbrename and dbremove + r = env->dbrename(env, txn, "foo.db", NULL, "bar.db", 0); + CKERR2(r, EINVAL); + + r = txn->commit(txn, 0); + CKERR(r); + + // clean things up + r = env->close(env, 0); + CKERR(r); +} + + +int test_main(int argc, char * const argv[]) { + (void) argc; + (void) argv; + test_invalid_ops(0); + test_invalid_ops(DB_TXN_SNAPSHOT); + test_invalid_ops(DB_READ_COMMITTED); + test_invalid_ops(DB_READ_UNCOMMITTED); + return 
0; +} diff --git a/src/tests/test_simple_read_txn.cc b/src/tests/test_simple_read_txn.cc new file mode 100644 index 00000000000..ee8a1298aca --- /dev/null +++ b/src/tests/test_simple_read_txn.cc @@ -0,0 +1,64 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id: test_get_max_row_size.cc 45903 2012-07-19 13:06:39Z leifwalsh $" +#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." +#include "test.h" + + +static void test_read_txn_creation(DB_ENV* env, uint32_t iso_flags) { + int r; + DB_TXN* parent_txn = NULL; + DB_TXN* child_txn = NULL; + r = env->txn_begin(env, 0, &parent_txn, iso_flags); + CKERR(r); + r = env->txn_begin(env, parent_txn, &child_txn, iso_flags | DB_TXN_READ_ONLY); + CKERR2(r, EINVAL); + r = env->txn_begin(env, parent_txn, &child_txn, iso_flags); + CKERR(r); + r = child_txn->commit(child_txn, 0); + CKERR(r); + r = parent_txn->commit(parent_txn, 0); + CKERR(r); + + r = env->txn_begin(env, 0, &parent_txn, iso_flags | DB_TXN_READ_ONLY); + CKERR(r); + r = env->txn_begin(env, parent_txn, &child_txn, iso_flags | DB_TXN_READ_ONLY); + CKERR(r); + r = child_txn->commit(child_txn, 0); + CKERR(r); + r = env->txn_begin(env, parent_txn, &child_txn, iso_flags); + CKERR(r); + r = child_txn->commit(child_txn, 0); + CKERR(r); + r = parent_txn->commit(parent_txn, 0); + CKERR(r); + +} + +int test_main(int argc, char * const argv[]) +{ + int r; + DB_ENV * env; + (void) argc; + (void) argv; + + toku_os_recursive_delete(TOKU_TEST_FILENAME); + r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); } + + // set things up + r = db_env_create(&env, 0); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755); + CKERR(r); + + test_read_txn_creation(env, 0); + test_read_txn_creation(env, DB_TXN_SNAPSHOT); + test_read_txn_creation(env, DB_READ_COMMITTED); + test_read_txn_creation(env, DB_READ_UNCOMMITTED); + + r = env->close(env, 0); + CKERR(r); + + return 0; +} diff --git a/src/tests/test_stress1.cc b/src/tests/test_stress1.cc index ccb1525ac85..b8b2ad2480f 100644 --- a/src/tests/test_stress1.cc +++ b/src/tests/test_stress1.cc @@ -69,6 +69,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { soe[1].prefetch = false; myargs[1].operation_extra = &soe[1]; myargs[1].operation = scan_op; + myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY; // make the backward fast scanner soe[2].fast = true; @@ -76,6 +77,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { soe[2].prefetch = false; myargs[2].operation_extra = &soe[2]; myargs[2].operation = scan_op; + myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY; // make the backward slow scanner soe[3].fast = false; diff --git a/src/tests/test_stress2.cc b/src/tests/test_stress2.cc index cfe6d844eda..7d166a1da23 100644 --- a/src/tests/test_stress2.cc +++ b/src/tests/test_stress2.cc @@ -63,6 +63,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { soe[1].prefetch = false; myargs[1].operation_extra = &soe[1]; myargs[1].operation = scan_op; + myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY; 
// make the backward fast scanner soe[2].fast = true; @@ -70,6 +71,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { soe[2].prefetch = false; myargs[2].operation_extra = &soe[2]; myargs[2].operation = scan_op; + myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY; // make the backward slow scanner soe[3].fast = false; diff --git a/src/tests/test_stress3.cc b/src/tests/test_stress3.cc index 5453d6e58c6..4726bf64603 100644 --- a/src/tests/test_stress3.cc +++ b/src/tests/test_stress3.cc @@ -62,6 +62,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { soe[1].prefetch = false; myargs[1].operation_extra = &soe[1]; myargs[1].operation = scan_op; + myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY; // make the backward fast scanner soe[2].fast = true; @@ -69,6 +70,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { soe[2].prefetch = false; myargs[2].operation_extra = &soe[2]; myargs[2].operation = scan_op; + myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY; // make the backward slow scanner soe[3].fast = false; diff --git a/src/tests/test_stress5.cc b/src/tests/test_stress5.cc index 2bac6ee2dc9..8aef88db196 100644 --- a/src/tests/test_stress5.cc +++ b/src/tests/test_stress5.cc @@ -36,6 +36,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { soe[0].prefetch = false; myargs[0].operation_extra = &soe[0]; myargs[0].operation = scan_op; + myargs[0].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY; // make the forward slow scanner soe[1].fast = false; diff --git a/src/tests/threaded_stress_test_helpers.h b/src/tests/threaded_stress_test_helpers.h index 54e3ade6ca5..2814808dd99 100644 --- a/src/tests/threaded_stress_test_helpers.h +++ b/src/tests/threaded_stress_test_helpers.h @@ -135,7 +135,7 @@ struct arg { // DB are in [0, num_elements) // false otherwise int sleep_ms; // number of milliseconds to sleep between operations - uint32_t txn_type; // isolation level for txn running operation + uint32_t txn_flags; // isolation level for txn running operation operation_t operation; // function that is the operation to be run void* operation_extra; // extra parameter passed to operation enum stress_lock_type lock_type; // states if operation must be exclusive, shared, or does not require locking @@ -155,7 +155,7 @@ static void arg_init(struct arg *arg, DB **dbp, DB_ENV *env, struct cli_args *cl arg->bounded_element_range = true; arg->sleep_ms = 0; arg->lock_type = STRESS_LOCK_NONE; - arg->txn_type = DB_TXN_SNAPSHOT; + arg->txn_flags = DB_TXN_SNAPSHOT; arg->operation_extra = nullptr; arg->do_prepare = false; arg->prelock_updates = false; @@ -488,12 +488,12 @@ static void *worker(void *arg_v) { printf("%lu starting %p\n", (unsigned long) intself, arg->operation); } if (arg->cli->single_txn) { - r = env->txn_begin(env, 0, &txn, arg->txn_type); CKERR(r); + r = env->txn_begin(env, 0, &txn, arg->txn_flags); CKERR(r); } while (run_test) { lock_worker_op(we); if (!arg->cli->single_txn) { - r = env->txn_begin(env, 0, &txn, arg->txn_type); CKERR(r); + r = env->txn_begin(env, 0, &txn, arg->txn_flags); CKERR(r); } r = arg->operation(txn, arg, arg->operation_extra, we->counters); if (r==0 && !arg->cli->single_txn && arg->do_prepare) { @@ -2654,7 +2654,7 @@ UU() stress_recover(struct cli_args *args) { DB_TXN* txn = nullptr; struct arg recover_args; arg_init(&recover_args, dbs, env, args); - int r = env->txn_begin(env, 0, &txn, recover_args.txn_type); + int r = env->txn_begin(env, 0, &txn, recover_args.txn_flags); CKERR(r); struct 
scan_op_extra soe = { .fast = true, diff --git a/src/ydb-internal.h b/src/ydb-internal.h index 8dc35484e02..7ae05f920d3 100644 --- a/src/ydb-internal.h +++ b/src/ydb-internal.h @@ -209,6 +209,16 @@ env_opened(DB_ENV *env) { return env->i->cachetable != 0; } +static inline bool +txn_is_read_only(DB_TXN* txn) { + if (txn && (db_txn_struct_i(txn)->flags & DB_TXN_READ_ONLY)) { + return true; + } + return false; +} + +#define HANDLE_READ_ONLY_TXN(txn) if(txn_is_read_only(txn)) return EINVAL; + void env_panic(DB_ENV * env, int cause, const char * msg); void env_note_db_opened(DB_ENV *env, DB *db); void env_note_db_closed(DB_ENV *env, DB *db); diff --git a/src/ydb.cc b/src/ydb.cc index 59090655ab2..28703dbd378 100644 --- a/src/ydb.cc +++ b/src/ydb.cc @@ -1205,6 +1205,7 @@ static int locked_env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags) { int ret, r; HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn); + HANDLE_READ_ONLY_TXN(txn); DB_TXN *child_txn = NULL; int using_txns = env->i->open_flags & DB_INIT_TXN; @@ -1235,6 +1236,7 @@ static int env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char static int locked_env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, const char *newname, uint32_t flags) { int ret, r; + HANDLE_READ_ONLY_TXN(txn); HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn); DB_TXN *child_txn = NULL; @@ -2413,6 +2415,7 @@ env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, u if (!env_opened(env) || flags != 0) { return EINVAL; } + HANDLE_READ_ONLY_TXN(txn); if (dbname != NULL) { // env_dbremove_subdb() converts (fname, dbname) to dname return env_dbremove_subdb(env, txn, fname, dbname, flags); @@ -2519,6 +2522,7 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co if (!env_opened(env) || flags != 0) { return EINVAL; } + HANDLE_READ_ONLY_TXN(txn); if (dbname != NULL) { // env_dbrename_subdb() converts (fname, dbname) to dname and (fname, newname) to newdname return env_dbrename_subdb(env, txn, fname, dbname, newname, flags); diff --git a/src/ydb_db.cc b/src/ydb_db.cc index 0bae211511a..ee7eaaae5bc 100644 --- a/src/ydb_db.cc +++ b/src/ydb_db.cc @@ -210,6 +210,7 @@ static uint64_t nontransactional_open_id = 0; static int toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) { HANDLE_PANICKED_DB(db); + HANDLE_READ_ONLY_TXN(txn); if (dbname != NULL) { return db_open_subdb(db, txn, fname, dbname, dbtype, flags, mode); } @@ -347,6 +348,7 @@ void toku_db_lt_on_destroy_callback(toku::locktree *lt) { int toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, uint32_t flags, int mode) { //Set comparison functions if not yet set. + HANDLE_READ_ONLY_TXN(txn); if (!db->i->key_compare_was_set && db->dbenv->i->bt_compare) { toku_ft_set_bt_compare(db->i->ft_handle, db->dbenv->i->bt_compare); db->i->key_compare_was_set = true; @@ -469,6 +471,7 @@ int toku_db_pre_acquire_fileops_lock(DB *db, DB_TXN *txn) { static int toku_db_change_descriptor(DB *db, DB_TXN* txn, const DBT* descriptor, uint32_t flags) { HANDLE_PANICKED_DB(db); + HANDLE_READ_ONLY_TXN(txn); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); int r = 0; TOKUTXN ttxn = txn ? 
db_txn_struct_i(txn)->tokutxn : NULL; @@ -695,6 +698,7 @@ autotxn_db_getf_set (DB *db, DB_TXN *txn, uint32_t flags, DBT *key, YDB_CALLBACK static int locked_db_open(DB *db, DB_TXN *txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) { int ret, r; + HANDLE_READ_ONLY_TXN(txn); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); // @@ -1024,6 +1028,7 @@ load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new int locked_load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], char * new_inames_in_env[/*N*/], LSN *load_lsn, bool mark_as_loader) { int ret, r; + HANDLE_READ_ONLY_TXN(txn); DB_TXN *child_txn = NULL; int using_txns = env->i->open_flags & DB_INIT_TXN; diff --git a/src/ydb_txn.cc b/src/ydb_txn.cc index d400f35f5dd..85e071e7395 100644 --- a/src/ydb_txn.cc +++ b/src/ydb_txn.cc @@ -329,6 +329,36 @@ toku_txn_begin(DB_ENV *env, DB_TXN * stxn, DB_TXN ** txn, uint32_t flags) { uint32_t txn_flags = 0; txn_flags |= DB_TXN_NOWAIT; //We do not support blocking locks. RFP remove this? + + // handle whether txn is declared as read only + bool parent_txn_declared_read_only = + stxn && + (db_txn_struct_i(stxn)->flags & DB_TXN_READ_ONLY); + bool txn_declared_read_only = false; + if (flags & DB_TXN_READ_ONLY) { + txn_declared_read_only = true; + txn_flags |= DB_TXN_READ_ONLY; + flags &= ~(DB_TXN_READ_ONLY); + } + if (txn_declared_read_only && stxn && + !parent_txn_declared_read_only + ) + { + return toku_ydb_do_error( + env, + EINVAL, + "Current transaction set as read only, but parent transaction is not\n" + ); + } + if (parent_txn_declared_read_only) + { + // don't require child transaction to also set transaction as read only + // if parent has already done so + txn_flags |= DB_TXN_READ_ONLY; + txn_declared_read_only = true; + } + + TOKU_ISOLATION child_isolation = TOKU_ISO_SERIALIZABLE; uint32_t iso_flags = flags & DB_ISOLATION_FLAGS; if (!(iso_flags == 0 || @@ -434,7 +464,8 @@ toku_txn_begin(DB_ENV *env, DB_TXN * stxn, DB_TXN ** txn, uint32_t flags) { TXNID_PAIR_NONE, snapshot_type, result, - false + false, // for_recovery + txn_declared_read_only // read_only ); if (r != 0) { toku_free(result); diff --git a/src/ydb_write.cc b/src/ydb_write.cc index 89d93dd129a..81e78f7aec4 100644 --- a/src/ydb_write.cc +++ b/src/ydb_write.cc @@ -132,6 +132,7 @@ int toku_db_del(DB *db, DB_TXN *txn, DBT *key, uint32_t flags, bool holds_mo_lock) { HANDLE_PANICKED_DB(db); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); + HANDLE_READ_ONLY_TXN(txn); uint32_t unchecked_flags = flags; //DB_DELETE_ANY means delete regardless of whether it exists in the db. 
@@ -175,6 +176,7 @@ int toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_mo_lock) { HANDLE_PANICKED_DB(db); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); + HANDLE_READ_ONLY_TXN(txn); int r = 0; uint32_t lock_flags = get_prelocked_flags(flags); @@ -222,6 +224,7 @@ toku_db_update(DB *db, DB_TXN *txn, uint32_t flags) { HANDLE_PANICKED_DB(db); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); + HANDLE_READ_ONLY_TXN(txn); int r = 0; uint32_t lock_flags = get_prelocked_flags(flags); @@ -263,6 +266,7 @@ toku_db_update_broadcast(DB *db, DB_TXN *txn, uint32_t flags) { HANDLE_PANICKED_DB(db); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); + HANDLE_READ_ONLY_TXN(txn); int r = 0; uint32_t lock_flags = get_prelocked_flags(flags); @@ -428,6 +432,7 @@ env_del_multiple( DB_INDEXER* indexer = NULL; HANDLE_PANICKED_ENV(env); + HANDLE_READ_ONLY_TXN(txn); uint32_t lock_flags[num_dbs]; uint32_t remaining_flags[num_dbs]; @@ -574,6 +579,7 @@ env_put_multiple_internal( DB_INDEXER* indexer = NULL; HANDLE_PANICKED_ENV(env); + HANDLE_READ_ONLY_TXN(txn); uint32_t lock_flags[num_dbs]; uint32_t remaining_flags[num_dbs]; @@ -674,6 +680,7 @@ env_update_multiple(DB_ENV *env, DB *src_db, DB_TXN *txn, HANDLE_PANICKED_ENV(env); DB_INDEXER* indexer = NULL; + HANDLE_READ_ONLY_TXN(txn); if (!txn) { r = EINVAL;
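Taken together, the new tests and the HANDLE_READ_ONLY_TXN guards above define the user-visible contract of DB_TXN_READ_ONLY: reads (point queries, cursors, any isolation level) behave as before, a child of a read-only parent is implicitly read-only, and every guarded write entry point (db->put, db->del, db->update, the *_multiple calls, loader and indexer creation, dbremove/dbrename, descriptor changes) returns EINVAL. The sketch below shows that contract from the application side; it is modeled on test_cursor_with_read_txn.cc and test_read_txn_invalid_ops.cc above and reuses the same test.h helpers (CKERR, CKERR2, dbt_init, invariant, TOKU_TEST_FILENAME). It is an illustrative sketch only, not an additional test in this patch.

    #include <string.h>
    #include "test.h"

    // Exercises the DB_TXN_READ_ONLY contract the way the new tests above do:
    // reads through a read-only snapshot transaction succeed, guarded write
    // paths return EINVAL, and a child of a read-only parent is read-only too.
    int test_main(int argc, char * const argv[]) {
        (void) argc; (void) argv;
        int r;
        DB_ENV *env;
        DB *db;

        toku_os_recursive_delete(TOKU_TEST_FILENAME);
        r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755);                            CKERR(r);
        r = db_env_create(&env, 0);                                             CKERR(r);
        r = env->open(env, TOKU_TEST_FILENAME,
                      DB_INIT_MPOOL|DB_CREATE|DB_THREAD|DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE,
                      0755);                                                    CKERR(r);
        r = db_create(&db, env, 0);                                             CKERR(r);
        r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);      CKERR(r);

        // Seed one row under an ordinary, writable transaction.
        DB_TXN *wtxn = NULL;
        int k = 1, v = 10;
        DBT key, val;
        r = env->txn_begin(env, 0, &wtxn, 0);                                   CKERR(r);
        r = db->put(db, wtxn, dbt_init(&key, &k, sizeof k),
                    dbt_init(&val, &v, sizeof v), 0);                           CKERR(r);
        r = wtxn->commit(wtxn, 0);                                              CKERR(r);

        // Begin a read-only snapshot transaction.
        DB_TXN *rtxn = NULL;
        r = env->txn_begin(env, 0, &rtxn, DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY);  CKERR(r);

        // Reads work: scan the row back through a cursor.
        DBC *cursor = NULL;
        DBT rkey, rval;
        memset(&rkey, 0, sizeof rkey);
        memset(&rval, 0, sizeof rval);
        r = db->cursor(db, rtxn, &cursor, 0);                                   CKERR(r);
        r = cursor->c_get(cursor, &rkey, &rval, DB_FIRST);                      CKERR(r);
        invariant(*(int *) rval.data == 10);
        r = cursor->c_close(cursor);                                            CKERR(r);

        // Writes are rejected: HANDLE_READ_ONLY_TXN makes db->put return EINVAL.
        r = db->put(db, rtxn, dbt_init(&key, &k, sizeof k),
                    dbt_init(&val, &v, sizeof v), 0);                           CKERR2(r, EINVAL);

        // A child of a read-only parent inherits the read-only flag without
        // having to pass DB_TXN_READ_ONLY itself.
        DB_TXN *child = NULL;
        r = env->txn_begin(env, rtxn, &child, DB_TXN_SNAPSHOT);                 CKERR(r);
        r = child->commit(child, 0);                                            CKERR(r);
        r = rtxn->commit(rtxn, 0);                                              CKERR(r);

        r = db->close(db, 0);                                                   CKERR(r);
        r = env->close(env, 0);                                                 CKERR(r);
        return 0;
    }

As test_simple_read_txn.cc also verifies, the inverse nesting is rejected: beginning a DB_TXN_READ_ONLY child under a parent that was not declared read-only fails with EINVAL.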