/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*
COPYING CONDITIONS NOTICE:

  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation, and provided that the
  following conditions are met:

      * Redistributions of source code must retain this COPYING
        CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
        DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
        PATENT MARKING NOTICE (below), and the PATENT RIGHTS
        GRANT (below).

      * Redistributions in binary form must reproduce this COPYING
        CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
        DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
        PATENT MARKING NOTICE (below), and the PATENT RIGHTS
        GRANT (below) in the documentation and/or other materials
        provided with the distribution.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  02110-1301, USA.

COPYRIGHT NOTICE:

  TokuDB, Tokutek Fractal Tree Indexing Library.
  Copyright (C) 2007-2013 Tokutek, Inc.

DISCLAIMER:

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

UNIVERSITY PATENT NOTICE:

  The technology is licensed by the Massachusetts Institute of
  Technology, Rutgers State University of New Jersey, and the Research
  Foundation of State University of New York at Stony Brook under
  United States of America Serial No. 11/760379 and to the patents
  and/or patent applications resulting from it.

PATENT MARKING NOTICE:

  This software is covered by US Patent No. 8,185,551.

PATENT RIGHTS GRANT:

  "THIS IMPLEMENTATION" means the copyrightable works distributed by
  Tokutek as part of the Fractal Tree project.

  "PATENT CLAIMS" means the claims of patents that are owned or
  licensable by Tokutek, both currently or in the future; and that in
  the absence of this license would be infringed by THIS
  IMPLEMENTATION or by using or running THIS IMPLEMENTATION.

  "PATENT CHALLENGE" shall mean a challenge to the validity,
  patentability, enforceability and/or non-infringement of any of the
  PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS.

  Tokutek hereby grants to you, for the term and geographical scope of
  the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free,
  irrevocable (except as stated in this section) patent license to
  make, have made, use, offer to sell, sell, import, transfer, and
  otherwise run, modify, and propagate the contents of THIS
  IMPLEMENTATION, where such license applies only to the PATENT
  CLAIMS. This grant does not include claims that would be infringed
  only as a consequence of further modifications of THIS
  IMPLEMENTATION. If you or your agent or licensee institute or order
  or agree to the institution of patent litigation against any entity
  (including a cross-claim or counterclaim in a lawsuit) alleging that
  THIS IMPLEMENTATION constitutes direct or contributory patent
  infringement, or inducement of patent infringement, then any rights
  granted to you under this License shall terminate as of the date
  such litigation is filed. If you or your agent or exclusive
  licensee institute or order or agree to the institution of a PATENT
  CHALLENGE, then Tokutek may terminate any rights granted to you
  under this License.
*/
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation          // gcc: Class implementation
#endif

#define MYSQL_SERVER 1
#include "hatoku_defines.h"
#include "hatoku_cmp.h"

extern "C" {
#include "stdint.h"
#if defined(_WIN32)
#include "misc.h"
#endif
}

static inline void *thd_data_get(THD *thd, int slot) {
    return thd->ha_data[slot].ha_ptr;
}

static inline void thd_data_set(THD *thd, int slot, void *data) {
    thd->ha_data[slot].ha_ptr = data;
}

static inline uint get_key_parts(const KEY *key);

#undef PACKAGE
#undef VERSION
#undef HAVE_DTRACE
#undef _DTRACE_VERSION

/* We define DTRACE after mysql_priv.h in case it disabled dtrace in the main server */
#ifdef HAVE_DTRACE
#define _DTRACE_VERSION 1
#endif

#include "tokudb_buffer.h"
#include "tokudb_status.h"
#include "tokudb_card.h"
#include "ha_tokudb.h"
#include "hatoku_hton.h"
#include <mysql/plugin.h>

static const char *ha_tokudb_exts[] = {
    ha_tokudb_ext,
    NullS
};

//
// This offset is calculated starting from AFTER the NULL bytes
//
static inline uint32_t get_fixed_field_size(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) {
    uint offset = 0;
    for (uint i = 0; i < table_share->fields; i++) {
        if (kc_info->field_lengths[i] && !bitmap_is_set(&kc_info->key_filters[keynr], i)) {
            offset += kc_info->field_lengths[i];
        }
    }
    return offset;
}

static inline uint32_t get_len_of_offsets(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) {
    uint len = 0;
    for (uint i = 0; i < table_share->fields; i++) {
        if (kc_info->length_bytes[i] && !bitmap_is_set(&kc_info->key_filters[keynr], i)) {
            len += kc_info->num_offset_bytes;
        }
    }
    return len;
}
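
//
// Worked example (illustrative only, not from the original source): if the
// fields surviving key_filters[keynr] are an INT (field_lengths == 4), a
// CHAR(10) (field_lengths == 10), and a VARCHAR(20) (field_lengths == 0,
// length_bytes == 1), then get_fixed_field_size() returns 4 + 10 = 14 and
// get_len_of_offsets() returns num_offset_bytes for the one variable-length
// field. Both totals are measured AFTER the NULL bytes of the row.
//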

static int allocate_key_and_col_info(TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info) {
    int error;
    //
    // initialize all of the bitmaps
    //
    for (uint i = 0; i < MAX_KEY + 1; i++) {
        error = bitmap_init(
            &kc_info->key_filters[i],
            NULL,
            table_share->fields,
            false
            );
        if (error) {
            goto exit;
        }
    }
    //
    // create the field lengths
    //
    kc_info->field_lengths = (uint16_t *)my_malloc(table_share->fields*sizeof(uint16_t), MYF(MY_WME | MY_ZEROFILL));
    kc_info->length_bytes = (uchar *)my_malloc(table_share->fields, MYF(MY_WME | MY_ZEROFILL));
    kc_info->blob_fields = (uint32_t *)my_malloc(table_share->fields*sizeof(uint32_t), MYF(MY_WME | MY_ZEROFILL));
    if (kc_info->field_lengths == NULL ||
        kc_info->length_bytes == NULL ||
        kc_info->blob_fields == NULL) {
        error = ENOMEM;
        goto exit;
    }
    error = 0;
exit:
    if (error) {
        for (uint i = 0; i < MAX_KEY + 1; i++) {
            bitmap_free(&kc_info->key_filters[i]);
        }
        my_free(kc_info->field_lengths, MYF(MY_ALLOW_ZERO_PTR));
        my_free(kc_info->length_bytes, MYF(MY_ALLOW_ZERO_PTR));
        my_free(kc_info->blob_fields, MYF(MY_ALLOW_ZERO_PTR));
    }
    return error;
}

/** @brief
    Look up, and create if necessary, the TOKUDB_SHARE for a table. The
    share is a structure passed to each tokudb handler for the same table;
    it holds the pieces used for locking, so every handler must have one.
    MUST have tokudb_mutex locked on input
*/
static TOKUDB_SHARE *get_share(const char *table_name, TABLE_SHARE* table_share) {
    TOKUDB_SHARE *share = NULL;
    int error = 0;
    uint length;
    length = (uint) strlen(table_name);
    if (!(share = (TOKUDB_SHARE *) my_hash_search(&tokudb_open_tables, (uchar *) table_name, length))) {
        char *tmp_name;
        //
        // create share and fill it with all zeroes
        // hence, all pointers are initialized to NULL
        //
        share = (TOKUDB_SHARE *) my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
                                                 &share, sizeof(*share),
                                                 &tmp_name, length + 1,
                                                 NullS
                                                 );
        assert(share);
        share->use_count = 0;
        share->table_name_length = length;
        share->table_name = tmp_name;
        strmov(share->table_name, table_name);
        error = allocate_key_and_col_info(table_share, &share->kc_info);
        if (error) {
            goto exit;
        }
        memset((void *) share->key_file, 0, sizeof(share->key_file));
        error = my_hash_insert(&tokudb_open_tables, (uchar *) share);
        if (error) {
            goto exit;
        }
        thr_lock_init(&share->lock);
        pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST);
        my_rwlock_init(&share->num_DBs_lock, 0);
    }
exit:
    if (error) {
        // all failure paths above occur before share->mutex is initialized,
        // so there is nothing to destroy here
        my_free((uchar *) share, MYF(0));
        share = NULL;
    }
    return share;
}

static void free_key_and_col_info(KEY_AND_COL_INFO* kc_info) {
    for (uint i = 0; i < MAX_KEY+1; i++) {
        bitmap_free(&kc_info->key_filters[i]);
    }
    for (uint i = 0; i < MAX_KEY+1; i++) {
        my_free(kc_info->cp_info[i], MYF(MY_ALLOW_ZERO_PTR));
        kc_info->cp_info[i] = NULL; // 3144
    }
    my_free(kc_info->field_lengths, MYF(MY_ALLOW_ZERO_PTR));
    my_free(kc_info->length_bytes, MYF(MY_ALLOW_ZERO_PTR));
    my_free(kc_info->blob_fields, MYF(MY_ALLOW_ZERO_PTR));
}

//
// MUST have tokudb_mutex locked on input
// bool mutex_is_locked specifies if share->mutex is locked
//
static int free_share(TOKUDB_SHARE * share, bool mutex_is_locked) {
    int error, result = 0;
    if (mutex_is_locked) {
        pthread_mutex_unlock(&share->mutex);
    }
    if (!--share->use_count) {
        DBUG_PRINT("info", ("share->use_count %u", share->use_count));
        //
        // number of open DB's may not be equal to number of keys we have because add_index
        // may have added some. So, we loop through entire array and close any non-NULL value
        // It is imperative that we reset a DB to NULL once we are done with it.
        //
        for (uint i = 0; i < sizeof(share->key_file)/sizeof(share->key_file[0]); i++) {
            if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
                TOKUDB_TRACE("dbclose:%p\n", share->key_file[i]);
            }
            if (share->key_file[i]) {
                error = share->key_file[i]->close(share->key_file[i], 0);
                assert(error == 0);
                if (error) {
                    result = error;
                }
                share->key_file[i] = NULL;
            }
        }
        free_key_and_col_info(&share->kc_info);
        error = tokudb::close_status(&share->status_block);
        assert(error == 0);
        my_hash_delete(&tokudb_open_tables, (uchar *) share);
        thr_lock_delete(&share->lock);
        pthread_mutex_destroy(&share->mutex);
        rwlock_destroy(&share->num_DBs_lock);
        my_free((uchar *) share, MYF(0));
    }
    return result;
}

#define HANDLE_INVALID_CURSOR() \
    if (cursor == NULL) { \
        error = last_cursor_error; \
        goto cleanup; \
    }

const char *ha_tokudb::table_type() const {
    extern const char * const tokudb_hton_name;
    return tokudb_hton_name;
}

const char *ha_tokudb::index_type(uint inx) {
    return "BTREE";
}

/*
 * returns NULL terminated file extension string
 */
const char **ha_tokudb::bas_ext() const {
    TOKUDB_DBUG_ENTER("ha_tokudb::bas_ext");
    DBUG_RETURN(ha_tokudb_exts);
}

static inline bool is_insert_ignore(THD* thd) {
    //
    // from http://lists.mysql.com/internals/37735
    //
    return thd->lex->ignore && thd->lex->duplicates == DUP_ERROR;
}

static inline bool is_replace_into(THD* thd) {
    return thd->lex->duplicates == DUP_REPLACE;
}

static inline bool do_ignore_flag_optimization(THD* thd, TABLE* table, bool opt_eligible) {
    uint pk_insert_mode = get_pk_insert_mode(thd);
    return (
        opt_eligible &&
        (is_replace_into(thd) || is_insert_ignore(thd)) &&
        ((!table->triggers && pk_insert_mode < 2) || pk_insert_mode == 0)
        );
}
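
//
// Example (illustrative): "REPLACE INTO t ..." on a table with no triggers
// while the tokudb_pk_insert_mode session variable is below 2 passes this
// test (as does "INSERT IGNORE ..." via is_insert_ignore()), so eligible
// writes can be issued blindly instead of read-before-write.
//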

static inline uint get_key_parts(const KEY *key) {
#if 50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
    return key->user_defined_key_parts;
#else
    return key->key_parts;
#endif
}

#if TOKU_INCLUDE_EXTENDED_KEYS
static inline uint get_ext_key_parts(const KEY *key) {
#if 50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
    return key->actual_key_parts;
#elif defined(MARIADB_BASE_VERSION)
    return key->ext_key_parts;
#else
#error
#endif
}
#endif

ulonglong ha_tokudb::table_flags() const {
    return (table && do_ignore_flag_optimization(ha_thd(), table, share->replace_into_fast) ?
            int_table_flags | HA_BINLOG_STMT_CAPABLE :
            int_table_flags | HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE);
}

//
// Returns a bit mask of capabilities of the key or its part specified by
// the arguments. The capabilities are defined in sql/handler.h.
//
ulong ha_tokudb::index_flags(uint idx, uint part, bool all_parts) const {
    TOKUDB_DBUG_ENTER("ha_tokudb::index_flags");
    assert(table_share);
    ulong flags = (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_KEYREAD_ONLY | HA_READ_RANGE);
#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
    flags |= HA_DO_INDEX_COND_PUSHDOWN;
#endif
    if (table_share->key_info[idx].flags & HA_CLUSTERING) {
        flags |= HA_CLUSTERED_INDEX;
    }
    DBUG_RETURN(flags);
}

//
// struct that will be used as a context for smart DBT callbacks
// contains parameters needed to complete the smart DBT cursor call
//
typedef struct smart_dbt_info {
    ha_tokudb* ha;  // instance to ha_tokudb needed for reading the row
    uchar* buf;     // output buffer where row will be written
    uint keynr;     // index into share->key_file that represents DB we are currently operating on
} *SMART_DBT_INFO;

typedef struct smart_dbt_bf_info {
    ha_tokudb* ha;
    bool need_val;
    int direction;
    THD* thd;
    uchar* buf;
    DBT* key_to_compare;
} *SMART_DBT_BF_INFO;

typedef struct index_read_info {
    struct smart_dbt_info smart_dbt_info;
    int cmp;
    DBT* orig_key;
} *INDEX_READ_INFO;

static int ai_poll_fun(void *extra, float progress) {
    LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
    if (context->thd->killed) {
        sprintf(context->write_status_msg, "The process has been killed, aborting add index.");
        return ER_ABORTING_CONNECTION;
    }
    float percentage = progress * 100;
    sprintf(context->write_status_msg, "Adding of indexes about %.1f%% done", percentage);
    thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
    thd_progress_report(context->thd, (unsigned long long) percentage, 100);
#endif
    return 0;
}

static int loader_poll_fun(void *extra, float progress) {
    LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
    if (context->thd->killed) {
        sprintf(context->write_status_msg, "The process has been killed, aborting bulk load.");
        return ER_ABORTING_CONNECTION;
    }
    float percentage = progress * 100;
    sprintf(context->write_status_msg, "Loading of data about %.1f%% done", percentage);
    thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
    thd_progress_report(context->thd, (unsigned long long) percentage, 100);
#endif
    return 0;
}

static void loader_ai_err_fun(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra) {
    LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
    assert(context->ha);
    context->ha->set_loader_error(err);
}

static void loader_dup_fun(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra) {
    LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
    assert(context->ha);
    context->ha->set_loader_error(err);
    if (err == DB_KEYEXIST) {
        context->ha->set_dup_value_for_pk(key);
    }
}

//
// smart DBT callback function for optimize
// in optimize, we want to flatten DB by doing
// a full table scan. Therefore, we don't
// want to actually do anything with the data, hence
// callback does nothing
//
static int smart_dbt_do_nothing(DBT const *key, DBT const *row, void *context) {
    return 0;
}

static int smart_dbt_metacallback(DBT const *key, DBT const *row, void *context) {
    DBT* val = (DBT *)context;
    val->data = my_malloc(row->size, MYF(MY_WME|MY_ZEROFILL));
    if (val->data == NULL) return ENOMEM;
    memcpy(val->data, row->data, row->size);
    val->size = row->size;
    return 0;
}

static int
smart_dbt_callback_rowread_ptquery(DBT const *key, DBT const *row, void *context) {
    SMART_DBT_INFO info = (SMART_DBT_INFO)context;
    info->ha->extract_hidden_primary_key(info->keynr, key);
    return info->ha->read_row_callback(info->buf, info->keynr, row, key);
}

//
// Smart DBT callback function in case where we have a covering index
//
static int
smart_dbt_callback_keyread(DBT const *key, DBT const *row, void *context) {
    SMART_DBT_INFO info = (SMART_DBT_INFO)context;
    info->ha->extract_hidden_primary_key(info->keynr, key);
    info->ha->read_key_only(info->buf, info->keynr, key);
    return 0;
}

//
// Smart DBT callback function in case where we do NOT have a covering index
//
static int
smart_dbt_callback_rowread(DBT const *key, DBT const *row, void *context) {
    int error = 0;
    SMART_DBT_INFO info = (SMART_DBT_INFO)context;
    info->ha->extract_hidden_primary_key(info->keynr, key);
    error = info->ha->read_primary_key(info->buf, info->keynr, row, key);
    return error;
}

//
// Smart DBT callback function in case where we have a covering index
//
static int
smart_dbt_callback_ir_keyread(DBT const *key, DBT const *row, void *context) {
    INDEX_READ_INFO ir_info = (INDEX_READ_INFO)context;
    ir_info->cmp = ir_info->smart_dbt_info.ha->prefix_cmp_dbts(ir_info->smart_dbt_info.keynr, ir_info->orig_key, key);
    if (ir_info->cmp) {
        return 0;
    }
    return smart_dbt_callback_keyread(key, row, &ir_info->smart_dbt_info);
}

static int
smart_dbt_callback_lookup(DBT const *key, DBT const *row, void *context) {
    INDEX_READ_INFO ir_info = (INDEX_READ_INFO)context;
    ir_info->cmp = ir_info->smart_dbt_info.ha->prefix_cmp_dbts(ir_info->smart_dbt_info.keynr, ir_info->orig_key, key);
    return 0;
}

//
// Smart DBT callback function in case where we do NOT have a covering index
//
static int
smart_dbt_callback_ir_rowread(DBT const *key, DBT const *row, void *context) {
    INDEX_READ_INFO ir_info = (INDEX_READ_INFO)context;
    ir_info->cmp = ir_info->smart_dbt_info.ha->prefix_cmp_dbts(ir_info->smart_dbt_info.keynr, ir_info->orig_key, key);
    if (ir_info->cmp) {
        return 0;
    }
    return smart_dbt_callback_rowread(key, row, &ir_info->smart_dbt_info);
}

//
// macro for Smart DBT callback function,
// so we do not need to put this long line of code in multiple places
//
#define SMART_DBT_CALLBACK ( this->key_read ? smart_dbt_callback_keyread : smart_dbt_callback_rowread )
#define SMART_DBT_IR_CALLBACK ( this->key_read ? smart_dbt_callback_ir_keyread : smart_dbt_callback_ir_rowread )

//
// macro that modifies read flag for cursor operations depending on whether
// we have preacquired lock or not
//
#define SET_PRELOCK_FLAG(flg) ((flg) | (range_lock_grabbed ? (use_write_locks ? DB_PRELOCKED_WRITE : DB_PRELOCKED) : 0))
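
//
// Usage sketch (illustrative, assuming a cursor read path like the ones
// later in this file): with key_read set, SMART_DBT_CALLBACK picks
// smart_dbt_callback_keyread, and SET_PRELOCK_FLAG(0) adds DB_PRELOCKED
// (or DB_PRELOCKED_WRITE) only when range_lock_grabbed is true:
//
//     error = cursor->c_getf_next(cursor, SET_PRELOCK_FLAG(0),
//                                 SMART_DBT_CALLBACK, &info);
//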

//
// This method retrieves the value of the auto increment column of a record in MySQL format
// This was basically taken from MyISAM
// Parameters:
//              type - the type of the auto increment column (e.g. int, float, double...)
//              offset - offset into the record where the auto increment column is stored
//      [in]    record - MySQL row whose auto increment value we want to extract
// Returns:
//      The value of the auto increment column in record
//
ulonglong retrieve_auto_increment(uint16 type, uint32 offset, const uchar *record)
{
    const uchar *key;                   /* Key */
    ulonglong unsigned_autoinc = 0;     /* Unsigned auto-increment */
    longlong signed_autoinc = 0;        /* Signed auto-increment */
    enum { unsigned_type, signed_type } autoinc_type;
    float float_tmp;                    /* Temporary variable */
    double double_tmp;                  /* Temporary variable */

    key = ((uchar *) record) + offset;

    /* Set default autoincrement type */
    autoinc_type = unsigned_type;

    switch (type) {
    case HA_KEYTYPE_INT8:
        signed_autoinc = (longlong) *(char*)key;
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_BINARY:
        unsigned_autoinc = (ulonglong) *(uchar*) key;
        break;
    case HA_KEYTYPE_SHORT_INT:
        signed_autoinc = (longlong) sint2korr(key);
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_USHORT_INT:
        unsigned_autoinc = (ulonglong) uint2korr(key);
        break;
    case HA_KEYTYPE_LONG_INT:
        signed_autoinc = (longlong) sint4korr(key);
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_ULONG_INT:
        unsigned_autoinc = (ulonglong) uint4korr(key);
        break;
    case HA_KEYTYPE_INT24:
        signed_autoinc = (longlong) sint3korr(key);
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_UINT24:
        unsigned_autoinc = (ulonglong) uint3korr(key);
        break;
    case HA_KEYTYPE_LONGLONG:
        signed_autoinc = sint8korr(key);
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_ULONGLONG:
        unsigned_autoinc = uint8korr(key);
        break;
    /* The remaining two cases should not be used but are included for
       compatibility */
    case HA_KEYTYPE_FLOAT:
        float4get(float_tmp, key);  /* Note: float4get is a macro */
        signed_autoinc = (longlong) float_tmp;
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_DOUBLE:
        float8get(double_tmp, key); /* Note: float8get is a macro */
        signed_autoinc = (longlong) double_tmp;
        autoinc_type = signed_type;
        break;
    default:
        DBUG_ASSERT(0);
        unsigned_autoinc = 0;
    }

    if (signed_autoinc < 0) {
        signed_autoinc = 0;
    }

    return autoinc_type == unsigned_type ?
           unsigned_autoinc : (ulonglong) signed_autoinc;
}
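
//
// Worked example (illustrative): for a signed INT auto-increment column,
// type is HA_KEYTYPE_LONG_INT, so the four bytes at record + offset are
// decoded with sint4korr(); bytes decoding to 42 make the function return
// 42, while bytes decoding to -7 are clamped to 0 by the check above.
//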

static inline bool
is_null_field(TABLE* table, Field* field, const uchar* record) {
    uint null_offset;
    bool ret_val;
    if (!field->real_maybe_null()) {
        ret_val = false;
        goto exitpt;
    }
    null_offset = get_null_offset(table, field);
    ret_val = (record[null_offset] & field->null_bit) ? true : false;
exitpt:
    return ret_val;
}

static inline ulong field_offset(Field* field, TABLE* table) {
    return ((ulong) (field->ptr - table->record[0]));
}

static inline HA_TOKU_ISO_LEVEL tx_to_toku_iso(ulong tx_isolation) {
    if (tx_isolation == ISO_READ_UNCOMMITTED) {
        return hatoku_iso_read_uncommitted;
    }
    else if (tx_isolation == ISO_READ_COMMITTED) {
        return hatoku_iso_read_committed;
    }
    else if (tx_isolation == ISO_REPEATABLE_READ) {
        return hatoku_iso_repeatable_read;
    }
    else {
        return hatoku_iso_serializable;
    }
}

static inline uint32_t toku_iso_to_txn_flag(HA_TOKU_ISO_LEVEL lvl) {
    if (lvl == hatoku_iso_read_uncommitted) {
        return DB_READ_UNCOMMITTED;
    }
    else if (lvl == hatoku_iso_read_committed) {
        return DB_READ_COMMITTED;
    }
    else if (lvl == hatoku_iso_repeatable_read) {
        return DB_TXN_SNAPSHOT;
    }
    else {
        return 0;
    }
}
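
//
// Net effect of the two helpers above (summary, derived from the code):
//
//     READ UNCOMMITTED -> DB_READ_UNCOMMITTED
//     READ COMMITTED   -> DB_READ_COMMITTED
//     REPEATABLE READ  -> DB_TXN_SNAPSHOT
//     SERIALIZABLE     -> 0 (no extra txn_begin flag)
//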

static int filter_key_part_compare(const void* left, const void* right) {
    FILTER_KEY_PART_INFO* left_part = (FILTER_KEY_PART_INFO *)left;
    FILTER_KEY_PART_INFO* right_part = (FILTER_KEY_PART_INFO *)right;
    return left_part->offset - right_part->offset;
}

//
// Be very careful with the parameters passed to this function. There is no
// guarantee that key and table have their info properly set; this had to be
// verified in the debugger.
//
void set_key_filter(MY_BITMAP* key_filter, KEY* key, TABLE* table, bool get_offset_from_keypart) {
    FILTER_KEY_PART_INFO parts[MAX_REF_PARTS];
    uint curr_skip_index = 0;

    for (uint i = 0; i < get_key_parts(key); i++) {
        //
        // horrendous hack due to bugs in mysql, basically
        // we cannot always reliably get the offset from the same source
        //
        parts[i].offset = get_offset_from_keypart ? key->key_part[i].offset : field_offset(key->key_part[i].field, table);
        parts[i].part_index = i;
    }
    qsort(
        parts,              // start of array
        get_key_parts(key), // num elements
        sizeof(*parts),     // size of each element
        filter_key_part_compare
        );

    for (uint i = 0; i < table->s->fields; i++) {
        Field* field = table->field[i];
        uint curr_field_offset = field_offset(field, table);
        if (curr_skip_index < get_key_parts(key)) {
            uint curr_skip_offset = 0;
            curr_skip_offset = parts[curr_skip_index].offset;
            if (curr_skip_offset == curr_field_offset) {
                //
                // we have hit a field that is a portion of the primary key
                //
                uint curr_key_index = parts[curr_skip_index].part_index;
                curr_skip_index++;
                //
                // only choose to continue over the key if the key's length matches the field's length
                // otherwise, we may have a situation where the column is a varchar(10), the
                // key is only the first 3 characters, and we end up losing the last 7 bytes of the
                // column
                //
                TOKU_TYPE toku_type = mysql_to_toku_type(field);
                switch (toku_type) {
                case toku_type_blob:
                    break;
                case toku_type_varbinary:
                case toku_type_varstring:
                case toku_type_fixbinary:
                case toku_type_fixstring:
                    if (key->key_part[curr_key_index].length == field->field_length) {
                        bitmap_set_bit(key_filter, i);
                    }
                    break;
                default:
                    bitmap_set_bit(key_filter, i);
                    break;
                }
            }
        }
    }
}
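
//
// Example (illustrative): a fully indexed INT column gets its bit set in
// key_filter, so its bytes are recoverable from the key and need not be
// stored in the row again; a VARCHAR(10) indexed only on its first 3
// characters fails the length check above and stays in the row, since
// filtering it would lose the remaining bytes of the column.
//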

static inline uchar* pack_fixed_field(
    uchar* to_tokudb,
    const uchar* from_mysql,
    uint32_t num_bytes
    )
{
    switch (num_bytes) {
    case (1):
        memcpy(to_tokudb, from_mysql, 1);
        break;
    case (2):
        memcpy(to_tokudb, from_mysql, 2);
        break;
    case (3):
        memcpy(to_tokudb, from_mysql, 3);
        break;
    case (4):
        memcpy(to_tokudb, from_mysql, 4);
        break;
    case (8):
        memcpy(to_tokudb, from_mysql, 8);
        break;
    default:
        memcpy(to_tokudb, from_mysql, num_bytes);
        break;
    }
    return to_tokudb + num_bytes;
}

static inline const uchar* unpack_fixed_field(
    uchar* to_mysql,
    const uchar* from_tokudb,
    uint32_t num_bytes
    )
{
    switch (num_bytes) {
    case (1):
        memcpy(to_mysql, from_tokudb, 1);
        break;
    case (2):
        memcpy(to_mysql, from_tokudb, 2);
        break;
    case (3):
        memcpy(to_mysql, from_tokudb, 3);
        break;
    case (4):
        memcpy(to_mysql, from_tokudb, 4);
        break;
    case (8):
        memcpy(to_mysql, from_tokudb, 8);
        break;
    default:
        memcpy(to_mysql, from_tokudb, num_bytes);
        break;
    }
    return from_tokudb + num_bytes;
}

static inline uchar* write_var_field(
    uchar* to_tokudb_offset_ptr,   // location where offset data is going to be written
    uchar* to_tokudb_data,         // location where data is going to be written
    uchar* to_tokudb_offset_start, // base of the variable-length data region; offsets are measured from here
    const uchar* data,             // the data to write
    uint32_t data_length,          // length of data to write
    uint32_t offset_bytes          // number of offset bytes
    )
{
    memcpy(to_tokudb_data, data, data_length);
    //
    // for offset, we pack the offset where the data ENDS!
    //
    uint32_t offset = to_tokudb_data + data_length - to_tokudb_offset_start;
    switch (offset_bytes) {
    case (1):
        to_tokudb_offset_ptr[0] = (uchar)offset;
        break;
    case (2):
        int2store(to_tokudb_offset_ptr, offset);
        break;
    default:
        assert(false);
        break;
    }
    return to_tokudb_data + data_length;
}
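
//
// Worked example (illustrative, assuming to_tokudb_offset_start points at
// the start of the variable-length data region): packing "ab" and then
// "cdef" with offset_bytes == 1 stores the offsets 2 and 6, because each
// stored offset is where that field's data ENDS relative to the start:
//
//     offsets: [0x02][0x06]
//     data:    ['a']['b']['c']['d']['e']['f']
//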

static inline uint32_t get_var_data_length(
    const uchar* from_mysql,
    uint32_t mysql_length_bytes
    )
{
    uint32_t data_length;
    switch (mysql_length_bytes) {
    case (1):
        data_length = from_mysql[0];
        break;
    case (2):
        data_length = uint2korr(from_mysql);
        break;
    default:
        assert(false);
        break;
    }
    return data_length;
}

static inline uchar* pack_var_field(
    uchar* to_tokudb_offset_ptr,   // location where offset data is going to be written
    uchar* to_tokudb_data,         // pointer to where tokudb data should be written
    uchar* to_tokudb_offset_start, // base of the variable-length data region; offsets are measured from here
    const uchar* from_mysql,       // mysql data
    uint32_t mysql_length_bytes,   // number of bytes used to store length in from_mysql
    uint32_t offset_bytes          // number of offset_bytes used in tokudb row
    )
{
    uint data_length = get_var_data_length(from_mysql, mysql_length_bytes);
    return write_var_field(
        to_tokudb_offset_ptr,
        to_tokudb_data,
        to_tokudb_offset_start,
        from_mysql + mysql_length_bytes,
        data_length,
        offset_bytes
        );
}

static inline void unpack_var_field(
    uchar* to_mysql,
    const uchar* from_tokudb_data,
    uint32_t from_tokudb_data_len,
    uint32_t mysql_length_bytes
    )
{
    //
    // store the length
    //
    switch (mysql_length_bytes) {
    case (1):
        to_mysql[0] = (uchar)from_tokudb_data_len;
        break;
    case (2):
        int2store(to_mysql, from_tokudb_data_len);
        break;
    default:
        assert(false);
        break;
    }
    //
    // store the data
    //
    memcpy(to_mysql + mysql_length_bytes, from_tokudb_data, from_tokudb_data_len);
}
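
//
// Note: unpack_var_field() undoes pack_var_field(); callers recover
// from_tokudb_data_len by subtracting adjacent stored offsets, so in the
// example above the second field unpacks with length 6 - 2 = 4.
//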

static uchar* pack_toku_field_blob(
    uchar* to_tokudb,
    const uchar* from_mysql,
    Field* field
    )
{
    uint32_t len_bytes = field->row_pack_length();
    uint32_t length = 0;
    uchar* data_ptr = NULL;
    memcpy(to_tokudb, from_mysql, len_bytes);

    switch (len_bytes) {
    case (1):
        length = (uint32_t)(*from_mysql);
        break;
    case (2):
        length = uint2korr(from_mysql);
        break;
    case (3):
        length = uint3korr(from_mysql);
        break;
    case (4):
        length = uint4korr(from_mysql);
        break;
    default:
        assert(false);
    }

    if (length > 0) {
        memcpy_fixed((uchar *)(&data_ptr), from_mysql + len_bytes, sizeof(uchar*));
        memcpy(to_tokudb + len_bytes, data_ptr, length);
    }
    return (to_tokudb + len_bytes + length);
}
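
//
// Layout sketch (illustrative): a TINYBLOB holding "xyz" (len_bytes == 1)
// packs as [0x03]['x']['y']['z']. In the MySQL record the same field is
// [0x03][pointer-to-data], which is why the data pointer must be fetched
// with memcpy_fixed() before the payload can be copied inline.
//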

static int add_table_to_metadata(const char *name, TABLE* table, DB_TXN* txn) {
    int error = 0;
    DBT key;
    DBT val;
    uchar hidden_primary_key = (table->s->primary_key >= MAX_KEY);
    assert(txn);

    memset((void *)&key, 0, sizeof(key));
    memset((void *)&val, 0, sizeof(val));
    key.data = (void *)name;
    key.size = strlen(name) + 1;
    val.data = &hidden_primary_key;
    val.size = sizeof(hidden_primary_key);
    error = metadata_db->put(
        metadata_db,
        txn,
        &key,
        &val,
        0
        );
    return error;
}

static int drop_table_from_metadata(const char *name, DB_TXN* txn) {
    int error = 0;
    DBT key;
    DBT data;
    assert(txn);
    memset((void *)&key, 0, sizeof(key));
    memset((void *)&data, 0, sizeof(data));
    key.data = (void *)name;
    key.size = strlen(name) + 1;
    error = metadata_db->del(
        metadata_db,
        txn,
        &key,
        DB_DELETE_ANY
        );
    return error;
}

static int rename_table_in_metadata(const char *from, const char *to, DB_TXN* txn) {
    int error = 0;
    DBT from_key;
    DBT to_key;
    DBT val;
    assert(txn);
    memset((void *)&from_key, 0, sizeof(from_key));
    memset((void *)&to_key, 0, sizeof(to_key));
    memset((void *)&val, 0, sizeof(val));
    from_key.data = (void *)from;
    from_key.size = strlen(from) + 1;
    to_key.data = (void *)to;
    to_key.size = strlen(to) + 1;

    error = metadata_db->getf_set(
        metadata_db,
        txn,
        0,
        &from_key,
        smart_dbt_metacallback,
        &val
        );
    if (error) {
        goto cleanup;
    }
    error = metadata_db->put(
        metadata_db,
        txn,
        &to_key,
        &val,
        0
        );
    if (error) {
        goto cleanup;
    }
    error = metadata_db->del(
        metadata_db,
        txn,
        &from_key,
        DB_DELETE_ANY
        );
    if (error) {
        goto cleanup;
    }
    error = 0;
cleanup:
    my_free(val.data, MYF(MY_ALLOW_ZERO_PTR));
    return error;
}

static int check_table_in_metadata(const char *name, bool* table_found, DB_TXN* txn) {
    int error = 0;
    DBT key;
    pthread_mutex_lock(&tokudb_meta_mutex);
    memset((void *)&key, 0, sizeof(key));
    key.data = (void *)name;
    key.size = strlen(name) + 1;
    error = metadata_db->getf_set(
        metadata_db,
        txn,
        0,
        &key,
        smart_dbt_do_nothing,
        NULL
        );
    if (error == 0) {
        *table_found = true;
    }
    else if (error == DB_NOTFOUND) {
        *table_found = false;
        error = 0;
    }
    pthread_mutex_unlock(&tokudb_meta_mutex);
    return error;
}

static int create_tokudb_trx_data_instance(tokudb_trx_data** out_trx) {
    int error;
    tokudb_trx_data* trx = NULL;
    trx = (tokudb_trx_data *) my_malloc(sizeof(*trx), MYF(MY_ZEROFILL));
    if (!trx) {
        error = ENOMEM;
        goto cleanup;
    }
    *out_trx = trx;
    error = 0;
cleanup:
    return error;
}

static inline int tokudb_generate_row(
    DB *dest_db,
    DB *src_db,
    DBT *dest_key,
    DBT *dest_val,
    const DBT *src_key,
    const DBT *src_val
    )
{
    int error;
    DB* curr_db = dest_db;
    uchar* row_desc = NULL;
    uint32_t desc_size;
    uchar* buff = NULL;
    uint32_t max_key_len = 0;

    row_desc = (uchar *)curr_db->descriptor->dbt.data;
    row_desc += (*(uint32_t *)row_desc);
    desc_size = (*(uint32_t *)row_desc) - 4;
    row_desc += 4;

    if (is_key_pk(row_desc, desc_size)) {
        if (dest_key->flags == DB_DBT_REALLOC && dest_key->data != NULL) {
            free(dest_key->data);
        }
        if (dest_val != NULL) {
            if (dest_val->flags == DB_DBT_REALLOC && dest_val->data != NULL) {
                free(dest_val->data);
            }
        }
        dest_key->data = src_key->data;
        dest_key->size = src_key->size;
        dest_key->flags = 0;
        if (dest_val != NULL) {
            dest_val->data = src_val->data;
            dest_val->size = src_val->size;
            dest_val->flags = 0;
        }
        error = 0;
        goto cleanup;
    }
    // at this point, we need to create the key/val and set it
    // in the DBTs
    if (dest_key->flags == 0) {
        dest_key->ulen = 0;
        dest_key->size = 0;
        dest_key->data = NULL;
        dest_key->flags = DB_DBT_REALLOC;
    }
    if (dest_key->flags == DB_DBT_REALLOC) {
        max_key_len = max_key_size_from_desc(row_desc, desc_size);
        max_key_len += src_key->size;

        if (max_key_len > dest_key->ulen) {
            void* old_ptr = dest_key->data;
            void* new_ptr = NULL;
            new_ptr = realloc(old_ptr, max_key_len);
            assert(new_ptr);
            dest_key->data = new_ptr;
            dest_key->ulen = max_key_len;
        }

        buff = (uchar *)dest_key->data;
        assert(buff != NULL && max_key_len > 0);
    }
    else {
        assert(false);
    }

    dest_key->size = pack_key_from_desc(
        buff,
        row_desc,
        desc_size,
        src_key,
        src_val
        );
    assert(dest_key->ulen >= dest_key->size);
    if ((tokudb_debug & TOKUDB_DEBUG_CHECK_KEY) && !max_key_len) {
        max_key_len = max_key_size_from_desc(row_desc, desc_size);
        max_key_len += src_key->size;
    }
    if (max_key_len) {
        assert(max_key_len >= dest_key->size);
    }

    row_desc += desc_size;
    desc_size = (*(uint32_t *)row_desc) - 4;
    row_desc += 4;
    if (dest_val != NULL) {
        if (!is_key_clustering(row_desc, desc_size) || src_val->size == 0) {
            dest_val->size = 0;
        }
        else {
            uchar* buff = NULL;
            if (dest_val->flags == 0) {
                dest_val->ulen = 0;
                dest_val->size = 0;
                dest_val->data = NULL;
                dest_val->flags = DB_DBT_REALLOC;
            }
            if (dest_val->flags == DB_DBT_REALLOC) {
                if (dest_val->ulen < src_val->size) {
                    void* old_ptr = dest_val->data;
                    void* new_ptr = NULL;
                    new_ptr = realloc(old_ptr, src_val->size);
                    assert(new_ptr);
                    dest_val->data = new_ptr;
                    dest_val->ulen = src_val->size;
                }
                buff = (uchar *)dest_val->data;
                assert(buff != NULL);
            }
            else {
                assert(false);
            }
            dest_val->size = pack_clustering_val_from_desc(
                buff,
                row_desc,
                desc_size,
                src_val
                );
            assert(dest_val->ulen >= dest_val->size);
        }
    }
    error = 0;
cleanup:
    return error;
}
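
//
// Descriptor layout as read by tokudb_generate_row() (inferred from the
// pointer arithmetic above, shown for orientation only):
//
//     [uint32 size of section 0][section 0 ...]
//     [uint32 size][key descriptor        (size - 4 bytes)]
//     [uint32 size][clustering descriptor (size - 4 bytes)]
//
// The key descriptor drives the primary-key fast path (is_key_pk) and
// pack_key_from_desc(); the clustering descriptor drives
// pack_clustering_val_from_desc().
//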

static int generate_row_for_del(
    DB *dest_db,
    DB *src_db,
    DBT *dest_key,
    const DBT *src_key,
    const DBT *src_val
    )
{
    return tokudb_generate_row(
        dest_db,
        src_db,
        dest_key,
        NULL,
        src_key,
        src_val
        );
}

static int generate_row_for_put(
    DB *dest_db,
    DB *src_db,
    DBT *dest_key,
    DBT *dest_val,
    const DBT *src_key,
    const DBT *src_val
    )
{
    return tokudb_generate_row(
        dest_db,
        src_db,
        dest_key,
        dest_val,
        src_key,
        src_val
        );
}

ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, table_arg)
    // flags defined in sql\handler.h
{
    share = NULL;
    int_table_flags = HA_REC_NOT_IN_SEQ | HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_PRIMARY_KEY_IN_READ_INDEX |
        HA_FILE_BASED | HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX | HA_CAN_WRITE_DURING_OPTIMIZE;
    alloc_ptr = NULL;
    rec_buff = NULL;
    rec_update_buff = NULL;
    transaction = NULL;
    is_fast_alter_running = false;
    cursor = NULL;
    fixed_cols_for_query = NULL;
    var_cols_for_query = NULL;
    num_fixed_cols_for_query = 0;
    num_var_cols_for_query = 0;
    unpack_entire_row = true;
    read_blobs = false;
    read_key = false;
    added_rows = 0;
    deleted_rows = 0;
    last_dup_key = UINT_MAX;
    using_ignore = 0;
    last_cursor_error = 0;
    range_lock_grabbed = false;
    blob_buff = NULL;
    num_blob_bytes = 0;
    delay_updating_ai_metadata = false;
    ai_metadata_update_required = false;
    memset(mult_key_dbt, 0, sizeof(mult_key_dbt));
    memset(mult_rec_dbt, 0, sizeof(mult_rec_dbt));
    loader = NULL;
    abort_loader = false;
    memset(&lc, 0, sizeof(lc));
    lock.type = TL_IGNORE;
    for (uint32_t i = 0; i < MAX_KEY+1; i++) {
        mult_put_flags[i] = 0;
        mult_del_flags[i] = DB_DELETE_ANY;
        mult_dbt_flags[i] = DB_DBT_REALLOC;
    }
    num_DBs_locked_in_bulk = false;
    lock_count = 0;
    use_write_locks = false;
    range_query_buff = NULL;
    size_range_query_buff = 0;
    bytes_used_in_range_query_buff = 0;
    curr_range_query_buff_offset = 0;
    doing_bulk_fetch = false;
    prelocked_left_range_size = 0;
    prelocked_right_range_size = 0;
    tokudb_active_index = MAX_KEY;
    invalidate_icp();
}

ha_tokudb::~ha_tokudb() {
}

//
// States whether the table has an auto increment column; if so, sets *index
// to the position of that column in the table.
// Parameters:
//      [out]   index - if an auto increment column exists, set to its
//              position in the table; otherwise left unchanged
// Returns:
//      true if an auto increment column exists, false otherwise
//
bool ha_tokudb::has_auto_increment_flag(uint* index) {
    //
    // check to see if we have auto increment field
    //
    bool ai_found = false;
    uint ai_index = 0;
    for (uint i = 0; i < table_share->fields; i++, ai_index++) {
        Field* field = table->field[i];
        if (field->flags & AUTO_INCREMENT_FLAG) {
            ai_found = true;
            *index = ai_index;
            break;
        }
    }
    return ai_found;
}

static int open_status_dictionary(DB** ptr, const char* name, DB_TXN* txn) {
    int error;
    char* newname = NULL;
    newname = (char *)my_malloc(
        get_max_dict_name_path_length(name),
        MYF(MY_WME));
    if (newname == NULL) {
        error = ENOMEM;
        goto cleanup;
    }
    make_name(newname, name, "status");
    if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
        TOKUDB_TRACE("open:%s\n", newname);
    }
    error = tokudb::open_status(db_env, ptr, newname, txn);
cleanup:
    my_free(newname, MYF(MY_ALLOW_ZERO_PTR));
    return error;
}

int ha_tokudb::open_main_dictionary(const char* name, bool is_read_only, DB_TXN* txn) {
    int error;
    char* newname = NULL;
    uint open_flags = (is_read_only ? DB_RDONLY : 0) | DB_THREAD;

    assert(share->file == NULL);
    assert(share->key_file[primary_key] == NULL);

    newname = (char *)my_malloc(
        get_max_dict_name_path_length(name),
        MYF(MY_WME|MY_ZEROFILL)
        );
    if (newname == NULL) {
        error = ENOMEM;
        goto exit;
    }
    make_name(newname, name, "main");

    error = db_create(&share->file, db_env, 0);
    if (error) {
        goto exit;
    }
    share->key_file[primary_key] = share->file;

    error = share->file->open(share->file, txn, newname, NULL, DB_BTREE, open_flags, 0);
    if (error) {
        goto exit;
    }

    if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
        TOKUDB_TRACE("open:%s:file=%p\n", newname, share->file);
    }

    error = 0;
exit:
    if (error) {
        if (share->file) {
            int r = share->file->close(
                share->file,
                0
                );
            assert(r == 0);
            share->file = NULL;
            share->key_file[primary_key] = NULL;
        }
    }
    my_free(newname, MYF(MY_ALLOW_ZERO_PTR));
    return error;
}

//
// Open a secondary table, the key will be a secondary index, the data will be a primary key
//
int ha_tokudb::open_secondary_dictionary(DB** ptr, KEY* key_info, const char* name, bool is_read_only, DB_TXN* txn) {
    int error = ENOSYS;
    char dict_name[MAX_DICT_NAME_LEN];
    uint open_flags = (is_read_only ? DB_RDONLY : 0) | DB_THREAD;
    char* newname = NULL;
    uint newname_len = 0;

    sprintf(dict_name, "key-%s", key_info->name);

    newname_len = get_max_dict_name_path_length(name);
    newname = (char *)my_malloc(newname_len, MYF(MY_WME|MY_ZEROFILL));
    if (newname == NULL) {
        error = ENOMEM;
        goto cleanup;
    }
    make_name(newname, name, dict_name);

    if ((error = db_create(ptr, db_env, 0))) {
        my_errno = error;
        goto cleanup;
    }

    if ((error = (*ptr)->open(*ptr, txn, newname, NULL, DB_BTREE, open_flags, 0))) {
        my_errno = error;
        goto cleanup;
    }
    if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
        TOKUDB_TRACE("open:%s:file=%p\n", newname, *ptr);
    }
cleanup:
    if (error) {
        if (*ptr) {
            int r = (*ptr)->close(*ptr, 0);
            assert(r == 0);
            *ptr = NULL;
        }
    }
    my_free(newname, MYF(MY_ALLOW_ZERO_PTR));
    return error;
}

static int initialize_col_pack_info(KEY_AND_COL_INFO* kc_info, TABLE_SHARE* table_share, uint keynr) {
    int error = ENOSYS;
    //
    // set up the cp_info
    //
    assert(kc_info->cp_info[keynr] == NULL);
    kc_info->cp_info[keynr] = (COL_PACK_INFO *)my_malloc(
        table_share->fields*sizeof(COL_PACK_INFO),
        MYF(MY_WME | MY_ZEROFILL)
        );
    if (kc_info->cp_info[keynr] == NULL) {
        error = ENOMEM;
        goto exit;
    }
    {
        uint32_t curr_fixed_offset = 0;
        uint32_t curr_var_index = 0;
        for (uint j = 0; j < table_share->fields; j++) {
            COL_PACK_INFO* curr = &kc_info->cp_info[keynr][j];
            //
            // need to set the offsets / indexes
            // offsets are calculated AFTER the NULL bytes
            //
            if (!bitmap_is_set(&kc_info->key_filters[keynr], j)) {
                if (kc_info->field_lengths[j]) {
                    curr->col_pack_val = curr_fixed_offset;
                    curr_fixed_offset += kc_info->field_lengths[j];
                }
                else if (kc_info->length_bytes[j]) {
                    curr->col_pack_val = curr_var_index;
                    curr_var_index++;
                }
            }
        }

        //
        // set up the mcp_info
        //
        kc_info->mcp_info[keynr].fixed_field_size = get_fixed_field_size(
            kc_info,
            table_share,
            keynr
            );
        kc_info->mcp_info[keynr].len_of_offsets = get_len_of_offsets(
            kc_info,
            table_share,
            keynr
            );
        error = 0;
    }
exit:
    return error;
}
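
//
// Worked example (illustrative): with unfiltered fields INT, INT,
// VARCHAR(10), VARCHAR(20), the cp_info col_pack_val values are 0 and 4
// for the two fixed fields (byte offsets) and 0 and 1 for the two variable
// fields (indexes into the offset array), and mcp_info records
// fixed_field_size == 8 plus two offset entries of num_offset_bytes each.
//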

// reset the kc_info state at keynr
static void reset_key_and_col_info(KEY_AND_COL_INFO *kc_info, uint keynr) {
    bitmap_clear_all(&kc_info->key_filters[keynr]);
    my_free(kc_info->cp_info[keynr], MYF(MY_ALLOW_ZERO_PTR));
    kc_info->cp_info[keynr] = NULL;
    kc_info->mcp_info[keynr] = (MULTI_COL_PACK_INFO) { 0, 0 };
}

static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, KEY_AND_COL_INFO* kc_info, uint hidden_primary_key, uint primary_key) {
    int error = 0;
    uint32_t curr_blob_field_index = 0;
    uint32_t max_var_bytes = 0;
    //
    // fill in the field lengths. 0 means it is a variable sized field length
    // fill in length_bytes, 0 means it is fixed or blob
    //
    for (uint i = 0; i < table_share->fields; i++) {
        Field* field = table_share->field[i];
        TOKU_TYPE toku_type = mysql_to_toku_type(field);
        uint32 pack_length = 0;
        switch (toku_type) {
        case toku_type_int:
        case toku_type_double:
        case toku_type_float:
        case toku_type_fixbinary:
        case toku_type_fixstring:
            pack_length = field->pack_length();
            assert(pack_length < 1<<16);
            kc_info->field_lengths[i] = (uint16_t)pack_length;
            kc_info->length_bytes[i] = 0;
            break;
        case toku_type_blob:
            kc_info->field_lengths[i] = 0;
            kc_info->length_bytes[i] = 0;
            kc_info->blob_fields[curr_blob_field_index] = i;
            curr_blob_field_index++;
            break;
        case toku_type_varstring:
        case toku_type_varbinary:
            //
            // meaning it is variable sized
            //
            kc_info->field_lengths[i] = 0;
            kc_info->length_bytes[i] = (uchar)((Field_varstring *)field)->length_bytes;
            max_var_bytes += field->field_length;
            break;
        default:
            assert(false);
        }
    }
    kc_info->num_blobs = curr_blob_field_index;

    //
    // initialize share->num_offset_bytes
    // because MAX_REF_LENGTH is 65536, we
    // can safely set num_offset_bytes to 1 or 2
    //
    if (max_var_bytes < 256) {
        kc_info->num_offset_bytes = 1;
    }
    else {
        kc_info->num_offset_bytes = 2;
    }

    for (uint i = 0; i < table_share->keys + test(hidden_primary_key); i++) {
        //
        // do the cluster/primary key filtering calculations
        //
        if (!(i == primary_key && hidden_primary_key)) {
            if (i == primary_key) {
                set_key_filter(
                    &kc_info->key_filters[primary_key],
                    &table_share->key_info[primary_key],
                    table,
                    true
                    );
            }
            else {
                set_key_filter(
                    &kc_info->key_filters[i],
                    &table_share->key_info[i],
                    table,
                    true
                    );
                if (!hidden_primary_key) {
                    set_key_filter(
                        &kc_info->key_filters[i],
                        &table_share->key_info[primary_key],
                        table,
                        true
                        );
                }
            }
        }
        if (i == primary_key || table_share->key_info[i].flags & HA_CLUSTERING) {
            error = initialize_col_pack_info(kc_info, table_share, i);
            if (error) {
                goto exit;
            }
        }
    }
exit:
    return error;
}
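
//
// Worked example (illustrative, single-byte character set): variable
// columns VARCHAR(100) and VARCHAR(120) give max_var_bytes = 220 < 256, so
// one-byte offsets suffice (num_offset_bytes == 1); adding a VARCHAR(50)
// pushes the total to 270 and forces two-byte offsets.
//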

bool ha_tokudb::can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info, uint pk) {
    uint curr_num_DBs = table_share->keys + test(hidden_primary_key);
    bool ret_val;
    if (curr_num_DBs == 1) {
        ret_val = true;
        goto exit;
    }
    ret_val = true;
    for (uint curr_index = 0; curr_index < table_share->keys; curr_index++) {
        if (curr_index == pk) continue;
        KEY* curr_key_info = &table_share->key_info[curr_index];
        for (uint i = 0; i < get_key_parts(curr_key_info); i++) {
            uint16 curr_field_index = curr_key_info->key_part[i].field->field_index;
            if (!bitmap_is_set(&kc_info->key_filters[curr_index], curr_field_index)) {
                ret_val = false;
                goto exit;
            }
            if (bitmap_is_set(&kc_info->key_filters[curr_index], curr_field_index) &&
                !bitmap_is_set(&kc_info->key_filters[pk], curr_field_index)) {
                ret_val = false;
                goto exit;
            }
        }
    }
exit:
    return ret_val;
}
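
//
// Example (illustrative): with PRIMARY KEY(a) and KEY(b(3)) on a
// VARCHAR(10) column b, the prefix key part never gets its bit set in
// key_filters (see set_key_filter), so the first bitmap test fails and
// replace_into_fast is disabled; a secondary key made only of full columns
// that are also part of the primary key passes both tests.
//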

int ha_tokudb::initialize_share(
    const char* name,
    int mode
    )
{
    int error = 0;
    uint64_t num_rows = 0;
    bool table_exists;
    DB_TXN* txn = NULL;
    bool do_commit = false;
    THD* thd = ha_thd();
    tokudb_trx_data *trx = NULL;
    trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot);
    if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) {
        txn = trx->sub_sp_level;
    }
    else {
        do_commit = true;
        error = db_env->txn_begin(db_env, 0, &txn, 0);
        if (error) { goto exit; }
    }

    DBUG_PRINT("info", ("share->use_count %u", share->use_count));

    table_exists = true;
    error = check_table_in_metadata(name, &table_exists, txn);

    if (error) {
        goto exit;
    }
    if (!table_exists) {
        sql_print_error("table %s does not exist in metadata, was it moved from someplace else? Not opening table", name);
        error = HA_ADMIN_FAILED;
        goto exit;
    }

    error = get_status(txn);
    if (error) {
        goto exit;
    }
    if (share->version != HA_TOKU_VERSION) {
        error = ENOSYS;
        goto exit;
    }

#if TOKU_PARTITION_WRITE_FRM_DATA
    // verify frm data for all tables
    error = verify_frm_data(table->s->path.str, txn);
    if (error)
        goto exit;
#else
    // verify frm data for non-partitioned tables
    if (table->part_info == NULL) {
        error = verify_frm_data(table->s->path.str, txn);
        if (error)
            goto exit;
    } else {
        // remove the frm data for partitions since we are not maintaining it
        error = remove_frm_data(share->status_block, txn);
        if (error)
            goto exit;
    }
#endif

    error = initialize_key_and_col_info(
        table_share,
        table,
        &share->kc_info,
        hidden_primary_key,
        primary_key
        );
    if (error) { goto exit; }

    error = open_main_dictionary(name, mode == O_RDONLY, txn);
    if (error) { goto exit; }

    share->has_unique_keys = false;
    /* Open other keys; These are part of the share structure */
    for (uint i = 0; i < table_share->keys; i++) {
        if (table_share->key_info[i].flags & HA_NOSAME) {
            share->has_unique_keys = true;
        }
        if (i != primary_key) {
            error = open_secondary_dictionary(
                &share->key_file[i],
                &table_share->key_info[i],
                name,
                mode == O_RDONLY,
                txn
                );
            if (error) {
                goto exit;
            }
        }
    }
    share->replace_into_fast = can_replace_into_be_fast(
        table_share,
        &share->kc_info,
        primary_key
        );
    if (!hidden_primary_key) {
        //
        // We need to set the ref_length to start at 5, to account for
        // the "infinity byte" in keys, and for placing the DBT size in the first four bytes
        //
        ref_length = sizeof(uint32_t) + sizeof(uchar);
        KEY_PART_INFO *key_part = table->key_info[primary_key].key_part;
        KEY_PART_INFO *end = key_part + get_key_parts(&table->key_info[primary_key]);
        for (; key_part != end; key_part++) {
            ref_length += key_part->field->max_packed_col_length(key_part->length);
        }
        share->status |= STATUS_PRIMARY_KEY_INIT;
    }
    share->ref_length = ref_length;

    error = estimate_num_rows(share->file, &num_rows, txn);
    //
    // estimate_num_rows should not fail under normal conditions
    //
    if (error == 0) {
        share->rows = num_rows;
    }
    else {
        goto exit;
    }
    //
    // initialize auto increment data
    //
    share->has_auto_inc = has_auto_increment_flag(&share->ai_field_index);
    if (share->has_auto_inc) {
        init_auto_increment();
    }

    if (may_table_be_empty(NULL)) {
        share->try_table_lock = true;
    }
    else {
        share->try_table_lock = false;
    }

    share->num_DBs = table_share->keys + test(hidden_primary_key);

    error = 0;
exit:
    if (do_commit && txn) {
        commit_txn(txn, 0);
    }
    return error;
}
  1638. //
  1639. // Creates and opens a handle to a table which already exists in a tokudb
  1640. // database.
  1641. // Parameters:
  1642. // [in] name - table name
1643. // mode - specifies whether the table is opened read only (mode == O_RDONLY)
  1644. // test_if_locked - unused
  1645. // Returns:
  1646. // 0 on success
  1647. // 1 on error
  1648. //
  1649. int ha_tokudb::open(const char *name, int mode, uint test_if_locked) {
  1650. TOKUDB_DBUG_ENTER("ha_tokudb::open %p %s", this, name);
  1651. THD* thd = ha_thd();
  1652. int error = 0;
  1653. int ret_val = 0;
  1654. uint curr_num_DBs = 0;
  1655. transaction = NULL;
  1656. cursor = NULL;
  1657. /* Open primary key */
  1658. hidden_primary_key = 0;
  1659. if ((primary_key = table_share->primary_key) >= MAX_KEY) {
  1660. // No primary key
  1661. primary_key = table_share->keys;
  1662. key_used_on_scan = MAX_KEY;
  1663. hidden_primary_key = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
  1664. ref_length = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH + sizeof(uint32_t);
  1665. }
  1666. else {
  1667. key_used_on_scan = primary_key;
  1668. }
  1669. curr_num_DBs = table_share->keys + test(hidden_primary_key);
  1670. /* Need some extra memory in case of packed keys */
  1671. // the "+ 1" is for the first byte that states +/- infinity
  1672. // multiply everything by 2 to account for clustered keys having a key and primary key together
  1673. max_key_length = 2*(table_share->max_key_length + MAX_REF_PARTS * 3 + sizeof(uchar));
  1674. alloc_ptr = my_multi_malloc(MYF(MY_WME),
  1675. &key_buff, max_key_length,
  1676. &key_buff2, max_key_length,
  1677. &key_buff3, max_key_length,
  1678. &prelocked_left_range, max_key_length,
  1679. &prelocked_right_range, max_key_length,
  1680. &primary_key_buff, (hidden_primary_key ? 0 : max_key_length),
  1681. &fixed_cols_for_query, table_share->fields*sizeof(uint32_t),
  1682. &var_cols_for_query, table_share->fields*sizeof(uint32_t),
  1683. NullS
  1684. );
  1685. if (alloc_ptr == NULL) {
  1686. ret_val = 1;
  1687. goto exit;
  1688. }
  1689. size_range_query_buff = get_tokudb_read_buf_size(thd);
  1690. range_query_buff = (uchar *)my_malloc(size_range_query_buff, MYF(MY_WME));
  1691. if (range_query_buff == NULL) {
  1692. ret_val = 1;
  1693. goto exit;
  1694. }
  1695. alloced_rec_buff_length = table_share->rec_buff_length + table_share->fields;
  1696. rec_buff = (uchar *) my_malloc(alloced_rec_buff_length, MYF(MY_WME));
  1697. if (rec_buff == NULL) {
  1698. ret_val = 1;
  1699. goto exit;
  1700. }
  1701. alloced_update_rec_buff_length = alloced_rec_buff_length;
  1702. rec_update_buff = (uchar *) my_malloc(alloced_update_rec_buff_length, MYF(MY_WME));
  1703. if (rec_update_buff == NULL) {
  1704. ret_val = 1;
  1705. goto exit;
  1706. }
  1707. for (uint32_t i = 0; i < sizeof(mult_key_dbt)/sizeof(mult_key_dbt[0]); i++) {
  1708. mult_key_dbt[i].flags = DB_DBT_REALLOC;
  1709. }
  1710. for (uint32_t i = 0; i < curr_num_DBs; i++) {
  1711. mult_rec_dbt[i].flags = DB_DBT_REALLOC;
  1712. }
  1713. /* Init shared structure */
  1714. pthread_mutex_lock(&tokudb_mutex);
  1715. share = get_share(name, table_share);
  1716. assert(share);
  1717. thr_lock_data_init(&share->lock, &lock, NULL);
  1718. /* Fill in shared structure, if needed */
  1719. pthread_mutex_lock(&share->mutex);
  1720. if (!share->use_count++) {
  1721. ret_val = initialize_share(
  1722. name,
  1723. mode
  1724. );
  1725. if (ret_val) {
  1726. free_share(share, true);
  1727. pthread_mutex_unlock(&tokudb_mutex);
  1728. goto exit;
  1729. }
  1730. }
  1731. pthread_mutex_unlock(&share->mutex);
  1732. pthread_mutex_unlock(&tokudb_mutex);
  1733. ref_length = share->ref_length; // If second open
  1734. if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
  1735. pthread_mutex_lock(&share->mutex);
  1736. TOKUDB_TRACE("tokudbopen:%p:share=%p:file=%p:table=%p:table->s=%p:%d\n",
  1737. this, share, share->file, table, table->s, share->use_count);
  1738. pthread_mutex_unlock(&share->mutex);
  1739. }
  1740. key_read = false;
  1741. stats.block_size = 1<<20; // QQQ Tokudb DB block size
  1742. init_hidden_prim_key_info();
  1743. info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
  1744. exit:
  1745. if (ret_val) {
  1746. my_free(range_query_buff, MYF(MY_ALLOW_ZERO_PTR));
  1747. range_query_buff = NULL;
  1748. my_free(alloc_ptr, MYF(MY_ALLOW_ZERO_PTR));
  1749. alloc_ptr = NULL;
  1750. my_free(rec_buff, MYF(MY_ALLOW_ZERO_PTR));
  1751. rec_buff = NULL;
  1752. my_free(rec_update_buff, MYF(MY_ALLOW_ZERO_PTR));
  1753. rec_update_buff = NULL;
  1754. if (error) {
  1755. my_errno = error;
  1756. }
  1757. }
  1758. TOKUDB_DBUG_RETURN(ret_val);
  1759. }
  1760. //
  1761. // estimate the number of rows in a DB
  1762. // Parameters:
  1763. // [in] db - DB whose number of rows will be estimated
  1764. // [out] num_rows - number of estimated rows in db
  1765. // Returns:
  1766. // 0 on success
  1767. // error otherwise
  1768. //
  1769. int ha_tokudb::estimate_num_rows(DB* db, uint64_t* num_rows, DB_TXN* txn) {
  1770. int error = ENOSYS;
  1771. DBC* crsr = NULL;
  1772. bool do_commit = false;
  1773. DB_BTREE_STAT64 dict_stats;
  1774. DB_TXN* txn_to_use = NULL;
  1775. if (txn == NULL) {
  1776. error = db_env->txn_begin(db_env, 0, &txn_to_use, DB_READ_UNCOMMITTED);
  1777. if (error) goto cleanup;
  1778. do_commit = true;
  1779. }
  1780. else {
  1781. txn_to_use = txn;
  1782. }
  1783. error = db->stat64(
1784. db,
  1785. txn_to_use,
  1786. &dict_stats
  1787. );
  1788. if (error) { goto cleanup; }
  1789. *num_rows = dict_stats.bt_ndata;
  1790. error = 0;
  1791. cleanup:
  1792. if (crsr != NULL) {
  1793. int r = crsr->c_close(crsr);
  1794. assert(r==0);
  1795. crsr = NULL;
  1796. }
  1797. if (do_commit) {
  1798. commit_txn(txn_to_use, 0);
  1799. txn_to_use = NULL;
  1800. }
  1801. return error;
  1802. }
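//
// Illustration (not part of the build): the typical call pattern for
// estimate_num_rows. A minimal sketch; passing a NULL txn makes the function
// open and commit its own DB_READ_UNCOMMITTED transaction, so the estimate
// does not block on row locks. Error handling is elided.
#if 0
uint64_t num_rows = 0;
int r = estimate_num_rows(share->file, &num_rows, NULL);
if (r == 0) {
    share->rows = num_rows; // bt_ndata from stat64 is an estimate, not an exact count
}
#endif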
  1803. int ha_tokudb::write_to_status(DB* db, HA_METADATA_KEY curr_key_data, void* data, uint size, DB_TXN* txn ){
  1804. return write_metadata(db, &curr_key_data, sizeof curr_key_data, data, size, txn);
  1805. }
  1806. int ha_tokudb::remove_from_status(DB *db, HA_METADATA_KEY curr_key_data, DB_TXN *txn) {
  1807. return remove_metadata(db, &curr_key_data, sizeof curr_key_data, txn);
  1808. }
  1809. int ha_tokudb::remove_metadata(DB* db, void* key_data, uint key_size, DB_TXN* transaction){
  1810. int error;
  1811. DBT key;
  1812. DB_TXN* txn = NULL;
  1813. bool do_commit = false;
  1814. //
  1815. // transaction to be used for putting metadata into status.tokudb
  1816. //
  1817. if (transaction == NULL) {
  1818. error = db_env->txn_begin(db_env, 0, &txn, 0);
  1819. if (error) {
  1820. goto cleanup;
  1821. }
  1822. do_commit = true;
  1823. }
  1824. else {
  1825. txn = transaction;
  1826. }
  1827. memset(&key, 0, sizeof(key));
  1828. key.data = key_data;
  1829. key.size = key_size;
  1830. error = db->del(db, txn, &key, DB_DELETE_ANY);
  1831. if (error) {
  1832. goto cleanup;
  1833. }
  1834. error = 0;
  1835. cleanup:
  1836. if (do_commit && txn) {
  1837. if (!error) {
  1838. commit_txn(txn, DB_TXN_NOSYNC);
  1839. }
  1840. else {
  1841. abort_txn(txn);
  1842. }
  1843. }
  1844. return error;
  1845. }
  1846. //
  1847. // helper function to write a piece of metadata in to status.tokudb
  1848. //
  1849. int ha_tokudb::write_metadata(DB* db, void* key_data, uint key_size, void* val_data, uint val_size, DB_TXN* transaction ){
  1850. int error;
  1851. DBT key;
  1852. DBT value;
  1853. DB_TXN* txn = NULL;
  1854. bool do_commit = false;
  1855. //
  1856. // transaction to be used for putting metadata into status.tokudb
  1857. //
  1858. if (transaction == NULL) {
  1859. error = db_env->txn_begin(db_env, 0, &txn, 0);
  1860. if (error) {
  1861. goto cleanup;
  1862. }
  1863. do_commit = true;
  1864. }
  1865. else {
  1866. txn = transaction;
  1867. }
  1868. memset(&key, 0, sizeof(key));
  1869. memset(&value, 0, sizeof(value));
  1870. key.data = key_data;
  1871. key.size = key_size;
  1872. value.data = val_data;
  1873. value.size = val_size;
  1874. error = db->put(db, txn, &key, &value, 0);
  1875. if (error) {
  1876. goto cleanup;
  1877. }
  1878. error = 0;
  1879. cleanup:
  1880. if (do_commit && txn) {
  1881. if (!error) {
  1882. commit_txn(txn, DB_TXN_NOSYNC);
  1883. }
  1884. else {
  1885. abort_txn(txn);
  1886. }
  1887. }
  1888. return error;
  1889. }
  1890. int ha_tokudb::write_frm_data(DB* db, DB_TXN* txn, const char* frm_name) {
  1891. TOKUDB_DBUG_ENTER("ha_tokudb::write_frm_data %p %p %p %s", this, db, txn, frm_name);
  1892. uchar* frm_data = NULL;
  1893. size_t frm_len = 0;
  1894. int error = 0;
  1895. error = readfrm(frm_name,&frm_data,&frm_len);
  1896. if (error) { goto cleanup; }
  1897. error = write_to_status(db,hatoku_frm_data,frm_data,(uint)frm_len, txn);
  1898. if (error) { goto cleanup; }
  1899. error = 0;
  1900. cleanup:
  1901. my_free(frm_data, MYF(MY_ALLOW_ZERO_PTR));
  1902. TOKUDB_DBUG_RETURN(error);
  1903. }
  1904. int ha_tokudb::remove_frm_data(DB *db, DB_TXN *txn) {
  1905. return remove_from_status(db, hatoku_frm_data, txn);
  1906. }
  1907. static int
  1908. smart_dbt_callback_verify_frm (DBT const *key, DBT const *row, void *context) {
  1909. DBT* stored_frm = (DBT *)context;
  1910. stored_frm->size = row->size;
  1911. stored_frm->data = (uchar *)my_malloc(row->size, MYF(MY_WME));
  1912. assert(stored_frm->data);
  1913. memcpy(stored_frm->data, row->data, row->size);
  1914. return 0;
  1915. }
  1916. int ha_tokudb::verify_frm_data(const char* frm_name, DB_TXN* txn) {
  1917. TOKUDB_DBUG_ENTER("ha_tokudb::verify_frm_data %s", frm_name);
  1918. uchar* mysql_frm_data = NULL;
  1919. size_t mysql_frm_len = 0;
  1920. DBT key, stored_frm;
  1921. int error = 0;
  1922. HA_METADATA_KEY curr_key = hatoku_frm_data;
  1923. memset(&key, 0, sizeof(key));
1924. memset(&stored_frm, 0, sizeof(stored_frm));
  1925. // get the frm data from MySQL
  1926. error = readfrm(frm_name,&mysql_frm_data,&mysql_frm_len);
  1927. if (error) { goto cleanup; }
  1928. key.data = &curr_key;
  1929. key.size = sizeof(curr_key);
  1930. error = share->status_block->getf_set(
  1931. share->status_block,
  1932. txn,
  1933. 0,
  1934. &key,
  1935. smart_dbt_callback_verify_frm,
  1936. &stored_frm
  1937. );
  1938. if (error == DB_NOTFOUND) {
  1939. // if not found, write it
  1940. error = write_frm_data(
  1941. share->status_block,
  1942. txn,
  1943. frm_name
  1944. );
  1945. goto cleanup;
  1946. }
  1947. else if (error) {
  1948. goto cleanup;
  1949. }
  1950. if (stored_frm.size != mysql_frm_len ||
  1951. memcmp(stored_frm.data, mysql_frm_data, stored_frm.size))
  1952. {
  1953. error = HA_ERR_TABLE_DEF_CHANGED;
  1954. goto cleanup;
  1955. }
  1956. error = 0;
  1957. cleanup:
  1958. my_free(mysql_frm_data, MYF(MY_ALLOW_ZERO_PTR));
  1959. my_free(stored_frm.data, MYF(MY_ALLOW_ZERO_PTR));
  1960. TOKUDB_DBUG_RETURN(error);
  1961. }
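//
// Illustration (not part of the build): the compare-or-store pattern that
// verify_frm_data implements, reduced to its shape. A minimal sketch with
// hypothetical helpers load_stored/store_current standing in for the
// getf_set callback and write_frm_data.
#if 0
static int toy_verify_blob(const void *cur, size_t cur_len) {
    void *stored = NULL;
    size_t stored_len = 0;
    int r = load_stored(&stored, &stored_len);   // hypothetical read from status.tokudb
    if (r == DB_NOTFOUND)
        return store_current(cur, cur_len);      // first time: persist the current copy
    if (r)
        return r;
    if (stored_len != cur_len || memcmp(stored, cur, cur_len))
        return HA_ERR_TABLE_DEF_CHANGED;         // stored schema differs from MySQL's
    return 0;
}
#endif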
  1962. //
  1963. // Updates status.tokudb with a new max value used for the auto increment column
  1964. // Parameters:
  1965. // [in] db - this will always be status.tokudb
  1966. // val - value to store
  1967. // Returns:
  1968. // 0 on success, error otherwise
  1969. //
  1970. //
  1971. int ha_tokudb::update_max_auto_inc(DB* db, ulonglong val){
  1972. return write_to_status(db,hatoku_max_ai,&val,sizeof(val), NULL);
  1973. }
  1974. //
  1975. // Writes the initial auto increment value, as specified by create table
  1976. // so if a user does "create table t1 (a int auto_increment, primary key (a)) auto_increment=100",
  1977. // then the value 100 will be stored here in val
  1978. // Parameters:
  1979. // [in] db - this will always be status.tokudb
  1980. // val - value to store
  1981. // Returns:
  1982. // 0 on success, error otherwise
  1983. //
  1984. //
  1985. int ha_tokudb::write_auto_inc_create(DB* db, ulonglong val, DB_TXN* txn){
  1986. return write_to_status(db,hatoku_ai_create_value,&val,sizeof(val), txn);
  1987. }
  1988. //
  1989. // Closes a handle to a table.
  1990. //
  1991. int ha_tokudb::close(void) {
  1992. TOKUDB_DBUG_ENTER("ha_tokudb::close %p", this);
  1993. TOKUDB_DBUG_RETURN(__close());
  1994. }
  1995. int ha_tokudb::__close() {
  1996. TOKUDB_DBUG_ENTER("ha_tokudb::__close %p", this);
  1997. if (tokudb_debug & TOKUDB_DEBUG_OPEN)
  1998. TOKUDB_TRACE("close:%p\n", this);
  1999. my_free(rec_buff, MYF(MY_ALLOW_ZERO_PTR));
  2000. my_free(rec_update_buff, MYF(MY_ALLOW_ZERO_PTR));
  2001. my_free(blob_buff, MYF(MY_ALLOW_ZERO_PTR));
  2002. my_free(alloc_ptr, MYF(MY_ALLOW_ZERO_PTR));
  2003. my_free(range_query_buff, MYF(MY_ALLOW_ZERO_PTR));
  2004. for (uint32_t i = 0; i < sizeof(mult_rec_dbt)/sizeof(mult_rec_dbt[0]); i++) {
  2005. if (mult_rec_dbt[i].flags == DB_DBT_REALLOC &&
  2006. mult_rec_dbt[i].data != NULL) {
  2007. free(mult_rec_dbt[i].data);
  2008. }
  2009. }
  2010. for (uint32_t i = 0; i < sizeof(mult_key_dbt)/sizeof(mult_key_dbt[0]); i++) {
  2011. if (mult_key_dbt[i].flags == DB_DBT_REALLOC &&
  2012. mult_key_dbt[i].data != NULL) {
  2013. free(mult_key_dbt[i].data);
  2014. }
  2015. }
  2016. rec_buff = NULL;
  2017. rec_update_buff = NULL;
  2018. alloc_ptr = NULL;
  2019. ha_tokudb::reset();
  2020. pthread_mutex_lock(&tokudb_mutex);
  2021. int retval = free_share(share, false);
  2022. pthread_mutex_unlock(&tokudb_mutex);
  2023. TOKUDB_DBUG_RETURN(retval);
  2024. }
  2025. //
  2026. // Reallocate record buffer (rec_buff) if needed
  2027. // If not needed, does nothing
  2028. // Parameters:
  2029. // length - size of buffer required for rec_buff
  2030. //
  2031. bool ha_tokudb::fix_rec_buff_for_blob(ulong length) {
  2032. if (!rec_buff || (length > alloced_rec_buff_length)) {
  2033. uchar *newptr;
  2034. if (!(newptr = (uchar *) my_realloc((void *) rec_buff, length, MYF(MY_ALLOW_ZERO_PTR))))
  2035. return 1;
  2036. rec_buff = newptr;
  2037. alloced_rec_buff_length = length;
  2038. }
  2039. return 0;
  2040. }
  2041. //
2042. // Reallocate the update record buffer (rec_update_buff) if needed
2043. // If not needed, does nothing
2044. // Parameters:
2045. // length - size of buffer required for rec_update_buff
  2046. //
  2047. bool ha_tokudb::fix_rec_update_buff_for_blob(ulong length) {
  2048. if (!rec_update_buff || (length > alloced_update_rec_buff_length)) {
  2049. uchar *newptr;
  2050. if (!(newptr = (uchar *) my_realloc((void *) rec_update_buff, length, MYF(MY_ALLOW_ZERO_PTR))))
  2051. return 1;
  2052. rec_update_buff= newptr;
  2053. alloced_update_rec_buff_length = length;
  2054. }
  2055. return 0;
  2056. }
  2057. /* Calculate max length needed for row */
  2058. ulong ha_tokudb::max_row_length(const uchar * buf) {
  2059. ulong length = table_share->reclength + table_share->fields * 2;
  2060. uint *ptr, *end;
  2061. for (ptr = table_share->blob_field, end = ptr + table_share->blob_fields; ptr != end; ptr++) {
  2062. Field_blob *blob = ((Field_blob *) table->field[*ptr]);
  2063. length += blob->get_length((uchar *) (buf + field_offset(blob, table))) + 2;
  2064. }
  2065. return length;
  2066. }
2069. //
2070. // Pack a row for storage, converting the row in MySQL format in record
2071. // into a packed row returned in a DBT.
2072. // If the row is of fixed length, just store the row 'as is'.
2073. // If not, we will generate a packed row suitable for storage.
2074. // This will only fail if we don't have enough memory to pack the row,
2075. // which may only happen in rows with blobs, as the default row length is
2076. // pre-allocated.
2077. // Parameters:
2078. // [out] row - DBT in which the packed row is returned
2079. // [out] row_buff - buffer where the row is packed
2080. // [in] record - row in MySQL format
2081. //
  2082. int ha_tokudb::pack_row_in_buff(
  2083. DBT * row,
  2084. const uchar* record,
  2085. uint index,
  2086. uchar* row_buff
  2087. )
  2088. {
  2089. uchar* fixed_field_ptr = NULL;
  2090. uchar* var_field_offset_ptr = NULL;
  2091. uchar* start_field_data_ptr = NULL;
  2092. uchar* var_field_data_ptr = NULL;
  2093. int r = ENOSYS;
  2094. memset((void *) row, 0, sizeof(*row));
  2095. my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
  2096. // Copy null bytes
  2097. memcpy(row_buff, record, table_share->null_bytes);
  2098. fixed_field_ptr = row_buff + table_share->null_bytes;
  2099. var_field_offset_ptr = fixed_field_ptr + share->kc_info.mcp_info[index].fixed_field_size;
  2100. start_field_data_ptr = var_field_offset_ptr + share->kc_info.mcp_info[index].len_of_offsets;
  2101. var_field_data_ptr = var_field_offset_ptr + share->kc_info.mcp_info[index].len_of_offsets;
  2103. for (uint i = 0; i < table_share->fields; i++) {
  2104. Field* field = table->field[i];
  2105. uint curr_field_offset = field_offset(field, table);
  2106. if (bitmap_is_set(&share->kc_info.key_filters[index],i)) {
  2107. continue;
  2108. }
  2109. if (share->kc_info.field_lengths[i]) {
  2110. fixed_field_ptr = pack_fixed_field(
  2111. fixed_field_ptr,
  2112. record + curr_field_offset,
  2113. share->kc_info.field_lengths[i]
  2114. );
  2115. }
  2116. else if (share->kc_info.length_bytes[i]) {
  2117. var_field_data_ptr = pack_var_field(
  2118. var_field_offset_ptr,
  2119. var_field_data_ptr,
  2120. start_field_data_ptr,
  2121. record + curr_field_offset,
  2122. share->kc_info.length_bytes[i],
  2123. share->kc_info.num_offset_bytes
  2124. );
  2125. var_field_offset_ptr += share->kc_info.num_offset_bytes;
  2126. }
  2127. }
  2128. for (uint i = 0; i < share->kc_info.num_blobs; i++) {
  2129. Field* field = table->field[share->kc_info.blob_fields[i]];
  2130. var_field_data_ptr = pack_toku_field_blob(
  2131. var_field_data_ptr,
  2132. record + field_offset(field, table),
  2133. field
  2134. );
  2135. }
  2136. row->data = row_buff;
  2137. row->size = (size_t) (var_field_data_ptr - row_buff);
  2138. r = 0;
  2139. dbug_tmp_restore_column_map(table->write_set, old_map);
  2140. return r;
  2141. }
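//
// Illustration: the packed row produced above, in order, derived from the
// pointer arithmetic in pack_row_in_buff:
//
//   [null bytes][fixed fields][var-field end offsets][var-field data][blob data]
//
// The offset area holds num_offset_bytes per variable-length column, and each
// entry records that column's end offset within the var-field data area, so
// column i's bytes span [end_offset(i-1), end_offset(i)).
//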
  2142. int ha_tokudb::pack_row(
  2143. DBT * row,
  2144. const uchar* record,
  2145. uint index
  2146. )
  2147. {
  2148. return pack_row_in_buff(row,record,index,rec_buff);
  2149. }
  2150. int ha_tokudb::pack_old_row_for_update(
  2151. DBT * row,
  2152. const uchar* record,
  2153. uint index
  2154. )
  2155. {
  2156. return pack_row_in_buff(row,record,index,rec_update_buff);
  2157. }
  2158. int ha_tokudb::unpack_blobs(
  2159. uchar* record,
  2160. const uchar* from_tokudb_blob,
  2161. uint32_t num_bytes,
  2162. bool check_bitmap
  2163. )
  2164. {
  2165. uint error = 0;
  2166. uchar* ptr = NULL;
  2167. const uchar* buff = NULL;
  2168. //
2169. // assert that if there are no blobs (num_blobs == 0), then num_bytes must be 0
  2170. //
  2171. assert( !((share->kc_info.num_blobs == 0) && (num_bytes > 0)) );
  2172. if (num_bytes > num_blob_bytes) {
  2173. ptr = (uchar *)my_realloc((void *)blob_buff, num_bytes, MYF(MY_ALLOW_ZERO_PTR));
  2174. if (ptr == NULL) {
  2175. error = ENOMEM;
  2176. goto exit;
  2177. }
  2178. blob_buff = ptr;
  2179. num_blob_bytes = num_bytes;
  2180. }
  2181. memcpy(blob_buff, from_tokudb_blob, num_bytes);
  2182. buff= blob_buff;
  2183. for (uint i = 0; i < share->kc_info.num_blobs; i++) {
  2184. uint32_t curr_field_index = share->kc_info.blob_fields[i];
  2185. bool skip = check_bitmap ?
  2186. !(bitmap_is_set(table->read_set,curr_field_index) ||
  2187. bitmap_is_set(table->write_set,curr_field_index)) :
  2188. false;
  2189. Field* field = table->field[curr_field_index];
  2190. uint32_t len_bytes = field->row_pack_length();
  2191. buff = unpack_toku_field_blob(
  2192. record + field_offset(field, table),
  2193. buff,
  2194. len_bytes,
  2195. skip
  2196. );
  2197. }
  2198. error = 0;
  2199. exit:
  2200. return error;
  2201. }
  2202. //
  2203. // take the row passed in as a DBT*, and convert it into a row in MySQL format in record
  2204. // Parameters:
  2205. // [out] record - row in MySQL format
  2206. // [in] row - row stored in DBT to be converted
  2207. //
  2208. int ha_tokudb::unpack_row(
  2209. uchar* record,
  2210. DBT const *row,
  2211. DBT const *key,
  2212. uint index
  2213. )
  2214. {
  2215. //
  2216. // two cases, fixed length row, and variable length row
  2217. // fixed length row is first below
  2218. //
  2219. /* Copy null bits */
  2220. int error = 0;
  2221. const uchar* fixed_field_ptr = (const uchar *) row->data;
  2222. const uchar* var_field_offset_ptr = NULL;
  2223. const uchar* var_field_data_ptr = NULL;
  2224. uint32_t data_end_offset = 0;
  2225. memcpy(record, fixed_field_ptr, table_share->null_bytes);
  2226. fixed_field_ptr += table_share->null_bytes;
  2227. var_field_offset_ptr = fixed_field_ptr + share->kc_info.mcp_info[index].fixed_field_size;
  2228. var_field_data_ptr = var_field_offset_ptr + share->kc_info.mcp_info[index].len_of_offsets;
  2229. //
  2230. // unpack the key, if necessary
  2231. //
  2232. if (!(hidden_primary_key && index == primary_key)) {
  2233. unpack_key(record,key,index);
  2234. }
  2235. uint32_t last_offset = 0;
  2236. //
  2237. // we have two methods of unpacking, one if we need to unpack the entire row
  2238. // the second if we unpack a subset of the entire row
  2239. // first method here is if we unpack the entire row
  2240. //
  2241. if (unpack_entire_row) {
  2242. //
  2243. // fill in parts of record that are not part of the key
  2244. //
  2245. for (uint i = 0; i < table_share->fields; i++) {
  2246. Field* field = table->field[i];
  2247. if (bitmap_is_set(&share->kc_info.key_filters[index],i)) {
  2248. continue;
  2249. }
  2250. if (share->kc_info.field_lengths[i]) {
  2251. fixed_field_ptr = unpack_fixed_field(
  2252. record + field_offset(field, table),
  2253. fixed_field_ptr,
  2254. share->kc_info.field_lengths[i]
  2255. );
  2256. }
  2257. //
  2258. // here, we DO modify var_field_data_ptr or var_field_offset_ptr
  2259. // as we unpack variable sized fields
  2260. //
  2261. else if (share->kc_info.length_bytes[i]) {
  2262. switch (share->kc_info.num_offset_bytes) {
  2263. case (1):
  2264. data_end_offset = var_field_offset_ptr[0];
  2265. break;
  2266. case (2):
  2267. data_end_offset = uint2korr(var_field_offset_ptr);
  2268. break;
  2269. default:
  2270. assert(false);
  2271. break;
  2272. }
  2273. unpack_var_field(
  2274. record + field_offset(field, table),
  2275. var_field_data_ptr,
  2276. data_end_offset - last_offset,
  2277. share->kc_info.length_bytes[i]
  2278. );
  2279. var_field_offset_ptr += share->kc_info.num_offset_bytes;
  2280. var_field_data_ptr += data_end_offset - last_offset;
  2281. last_offset = data_end_offset;
  2282. }
  2283. }
  2284. error = unpack_blobs(
  2285. record,
  2286. var_field_data_ptr,
  2287. row->size - (uint32_t)(var_field_data_ptr - (const uchar *)row->data),
  2288. false
  2289. );
  2290. if (error) {
  2291. goto exit;
  2292. }
  2293. }
  2294. //
  2295. // in this case, we unpack only what is specified
  2296. // in fixed_cols_for_query and var_cols_for_query
  2297. //
  2298. else {
  2299. //
  2300. // first the fixed fields
  2301. //
  2302. for (uint32_t i = 0; i < num_fixed_cols_for_query; i++) {
  2303. uint field_index = fixed_cols_for_query[i];
  2304. Field* field = table->field[field_index];
  2305. unpack_fixed_field(
  2306. record + field_offset(field, table),
  2307. fixed_field_ptr + share->kc_info.cp_info[index][field_index].col_pack_val,
  2308. share->kc_info.field_lengths[field_index]
  2309. );
  2310. }
  2311. //
  2312. // now the var fields
  2313. // here, we do NOT modify var_field_data_ptr or var_field_offset_ptr
  2314. //
  2315. for (uint32_t i = 0; i < num_var_cols_for_query; i++) {
  2316. uint field_index = var_cols_for_query[i];
  2317. Field* field = table->field[field_index];
  2318. uint32_t var_field_index = share->kc_info.cp_info[index][field_index].col_pack_val;
  2319. uint32_t data_start_offset;
  2320. uint32_t field_len;
  2321. get_var_field_info(
  2322. &field_len,
  2323. &data_start_offset,
  2324. var_field_index,
  2325. var_field_offset_ptr,
  2326. share->kc_info.num_offset_bytes
  2327. );
  2328. unpack_var_field(
  2329. record + field_offset(field, table),
  2330. var_field_data_ptr + data_start_offset,
  2331. field_len,
  2332. share->kc_info.length_bytes[field_index]
  2333. );
  2334. }
  2335. if (read_blobs) {
  2336. //
  2337. // now the blobs
  2338. //
  2339. get_blob_field_info(
  2340. &data_end_offset,
  2341. share->kc_info.mcp_info[index].len_of_offsets,
  2342. var_field_data_ptr,
  2343. share->kc_info.num_offset_bytes
  2344. );
  2345. var_field_data_ptr += data_end_offset;
  2346. error = unpack_blobs(
  2347. record,
  2348. var_field_data_ptr,
  2349. row->size - (uint32_t)(var_field_data_ptr - (const uchar *)row->data),
  2350. true
  2351. );
  2352. if (error) {
  2353. goto exit;
  2354. }
  2355. }
  2356. }
  2357. error = 0;
  2358. exit:
  2359. return error;
  2360. }
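//
// Illustration (not part of the build): decoding one variable-length
// column's end offset, as the unpack loops above do. A minimal sketch,
// assuming the little-endian 1- or 2-byte offsets selected by
// num_offset_bytes; toy_read_end_offset is hypothetical.
#if 0
#include <stdint.h>
static uint32_t toy_read_end_offset(const uint8_t *p, uint32_t num_offset_bytes) {
    if (num_offset_bytes == 1)
        return p[0];
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8); // same result as uint2korr
}
#endif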
  2361. uint32_t ha_tokudb::place_key_into_mysql_buff(
  2362. KEY* key_info,
  2363. uchar * record,
  2364. uchar* data
  2365. )
  2366. {
  2367. KEY_PART_INFO *key_part = key_info->key_part, *end = key_part + get_key_parts(key_info);
  2368. uchar *pos = data;
  2369. for (; key_part != end; key_part++) {
  2370. if (key_part->field->null_bit) {
  2371. uint null_offset = get_null_offset(table, key_part->field);
  2372. if (*pos++ == NULL_COL_VAL) { // Null value
  2373. //
  2374. // We don't need to reset the record data as we will not access it
  2375. // if the null data is set
  2376. //
  2377. record[null_offset] |= key_part->field->null_bit;
  2378. continue;
  2379. }
  2380. record[null_offset] &= ~key_part->field->null_bit;
  2381. }
  2382. #if !defined(MARIADB_BASE_VERSION)
  2383. //
  2384. // HOPEFULLY TEMPORARY
  2385. //
  2386. assert(table->s->db_low_byte_first);
  2387. #endif
  2388. pos = unpack_toku_key_field(
  2389. record + field_offset(key_part->field, table),
  2390. pos,
  2391. key_part->field,
  2392. key_part->length
  2393. );
  2394. }
  2395. return pos-data;
  2396. }
  2397. //
  2398. // Store the key and the primary key into the row
  2399. // Parameters:
  2400. // [out] record - key stored in MySQL format
  2401. // [in] key - key stored in DBT to be converted
  2402. // index -index into key_file that represents the DB
  2403. // unpacking a key of
  2404. //
  2405. void ha_tokudb::unpack_key(uchar * record, DBT const *key, uint index) {
  2406. uint32_t bytes_read;
  2407. uchar *pos = (uchar *) key->data + 1;
  2408. bytes_read = place_key_into_mysql_buff(
  2409. &table->key_info[index],
  2410. record,
  2411. pos
  2412. );
  2413. if( (index != primary_key) && !hidden_primary_key) {
  2414. //
  2415. // also unpack primary key
  2416. //
  2417. place_key_into_mysql_buff(
  2418. &table->key_info[primary_key],
  2419. record,
  2420. pos+bytes_read
  2421. );
  2422. }
  2423. }
  2424. uint32_t ha_tokudb::place_key_into_dbt_buff(
  2425. KEY* key_info,
  2426. uchar * buff,
  2427. const uchar * record,
  2428. bool* has_null,
  2429. int key_length
  2430. )
  2431. {
  2432. KEY_PART_INFO *key_part = key_info->key_part;
  2433. KEY_PART_INFO *end = key_part + get_key_parts(key_info);
  2434. uchar* curr_buff = buff;
  2435. *has_null = false;
  2436. for (; key_part != end && key_length > 0; key_part++) {
  2437. //
2438. // accessing key_part->field->null_bit instead of key_part->null_bit
  2439. // because key_part->null_bit is not set in add_index
  2440. // filed ticket 862 to look into this
  2441. //
  2442. if (key_part->field->null_bit) {
  2443. /* Store 0 if the key part is a NULL part */
  2444. uint null_offset = get_null_offset(table, key_part->field);
  2445. if (record[null_offset] & key_part->field->null_bit) {
  2446. *curr_buff++ = NULL_COL_VAL;
  2447. *has_null = true;
  2448. continue;
  2449. }
  2450. *curr_buff++ = NONNULL_COL_VAL; // Store NOT NULL marker
  2451. }
  2452. #if !defined(MARIADB_BASE_VERSION)
  2453. //
  2454. // HOPEFULLY TEMPORARY
  2455. //
  2456. assert(table->s->db_low_byte_first);
  2457. #endif
  2458. //
2459. // accessing field_offset(key_part->field) instead of key_part->offset
  2460. // because key_part->offset is SET INCORRECTLY in add_index
  2461. // filed ticket 862 to look into this
  2462. //
  2463. curr_buff = pack_toku_key_field(
  2464. curr_buff,
  2465. (uchar *) (record + field_offset(key_part->field, table)),
  2466. key_part->field,
  2467. key_part->length
  2468. );
  2469. key_length -= key_part->length;
  2470. }
  2471. return curr_buff - buff;
  2472. }
  2473. //
  2474. // Create a packed key from a row. This key will be written as such
  2475. // to the index tree. This will never fail as the key buffer is pre-allocated.
  2476. // Parameters:
  2477. // [out] key - DBT that holds the key
2478. // [in] key_info - holds data about the key, such as its length and offset into record
  2479. // [out] buff - buffer that will hold the data for key (unless
  2480. // we have a hidden primary key)
  2481. // [in] record - row from which to create the key
2482. // key_length - currently set to MAX_KEY_LENGTH by callers; bounds how many key bytes are packed into buff
  2483. // Returns:
  2484. // the parameter key
  2485. //
  2486. DBT* ha_tokudb::create_dbt_key_from_key(
  2487. DBT * key,
  2488. KEY* key_info,
  2489. uchar * buff,
  2490. const uchar * record,
  2491. bool* has_null,
  2492. bool dont_pack_pk,
  2493. int key_length
  2494. )
  2495. {
  2496. uint32_t size = 0;
  2497. uchar* tmp_buff = buff;
  2498. my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
  2499. key->data = buff;
  2500. //
2501. // first put the "infinity" byte at the beginning. It states whether missing columns are
2502. // implicitly positive infinity, negative infinity, or zero. Because we are creating
2503. // the key from a full row here, no columns can be missing, so in practice
2504. // the value is meaningless; we still have to put one in
  2505. //
  2506. *tmp_buff++ = COL_ZERO;
  2507. size++;
  2508. size += place_key_into_dbt_buff(
  2509. key_info,
  2510. tmp_buff,
  2511. record,
  2512. has_null,
  2513. key_length
  2514. );
  2515. if (!dont_pack_pk) {
  2516. tmp_buff = buff + size;
  2517. if (hidden_primary_key) {
  2518. memcpy_fixed(tmp_buff, current_ident, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
  2519. size += TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
  2520. }
  2521. else {
  2522. bool tmp_bool = false;
  2523. size += place_key_into_dbt_buff(
  2524. &table->key_info[primary_key],
  2525. tmp_buff,
  2526. record,
  2527. &tmp_bool,
  2528. MAX_KEY_LENGTH //this parameter does not matter
  2529. );
  2530. }
  2531. }
  2532. key->size = size;
  2533. DBUG_DUMP("key", (uchar *) key->data, key->size);
  2534. dbug_tmp_restore_column_map(table->write_set, old_map);
  2535. return key;
  2536. }
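//
// Illustration: the packed key built above, derived from the code. One
// infinity byte, then each key part as an optional NULL/NOT-NULL marker
// byte (only for nullable fields) followed by the packed field, then the
// primary key appended the same way (or the hidden-key ident):
//
//   [inf byte][marker?][field] ... [marker?][field][packed PK or hidden ident]
//
// COL_ZERO is used because a key built from a full row has no missing
// columns, so the infinity byte cannot affect comparisons here.
//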
  2537. //
  2538. // Create a packed key from a row. This key will be written as such
  2539. // to the index tree. This will never fail as the key buffer is pre-allocated.
  2540. // Parameters:
  2541. // [out] key - DBT that holds the key
  2542. // keynr - index for which to create the key
  2543. // [out] buff - buffer that will hold the data for key (unless
  2544. // we have a hidden primary key)
  2545. // [in] record - row from which to create the key
  2546. // [out] has_null - says if the key has a NULL value for one of its columns
2547. // key_length - currently set to MAX_KEY_LENGTH by callers; bounds how many key bytes are packed into buff
  2548. // Returns:
  2549. // the parameter key
  2550. //
  2551. DBT *ha_tokudb::create_dbt_key_from_table(
  2552. DBT * key,
  2553. uint keynr,
  2554. uchar * buff,
  2555. const uchar * record,
  2556. bool* has_null,
  2557. int key_length
  2558. )
  2559. {
  2560. TOKUDB_DBUG_ENTER("ha_tokudb::create_dbt_key_from_table");
  2561. memset((void *) key, 0, sizeof(*key));
  2562. if (hidden_primary_key && keynr == primary_key) {
  2563. key->data = buff;
  2564. memcpy(buff, &current_ident, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
  2565. key->size = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
  2566. *has_null = false;
  2567. DBUG_RETURN(key);
  2568. }
  2569. DBUG_RETURN(create_dbt_key_from_key(key, &table->key_info[keynr],buff,record, has_null, (keynr == primary_key), key_length));
  2570. }
  2571. DBT* ha_tokudb::create_dbt_key_for_lookup(
  2572. DBT * key,
  2573. KEY* key_info,
  2574. uchar * buff,
  2575. const uchar * record,
  2576. bool* has_null,
  2577. int key_length
  2578. )
  2579. {
2580. TOKUDB_DBUG_ENTER("ha_tokudb::create_dbt_key_for_lookup");
  2581. DBUG_RETURN(create_dbt_key_from_key(key, key_info, buff, record, has_null, true, key_length));
  2582. }
  2583. //
2584. // Create a packed key from a MySQL unpacked key (like the one that is
2585. // sent from index_read()). This key is to be used to read a row
  2586. // Parameters:
  2587. // [out] key - DBT that holds the key
  2588. // keynr - index for which to pack the key
  2589. // [out] buff - buffer that will hold the data for key
  2590. // [in] key_ptr - MySQL unpacked key
  2591. // key_length - length of key_ptr
  2592. // Returns:
  2593. // the parameter key
  2594. //
  2595. DBT *ha_tokudb::pack_key(
  2596. DBT * key,
  2597. uint keynr,
  2598. uchar * buff,
  2599. const uchar * key_ptr,
  2600. uint key_length,
  2601. int8_t inf_byte
  2602. )
  2603. {
  2604. TOKUDB_DBUG_ENTER("ha_tokudb::pack_key");
  2605. #if TOKU_INCLUDE_EXTENDED_KEYS
  2606. if (keynr != primary_key && !test(hidden_primary_key)) {
  2607. DBUG_RETURN(pack_ext_key(key, keynr, buff, key_ptr, key_length, inf_byte));
  2608. }
  2609. #endif
  2610. KEY *key_info = &table->key_info[keynr];
  2611. KEY_PART_INFO *key_part = key_info->key_part;
  2612. KEY_PART_INFO *end = key_part + get_key_parts(key_info);
  2613. my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
  2614. memset((void *) key, 0, sizeof(*key));
  2615. key->data = buff;
  2616. // first put the "infinity" byte at beginning. States if missing columns are implicitly
  2617. // positive infinity or negative infinity
  2618. *buff++ = (uchar)inf_byte;
  2619. for (; key_part != end && (int) key_length > 0; key_part++) {
  2620. uint offset = 0;
  2621. if (key_part->null_bit) {
  2622. if (!(*key_ptr == 0)) {
  2623. *buff++ = NULL_COL_VAL;
  2624. key_length -= key_part->store_length;
  2625. key_ptr += key_part->store_length;
  2626. continue;
  2627. }
  2628. *buff++ = NONNULL_COL_VAL;
  2629. offset = 1; // Data is at key_ptr+1
  2630. }
  2631. #if !defined(MARIADB_BASE_VERSION)
  2632. assert(table->s->db_low_byte_first);
  2633. #endif
  2634. buff = pack_key_toku_key_field(
  2635. buff,
  2636. (uchar *) key_ptr + offset,
  2637. key_part->field,
  2638. key_part->length
  2639. );
  2640. key_ptr += key_part->store_length;
  2641. key_length -= key_part->store_length;
  2642. }
  2643. key->size = (buff - (uchar *) key->data);
  2644. DBUG_DUMP("key", (uchar *) key->data, key->size);
  2645. dbug_tmp_restore_column_map(table->write_set, old_map);
  2646. DBUG_RETURN(key);
  2647. }
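//
// Illustration (not part of the build): how the inf_byte parameter is used
// when packing the two endpoints of a range scan. A minimal sketch assuming
// the COL_NEG_INF/COL_POS_INF constants and a key_range-style start_key and
// end_key; the buffers are the prelocked range members allocated in open().
#if 0
DBT left, right;
pack_key(&left, active_index, prelocked_left_range,
         start_key->key, start_key->length, COL_NEG_INF);
pack_key(&right, active_index, prelocked_right_range,
         end_key->key, end_key->length, COL_POS_INF);
// With COL_NEG_INF, missing trailing columns compare below every stored key
// sharing the prefix; with COL_POS_INF, above -- bracketing the range.
#endif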
  2648. #if TOKU_INCLUDE_EXTENDED_KEYS
  2649. DBT *ha_tokudb::pack_ext_key(
  2650. DBT * key,
  2651. uint keynr,
  2652. uchar * buff,
  2653. const uchar * key_ptr,
  2654. uint key_length,
  2655. int8_t inf_byte
  2656. )
  2657. {
  2658. TOKUDB_DBUG_ENTER("ha_tokudb::pack_ext_key");
  2659. // build a list of PK parts that are in the SK. we will use this list to build the
  2660. // extended key if necessary.
  2661. KEY *pk_key_info = &table->key_info[primary_key];
  2662. uint pk_parts = get_key_parts(pk_key_info);
  2663. uint pk_next = 0;
  2664. struct {
  2665. const uchar *key_ptr;
  2666. KEY_PART_INFO *key_part;
  2667. } pk_info[pk_parts];
  2668. KEY *key_info = &table->key_info[keynr];
  2669. KEY_PART_INFO *key_part = key_info->key_part;
  2670. KEY_PART_INFO *end = key_part + get_key_parts(key_info);
  2671. my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
  2672. memset((void *) key, 0, sizeof(*key));
  2673. key->data = buff;
  2674. // first put the "infinity" byte at beginning. States if missing columns are implicitly
  2675. // positive infinity or negative infinity
  2676. *buff++ = (uchar)inf_byte;
  2677. for (; key_part != end && (int) key_length > 0; key_part++) {
  2678. // if the SK part is part of the PK, then append it to the list.
  2679. if (key_part->field->part_of_key.is_set(primary_key)) {
  2680. assert(pk_next < pk_parts);
  2681. pk_info[pk_next].key_ptr = key_ptr;
  2682. pk_info[pk_next].key_part = key_part;
  2683. pk_next++;
  2684. }
  2685. uint offset = 0;
  2686. if (key_part->null_bit) {
  2687. if (!(*key_ptr == 0)) {
  2688. *buff++ = NULL_COL_VAL;
  2689. key_length -= key_part->store_length;
  2690. key_ptr += key_part->store_length;
  2691. continue;
  2692. }
  2693. *buff++ = NONNULL_COL_VAL;
  2694. offset = 1; // Data is at key_ptr+1
  2695. }
  2696. #if !defined(MARIADB_BASE_VERSION)
  2697. assert(table->s->db_low_byte_first);
  2698. #endif
  2699. buff = pack_key_toku_key_field(
  2700. buff,
  2701. (uchar *) key_ptr + offset,
  2702. key_part->field,
  2703. key_part->length
  2704. );
  2705. key_ptr += key_part->store_length;
  2706. key_length -= key_part->store_length;
  2707. }
  2708. if (key_length > 0) {
  2709. assert(key_part == end);
  2710. end = key_info->key_part + get_ext_key_parts(key_info);
  2711. // pack PK in order of PK key parts
  2712. for (uint pk_index = 0; key_part != end && (int) key_length > 0 && pk_index < pk_parts; pk_index++) {
  2713. uint i;
  2714. for (i = 0; i < pk_next; i++) {
  2715. if (pk_info[i].key_part->fieldnr == pk_key_info->key_part[pk_index].fieldnr)
  2716. break;
  2717. }
  2718. if (i < pk_next) {
  2719. const uchar *this_key_ptr = pk_info[i].key_ptr;
  2720. KEY_PART_INFO *this_key_part = pk_info[i].key_part;
  2721. buff = pack_key_toku_key_field(buff, (uchar *) this_key_ptr, this_key_part->field, this_key_part->length);
  2722. } else {
  2723. buff = pack_key_toku_key_field(buff, (uchar *) key_ptr, key_part->field, key_part->length);
  2724. key_ptr += key_part->store_length;
  2725. key_length -= key_part->store_length;
  2726. key_part++;
  2727. }
  2728. }
  2729. }
  2730. key->size = (buff - (uchar *) key->data);
  2731. DBUG_DUMP("key", (uchar *) key->data, key->size);
  2732. dbug_tmp_restore_column_map(table->write_set, old_map);
  2733. DBUG_RETURN(key);
  2734. }
  2735. #endif
  2736. //
  2737. // get max used hidden primary key value
  2738. //
  2739. void ha_tokudb::init_hidden_prim_key_info() {
2740. TOKUDB_DBUG_ENTER("ha_tokudb::init_hidden_prim_key_info");
  2741. pthread_mutex_lock(&share->mutex);
  2742. if (!(share->status & STATUS_PRIMARY_KEY_INIT)) {
  2743. int error = 0;
  2744. THD* thd = ha_thd();
  2745. DB_TXN* txn = NULL;
  2746. DBC* c = NULL;
  2747. tokudb_trx_data *trx = NULL;
  2748. trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot);
  2749. bool do_commit = false;
  2750. if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) {
  2751. txn = trx->sub_sp_level;
  2752. }
  2753. else {
  2754. do_commit = true;
  2755. error = db_env->txn_begin(db_env, 0, &txn, 0);
  2756. assert(error == 0);
  2757. }
  2758. error = share->key_file[primary_key]->cursor(
  2759. share->key_file[primary_key],
  2760. txn,
  2761. &c,
  2762. 0
  2763. );
  2764. assert(error == 0);
  2765. DBT key,val;
  2766. memset(&key, 0, sizeof(key));
  2767. memset(&val, 0, sizeof(val));
  2768. error = c->c_get(c, &key, &val, DB_LAST);
  2769. if (error == 0) {
  2770. assert(key.size == TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
  2771. share->auto_ident = hpk_char_to_num((uchar *)key.data);
  2772. }
  2773. error = c->c_close(c);
  2774. assert(error == 0);
  2775. if (do_commit) {
  2776. commit_txn(txn, 0);
  2777. }
  2778. share->status |= STATUS_PRIMARY_KEY_INIT;
  2779. }
  2780. pthread_mutex_unlock(&share->mutex);
  2781. DBUG_VOID_RETURN;
  2782. }
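//
// Illustration (not part of the build): the hidden primary key is a
// fixed-width counter whose stored order must match numeric order, which is
// why DB_LAST on the primary dictionary yields the maximum value used. A
// minimal sketch of a big-endian encode/decode pair under that assumption;
// the real conversions are hpk_num_to_char/hpk_char_to_num.
#if 0
#include <stdint.h>
static void toy_hpk_num_to_char(uint8_t *to, uint64_t num, int len) {
    for (int i = len - 1; i >= 0; i--) {
        to[i] = (uint8_t)(num & 0xff);
        num >>= 8;
    }
}
static uint64_t toy_hpk_char_to_num(const uint8_t *from, int len) {
    uint64_t num = 0;
    for (int i = 0; i < len; i++)
        num = (num << 8) | from[i];
    return num;
}
#endif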
  2783. /** @brief
  2784. Get metadata info stored in status.tokudb
  2785. */
  2786. int ha_tokudb::get_status(DB_TXN* txn) {
  2787. TOKUDB_DBUG_ENTER("ha_tokudb::get_status");
  2788. DBT key, value;
  2789. HA_METADATA_KEY curr_key;
  2790. int error;
  2791. //
  2792. // open status.tokudb
  2793. //
  2794. if (!share->status_block) {
  2795. error = open_status_dictionary(
  2796. &share->status_block,
  2797. share->table_name,
  2798. txn
  2799. );
  2800. if (error) {
  2801. goto cleanup;
  2802. }
  2803. }
  2804. //
  2805. // transaction to be used for putting metadata into status.tokudb
  2806. //
  2807. memset(&key, 0, sizeof(key));
  2808. memset(&value, 0, sizeof(value));
  2809. key.data = &curr_key;
  2810. key.size = sizeof(curr_key);
  2811. value.flags = DB_DBT_USERMEM;
  2812. assert(share->status_block);
  2813. //
  2814. // get version
  2815. //
  2816. value.ulen = sizeof(share->version);
  2817. value.data = &share->version;
  2818. curr_key = hatoku_new_version;
  2819. error = share->status_block->get(
  2820. share->status_block,
  2821. txn,
  2822. &key,
  2823. &value,
  2824. 0
  2825. );
  2826. if (error == DB_NOTFOUND) {
  2827. //
2828. // hack to handle the issues of going back and forth
2829. // between 5.0.3 and 5.0.4
2830. // the problem with going back and forth
2831. // is with storing the frm file: 5.0.4 stores it, 5.0.3 does not,
2832. // so, if a user goes back and forth and alters the schema,
2833. // the frm stored can get out of sync with the schema of the table.
2834. // This can cause issues.
2835. // To take care of this, we are doing this versioning work here.
2836. // We change the key that stores the version:
2837. // in 5.0.3, it is hatoku_old_version, in 5.0.4 it is hatoku_new_version.
2838. // When we encounter a table that does not have hatoku_new_version
2839. // set, we give it the right one, and overwrite the old one with zero.
2840. // This ensures that 5.0.3 cannot open the table once it has been opened by 5.0.4.
  2841. //
  2842. uint dummy_version = 0;
  2843. share->version = HA_TOKU_ORIG_VERSION;
  2844. error = write_to_status(
  2845. share->status_block,
  2846. hatoku_new_version,
  2847. &share->version,
  2848. sizeof(share->version),
  2849. txn
  2850. );
  2851. if (error) { goto cleanup; }
  2852. error = write_to_status(
  2853. share->status_block,
  2854. hatoku_old_version,
  2855. &dummy_version,
  2856. sizeof(dummy_version),
  2857. txn
  2858. );
  2859. if (error) { goto cleanup; }
  2860. }
  2861. else if (error || value.size != sizeof(share->version)) {
  2862. if (error == 0) {
  2863. error = HA_ERR_INTERNAL_ERROR;
  2864. }
  2865. goto cleanup;
  2866. }
  2867. //
  2868. // get capabilities
  2869. //
  2870. curr_key = hatoku_capabilities;
  2871. value.ulen = sizeof(share->capabilities);
  2872. value.data = &share->capabilities;
  2873. error = share->status_block->get(
  2874. share->status_block,
  2875. txn,
  2876. &key,
  2877. &value,
  2878. 0
  2879. );
  2880. if (error == DB_NOTFOUND) {
  2881. share->capabilities= 0;
  2882. }
2883. else if (error || value.size != sizeof(share->capabilities)) {
  2884. if (error == 0) {
  2885. error = HA_ERR_INTERNAL_ERROR;
  2886. }
  2887. goto cleanup;
  2888. }
  2889. error = 0;
  2890. cleanup:
  2891. TOKUDB_DBUG_RETURN(error);
  2892. }
  2893. /** @brief
2894. Return an estimate of the number of rows in the table.
  2895. Used when sorting to allocate buffers and by the optimizer.
  2896. This is used in filesort.cc.
  2897. */
  2898. ha_rows ha_tokudb::estimate_rows_upper_bound() {
  2899. TOKUDB_DBUG_ENTER("ha_tokudb::estimate_rows_upper_bound");
  2900. DBUG_RETURN(share->rows + HA_TOKUDB_EXTRA_ROWS);
  2901. }
  2902. //
  2903. // Function that compares two primary keys that were saved as part of rnd_pos
  2904. // and ::position
  2905. //
  2906. int ha_tokudb::cmp_ref(const uchar * ref1, const uchar * ref2) {
  2907. int ret_val = 0;
  2908. ret_val = tokudb_compare_two_keys(
  2909. ref1 + sizeof(uint32_t),
  2910. *(uint32_t *)ref1,
  2911. ref2 + sizeof(uint32_t),
  2912. *(uint32_t *)ref2,
  2913. (uchar *)share->file->descriptor->dbt.data + 4,
  2914. *(uint32_t *)share->file->descriptor->dbt.data - 4,
  2915. false
  2916. );
  2917. return ret_val;
  2918. }
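//
// Note on the arithmetic above: each ref carries its DBT size in the first
// four bytes (see the ref layout sketch near initialize_share), and the
// dictionary's descriptor likewise begins with a 4-byte length, which is why
// the comparison skips "+ 4" bytes of descriptor data and subtracts 4 from
// its size.
//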
  2919. bool ha_tokudb::check_if_incompatible_data(HA_CREATE_INFO * info, uint table_changes) {
  2920. //
2921. // This is a horrendous hack for now, copied from InnoDB.
2922. // It states that if the auto increment create field has changed
2923. // via an "alter table foo auto_increment=new_val", then this
2924. // change is incompatible and the entire table must be rebuilt.
  2925. // This will need to be fixed
  2926. //
  2927. if ((info->used_fields & HA_CREATE_USED_AUTO) &&
  2928. info->auto_increment_value != 0) {
  2929. return COMPATIBLE_DATA_NO;
  2930. }
  2931. if (table_changes != IS_EQUAL_YES)
  2932. return COMPATIBLE_DATA_NO;
  2933. return COMPATIBLE_DATA_YES;
  2934. }
  2935. //
  2936. // Method that is called before the beginning of many calls
  2937. // to insert rows (ha_tokudb::write_row). There is no guarantee
  2938. // that start_bulk_insert is called, however there is a guarantee
  2939. // that if start_bulk_insert is called, then end_bulk_insert may be
  2940. // called as well.
  2941. // Parameters:
  2942. // [in] rows - an estimate of the number of rows that will be inserted
  2943. // if number of rows is unknown (such as if doing
  2944. // "insert into foo select * from bar), then rows
  2945. // will be 0
  2946. //
  2947. //
  2948. // This function returns true if the table MAY be empty.
  2949. // It is NOT meant to be a 100% check for emptiness.
  2950. // This is used for a bulk load optimization.
  2951. //
  2952. bool ha_tokudb::may_table_be_empty(DB_TXN *txn) {
  2953. int error;
  2954. bool ret_val = false;
  2955. DBC* tmp_cursor = NULL;
  2956. DB_TXN* tmp_txn = NULL;
  2957. if (txn == NULL) {
  2958. error = db_env->txn_begin(db_env, 0, &tmp_txn, 0);
  2959. if (error) {
  2960. goto cleanup;
  2961. }
  2962. txn = tmp_txn;
  2963. }
  2964. error = share->file->cursor(share->file, txn, &tmp_cursor, 0);
  2965. if (error) {
  2966. goto cleanup;
  2967. }
  2968. error = tmp_cursor->c_getf_next(tmp_cursor, 0, smart_dbt_do_nothing, NULL);
  2969. if (error == DB_NOTFOUND) {
  2970. ret_val = true;
  2971. }
  2972. else {
  2973. ret_val = false;
  2974. }
  2975. error = 0;
  2976. cleanup:
  2977. if (tmp_cursor) {
  2978. int r = tmp_cursor->c_close(tmp_cursor);
  2979. assert(r==0);
  2980. tmp_cursor = NULL;
  2981. }
  2982. if (tmp_txn) {
  2983. commit_txn(tmp_txn, 0);
  2984. tmp_txn = NULL;
  2985. }
  2986. return ret_val;
  2987. }
  2988. void ha_tokudb::start_bulk_insert(ha_rows rows) {
  2989. TOKUDB_DBUG_ENTER("ha_tokudb::start_bulk_insert");
  2990. THD* thd = ha_thd();
  2991. tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
  2992. delay_updating_ai_metadata = true;
  2993. ai_metadata_update_required = false;
  2994. abort_loader = false;
  2995. rw_rdlock(&share->num_DBs_lock);
  2996. uint curr_num_DBs = table->s->keys + test(hidden_primary_key);
  2997. num_DBs_locked_in_bulk = true;
  2998. lock_count = 0;
  2999. if (share->try_table_lock) {
  3000. if (get_prelock_empty(thd) && may_table_be_empty(transaction)) {
  3001. if (using_ignore || is_insert_ignore(thd) || thd->lex->duplicates != DUP_ERROR) {
  3002. acquire_table_lock(transaction, lock_write);
  3003. }
  3004. else {
  3005. mult_dbt_flags[primary_key] = 0;
  3006. if (!thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS) && !hidden_primary_key) {
  3007. mult_put_flags[primary_key] = DB_NOOVERWRITE;
  3008. }
  3009. uint32_t loader_flags = (get_load_save_space(thd)) ?
  3010. LOADER_COMPRESS_INTERMEDIATES : 0;
  3011. int error = db_env->create_loader(
  3012. db_env,
  3013. transaction,
  3014. &loader,
  3015. NULL, // no src_db needed
  3016. curr_num_DBs,
  3017. share->key_file,
  3018. mult_put_flags,
  3019. mult_dbt_flags,
  3020. loader_flags
  3021. );
  3022. if (error) {
  3023. assert(loader == NULL);
  3024. goto exit_try_table_lock;
  3025. }
  3026. lc.thd = thd;
  3027. lc.ha = this;
  3028. error = loader->set_poll_function(loader, loader_poll_fun, &lc);
  3029. assert(!error);
  3030. error = loader->set_error_callback(loader, loader_dup_fun, &lc);
  3031. assert(!error);
  3032. trx->stmt_progress.using_loader = true;
  3033. }
  3034. }
  3035. exit_try_table_lock:
  3036. pthread_mutex_lock(&share->mutex);
  3037. share->try_table_lock = false;
  3038. pthread_mutex_unlock(&share->mutex);
  3039. }
  3040. DBUG_VOID_RETURN;
  3041. }
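//
// Illustration (not part of the build): the loader lifecycle that
// start_bulk_insert begins and end_bulk_insert finishes, using only entry
// points visible in this file. A minimal sketch; the poll and error
// callbacks and all error paths are elided.
#if 0
DB_LOADER *loader = NULL;
int r = db_env->create_loader(db_env, transaction, &loader,
                              NULL /* no src_db */, curr_num_DBs,
                              share->key_file, mult_put_flags,
                              mult_dbt_flags, loader_flags);
// ... one loader->put(loader, &key, &row) per inserted row ...
if (r == 0)
    r = loader->close(loader);  // success: build all dictionaries
else if (loader)
    loader->abort(loader);      // failure: discard intermediate files
#endif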
  3042. //
  3043. // Method that is called at the end of many calls to insert rows
  3044. // (ha_tokudb::write_row). If start_bulk_insert is called, then
  3045. // this is guaranteed to be called.
  3046. //
  3047. int ha_tokudb::end_bulk_insert(bool abort) {
  3048. TOKUDB_DBUG_ENTER("ha_tokudb::end_bulk_insert");
  3049. int error = 0;
  3050. THD* thd = ha_thd();
  3051. tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
  3052. bool using_loader = (loader != NULL);
  3053. if (ai_metadata_update_required) {
  3054. pthread_mutex_lock(&share->mutex);
  3055. error = update_max_auto_inc(share->status_block, share->last_auto_increment);
  3056. pthread_mutex_unlock(&share->mutex);
  3057. if (error) { goto cleanup; }
  3058. }
  3059. delay_updating_ai_metadata = false;
  3060. ai_metadata_update_required = false;
  3061. loader_error = 0;
  3062. if (loader) {
  3063. if (!abort_loader && !thd->killed) {
  3064. error = loader->close(loader);
  3065. loader = NULL;
  3066. if (error) {
  3067. if (thd->killed) {
  3068. my_error(ER_QUERY_INTERRUPTED, MYF(0));
  3069. }
  3070. goto cleanup;
  3071. }
  3072. for (uint i = 0; i < table_share->keys; i++) {
  3073. if (table_share->key_info[i].flags & HA_NOSAME) {
  3074. bool is_unique;
  3075. if (i == primary_key) {
  3076. continue;
  3077. }
  3078. error = is_index_unique(
  3079. &is_unique,
  3080. transaction,
  3081. share->key_file[i],
  3082. &table->key_info[i]
  3083. );
  3084. if (error) goto cleanup;
  3085. if (!is_unique) {
  3086. error = HA_ERR_FOUND_DUPP_KEY;
  3087. last_dup_key = i;
  3088. goto cleanup;
  3089. }
  3090. }
  3091. }
  3092. }
  3093. else {
  3094. error = sprintf(write_status_msg, "aborting bulk load");
  3095. thd_proc_info(thd, write_status_msg);
  3096. loader->abort(loader);
  3097. loader = NULL;
  3098. share->try_table_lock = true;
  3099. }
  3100. }
  3101. cleanup:
  3102. if (num_DBs_locked_in_bulk) {
  3103. rw_unlock(&share->num_DBs_lock);
  3104. }
  3105. num_DBs_locked_in_bulk = false;
  3106. lock_count = 0;
  3107. if (loader) {
  3108. error = sprintf(write_status_msg, "aborting bulk load");
  3109. thd_proc_info(thd, write_status_msg);
  3110. loader->abort(loader);
  3111. loader = NULL;
  3112. }
  3113. abort_loader = false;
  3114. memset(&lc, 0, sizeof(lc));
  3115. if (error || loader_error) {
  3116. my_errno = error ? error : loader_error;
  3117. if (using_loader) {
  3118. share->try_table_lock = true;
  3119. }
  3120. }
  3121. trx->stmt_progress.using_loader = false;
  3122. TOKUDB_DBUG_RETURN(error ? error : loader_error);
  3123. }
  3124. int ha_tokudb::end_bulk_insert() {
  3125. return end_bulk_insert( false );
  3126. }
  3127. int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info) {
  3128. int error;
  3129. DBC* tmp_cursor1 = NULL;
  3130. DBC* tmp_cursor2 = NULL;
  3131. DBT key1, key2, val, packed_key1, packed_key2;
  3132. uint64_t cnt = 0;
  3133. char status_msg[MAX_ALIAS_NAME + 200]; //buffer of 200 should be a good upper bound.
  3134. THD* thd = ha_thd();
  3135. memset(&key1, 0, sizeof(key1));
  3136. memset(&key2, 0, sizeof(key2));
  3137. memset(&val, 0, sizeof(val));
  3138. memset(&packed_key1, 0, sizeof(packed_key1));
  3139. memset(&packed_key2, 0, sizeof(packed_key2));
  3140. *is_unique = true;
  3141. error = db->cursor(
  3142. db,
  3143. txn,
  3144. &tmp_cursor1,
  3145. DB_SERIALIZABLE
  3146. );
  3147. if (error) { goto cleanup; }
  3148. error = db->cursor(
  3149. db,
  3150. txn,
  3151. &tmp_cursor2,
  3152. DB_SERIALIZABLE
  3153. );
  3154. if (error) { goto cleanup; }
  3155. error = tmp_cursor1->c_get(
  3156. tmp_cursor1,
  3157. &key1,
  3158. &val,
  3159. DB_NEXT
  3160. );
  3161. if (error == DB_NOTFOUND) {
  3162. *is_unique = true;
  3163. error = 0;
  3164. goto cleanup;
  3165. }
  3166. else if (error) { goto cleanup; }
  3167. error = tmp_cursor2->c_get(
  3168. tmp_cursor2,
  3169. &key2,
  3170. &val,
  3171. DB_NEXT
  3172. );
  3173. if (error) { goto cleanup; }
  3174. error = tmp_cursor2->c_get(
  3175. tmp_cursor2,
  3176. &key2,
  3177. &val,
  3178. DB_NEXT
  3179. );
  3180. if (error == DB_NOTFOUND) {
  3181. *is_unique = true;
  3182. error = 0;
  3183. goto cleanup;
  3184. }
  3185. else if (error) { goto cleanup; }
  3186. while (error != DB_NOTFOUND) {
  3187. bool has_null1;
  3188. bool has_null2;
  3189. int cmp;
  3190. place_key_into_mysql_buff(
  3191. key_info,
  3192. table->record[0],
  3193. (uchar *) key1.data + 1
  3194. );
  3195. place_key_into_mysql_buff(
  3196. key_info,
  3197. table->record[1],
  3198. (uchar *) key2.data + 1
  3199. );
  3200. create_dbt_key_for_lookup(
  3201. &packed_key1,
  3202. key_info,
  3203. key_buff,
  3204. table->record[0],
  3205. &has_null1
  3206. );
  3207. create_dbt_key_for_lookup(
  3208. &packed_key2,
  3209. key_info,
  3210. key_buff2,
  3211. table->record[1],
  3212. &has_null2
  3213. );
  3214. if (!has_null1 && !has_null2) {
  3215. cmp = tokudb_prefix_cmp_dbt_key(db, &packed_key1, &packed_key2);
  3216. if (cmp == 0) {
  3217. memcpy(key_buff, key1.data, key1.size);
  3218. place_key_into_mysql_buff(
  3219. key_info,
  3220. table->record[0],
  3221. (uchar *) key_buff + 1
  3222. );
  3223. *is_unique = false;
  3224. break;
  3225. }
  3226. }
  3227. error = tmp_cursor1->c_get(
  3228. tmp_cursor1,
  3229. &key1,
  3230. &val,
  3231. DB_NEXT
  3232. );
  3233. if (error) { goto cleanup; }
  3234. error = tmp_cursor2->c_get(
  3235. tmp_cursor2,
  3236. &key2,
  3237. &val,
  3238. DB_NEXT
  3239. );
  3240. if (error && (error != DB_NOTFOUND)) { goto cleanup; }
  3241. cnt++;
  3242. if ((cnt % 10000) == 0) {
  3243. sprintf(
  3244. status_msg,
  3245. "Verifying index uniqueness: Checked %llu of %llu rows in key-%s.",
  3246. (long long unsigned) cnt,
  3247. share->rows,
  3248. key_info->name);
  3249. thd_proc_info(thd, status_msg);
  3250. if (thd->killed) {
  3251. my_error(ER_QUERY_INTERRUPTED, MYF(0));
  3252. error = ER_QUERY_INTERRUPTED;
  3253. goto cleanup;
  3254. }
  3255. }
  3256. }
  3257. error = 0;
  3258. cleanup:
  3259. if (tmp_cursor1) {
  3260. tmp_cursor1->c_close(tmp_cursor1);
  3261. tmp_cursor1 = NULL;
  3262. }
  3263. if (tmp_cursor2) {
  3264. tmp_cursor2->c_close(tmp_cursor2);
  3265. tmp_cursor2 = NULL;
  3266. }
  3267. return error;
  3268. }
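//
// Illustration (not part of the build): the shape of the two-cursor scan
// above. The second cursor starts one row ahead, so each iteration compares
// a row with its successor; any adjacent pair that is prefix-equal (and
// NULL-free) proves the index is not unique. A minimal sketch over a
// hypothetical iterator type.
#if 0
template <class Iter>
static bool toy_has_adjacent_dup(Iter a, Iter b /* starts one row ahead */) {
    for (; b.valid(); a.next(), b.next()) {
        if (!a.has_null() && !b.has_null() && prefix_equal(a.key(), b.key()))
            return true; // duplicate: index is not unique
    }
    return false;
}
#endif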
int ha_tokudb::is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn) {
    DBT key;
    int error = 0;
    bool has_null;
    DBC* tmp_cursor = NULL;
    struct index_read_info ir_info;
    struct smart_dbt_info info;
    memset((void *)&key, 0, sizeof(key));
    info.ha = this;
    info.buf = NULL;
    info.keynr = dict_index;

    ir_info.smart_dbt_info = info;

    create_dbt_key_for_lookup(
        &key,
        key_info,
        key_buff3,
        record,
        &has_null
        );
    ir_info.orig_key = &key;

    if (has_null) {
        error = 0;
        *is_unique = true;
        goto cleanup;
    }

    error = share->key_file[dict_index]->cursor(
        share->key_file[dict_index],
        txn,
        &tmp_cursor,
        DB_SERIALIZABLE
        );
    if (error) { goto cleanup; }

    error = tmp_cursor->c_getf_set_range(
        tmp_cursor,
        0,
        &key,
        smart_dbt_callback_lookup,
        &ir_info
        );
    if (error == DB_NOTFOUND) {
        *is_unique = true;
        error = 0;
        goto cleanup;
    }
    else if (error) {
        goto cleanup;
    }
    if (ir_info.cmp) {
        *is_unique = true;
    }
    else {
        *is_unique = false;
    }
    error = 0;

cleanup:
    if (tmp_cursor) {
        int r = tmp_cursor->c_close(tmp_cursor);
        assert(r == 0);
        tmp_cursor = NULL;
    }
    return error;
}
int ha_tokudb::do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd) {
    int error;
    //
    // first do uniqueness checks
    //
    if (share->has_unique_keys && !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) {
        for (uint keynr = 0; keynr < table_share->keys; keynr++) {
            bool is_unique_key = table->key_info[keynr].flags & HA_NOSAME;
            bool is_unique = false;
            //
            // don't need to do check for primary key
            //
            if (keynr == primary_key) {
                continue;
            }
            if (!is_unique_key) {
                continue;
            }
            //
            // if unique key, check uniqueness constraint
            // but, we do not need to check it if the key has a null
            // and we do not need to check it if unique_checks is off
            //
            error = is_val_unique(&is_unique, record, &table->key_info[keynr], keynr, txn);
            if (error) { goto cleanup; }
            if (!is_unique) {
                error = DB_KEYEXIST;
                last_dup_key = keynr;
                goto cleanup;
            }
        }
    }
    error = 0;
cleanup:
    return error;
}
void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) {
    int error;
    DBT row, key;
    //
    // variables for testing key packing, only used in some debug modes
    //
    uchar* tmp_pk_key_data = NULL;
    uchar* tmp_pk_val_data = NULL;
    DBT tmp_pk_key;
    DBT tmp_pk_val;
    bool has_null;
    int cmp;

    memset(&tmp_pk_key, 0, sizeof(DBT));
    memset(&tmp_pk_val, 0, sizeof(DBT));

    //
    // used for testing the packing of keys
    //
    tmp_pk_key_data = (uchar *)my_malloc(pk_key->size, MYF(MY_WME));
    assert(tmp_pk_key_data);
    tmp_pk_val_data = (uchar *)my_malloc(pk_val->size, MYF(MY_WME));
    assert(tmp_pk_val_data);
    memcpy(tmp_pk_key_data, pk_key->data, pk_key->size);
    memcpy(tmp_pk_val_data, pk_val->data, pk_val->size);
    tmp_pk_key.data = tmp_pk_key_data;
    tmp_pk_key.size = pk_key->size;
    tmp_pk_val.data = tmp_pk_val_data;
    tmp_pk_val.size = pk_val->size;

    for (uint keynr = 0; keynr < table_share->keys; keynr++) {
        uint32_t tmp_num_bytes = 0;
        uchar* row_desc = NULL;
        uint32_t desc_size = 0;

        if (keynr == primary_key) {
            continue;
        }

        create_dbt_key_from_table(&key, keynr, key_buff2, record, &has_null);

        //
        // TEST
        //
        row_desc = (uchar *)share->key_file[keynr]->descriptor->dbt.data;
        row_desc += (*(uint32_t *)row_desc);
        desc_size = (*(uint32_t *)row_desc) - 4;
        row_desc += 4;
        tmp_num_bytes = pack_key_from_desc(
            key_buff3,
            row_desc,
            desc_size,
            &tmp_pk_key,
            &tmp_pk_val
            );
        assert(tmp_num_bytes == key.size);
        cmp = memcmp(key_buff3, key_buff2, tmp_num_bytes);
        assert(cmp == 0);

        //
        // test key packing of clustering keys
        //
        if (table->key_info[keynr].flags & HA_CLUSTERING) {
            error = pack_row(&row, (const uchar *) record, keynr);
            assert(error == 0);
            uchar* tmp_buff = NULL;
            tmp_buff = (uchar *)my_malloc(alloced_rec_buff_length, MYF(MY_WME));
            assert(tmp_buff);
            row_desc = (uchar *)share->key_file[keynr]->descriptor->dbt.data;
            row_desc += (*(uint32_t *)row_desc);
            row_desc += (*(uint32_t *)row_desc);
            desc_size = (*(uint32_t *)row_desc) - 4;
            row_desc += 4;
            tmp_num_bytes = pack_clustering_val_from_desc(
                tmp_buff,
                row_desc,
                desc_size,
                &tmp_pk_val
                );
            assert(tmp_num_bytes == row.size);
            cmp = memcmp(tmp_buff, rec_buff, tmp_num_bytes);
            assert(cmp == 0);
            my_free(tmp_buff, MYF(MY_ALLOW_ZERO_PTR));
        }
    }

    //
    // copy stuff back out
    //
    error = pack_row(pk_val, (const uchar *) record, primary_key);
    assert(pk_val->size == tmp_pk_val.size);
    cmp = memcmp(pk_val->data, tmp_pk_val_data, pk_val->size);
    assert(cmp == 0);

    my_free(tmp_pk_key_data, MYF(MY_ALLOW_ZERO_PTR));
    my_free(tmp_pk_val_data, MYF(MY_ALLOW_ZERO_PTR));
}
//
// set the put flags for the main dictionary
//
void ha_tokudb::set_main_dict_put_flags(
    THD* thd,
    bool opt_eligible,
    uint32_t* put_flags
    )
{
    uint32_t old_prelock_flags = 0;
    uint curr_num_DBs = table->s->keys + test(hidden_primary_key);
    bool in_hot_index = share->num_DBs > curr_num_DBs;
    bool using_ignore_flag_opt = do_ignore_flag_optimization(
            thd, table, share->replace_into_fast);
    //
    // optimization for the "REPLACE INTO..." (and "INSERT IGNORE") commands.
    // if the command is "REPLACE INTO" and the only dictionary
    // is the main table (or all indexes are a subset of the pk),
    // then we can simply insert the element
    // with DB_YESOVERWRITE. If the element does not exist,
    // it will act as a normal insert, and if it does exist, it
    // will act as a replace, which is exactly what REPLACE INTO is supposed
    // to do. We cannot do this otherwise, because we would lose
    // consistency between indexes.
    //
    if (hidden_primary_key)
    {
        *put_flags = old_prelock_flags;
    }
    else if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)
            && !is_replace_into(thd) && !is_insert_ignore(thd))
    {
        *put_flags = old_prelock_flags;
    }
    else if (using_ignore_flag_opt && is_replace_into(thd)
            && !in_hot_index)
    {
        *put_flags = old_prelock_flags;
    }
    else if (opt_eligible && using_ignore_flag_opt && is_insert_ignore(thd)
            && !in_hot_index)
    {
        *put_flags = DB_NOOVERWRITE_NO_ERROR | old_prelock_flags;
    }
    else
    {
        *put_flags = DB_NOOVERWRITE | old_prelock_flags;
    }
}
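//
// Summary of the decision above (derived from the branches themselves, not
// new policy):
//
//   hidden primary key .............................. no overwrite check
//   relaxed unique checks, plain INSERT ............. no overwrite check
//   REPLACE INTO + ignore-flag opt, no hot index .... no overwrite check
//   INSERT IGNORE + ignore-flag opt, no hot index ... DB_NOOVERWRITE_NO_ERROR
//   everything else ................................. DB_NOOVERWRITE
//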
int ha_tokudb::insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk_val, DB_TXN* txn) {
    int error = 0;
    uint32_t put_flags = mult_put_flags[primary_key];
    THD *thd = ha_thd();
    uint curr_num_DBs = table->s->keys + test(hidden_primary_key);

    assert(curr_num_DBs == 1);

    set_main_dict_put_flags(thd, true, &put_flags);

    error = share->file->put(
        share->file,
        txn,
        pk_key,
        pk_val,
        put_flags
        );
    if (error) {
        last_dup_key = primary_key;
        goto cleanup;
    }

cleanup:
    return error;
}
int ha_tokudb::insert_rows_to_dictionaries_mult(DBT* pk_key, DBT* pk_val, DB_TXN* txn, THD* thd) {
    int error = 0;
    uint curr_num_DBs = share->num_DBs;
    set_main_dict_put_flags(thd, true, &mult_put_flags[primary_key]);
    uint32_t i, flags = mult_put_flags[primary_key];

    // the insert ignore optimization uses DB_NOOVERWRITE_NO_ERROR,
    // which is not allowed with env->put_multiple.
    // we have to insert the rows one by one in this case.
    if (flags & DB_NOOVERWRITE_NO_ERROR) {
        DB * src_db = share->key_file[primary_key];
        for (i = 0; i < curr_num_DBs; i++) {
            DB * db = share->key_file[i];
            if (i == primary_key) {
                // if it's the primary key, insert the rows
                // as they are.
                error = db->put(db, txn, pk_key, pk_val, flags);
            } else {
                // generate a row for secondary keys.
                // use our multi put key/rec buffers
                // just as the ydb layer would have in
                // env->put_multiple(), except that
                // we will just do a put() right away.
                error = tokudb_generate_row(db, src_db,
                        &mult_key_dbt[i], &mult_rec_dbt[i],
                        pk_key, pk_val);
                if (error != 0) {
                    goto out;
                }
                error = db->put(db, txn, &mult_key_dbt[i],
                        &mult_rec_dbt[i], flags);
            }
            if (error != 0) {
                goto out;
            }
        }
    } else {
        // not insert ignore, so we can use put multiple
        error = db_env->put_multiple(
            db_env,
            share->key_file[primary_key],
            txn,
            pk_key,
            pk_val,
            curr_num_DBs,
            share->key_file,
            mult_key_dbt,
            mult_rec_dbt,
            mult_put_flags
            );
    }

out:
    //
    // on any error, record the primary key as the duplicate key;
    // the caller translates DB_KEYEXIST into a duplicate key error, which
    // MySQL may then ignore if duplicate key errors are being ignored
    //
    if (error) {
        last_dup_key = primary_key;
    }
    return error;
}
volatile int ha_tokudb_write_row_wait = 0; // debug

//
// Stores a row in the table, called when handling an INSERT query
// Parameters:
//      [in]    record - a row in MySQL format
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::write_row(uchar * record) {
    TOKUDB_DBUG_ENTER("ha_tokudb::write_row");
    while (ha_tokudb_write_row_wait) sleep(1); // debug

    DBT row, prim_key;
    int error;
    THD *thd = ha_thd();
    bool has_null;
    DB_TXN* sub_trans = NULL;
    DB_TXN* txn = NULL;
    tokudb_trx_data *trx = NULL;
    uint curr_num_DBs;
    bool create_sub_trans = false;

    //
    // work that needs to be done here because MySQL does not abstract it
    // away from us, namely filling in the auto increment and setting the
    // auto timestamp
    //
    ha_statistic_increment(&SSV::ha_write_count);
#if MYSQL_VERSION_ID < 50600
    if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) {
        table->timestamp_field->set_time();
    }
#endif
    if (table->next_number_field && record == table->record[0]) {
        error = update_auto_increment();
        if (error)
            goto cleanup;
    }

    //
    // check to see if some value for the auto increment column that is
    // bigger than anything seen until now is being used. If so, update the
    // metadata to reflect it. The goal is that we never want a dup key
    // error due to a bad increment of the auto inc field.
    //
    if (share->has_auto_inc && record == table->record[0]) {
        pthread_mutex_lock(&share->mutex);
        ulonglong curr_auto_inc = retrieve_auto_increment(
            table->field[share->ai_field_index]->key_type(),
            field_offset(table->field[share->ai_field_index], table),
            record
            );
        if (curr_auto_inc > share->last_auto_increment) {
            share->last_auto_increment = curr_auto_inc;
            if (delay_updating_ai_metadata) {
                ai_metadata_update_required = true;
            }
            else {
                update_max_auto_inc(share->status_block, share->last_auto_increment);
            }
        }
        pthread_mutex_unlock(&share->mutex);
    }

    //
    // grab reader lock on numDBs_lock
    //
    if (!num_DBs_locked_in_bulk) {
        rw_rdlock(&share->num_DBs_lock);
    }
    else {
        lock_count++;
        if (lock_count >= 2000) {
            rw_unlock(&share->num_DBs_lock);
            rw_rdlock(&share->num_DBs_lock);
            lock_count = 0;
        }
    }
    curr_num_DBs = share->num_DBs;

    if (hidden_primary_key) {
        get_auto_primary_key(current_ident);
    }

    if (table_share->blob_fields) {
        if (fix_rec_buff_for_blob(max_row_length(record))) {
            error = HA_ERR_OUT_OF_MEM;
            goto cleanup;
        }
    }

    create_dbt_key_from_table(&prim_key, primary_key, primary_key_buff, record, &has_null);
    if ((error = pack_row(&row, (const uchar *) record, primary_key))) {
        goto cleanup;
    }

    create_sub_trans = (using_ignore && !(do_ignore_flag_optimization(thd, table, share->replace_into_fast)));
    if (create_sub_trans) {
        error = db_env->txn_begin(db_env, transaction, &sub_trans, DB_INHERIT_ISOLATION);
        if (error) {
            goto cleanup;
        }
    }
    txn = create_sub_trans ? sub_trans : transaction;

    if (tokudb_debug & TOKUDB_DEBUG_CHECK_KEY) {
        test_row_packing(record, &prim_key, &row);
    }

    if (loader) {
        error = loader->put(loader, &prim_key, &row);
        if (error) {
            abort_loader = true;
            goto cleanup;
        }
    }
    else {
        if (curr_num_DBs == 1) {
            error = insert_row_to_main_dictionary(record, &prim_key, &row, txn);
            if (error) { goto cleanup; }
        }
        else {
            error = do_uniqueness_checks(record, txn, thd);
            if (error) {
                // for #4633
                // if we have a duplicate key error, let's check the primary key to see
                // if there is a duplicate there. If so, set last_dup_key to the pk
                if (error == DB_KEYEXIST && !test(hidden_primary_key)) {
                    int r = share->file->getf_set(
                        share->file,
                        txn,
                        0,
                        &prim_key,
                        smart_dbt_do_nothing,
                        NULL
                        );
                    if (r == 0) {
                        // if we get no error, that means the row
                        // was found and this is a duplicate key,
                        // so we set last_dup_key
                        last_dup_key = primary_key;
                    }
                    else if (r != DB_NOTFOUND) {
                        // if some other error is returned, return that to the user.
                        error = r;
                    }
                }
                goto cleanup;
            }
            error = insert_rows_to_dictionaries_mult(&prim_key, &row, txn, thd);
            if (error) { goto cleanup; }
        }
        if (error == 0) {
            uint64_t full_row_size = prim_key.size + row.size;
            toku_hton_update_primary_key_bytes_inserted(full_row_size);
        }
    }

    trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
    if (!error) {
        added_rows++;
        trx->stmt_progress.inserted++;
        track_progress(thd);
    }
cleanup:
    if (!num_DBs_locked_in_bulk) {
        rw_unlock(&share->num_DBs_lock);
    }
    if (error == DB_KEYEXIST) {
        error = HA_ERR_FOUND_DUPP_KEY;
    }
    if (sub_trans) {
        // no point in recording the error value of the abort;
        // there is nothing we can do about it anyway and it is not what
        // we want to return.
        if (error) {
            abort_txn(sub_trans);
        }
        else {
            commit_txn(sub_trans, DB_TXN_NOSYNC);
        }
    }
    TOKUDB_DBUG_RETURN(error);
}
/* Compare if a key in a row has changed */
bool ha_tokudb::key_changed(uint keynr, const uchar * old_row, const uchar * new_row) {
    DBT old_key;
    DBT new_key;
    memset((void *) &old_key, 0, sizeof(old_key));
    memset((void *) &new_key, 0, sizeof(new_key));

    bool has_null;
    create_dbt_key_from_table(&new_key, keynr, key_buff2, new_row, &has_null);
    create_dbt_key_for_lookup(&old_key, &table->key_info[keynr], key_buff3, old_row, &has_null);
    return tokudb_prefix_cmp_dbt_key(share->key_file[keynr], &old_key, &new_key);
}
//
// Updates a row in the table, called when handling an UPDATE query
// Parameters:
//      [in]    old_row - row to be updated, in MySQL format
//      [in]    new_row - new row, in MySQL format
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
    TOKUDB_DBUG_ENTER("update_row");
    DBT prim_key, old_prim_key, prim_row, old_prim_row;
    int error;
    bool has_null;
    THD* thd = ha_thd();
    DB_TXN* sub_trans = NULL;
    DB_TXN* txn = NULL;
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
    uint curr_num_DBs;

    LINT_INIT(error);
    memset((void *) &prim_key, 0, sizeof(prim_key));
    memset((void *) &old_prim_key, 0, sizeof(old_prim_key));
    memset((void *) &prim_row, 0, sizeof(prim_row));
    memset((void *) &old_prim_row, 0, sizeof(old_prim_row));

    ha_statistic_increment(&SSV::ha_update_count);
#if MYSQL_VERSION_ID < 50600
    if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) {
        table->timestamp_field->set_time();
    }
#endif
    //
    // check to see if some value for the auto increment column that is
    // bigger than anything seen until now is being used. If so, update the
    // metadata to reflect it. The goal is that we never want a dup key
    // error due to a bad increment of the auto inc field.
    //
    if (share->has_auto_inc && new_row == table->record[0]) {
        pthread_mutex_lock(&share->mutex);
        ulonglong curr_auto_inc = retrieve_auto_increment(
            table->field[share->ai_field_index]->key_type(),
            field_offset(table->field[share->ai_field_index], table),
            new_row
            );
        if (curr_auto_inc > share->last_auto_increment) {
            error = update_max_auto_inc(share->status_block, curr_auto_inc);
            if (!error) {
                share->last_auto_increment = curr_auto_inc;
            }
        }
        pthread_mutex_unlock(&share->mutex);
    }

    //
    // grab reader lock on numDBs_lock
    //
    rw_rdlock(&share->num_DBs_lock);
    curr_num_DBs = share->num_DBs;

    if (using_ignore) {
        error = db_env->txn_begin(db_env, transaction, &sub_trans, DB_INHERIT_ISOLATION);
        if (error) {
            goto cleanup;
        }
    }
    txn = using_ignore ? sub_trans : transaction;

    if (hidden_primary_key) {
        memset((void *) &prim_key, 0, sizeof(prim_key));
        prim_key.data = (void *) current_ident;
        prim_key.size = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
        old_prim_key = prim_key;
    }
    else {
        create_dbt_key_from_table(&prim_key, primary_key, key_buff, new_row, &has_null);
        create_dbt_key_from_table(&old_prim_key, primary_key, primary_key_buff, old_row, &has_null);
    }

    //
    // do uniqueness checks
    //
    if (share->has_unique_keys && !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) {
        for (uint keynr = 0; keynr < table_share->keys; keynr++) {
            bool is_unique_key = table->key_info[keynr].flags & HA_NOSAME;
            if (keynr == primary_key) {
                continue;
            }
            if (is_unique_key) {
                bool key_ch = key_changed(keynr, old_row, new_row);
                if (key_ch) {
                    bool is_unique;
                    error = is_val_unique(&is_unique, new_row, &table->key_info[keynr], keynr, txn);
                    if (error) goto cleanup;
                    if (!is_unique) {
                        error = DB_KEYEXIST;
                        last_dup_key = keynr;
                        goto cleanup;
                    }
                }
            }
        }
    }

    if (table_share->blob_fields) {
        if (fix_rec_buff_for_blob(max_row_length(new_row))) {
            error = HA_ERR_OUT_OF_MEM;
            goto cleanup;
        }
        if (fix_rec_update_buff_for_blob(max_row_length(old_row))) {
            error = HA_ERR_OUT_OF_MEM;
            goto cleanup;
        }
    }

    error = pack_row(&prim_row, new_row, primary_key);
    if (error) { goto cleanup; }

    error = pack_old_row_for_update(&old_prim_row, old_row, primary_key);
    if (error) { goto cleanup; }

    set_main_dict_put_flags(thd, false, &mult_put_flags[primary_key]);

    error = db_env->update_multiple(
        db_env,
        share->key_file[primary_key],
        txn,
        &old_prim_key,
        &old_prim_row,
        &prim_key,
        &prim_row,
        curr_num_DBs,
        share->key_file,
        mult_put_flags,
        2*curr_num_DBs,
        mult_key_dbt,
        curr_num_DBs,
        mult_rec_dbt
        );

    if (error == DB_KEYEXIST) {
        last_dup_key = primary_key;
    }
    else if (!error) {
        trx->stmt_progress.updated++;
        track_progress(thd);
    }

cleanup:
    rw_unlock(&share->num_DBs_lock);
    if (error == DB_KEYEXIST) {
        error = HA_ERR_FOUND_DUPP_KEY;
    }
    if (sub_trans) {
        // no point in recording the error value of the abort;
        // there is nothing we can do about it anyway and it is not what
        // we want to return.
        if (error) {
            abort_txn(sub_trans);
        }
        else {
            commit_txn(sub_trans, DB_TXN_NOSYNC);
        }
    }
    TOKUDB_DBUG_RETURN(error);
}
//
// Deletes a row in the table, called when handling a DELETE query
// Parameters:
//      [in]    record - row to be deleted, in MySQL format
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::delete_row(const uchar * record) {
    TOKUDB_DBUG_ENTER("ha_tokudb::delete_row");
    int error = ENOSYS;
    DBT row, prim_key;
    bool has_null;
    THD* thd = ha_thd();
    uint curr_num_DBs;
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);

    ha_statistic_increment(&SSV::ha_delete_count);

    //
    // grab reader lock on numDBs_lock
    //
    rw_rdlock(&share->num_DBs_lock);
    curr_num_DBs = share->num_DBs;

    create_dbt_key_from_table(&prim_key, primary_key, key_buff, record, &has_null);
    if (table_share->blob_fields) {
        if (fix_rec_buff_for_blob(max_row_length(record))) {
            error = HA_ERR_OUT_OF_MEM;
            goto cleanup;
        }
    }
    if ((error = pack_row(&row, (const uchar *) record, primary_key))) {
        goto cleanup;
    }

    error = db_env->del_multiple(
        db_env,
        share->key_file[primary_key],
        transaction,
        &prim_key,
        &row,
        curr_num_DBs,
        share->key_file,
        mult_key_dbt,
        mult_del_flags
        );

    if (error) {
        DBUG_PRINT("error", ("Got error %d", error));
    }
    else {
        deleted_rows++;
        trx->stmt_progress.deleted++;
        track_progress(thd);
    }
cleanup:
    rw_unlock(&share->num_DBs_lock);
    TOKUDB_DBUG_RETURN(error);
}
//
// takes as input table->read_set and table->write_set
// and puts the list of field indexes that need to be read in
// unpack_row into the member variables fixed_cols_for_query
// and var_cols_for_query
//
void ha_tokudb::set_query_columns(uint keynr) {
    uint32_t curr_fixed_col_index = 0;
    uint32_t curr_var_col_index = 0;
    read_key = false;
    read_blobs = false;
    //
    // key_index is the dictionary whose key may cover some of the queried
    // fields: the clustering key itself if keynr is a clustering key,
    // otherwise the primary key.
    //
    uint key_index = 0;

    if (keynr == primary_key || keynr == MAX_KEY) {
        key_index = primary_key;
    }
    else {
        key_index = (table->key_info[keynr].flags & HA_CLUSTERING ? keynr : primary_key);
    }
    for (uint i = 0; i < table_share->fields; i++) {
        if (bitmap_is_set(table->read_set, i) ||
            bitmap_is_set(table->write_set, i)
            )
        {
            if (bitmap_is_set(&share->kc_info.key_filters[key_index], i)) {
                read_key = true;
            }
            else {
                //
                // if fixed field length
                //
                if (share->kc_info.field_lengths[i] != 0) {
                    //
                    // save the offset into the list
                    //
                    fixed_cols_for_query[curr_fixed_col_index] = i;
                    curr_fixed_col_index++;
                }
                //
                // varchar or varbinary
                //
                else if (share->kc_info.length_bytes[i] != 0) {
                    var_cols_for_query[curr_var_col_index] = i;
                    curr_var_col_index++;
                }
                //
                // it is a blob
                //
                else {
                    read_blobs = true;
                }
            }
        }
    }
    num_fixed_cols_for_query = curr_fixed_col_index;
    num_var_cols_for_query = curr_var_col_index;
}
void ha_tokudb::column_bitmaps_signal() {
    //
    // if we have the max number of indexes, then MAX_KEY == primary_key
    //
    if (tokudb_active_index != MAX_KEY || tokudb_active_index == primary_key) {
        set_query_columns(tokudb_active_index);
    }
}
//
// Notification that a scan of the entire secondary table is about
// to take place. Will pre-acquire a table read lock.
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::prepare_index_scan() {
    int error = 0;
    HANDLE_INVALID_CURSOR();
    error = prelock_range(NULL, NULL);
    if (error) { last_cursor_error = error; goto cleanup; }

    range_lock_grabbed = true;
    error = 0;
cleanup:
    return error;
}
//
// Notification that a range query retrieving all elements equal to a key
// is about to take place. Will pre-acquire a read lock on the range.
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::prepare_index_key_scan(const uchar * key, uint key_len) {
    int error = 0;
    DBT start_key, end_key;
    THD* thd = ha_thd();
    HANDLE_INVALID_CURSOR();
    pack_key(&start_key, tokudb_active_index, prelocked_left_range, key, key_len, COL_NEG_INF);
    prelocked_left_range_size = start_key.size;
    pack_key(&end_key, tokudb_active_index, prelocked_right_range, key, key_len, COL_POS_INF);
    prelocked_right_range_size = end_key.size;

    error = cursor->c_set_bounds(
        cursor,
        &start_key,
        &end_key,
        true,
        (cursor_flags & DB_SERIALIZABLE) != 0 ? DB_NOTFOUND : 0
        );

    if (error) {
        goto cleanup;
    }

    range_lock_grabbed = true;
    doing_bulk_fetch = (thd_sql_command(thd) == SQLCOM_SELECT);
    bulk_fetch_iteration = 0;
    rows_fetched_using_bulk_fetch = 0;
    error = 0;
cleanup:
    if (error) {
        if (error == DB_LOCK_NOTGRANTED) {
            error = HA_ERR_LOCK_WAIT_TIMEOUT;
        }
        last_cursor_error = error;
        //
        // cursor should be initialized here, but in case it is not,
        // we still check
        //
        if (cursor) {
            int r = cursor->c_close(cursor);
            assert(r == 0);
            cursor = NULL;
        }
    }
    return error;
}
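//
// The bounds set above describe the key range that matches the given key
// prefix: the left bound packs the key with COL_NEG_INF (infimum) and the
// right bound with COL_POS_INF (supremum), so for an equality scan on a key
// prefix K the prelocked range is, informally:
//
//   [ K . -infinity , K . +infinity ]
//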
void ha_tokudb::invalidate_bulk_fetch() {
    bytes_used_in_range_query_buff = 0;
    curr_range_query_buff_offset = 0;
    icp_went_out_of_range = false;
}

void ha_tokudb::invalidate_icp() {
    toku_pushed_idx_cond = NULL;
    toku_pushed_idx_cond_keyno = MAX_KEY;
    icp_went_out_of_range = false;
}
volatile int ha_tokudb_index_init_wait = 0; // debug

//
// Initializes local cursor on DB with index keynr
// Parameters:
//          keynr - key (index) number
//          sorted - 1 if result MUST be sorted according to index
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::index_init(uint keynr, bool sorted) {
    TOKUDB_DBUG_ENTER("ha_tokudb::index_init %p %d", this, keynr);
    while (ha_tokudb_index_init_wait) sleep(1); // debug

    int error;
    THD* thd = ha_thd();
    DBUG_PRINT("enter", ("table: '%s' key: %d", table_share->table_name.str, keynr));

    /*
      Under some very rare conditions (like full joins) we may already have
      an active cursor at this point
    */
    if (cursor) {
        DBUG_PRINT("note", ("Closing active cursor"));
        int r = cursor->c_close(cursor);
        assert(r == 0);
    }
    active_index = keynr;

    if (active_index < MAX_KEY) {
        DBUG_ASSERT(keynr <= table->s->keys);
    } else {
        DBUG_ASSERT(active_index == MAX_KEY);
        keynr = primary_key;
    }
    tokudb_active_index = keynr;

    last_cursor_error = 0;
    range_lock_grabbed = false;
    DBUG_ASSERT(share->key_file[keynr]);
    cursor_flags = get_cursor_isolation_flags(lock.type, thd);
    if (use_write_locks) {
        cursor_flags |= DB_RMW;
    }
    if (get_disable_prefetching(thd)) {
        cursor_flags |= DBC_DISABLE_PREFETCHING;
    }
    if ((error = share->key_file[keynr]->cursor(share->key_file[keynr], transaction, &cursor, cursor_flags))) {
        if (error == TOKUDB_MVCC_DICTIONARY_TOO_NEW) {
            error = HA_ERR_TABLE_DEF_CHANGED;
            my_error(ER_TABLE_DEF_CHANGED, MYF(0));
        }
        if (error == DB_LOCK_NOTGRANTED) {
            error = HA_ERR_LOCK_WAIT_TIMEOUT;
            my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
        }
        table->status = STATUS_NOT_FOUND;
        last_cursor_error = error;
        cursor = NULL; // Safety
        goto exit;
    }
    memset((void *) &last_key, 0, sizeof(last_key));

    if (thd_sql_command(thd) == SQLCOM_SELECT) {
        set_query_columns(keynr);
        unpack_entire_row = false;
    }
    else {
        unpack_entire_row = true;
    }
    invalidate_bulk_fetch();
    doing_bulk_fetch = false;
    error = 0;
exit:
    TOKUDB_DBUG_RETURN(error);
}
//
// closes the local cursor
//
int ha_tokudb::index_end() {
    TOKUDB_DBUG_ENTER("ha_tokudb::index_end %p", this);
    range_lock_grabbed = false;
    if (cursor) {
        DBUG_PRINT("enter", ("table: '%s'", table_share->table_name.str));
        int r = cursor->c_close(cursor);
        assert(r == 0);
        cursor = NULL;
        last_cursor_error = 0;
    }
    active_index = tokudb_active_index = MAX_KEY;

    //
    // reset query variables
    //
    unpack_entire_row = true;
    read_blobs = true;
    read_key = true;
    num_fixed_cols_for_query = 0;
    num_var_cols_for_query = 0;

    invalidate_bulk_fetch();
    invalidate_icp();
    doing_bulk_fetch = false;
    close_dsmrr();

    TOKUDB_DBUG_RETURN(0);
}
int ha_tokudb::handle_cursor_error(int error, int err_to_return, uint keynr) {
    TOKUDB_DBUG_ENTER("ha_tokudb::handle_cursor_error");
    if (error) {
        if (error == DB_LOCK_NOTGRANTED) {
            error = HA_ERR_LOCK_WAIT_TIMEOUT;
        }
        last_cursor_error = error;
        table->status = STATUS_NOT_FOUND;
        if (error == DB_NOTFOUND) {
            error = err_to_return;
        }
    }
    TOKUDB_DBUG_RETURN(error);
}
//
// Helper function for read_row and smart_dbt_callback_xxx functions
// When using a hidden primary key, upon reading a row,
// we set the current_ident field to whatever primary key we retrieved
//
void ha_tokudb::extract_hidden_primary_key(uint keynr, DBT const *found_key) {
    //
    // extract hidden primary key to current_ident
    //
    if (hidden_primary_key) {
        if (keynr == primary_key) {
            memcpy_fixed(current_ident, (char *) found_key->data, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
        }
        //
        // if secondary key, hidden primary key is at end of found_key
        //
        else {
            memcpy_fixed(
                current_ident,
                (char *) found_key->data + found_key->size - TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH,
                TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH
                );
        }
    }
}
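//
// Key layouts assumed by the extraction above (illustrative sketch, derived
// from the two memcpy_fixed calls):
//
//   main dictionary key:  [ hidden pk (TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH) ]
//   secondary index key:  [ packed secondary key fields | hidden pk ]
//
// so for a secondary key the hidden pk is read from the tail of found_key.
//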
int ha_tokudb::read_row_callback(uchar * buf, uint keynr, DBT const *row, DBT const *found_key) {
    assert(keynr == primary_key);
    return unpack_row(buf, row, found_key, keynr);
}
//
// Reads the contents of found_key, a DBT retrieved from the DB associated
// with keynr, into buf. This function assumes that we are using a covering
// index; as a result, if keynr is the primary key, we do not read a row
// into buf.
// Parameters:
//      [out]   buf - buffer for the row, in MySQL format
//              keynr - index into key_file that represents DB we are currently operating on.
//      [in]    found_key - key used to retrieve the row
//
void ha_tokudb::read_key_only(uchar * buf, uint keynr, DBT const *found_key) {
    TOKUDB_DBUG_ENTER("ha_tokudb::read_key_only");
    table->status = 0;
    //
    // the only case when we do not unpack the key is if we are dealing with the main dictionary
    // of a table with a hidden primary key
    //
    if (!(hidden_primary_key && keynr == primary_key)) {
        unpack_key(buf, found_key, keynr);
    }
    DBUG_VOID_RETURN;
}
//
// Helper function used to try to retrieve the entire row
// If keynr is associated with the main table, reads contents of found_key and row into buf, otherwise,
// makes a copy of the primary key and saves it to last_key. This can later be used to retrieve the entire row
// Parameters:
//      [out]   buf - buffer for the row, in MySQL format
//              keynr - index into key_file that represents DB we are currently operating on.
//      [in]    row - the row that has been read from the preceding DB call
//      [in]    found_key - key used to retrieve the row
//
int ha_tokudb::read_primary_key(uchar * buf, uint keynr, DBT const *row, DBT const *found_key) {
    TOKUDB_DBUG_ENTER("ha_tokudb::read_primary_key");
    int error = 0;
    table->status = 0;
    //
    // case where we read from a secondary table that is not clustered
    //
    if (keynr != primary_key && !(table->key_info[keynr].flags & HA_CLUSTERING)) {
        bool has_null;
        //
        // create a DBT that has the same data as row; this is inefficient.
        // extract_hidden_primary_key MUST have been called before this
        //
        memset((void *) &last_key, 0, sizeof(last_key));
        if (!hidden_primary_key) {
            unpack_key(buf, found_key, keynr);
        }
        create_dbt_key_from_table(
            &last_key,
            primary_key,
            key_buff,
            buf,
            &has_null
            );
    }
    //
    // else read from clustered/primary key
    //
    else {
        error = unpack_row(buf, row, found_key, keynr);
        if (error) { goto exit; }
    }
    if (found_key) { DBUG_DUMP("read row key", (uchar *) found_key->data, found_key->size); }
    error = 0;
exit:
    TOKUDB_DBUG_RETURN(error);
}
//
// This function reads an entire row into buf. It assumes that
// the key needed to retrieve the row is stored in the member variable last_key
// Parameters:
//      [out]   buf - buffer for the row, in MySQL format
// Returns:
//      0 on success, error otherwise
//
int ha_tokudb::read_full_row(uchar * buf) {
    TOKUDB_DBUG_ENTER("ha_tokudb::read_full_row");
    int error = 0;
    struct smart_dbt_info info;
    info.ha = this;
    info.buf = buf;
    info.keynr = primary_key;
    //
    // assumes key is stored in this->last_key
    //
    error = share->file->getf_set(
        share->file,
        transaction,
        cursor_flags,
        &last_key,
        smart_dbt_callback_rowread_ptquery,
        &info
        );
    if (error) {
        if (error == DB_LOCK_NOTGRANTED) {
            error = HA_ERR_LOCK_WAIT_TIMEOUT;
        }
        table->status = STATUS_NOT_FOUND;
        TOKUDB_DBUG_RETURN(error == DB_NOTFOUND ? HA_ERR_CRASHED : error);
    }
    TOKUDB_DBUG_RETURN(error);
}
//
// Reads the next row matching the key, and on success advances the cursor
// Parameters:
//      [out]   buf - buffer for the next row, in MySQL format
//      [in]    key - key value
//              keylen - length of key
// Returns:
//      0 on success
//      HA_ERR_END_OF_FILE if not found
//      error otherwise
//
int ha_tokudb::index_next_same(uchar * buf, const uchar * key, uint keylen) {
    TOKUDB_DBUG_ENTER("ha_tokudb::index_next_same");
    ha_statistic_increment(&SSV::ha_read_next_count);

    DBT curr_key;
    DBT found_key;
    bool has_null;
    int cmp;
    // create the key that will be used to compare with what is found
    // in order to figure out if we should return an error
    pack_key(&curr_key, tokudb_active_index, key_buff2, key, keylen, COL_ZERO);
    int error = get_next(buf, 1, &curr_key);
    if (error) {
        goto cleanup;
    }
    //
    // now do the comparison
    //
    create_dbt_key_from_table(&found_key, tokudb_active_index, key_buff3, buf, &has_null);
    cmp = tokudb_prefix_cmp_dbt_key(share->key_file[tokudb_active_index], &curr_key, &found_key);
    if (cmp) {
        error = HA_ERR_END_OF_FILE;
    }

cleanup:
    TOKUDB_DBUG_RETURN(error);
}
//
// According to the InnoDB handlerton: Positions an index cursor to the index
// specified in keynr. Fetches the row if any
// Parameters:
//      [out]   buf - buffer for the returned row
//      [in]    key - key value; according to InnoDB, if NULL,
//                    position cursor at start or end of index,
//                    not sure if this is done now
//              key_len - length of key
//              find_flag - according to InnoDB, search flags from my_base.h
// Returns:
//      0 on success
//      HA_ERR_KEY_NOT_FOUND if not found (per InnoDB);
//          we seem to return HA_ERR_END_OF_FILE if find_flag != HA_READ_KEY_EXACT
//          TODO: investigate this for correctness
//      error otherwise
//
int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_rkey_function find_flag) {
    TOKUDB_DBUG_ENTER("ha_tokudb::index_read %p find %d", this, find_flag);
    invalidate_bulk_fetch();
    // TOKUDB_DBUG_DUMP("key=", key, key_len);
    DBT row;
    DBT lookup_key;
    int error = 0;
    uint32_t flags = 0;
    THD* thd = ha_thd();
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
    struct smart_dbt_info info;
    struct index_read_info ir_info;

    HANDLE_INVALID_CURSOR();

    ha_statistic_increment(&SSV::ha_read_key_count);
    memset((void *) &row, 0, sizeof(row));

    info.ha = this;
    info.buf = buf;
    info.keynr = tokudb_active_index;

    ir_info.smart_dbt_info = info;
    ir_info.cmp = 0;

    flags = SET_PRELOCK_FLAG(0);
    switch (find_flag) {
    case HA_READ_KEY_EXACT: /* Find first record else error */
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_NEG_INF);
        ir_info.orig_key = &lookup_key;
        error = cursor->c_getf_set_range(cursor, flags,
                &lookup_key, SMART_DBT_IR_CALLBACK, &ir_info);
        if (ir_info.cmp) {
            error = DB_NOTFOUND;
        }
        break;
    case HA_READ_AFTER_KEY: /* Find next rec. after key-record */
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_POS_INF);
        error = cursor->c_getf_set_range(cursor, flags,
                &lookup_key, SMART_DBT_CALLBACK, &info);
        break;
    case HA_READ_BEFORE_KEY: /* Find next rec. before key-record */
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_NEG_INF);
        error = cursor->c_getf_set_range_reverse(cursor, flags,
                &lookup_key, SMART_DBT_CALLBACK, &info);
        break;
    case HA_READ_KEY_OR_NEXT: /* Record or next record */
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_NEG_INF);
        error = cursor->c_getf_set_range(cursor, flags,
                &lookup_key, SMART_DBT_CALLBACK, &info);
        break;
    //
    // This case does not seem to ever be used, it is ok for it to be slow
    //
    case HA_READ_KEY_OR_PREV: /* Record or previous */
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_NEG_INF);
        ir_info.orig_key = &lookup_key;
        error = cursor->c_getf_set_range(cursor, flags,
                &lookup_key, SMART_DBT_IR_CALLBACK, &ir_info);
        if (error == DB_NOTFOUND) {
            error = cursor->c_getf_last(cursor, flags, SMART_DBT_CALLBACK, &info);
        }
        else if (ir_info.cmp) {
            error = cursor->c_getf_prev(cursor, flags, SMART_DBT_CALLBACK, &info);
        }
        break;
    case HA_READ_PREFIX_LAST_OR_PREV: /* Last or prev key with the same prefix */
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_POS_INF);
        error = cursor->c_getf_set_range_reverse(cursor, flags,
                &lookup_key, SMART_DBT_CALLBACK, &info);
        break;
    case HA_READ_PREFIX_LAST:
        pack_key(&lookup_key, tokudb_active_index, key_buff3, key, key_len, COL_POS_INF);
        ir_info.orig_key = &lookup_key;
        error = cursor->c_getf_set_range_reverse(cursor, flags, &lookup_key, SMART_DBT_IR_CALLBACK, &ir_info);
        if (ir_info.cmp) {
            error = DB_NOTFOUND;
        }
        break;
    default:
        TOKUDB_TRACE("unsupported:%d\n", find_flag);
        error = HA_ERR_UNSUPPORTED;
        break;
    }
    error = handle_cursor_error(error, HA_ERR_KEY_NOT_FOUND, tokudb_active_index);
    if (!error && !key_read && tokudb_active_index != primary_key && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING)) {
        error = read_full_row(buf);
    }

    if (error && (tokudb_debug & TOKUDB_DEBUG_ERROR)) {
        TOKUDB_TRACE("error:%d:%d\n", error, find_flag);
    }
    trx->stmt_progress.queried++;
    track_progress(thd);

cleanup:
    TOKUDB_DBUG_RETURN(error);
}
int ha_tokudb::read_data_from_range_query_buff(uchar* buf, bool need_val) {
    // buffer has the next row, get it from there
    int error;
    uchar* curr_pos = range_query_buff + curr_range_query_buff_offset;
    DBT curr_key;
    memset((void *) &curr_key, 0, sizeof(curr_key));

    // get key info
    uint32_t key_size = *(uint32_t *)curr_pos;
    curr_pos += sizeof(key_size);
    uchar* curr_key_buff = curr_pos;
    curr_pos += key_size;

    curr_key.data = curr_key_buff;
    curr_key.size = key_size;

    // if this is a covering index, this is all we need
    if (this->key_read) {
        assert(!need_val);
        extract_hidden_primary_key(tokudb_active_index, &curr_key);
        read_key_only(buf, tokudb_active_index, &curr_key);
        error = 0;
    }
    // we need to get more data
    else {
        DBT curr_val;
        memset((void *) &curr_val, 0, sizeof(curr_val));
        uchar* curr_val_buff = NULL;
        uint32_t val_size = 0;
        // in this case, we don't have a val, we are simply extracting the pk
        if (!need_val) {
            curr_val.data = curr_val_buff;
            curr_val.size = val_size;
            extract_hidden_primary_key(tokudb_active_index, &curr_key);
            error = read_primary_key(buf, tokudb_active_index, &curr_val, &curr_key);
        }
        else {
            extract_hidden_primary_key(tokudb_active_index, &curr_key);
            // need to extract a val and place it into buf
            if (unpack_entire_row) {
                // get val info
                val_size = *(uint32_t *)curr_pos;
                curr_pos += sizeof(val_size);
                curr_val_buff = curr_pos;
                curr_pos += val_size;
                curr_val.data = curr_val_buff;
                curr_val.size = val_size;
                error = unpack_row(buf, &curr_val, &curr_key, tokudb_active_index);
            }
            else {
                if (!(hidden_primary_key && tokudb_active_index == primary_key)) {
                    unpack_key(buf, &curr_key, tokudb_active_index);
                }
                // read the fields we care about:
                // first the null bytes
                memcpy(buf, curr_pos, table_share->null_bytes);
                curr_pos += table_share->null_bytes;

                // now the fixed sized fields
                for (uint32_t i = 0; i < num_fixed_cols_for_query; i++) {
                    uint field_index = fixed_cols_for_query[i];
                    Field* field = table->field[field_index];
                    unpack_fixed_field(
                        buf + field_offset(field, table),
                        curr_pos,
                        share->kc_info.field_lengths[field_index]
                        );
                    curr_pos += share->kc_info.field_lengths[field_index];
                }
                // now the variable sized fields
                for (uint32_t i = 0; i < num_var_cols_for_query; i++) {
                    uint field_index = var_cols_for_query[i];
                    Field* field = table->field[field_index];
                    uint32_t field_len = *(uint32_t *)curr_pos;
                    curr_pos += sizeof(field_len);
                    unpack_var_field(
                        buf + field_offset(field, table),
                        curr_pos,
                        field_len,
                        share->kc_info.length_bytes[field_index]
                        );
                    curr_pos += field_len;
                }
                // now the blobs
                if (read_blobs) {
                    uint32_t blob_size = *(uint32_t *)curr_pos;
                    curr_pos += sizeof(blob_size);
                    error = unpack_blobs(
                        buf,
                        curr_pos,
                        blob_size,
                        true
                        );
                    curr_pos += blob_size;
                    if (error) {
                        invalidate_bulk_fetch();
                        goto exit;
                    }
                }
                error = 0;
            }
        }
    }

    curr_range_query_buff_offset = curr_pos - range_query_buff;
exit:
    return error;
}
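//
// Layout of one entry in range_query_buff, as written by
// fill_range_query_buf() below and consumed above (all lengths are
// uint32_t; sketch derived from the two functions):
//
//   covering index:      [ key_size | key ]
//   entire row needed:   [ key_size | key | val_size | val ]
//   partial row needed:  [ key_size | key | null bytes
//                          | fixed fields in query order
//                          | (len | data) per var field
//                          | blob_size | blob data (if read_blobs) ]
//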
static int
smart_dbt_bf_callback(DBT const *key, DBT const *row, void *context) {
    SMART_DBT_BF_INFO info = (SMART_DBT_BF_INFO)context;
    return info->ha->fill_range_query_buf(info->need_val, key, row, info->direction, info->thd, info->buf, info->key_to_compare);
}
#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
enum icp_result ha_tokudb::toku_handler_index_cond_check(Item* pushed_idx_cond)
{
    enum icp_result res;
    if (end_range) {
        int cmp;
#ifdef MARIADB_BASE_VERSION
        cmp = compare_key2(end_range);
#else
        cmp = compare_key_icp(end_range);
#endif
        if (cmp > 0) {
            return ICP_OUT_OF_RANGE;
        }
    }
    res = pushed_idx_cond->val_int() ? ICP_MATCH : ICP_NO_MATCH;
    return res;
}
#endif
// fill in the range query buf for bulk fetch
int ha_tokudb::fill_range_query_buf(
    bool need_val,
    DBT const *key,
    DBT const *row,
    int direction,
    THD* thd,
    uchar* buf,
    DBT* key_to_compare
    ) {
    int error;
    //
    // first put the value into range_query_buf
    //
    uint32_t size_remaining = size_range_query_buff - bytes_used_in_range_query_buff;
    uint32_t size_needed;
    uint32_t user_defined_size = get_tokudb_read_buf_size(thd);
    uchar* curr_pos = NULL;

    if (key_to_compare) {
        int cmp = tokudb_prefix_cmp_dbt_key(
            share->key_file[tokudb_active_index],
            key_to_compare,
            key
            );
        if (cmp) {
            icp_went_out_of_range = true;
            error = 0;
            goto cleanup;
        }
    }

#if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
    // if we have an index condition pushed down, we check it
    if (toku_pushed_idx_cond && (tokudb_active_index == toku_pushed_idx_cond_keyno)) {
        unpack_key(buf, key, tokudb_active_index);
        enum icp_result result = toku_handler_index_cond_check(toku_pushed_idx_cond);
        // If we have reason to stop, we set icp_went_out_of_range and get out
        if (result == ICP_OUT_OF_RANGE || thd_killed(thd)) {
            icp_went_out_of_range = true;
            error = 0;
            goto cleanup;
        }
        // otherwise, if we simply see that the current key is no match,
        // we tell the cursor to continue and don't store
        // the key locally
        else if (result == ICP_NO_MATCH) {
            error = TOKUDB_CURSOR_CONTINUE;
            goto cleanup;
        }
    }
#endif

    // at this point, if ICP is on, we have verified that the key is one
    // we are interested in, so we proceed with placing the data
    // into the range query buffer
    if (need_val) {
        if (unpack_entire_row) {
            size_needed = 2*sizeof(uint32_t) + key->size + row->size;
        }
        else {
            // this is an upper bound
            size_needed = sizeof(uint32_t) + // size of key length
                key->size + row->size + // key and row
                num_var_cols_for_query*(sizeof(uint32_t)) + // lengths of varchars stored
                sizeof(uint32_t); // length of blobs
        }
    }
    else {
        size_needed = sizeof(uint32_t) + key->size;
    }
    if (size_remaining < size_needed) {
        range_query_buff = (uchar *)my_realloc(
            (void *)range_query_buff,
            bytes_used_in_range_query_buff + size_needed,
            MYF(MY_WME)
            );
        if (range_query_buff == NULL) {
            error = ENOMEM;
            invalidate_bulk_fetch();
            goto cleanup;
        }
        size_range_query_buff = bytes_used_in_range_query_buff + size_needed;
    }
    //
    // now we know we have the size, let's fill the buffer, starting with the key
    //
    curr_pos = range_query_buff + bytes_used_in_range_query_buff;

    *(uint32_t *)curr_pos = key->size;
    curr_pos += sizeof(uint32_t);
    memcpy(curr_pos, key->data, key->size);
    curr_pos += key->size;
    if (need_val) {
        if (unpack_entire_row) {
            *(uint32_t *)curr_pos = row->size;
            curr_pos += sizeof(uint32_t);
            memcpy(curr_pos, row->data, row->size);
            curr_pos += row->size;
        }
        else {
            // need to unpack just the data we care about
            const uchar* fixed_field_ptr = (const uchar *) row->data;
            fixed_field_ptr += table_share->null_bytes;

            const uchar* var_field_offset_ptr = NULL;
            const uchar* var_field_data_ptr = NULL;

            var_field_offset_ptr = fixed_field_ptr + share->kc_info.mcp_info[tokudb_active_index].fixed_field_size;
            var_field_data_ptr = var_field_offset_ptr + share->kc_info.mcp_info[tokudb_active_index].len_of_offsets;

            // first the null bytes
            memcpy(curr_pos, row->data, table_share->null_bytes);
            curr_pos += table_share->null_bytes;
            //
            // now the fixed fields
            //
            for (uint32_t i = 0; i < num_fixed_cols_for_query; i++) {
                uint field_index = fixed_cols_for_query[i];
                memcpy(
                    curr_pos,
                    fixed_field_ptr + share->kc_info.cp_info[tokudb_active_index][field_index].col_pack_val,
                    share->kc_info.field_lengths[field_index]
                    );
                curr_pos += share->kc_info.field_lengths[field_index];
            }
            //
            // now the var fields
            //
            for (uint32_t i = 0; i < num_var_cols_for_query; i++) {
                uint field_index = var_cols_for_query[i];
                uint32_t var_field_index = share->kc_info.cp_info[tokudb_active_index][field_index].col_pack_val;
                uint32_t data_start_offset;
                uint32_t field_len;

                get_var_field_info(
                    &field_len,
                    &data_start_offset,
                    var_field_index,
                    var_field_offset_ptr,
                    share->kc_info.num_offset_bytes
                    );

                memcpy(curr_pos, &field_len, sizeof(field_len));
                curr_pos += sizeof(field_len);
                memcpy(curr_pos, var_field_data_ptr + data_start_offset, field_len);
                curr_pos += field_len;
            }

            if (read_blobs) {
                uint32_t blob_offset = 0;
                uint32_t data_size = 0;
                //
                // now the blobs
                //
                get_blob_field_info(
                    &blob_offset,
                    share->kc_info.mcp_info[tokudb_active_index].len_of_offsets,
                    var_field_data_ptr,
                    share->kc_info.num_offset_bytes
                    );
                data_size = row->size - blob_offset - (uint32_t)(var_field_data_ptr - (const uchar *)row->data);
                memcpy(curr_pos, &data_size, sizeof(data_size));
                curr_pos += sizeof(data_size);
                memcpy(curr_pos, var_field_data_ptr + blob_offset, data_size);
                curr_pos += data_size;
            }
        }
    }

    bytes_used_in_range_query_buff = curr_pos - range_query_buff;
    assert(bytes_used_in_range_query_buff <= size_range_query_buff);

    //
    // now determine if we should continue with the bulk fetch.
    // we want to stop under these conditions:
    //  - we overran the prelocked range
    //  - we are close to the end of the buffer
    //  - we have fetched an exponential amount of rows with
    //    respect to the bulk fetch iteration, which is initialized
    //    to 0 in index_init() and prelock_range().
    rows_fetched_using_bulk_fetch++;
    // if the iteration is less than the number of possible shifts on
    // a 64 bit integer, check that we haven't exceeded this iteration's
    // row fetch upper bound.
    if (bulk_fetch_iteration < HA_TOKU_BULK_FETCH_ITERATION_MAX) {
        uint64_t row_fetch_upper_bound = 1LLU << bulk_fetch_iteration;
        assert(row_fetch_upper_bound > 0);
        if (rows_fetched_using_bulk_fetch >= row_fetch_upper_bound) {
            error = 0;
            goto cleanup;
        }
    }

    if (bytes_used_in_range_query_buff + table_share->rec_buff_length > user_defined_size) {
        error = 0;
        goto cleanup;
    }
    if (direction > 0) {
        // compare what we got to the right endpoint of prelocked range
        // because we are searching keys in ascending order
        if (prelocked_right_range_size == 0) {
            error = TOKUDB_CURSOR_CONTINUE;
            goto cleanup;
        }
        DBT right_range;
        memset(&right_range, 0, sizeof(right_range));
        right_range.size = prelocked_right_range_size;
        right_range.data = prelocked_right_range;
        int cmp = tokudb_cmp_dbt_key(
            share->key_file[tokudb_active_index],
            key,
            &right_range
            );
        error = (cmp > 0) ? 0 : TOKUDB_CURSOR_CONTINUE;
    }
    else {
        // compare what we got to the left endpoint of prelocked range
        // because we are searching keys in descending order
        if (prelocked_left_range_size == 0) {
            error = TOKUDB_CURSOR_CONTINUE;
            goto cleanup;
        }
        DBT left_range;
        memset(&left_range, 0, sizeof(left_range));
        left_range.size = prelocked_left_range_size;
        left_range.data = prelocked_left_range;
        int cmp = tokudb_cmp_dbt_key(
            share->key_file[tokudb_active_index],
            key,
            &left_range
            );
        error = (cmp < 0) ? 0 : TOKUDB_CURSOR_CONTINUE;
    }

cleanup:
    return error;
}
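//
// Worked example of the bulk fetch growth schedule above. The per-batch row
// cap follows directly from 1LLU << bulk_fetch_iteration, and the iteration
// counter is bumped once per batch in get_next() below, so successive
// batches may grow like this (until HA_TOKU_BULK_FETCH_ITERATION_MAX or the
// read buffer size limit is hit, whichever comes first):
//
//   iteration 0 -> at most 1 row in the batch
//   iteration 1 -> at most 2 rows
//   iteration 2 -> at most 4 rows
//   ...
//   iteration k -> at most 2^k rows
//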
int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare) {
    int error = 0;
    uint32_t flags = SET_PRELOCK_FLAG(0);
    THD* thd = ha_thd();
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
    bool need_val;
    HANDLE_INVALID_CURSOR();

    // we need to read the val of what we retrieve if
    // we do NOT have a covering index AND we are reading the primary key
    // or a clustering secondary key
    need_val = (this->key_read == 0) &&
        (tokudb_active_index == primary_key ||
         table->key_info[tokudb_active_index].flags & HA_CLUSTERING
        );

    if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0) {
        error = read_data_from_range_query_buff(buf, need_val);
    }
    else if (icp_went_out_of_range) {
        icp_went_out_of_range = false;
        error = HA_ERR_END_OF_FILE;
    }
    else {
        invalidate_bulk_fetch();
        if (doing_bulk_fetch) {
            struct smart_dbt_bf_info bf_info;
            bf_info.ha = this;
            bf_info.direction = direction;
            bf_info.thd = ha_thd();
            bf_info.need_val = need_val;
            bf_info.buf = buf;
            bf_info.key_to_compare = key_to_compare;
            //
            // call c_getf_next with the purpose of filling in range_query_buff
            //
            rows_fetched_using_bulk_fetch = 0;
            // it is expected that we can do ICP in the smart_dbt_bf_callback;
            // as a result, it's possible we don't return any data because
            // none of the rows matched the index condition. Therefore, we need
            // this while loop. icp_went_out_of_range will be set if we hit a row
            // that the index condition states is out of our range. When that hits,
            // we know all the data in the buffer is the last data we will retrieve
            while (bytes_used_in_range_query_buff == 0 && !icp_went_out_of_range && error == 0) {
                if (direction > 0) {
                    error = cursor->c_getf_next(cursor, flags, smart_dbt_bf_callback, &bf_info);
                } else {
                    error = cursor->c_getf_prev(cursor, flags, smart_dbt_bf_callback, &bf_info);
                }
            }
            // if there is no data set and we went out of range,
            // then there is nothing to return
            if (bytes_used_in_range_query_buff == 0 && icp_went_out_of_range) {
                icp_went_out_of_range = false;
                error = HA_ERR_END_OF_FILE;
            }
            if (bulk_fetch_iteration < HA_TOKU_BULK_FETCH_ITERATION_MAX) {
                bulk_fetch_iteration++;
            }
            error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index);
            if (error) { goto cleanup; }

            //
            // now that range_query_buff is filled, read an element
            //
            error = read_data_from_range_query_buff(buf, need_val);
        }
        else {
            struct smart_dbt_info info;
            info.ha = this;
            info.buf = buf;
            info.keynr = tokudb_active_index;

            if (direction > 0) {
                error = cursor->c_getf_next(cursor, flags, SMART_DBT_CALLBACK, &info);
            } else {
                error = cursor->c_getf_prev(cursor, flags, SMART_DBT_CALLBACK, &info);
            }
            error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index);
        }
    }

    //
    // at this point, one of two things has happened:
    // either we have unpacked the data into buf, and we
    // are done, or we have unpacked the primary key
    // into last_key, and we use the code below to
    // read the full row by doing a point query into the
    // main table.
    //
    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING)) {
        error = read_full_row(buf);
    }
    trx->stmt_progress.queried++;
    track_progress(thd);
cleanup:
    return error;
}
//
// Reads the next row from the active index (cursor) into buf, and advances cursor
// Parameters:
//      [out]   buf - buffer for the next row, in MySQL format
// Returns:
//      0 on success
//      HA_ERR_END_OF_FILE if not found
//      error otherwise
//
int ha_tokudb::index_next(uchar * buf) {
    TOKUDB_DBUG_ENTER("ha_tokudb::index_next");
    ha_statistic_increment(&SSV::ha_read_next_count);
    int error = get_next(buf, 1, NULL);
    TOKUDB_DBUG_RETURN(error);
}

int ha_tokudb::index_read_last(uchar * buf, const uchar * key, uint key_len) {
    return index_read(buf, key, key_len, HA_READ_PREFIX_LAST);
}

//
// Reads the previous row from the active index (cursor) into buf, and moves the cursor back
// Parameters:
//      [out]   buf - buffer for the previous row, in MySQL format
// Returns:
//      0 on success
//      HA_ERR_END_OF_FILE if not found
//      error otherwise
//
int ha_tokudb::index_prev(uchar * buf) {
    TOKUDB_DBUG_ENTER("ha_tokudb::index_prev");
    ha_statistic_increment(&SSV::ha_read_prev_count);
    int error = get_next(buf, -1, NULL);
    TOKUDB_DBUG_RETURN(error);
}

//
// Reads the first row from the active index (cursor) into buf, and advances cursor
// Parameters:
//      [out]   buf - buffer for the first row, in MySQL format
// Returns:
//      0 on success
//      HA_ERR_END_OF_FILE if not found
//      error otherwise
//
int ha_tokudb::index_first(uchar * buf) {
    TOKUDB_DBUG_ENTER("ha_tokudb::index_first");
    invalidate_bulk_fetch();
    int error = 0;
    struct smart_dbt_info info;
    uint32_t flags = SET_PRELOCK_FLAG(0);
    THD* thd = ha_thd();
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
    HANDLE_INVALID_CURSOR();
    ha_statistic_increment(&SSV::ha_read_first_count);
    info.ha = this;
    info.buf = buf;
    info.keynr = tokudb_active_index;
    error = cursor->c_getf_first(cursor, flags, SMART_DBT_CALLBACK, &info);
    error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index);
    //
    // still need to get entire contents of the row if operation done on
    // secondary DB and it was NOT a covering index
    //
    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING)) {
        error = read_full_row(buf);
    }
    trx->stmt_progress.queried++;
    track_progress(thd);
cleanup:
    TOKUDB_DBUG_RETURN(error);
}

//
// Reads the last row from the active index (cursor) into buf, and positions the cursor at it
// Parameters:
//      [out]   buf - buffer for the last row, in MySQL format
// Returns:
//      0 on success
//      HA_ERR_END_OF_FILE if not found
//      error otherwise
//
int ha_tokudb::index_last(uchar * buf) {
    TOKUDB_DBUG_ENTER("ha_tokudb::index_last");
    invalidate_bulk_fetch();
    int error = 0;
    struct smart_dbt_info info;
    uint32_t flags = SET_PRELOCK_FLAG(0);
    THD* thd = ha_thd();
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
    HANDLE_INVALID_CURSOR();
    ha_statistic_increment(&SSV::ha_read_last_count);
    info.ha = this;
    info.buf = buf;
    info.keynr = tokudb_active_index;
    error = cursor->c_getf_last(cursor, flags, SMART_DBT_CALLBACK, &info);
    error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index);
    //
    // still need to get entire contents of the row if operation done on
    // secondary DB and it was NOT a covering index
    //
    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING)) {
        error = read_full_row(buf);
    }
    if (trx) {
        trx->stmt_progress.queried++;
    }
    track_progress(thd);
cleanup:
    TOKUDB_DBUG_RETURN(error);
}

//
// Initialize a scan of the table (which is why index_init is called on primary_key)
// Parameters:
//      scan - true if a full table scan is being started; a range lock
//             covering the whole table is then pre-acquired
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::rnd_init(bool scan) {
    TOKUDB_DBUG_ENTER("ha_tokudb::rnd_init");
    int error = 0;
    range_lock_grabbed = false;
    error = index_init(MAX_KEY, 0);
    if (error) { goto cleanup; }
    if (scan) {
        error = prelock_range(NULL, NULL);
        if (error) { goto cleanup; }
    }
    //
    // only want to set range_lock_grabbed to true after index_init
    // successfully executed for two reasons:
    // 1) index_init will reset it to false anyway
    // 2) if it fails, we don't want prelocking on
    //
    if (scan) { range_lock_grabbed = true; }
    error = 0;
cleanup:
    if (error) {
        index_end();
        last_cursor_error = error;
    }
    TOKUDB_DBUG_RETURN(error);
}

//
// End a scan of the table
//
int ha_tokudb::rnd_end() {
    TOKUDB_DBUG_ENTER("ha_tokudb::rnd_end");
    range_lock_grabbed = false;
    TOKUDB_DBUG_RETURN(index_end());
}

//
// Read the next row in a table scan
// Parameters:
//      [out]   buf - buffer for the next row, in MySQL format
// Returns:
//      0 on success
//      HA_ERR_END_OF_FILE if not found
//      error otherwise
//
int ha_tokudb::rnd_next(uchar * buf) {
    TOKUDB_DBUG_ENTER("ha_tokudb::rnd_next");
    ha_statistic_increment(&SSV::ha_read_rnd_next_count);
    int error = get_next(buf, 1, NULL);
    TOKUDB_DBUG_RETURN(error);
}

void ha_tokudb::track_progress(THD* thd) {
    tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
    if (trx) {
        ulonglong num_written = trx->stmt_progress.inserted + trx->stmt_progress.updated + trx->stmt_progress.deleted;
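        // Throttle how often the status line is refreshed: with, e.g.,
        // tokudb_read_status_frequency = 10000, the message below is rebuilt
        // once every 10000 rows queried rather than on every row.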
        bool update_status =
            (trx->stmt_progress.queried && tokudb_read_status_frequency && (trx->stmt_progress.queried % tokudb_read_status_frequency) == 0) ||
            (num_written && tokudb_write_status_frequency && (num_written % tokudb_write_status_frequency) == 0);
        if (update_status) {
            char *next_status = write_status_msg;
            bool first = true;
            int r;
            if (trx->stmt_progress.queried) {
                r = sprintf(next_status, "Queried about %llu row%s", trx->stmt_progress.queried, trx->stmt_progress.queried == 1 ? "" : "s");
                assert(r >= 0);
                next_status += r;
                first = false;
            }
            if (trx->stmt_progress.inserted) {
                if (trx->stmt_progress.using_loader) {
                    r = sprintf(next_status, "%sFetched about %llu row%s, loading data still remains", first ? "" : ", ", trx->stmt_progress.inserted, trx->stmt_progress.inserted == 1 ? "" : "s");
                }
                else {
                    r = sprintf(next_status, "%sInserted about %llu row%s", first ? "" : ", ", trx->stmt_progress.inserted, trx->stmt_progress.inserted == 1 ? "" : "s");
                }
                assert(r >= 0);
                next_status += r;
                first = false;
            }
            if (trx->stmt_progress.updated) {
                r = sprintf(next_status, "%sUpdated about %llu row%s", first ? "" : ", ", trx->stmt_progress.updated, trx->stmt_progress.updated == 1 ? "" : "s");
                assert(r >= 0);
                next_status += r;
                first = false;
            }
            if (trx->stmt_progress.deleted) {
                r = sprintf(next_status, "%sDeleted about %llu row%s", first ? "" : ", ", trx->stmt_progress.deleted, trx->stmt_progress.deleted == 1 ? "" : "s");
                assert(r >= 0);
                next_status += r;
                first = false;
            }
            if (!first)
                thd_proc_info(thd, write_status_msg);
        }
    }
}
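// The 'pos' buffer handed to get_pos() is the one built by position()
// below: a 4-byte key length followed by the packed primary key bytes.
// get_pos() just points a DBT at that layout without copying.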
DBT *ha_tokudb::get_pos(DBT * to, uchar * pos) {
    TOKUDB_DBUG_ENTER("ha_tokudb::get_pos");
    /* We don't need to set app_data here */
    memset((void *) to, 0, sizeof(*to));
    to->data = pos + sizeof(uint32_t);
    to->size = *(uint32_t *)pos;
    DBUG_DUMP("key", (const uchar *) to->data, to->size);
    DBUG_RETURN(to);
}

//
// Retrieves a row based on the primary key saved in pos
// Returns:
//      0 on success
//      HA_ERR_KEY_NOT_FOUND if not found
//      error otherwise
//
int ha_tokudb::rnd_pos(uchar * buf, uchar * pos) {
    TOKUDB_DBUG_ENTER("ha_tokudb::rnd_pos");
    DBT db_pos;
    int error = 0;
    struct smart_dbt_info info;
    bool old_unpack_entire_row = unpack_entire_row;
    DBT* key = get_pos(&db_pos, pos);
    unpack_entire_row = true;
    ha_statistic_increment(&SSV::ha_read_rnd_count);
    tokudb_active_index = MAX_KEY;
    info.ha = this;
    info.buf = buf;
    info.keynr = primary_key;
    error = share->file->getf_set(share->file, transaction,
        get_cursor_isolation_flags(lock.type, ha_thd()),
        key, smart_dbt_callback_rowread_ptquery, &info);
    if (error == DB_NOTFOUND) {
        error = HA_ERR_KEY_NOT_FOUND;
        goto cleanup;
    }
cleanup:
    unpack_entire_row = old_unpack_entire_row;
    TOKUDB_DBUG_RETURN(error);
}

int ha_tokudb::prelock_range(const key_range *start_key, const key_range *end_key) {
    TOKUDB_DBUG_ENTER("ha_tokudb::prelock_range");
    THD* thd = ha_thd();
    int error = 0;
    DBT start_dbt_key;
    DBT end_dbt_key;
    uchar* start_key_buff = prelocked_left_range;
    uchar* end_key_buff = prelocked_right_range;
    memset((void *) &start_dbt_key, 0, sizeof(start_dbt_key));
    memset((void *) &end_dbt_key, 0, sizeof(end_dbt_key));
    HANDLE_INVALID_CURSOR();
    if (start_key) {
        switch (start_key->flag) {
        case HA_READ_AFTER_KEY:
            pack_key(&start_dbt_key, tokudb_active_index, start_key_buff, start_key->key, start_key->length, COL_POS_INF);
            break;
        default:
            pack_key(&start_dbt_key, tokudb_active_index, start_key_buff, start_key->key, start_key->length, COL_NEG_INF);
            break;
        }
        prelocked_left_range_size = start_dbt_key.size;
    }
    else {
        prelocked_left_range_size = 0;
    }
    if (end_key) {
        switch (end_key->flag) {
        case HA_READ_BEFORE_KEY:
            pack_key(&end_dbt_key, tokudb_active_index, end_key_buff, end_key->key, end_key->length, COL_NEG_INF);
            break;
        default:
            pack_key(&end_dbt_key, tokudb_active_index, end_key_buff, end_key->key, end_key->length, COL_POS_INF);
            break;
        }
        prelocked_right_range_size = end_dbt_key.size;
    }
    else {
        prelocked_right_range_size = 0;
    }
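    // When a bound is missing, c_set_bounds (below) is handed the
    // dictionary's negative/positive infinity key instead, so the
    // prelock_range(NULL, NULL) call made from rnd_init locks the whole index.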
    error = cursor->c_set_bounds(
        cursor,
        start_key ? &start_dbt_key : share->key_file[tokudb_active_index]->dbt_neg_infty(),
        end_key ? &end_dbt_key : share->key_file[tokudb_active_index]->dbt_pos_infty(),
        true,
        (cursor_flags & DB_SERIALIZABLE) != 0 ? DB_NOTFOUND : 0
        );
    if (error) {
        last_cursor_error = error;
        //
        // cursor should be initialized here, but in case it is not, we still check
        //
        if (cursor) {
            int r = cursor->c_close(cursor);
            assert(r == 0);
            cursor = NULL;
        }
        goto cleanup;
    }

    //
    // at this point, determine if we will be doing bulk fetch
    // as of now, only do it if we are doing a select
    //
    doing_bulk_fetch = (thd_sql_command(thd) == SQLCOM_SELECT);
    bulk_fetch_iteration = 0;
    rows_fetched_using_bulk_fetch = 0;

cleanup:
    TOKUDB_DBUG_RETURN(error);
}

//
// Prelock range if possible, start_key is leftmost, end_key is rightmost
// whether scanning forward or backward. This function is called by MySQL
// for backward range queries (in QUICK_SELECT_DESC::get_next).
// Forward scans use read_range_first()/read_range_next().
//
int ha_tokudb::prepare_range_scan(const key_range *start_key, const key_range *end_key) {
    int error = prelock_range(start_key, end_key);
    if (!error) {
        range_lock_grabbed = true;
    }
    return error;
}

int ha_tokudb::read_range_first(
    const key_range *start_key,
    const key_range *end_key,
    bool eq_range,
    bool sorted)
{
    int error = prelock_range(start_key, end_key);
    if (error) { goto cleanup; }
    range_lock_grabbed = true;
    error = handler::read_range_first(start_key, end_key, eq_range, sorted);
cleanup:
    return error;
}

int ha_tokudb::read_range_next()
{
    TOKUDB_DBUG_ENTER("ha_tokudb::read_range_next");
    int error;
    error = handler::read_range_next();
    if (error) {
        range_lock_grabbed = false;
    }
    TOKUDB_DBUG_RETURN(error);
}
/*
  Set a reference to the current record in (ref,ref_length).

  SYNOPSIS
  ha_tokudb::position()
  record                      The current record buffer

  DESCRIPTION
  The BDB handler stores the primary key in (ref,ref_length).
  There is either an explicit primary key, or an implicit (hidden)
  primary key.
  During open(), 'ref_length' is calculated as the maximum primary
  key length. When an actual key is shorter than that, the rest of
  the buffer must be cleared out. The row cannot be identified if
  garbage follows the end of the key. There is no length field for
  the current key, so the whole ref_length is used for comparison.

  RETURN
  nothing
*/
void ha_tokudb::position(const uchar * record) {
    TOKUDB_DBUG_ENTER("ha_tokudb::position");
    DBT key;
    if (hidden_primary_key) {
        DBUG_ASSERT(ref_length == (TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH + sizeof(uint32_t)));
        memcpy_fixed(ref + sizeof(uint32_t), current_ident, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
        *(uint32_t *)ref = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
    }
    else {
        bool has_null;
        //
        // save the data
        //
        create_dbt_key_from_table(&key, primary_key, ref + sizeof(uint32_t), record, &has_null);
        //
        // save the size of data in the first four bytes of ref
        //
        memcpy(ref, &key.size, sizeof(uint32_t));
    }
    DBUG_VOID_RETURN;
}
//
// Per InnoDB: Returns statistics information of the table to the MySQL interpreter,
// in various fields of the handle object.
// Return:
//      0, always success
//
int ha_tokudb::info(uint flag) {
    TOKUDB_DBUG_ENTER("ha_tokudb::info %p %d %lld", this, flag, (long long) share->rows);
    int error;
    DB_TXN* txn = NULL;
    uint curr_num_DBs = table->s->keys + test(hidden_primary_key);
    DB_BTREE_STAT64 dict_stats;
    if (flag & HA_STATUS_VARIABLE) {
        // Just to get optimizations right
        stats.records = share->rows + share->rows_from_locked_table;
        if (stats.records == 0) {
            stats.records++;
        }
        stats.deleted = 0;
        if (!(flag & HA_STATUS_NO_LOCK)) {
            uint64_t num_rows = 0;
            TOKU_DB_FRAGMENTATION_S frag_info;
            memset(&frag_info, 0, sizeof frag_info);
            error = db_env->txn_begin(db_env, NULL, &txn, DB_READ_UNCOMMITTED);
            if (error) { goto cleanup; }
            // we should always have a primary key
            assert(share->file != NULL);
            error = estimate_num_rows(share->file, &num_rows, txn);
            if (error == 0) {
                share->rows = num_rows;
                stats.records = num_rows;
                if (stats.records == 0) {
                    stats.records++;
                }
            }
            else {
                goto cleanup;
            }
            error = share->file->get_fragmentation(
                share->file,
                &frag_info
                );
            if (error) { goto cleanup; }
            stats.delete_length = frag_info.unused_bytes;
            error = share->file->stat64(
                share->file,
                txn,
                &dict_stats
                );
            if (error) { goto cleanup; }
            stats.create_time = dict_stats.bt_create_time_sec;
            stats.update_time = dict_stats.bt_modify_time_sec;
            stats.check_time = dict_stats.bt_verify_time_sec;
            stats.data_file_length = dict_stats.bt_dsize;
            if (hidden_primary_key) {
                //
                // in this case, we have a hidden primary key, and do not
                // want to report space taken up by the hidden primary key to the user
                //
                uint64_t hpk_space = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH * dict_stats.bt_ndata;
                stats.data_file_length = (hpk_space > stats.data_file_length) ? 0 : stats.data_file_length - hpk_space;
            }
            else {
                //
                // one infinity byte per key needs to be subtracted
                //
                uint64_t inf_byte_space = dict_stats.bt_ndata;
                stats.data_file_length = (inf_byte_space > stats.data_file_length) ? 0 : stats.data_file_length - inf_byte_space;
            }
            stats.mean_rec_length = stats.records ? (ulong)(stats.data_file_length / stats.records) : 0;
            stats.index_file_length = 0;
            // curr_num_DBs is the number of keys we have, according
            // to the mysql layer. if drop index is running concurrently
            // with info() (it can, because info does not take table locks),
            // then it could be the case that one of the dbs was dropped
            // and set to NULL before mysql was able to set table->s->keys
            // accordingly.
            //
            // we should just ignore any DB * that is NULL.
            //
            // this solution is much simpler than trying to maintain an
            // accurate number of valid keys at the handlerton layer.
            for (uint i = 0; i < curr_num_DBs; i++) {
                // skip the primary key, skip dropped indexes
                if (i == primary_key || share->key_file[i] == NULL) {
                    continue;
                }
                error = share->key_file[i]->stat64(
                    share->key_file[i],
                    txn,
                    &dict_stats
                    );
                if (error) { goto cleanup; }
                stats.index_file_length += dict_stats.bt_dsize;
                // read fragmentation from this secondary index's DB, not from
                // the primary (share->file); otherwise the primary's unused
                // bytes would be counted once per secondary index
                error = share->key_file[i]->get_fragmentation(
                    share->key_file[i],
                    &frag_info
                    );
                if (error) { goto cleanup; }
                stats.delete_length += frag_info.unused_bytes;
            }
        }
    }
    if ((flag & HA_STATUS_CONST)) {
        stats.max_data_file_length = 9223372036854775807ULL;
        uint64_t rec_per_key[table_share->key_parts];
        error = tokudb::get_card_from_status(share->status_block, txn, table_share->key_parts, rec_per_key);
        if (error == 0) {
            tokudb::set_card_in_key_info(table, table_share->key_parts, rec_per_key);
        } else {
            for (uint i = 0; i < table_share->key_parts; i++)
                rec_per_key[i] = 0;
            tokudb::set_card_in_key_info(table, table_share->key_parts, rec_per_key);
        }
    }
    /* Don't return key if we got an error for the internal primary key */
    if (flag & HA_STATUS_ERRKEY && last_dup_key < table_share->keys) {
        errkey = last_dup_key;
    }
    if (flag & HA_STATUS_AUTO && table->found_next_number_field) {
        THD *thd = table->in_use;
        struct system_variables *variables = &thd->variables;
        stats.auto_increment_value = share->last_auto_increment + variables->auto_increment_increment;
    }
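    // Worked example for the HA_STATUS_AUTO branch above: with
    // share->last_auto_increment = 42 and auto_increment_increment = 1 the
    // reported next value is 43; with an increment of 5 it would be 47.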
    error = 0;
cleanup:
    if (txn != NULL) {
        commit_txn(txn, DB_TXN_NOSYNC);
        txn = NULL;
    }
    TOKUDB_DBUG_RETURN(error);
}

//
// Per InnoDB: Tells something additional to the handler about how to do things.
//
int ha_tokudb::extra(enum ha_extra_function operation) {
    TOKUDB_DBUG_ENTER("extra %p %d", this, operation);
    switch (operation) {
    case HA_EXTRA_RESET_STATE:
        reset();
        break;
    case HA_EXTRA_KEYREAD:
        key_read = 1;           // Query satisfied with key
        break;
    case HA_EXTRA_NO_KEYREAD:
        key_read = 0;
        break;
    case HA_EXTRA_IGNORE_DUP_KEY:
        using_ignore = 1;
        break;
    case HA_EXTRA_NO_IGNORE_DUP_KEY:
        using_ignore = 0;
        break;
    default:
        break;
    }
    TOKUDB_DBUG_RETURN(0);
}

int ha_tokudb::reset(void) {
    TOKUDB_DBUG_ENTER("ha_tokudb::reset");
    key_read = 0;
    using_ignore = 0;
    reset_dsmrr();
    invalidate_icp();
    TOKUDB_DBUG_RETURN(0);
}

//
// helper function that iterates through all DBs
// and grabs a lock (either read or write, but not both)
// Parameters:
//      [in]    trans - transaction to be used to pre-acquire the lock
//              lt - type of lock to get, either lock_read or lock_write
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::acquire_table_lock(DB_TXN* trans, TABLE_LOCK_TYPE lt) {
    int error = ENOSYS;
    rw_rdlock(&share->num_DBs_lock);
    uint curr_num_DBs = share->num_DBs;
    if (lt == lock_read) {
        error = 0;
        goto cleanup;
    }
    else if (lt == lock_write) {
        if (tokudb_debug & TOKUDB_DEBUG_LOCK)
            TOKUDB_TRACE("%s\n", __FUNCTION__);
        for (uint i = 0; i < curr_num_DBs; i++) {
            DB* db = share->key_file[i];
            error = db->pre_acquire_table_lock(db, trans);
            if (error == EINVAL)
                TOKUDB_TRACE("%s %d db=%p trans=%p\n", __FUNCTION__, i, db, trans);
            if (error) break;
        }
        if (tokudb_debug & TOKUDB_DEBUG_LOCK)
            TOKUDB_TRACE("%s error=%d\n", __FUNCTION__, error);
        if (error) goto cleanup;
    }
    else {
        error = ENOSYS;
        goto cleanup;
    }
    error = 0;
cleanup:
    rw_unlock(&share->num_DBs_lock);
    return error;
}

int ha_tokudb::create_txn(THD* thd, tokudb_trx_data* trx) {
    int error;
    ulong tx_isolation = thd_tx_isolation(thd);
    HA_TOKU_ISO_LEVEL toku_iso_level = tx_to_toku_iso(tx_isolation);
    bool is_autocommit = !thd_test_options(
        thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN);

    /* First table lock, start transaction */
    if (thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) &&
        !trx->all &&
        (thd_sql_command(thd) != SQLCOM_CREATE_TABLE) &&
        (thd_sql_command(thd) != SQLCOM_DROP_TABLE) &&
        (thd_sql_command(thd) != SQLCOM_DROP_INDEX) &&
        (thd_sql_command(thd) != SQLCOM_CREATE_INDEX) &&
        (thd_sql_command(thd) != SQLCOM_ALTER_TABLE)) {
        /* QQQ We have to start a master transaction */
        // DBUG_PRINT("trans", ("starting transaction all "));
        if ((error = db_env->txn_begin(db_env, NULL, &trx->all, toku_iso_to_txn_flag(toku_iso_level)))) {
            trx->tokudb_lock_count--;      // We didn't get the lock
            goto cleanup;
        }
        if (tokudb_debug & TOKUDB_DEBUG_TXN) {
            TOKUDB_TRACE("just created master:%p\n", trx->all);
        }
        trx->sp_level = trx->all;
        trans_register_ha(thd, true, tokudb_hton);
    }
    DBUG_PRINT("trans", ("starting transaction stmt"));
    if (trx->stmt) {
        if (tokudb_debug & TOKUDB_DEBUG_TXN) {
            TOKUDB_TRACE("warning:stmt=%p\n", trx->stmt);
        }
    }
    uint32_t txn_begin_flags;
    if (trx->all == NULL) {
        txn_begin_flags = toku_iso_to_txn_flag(toku_iso_level);
        //
        // if the isolation level that the user has set is serializable,
        // but autocommit is on and this is just a select,
        // then we can go ahead and set the isolation level to
        // be a snapshot read, because we can serialize
        // the transaction to be the point in time at which the snapshot began.
        //
        if (txn_begin_flags == 0 && is_autocommit && thd_sql_command(thd) == SQLCOM_SELECT) {
            txn_begin_flags = DB_TXN_SNAPSHOT;
        }
        if (is_autocommit && thd_sql_command(thd) == SQLCOM_SELECT && !thd->in_sub_stmt && lock.type <= TL_READ_NO_INSERT && !thd->lex->uses_stored_routines()) {
            txn_begin_flags |= DB_TXN_READ_ONLY;
        }
    }
    else {
        txn_begin_flags = DB_INHERIT_ISOLATION;
    }
    if ((error = db_env->txn_begin(db_env, trx->sp_level, &trx->stmt, txn_begin_flags))) {
        /* We leave the possible master transaction open */
        trx->tokudb_lock_count--;  // We didn't get the lock
        goto cleanup;
    }
    trx->sub_sp_level = trx->stmt;
    if (tokudb_debug & TOKUDB_DEBUG_TXN) {
        TOKUDB_TRACE("just created stmt:%p:%p\n", trx->sp_level, trx->stmt);
    }
    reset_stmt_progress(&trx->stmt_progress);
    trans_register_ha(thd, false, tokudb_hton);
cleanup:
    return error;
}
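// Resulting transaction stack (from the code above): trx->all is the
// multi-statement "master" txn (NULL under autocommit), trx->sp_level is the
// savepoint parent (initially trx->all), and trx->stmt == trx->sub_sp_level
// is the per-statement child txn begun under sp_level.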
/*
  As MySQL will execute an external lock for every new table it uses
  we can use this to start the transactions.
  If we are in auto_commit mode we just need to start a transaction
  for the statement to be able to rollback the statement.
  If not, we have to start a master transaction if there doesn't exist
  one from before.
*/
//
// Parameters:
//      [in]    thd - handle to the user thread
//              lock_type - the type of lock
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::external_lock(THD * thd, int lock_type) {
    TOKUDB_DBUG_ENTER("ha_tokudb::external_lock cmd=%d %d", thd_sql_command(thd), lock_type);
    if (tokudb_debug & TOKUDB_DEBUG_LOCK)
        TOKUDB_TRACE("%s cmd=%d %d\n", __FUNCTION__, thd_sql_command(thd), lock_type);
    int error = 0;
    tokudb_trx_data *trx = NULL;
    trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
    if (!trx) {
        error = create_tokudb_trx_data_instance(&trx);
        if (error) { goto cleanup; }
        thd_data_set(thd, tokudb_hton->slot, trx);
    }
    if (trx->all == NULL) {
        trx->sp_level = NULL;
    }
    if (lock_type != F_UNLCK) {
        is_fast_alter_running = false;
        use_write_locks = false;
        if (lock_type == F_WRLCK) {
            use_write_locks = true;
        }
        if (!trx->tokudb_lock_count++) {
            assert(trx->stmt == 0);
            transaction = NULL;    // Safety
            error = create_txn(thd, trx);
            if (error) {
                goto cleanup;
            }
        }
        transaction = trx->sub_sp_level;
    }
    else {
        pthread_mutex_lock(&share->mutex);
        // hate dealing with comparison of signed vs unsigned, so doing this
        if (deleted_rows > added_rows && share->rows < (deleted_rows - added_rows)) {
            share->rows = 0;
        }
        else {
            share->rows += (added_rows - deleted_rows);
        }
        pthread_mutex_unlock(&share->mutex);
        added_rows = 0;
        deleted_rows = 0;
        share->rows_from_locked_table = 0;
        if (trx->tokudb_lock_count > 0 && !--trx->tokudb_lock_count) {
            if (trx->stmt) {
                /*
                  F_UNLCK is done without a transaction commit / rollback.
                  This happens if the thread didn't update any rows.
                  We must in this case commit the work to keep the row locks.
                */
                DBUG_PRINT("trans", ("committing non-updating transaction"));
                reset_stmt_progress(&trx->stmt_progress);
                if (!is_fast_alter_running) {
                    commit_txn(trx->stmt, 0);
                    if (tokudb_debug & TOKUDB_DEBUG_TXN) {
                        TOKUDB_TRACE("commit:%p:%d\n", trx->stmt, error);
                    }
                    trx->stmt = NULL;
                    trx->sub_sp_level = NULL;
                }
            }
        }
        transaction = NULL;
        is_fast_alter_running = false;
    }
cleanup:
    if (tokudb_debug & TOKUDB_DEBUG_LOCK)
        TOKUDB_TRACE("%s error=%d\n", __FUNCTION__, error);
    TOKUDB_DBUG_RETURN(error);
}
/*
  When using LOCK TABLES, external_lock is only called when the actual
  TABLE LOCK is done.
  Under LOCK TABLES, each used table will force a call to start_stmt.
*/
int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) {
    TOKUDB_DBUG_ENTER("ha_tokudb::start_stmt cmd=%d %d", thd_sql_command(thd), lock_type);
    int error = 0;
    tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
    DBUG_ASSERT(trx);
    /*
      note that trx->stmt may have been already initialized as start_stmt()
      is called for *each table*, not for each storage engine,
      and there could be many bdb tables referenced in the query
    */
    if (!trx->stmt) {
        error = create_txn(thd, trx);
        if (error) {
            goto cleanup;
        }
    }
    else {
        if (tokudb_debug & TOKUDB_DEBUG_TXN) {
            TOKUDB_TRACE("trx->stmt already existed\n");
        }
    }
    //
    // we know we are in lock tables,
    // so attempt to grab a table lock.
    // if it fails, continue; do not return an error.
    // This is because a failure is ok: it simply means
    // another active transaction holds some locks.
    // That other transaction may modify this table
    // until it is unlocked, so having acquire_table_lock
    // potentially grab some locks but not all is ok.
    //
    if (lock.type <= TL_READ_NO_INSERT) {
        acquire_table_lock(trx->sub_sp_level, lock_read);
    }
    else {
        if (!(thd_sql_command(thd) == SQLCOM_CREATE_INDEX ||
              thd_sql_command(thd) == SQLCOM_ALTER_TABLE ||
              thd_sql_command(thd) == SQLCOM_DROP_INDEX ||
              thd_sql_command(thd) == SQLCOM_TRUNCATE)) {
            acquire_table_lock(trx->sub_sp_level, lock_write);
        }
    }
    if (added_rows > deleted_rows) {
        share->rows_from_locked_table = added_rows - deleted_rows;
    }
    transaction = trx->sub_sp_level;
    trans_register_ha(thd, false, tokudb_hton);
cleanup:
    TOKUDB_DBUG_RETURN(error);
}
uint32_t ha_tokudb::get_cursor_isolation_flags(enum thr_lock_type lock_type, THD* thd) {
    uint sql_command = thd_sql_command(thd);
    bool in_lock_tables = thd_in_lock_tables(thd);

    //
    // following InnoDB's lead and having checksum command use a snapshot read if told
    //
    if (sql_command == SQLCOM_CHECKSUM) {
        return 0;
    }
    else if ((lock_type == TL_READ && in_lock_tables) ||
             (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) ||
             sql_command != SQLCOM_SELECT ||
             (sql_command == SQLCOM_SELECT && lock_type >= TL_WRITE_ALLOW_WRITE)) { // select for update
        ulong tx_isolation = thd_tx_isolation(thd);
        // pattern matched from InnoDB
        if ((tx_isolation == ISO_READ_COMMITTED || tx_isolation == ISO_READ_UNCOMMITTED) &&
            (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT) &&
            (sql_command == SQLCOM_INSERT_SELECT
             || sql_command == SQLCOM_REPLACE_SELECT
             || sql_command == SQLCOM_UPDATE
             || sql_command == SQLCOM_CREATE_TABLE))
        {
            return 0;
        }
        else {
            return DB_SERIALIZABLE;
        }
    }
    else {
        return 0;
    }
}
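// Examples of the mapping above: a plain autocommit SELECT falls through to
// the final else and returns 0 (a snapshot read), while SELECT ... FOR UPDATE
// (lock_type >= TL_WRITE_ALLOW_WRITE) returns DB_SERIALIZABLE unless the
// READ COMMITTED / READ UNCOMMITTED exception for INSERT ... SELECT-style
// statements applies.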
/*
  The idea with handler::store_lock() is the following:

  The statement decides which locks we should need for the table:
  for updates/deletes/inserts we get WRITE locks, for SELECT... we get
  read locks.

  Before adding the lock into the table lock handler (see thr_lock.c)
  mysqld calls store lock with the requested locks. Store lock can now
  modify a write lock to a read lock (or some other lock), ignore the
  lock (if we don't want to use MySQL table locks at all) or add locks
  for many tables (like we do when we are using a MERGE handler).

  TokuDB changes all WRITE locks to TL_WRITE_ALLOW_WRITE (which
  signals that we are doing WRITES, but we are still allowing other
  readers and writers).

  When releasing locks, store_lock() is also called. In this case one
  usually doesn't have to do anything.

  In some exceptional cases MySQL may send a request for a TL_IGNORE;
  this means that we are requesting the same lock as last time and this
  should also be ignored. (This may happen when someone does a flush
  table when we have opened a part of the tables, in which case mysqld
  closes and reopens the tables and tries to get the same locks as last
  time). In the future we will probably try to remove this.
*/
THR_LOCK_DATA **ha_tokudb::store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_lock_type lock_type) {
    TOKUDB_DBUG_ENTER("ha_tokudb::store_lock, lock_type=%d cmd=%d", lock_type, thd_sql_command(thd));
    if (tokudb_debug & TOKUDB_DEBUG_LOCK) {
        TOKUDB_TRACE("%s lock_type=%d cmd=%d\n", __FUNCTION__, lock_type, thd_sql_command(thd));
    }
    if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) {
        // if creating a hot index
        if (thd_sql_command(thd) == SQLCOM_CREATE_INDEX && get_create_index_online(thd)) {
            rw_rdlock(&share->num_DBs_lock);
            if (share->num_DBs == (table->s->keys + test(hidden_primary_key))) {
                lock_type = TL_WRITE_ALLOW_WRITE;
            }
            lock.type = lock_type;
            rw_unlock(&share->num_DBs_lock);
        }
        // 5.5 supports reads concurrent with alter table. just use the default lock type.
#if MYSQL_VERSION_ID < 50500
        else if (thd_sql_command(thd) == SQLCOM_CREATE_INDEX ||
                 thd_sql_command(thd) == SQLCOM_ALTER_TABLE ||
                 thd_sql_command(thd) == SQLCOM_DROP_INDEX) {
            // force alter table to lock out other readers
            lock_type = TL_WRITE;
            lock.type = lock_type;
        }
#endif
        else {
            // If we are not doing a LOCK TABLE, then allow multiple writers
            if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) &&
                !thd->in_lock_tables && thd_sql_command(thd) != SQLCOM_TRUNCATE && !thd_tablespace_op(thd)) {
                lock_type = TL_WRITE_ALLOW_WRITE;
            }
            lock.type = lock_type;
        }
    }
    *to++ = &lock;
    if (tokudb_debug & TOKUDB_DEBUG_LOCK)
        TOKUDB_TRACE("%s lock_type=%d\n", __FUNCTION__, lock_type);
    DBUG_RETURN(to);
}
static inline enum row_type
compression_method_to_row_type(enum toku_compression_method method)
{
    switch (method) {
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
    case TOKU_NO_COMPRESSION:
        return ROW_TYPE_TOKU_UNCOMPRESSED;
    case TOKU_ZLIB_METHOD:
    case TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD:
        return ROW_TYPE_TOKU_ZLIB;
    case TOKU_QUICKLZ_METHOD:
        return ROW_TYPE_TOKU_QUICKLZ;
    case TOKU_LZMA_METHOD:
        return ROW_TYPE_TOKU_LZMA;
    case TOKU_FAST_COMPRESSION_METHOD:
        return ROW_TYPE_TOKU_FAST;
    case TOKU_SMALL_COMPRESSION_METHOD:
        return ROW_TYPE_TOKU_SMALL;
#endif
    case TOKU_DEFAULT_COMPRESSION_METHOD:
        return ROW_TYPE_DEFAULT;
    default:
        assert(false);
    }
}

static enum row_type
get_row_type_for_key(DB *file)
{
    enum toku_compression_method method;
    int r = file->get_compression_method(file, &method);
    assert(r == 0);
    return compression_method_to_row_type(method);
}

enum row_type
#if MYSQL_VERSION_ID >= 50521
ha_tokudb::get_row_type(void) const
#else
ha_tokudb::get_row_type(void)
#endif
{
    return get_row_type_for_key(share->file);
}

static inline enum toku_compression_method
row_type_to_compression_method(enum row_type type)
{
    switch (type) {
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
    case ROW_TYPE_TOKU_UNCOMPRESSED:
        return TOKU_NO_COMPRESSION;
    case ROW_TYPE_TOKU_ZLIB:
        return TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD;
    case ROW_TYPE_TOKU_QUICKLZ:
        return TOKU_QUICKLZ_METHOD;
    case ROW_TYPE_TOKU_LZMA:
        return TOKU_LZMA_METHOD;
    case ROW_TYPE_TOKU_SMALL:
        return TOKU_SMALL_COMPRESSION_METHOD;
    case ROW_TYPE_TOKU_FAST:
        return TOKU_FAST_COMPRESSION_METHOD;
#endif
    default:
        DBUG_PRINT("info", ("Ignoring ROW_FORMAT not used by TokuDB, using TOKUDB_FAST by default instead"));
    case ROW_TYPE_DEFAULT:
        return TOKU_DEFAULT_COMPRESSION_METHOD;
    }
}
static int create_sub_table(
    const char *table_name,
    DBT* row_descriptor,
    DB_TXN* txn,
    uint32_t block_size,
    uint32_t read_block_size,
    enum toku_compression_method compression_method,
    bool is_hot_index
    )
{
    TOKUDB_DBUG_ENTER("create_sub_table");
    int error;
    DB *file = NULL;
    uint32_t create_flags;

    error = db_create(&file, db_env, 0);
    if (error) {
        DBUG_PRINT("error", ("Got error: %d when creating table", error));
        my_errno = error;
        goto exit;
    }
    if (block_size != 0) {
        error = file->set_pagesize(file, block_size);
        if (error != 0) {
            DBUG_PRINT("error", ("Got error: %d when setting block size %u for table '%s'", error, block_size, table_name));
            goto exit;
        }
    }
    if (read_block_size != 0) {
        error = file->set_readpagesize(file, read_block_size);
        if (error != 0) {
            DBUG_PRINT("error", ("Got error: %d when setting read block size %u for table '%s'", error, read_block_size, table_name));
            goto exit;
        }
    }
    error = file->set_compression_method(file, compression_method);
    if (error != 0) {
        DBUG_PRINT("error", ("Got error: %d when setting compression type %u for table '%s'", error, compression_method, table_name));
        goto exit;
    }
    create_flags = DB_THREAD | DB_CREATE | DB_EXCL | (is_hot_index ? DB_IS_HOT_INDEX : 0);
    error = file->open(file, txn, table_name, NULL, DB_BTREE, create_flags, my_umask);
    if (error) {
        DBUG_PRINT("error", ("Got error: %d when opening table '%s'", error, table_name));
        goto exit;
    }
    error = file->change_descriptor(file, txn, row_descriptor, (is_hot_index ? DB_IS_HOT_INDEX | DB_UPDATE_CMP_DESCRIPTOR : DB_UPDATE_CMP_DESCRIPTOR));
    if (error) {
        DBUG_PRINT("error", ("Got error: %d when setting row descriptor for table '%s'", error, table_name));
        goto exit;
    }
    error = 0;
exit:
    if (file) {
        int r = file->close(file, 0);
        assert(r == 0);
    }
    TOKUDB_DBUG_RETURN(error);
}
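// A sketch of a typical call (the callers below pass their own computed
// values): creating a non-hot dictionary with 4MB pages and 64KB read blocks
// would look like
//     create_sub_table(newname, &row_descriptor, txn, 4 << 20, 64 << 10,
//                      TOKU_DEFAULT_COMPRESSION_METHOD, false);
// Note that the dictionary is created and then closed before returning.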
void ha_tokudb::update_create_info(HA_CREATE_INFO* create_info) {
    if (share->has_auto_inc) {
        info(HA_STATUS_AUTO);
        if (!(create_info->used_fields & HA_CREATE_USED_AUTO) ||
            create_info->auto_increment_value < stats.auto_increment_value) {
            create_info->auto_increment_value = stats.auto_increment_value;
        }
    }
    if (!(create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)) {
        // show create table asks us to update this create_info, this makes it
        // so we'll always show what compression type we're using
        create_info->row_type = get_row_type();
    }
}

//
// removes the key name from status.tokudb.
// needed when we are dropping indexes, so that during drop table
// we do not attempt to remove already-dropped indexes; otherwise
// status.tokudb would be out of sync with the list of indexes.
//
int ha_tokudb::remove_key_name_from_status(DB* status_block, char* key_name, DB_TXN* txn) {
    int error;
    uchar status_key_info[FN_REFLEN + sizeof(HA_METADATA_KEY)];
    HA_METADATA_KEY md_key = hatoku_key_name;
    memcpy(status_key_info, &md_key, sizeof(HA_METADATA_KEY));
    //
    // build the status key containing the index name
    //
    memcpy(
        status_key_info + sizeof(HA_METADATA_KEY),
        key_name,
        strlen(key_name) + 1
        );
    error = remove_metadata(
        status_block,
        status_key_info,
        sizeof(HA_METADATA_KEY) + strlen(key_name) + 1,
        txn
        );
    return error;
}

//
// writes the key name in status.tokudb, so that we may later delete or rename
// the dictionary associated with key_name
//
int ha_tokudb::write_key_name_to_status(DB* status_block, char* key_name, DB_TXN* txn) {
    int error;
    uchar status_key_info[FN_REFLEN + sizeof(HA_METADATA_KEY)];
    HA_METADATA_KEY md_key = hatoku_key_name;
    memcpy(status_key_info, &md_key, sizeof(HA_METADATA_KEY));
    //
    // put index name in status.tokudb
    //
    memcpy(
        status_key_info + sizeof(HA_METADATA_KEY),
        key_name,
        strlen(key_name) + 1
        );
    error = write_metadata(
        status_block,
        status_key_info,
        sizeof(HA_METADATA_KEY) + strlen(key_name) + 1,
        NULL,
        0,
        txn
        );
    return error;
}
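// Layout of the status record key used by the two functions above:
// [ HA_METADATA_KEY tag = hatoku_key_name ][ key_name bytes ][ '\0' ];
// for an index named "idx_a" that is the tag followed by the five bytes of
// "idx_a" and a NUL terminator (six bytes in all for the name portion).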
//
// some tracing moved out of ha_tokudb::create, because ::create was getting cluttered
//
void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) {
    uint i;
    //
    // tracing information about what type of table we are creating
    //
    if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
        for (i = 0; i < form->s->fields; i++) {
            Field *field = form->s->field[i];
            TOKUDB_TRACE("field:%d:%s:type=%d:flags=%x\n", i, field->field_name, field->type(), field->flags);
        }
        for (i = 0; i < form->s->keys; i++) {
            KEY *key = &form->s->key_info[i];
            TOKUDB_TRACE("key:%d:%s:%d\n", i, key->name, get_key_parts(key));
            uint p;
            for (p = 0; p < get_key_parts(key); p++) {
                KEY_PART_INFO *key_part = &key->key_part[p];
                Field *field = key_part->field;
                TOKUDB_TRACE("key:%d:%d:length=%d:%s:type=%d:flags=%x\n",
                             i, p, key_part->length, field->field_name, field->type(), field->flags);
            }
        }
    }
}

static uint32_t get_max_desc_size(KEY_AND_COL_INFO* kc_info, TABLE* form) {
    uint32_t max_row_desc_buff_size;
    max_row_desc_buff_size = 2*(form->s->fields * 6) + 10;                    // upper bound of key comparison descriptor
    max_row_desc_buff_size += get_max_secondary_key_pack_desc_size(kc_info);  // upper bound for sec. key part
    max_row_desc_buff_size += get_max_clustering_val_pack_desc_size(form->s); // upper bound for clustering val part
    return max_row_desc_buff_size;
}

static uint32_t create_secondary_key_descriptor(
    uchar* buf,
    KEY* key_info,
    KEY* prim_key,
    uint hpk,
    TABLE* form,
    uint primary_key,
    uint32_t keynr,
    KEY_AND_COL_INFO* kc_info
    )
{
    uchar* ptr = NULL;

    ptr = buf;
    ptr += create_toku_key_descriptor(
        ptr,
        false,
        key_info,
        hpk,
        prim_key
        );
    ptr += create_toku_secondary_key_pack_descriptor(
        ptr,
        hpk,
        primary_key,
        form->s,
        form,
        kc_info,
        key_info,
        prim_key
        );
    ptr += create_toku_clustering_val_pack_descriptor(
        ptr,
        primary_key,
        form->s,
        kc_info,
        keynr,
        key_info->flags & HA_CLUSTERING
        );
    return ptr - buf;
}
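// The secondary row descriptor is thus three variable-length parts laid out
// back to back:
//     [ key comparison descriptor | secondary key pack descriptor |
//       clustering val pack descriptor ]
// and its total size (ptr - buf) must stay within the bound computed by
// get_max_desc_size(), which the callers assert.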
//
// creates dictionary for secondary index, with key description key_info, all using txn
//
int ha_tokudb::create_secondary_dictionary(
    const char* name, TABLE* form,
    KEY* key_info,
    DB_TXN* txn,
    KEY_AND_COL_INFO* kc_info,
    uint32_t keynr,
    bool is_hot_index,
    enum row_type row_type
    )
{
    int error;
    DBT row_descriptor;
    uchar* row_desc_buff = NULL;
    char* newname = NULL;
    KEY* prim_key = NULL;
    char dict_name[MAX_DICT_NAME_LEN];
    uint32_t max_row_desc_buff_size;
    uint hpk = (form->s->primary_key >= MAX_KEY) ? TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0;
    uint32_t block_size;
    uint32_t read_block_size;
    THD* thd = ha_thd();

    memset(&row_descriptor, 0, sizeof(row_descriptor));
    max_row_desc_buff_size = get_max_desc_size(kc_info, form);

    row_desc_buff = (uchar *)my_malloc(max_row_desc_buff_size, MYF(MY_WME));
    if (row_desc_buff == NULL) { error = ENOMEM; goto cleanup; }

    newname = (char *)my_malloc(get_max_dict_name_path_length(name), MYF(MY_WME));
    if (newname == NULL) { error = ENOMEM; goto cleanup; }

    sprintf(dict_name, "key-%s", key_info->name);
    make_name(newname, name, dict_name);

    prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];

    //
    // setup the row descriptor
    //
    row_descriptor.data = row_desc_buff;
    //
    // save data necessary for key comparisons
    //
    row_descriptor.size = create_secondary_key_descriptor(
        row_desc_buff,
        key_info,
        prim_key,
        hpk,
        form,
        primary_key,
        keynr,
        kc_info
        );
    assert(row_descriptor.size <= max_row_desc_buff_size);

    block_size = key_info->block_size << 10;
    if (block_size == 0) {
        block_size = get_tokudb_block_size(thd);
    }
    read_block_size = get_tokudb_read_block_size(thd);

    error = create_sub_table(newname, &row_descriptor, txn, block_size, read_block_size, row_type_to_compression_method(row_type), is_hot_index);
cleanup:
    my_free(newname, MYF(MY_ALLOW_ZERO_PTR));
    my_free(row_desc_buff, MYF(MY_ALLOW_ZERO_PTR));
    return error;
}

static uint32_t create_main_key_descriptor(
    uchar* buf,
    KEY* prim_key,
    uint hpk,
    uint primary_key,
    TABLE* form,
    KEY_AND_COL_INFO* kc_info
    )
{
    uchar* ptr = buf;
    ptr += create_toku_key_descriptor(
        ptr,
        hpk,
        prim_key,
        false,
        NULL
        );
    ptr += create_toku_main_key_pack_descriptor(
        ptr
        );
    ptr += create_toku_clustering_val_pack_descriptor(
        ptr,
        primary_key,
        form->s,
        kc_info,
        primary_key,
        false
        );
    return ptr - buf;
}

//
// create and close the main dictionary with name of "name" using table form, all within
// transaction txn.
//
int ha_tokudb::create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, enum row_type row_type) {
    int error;
    DBT row_descriptor;
    uchar* row_desc_buff = NULL;
    char* newname = NULL;
    KEY* prim_key = NULL;
    uint32_t max_row_desc_buff_size;
    uint hpk = (form->s->primary_key >= MAX_KEY) ? TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0;
    uint32_t block_size;
    uint32_t read_block_size;
    THD* thd = ha_thd();

    memset(&row_descriptor, 0, sizeof(row_descriptor));
    max_row_desc_buff_size = get_max_desc_size(kc_info, form);

    row_desc_buff = (uchar *)my_malloc(max_row_desc_buff_size, MYF(MY_WME));
    if (row_desc_buff == NULL) { error = ENOMEM; goto cleanup; }

    newname = (char *)my_malloc(get_max_dict_name_path_length(name), MYF(MY_WME));
    if (newname == NULL) { error = ENOMEM; goto cleanup; }

    make_name(newname, name, "main");

    prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];

    //
    // setup the row descriptor
    //
    row_descriptor.data = row_desc_buff;
    //
    // save data necessary for key comparisons
    //
    row_descriptor.size = create_main_key_descriptor(
        row_desc_buff,
        prim_key,
        hpk,
        primary_key,
        form,
        kc_info
        );
    assert(row_descriptor.size <= max_row_desc_buff_size);

    block_size = 0;
    if (prim_key)
        block_size = prim_key->block_size << 10;
    if (block_size == 0) {
        block_size = get_tokudb_block_size(thd);
    }
    read_block_size = get_tokudb_read_block_size(thd);

    /* Create the main table that will hold the real rows */
    error = create_sub_table(newname, &row_descriptor, txn, block_size, read_block_size, row_type_to_compression_method(row_type), false);
cleanup:
    my_free(newname, MYF(MY_ALLOW_ZERO_PTR));
    my_free(row_desc_buff, MYF(MY_ALLOW_ZERO_PTR));
    return error;
}
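// Dictionary naming convention used above and in ::create below: each MySQL
// table maps to one "main" dictionary for the rows, one "status" dictionary
// for metadata, and one "key-<index name>" dictionary per secondary index,
// with paths composed by make_name().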
  6268. static inline enum row_type
  6269. row_format_to_row_type(srv_row_format_t row_format)
  6270. {
  6271. #if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
  6272. switch (row_format) {
  6273. case SRV_ROW_FORMAT_UNCOMPRESSED:
  6274. return ROW_TYPE_TOKU_UNCOMPRESSED;
  6275. case SRV_ROW_FORMAT_ZLIB:
  6276. return ROW_TYPE_TOKU_ZLIB;
  6277. case SRV_ROW_FORMAT_QUICKLZ:
  6278. return ROW_TYPE_TOKU_QUICKLZ;
  6279. case SRV_ROW_FORMAT_LZMA:
  6280. return ROW_TYPE_TOKU_LZMA;
  6281. case SRV_ROW_FORMAT_SMALL:
  6282. return ROW_TYPE_TOKU_SMALL;
  6283. case SRV_ROW_FORMAT_FAST:
  6284. return ROW_TYPE_TOKU_FAST;
  6285. case SRV_ROW_FORMAT_DEFAULT:
  6286. return ROW_TYPE_DEFAULT;
  6287. }
  6288. assert(0);
  6289. #endif
  6290. return ROW_TYPE_DEFAULT;;
  6291. }
  6292. //
  6293. // Creates a new table
  6294. // Parameters:
  6295. // [in] name - table name
  6296. // [in] form - info on table, columns and indexes
  6297. // [in] create_info - more info on table, CURRENTLY UNUSED
  6298. // Returns:
  6299. // 0 on success
  6300. // error otherwise
  6301. //
  6302. int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_info) {
  6303. TOKUDB_DBUG_ENTER("ha_tokudb::create");
  6304. int error;
  6305. DB *status_block = NULL;
  6306. uint version;
  6307. uint capabilities;
  6308. DB_TXN* txn = NULL;
  6309. bool do_commit = false;
  6310. char* newname = NULL;
  6311. KEY_AND_COL_INFO kc_info;
  6312. tokudb_trx_data *trx = NULL;
  6313. THD* thd = ha_thd();
  6314. bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
  6315. memset(&kc_info, 0, sizeof(kc_info));
  6316. pthread_mutex_lock(&tokudb_meta_mutex);
  6317. trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot);
  6318. const enum row_type row_type = ((create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)
  6319. ? create_info->row_type
  6320. : row_format_to_row_type(get_row_format(thd)));
  6321. if (create_from_engine) {
  6322. // table already exists, nothing to do
  6323. error = 0;
  6324. goto cleanup;
  6325. }
  6326. // validate the fields in the table. If the table has fields
  6327. // we do not support that came from an old version of MySQL,
  6328. // gracefully return an error
  6329. for (uint32_t i = 0; i < form->s->fields; i++) {
  6330. Field* field = table_share->field[i];
  6331. if (!field_valid_for_tokudb_table(field)) {
  6332. sql_print_error("Table %s has an invalid field %s, that was created "
  6333. "with an old version of MySQL. This field is no longer supported. "
  6334. "This is probably due to an alter table engine=TokuDB. To load this "
  6335. "table, do a dump and load",
  6336. name,
  6337. field->field_name
  6338. );
  6339. error = HA_ERR_UNSUPPORTED;
  6340. goto cleanup;
  6341. }
  6342. }
  6343. newname = (char *)my_malloc(get_max_dict_name_path_length(name),MYF(MY_WME));
  6344. if (newname == NULL){ error = ENOMEM; goto cleanup;}
  6345. if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) {
  6346. txn = trx->sub_sp_level;
  6347. }
  6348. else {
  6349. do_commit = true;
  6350. error = db_env->txn_begin(db_env, 0, &txn, 0);
  6351. if (error) { goto cleanup; }
  6352. }
  6353. primary_key = form->s->primary_key;
  6354. hidden_primary_key = (primary_key >= MAX_KEY) ? TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH : 0;
  6355. if (hidden_primary_key) {
  6356. primary_key = form->s->keys;
  6357. }
  6358. /* do some tracing */
  6359. trace_create_table_info(name,form);
  6360. /* Create status.tokudb and save relevant metadata */
  6361. make_name(newname, name, "status");
  6362. error = tokudb::create_status(db_env, &status_block, newname, txn);
  6363. if (error) { goto cleanup; }
  6364. version = HA_TOKU_VERSION;
  6365. error = write_to_status(status_block, hatoku_new_version,&version,sizeof(version), txn);
  6366. if (error) { goto cleanup; }
  6367. capabilities = HA_TOKU_CAP;
  6368. error = write_to_status(status_block, hatoku_capabilities,&capabilities,sizeof(capabilities), txn);
  6369. if (error) { goto cleanup; }
  6370. error = write_auto_inc_create(status_block, create_info->auto_increment_value, txn);
  6371. if (error) { goto cleanup; }
  6372. #if TOKU_PARTITION_WRITE_FRM_DATA
  6373. error = write_frm_data(status_block, txn, form->s->path.str);
  6374. if (error) { goto cleanup; }
  6375. #else
  6376. // only for tables that are not partitioned
  6377. if (form->part_info == NULL) {
  6378. error = write_frm_data(status_block, txn, form->s->path.str);
  6379. if (error) { goto cleanup; }
  6380. }
  6381. #endif
  6382. error = allocate_key_and_col_info(form->s, &kc_info);
  6383. if (error) { goto cleanup; }
  6384. error = initialize_key_and_col_info(
  6385. form->s,
  6386. form,
  6387. &kc_info,
  6388. hidden_primary_key,
  6389. primary_key
  6390. );
  6391. if (error) { goto cleanup; }
  6392. error = create_main_dictionary(name, form, txn, &kc_info, row_type);
  6393. if (error) {
  6394. goto cleanup;
  6395. }
  6396. for (uint i = 0; i < form->s->keys; i++) {
  6397. if (i != primary_key) {
  6398. error = create_secondary_dictionary(name, form, &form->key_info[i], txn, &kc_info, i, false, row_type);
  6399. if (error) {
  6400. goto cleanup;
  6401. }
  6402. error = write_key_name_to_status(status_block, form->s->key_info[i].name, txn);
  6403. if (error) { goto cleanup; }
  6404. }
  6405. }
  6406. error = add_table_to_metadata(name, form, txn);
  6407. if (error) { goto cleanup; }
  6408. error = 0;
  6409. cleanup:
  6410. if (status_block != NULL) {
  6411. int r = tokudb::close_status(&status_block);
  6412. assert(r==0);
  6413. }
  6414. free_key_and_col_info(&kc_info);
  6415. if (do_commit && txn) {
  6416. if (error) {
  6417. abort_txn(txn);
  6418. }
  6419. else {
  6420. commit_txn(txn,0);
  6421. }
  6422. }
  6423. my_free(newname, MYF(MY_ALLOW_ZERO_PTR));
  6424. pthread_mutex_unlock(&tokudb_meta_mutex);
  6425. TOKUDB_DBUG_RETURN(error);
  6426. }
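// A sketch (not compiled) of the dictionaries the create() path above produces
// for a table with one secondary key named "ix"; the "key-" prefix follows the
// convention visible in delete_or_rename_dictionary() below:
//
//   tokudb::create_status(...)        -> "status" dictionary (version,
//                                        capabilities, auto_inc, frm, key names)
//   create_main_dictionary(...)       -> "main" dictionary (row data)
//   create_secondary_dictionary(...)  -> "key-ix" dictionary, plus a
//                                        write_key_name_to_status() entry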
  6427. int ha_tokudb::discard_or_import_tablespace(my_bool discard) {
  6428. /*
  6429. if (discard) {
  6430. my_errno=HA_ERR_WRONG_COMMAND;
  6431. return my_errno;
  6432. }
  6433. return add_table_to_metadata(share->table_name);
  6434. */
  6435. my_errno=HA_ERR_WRONG_COMMAND;
  6436. return my_errno;
  6437. }
  6438. //
  6439. // deletes from_name or renames from_name to to_name, all using transaction txn.
6440. // is_delete specifies which operation we are doing
6441. // is_key specifies whether the dictionary is a secondary index (and hence a "key-"
6442. // prefix needs to be prepended to secondary_name) or not
  6443. //
  6444. int ha_tokudb::delete_or_rename_dictionary( const char* from_name, const char* to_name, const char* secondary_name, bool is_key, DB_TXN* txn, bool is_delete) {
  6445. int error;
  6446. char dict_name[MAX_DICT_NAME_LEN];
  6447. char* new_from_name = NULL;
  6448. char* new_to_name = NULL;
  6449. assert(txn);
  6450. new_from_name = (char *)my_malloc(
  6451. get_max_dict_name_path_length(from_name),
  6452. MYF(MY_WME)
  6453. );
  6454. if (new_from_name == NULL) {
  6455. error = ENOMEM;
  6456. goto cleanup;
  6457. }
  6458. if (!is_delete) {
  6459. assert(to_name);
  6460. new_to_name = (char *)my_malloc(
  6461. get_max_dict_name_path_length(to_name),
  6462. MYF(MY_WME)
  6463. );
  6464. if (new_to_name == NULL) {
  6465. error = ENOMEM;
  6466. goto cleanup;
  6467. }
  6468. }
  6469. if (is_key) {
  6470. sprintf(dict_name, "key-%s", secondary_name);
  6471. make_name(new_from_name, from_name, dict_name);
  6472. }
  6473. else {
  6474. make_name(new_from_name, from_name, secondary_name);
  6475. }
  6476. if (!is_delete) {
  6477. if (is_key) {
  6478. sprintf(dict_name, "key-%s", secondary_name);
  6479. make_name(new_to_name, to_name, dict_name);
  6480. }
  6481. else {
  6482. make_name(new_to_name, to_name, secondary_name);
  6483. }
  6484. }
  6485. if (is_delete) {
  6486. error = db_env->dbremove(db_env, txn, new_from_name, NULL, 0);
  6487. }
  6488. else {
  6489. error = db_env->dbrename(db_env, txn, new_from_name, NULL, new_to_name, 0);
  6490. }
  6491. if (error) { goto cleanup; }
  6492. cleanup:
  6493. my_free(new_from_name, MYF(MY_ALLOW_ZERO_PTR));
  6494. my_free(new_to_name, MYF(MY_ALLOW_ZERO_PTR));
  6495. return error;
  6496. }
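// A minimal sketch (not compiled) of the naming convention applied above for a
// secondary index named "ix":
#if 0
char dict_name[MAX_DICT_NAME_LEN];
sprintf(dict_name, "key-%s", "ix");             // -> "key-ix"
make_name(new_from_name, from_name, dict_name); // full dictionary name
// main and status dictionaries are addressed with is_key == false, so no prefix
#endif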
  6497. //
  6498. // deletes or renames a table. if is_delete is true, then we delete, and to_name can be NULL
  6499. // if is_delete is false, then to_name must be non-NULL, as we are renaming the table.
  6500. //
  6501. int ha_tokudb::delete_or_rename_table (const char* from_name, const char* to_name, bool is_delete) {
  6502. int error;
  6503. DB* status_db = NULL;
  6504. DBC* status_cursor = NULL;
  6505. DB_TXN* txn = NULL;
  6506. DBT curr_key;
  6507. DBT curr_val;
  6508. memset(&curr_key, 0, sizeof(curr_key));
  6509. memset(&curr_val, 0, sizeof(curr_val));
  6510. pthread_mutex_lock(&tokudb_meta_mutex);
  6511. error = db_env->txn_begin(db_env, 0, &txn, 0);
  6512. if (error) { goto cleanup; }
  6513. //
  6514. // modify metadata db
  6515. //
  6516. if (is_delete) {
  6517. error = drop_table_from_metadata(from_name, txn);
  6518. }
  6519. else {
  6520. error = rename_table_in_metadata(from_name, to_name, txn);
  6521. }
  6522. if (error) { goto cleanup; }
  6523. //
  6524. // open status db,
  6525. // create cursor,
  6526. // for each name read out of there, create a db and delete or rename it
  6527. //
  6528. error = open_status_dictionary(&status_db, from_name, txn);
  6529. if (error) { goto cleanup; }
  6530. error = status_db->cursor(status_db, txn, &status_cursor, 0);
  6531. if (error) { goto cleanup; }
  6532. while (error != DB_NOTFOUND) {
  6533. error = status_cursor->c_get(
  6534. status_cursor,
  6535. &curr_key,
  6536. &curr_val,
  6537. DB_NEXT
  6538. );
  6539. if (error && error != DB_NOTFOUND) { goto cleanup; }
  6540. if (error == DB_NOTFOUND) { break; }
  6541. HA_METADATA_KEY mk = *(HA_METADATA_KEY *)curr_key.data;
  6542. if (mk != hatoku_key_name) {
  6543. continue;
  6544. }
  6545. error = delete_or_rename_dictionary(from_name, to_name, (char *)((char *)curr_key.data + sizeof(HA_METADATA_KEY)), true, txn, is_delete);
  6546. if (error) { goto cleanup; }
  6547. }
  6548. //
  6549. // delete or rename main.tokudb
  6550. //
  6551. error = delete_or_rename_dictionary(from_name, to_name, "main", false, txn, is_delete);
  6552. if (error) { goto cleanup; }
  6553. error = status_cursor->c_close(status_cursor);
  6554. assert(error==0);
  6555. status_cursor = NULL;
  6556. if (error) { goto cleanup; }
  6557. error = status_db->close(status_db, 0);
  6558. assert(error == 0);
  6559. status_db = NULL;
  6560. //
  6561. // delete or rename status.tokudb
  6562. //
  6563. error = delete_or_rename_dictionary(from_name, to_name, "status", false, txn, is_delete);
  6564. if (error) { goto cleanup; }
  6565. my_errno = error;
  6566. cleanup:
  6567. if (status_cursor) {
  6568. int r = status_cursor->c_close(status_cursor);
  6569. assert(r==0);
  6570. }
  6571. if (status_db) {
  6572. int r = status_db->close(status_db, 0);
  6573. assert(r==0);
  6574. }
  6575. if (txn) {
  6576. if (error) {
  6577. abort_txn(txn);
  6578. }
  6579. else {
  6580. commit_txn(txn, 0);
  6581. }
  6582. }
  6583. pthread_mutex_unlock(&tokudb_meta_mutex);
  6584. return error;
  6585. }
  6586. //
  6587. // Drops table
  6588. // Parameters:
  6589. // [in] name - name of table to be deleted
  6590. // Returns:
  6591. // 0 on success
  6592. // error otherwise
  6593. //
  6594. int ha_tokudb::delete_table(const char *name) {
  6595. TOKUDB_DBUG_ENTER("ha_tokudb::delete_table");
  6596. int error;
  6597. error = delete_or_rename_table(name, NULL, true);
  6598. if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
  6599. sql_print_error("Could not delete table %s because \
  6600. another transaction has accessed the table. \
  6601. To drop the table, make sure no transactions touch the table.", name);
  6602. }
  6603. TOKUDB_DBUG_RETURN(error);
  6604. }
  6605. //
  6606. // renames table from "from" to "to"
  6607. // Parameters:
  6608. // [in] name - old name of table
  6609. // [in] to - new name of table
  6610. // Returns:
  6611. // 0 on success
  6612. // error otherwise
  6613. //
  6614. int ha_tokudb::rename_table(const char *from, const char *to) {
  6615. TOKUDB_DBUG_ENTER("%s %s %s", __FUNCTION__, from, to);
  6616. int error;
  6617. error = delete_or_rename_table(from, to, false);
  6618. if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
  6619. sql_print_error("Could not rename table from %s to %s because \
  6620. another transaction has accessed the table. \
  6621. To rename the table, make sure no transactions touch the table.", from, to);
  6622. }
  6623. TOKUDB_DBUG_RETURN(error);
  6624. }
  6625. /*
  6626. Returns estimate on number of seeks it will take to read through the table
  6627. This is to be comparable to the number returned by records_in_range so
  6628. that we can decide if we should scan the table or use keys.
  6629. */
  6630. /// QQQ why divide by 3
  6631. double ha_tokudb::scan_time() {
  6632. TOKUDB_DBUG_ENTER("ha_tokudb::scan_time");
  6633. double ret_val = (double)stats.records / 3;
  6634. DBUG_RETURN(ret_val);
  6635. }
  6636. double ha_tokudb::keyread_time(uint index, uint ranges, ha_rows rows)
  6637. {
  6638. TOKUDB_DBUG_ENTER("ha_tokudb::keyread_time");
  6639. double ret_val;
  6640. if ((table->key_info[index].flags & HA_CLUSTERING) || (index == primary_key)) {
  6641. ret_val = read_time(index, ranges, rows);
  6642. DBUG_RETURN(ret_val);
  6643. }
  6644. /*
6645. It is assumed that we will read through the whole key range and that all
6646. key blocks are half full (normally things are much better). It is also
6647. assumed that each time we read the next key from the index, the handler
6648. performs a random seek, thus the cost is proportional to the number of
6649. blocks read. This model does not take into account clustered indexes -
6650. engines that support that (e.g. InnoDB) may want to override this method.
  6651. */
  6652. double keys_per_block= (stats.block_size/2.0/
  6653. (table->key_info[index].key_length +
  6654. ref_length) + 1);
  6655. ret_val = (rows + keys_per_block - 1)/ keys_per_block;
  6656. DBUG_RETURN(ret_val);
  6657. }
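// A worked example (not compiled) of the formula above, assuming
// stats.block_size = 8192 and key_length + ref_length = 40:
#if 0
double keys_per_block = 8192 / 2.0 / 40 + 1;     // ~103.4 keys per half-full block
double blocks = (1000 + keys_per_block - 1)
                / keys_per_block;                // ~10.7 block reads for 1000 rows
#endif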
  6658. //
  6659. // Calculate the time it takes to read a set of ranges through an index
  6660. // This enables us to optimize reads for clustered indexes.
  6661. // Implementation pulled from InnoDB
  6662. // Parameters:
  6663. // index - index to use
  6664. // ranges - number of ranges
  6665. // rows - estimated number of rows in the range
  6666. // Returns:
  6667. // estimated time measured in disk seeks
  6668. //
  6669. double ha_tokudb::read_time(
  6670. uint index,
  6671. uint ranges,
  6672. ha_rows rows
  6673. )
  6674. {
  6675. TOKUDB_DBUG_ENTER("ha_tokudb::read_time");
  6676. double total_scan;
  6677. double ret_val;
  6678. bool is_primary = (index == primary_key);
  6679. bool is_clustering;
  6680. //
6681. // in the case of a hidden primary key, this can be called with index >= table_share->keys
  6682. //
  6683. if (index >= table_share->keys) {
  6684. ret_val = handler::read_time(index, ranges, rows);
  6685. goto cleanup;
  6686. }
  6687. is_clustering = (table->key_info[index].flags & HA_CLUSTERING);
  6688. //
  6689. // if it is not the primary key, and it is not a clustering key, then return handler::read_time
  6690. //
  6691. if (!(is_primary || is_clustering)) {
  6692. ret_val = handler::read_time(index, ranges, rows);
  6693. goto cleanup;
  6694. }
  6695. //
  6696. // for primary key and for clustered keys, return a fraction of scan_time()
  6697. //
  6698. total_scan = scan_time();
  6699. if (stats.records < rows) {
  6700. ret_val = is_clustering ? total_scan + 0.00001 : total_scan;
  6701. goto cleanup;
  6702. }
  6703. //
  6704. // one disk seek per range plus the proportional scan time of the rows
  6705. //
  6706. ret_val = (ranges + (double) rows / (double) stats.records * total_scan);
  6707. ret_val = is_clustering ? ret_val + 0.00001 : ret_val;
  6708. cleanup:
  6709. DBUG_RETURN(ret_val);
  6710. }
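// A worked example (not compiled) of the clustering-key branch above, assuming
// stats.records = 3000 (so scan_time() = 1000), rows = 100, ranges = 5:
#if 0
double total_scan = 3000 / 3.0;                    // 1000
double cost = 5 + (100.0 / 3000.0) * total_scan;   // 5 + 33.3 = ~38.3 seeks
cost += 0.00001;                                   // clustering tie-breaker
#endif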
  6711. double ha_tokudb::index_only_read_time(uint keynr, double records) {
  6712. TOKUDB_DBUG_ENTER("ha_tokudb::index_only_read_time");
  6713. double ret_val = keyread_time(keynr, 1, (ha_rows)records);
  6714. DBUG_RETURN(ret_val);
  6715. }
  6716. //
  6717. // Estimates the number of index records in a range. In case of errors, return
6718. // HA_TOKUDB_RANGE_COUNT instead of HA_POS_ERROR. This was the behavior
6719. // when we inherited the handlerton from MySQL.
  6720. // Parameters:
6721. // keynr - index to use
  6722. // [in] start_key - low end of the range
  6723. // [in] end_key - high end of the range
  6724. // Returns:
  6725. // 0 - There are no matching keys in the given range
  6726. // number > 0 - There are approximately number matching rows in the range
  6727. // HA_POS_ERROR - Something is wrong with the index tree
  6728. //
  6729. ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range* end_key) {
  6730. TOKUDB_DBUG_ENTER("ha_tokudb::records_in_range");
  6731. DBT *pleft_key = NULL, *pright_key = NULL;
  6732. DBT left_key, right_key;
  6733. ha_rows ret_val = HA_TOKUDB_RANGE_COUNT;
  6734. DB *kfile = share->key_file[keynr];
  6735. uint64_t less, equal1, middle, equal2, greater;
  6736. uint64_t rows;
  6737. bool is_exact;
  6738. int error;
  6739. uchar inf_byte;
  6740. //
6741. // pack the start and end keys so that we can estimate the range.
6742. // when calling keys_range64, the only value we can trust is the value for less.
6743. // The reason is that the key being passed in may be a prefix of keys in the DB.
6744. // As a result, equal may be 0 and greater may actually be equal+greater.
6745. // So, we call keys_range64 on the key, and on the key that is after it.
  6746. //
  6747. if (!start_key && !end_key) {
  6748. error = estimate_num_rows(kfile, &rows, transaction);
  6749. if (error) {
  6750. ret_val = HA_TOKUDB_RANGE_COUNT;
  6751. goto cleanup;
  6752. }
  6753. ret_val = (rows <= 1) ? 1 : rows;
  6754. goto cleanup;
  6755. }
  6756. if (start_key) {
  6757. inf_byte = (start_key->flag == HA_READ_KEY_EXACT) ?
  6758. COL_NEG_INF : COL_POS_INF;
  6759. pack_key(
  6760. &left_key,
  6761. keynr,
  6762. key_buff,
  6763. start_key->key,
  6764. start_key->length,
  6765. inf_byte
  6766. );
  6767. pleft_key = &left_key;
  6768. }
  6769. if (end_key) {
  6770. inf_byte = (end_key->flag == HA_READ_BEFORE_KEY) ?
  6771. COL_NEG_INF : COL_POS_INF;
  6772. pack_key(
  6773. &right_key,
  6774. keynr,
  6775. key_buff2,
  6776. end_key->key,
  6777. end_key->length,
  6778. inf_byte
  6779. );
  6780. pright_key = &right_key;
  6781. }
  6782. error = kfile->keys_range64(
  6783. kfile,
  6784. transaction,
  6785. pleft_key,
  6786. pright_key,
  6787. &less,
  6788. &equal1,
  6789. &middle,
  6790. &equal2,
  6791. &greater,
  6792. &is_exact
  6793. );
  6794. if (error) {
  6795. ret_val = HA_TOKUDB_RANGE_COUNT;
  6796. goto cleanup;
  6797. }
  6798. rows = middle;
  6799. //
  6800. // MySQL thinks a return value of 0 means there are exactly 0 rows
  6801. // Therefore, always return non-zero so this assumption is not made
  6802. //
  6803. ret_val = (ha_rows) (rows <= 1 ? 1 : rows);
  6804. cleanup:
  6805. DBUG_RETURN(ret_val);
  6806. }
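// Note on the keys_range64() counters used above (their exact contract is an
// assumption inferred from this call site): they partition the keyspace as
//
//   less | equal1 | middle | equal2 | greater
//
// around the two probe keys. Because the probes are padded with
// COL_NEG_INF/COL_POS_INF they cannot match exactly, so only `middle`,
// the count strictly between the bounds, is trusted for the estimate.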
  6807. //
  6808. // Initializes the auto-increment data in the local "share" object to the
  6809. // greater of two values: what's stored in the metadata or the last inserted
  6810. // auto-increment field (if auto-increment field is the first field of a key).
  6811. //
  6812. void ha_tokudb::init_auto_increment() {
  6813. DBT key;
  6814. DBT value;
  6815. int error;
  6816. HA_METADATA_KEY key_val = hatoku_max_ai;
  6817. memset(&key, 0, sizeof(key));
  6818. memset(&value, 0, sizeof(value));
  6819. key.data = &key_val;
  6820. key.size = sizeof(key_val);
  6821. value.flags = DB_DBT_USERMEM;
  6822. DB_TXN* txn = NULL;
  6823. error = db_env->txn_begin(db_env, 0, &txn, 0);
  6824. if (error) {
  6825. share->last_auto_increment = 0;
  6826. }
  6827. else {
  6828. //
  6829. // First retrieve hatoku_max_ai, which is max value used by auto increment
  6830. // column so far, the max value could have been auto generated (e.g. insert (NULL))
  6831. // or it could have been manually inserted by user (e.g. insert (345))
  6832. //
  6833. value.ulen = sizeof(share->last_auto_increment);
  6834. value.data = &share->last_auto_increment;
  6835. error = share->status_block->get(
  6836. share->status_block,
  6837. txn,
  6838. &key,
  6839. &value,
  6840. 0
  6841. );
  6842. if (error || value.size != sizeof(share->last_auto_increment)) {
  6843. share->last_auto_increment = 0;
  6844. }
  6845. //
  6846. // Now retrieve the initial auto increment value, as specified by create table
  6847. // so if a user does "create table t1 (a int auto_increment, primary key (a)) auto_increment=100",
  6848. // then the value 100 should be stored here
  6849. //
  6850. key_val = hatoku_ai_create_value;
  6851. value.ulen = sizeof(share->auto_inc_create_value);
  6852. value.data = &share->auto_inc_create_value;
  6853. error = share->status_block->get(
  6854. share->status_block,
  6855. txn,
  6856. &key,
  6857. &value,
  6858. 0
  6859. );
  6860. if (error || value.size != sizeof(share->auto_inc_create_value)) {
  6861. share->auto_inc_create_value = 0;
  6862. }
  6863. commit_txn(txn, 0);
  6864. }
  6865. if (tokudb_debug & TOKUDB_DEBUG_AUTO_INCREMENT) {
  6866. TOKUDB_TRACE("init auto increment:%lld\n", share->last_auto_increment);
  6867. }
  6868. }
  6869. void ha_tokudb::get_auto_increment(ulonglong offset, ulonglong increment, ulonglong nb_desired_values, ulonglong * first_value, ulonglong * nb_reserved_values) {
  6870. TOKUDB_DBUG_ENTER("ha_tokudb::get_auto_increment");
  6871. ulonglong nr;
  6872. bool over;
  6873. pthread_mutex_lock(&share->mutex);
  6874. if (share->auto_inc_create_value > share->last_auto_increment) {
  6875. nr = share->auto_inc_create_value;
  6876. over = false;
  6877. share->last_auto_increment = share->auto_inc_create_value;
  6878. }
  6879. else {
  6880. nr = share->last_auto_increment + increment;
  6881. over = nr < share->last_auto_increment;
  6882. if (over)
  6883. nr = ULONGLONG_MAX;
  6884. }
  6885. if (!over) {
  6886. share->last_auto_increment = nr + (nb_desired_values - 1)*increment;
  6887. if (delay_updating_ai_metadata) {
  6888. ai_metadata_update_required = true;
  6889. }
  6890. else {
  6891. update_max_auto_inc(share->status_block, share->last_auto_increment);
  6892. }
  6893. }
  6894. if (tokudb_debug & TOKUDB_DEBUG_AUTO_INCREMENT) {
  6895. TOKUDB_TRACE("get_auto_increment(%lld,%lld,%lld):got:%lld:%lld\n",
  6896. offset, increment, nb_desired_values, nr, nb_desired_values);
  6897. }
  6898. *first_value = nr;
  6899. *nb_reserved_values = nb_desired_values;
  6900. pthread_mutex_unlock(&share->mutex);
  6901. DBUG_VOID_RETURN;
  6902. }
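// A worked example (not compiled) of the reservation logic above, assuming
// last_auto_increment = 10, increment = 2, nb_desired_values = 3, and no
// larger create-time value:
#if 0
ulonglong nr = 10 + 2;                  // 12 -> *first_value
ulonglong last = nr + (3 - 1) * 2;      // 16; values 12, 14, 16 are reserved
// *nb_reserved_values = 3; the metadata write is deferred when
// delay_updating_ai_metadata is set
#endif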
  6903. bool ha_tokudb::is_optimize_blocking() {
  6904. return false;
  6905. }
  6906. bool ha_tokudb::is_auto_inc_singleton(){
  6907. return false;
  6908. }
  6909. volatile int ha_tokudb_tokudb_add_index_wait = 0; // debug
  6910. volatile int ha_tokudb_build_index_wait = 0; // debug
6911. //
  6916. // Adds indexes to the table. Takes the array of KEY passed in key_info, and creates
  6917. // DB's that will go at the end of share->key_file. THE IMPLICIT ASSUMPTION HERE is
  6918. // that the table will be modified and that these added keys will be appended to the end
  6919. // of the array table->key_info
  6920. // Parameters:
  6921. // [in] table_arg - table that is being modified, seems to be identical to this->table
  6922. // [in] key_info - array of KEY's to be added
  6923. // num_of_keys - number of keys to be added, number of elements in key_info
  6924. // Returns:
  6925. // 0 on success, error otherwise
  6926. //
  6927. int ha_tokudb::tokudb_add_index(
  6928. TABLE *table_arg,
  6929. KEY *key_info,
  6930. uint num_of_keys,
  6931. DB_TXN* txn,
  6932. bool* inc_num_DBs,
  6933. bool* modified_DBs
  6934. )
  6935. {
  6936. TOKUDB_DBUG_ENTER("ha_tokudb::tokudb_add_index");
  6937. assert(txn);
  6938. while (ha_tokudb_tokudb_add_index_wait) sleep(1); // debug
  6939. int error;
  6940. uint curr_index = 0;
  6941. DBC* tmp_cursor = NULL;
  6942. int cursor_ret_val = 0;
  6943. DBT curr_pk_key, curr_pk_val;
  6944. THD* thd = ha_thd();
  6945. DB_LOADER* loader = NULL;
  6946. DB_INDEXER* indexer = NULL;
  6947. bool loader_save_space = get_load_save_space(thd);
  6948. bool use_hot_index = (lock.type == TL_WRITE_ALLOW_WRITE);
  6949. uint32_t loader_flags = loader_save_space ? LOADER_COMPRESS_INTERMEDIATES : 0;
  6950. uint32_t indexer_flags = 0;
  6951. uint32_t mult_db_flags[MAX_KEY + 1] = {0};
  6952. uint32_t mult_put_flags[MAX_KEY + 1];
  6953. uint32_t mult_dbt_flags[MAX_KEY + 1];
  6954. bool creating_hot_index = false;
  6955. struct loader_context lc;
  6956. memset(&lc, 0, sizeof lc);
  6957. lc.thd = thd;
  6958. lc.ha = this;
  6959. loader_error = 0;
  6960. bool rw_lock_taken = false;
  6961. *inc_num_DBs = false;
  6962. *modified_DBs = false;
  6963. invalidate_bulk_fetch();
  6964. unpack_entire_row = true; // for bulk fetching rows
  6965. for (uint32_t i = 0; i < MAX_KEY+1; i++) {
  6966. mult_put_flags[i] = 0;
  6967. mult_dbt_flags[i] = DB_DBT_REALLOC;
  6968. }
  6969. //
  6970. // number of DB files we have open currently, before add_index is executed
  6971. //
  6972. uint curr_num_DBs = table_arg->s->keys + test(hidden_primary_key);
  6973. //
  6974. // get the row type to use for the indexes we're adding
  6975. //
  6976. const enum row_type row_type = get_row_type_for_key(share->file);
  6977. //
  6978. // status message to be shown in "show process list"
  6979. //
  6980. char status_msg[MAX_ALIAS_NAME + 200]; //buffer of 200 should be a good upper bound.
  6981. ulonglong num_processed = 0; //variable that stores number of elements inserted thus far
  6982. thd_proc_info(thd, "Adding indexes");
  6987. memset((void *) &curr_pk_key, 0, sizeof(curr_pk_key));
  6988. memset((void *) &curr_pk_val, 0, sizeof(curr_pk_val));
  6989. //
6990. // The files for secondary tables are derived from the names of the keys.
6991. // If we try to add a key with the same name as an already existing key,
6992. // we can crash. So here we check if any of the keys being added has the same
6993. // name as an existing key, and if so, we fail gracefully
  6994. //
  6995. for (uint i = 0; i < num_of_keys; i++) {
  6996. for (uint j = 0; j < table_arg->s->keys; j++) {
  6997. if (strcmp(key_info[i].name, table_arg->s->key_info[j].name) == 0) {
  6998. error = HA_ERR_WRONG_COMMAND;
  6999. goto cleanup;
  7000. }
  7001. }
  7002. }
  7003. rw_wrlock(&share->num_DBs_lock);
  7004. rw_lock_taken = true;
  7005. //
  7006. // open all the DB files and set the appropriate variables in share
  7007. // they go to the end of share->key_file
  7008. //
  7009. creating_hot_index = use_hot_index && num_of_keys == 1 && (key_info[0].flags & HA_NOSAME) == 0;
  7010. if (use_hot_index && (share->num_DBs > curr_num_DBs)) {
  7011. //
  7012. // already have hot index in progress, get out
  7013. //
  7014. error = HA_ERR_INTERNAL_ERROR;
  7015. goto cleanup;
  7016. }
  7017. curr_index = curr_num_DBs;
  7018. *modified_DBs = true;
  7019. for (uint i = 0; i < num_of_keys; i++, curr_index++) {
  7020. if (key_info[i].flags & HA_CLUSTERING) {
  7021. set_key_filter(
  7022. &share->kc_info.key_filters[curr_index],
  7023. &key_info[i],
  7024. table_arg,
  7025. false
  7026. );
  7027. if (!hidden_primary_key) {
  7028. set_key_filter(
  7029. &share->kc_info.key_filters[curr_index],
  7030. &table_arg->key_info[primary_key],
  7031. table_arg,
  7032. false
  7033. );
  7034. }
  7035. error = initialize_col_pack_info(&share->kc_info,table_arg->s,curr_index);
  7036. if (error) {
  7037. goto cleanup;
  7038. }
  7039. }
  7040. error = create_secondary_dictionary(share->table_name, table_arg, &key_info[i], txn, &share->kc_info, curr_index, creating_hot_index, row_type);
  7041. if (error) { goto cleanup; }
  7042. error = open_secondary_dictionary(
  7043. &share->key_file[curr_index],
  7044. &key_info[i],
  7045. share->table_name,
  7046. false,
  7047. txn
  7048. );
  7049. if (error) { goto cleanup; }
  7050. }
  7051. if (creating_hot_index) {
  7052. share->num_DBs++;
  7053. *inc_num_DBs = true;
  7054. error = db_env->create_indexer(
  7055. db_env,
  7056. txn,
  7057. &indexer,
  7058. share->file,
  7059. num_of_keys,
  7060. &share->key_file[curr_num_DBs],
  7061. mult_db_flags,
  7062. indexer_flags
  7063. );
  7064. if (error) { goto cleanup; }
  7065. error = indexer->set_poll_function(indexer, ai_poll_fun, &lc);
  7066. if (error) { goto cleanup; }
  7067. error = indexer->set_error_callback(indexer, loader_ai_err_fun, &lc);
  7068. if (error) { goto cleanup; }
  7069. rw_unlock(&share->num_DBs_lock);
  7070. rw_lock_taken = false;
  7071. #ifdef HA_TOKUDB_HAS_THD_PROGRESS
  7072. // initialize a one phase progress report.
  7073. // incremental reports are done in the indexer's callback function.
  7074. thd_progress_init(thd, 1);
  7075. #endif
  7076. while (ha_tokudb_build_index_wait) sleep(1); // debug
  7077. error = indexer->build(indexer);
  7078. if (error) { goto cleanup; }
  7079. rw_wrlock(&share->num_DBs_lock);
  7080. error = indexer->close(indexer);
  7081. rw_unlock(&share->num_DBs_lock);
  7082. if (error) { goto cleanup; }
  7083. indexer = NULL;
  7084. }
  7085. else {
  7086. rw_unlock(&share->num_DBs_lock);
  7087. rw_lock_taken = false;
  7088. prelocked_right_range_size = 0;
  7089. prelocked_left_range_size = 0;
  7090. struct smart_dbt_bf_info bf_info;
  7091. bf_info.ha = this;
7092. // you need the val if you have a clustering index and key_read is not 0
  7093. bf_info.direction = 1;
  7094. bf_info.thd = ha_thd();
  7095. bf_info.need_val = true;
  7096. bf_info.key_to_compare = NULL;
  7097. error = db_env->create_loader(
  7098. db_env,
  7099. txn,
  7100. &loader,
  7101. NULL, // no src_db needed
  7102. num_of_keys,
  7103. &share->key_file[curr_num_DBs],
  7104. mult_put_flags,
  7105. mult_dbt_flags,
  7106. loader_flags
  7107. );
  7108. if (error) { goto cleanup; }
  7109. error = loader->set_poll_function(loader, loader_poll_fun, &lc);
  7110. if (error) { goto cleanup; }
  7111. error = loader->set_error_callback(loader, loader_ai_err_fun, &lc);
  7112. if (error) { goto cleanup; }
  7113. //
  7114. // scan primary table, create each secondary key, add to each DB
  7115. //
  7116. if ((error = share->file->cursor(share->file, txn, &tmp_cursor, DB_SERIALIZABLE))) {
  7117. tmp_cursor = NULL; // Safety
  7118. goto cleanup;
  7119. }
  7120. //
  7121. // grab some locks to make this go faster
  7122. // first a global read lock on the main DB, because
  7123. // we intend to scan the entire thing
  7124. //
  7125. error = tmp_cursor->c_set_bounds(
  7126. tmp_cursor,
  7127. share->file->dbt_neg_infty(),
  7128. share->file->dbt_pos_infty(),
  7129. true,
  7130. 0
  7131. );
  7132. if (error) { goto cleanup; }
  7133. // set the bulk fetch iteration to its max so that adding an
  7134. // index fills the bulk fetch buffer every time. we do not
  7135. // want it to grow exponentially fast.
  7136. rows_fetched_using_bulk_fetch = 0;
  7137. bulk_fetch_iteration = HA_TOKU_BULK_FETCH_ITERATION_MAX;
  7138. cursor_ret_val = tmp_cursor->c_getf_next(tmp_cursor, DB_PRELOCKED,smart_dbt_bf_callback, &bf_info);
  7139. #ifdef HA_TOKUDB_HAS_THD_PROGRESS
  7140. // initialize a two phase progress report.
  7141. // first phase: putting rows into the loader
  7142. thd_progress_init(thd, 2);
  7143. #endif
  7144. while (cursor_ret_val != DB_NOTFOUND || ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0)) {
  7145. if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) == 0) {
  7146. invalidate_bulk_fetch(); // reset the buffers
  7147. cursor_ret_val = tmp_cursor->c_getf_next(tmp_cursor, DB_PRELOCKED, smart_dbt_bf_callback, &bf_info);
  7148. if (cursor_ret_val != DB_NOTFOUND && cursor_ret_val != 0) {
  7149. error = cursor_ret_val;
  7150. goto cleanup;
  7151. }
  7152. }
7153. // do this check in case the c_getf_next did not put anything into the buffer because
  7154. // there was no more data
  7155. if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) == 0) {
  7156. break;
  7157. }
  7158. // at this point, we know the range query buffer has at least one key/val pair
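// the buffer layout parsed below is, repeated per pair (sizes are
// native-endian uint32_t, as written by the bulk fetch callback):
//   [key_size][key bytes][val_size][val bytes]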
  7159. uchar* curr_pos = range_query_buff+curr_range_query_buff_offset;
  7160. uint32_t key_size = *(uint32_t *)curr_pos;
  7161. curr_pos += sizeof(key_size);
  7162. uchar* curr_key_buff = curr_pos;
  7163. curr_pos += key_size;
  7164. curr_pk_key.data = curr_key_buff;
  7165. curr_pk_key.size = key_size;
  7166. uint32_t val_size = *(uint32_t *)curr_pos;
  7167. curr_pos += sizeof(val_size);
  7168. uchar* curr_val_buff = curr_pos;
  7169. curr_pos += val_size;
  7170. curr_pk_val.data = curr_val_buff;
  7171. curr_pk_val.size = val_size;
  7172. curr_range_query_buff_offset = curr_pos - range_query_buff;
  7173. error = loader->put(loader, &curr_pk_key, &curr_pk_val);
  7174. if (error) { goto cleanup; }
  7175. num_processed++;
  7176. if ((num_processed % 1000) == 0) {
  7177. sprintf(status_msg, "Adding indexes: Fetched %llu of about %llu rows, loading of data still remains.", num_processed, (long long unsigned) share->rows);
  7178. thd_proc_info(thd, status_msg);
  7179. #ifdef HA_TOKUDB_HAS_THD_PROGRESS
  7180. thd_progress_report(thd, num_processed, (long long unsigned) share->rows);
  7181. #endif
  7182. if (thd->killed) {
  7183. error = ER_ABORTING_CONNECTION;
  7184. goto cleanup;
  7185. }
  7186. }
  7187. }
  7188. error = tmp_cursor->c_close(tmp_cursor);
  7189. assert(error==0);
  7190. tmp_cursor = NULL;
  7191. #ifdef HA_TOKUDB_HAS_THD_PROGRESS
  7192. // next progress report phase: closing the loader.
  7193. // incremental reports are done in the loader's callback function.
  7194. thd_progress_next_stage(thd);
  7195. #endif
  7196. error = loader->close(loader);
  7197. loader = NULL;
  7198. if (error) goto cleanup;
  7199. }
  7200. curr_index = curr_num_DBs;
  7201. for (uint i = 0; i < num_of_keys; i++, curr_index++) {
  7202. if (key_info[i].flags & HA_NOSAME) {
  7203. bool is_unique;
  7204. error = is_index_unique(
  7205. &is_unique,
  7206. txn,
  7207. share->key_file[curr_index],
  7208. &key_info[i]
  7209. );
  7210. if (error) goto cleanup;
  7211. if (!is_unique) {
  7212. error = HA_ERR_FOUND_DUPP_KEY;
  7213. last_dup_key = i;
  7214. goto cleanup;
  7215. }
  7216. }
  7217. }
  7218. //
  7219. // We have an accurate row count, might as well update share->rows
  7220. //
7221. if (!creating_hot_index) {
  7222. pthread_mutex_lock(&share->mutex);
  7223. share->rows = num_processed;
  7224. pthread_mutex_unlock(&share->mutex);
  7225. }
  7226. //
  7227. // now write stuff to status.tokudb
  7228. //
  7229. pthread_mutex_lock(&share->mutex);
  7230. for (uint i = 0; i < num_of_keys; i++) {
  7231. write_key_name_to_status(share->status_block, key_info[i].name, txn);
  7232. }
  7233. pthread_mutex_unlock(&share->mutex);
  7234. error = 0;
  7235. cleanup:
  7236. #ifdef HA_TOKUDB_HAS_THD_PROGRESS
  7237. thd_progress_end(thd);
  7238. #endif
  7239. if (rw_lock_taken) {
  7240. rw_unlock(&share->num_DBs_lock);
  7241. rw_lock_taken = false;
  7242. }
  7243. if (tmp_cursor) {
  7244. int r = tmp_cursor->c_close(tmp_cursor);
  7245. assert(r==0);
  7246. tmp_cursor = NULL;
  7247. }
  7248. if (loader != NULL) {
  7249. sprintf(status_msg, "aborting creation of indexes.");
  7250. thd_proc_info(thd, status_msg);
  7251. loader->abort(loader);
  7252. }
  7253. if (indexer != NULL) {
  7254. sprintf(status_msg, "aborting creation of indexes.");
  7255. thd_proc_info(thd, status_msg);
  7256. rw_wrlock(&share->num_DBs_lock);
  7257. indexer->abort(indexer);
  7258. rw_unlock(&share->num_DBs_lock);
  7259. }
  7260. if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
  7261. sql_print_error("Could not add indexes to table %s because \
  7262. another transaction has accessed the table. \
  7263. To add indexes, make sure no transactions touch the table.", share->table_name);
  7264. }
  7265. TOKUDB_DBUG_RETURN(error ? error : loader_error);
  7266. }
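// A summary sketch (not compiled) of the two index-build paths above:
#if 0
if (lock.type == TL_WRITE_ALLOW_WRITE && num_of_keys == 1 &&
    (key_info[0].flags & HA_NOSAME) == 0) {
    // hot path: db_env->create_indexer() + indexer->build(), concurrent
    // writers are allowed while the index is built
} else {
    // offline path: scan the primary with a prelocked cursor, feed every
    // key/val pair to db_env->create_loader(), then loader->close()
}
#endif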
  7267. //
  7268. // Internal function called by ha_tokudb::add_index and ha_tokudb::alter_table_phase2
  7269. // Closes added indexes in case of error in error path of add_index and alter_table_phase2
  7270. //
  7271. void ha_tokudb::restore_add_index(TABLE* table_arg, uint num_of_keys, bool incremented_numDBs, bool modified_DBs) {
  7272. uint curr_num_DBs = table_arg->s->keys + test(hidden_primary_key);
  7273. uint curr_index = 0;
  7274. //
  7275. // need to restore num_DBs, and we have to do it before we close the dictionaries
  7276. // so that there is not a window
  7277. //
  7278. if (incremented_numDBs) {
  7279. rw_wrlock(&share->num_DBs_lock);
  7280. share->num_DBs--;
  7281. }
  7282. if (modified_DBs) {
  7283. curr_index = curr_num_DBs;
  7284. for (uint i = 0; i < num_of_keys; i++, curr_index++) {
  7285. reset_key_and_col_info(&share->kc_info, curr_index);
  7286. }
  7287. curr_index = curr_num_DBs;
  7288. for (uint i = 0; i < num_of_keys; i++, curr_index++) {
  7289. if (share->key_file[curr_index]) {
  7290. int r = share->key_file[curr_index]->close(
  7291. share->key_file[curr_index],
  7292. 0
  7293. );
  7294. assert(r==0);
  7295. share->key_file[curr_index] = NULL;
  7296. }
  7297. }
  7298. }
  7299. if (incremented_numDBs) {
  7300. rw_unlock(&share->num_DBs_lock);
  7301. }
  7302. }
  7303. volatile int ha_tokudb_drop_indexes_wait = 0; // debug
  7304. //
  7305. // Internal function called by ha_tokudb::prepare_drop_index and ha_tokudb::alter_table_phase2
  7306. // With a transaction, drops dictionaries associated with indexes in key_num
  7307. //
  7308. int ha_tokudb::drop_indexes(TABLE *table_arg, uint *key_num, uint num_of_keys, KEY *key_info, DB_TXN* txn) {
  7309. TOKUDB_DBUG_ENTER("ha_tokudb::drop_indexes");
  7310. assert(txn);
  7311. while (ha_tokudb_drop_indexes_wait) sleep(1); // debug
  7312. int error = 0;
  7313. for (uint i = 0; i < num_of_keys; i++) {
  7314. uint curr_index = key_num[i];
  7315. error = share->key_file[curr_index]->pre_acquire_fileops_lock(share->key_file[curr_index],txn);
  7316. if (error != 0) {
  7317. goto cleanup;
  7318. }
  7319. }
  7320. for (uint i = 0; i < num_of_keys; i++) {
  7321. uint curr_index = key_num[i];
  7322. int r = share->key_file[curr_index]->close(share->key_file[curr_index],0);
  7323. assert(r==0);
  7324. share->key_file[curr_index] = NULL;
  7325. error = remove_key_name_from_status(share->status_block, key_info[curr_index].name, txn);
  7326. if (error) { goto cleanup; }
  7327. error = delete_or_rename_dictionary(share->table_name, NULL, key_info[curr_index].name, true, txn, true);
  7328. if (error) { goto cleanup; }
  7329. }
  7330. cleanup:
  7331. if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
  7332. sql_print_error("Could not drop indexes from table %s because \
  7333. another transaction has accessed the table. \
  7334. To drop indexes, make sure no transactions touch the table.", share->table_name);
  7335. }
  7336. TOKUDB_DBUG_RETURN(error);
  7337. }
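// Note: the two loops above are deliberately ordered so that every fileops
// lock is acquired before any dictionary is closed or removed; a lock conflict
// therefore fails the whole drop before anything has been mutated.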
  7338. //
  7339. // Internal function called by ha_tokudb::prepare_drop_index and ha_tokudb::alter_table_phase2
  7340. // Restores dropped indexes in case of error in error path of prepare_drop_index and alter_table_phase2
  7341. //
  7342. void ha_tokudb::restore_drop_indexes(TABLE *table_arg, uint *key_num, uint num_of_keys) {
  7343. //
  7344. // reopen closed dictionaries
  7345. //
  7346. for (uint i = 0; i < num_of_keys; i++) {
  7347. int r;
  7348. uint curr_index = key_num[i];
  7349. if (share->key_file[curr_index] == NULL) {
  7350. r = open_secondary_dictionary(
  7351. &share->key_file[curr_index],
  7352. &table_share->key_info[curr_index],
  7353. share->table_name,
7354. false,
  7355. NULL
  7356. );
  7357. assert(!r);
  7358. }
  7359. }
  7360. }
  7361. void ha_tokudb::print_error(int error, myf errflag) {
  7362. if (error == DB_LOCK_DEADLOCK)
  7363. error = HA_ERR_LOCK_DEADLOCK;
  7364. if (error == DB_LOCK_NOTGRANTED)
  7365. error = HA_ERR_LOCK_WAIT_TIMEOUT;
  7366. #if defined(HA_ERR_DISK_FULL)
  7367. if (error == ENOSPC) {
  7368. error = HA_ERR_DISK_FULL;
  7369. }
  7370. #endif
  7371. if (error == DB_KEYEXIST) {
  7372. error = HA_ERR_FOUND_DUPP_KEY;
  7373. }
  7374. #if defined(HA_ALTER_ERROR)
  7375. if (error == HA_ALTER_ERROR) {
  7376. error = HA_ERR_UNSUPPORTED;
  7377. }
  7378. #endif
  7379. // TODO: should rename debug code to something better
7380. // just reusing this so that tests don't start complaining
  7381. #if MYSQL_VERSION_ID < 50500
  7382. if ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) {
  7383. THD* thd = ha_thd();
  7384. if (get_log_client_errors(thd)) {
  7385. sql_print_error("query \"%s\" returned handler error %d", thd->query_string.str, error);
  7386. }
  7387. }
  7388. #endif
  7389. handler::print_error(error, errflag);
  7390. }
  7391. //
  7392. // truncate's dictionary associated with keynr index using transaction txn
  7393. // does so by deleting and then recreating the dictionary in the context
  7394. // of a transaction
  7395. //
  7396. int ha_tokudb::truncate_dictionary( uint keynr, DB_TXN* txn ) {
  7397. int error;
  7398. bool is_pk = (keynr == primary_key);
  7399. const enum row_type row_type = get_row_type_for_key(share->key_file[keynr]);
  7400. error = share->key_file[keynr]->close(share->key_file[keynr], 0);
  7401. assert(error == 0);
  7402. share->key_file[keynr] = NULL;
  7403. if (is_pk) { share->file = NULL; }
  7404. if (is_pk) {
  7405. error = delete_or_rename_dictionary(
  7406. share->table_name,
  7407. NULL,
  7408. "main",
  7409. false, //is_key
  7410. txn,
  7411. true // is a delete
  7412. );
  7413. if (error) { goto cleanup; }
  7414. }
  7415. else {
  7416. error = delete_or_rename_dictionary(
  7417. share->table_name,
  7418. NULL,
  7419. table_share->key_info[keynr].name,
  7420. true, //is_key
  7421. txn,
  7422. true // is a delete
  7423. );
  7424. if (error) { goto cleanup; }
  7425. }
  7426. if (is_pk) {
  7427. error = create_main_dictionary(share->table_name, table, txn, &share->kc_info, row_type);
  7428. }
  7429. else {
  7430. error = create_secondary_dictionary(
  7431. share->table_name,
  7432. table,
  7433. &table_share->key_info[keynr],
  7434. txn,
  7435. &share->kc_info,
  7436. keynr,
  7437. false,
  7438. row_type
  7439. );
  7440. }
  7441. if (error) { goto cleanup; }
  7442. cleanup:
  7443. return error;
  7444. }
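// Note: "truncate" above is close + delete + recreate of the dictionary inside
// the caller's transaction, so on abort the old dictionary survives intact and
// on commit an empty one atomically replaces it.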
  7445. volatile int ha_tokudb_truncate_wait = 0; // debug
7446. // for MySQL 5.5
  7447. int ha_tokudb::truncate() {
  7448. TOKUDB_DBUG_ENTER("truncate");
  7449. while (ha_tokudb_truncate_wait) sleep(1); // debug
  7450. int error = delete_all_rows_internal();
  7451. TOKUDB_DBUG_RETURN(error);
  7452. }
  7453. // delete all rows from a table
  7454. //
  7455. // effects: delete all of the rows in the main dictionary and all of the
  7456. // indices. this must be atomic, so we use the statement transaction
  7457. // for all of the truncate operations.
  7458. // locks: if we have an exclusive table write lock, all of the concurrency
  7459. // issues go away.
  7460. // returns: 0 if success
  7461. int ha_tokudb::delete_all_rows() {
  7462. TOKUDB_DBUG_ENTER("delete_all_rows");
  7463. int error = 0;
  7464. if (thd_sql_command(ha_thd()) != SQLCOM_TRUNCATE) {
  7465. share->try_table_lock = true;
  7466. error = HA_ERR_WRONG_COMMAND;
  7467. }
  7468. if (error == 0)
  7469. error = delete_all_rows_internal();
  7470. TOKUDB_DBUG_RETURN(error);
  7471. }
  7472. int ha_tokudb::delete_all_rows_internal() {
  7473. TOKUDB_DBUG_ENTER("delete_all_rows_internal");
  7474. int error = 0;
  7475. uint curr_num_DBs = 0;
  7476. DB_TXN* txn = NULL;
  7477. error = db_env->txn_begin(db_env, 0, &txn, 0);
  7478. if (error) { goto cleanup; }
  7479. curr_num_DBs = table->s->keys + test(hidden_primary_key);
  7480. for (uint i = 0; i < curr_num_DBs; i++) {
  7481. error = share->key_file[i]->pre_acquire_fileops_lock(
  7482. share->key_file[i],
  7483. txn
  7484. );
  7485. if (error) { goto cleanup; }
  7486. error = share->key_file[i]->pre_acquire_table_lock(
  7487. share->key_file[i],
  7488. txn
  7489. );
  7490. if (error) { goto cleanup; }
  7491. }
  7492. for (uint i = 0; i < curr_num_DBs; i++) {
  7493. error = truncate_dictionary(i, txn);
  7494. if (error) { goto cleanup; }
  7495. }
  7496. // zap the row count
  7497. if (error == 0) {
  7498. share->rows = 0;
  7499. // update auto increment
  7500. share->last_auto_increment = 0;
  7501. // calling write_to_status directly because we need to use txn
  7502. write_to_status(
  7503. share->status_block,
  7504. hatoku_max_ai,
  7505. &share->last_auto_increment,
  7506. sizeof(share->last_auto_increment),
  7507. txn
  7508. );
  7509. }
  7510. share->try_table_lock = true;
  7511. cleanup:
  7512. if (txn) {
  7513. if (error) {
  7514. abort_txn(txn);
  7515. }
  7516. else {
  7517. commit_txn(txn,0);
  7518. }
  7519. }
  7520. if (error == DB_LOCK_NOTGRANTED && ((tokudb_debug & TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0)) {
  7521. sql_print_error("Could not truncate table %s because another transaction has accessed the \
  7522. table. To truncate the table, make sure no transactions touch the table.",
  7523. share->table_name);
  7524. }
  7525. //
  7526. // regardless of errors, need to reopen the DB's
  7527. //
  7528. for (uint i = 0; i < curr_num_DBs; i++) {
  7529. int r = 0;
  7530. if (share->key_file[i] == NULL) {
  7531. if (i != primary_key) {
  7532. r = open_secondary_dictionary(
  7533. &share->key_file[i],
  7534. &table_share->key_info[i],
  7535. share->table_name,
7536. false,
  7537. NULL
  7538. );
  7539. assert(!r);
  7540. }
  7541. else {
  7542. r = open_main_dictionary(
  7543. share->table_name,
  7544. false,
  7545. NULL
  7546. );
  7547. assert(!r);
  7548. }
  7549. }
  7550. }
  7551. TOKUDB_DBUG_RETURN(error);
  7552. }
  7553. void ha_tokudb::set_loader_error(int err) {
  7554. loader_error = err;
  7555. }
  7556. void ha_tokudb::set_dup_value_for_pk(DBT* key) {
  7557. assert(!hidden_primary_key);
  7558. unpack_key(table->record[0],key,primary_key);
  7559. last_dup_key = primary_key;
  7560. }
  7561. void ha_tokudb::close_dsmrr() {
  7562. #ifdef MARIADB_BASE_VERSION
  7563. ds_mrr.dsmrr_close();
  7564. #elif 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
  7565. ds_mrr.dsmrr_close();
  7566. #endif
  7567. }
  7568. void ha_tokudb::reset_dsmrr() {
  7569. #ifdef MARIADB_BASE_VERSION
  7570. ds_mrr.dsmrr_close();
  7571. #elif 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
  7572. ds_mrr.reset();
  7573. #endif
  7574. }
  7575. // we cache the information so we can do filtering ourselves,
  7576. // but as far as MySQL knows, we are not doing any filtering,
  7577. // so if we happen to miss filtering a row that does not match
  7578. // idx_cond_arg, MySQL will catch it.
7579. // This allows us to deal only with index_next and index_prev,
7580. // without needing to worry about the other index_XXX functions
  7581. Item* ha_tokudb::idx_cond_push(uint keyno_arg, Item* idx_cond_arg) {
  7582. toku_pushed_idx_cond_keyno = keyno_arg;
  7583. toku_pushed_idx_cond = idx_cond_arg;
  7584. return idx_cond_arg;
  7585. }
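// Note: returning idx_cond_arg unchanged tells the server that the pushed
// condition was not (fully) consumed, so MySQL re-evaluates it on every
// returned row; that is what makes the best-effort filtering described
// above safe.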
  7586. // table admin
  7587. #include "ha_tokudb_admin.cc"
  7588. // update functions
  7589. #include "tokudb_update_fun.cc"
  7590. // fast updates
  7591. #include "ha_tokudb_update.cc"
  7592. // alter table code for various mysql distros
  7593. #include "ha_tokudb_alter_55.cc"
  7594. #include "ha_tokudb_alter_56.cc"
  7595. // mrr
  7596. #ifdef MARIADB_BASE_VERSION
  7597. #include "ha_tokudb_mrr_maria.cc"
  7598. #elif 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
  7599. #include "ha_tokudb_mrr_mysql.cc"
  7600. #endif
  7601. // key comparisons
  7602. #include "hatoku_cmp.cc"
  7603. // handlerton
  7604. #include "hatoku_hton.cc"
  7605. // generate template functions
  7606. namespace tokudb {
  7607. template size_t vlq_encode_ui(uint32_t n, void *p, size_t s);
  7608. template size_t vlq_decode_ui(uint32_t *np, void *p, size_t s);
  7609. template size_t vlq_encode_ui(uint64_t n, void *p, size_t s);
  7610. template size_t vlq_decode_ui(uint64_t *np, void *p, size_t s);
7611. } // namespace tokudb
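// A usage sketch (not compiled) for the variable-length-quantity helpers
// instantiated above; the return-value convention (bytes consumed, 0 on
// failure) is an assumption:
#if 0
unsigned char buf[5];
size_t n_enc = tokudb::vlq_encode_ui(300u, buf, sizeof buf);
uint32_t out;
size_t n_dec = tokudb::vlq_decode_ui(&out, buf, n_enc);
assert(n_enc != 0 && n_enc == n_dec && out == 300);
#endif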