#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation          // gcc: Class implementation
#endif

#define MYSQL_SERVER 1
#include "mysql_priv.h"

#if !defined(HA_END_SPACE_KEY) || HA_END_SPACE_KEY != 0
#error
#endif

unsigned long my_getphyspages() {
    return sysconf(_SC_PHYS_PAGES);
}

#include <syscall.h>
unsigned int my_tid() {
    return syscall(__NR_gettid);
}
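//
// Note on the two helpers below: they paper over the change to THD::ha_data
// made after MySQL 5.1.23, where the slot became a struct and the engine's
// pointer moved into its ha_ptr member. Typical use, as in tokudb_commit()
// later in this file:
//   tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot);
//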
static inline void *thd_data_get(THD *thd, int slot) {
#if MYSQL_VERSION_ID <= 50123
    return thd->ha_data[slot];
#else
    return thd->ha_data[slot].ha_ptr;
#endif
}

static inline void thd_data_set(THD *thd, int slot, void *data) {
#if MYSQL_VERSION_ID <= 50123
    thd->ha_data[slot] = data;
#else
    thd->ha_data[slot].ha_ptr = data;
#endif
}

#undef PACKAGE
#undef VERSION
#undef HAVE_DTRACE
#undef _DTRACE_VERSION

//#include "tokudb_config.h"

/* We define DTRACE after mysql_priv.h in case it disabled dtrace in the main server */
#ifdef HAVE_DTRACE
#define _DTRACE_VERSION 1
#else
#endif

#include "tokudb_probes.h"
#include "hatoku_defines.h"
#include "ha_tokudb.h"
#include <mysql/plugin.h>

static handler *tokudb_create_handler(handlerton * hton, TABLE_SHARE * table, MEM_ROOT * mem_root);

handlerton *tokudb_hton;
typedef struct st_tokudb_trx_data {
    DB_TXN *all;                // transaction spanning the whole multi-statement transaction
    DB_TXN *stmt;               // transaction for the current statement
    DB_TXN *sp_level;           // innermost savepoint transaction
    uint tokudb_lock_count;
} tokudb_trx_data;
const char *ha_tokudb_ext = ".tokudb";

//static my_bool tokudb_shared_data = FALSE;
static u_int32_t tokudb_init_flags =
    DB_CREATE | DB_THREAD | DB_PRIVATE |
    DB_INIT_LOCK |
    DB_INIT_MPOOL |
    DB_INIT_TXN |
    0 |                         // disabled for 1.0.2 DB_INIT_LOG |
    0;                          // disabled for 1.0.1 DB_RECOVER;
static u_int32_t tokudb_env_flags = DB_LOG_AUTOREMOVE;
//static u_int32_t tokudb_lock_type = DB_LOCK_DEFAULT;
//static ulong tokudb_log_buffer_size = 0;
//static ulong tokudb_log_file_size = 0;
static ulonglong tokudb_cache_size = 0;
static uint tokudb_cache_memory_percent = 50;
static char *tokudb_home;
//static char *tokudb_tmpdir;
static char *tokudb_data_dir;
static char *tokudb_log_dir;
//static long tokudb_lock_scan_time = 0;
//static ulong tokudb_region_size = 0;
//static ulong tokudb_cache_parts = 1;
static ulong tokudb_max_lock;
static ulong tokudb_debug;
#ifdef TOKUDB_VERSION
static char *tokudb_version = TOKUDB_VERSION;
#else
static char *tokudb_version;
#endif
static DB_ENV *db_env;

static const char tokudb_hton_name[] = "TokuDB";
static const int tokudb_hton_name_length = sizeof(tokudb_hton_name) - 1;

// thread variables
static MYSQL_THDVAR_BOOL(commit_sync, PLUGIN_VAR_THDLOCAL, "sync on txn commit",
                         /* check */ NULL, /* update */ NULL, /* default */ TRUE);

static void tokudb_print_error(const DB_ENV * db_env, const char *db_errpfx, const char *buffer);
static void tokudb_cleanup_log_files(void);
static TOKUDB_SHARE *get_share(const char *table_name, TABLE * table);
static int free_share(TOKUDB_SHARE * share, TABLE * table, uint hidden_primary_key, bool mutex_is_locked);
static int tokudb_end(handlerton * hton, ha_panic_function type);
static bool tokudb_flush_logs(handlerton * hton);
static bool tokudb_show_status(handlerton * hton, THD * thd, stat_print_fn * print, enum ha_stat_type);
static int tokudb_close_connection(handlerton * hton, THD * thd);
static int tokudb_commit(handlerton * hton, THD * thd, bool all);
static int tokudb_rollback(handlerton * hton, THD * thd, bool all);
static uint tokudb_alter_table_flags(uint flags);
#if 0
static int tokudb_rollback_to_savepoint(handlerton * hton, THD * thd, void *savepoint);
static int tokudb_savepoint(handlerton * hton, THD * thd, void *savepoint);
static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoint);
#endif
static bool tokudb_show_logs(THD * thd, stat_print_fn * stat_print);

static HASH tokudb_open_tables;
pthread_mutex_t tokudb_mutex;

static uchar *tokudb_get_key(TOKUDB_SHARE * share, size_t * length, my_bool not_used __attribute__ ((unused))) {
    *length = share->table_name_length;
    return (uchar *) share->table_name;
}

static int tokudb_init_func(void *p) {
    TOKUDB_DBUG_ENTER("tokudb_init_func");
    tokudb_hton = (handlerton *) p;
    VOID(pthread_mutex_init(&tokudb_mutex, MY_MUTEX_INIT_FAST));
    (void) hash_init(&tokudb_open_tables, system_charset_info, 32, 0, 0, (hash_get_key) tokudb_get_key, 0, 0);
    tokudb_hton->state = SHOW_OPTION_YES;
    // tokudb_hton->flags= HTON_CAN_RECREATE;  // QQQ this came from skeleton
    tokudb_hton->flags = HTON_CLOSE_CURSORS_AT_COMMIT | HTON_FLUSH_AFTER_RENAME;
#ifdef DB_TYPE_TOKUDB
    tokudb_hton->db_type = DB_TYPE_TOKUDB;
#else
    tokudb_hton->db_type = DB_TYPE_UNKNOWN;
#endif
    tokudb_hton->create = tokudb_create_handler;
    tokudb_hton->close_connection = tokudb_close_connection;
#if 0
    tokudb_hton->savepoint_offset = sizeof(DB_TXN *);
    tokudb_hton->savepoint_set = tokudb_savepoint;
    tokudb_hton->savepoint_rollback = tokudb_rollback_to_savepoint;
    tokudb_hton->savepoint_release = tokudb_release_savepoint;
#endif
    tokudb_hton->commit = tokudb_commit;
    tokudb_hton->rollback = tokudb_rollback;
    tokudb_hton->panic = tokudb_end;
    tokudb_hton->flush_logs = tokudb_flush_logs;
    tokudb_hton->show_status = tokudb_show_status;
    tokudb_hton->alter_table_flags = tokudb_alter_table_flags;
#if 0
    if (!tokudb_tmpdir)
        tokudb_tmpdir = mysql_tmpdir;
    DBUG_PRINT("info", ("tokudb_tmpdir: %s", tokudb_tmpdir));
#endif
    if (!tokudb_home)
        tokudb_home = mysql_real_data_home;
    DBUG_PRINT("info", ("tokudb_home: %s", tokudb_home));
#if 0
    if (!tokudb_log_buffer_size) {      // QQQ
        tokudb_log_buffer_size = max(table_cache_size * 512, 32 * 1024);
        DBUG_PRINT("info", ("computing tokudb_log_buffer_size %ld\n", tokudb_log_buffer_size));
    }
    tokudb_log_file_size = tokudb_log_buffer_size * 4;
    tokudb_log_file_size = MY_ALIGN(tokudb_log_file_size, 1024 * 1024L);
    tokudb_log_file_size = max(tokudb_log_file_size, 10 * 1024 * 1024L);
    DBUG_PRINT("info", ("computing tokudb_log_file_size: %ld\n", tokudb_log_file_size));
#endif
    int r;
    if ((r = db_env_create(&db_env, 0))) {
        DBUG_PRINT("info", ("db_env_create %d\n", r));
        goto error;
    }
    DBUG_PRINT("info", ("tokudb_env_flags: 0x%x\n", tokudb_env_flags));
    r = db_env->set_flags(db_env, tokudb_env_flags, 1);
    if (r) { // QQQ
        if (tokudb_debug & TOKUDB_DEBUG_INIT)
            TOKUDB_TRACE("%s:WARNING: flags=%x r=%d\n", __FUNCTION__, tokudb_env_flags, r);
        // goto error;
    }
    // config error handling
    db_env->set_errcall(db_env, tokudb_print_error);
    db_env->set_errpfx(db_env, "TokuDB");
    // config directories
#if 0
    DBUG_PRINT("info", ("tokudb_tmpdir: %s\n", tokudb_tmpdir));
    db_env->set_tmp_dir(db_env, tokudb_tmpdir);
#endif
    {
        char *data_dir = tokudb_data_dir;
        if (data_dir == 0)
            data_dir = mysql_data_home;
        DBUG_PRINT("info", ("tokudb_data_dir: %s\n", data_dir));
        db_env->set_data_dir(db_env, data_dir);
    }
    if (tokudb_log_dir) {
        DBUG_PRINT("info", ("tokudb_log_dir: %s\n", tokudb_log_dir));
        db_env->set_lg_dir(db_env, tokudb_log_dir);
    }
    // config the cache table
    if (tokudb_cache_size == 0) {
        unsigned long pagesize = my_getpagesize();
        unsigned long long npages = my_getphyspages();
        unsigned long long physmem = npages * pagesize;
        tokudb_cache_size = (ulonglong) (physmem * (tokudb_cache_memory_percent / 100.0));
    }
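    // For illustration (numbers assumed): on a machine with 4 KiB pages and
    // 4M physical pages (16 GiB of RAM), the default of 50% yields an 8 GiB
    // cache; set_cachesize() below then receives gbytes=8, bytes=0.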
    if (tokudb_cache_size) {
        DBUG_PRINT("info", ("tokudb_cache_size: %lld\n", tokudb_cache_size));
        r = db_env->set_cachesize(db_env, tokudb_cache_size / (1024 * 1024L * 1024L), tokudb_cache_size % (1024L * 1024L * 1024L), 1);
        if (r) {
            DBUG_PRINT("info", ("set_cachesize %d\n", r));
            goto error;
        }
    }
    u_int32_t gbytes, bytes; int parts;
    r = db_env->get_cachesize(db_env, &gbytes, &bytes, &parts);
    if (r == 0)
        if (tokudb_debug & TOKUDB_DEBUG_INIT)
            TOKUDB_TRACE("%s:tokudb_cache_size=%lld\n", __FUNCTION__, ((unsigned long long) gbytes << 30) + bytes);
#if 0
    // QQQ config the logs
    DBUG_PRINT("info", ("tokudb_log_file_size: %ld\n", tokudb_log_file_size));
    db_env->set_lg_max(db_env, tokudb_log_file_size);
    DBUG_PRINT("info", ("tokudb_log_buffer_size: %ld\n", tokudb_log_buffer_size));
    db_env->set_lg_bsize(db_env, tokudb_log_buffer_size);
    // DBUG_PRINT("info",("tokudb_region_size: %ld\n", tokudb_region_size));
    // db_env->set_lg_regionmax(db_env, tokudb_region_size);
#endif
    // config the locks
#if 0 // QQQ no lock types yet
    DBUG_PRINT("info", ("tokudb_lock_type: 0x%lx\n", tokudb_lock_type));
    db_env->set_lk_detect(db_env, tokudb_lock_type);
#endif
    if (tokudb_max_lock) {
        DBUG_PRINT("info", ("tokudb_max_lock: %ld\n", tokudb_max_lock));
        r = db_env->set_lk_max_locks(db_env, tokudb_max_lock);
        if (r) {
            DBUG_PRINT("info", ("tokudb_set_max_locks %d\n", r));
            goto error;
        }
    }
    if (tokudb_debug & TOKUDB_DEBUG_INIT)
        TOKUDB_TRACE("%s:env open:flags=%x\n", __FUNCTION__, tokudb_init_flags);
    r = db_env->open(db_env, tokudb_home, tokudb_init_flags, 0666);
    if (tokudb_debug & TOKUDB_DEBUG_INIT)
        TOKUDB_TRACE("%s:env opened:return=%d\n", __FUNCTION__, r);
    if (r) {
        DBUG_PRINT("info", ("env->open %d\n", r));
        goto error;
    }
    DBUG_RETURN(FALSE);

error:
    if (db_env) {
        db_env->close(db_env, 0);
        db_env = 0;
    }
    DBUG_RETURN(TRUE);
}
static int tokudb_done_func(void *p) {
    TOKUDB_DBUG_ENTER("tokudb_done_func");
    int error = 0;
    if (tokudb_open_tables.records)
        error = 1;
    hash_free(&tokudb_open_tables);
    pthread_mutex_destroy(&tokudb_mutex);
    TOKUDB_DBUG_RETURN(error);          // nonzero if any table shares were still open
}
/** @brief
    Simple lock controls. The "share" this creates is a structure that is
    passed to every tokudb handler for the same table. It holds the pieces
    used for locking, so each open table needs one in order to function.
*/
static TOKUDB_SHARE *get_share(const char *table_name, TABLE * table) {
    TOKUDB_SHARE *share;
    uint length;
    pthread_mutex_lock(&tokudb_mutex);
    length = (uint) strlen(table_name);
    if (!(share = (TOKUDB_SHARE *) hash_search(&tokudb_open_tables, (uchar *) table_name, length))) {
        char *tmp_name;
        //
        // create share and fill it with all zeroes
        // hence, all pointers are initialized to NULL
        //
        if (!(share = (TOKUDB_SHARE *)
              my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
                              &share, sizeof(*share),
                              &tmp_name, length + 1,
                              NullS))) {
            pthread_mutex_unlock(&tokudb_mutex);
            return NULL;
        }
        share->use_count = 0;
        share->table_name_length = length;
        share->table_name = tmp_name;
        strmov(share->table_name, table_name);
        bzero((void *) share->key_file, sizeof(share->key_file));
        if (my_hash_insert(&tokudb_open_tables, (uchar *) share))
            goto error;
        thr_lock_init(&share->lock);
        pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST);
    }
    pthread_mutex_unlock(&tokudb_mutex);
    return share;

error:
    // my_hash_insert failed before share->mutex was initialized, so there is
    // nothing to destroy; just release the global mutex and free the share
    pthread_mutex_unlock(&tokudb_mutex);
    my_free((uchar *) share, MYF(0));
    return NULL;
}
static int free_share(TOKUDB_SHARE * share, TABLE * table, uint hidden_primary_key, bool mutex_is_locked) {
    int error, result = 0;
    pthread_mutex_lock(&tokudb_mutex);
    if (mutex_is_locked)
        pthread_mutex_unlock(&share->mutex);
    if (!--share->use_count) {
        DBUG_PRINT("info", ("share->use_count %u", share->use_count));
        //
        // number of open DB's may not be equal to number of keys we have because add_index
        // may have added some. So, we loop through entire array and close any non-NULL value
        // It is imperative that we reset a DB to NULL once we are done with it.
        //
        for (uint i = 0; i < sizeof(share->key_file) / sizeof(share->key_file[0]); i++) {
            if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
                TOKUDB_TRACE("dbclose:%p\n", share->key_file[i]);
            }
            if (share->key_file[i]) {
                error = share->key_file[i]->close(share->key_file[i], 0);
                if (error) {
                    result = error;
                }
                share->key_file[i] = NULL;
            }
        }
        if (share->status_block && (error = share->status_block->close(share->status_block, 0))) {
            result = error;
        }
        hash_delete(&tokudb_open_tables, (uchar *) share);
        thr_lock_delete(&share->lock);
        pthread_mutex_destroy(&share->mutex);
        my_free((uchar *) share, MYF(0));
    }
    pthread_mutex_unlock(&tokudb_mutex);
    return result;
}
static handler *tokudb_create_handler(handlerton * hton, TABLE_SHARE * table, MEM_ROOT * mem_root) {
    return new(mem_root) ha_tokudb(hton, table);
}

int tokudb_end(handlerton * hton, ha_panic_function type) {
    TOKUDB_DBUG_ENTER("tokudb_end");
    int error = 0;
    if (db_env) {
        if (tokudb_init_flags & DB_INIT_LOG)
            tokudb_cleanup_log_files();
        error = db_env->close(db_env, 0);       // Error is logged
        db_env = NULL;
    }
    TOKUDB_DBUG_RETURN(error);
}

static int tokudb_close_connection(handlerton * hton, THD * thd) {
    my_free(thd_data_get(thd, hton->slot), MYF(0));
    return 0;
}

bool tokudb_flush_logs(handlerton * hton) {
    TOKUDB_DBUG_ENTER("tokudb_flush_logs");
    int error;
    bool result = 0;
    if (tokudb_init_flags & DB_INIT_LOG) {
        if ((error = db_env->log_flush(db_env, 0))) {
            my_error(ER_ERROR_DURING_FLUSH_LOGS, MYF(0), error);
            result = 1;
        }
        if ((error = db_env->txn_checkpoint(db_env, 0, 0, 0))) {
            my_error(ER_ERROR_DURING_CHECKPOINT, MYF(0), error);
            result = 1;
        }
    }
    TOKUDB_DBUG_RETURN(result);
}

static int tokudb_commit(handlerton * hton, THD * thd, bool all) {
    TOKUDB_DBUG_ENTER("tokudb_commit");
    DBUG_PRINT("trans", ("ending transaction %s", all ? "all" : "stmt"));
    u_int32_t syncflag = THDVAR(thd, commit_sync) ? 0 : DB_TXN_NOSYNC;
    tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot);
    DB_TXN **txn = all ? &trx->all : &trx->stmt;
    int error = 0;
    if (*txn) {
        if (tokudb_debug & TOKUDB_DEBUG_TXN)
            TOKUDB_TRACE("commit:%d:%p\n", all, *txn);
        error = (*txn)->commit(*txn, syncflag);
        if (*txn == trx->sp_level)
            trx->sp_level = 0;
        *txn = 0;
    } else if (tokudb_debug & TOKUDB_DEBUG_TXN)
        TOKUDB_TRACE("commit0\n");
    TOKUDB_DBUG_RETURN(error);
}

static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
    TOKUDB_DBUG_ENTER("tokudb_rollback");
    DBUG_PRINT("trans", ("aborting transaction %s", all ? "all" : "stmt"));
    tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot);
    DB_TXN **txn = all ? &trx->all : &trx->stmt;
    int error = 0;
    if (*txn) {
        if (tokudb_debug & TOKUDB_DEBUG_TXN)
            TOKUDB_TRACE("rollback:%p\n", *txn);
        error = (*txn)->abort(*txn);
        if (*txn == trx->sp_level)
            trx->sp_level = 0;
        *txn = 0;
    } else if (tokudb_debug & TOKUDB_DEBUG_TXN)
        TOKUDB_TRACE("abort0\n");
    TOKUDB_DBUG_RETURN(error);
}
#if 0
static int tokudb_savepoint(handlerton * hton, THD * thd, void *savepoint) {
    TOKUDB_DBUG_ENTER("tokudb_savepoint");
    int error;
    DB_TXN **save_txn = (DB_TXN **) savepoint;
    tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot);
    if (!(error = db_env->txn_begin(db_env, trx->sp_level, save_txn, 0))) {
        trx->sp_level = *save_txn;
    }
    TOKUDB_DBUG_RETURN(error);
}

static int tokudb_rollback_to_savepoint(handlerton * hton, THD * thd, void *savepoint) {
    TOKUDB_DBUG_ENTER("tokudb_rollback_to_savepoint");
    int error;
    DB_TXN *parent, **save_txn = (DB_TXN **) savepoint;
    tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot);
    parent = (*save_txn)->parent;
    if (!(error = (*save_txn)->abort(*save_txn))) {
        trx->sp_level = parent;
        error = tokudb_savepoint(hton, thd, savepoint);
    }
    TOKUDB_DBUG_RETURN(error);
}

static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoint) {
    TOKUDB_DBUG_ENTER("tokudb_release_savepoint");
    int error;
    DB_TXN *parent, **save_txn = (DB_TXN **) savepoint;
    tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot);
    parent = (*save_txn)->parent;
    if (!(error = (*save_txn)->commit(*save_txn, 0))) {
        trx->sp_level = parent;
        *save_txn = 0;
    }
    TOKUDB_DBUG_RETURN(error);
}
#endif
static bool tokudb_show_logs(THD * thd, stat_print_fn * stat_print) {
    TOKUDB_DBUG_ENTER("tokudb_show_logs");
    char **all_logs, **free_logs, **a, **f;
    int error = 1;
    MEM_ROOT **root_ptr = my_pthread_getspecific_ptr(MEM_ROOT **, THR_MALLOC);
    MEM_ROOT show_logs_root, *old_mem_root = *root_ptr;
    init_sql_alloc(&show_logs_root, BDB_LOG_ALLOC_BLOCK_SIZE, BDB_LOG_ALLOC_BLOCK_SIZE);
    *root_ptr = &show_logs_root;
    all_logs = free_logs = 0;
    error = db_env->log_archive(db_env, &all_logs, 0);
    if (error) {
        DBUG_PRINT("error", ("log_archive failed (error %d)", error));
        db_env->err(db_env, error, "log_archive");
        if (error == DB_NOTFOUND)
            error = 0;          // No log files
        goto err;
    }
    /* Error is 0 here */
    if (all_logs) {
        for (a = all_logs, f = free_logs; *a; ++a) {
            if (f && *f && strcmp(*a, *f) == 0) {
                f++;
                if ((error = stat_print(thd, tokudb_hton_name, tokudb_hton_name_length, *a, strlen(*a), STRING_WITH_LEN(SHOW_LOG_STATUS_FREE))))
                    break;
            } else {
                if ((error = stat_print(thd, tokudb_hton_name, tokudb_hton_name_length, *a, strlen(*a), STRING_WITH_LEN(SHOW_LOG_STATUS_INUSE))))
                    break;
            }
        }
    }
err:
    if (all_logs)
        free(all_logs);
    if (free_logs)
        free(free_logs);
    free_root(&show_logs_root, MYF(0));
    *root_ptr = old_mem_root;
    TOKUDB_DBUG_RETURN(error);
}

bool tokudb_show_status(handlerton * hton, THD * thd, stat_print_fn * stat_print, enum ha_stat_type stat_type) {
    switch (stat_type) {
    case HA_ENGINE_LOGS:
        return tokudb_show_logs(thd, stat_print);
    default:
        return FALSE;
    }
}

static void tokudb_print_error(const DB_ENV * db_env, const char *db_errpfx, const char *buffer) {
    sql_print_error("%s: %s", db_errpfx, buffer);
}

void tokudb_cleanup_log_files(void) {
    TOKUDB_DBUG_ENTER("tokudb_cleanup_log_files");
    char **names;
    int error;
    if ((error = db_env->txn_checkpoint(db_env, 0, 0, 0)))
        my_error(ER_ERROR_DURING_CHECKPOINT, MYF(0), error);
    if ((error = db_env->log_archive(db_env, &names, 0)) != 0) {
        DBUG_PRINT("error", ("log_archive failed (error %d)", error));
        db_env->err(db_env, error, "log_archive");
        DBUG_VOID_RETURN;
    }
    if (names) {
        char **np;
        for (np = names; *np; ++np) {
#if 1
            if (tokudb_debug)
                TOKUDB_TRACE("%s:cleanup:%s\n", __FUNCTION__, *np);
#else
            my_delete(*np, MYF(MY_WME));
#endif
        }
        free(names);
    }
    DBUG_VOID_RETURN;
}
//
// *******NOTE*****
// If the flags HA_ONLINE_DROP_INDEX and HA_ONLINE_DROP_UNIQUE_INDEX
// are ever added, prepare_drop_index and final_drop_index will need to be modified
// so that the actual deletion of DB's is done in final_drop_index and not prepare_drop_index
//
static uint tokudb_alter_table_flags(uint flags) {
    return (HA_ONLINE_ADD_INDEX_NO_WRITES | HA_ONLINE_DROP_INDEX_NO_WRITES |
            HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES | HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES);
}

static int get_name_length(const char *name) {
    int n = 0;
    const char *newname = name;
    if (tokudb_data_dir) {
        n += strlen(tokudb_data_dir) + 1;
        if (strncmp("./", name, 2) == 0)
            newname = name + 2;
    }
    n += strlen(newname);
    n += strlen(ha_tokudb_ext);
    return n;
}

static void make_name(char *newname, const char *tablename, const char *dictname) {
    const char *newtablename = tablename;
    char *nn = newname;
    if (tokudb_data_dir) {
        nn += sprintf(nn, "%s/", tokudb_data_dir);
        if (strncmp("./", tablename, 2) == 0)
            newtablename = tablename + 2;
    }
    nn += sprintf(nn, "%s%s", newtablename, ha_tokudb_ext);
    if (dictname)
        nn += sprintf(nn, "/%s%s", dictname, ha_tokudb_ext);
}
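//
// For illustration (names assumed): with tokudb_data_dir unset, a table named
// "./test/t1" maps to "./test/t1.tokudb/main.tokudb" for the primary
// dictionary and "./test/t1.tokudb/key-b.tokudb" for a secondary index named
// "b" -- see the "main" and "key-%s" call sites in ha_tokudb::open and
// ha_tokudb::open_secondary_table below.
//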
#define HANDLE_INVALID_CURSOR() \
    if (cursor == NULL) { \
        error = last_cursor_error; \
        goto cleanup; \
    }

ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg)
    :
    handler(hton, table_arg), alloc_ptr(0), rec_buff(0),
    // flags defined in sql\handler.h
    int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ | HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_PRIMARY_KEY_IN_READ_INDEX |
                    HA_FILE_BASED | HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX),
    added_rows(0), deleted_rows(0), last_dup_key((uint) -1), using_ignore(0), last_cursor_error(0), range_lock_grabbed(false), primary_key_offsets(NULL) {
    transaction = NULL;
}

static const char *ha_tokudb_exts[] = {
    ha_tokudb_ext,
    NullS
};

/*
 * returns NULL terminated file extension string
 */
const char **ha_tokudb::bas_ext() const {
    TOKUDB_DBUG_ENTER("ha_tokudb::bas_ext");
    DBUG_RETURN(ha_tokudb_exts);
}

//
// Returns a bit mask of capabilities of the key or its part specified by
// the arguments. The capabilities are defined in sql/handler.h.
//
ulong ha_tokudb::index_flags(uint idx, uint part, bool all_parts) const {
    TOKUDB_DBUG_ENTER("ha_tokudb::index_flags");
    ulong flags = (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_KEYREAD_ONLY | HA_READ_RANGE);
    DBUG_RETURN(flags);
}

static int tokudb_cmp_hidden_key(DB * file, const DBT * new_key, const DBT * saved_key) {
    ulonglong a = uint5korr((char *) new_key->data);
    ulonglong b = uint5korr((char *) saved_key->data);
    return a < b ? -1 : (a > b ? 1 : 0);
}
/*
  Things that are required for ALL data types:
    key_part->field->null_bit
    key_part->length
    key_part->field->packed_col_length(...)
      DEFAULT: virtual uint packed_col_length(const uchar *to, uint length)
      { return length; }
      All integer types use this.
      String types MIGHT use a different one, especially the varchars.
    key_part->field->pack_cmp(...)
      DEFAULT: virtual int pack_cmp(...)
      { return cmp(a,b); }
      All integer types use the obvious one.
  Assume an X-byte bytestream; the integer value is:
    ((u_int64_t)((u_int8_t)bytes[0])) << 0  |
    ((u_int64_t)((u_int8_t)bytes[1])) << 8  |
    ((u_int64_t)((u_int8_t)bytes[2])) << 16 |
    ((u_int64_t)((u_int8_t)bytes[3])) << 24 |
    ((u_int64_t)((u_int8_t)bytes[4])) << 32 |
    ((u_int64_t)((u_int8_t)bytes[5])) << 40 |
    ((u_int64_t)((u_int8_t)bytes[6])) << 48 |
    ((u_int64_t)((u_int8_t)bytes[7])) << 56
  If the integer type is < 8 bytes, just skip the unneeded ones.
  Then compare the integers in the obvious way.
  Strings:
    Trailing space differences are ignored,
    i.e. delete all trailing spaces first, and then compare.
  Possible prerequisites:
    key_part->field->cmp
      NO DEFAULT
*/
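//
// For illustration only (not part of the build): decoding an X-byte
// little-endian bytestream exactly as described above. The 5-byte case is
// the hidden primary key format compared by tokudb_cmp_hidden_key().
//
#if 0
static inline u_int64_t bytestream_to_u64(const u_int8_t *bytes, int x) {
    u_int64_t val = 0;
    for (int i = 0; i < x; i++)
        val |= ((u_int64_t) ((u_int8_t) bytes[i])) << (8 * i);
    return val;         // two such values compare with <, ==, > as usual
}
#endif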
typedef enum {
    TOKUTRACE_SIGNED_INTEGER   = 0,
    TOKUTRACE_UNSIGNED_INTEGER = 1,
    TOKUTRACE_CHAR             = 2
} tokutrace_field_type;

typedef struct {
    tokutrace_field_type type;
    bool null_bit;
    u_int32_t length;
} tokutrace_field;

typedef struct {
    u_int16_t version;
    u_int32_t num_fields;
    tokutrace_field fields[0];
} tokutrace_cmp_fun;

static int tokutrace_db_get_cmp_byte_stream(DB* db, DBT* byte_stream) {
    int r = ENOSYS;
    void* data = NULL;
    KEY* key = NULL;
    if (byte_stream->flags != DB_DBT_MALLOC) { return EINVAL; }
    bzero((void *) byte_stream, sizeof(*byte_stream));
    u_int32_t num_fields = 0;
    if (!db->app_private) { num_fields = 1; }
    else {
        key = (KEY*) db->app_private;
        num_fields = key->key_parts;
    }
    size_t need_size = sizeof(tokutrace_cmp_fun) +
                       num_fields * sizeof(tokutrace_field);
    data = my_malloc(need_size, MYF(MY_FAE | MY_ZEROFILL | MY_WME));
    if (!data) { return ENOMEM; }
    tokutrace_cmp_fun* info = (tokutrace_cmp_fun*) data;
    info->version = 1;
    info->num_fields = num_fields;
    if (!db->app_private) {
        info->fields[0].type = TOKUTRACE_UNSIGNED_INTEGER;
        info->fields[0].null_bit = false;
        info->fields[0].length = 40 / 8;
        goto finish;
    }
    assert(db->app_private);
    assert(key);
    u_int32_t i;
    for (i = 0; i < num_fields; i++) {
        info->fields[i].null_bit = key->key_part[i].null_bit;
        info->fields[i].length = key->key_part[i].length;
        enum_field_types type = key->key_part[i].field->type();
        switch (type) {
#ifdef HAVE_LONG_LONG
        case (MYSQL_TYPE_LONGLONG):
#endif
        case (MYSQL_TYPE_LONG):
        case (MYSQL_TYPE_INT24):
        case (MYSQL_TYPE_SHORT):
        case (MYSQL_TYPE_TINY): {
            /* Integer */
            Field_num* field = static_cast<Field_num*>(key->key_part[i].field);
            if (field->unsigned_flag) {
                info->fields[i].type = TOKUTRACE_UNSIGNED_INTEGER;
            } else {
                info->fields[i].type = TOKUTRACE_SIGNED_INTEGER;
            }
            break;
        }
        default: {
            fprintf(stderr, "Cannot save cmp function for type %d.\n", type);
            r = ENOSYS;
            goto cleanup;
        }
        }
    }
finish:
    byte_stream->data = data;
    byte_stream->size = need_size;
    r = 0;
cleanup:
    if (r != 0) {
        if (data) { my_free(data, MYF(0)); }
    }
    return r;
}
static int tokudb_compare_two_keys(KEY *key, const DBT * new_key, const DBT * saved_key, bool cmp_prefix) {
    uchar new_key_inf_val = *(uchar *) new_key->data;
    uchar saved_key_inf_val = *(uchar *) saved_key->data;
    //
    // first byte is "infinity" byte
    //
    uchar *new_key_ptr = (uchar *)(new_key->data) + 1;
    uchar *saved_key_ptr = (uchar *)(saved_key->data) + 1;
    KEY_PART_INFO *key_part = key->key_part, *end = key_part + key->key_parts;
    int ret_val;
    //
    // do not include the inf val at the beginning
    //
    uint new_key_length = new_key->size - sizeof(uchar);
    uint saved_key_length = saved_key->size - sizeof(uchar);
    //DBUG_DUMP("key_in_index", saved_key_ptr, saved_key->size);
    for (; key_part != end && (int) new_key_length > 0 && (int) saved_key_length > 0; key_part++) {
        int cmp;
        uint new_key_field_length;
        uint saved_key_field_length;
        if (key_part->field->null_bit) {
            assert(new_key_ptr < (uchar *) new_key->data + new_key->size);
            assert(saved_key_ptr < (uchar *) saved_key->data + saved_key->size);
            if (*new_key_ptr != *saved_key_ptr) {
                return ((int) *new_key_ptr - (int) *saved_key_ptr);
            }
            saved_key_ptr++;
            new_key_length--;
            saved_key_length--;
            if (!*new_key_ptr++) { continue; }
        }
        new_key_field_length = key_part->field->packed_col_length(new_key_ptr, key_part->length);
        saved_key_field_length = key_part->field->packed_col_length(saved_key_ptr, key_part->length);
        assert(new_key_length >= new_key_field_length);
        assert(saved_key_length >= saved_key_field_length);
        if ((cmp = key_part->field->pack_cmp(new_key_ptr, saved_key_ptr, key_part->length, 0)))
            return cmp;
        new_key_ptr += new_key_field_length;
        new_key_length -= new_key_field_length;
        saved_key_ptr += saved_key_field_length;
        saved_key_length -= saved_key_field_length;
    }
    if (cmp_prefix || (new_key_length == 0 && saved_key_length == 0)) {
        ret_val = 0;
    }
    //
    // at this point, one SHOULD be 0
    //
    else if (new_key_length == 0 && saved_key_length > 0) {
        ret_val = (new_key_inf_val == COL_POS_INF) ? 1 : -1;
    }
    else if (new_key_length > 0 && saved_key_length == 0) {
        ret_val = (saved_key_inf_val == COL_POS_INF) ? -1 : 1;
    }
    //
    // this should never happen, perhaps we should assert(false)
    //
    else {
        ret_val = new_key_length - saved_key_length;
    }
    return ret_val;
}
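//
// For illustration: the leading "infinity" byte only matters when one key is
// a strict prefix of the other. A prefix key tagged COL_POS_INF sorts after
// every complete key that begins with the same fields (ret_val 1 above), and
// any other tag sorts before them; range scans use this to position a cursor
// just before or just after all rows matching a partial key.
//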
static int tokudb_cmp_packed_key(DB *file, const DBT *keya, const DBT *keyb) {
    assert(file->app_private != 0);
    KEY *key = (KEY *) file->app_private;
    return tokudb_compare_two_keys(key, keya, keyb, false);
}

static int tokudb_cmp_primary_key(DB *file, const DBT *keya, const DBT *keyb) {
    assert(file->api_internal != 0);    // this comparator reads api_internal, not app_private
    KEY *key = (KEY *) file->api_internal;
    return tokudb_compare_two_keys(key, keya, keyb, false);
}

//TODO: QQQ Only do one direction for prefix.
static int tokudb_prefix_cmp_packed_key(DB *file, const DBT *keya, const DBT *keyb) {
    assert(file->app_private != 0);
    KEY *key = (KEY *) file->app_private;
    return tokudb_compare_two_keys(key, keya, keyb, true);
}
#if 0
/* Compare key against row */
static bool tokudb_key_cmp(TABLE * table, KEY * key_info, const uchar * key, uint key_length) {
    KEY_PART_INFO *key_part = key_info->key_part, *end = key_part + key_info->key_parts;
    for (; key_part != end && (int) key_length > 0; key_part++) {
        int cmp;
        uint length;
        if (key_part->null_bit) {
            key_length--;
            /*
              With the current usage, the following case will always be FALSE,
              because NULL keys are sorted before any other key
            */
            if (*key != (table->record[0][key_part->null_offset] & key_part->null_bit) ? 0 : 1)
                return 1;
            if (!*key++)        // Null value
                continue;
        }
        /*
          Last argument has to be 0 as we are also using this function to see
          if a key like 'a ' matches a row with 'a'
        */
        if ((cmp = key_part->field->pack_cmp(key, key_part->length, 0)))
            return cmp;
        length = key_part->field->packed_col_length(key, key_part->length);
        key += length;
        key_length -= length;
    }
    return 0;                   // Identical keys
}
#endif
int primary_key_part_compare(const void* left, const void* right) {
    PRIM_KEY_PART_INFO* left_part = (PRIM_KEY_PART_INFO *) left;
    PRIM_KEY_PART_INFO* right_part = (PRIM_KEY_PART_INFO *) right;
    return left_part->offset - right_part->offset;
}

//
// struct that will be used as a context for smart DBT callbacks
// contains parameters needed to complete the smart DBT cursor call
//
typedef struct smart_dbt_info {
    ha_tokudb* ha;              // instance of ha_tokudb needed for reading the row
    uchar* buf;                 // output buffer where row will be written
    uint keynr;                 // index into share->key_file that represents DB we are currently operating on
} *SMART_DBT_INFO;

//
// struct that will be used as a context for smart DBT callbacks
// ONLY for the function add_index
//
typedef struct smart_dbt_ai_info {
    ha_tokudb* ha;              // instance of ha_tokudb needed for reading the row
    DBT* prim_key;              // DBT to store the primary key
    uchar* buf;                 // buffer to unpack the row
} *SMART_DBT_AI_INFO;
static void smart_dbt_ai_callback(DBT const *key, DBT const *row, void *context) {
    SMART_DBT_AI_INFO info = (SMART_DBT_AI_INFO) context;
    info->ha->unpack_row(info->buf, row, key);
    //
    // copy the key to prim_key
    //
    info->prim_key->size = key->size;
    memcpy(info->prim_key->data, key->data, key->size);
}

//
// smart DBT callback function for optimize
// in optimize, we want to flatten the DB by doing a full table scan.
// Therefore, we don't want to actually do anything with the data, hence
// the callback does nothing
//
static void smart_dbt_opt_callback(DBT const *key, DBT const *row, void *context) {
}

//
// Smart DBT callback function in case where we have a covering index
//
static void smart_dbt_callback_keyread(DBT const *key, DBT const *row, void *context) {
    SMART_DBT_INFO info = (SMART_DBT_INFO) context;
    info->ha->extract_hidden_primary_key(info->keynr, row, key);
    info->ha->read_key_only(info->buf, info->keynr, row, key);
}

//
// Smart DBT callback function in case where we do NOT have a covering index
//
static void smart_dbt_callback_rowread(DBT const *key, DBT const *row, void *context) {
    SMART_DBT_INFO info = (SMART_DBT_INFO) context;
    info->ha->extract_hidden_primary_key(info->keynr, row, key);
    info->ha->read_primary_key(info->buf, info->keynr, row, key);
}

//
// Smart DBT callback function in c_getf_heavi, in case where we have a covering index
//
static void smart_dbt_callback_keyread_heavi(DBT const *key, DBT const *row, void *context, int r_h) {
    SMART_DBT_INFO info = (SMART_DBT_INFO) context;
    info->ha->heavi_ret_val = r_h;
    smart_dbt_callback_keyread(key, row, context);
}

//
// Smart DBT callback function in c_getf_heavi, in case where we do NOT have a covering index
//
static void smart_dbt_callback_rowread_heavi(DBT const *key, DBT const *row, void *context, int r_h) {
    SMART_DBT_INFO info = (SMART_DBT_INFO) context;
    info->ha->heavi_ret_val = r_h;
    smart_dbt_callback_rowread(key, row, context);
}

//
// Smart DBT callback function in records_in_range
//
static void smart_dbt_callback_ror_heavi(DBT const *key, DBT const *row, void *context, int r_h) {
    DBT* copied_key = (DBT *) context;
    copied_key->size = key->size;
    memcpy(copied_key->data, key->data, key->size);
}

//
// macro for Smart DBT callback function,
// so we do not need to put this long line of code in multiple places
//
#define SMART_DBT_CALLBACK ( this->key_read ? smart_dbt_callback_keyread : smart_dbt_callback_rowread )

//
// macro that modifies read flag for cursor operations depending on whether
// we have preacquired lock or not
//
#define SET_READ_FLAG(flg) ((range_lock_grabbed || current_thd->options & OPTION_TABLE_LOCK) ? ((flg) | DB_PRELOCKED) : (flg))
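//
// For illustration (hypothetical call site): a cursor read typically combines
// the two macros, e.g.
//   error = cursor->c_getf_next(cursor, SET_READ_FLAG(0), SMART_DBT_CALLBACK, &info);
// so that DB_PRELOCKED is added whenever a range lock was pre-acquired.
//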
//
// This method retrieves the value of the auto increment column of a record in MySQL format
// This was basically taken from MyISAM
// Parameters:
//      type - the type of the auto increment column (e.g. int, float, double...)
//      offset - offset into the record where the auto increment column is stored
//      [in] record - MySQL row whose auto increment value we want to extract
// Returns:
//      The value of the auto increment column in record
//
ulonglong retrieve_auto_increment(uint16 type, uint32 offset, const uchar *record) {
    const uchar *key;                   /* Key */
    // both accumulators are initialized so the sign check below never reads
    // an indeterminate value when the column is of the other signedness
    ulonglong unsigned_autoinc = 0;     /* Unsigned auto-increment */
    longlong signed_autoinc = 0;        /* Signed auto-increment */
    enum { unsigned_type, signed_type } autoinc_type;
    float float_tmp;                    /* Temporary variable */
    double double_tmp;                  /* Temporary variable */
    key = ((uchar *) record) + offset;
    /* Set default autoincrement type */
    autoinc_type = unsigned_type;
    switch (type) {
    case HA_KEYTYPE_INT8:
        signed_autoinc = (longlong) *(char*) key;
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_BINARY:
        unsigned_autoinc = (ulonglong) *(uchar*) key;
        break;
    case HA_KEYTYPE_SHORT_INT:
        signed_autoinc = (longlong) sint2korr(key);
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_USHORT_INT:
        unsigned_autoinc = (ulonglong) uint2korr(key);
        break;
    case HA_KEYTYPE_LONG_INT:
        signed_autoinc = (longlong) sint4korr(key);
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_ULONG_INT:
        unsigned_autoinc = (ulonglong) uint4korr(key);
        break;
    case HA_KEYTYPE_INT24:
        signed_autoinc = (longlong) sint3korr(key);
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_UINT24:
        unsigned_autoinc = (ulonglong) uint3korr(key);
        break;
    case HA_KEYTYPE_LONGLONG:
        signed_autoinc = sint8korr(key);
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_ULONGLONG:
        unsigned_autoinc = uint8korr(key);
        break;
    /* The remaining two cases should not be used but are included for
       compatibility */
    case HA_KEYTYPE_FLOAT:
        float4get(float_tmp, key);      /* Note: float4get is a macro */
        signed_autoinc = (longlong) float_tmp;
        autoinc_type = signed_type;
        break;
    case HA_KEYTYPE_DOUBLE:
        float8get(double_tmp, key);     /* Note: float8get is a macro */
        signed_autoinc = (longlong) double_tmp;
        autoinc_type = signed_type;
        break;
    default:
        DBUG_ASSERT(0);
        unsigned_autoinc = 0;
    }
    if (signed_autoinc < 0) {
        signed_autoinc = 0;
    }
    return autoinc_type == unsigned_type ?
        unsigned_autoinc : (ulonglong) signed_autoinc;
}
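//
// For illustration (hypothetical call): given the table's auto-increment
// Field*, its value in the current row could be read as
//   ulonglong nr = retrieve_auto_increment(field->key_type(), field_offset(field), record);
//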
inline uint get_null_offset(TABLE* table, Field* field) {
    return (uint) ((uchar*) field->null_ptr - (uchar*) table->record[0]);
}

inline bool is_null_field(TABLE* table, Field* field, const uchar* record) {
    uint null_offset;
    bool ret_val;
    if (!field->null_ptr) {
        ret_val = false;
        goto exitpt;
    }
    null_offset = get_null_offset(table, field);
    ret_val = (record[null_offset] & field->null_bit) ? true : false;
exitpt:
    return ret_val;
}
//
// Open a secondary table, the key will be a secondary index, the data will be a primary key
//
int ha_tokudb::open_secondary_table(DB** ptr, KEY* key_info, const char* name, int mode, u_int32_t* key_type) {
    int error = ENOSYS;
    char part[MAX_ALIAS_NAME + 10];
    char name_buff[FN_REFLEN];
    uint open_flags = (mode == O_RDONLY ? DB_RDONLY : 0) | DB_THREAD;
    char newname[strlen(name) + 32];
    DBT cmp_byte_stream;
    open_flags += DB_AUTO_COMMIT;
    if ((error = db_create(ptr, db_env, 0))) {
        my_errno = error;
        goto cleanup;
    }
    sprintf(part, "key-%s", key_info->name);
    make_name(newname, name, part);
    fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME);
    *key_type = key_info->flags & HA_NOSAME ? DB_NOOVERWRITE : DB_YESOVERWRITE;
    (*ptr)->app_private = (void *) (key_info);
    if (tokudb_debug & TOKUDB_DEBUG_SAVE_TRACE) {
        bzero((void *) &cmp_byte_stream, sizeof(cmp_byte_stream));
        cmp_byte_stream.flags = DB_DBT_MALLOC;
        if ((error = tokutrace_db_get_cmp_byte_stream(*ptr, &cmp_byte_stream))) {
            my_errno = error;
            goto cleanup;
        }
        (*ptr)->set_bt_compare(*ptr, tokudb_cmp_packed_key);
        my_free(cmp_byte_stream.data, MYF(0));
    }
    else {
        (*ptr)->set_bt_compare(*ptr, tokudb_cmp_packed_key);
    }
    DBUG_PRINT("info", ("Setting DB_DUP+DB_DUPSORT for key %s\n", key_info->name));
    (*ptr)->set_flags(*ptr, DB_DUP + DB_DUPSORT);
    (*ptr)->api_internal = share->file->app_private;
    (*ptr)->set_dup_compare(*ptr, hidden_primary_key ? tokudb_cmp_hidden_key : tokudb_cmp_primary_key);
    if ((error = (*ptr)->open(*ptr, 0, name_buff, NULL, DB_BTREE, open_flags, 0))) {
        my_errno = error;
        goto cleanup;
    }
    if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
        TOKUDB_TRACE("open:%s:file=%p\n", newname, *ptr);
    }
cleanup:
    return error;
}
//
// Creates and opens a handle to a table which already exists in a tokudb
// database.
// Parameters:
//      [in] name - table name
//      mode - seems to specify if table is read only
//      test_if_locked - unused
// Returns:
//      0 on success
//      1 on error
//
int ha_tokudb::open(const char *name, int mode, uint test_if_locked) {
    TOKUDB_DBUG_ENTER("ha_tokudb::open %p %s", this, name);
    TOKUDB_OPEN();
    char name_buff[FN_REFLEN];
    uint open_flags = (mode == O_RDONLY ? DB_RDONLY : 0) | DB_THREAD;
    uint max_key_length;
    int error;
    transaction = NULL;
    cursor = NULL;
    open_flags += DB_AUTO_COMMIT;
    /* Open primary key */
    hidden_primary_key = 0;
    if ((primary_key = table_share->primary_key) >= MAX_KEY) {
        // No primary key
        primary_key = table_share->keys;
        key_used_on_scan = MAX_KEY;
        ref_length = hidden_primary_key = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
    } else
        key_used_on_scan = primary_key;
    /* Need some extra memory in case of packed keys */
    // the "+ 1" is for the first byte that states +/- infinity
    max_key_length = table_share->max_key_length + MAX_REF_PARTS * 3 + sizeof(uchar);
    if (!(alloc_ptr =
          my_multi_malloc(MYF(MY_WME),
                          &key_buff, max_key_length,
                          &key_buff2, max_key_length,
                          &primary_key_buff, (hidden_primary_key ? 0 : table_share->key_info[table_share->primary_key].key_length + sizeof(uchar)),
                          NullS)))
        TOKUDB_DBUG_RETURN(1);
    if (!(rec_buff = (uchar *) my_malloc((alloced_rec_buff_length = table_share->rec_buff_length), MYF(MY_WME)))) {
        my_free(alloc_ptr, MYF(0));
        TOKUDB_DBUG_RETURN(1);
    }
    /* Init shared structure */
    if (!(share = get_share(name, table))) {
        my_free((char *) rec_buff, MYF(0));
        my_free(alloc_ptr, MYF(0));
        TOKUDB_DBUG_RETURN(1);
    }
    /* Make sorted list of primary key parts, if they exist */
    if (!hidden_primary_key) {
        uint num_prim_key_parts = table_share->key_info[table_share->primary_key].key_parts;
        primary_key_offsets = (PRIM_KEY_PART_INFO *) my_malloc(
            num_prim_key_parts * sizeof(*primary_key_offsets),
            MYF(MY_WME)
        );
        if (!primary_key_offsets) {
            free_share(share, table, hidden_primary_key, 1);
            my_free((char *) rec_buff, MYF(0));
            my_free(alloc_ptr, MYF(0));
            TOKUDB_DBUG_RETURN(1);
        }
        for (uint i = 0; i < table_share->key_info[table_share->primary_key].key_parts; i++) {
            primary_key_offsets[i].offset = table_share->key_info[table_share->primary_key].key_part[i].offset;
            primary_key_offsets[i].part_index = i;
        }
        qsort(
            primary_key_offsets,            // start of array
            num_prim_key_parts,             // num elements
            sizeof(*primary_key_offsets),   // size of each element
            primary_key_part_compare
        );
    }
    thr_lock_data_init(&share->lock, &lock, NULL);
    bzero((void *) &current_row, sizeof(current_row));
    /* Fill in shared structure, if needed */
    pthread_mutex_lock(&share->mutex);
    if (tokudb_debug & TOKUDB_DEBUG_OPEN)
        TOKUDB_TRACE("tokudbopen:%p:share=%p:file=%p:table=%p:table->s=%p:%d\n",
                     this, share, share->file, table, table->s, share->use_count);
    if (!share->use_count++) {
        DBUG_PRINT("info", ("share->use_count %u", share->use_count));
        DBT cmp_byte_stream;
        if ((error = db_create(&share->file, db_env, 0))) {
            free_share(share, table, hidden_primary_key, 1);
            my_free((char *) rec_buff, MYF(0));
            my_free(alloc_ptr, MYF(0));
            if (primary_key_offsets) my_free(primary_key_offsets, MYF(0));
            my_errno = error;
            TOKUDB_DBUG_RETURN(1);
        }
        if (!hidden_primary_key)
            share->file->app_private = (void *) (table_share->key_info + table_share->primary_key);
        if (tokudb_debug & TOKUDB_DEBUG_SAVE_TRACE) {
            bzero((void *) &cmp_byte_stream, sizeof(cmp_byte_stream));
            cmp_byte_stream.flags = DB_DBT_MALLOC;
            if ((error = tokutrace_db_get_cmp_byte_stream(share->file, &cmp_byte_stream))) {
                free_share(share, table, hidden_primary_key, 1);
                my_free((char *) rec_buff, MYF(0));
                my_free(alloc_ptr, MYF(0));
                if (primary_key_offsets) my_free(primary_key_offsets, MYF(0));
                my_errno = error;
                TOKUDB_DBUG_RETURN(1);
            }
            share->file->set_bt_compare(share->file, (hidden_primary_key ? tokudb_cmp_hidden_key : tokudb_cmp_packed_key));
            my_free(cmp_byte_stream.data, MYF(0));
        }
        else
            share->file->set_bt_compare(share->file, (hidden_primary_key ? tokudb_cmp_hidden_key : tokudb_cmp_packed_key));
        char newname[strlen(name) + 32];
        make_name(newname, name, "main");
        fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME);
        if ((error = share->file->open(share->file, 0, name_buff, NULL, DB_BTREE, open_flags, 0))) {
            free_share(share, table, hidden_primary_key, 1);
            my_free((char *) rec_buff, MYF(0));
            my_free(alloc_ptr, MYF(0));
            if (primary_key_offsets) my_free(primary_key_offsets, MYF(0));
            my_errno = error;
            TOKUDB_DBUG_RETURN(1);
        }
        if (tokudb_debug & TOKUDB_DEBUG_OPEN)
            TOKUDB_TRACE("open:%s:file=%p\n", newname, share->file);
        /* Open other keys; These are part of the share structure */
        share->key_file[primary_key] = share->file;
        share->key_type[primary_key] = hidden_primary_key ? DB_YESOVERWRITE : DB_NOOVERWRITE;
        DB **ptr = share->key_file;
        for (uint i = 0; i < table_share->keys; i++, ptr++) {
            if (i != primary_key) {
                if ((error = open_secondary_table(ptr, &table_share->key_info[i], name, mode, &share->key_type[i]))) {
                    __close(1);
                    TOKUDB_DBUG_RETURN(1);
                }
            }
        }
        /* Calculate pack_length of primary key */
        share->fixed_length_primary_key = 1;
        if (!hidden_primary_key) {
            //
            // I realize this is incredibly confusing, and refactoring should take
            // care of this, but we need to set the ref_length to start at 1, to account for
            // the "infinity byte" in keys.
            //
            ref_length = sizeof(uchar);
            KEY_PART_INFO *key_part = table->key_info[primary_key].key_part;
            KEY_PART_INFO *end = key_part + table->key_info[primary_key].key_parts;
            for (; key_part != end; key_part++)
                ref_length += key_part->field->max_packed_col_length(key_part->length);
            share->fixed_length_primary_key = (ref_length == table->key_info[primary_key].key_length + sizeof(uchar));
            share->status |= STATUS_PRIMARY_KEY_INIT;
        }
        share->ref_length = ref_length;
        error = get_status();
        if (error || share->version < HA_TOKU_VERSION) {
            __close(1);
            TOKUDB_DBUG_RETURN(1);
        }
        //////////////////////
        u_int64_t num_rows = 0;
        error = estimate_num_rows(share->file, &num_rows);
        //
        // estimate_num_rows should not fail under normal conditions
        //
        if (error == 0) {
            share->rows = num_rows;
        }
        else {
            __close(1);
            TOKUDB_DBUG_RETURN(1);
        }
        //
        // initialize auto increment data
        //
        share->has_auto_inc = has_auto_increment_flag(&share->ai_field_index);
        if (share->has_auto_inc) {
            init_auto_increment();
        }
    }
    ref_length = share->ref_length;     // If second open
    pthread_mutex_unlock(&share->mutex);
    key_read = false;
    stats.block_size = 1 << 20;         // QQQ Tokudb DB block size
    share->fixed_length_row = !(table_share->db_create_options & HA_OPTION_PACK_RECORD);
    init_hidden_prim_key_info();
    info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
    TOKUDB_DBUG_RETURN(0);
}
//
// estimate the number of rows in a DB
// Parameters:
//      [in] db - DB whose number of rows will be estimated
//      [out] num_rows - number of estimated rows in db
// Returns:
//      0 on success
//      error otherwise
//
int ha_tokudb::estimate_num_rows(DB* db, u_int64_t* num_rows) {
    DBT key;
    DBT data;
    int error = ENOSYS;
    DBC* crsr = NULL;
    u_int64_t less, equal, greater;
    int is_exact;
    bool do_commit = false;
    bzero((void *) &key, sizeof(key));
    bzero((void *) &data, sizeof(data));
    if (transaction == NULL) {
        error = db_env->txn_begin(db_env, 0, &transaction, 0);
        if (error) goto cleanup;
        do_commit = true;
    }
    error = db->cursor(db, transaction, &crsr, 0);
    if (error) { goto cleanup; }
    //
    // get the first element, then estimate number of records
    // by calling key_range64 on the first element
    //
    error = crsr->c_get(crsr, &key, &data, DB_FIRST);
    if (error == DB_NOTFOUND) {
        *num_rows = 0;
        error = 0;
        goto cleanup;
    }
    else if (error) { goto cleanup; }
    error = db->key_range64(
        db,
        transaction,
        &key,
        &less,
        &equal,
        &greater,
        &is_exact
    );
    if (error) {
        goto cleanup;
    }
    *num_rows = equal + greater;
    error = 0;
cleanup:
    if (do_commit) {
        transaction->commit(transaction, 0);
        transaction = NULL;
    }
    if (crsr != NULL) {
        crsr->c_close(crsr);
        crsr = NULL;
    }
    return error;
}
//
// Reports whether the table has an auto increment column and, if so, where it is.
// Parameters:
//      [out] index - if an auto inc column exists, set to its position in the table; otherwise unchanged
// Returns:
//      true if an auto inc column exists, false otherwise
//
bool ha_tokudb::has_auto_increment_flag(uint* index) {
    //
    // check to see if we have auto increment field
    //
    bool ai_found = false;
    uint i = 0;
    for (Field ** field = table->field; *field; field++, i++) {
        if ((*field)->flags & AUTO_INCREMENT_FLAG) {
            ai_found = true;
            *index = i;
            break;
        }
    }
    return ai_found;
}
//
// helper function to write a piece of metadata in to status.tokudb
//
int ha_tokudb::write_metadata(DB* db, HA_METADATA_KEY curr_key_data, void* data, ulonglong size) {
    int error;
    DBT key;
    DBT value;
    DB_TXN* txn = NULL;
    //
    // transaction to be used for putting metadata into status.tokudb
    //
    error = db_env->txn_begin(db_env, 0, &txn, 0);
    if (error) {
        goto cleanup;
    }
    bzero(&key, sizeof(key));
    bzero(&value, sizeof(value));
    key.data = &curr_key_data;
    key.size = sizeof(curr_key_data);
    value.data = data;
    value.size = size;
    error = db->put(db, txn, &key, &value, 0);
    if (error) {
        goto cleanup;
    }
    error = 0;
cleanup:
    if (txn) {
        if (!error) {
            txn->commit(txn, DB_TXN_NOSYNC);
        }
        else {
            txn->abort(txn);
        }
    }
    return error;
}

//
// Updates status.tokudb with a new max value used for the auto increment column
// Parameters:
//      [in] db - this will always be status.tokudb
//      val - value to store
// Returns:
//      0 on success, error otherwise
//
int ha_tokudb::update_max_auto_inc(DB* db, ulonglong val) {
    return write_metadata(db, hatoku_max_ai, &val, sizeof(val));
}

//
// Writes the initial auto increment value, as specified by create table
// so if a user does "create table t1 (a int auto_increment, primary key (a)) auto_increment=100",
// then the value 100 will be stored here in val
// Parameters:
//      [in] db - this will always be status.tokudb
//      val - value to store
// Returns:
//      0 on success, error otherwise
//
int ha_tokudb::write_auto_inc_create(DB* db, ulonglong val) {
    return write_metadata(db, hatoku_ai_create_value, &val, sizeof(val));
}

//
// Closes a handle to a table.
//
int ha_tokudb::close(void) {
    TOKUDB_DBUG_ENTER("ha_tokudb::close %p", this);
    TOKUDB_CLOSE();
    TOKUDB_DBUG_RETURN(__close(0));
}

int ha_tokudb::__close(int mutex_is_locked) {
    TOKUDB_DBUG_ENTER("ha_tokudb::__close %p", this);
    if (tokudb_debug & TOKUDB_DEBUG_OPEN)
        TOKUDB_TRACE("close:%p\n", this);
    my_free(rec_buff, MYF(MY_ALLOW_ZERO_PTR));
    my_free(alloc_ptr, MYF(MY_ALLOW_ZERO_PTR));
    my_free(primary_key_offsets, MYF(MY_ALLOW_ZERO_PTR));
    ha_tokudb::reset();         // current_row buffer
    TOKUDB_DBUG_RETURN(free_share(share, table, hidden_primary_key, mutex_is_locked));
}
//
// Reallocate record buffer (rec_buff) if needed
// If not needed, does nothing
// Parameters:
//      length - size of buffer required for rec_buff
//
bool ha_tokudb::fix_rec_buff_for_blob(ulong length) {
    if (!rec_buff || length > alloced_rec_buff_length) {
        uchar *newptr;
        if (!(newptr = (uchar *) my_realloc((void *) rec_buff, length, MYF(MY_ALLOW_ZERO_PTR))))
            return 1;
        rec_buff = newptr;
        alloced_rec_buff_length = length;
    }
    return 0;
}

/* Calculate max length needed for row */
ulong ha_tokudb::max_row_length(const uchar * buf) {
    ulong length = table_share->reclength + table_share->fields * 2;
    uint *ptr, *end;
    for (ptr = table_share->blob_field, end = ptr + table_share->blob_fields; ptr != end; ptr++) {
        Field_blob *blob = ((Field_blob *) table->field[*ptr]);
        length += blob->get_length((uchar *) (buf + field_offset(blob))) + 2;
    }
    return length;
}
1416. //
1417. // take the row passed in as record (in MySQL format), and pack it for storage
1418. // into the DBT row.
  1419. // If the row is of fixed length, just store the row 'as is'.
  1420. // If not, we will generate a packed row suitable for storage.
  1421. // This will only fail if we don't have enough memory to pack the row,
  1422. // which may only happen in rows with blobs, as the default row length is
  1423. // pre-allocated.
  1424. // Parameters:
1425. // [out] row - DBT in which the packed row is returned
  1426. // [in] record - row in MySQL format
  1427. //
  1428. int ha_tokudb::pack_row(DBT * row, const uchar * record) {
  1429. uchar *ptr;
  1430. int r = ENOSYS;
  1431. bzero((void *) row, sizeof(*row));
  1432. uint curr_skip_index;
  1433. KEY *key_info = table->key_info + primary_key;
  1434. my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
  1435. //
  1436. // two cases, fixed length row, and variable length row
  1437. // fixed length row is first below
  1438. //
  1439. if (share->fixed_length_row) {
  1440. if (hidden_primary_key) {
  1441. row->data = (void *)record;
  1442. row->size = table_share->reclength;
  1443. r = 0;
  1444. goto cleanup;
  1445. }
  1446. else {
  1447. //
  1448. // if the primary key is not hidden, then it is part of the record
  1449. // because primary key information is already stored in the key
  1450. // that will be passed to the fractal tree, we do not copy
  1451. // components that belong to the primary key
  1452. //
  1453. if (fix_rec_buff_for_blob(table_share->reclength)) {
  1454. r = HA_ERR_OUT_OF_MEM;
  1455. goto cleanup;
  1456. }
  1457. uchar* tmp_dest = rec_buff;
  1458. const uchar* tmp_src = record;
  1459. uint i = 0;
  1460. //
1461. // say we have 100 bytes in record, and bytes [25,50) and [75,90) belong to the primary key
1462. // this for loop will do a memcpy of [0,25), [50,75) and [90,100)
  1463. //
  1464. for (i =0; i < key_info->key_parts; i++){
  1465. uint curr_index = primary_key_offsets[i].part_index;
  1466. uint bytes_to_copy = record + key_info->key_part[curr_index].offset - tmp_src;
  1467. memcpy(tmp_dest,tmp_src, bytes_to_copy);
  1468. tmp_dest += bytes_to_copy;
  1469. tmp_src = record + key_info->key_part[curr_index].offset + key_info->key_part[curr_index].length;
  1470. }
  1471. memcpy(tmp_dest,tmp_src, record + table_share->reclength - tmp_src);
  1472. tmp_dest += record + table_share->reclength - tmp_src;
  1473. row->data = rec_buff;
  1474. row->size = (size_t) (tmp_dest - rec_buff);
  1475. r = 0;
  1476. goto cleanup;
  1477. }
  1478. }
  1479. if (table_share->blob_fields) {
  1480. if (fix_rec_buff_for_blob(max_row_length(record))) {
  1481. r = HA_ERR_OUT_OF_MEM;
  1482. goto cleanup;
  1483. }
  1484. }
  1485. /* Copy null bits */
  1486. memcpy(rec_buff, record, table_share->null_bytes);
  1487. ptr = rec_buff + table_share->null_bytes;
  1488. //
  1489. // assert that when the hidden primary key exists, primary_key_offsets is NULL
  1490. //
  1491. assert( (hidden_primary_key != 0) == (primary_key_offsets == NULL));
  1492. curr_skip_index = 0;
  1493. for (Field ** field = table->field; *field; field++) {
  1494. uint curr_field_offset = field_offset(*field);
  1495. //
  1496. // if the primary key is hidden, primary_key_offsets will be NULL and
  1497. // this clause will not execute
  1498. //
  1499. if (primary_key_offsets) {
  1500. uint curr_skip_offset = primary_key_offsets[curr_skip_index].offset;
  1501. if (curr_skip_offset == curr_field_offset) {
  1502. //
  1503. // we have hit a field that is a portion of the primary key
  1504. //
  1505. uint curr_key_index = primary_key_offsets[curr_skip_index].part_index;
  1506. curr_skip_index++;
  1507. //
  1508. // only choose to continue over the key if the key's length matches the field's length
  1509. // otherwise, we may have a situation where the column is a varchar(10), the
  1510. // key is only the first 3 characters, and we end up losing the last 7 bytes of the
  1511. // column
  1512. //
  1513. if (table->key_info[primary_key].key_part[curr_key_index].length == (*field)->field_length) {
  1514. continue;
  1515. }
  1516. }
  1517. }
  1518. if (is_null_field(table, *field, record)) {
  1519. continue;
  1520. }
  1521. ptr = (*field)->pack(ptr, (const uchar *)
  1522. (record + curr_field_offset));
  1523. }
  1524. row->data = rec_buff;
  1525. row->size = (size_t) (ptr - rec_buff);
  1526. r = 0;
  1527. cleanup:
  1528. dbug_tmp_restore_column_map(table->write_set, old_map);
  1529. return r;
  1530. }
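//
// Worked example (not compiled) of the skip-copy loop in pack_row() above:
// given a 100 byte record where bytes [25,50) and [75,90) belong to the
// primary key, only the non-key ranges [0,25), [50,75) and [90,100) are
// copied into the packed row; the key bytes travel in the key DBT instead.
//
#if 0
#include <string.h>
static void example_skip_copy(char *dest, const char *record) {
    struct { unsigned offset, length; } pk_parts[] = { {25, 25}, {75, 15} };
    const char *src = record;
    for (int i = 0; i < 2; i++) {
        unsigned bytes_to_copy = record + pk_parts[i].offset - src;
        memcpy(dest, src, bytes_to_copy);                 // bytes before this key part
        dest += bytes_to_copy;
        src = record + pk_parts[i].offset + pk_parts[i].length; // hop over the key part
    }
    memcpy(dest, src, record + 100 - src);                // tail after the last key part
}
#endif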
  1531. //
  1532. // take the row passed in as a DBT*, and convert it into a row in MySQL format in record
  1533. // Parameters:
  1534. // [out] record - row in MySQL format
  1535. // [in] row - row stored in DBT to be converted
  1536. //
  1537. void ha_tokudb::unpack_row(uchar * record, DBT const *row, DBT const *key) {
  1538. //
  1539. // two cases, fixed length row, and variable length row
  1540. // fixed length row is first below
  1541. //
  1542. if (share->fixed_length_row) {
  1543. if (hidden_primary_key) {
  1544. memcpy(record, (void *) row->data, table_share->reclength);
  1545. }
  1546. else {
  1547. my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
  1548. KEY *key_info = table_share->key_info + primary_key;
  1549. uchar* tmp_dest = record;
  1550. uchar* tmp_src = (uchar *)row->data;
  1551. uint i = 0;
  1552. //
  1553. // unpack_key will fill in parts of record that are part of the primary key
  1554. //
  1555. unpack_key(record, key, primary_key);
  1556. //
  1557. // produces the opposite effect to what happened in pack_row
  1558. // first we fill in the parts of record that are not part of the key
  1559. //
  1560. for (i =0; i < key_info->key_parts; i++){
  1561. uint curr_index = primary_key_offsets[i].part_index;
  1562. uint bytes_to_copy = record + key_info->key_part[curr_index].offset - tmp_dest;
  1563. memcpy(tmp_dest,tmp_src, bytes_to_copy);
  1564. tmp_src += bytes_to_copy;
  1565. tmp_dest = record + key_info->key_part[curr_index].offset + key_info->key_part[curr_index].length;
  1566. }
  1567. memcpy(tmp_dest,tmp_src, record + table_share->reclength - tmp_dest);
  1568. dbug_tmp_restore_column_map(table->write_set, old_map);
  1569. }
  1570. }
  1571. else {
  1572. /* Copy null bits */
  1573. my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
  1574. const uchar *ptr = (const uchar *) row->data;
  1575. memcpy(record, ptr, table_share->null_bytes);
  1576. ptr += table_share->null_bytes;
  1577. if (primary_key_offsets) {
  1578. //
  1579. // unpack_key will fill in parts of record that are part of the primary key
  1580. //
  1581. unpack_key(record, key, primary_key);
  1582. }
  1583. //
  1584. // fill in parts of record that are not part of the key
  1585. //
  1586. uint curr_skip_index = 0;
  1587. for (Field ** field = table->field; *field; field++) {
  1588. uint curr_field_offset = field_offset(*field);
  1589. if (primary_key_offsets) {
  1590. uint curr_skip_offset = primary_key_offsets[curr_skip_index].offset;
  1591. if (curr_skip_offset == curr_field_offset) {
  1592. //
  1593. // we have hit a field that is a portion of the primary key
  1594. //
  1595. uint curr_key_index = primary_key_offsets[curr_skip_index].part_index;
  1596. curr_skip_index++;
  1597. //
  1598. // only choose to continue over the key if the key's length matches the field's length
  1599. // otherwise, we may have a situation where the column is a varchar(10), the
  1600. // key is only the first 3 characters, and we end up losing the last 7 bytes of the
  1601. // column
  1602. //
  1603. if (table->key_info[primary_key].key_part[curr_key_index].length == (*field)->field_length) {
  1604. continue;
  1605. }
  1606. }
  1607. }
  1608. //
  1609. // null bytes MUST have been copied before doing this
  1610. //
  1611. if (is_null_field(table, *field, record)) {
  1612. continue;
  1613. }
  1614. ptr = (*field)->unpack(record + field_offset(*field), ptr);
  1615. }
  1616. dbug_tmp_restore_column_map(table->write_set, old_map);
  1617. }
  1618. }
  1619. //
  1620. // Store the key and the primary key into the row
  1621. // Parameters:
  1622. // [out] record - key stored in MySQL format
  1623. // [in] key - key stored in DBT to be converted
  1624. // index -index into key_file that represents the DB
  1625. // unpacking a key of
  1626. //
  1627. void ha_tokudb::unpack_key(uchar * record, DBT const *key, uint index) {
  1628. KEY *key_info = table->key_info + index;
  1629. KEY_PART_INFO *key_part = key_info->key_part, *end = key_part + key_info->key_parts;
  1630. uchar *pos = (uchar *) key->data + 1;
  1631. for (; key_part != end; key_part++) {
  1632. if (key_part->null_bit) {
  1633. if (*pos++ == NULL_COL_VAL) { // Null value
  1634. /*
  1635. We don't need to reset the record data as we will not access it
  1636. if the null data is set
  1637. */
  1638. record[key_part->null_offset] |= key_part->null_bit;
  1639. continue;
  1640. }
  1641. record[key_part->null_offset] &= ~key_part->null_bit;
  1642. }
  1643. /* tokutek change to make pack_key and unpack_key work for
  1644. decimals */
  1645. uint unpack_length = key_part->length;
  1646. pos = (uchar *) key_part->field->unpack_key(record + field_offset(key_part->field), pos,
  1647. #if MYSQL_VERSION_ID < 50123
  1648. unpack_length);
  1649. #else
  1650. unpack_length, table->s->db_low_byte_first);
  1651. #endif
  1652. }
  1653. }
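//
// Sketch (not compiled) of the packed key layout that unpack_key() walks:
// byte 0 is the "infinity" byte (e.g. COL_NEG_INF), then for each nullable
// key part one marker byte (NULL_COL_VAL or NONNULL_COL_VAL) followed, for
// non-NULL values, by the bytes Field::pack_key() produced. A key on one
// nullable INT column holding 42 would look roughly like this (the plain
// memcpy stands in for the field's real packing):
//
#if 0
uchar example_key[1 + 1 + sizeof(int)];
static void example_build_key(int v) {
    uchar *pos = example_key;
    *pos++ = COL_NEG_INF;        // infinity byte, meaningful only for range probes
    *pos++ = NONNULL_COL_VAL;    // null marker for the nullable column
    memcpy(pos, &v, sizeof(v));  // stand-in for Field::pack_key() output
}
#endif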
  1654. //
  1655. // Create a packed key from a row. This key will be written as such
  1656. // to the index tree. This will never fail as the key buffer is pre-allocated.
  1657. // Parameters:
  1658. // [out] key - DBT that holds the key
1659. // [in] key_info - holds data about the key, such as its length and offset into record
  1660. // [out] buff - buffer that will hold the data for key (unless
  1661. // we have a hidden primary key)
  1662. // [in] record - row from which to create the key
  1663. // key_length - currently set to MAX_KEY_LENGTH, is it size of buff?
  1664. // Returns:
  1665. // the parameter key
  1666. //
  1667. DBT* ha_tokudb::create_dbt_key_from_key(DBT * key, KEY* key_info, uchar * buff, const uchar * record, bool* has_null, int key_length) {
  1668. KEY_PART_INFO *key_part = key_info->key_part;
  1669. KEY_PART_INFO *end = key_part + key_info->key_parts;
  1670. my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
  1671. key->data = buff;
  1672. //
  1673. // first put the "infinity" byte at beginning. States if missing columns are implicitly
  1674. // positive infinity or negative infinity. For this, because we are creating key
  1675. // from a row, there is no way that columns can be missing, so in practice,
  1676. // this will be meaningless. Might as well put in a value
  1677. //
  1678. *buff++ = COL_NEG_INF;
  1679. *has_null = false;
  1680. for (; key_part != end && key_length > 0; key_part++) {
  1681. //
1682. // accessing key_part->field->null_bit instead of key_part->null_bit
  1683. // because key_part->null_bit is not set in add_index
  1684. // filed ticket 862 to look into this
  1685. //
  1686. if (key_part->field->null_bit) {
  1687. /* Store 0 if the key part is a NULL part */
  1688. uint null_offset = (uint) ((char*) key_part->field->null_ptr
  1689. - (char*) table->record[0]);
  1690. if (record[null_offset] & key_part->field->null_bit) {
  1691. *buff++ = NULL_COL_VAL;
  1692. *has_null = true;
  1693. //
1694. // fractal tree does not handle this flag at the moment
  1695. // so commenting out for now
  1696. //
  1697. //key->flags |= DB_DBT_DUPOK;
  1698. continue;
  1699. }
  1700. *buff++ = NONNULL_COL_VAL; // Store NOT NULL marker
  1701. }
  1702. //
1703. // accessing field_offset(key_part->field) instead of key_part->offset
  1704. // because key_part->offset is SET INCORRECTLY in add_index
  1705. // filed ticket 862 to look into this
  1706. //
  1707. buff = key_part->field->pack_key(buff, (uchar *) (record + field_offset(key_part->field)),
  1708. #if MYSQL_VERSION_ID < 50123
  1709. key_part->length);
  1710. #else
  1711. key_part->length, table->s->db_low_byte_first);
  1712. #endif
  1713. key_length -= key_part->length;
  1714. }
  1715. key->size = (buff - (uchar *) key->data);
  1716. DBUG_DUMP("key", (uchar *) key->data, key->size);
  1717. dbug_tmp_restore_column_map(table->write_set, old_map);
  1718. return key;
  1719. }
  1720. //
  1721. // Create a packed key from a row. This key will be written as such
  1722. // to the index tree. This will never fail as the key buffer is pre-allocated.
  1723. // Parameters:
  1724. // [out] key - DBT that holds the key
  1725. // keynr - index for which to create the key
  1726. // [out] buff - buffer that will hold the data for key (unless
  1727. // we have a hidden primary key)
  1728. // [in] record - row from which to create the key
  1729. // [out] has_null - says if the key has a NULL value for one of its columns
  1730. // key_length - currently set to MAX_KEY_LENGTH, is it size of buff?
  1731. // Returns:
  1732. // the parameter key
  1733. //
  1734. DBT *ha_tokudb::create_dbt_key_from_table(DBT * key, uint keynr, uchar * buff, const uchar * record, bool* has_null, int key_length) {
  1735. TOKUDB_DBUG_ENTER("ha_tokudb::create_dbt_key_from_table");
  1736. bzero((void *) key, sizeof(*key));
  1737. if (hidden_primary_key && keynr == primary_key) {
  1738. key->data = current_ident;
  1739. key->size = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
  1740. *has_null = false;
  1741. DBUG_RETURN(key);
  1742. }
  1743. DBUG_RETURN(create_dbt_key_from_key(key, &table->key_info[keynr],buff,record, has_null, key_length));
  1744. }
  1745. //
1746. // Create a packed key from a MySQL unpacked key (like the one that is
1747. // sent from index_read()). This key is to be used to read a row
  1748. // Parameters:
  1749. // [out] key - DBT that holds the key
  1750. // keynr - index for which to pack the key
  1751. // [out] buff - buffer that will hold the data for key
  1752. // [in] key_ptr - MySQL unpacked key
  1753. // key_length - length of key_ptr
  1754. // Returns:
  1755. // the parameter key
  1756. //
  1757. DBT *ha_tokudb::pack_key(DBT * key, uint keynr, uchar * buff, const uchar * key_ptr, uint key_length, uchar inf_byte) {
  1758. TOKUDB_DBUG_ENTER("ha_tokudb::pack_key");
  1759. KEY *key_info = table->key_info + keynr;
  1760. KEY_PART_INFO *key_part = key_info->key_part;
  1761. KEY_PART_INFO *end = key_part + key_info->key_parts;
  1762. my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
  1763. bzero((void *) key, sizeof(*key));
  1764. key->data = buff;
  1765. //
  1766. // first put the "infinity" byte at beginning. States if missing columns are implicitly
  1767. // positive infinity or negative infinity
  1768. //
  1769. *buff++ = inf_byte;
  1770. for (; key_part != end && (int) key_length > 0; key_part++) {
  1771. uint offset = 0;
  1772. if (key_part->null_bit) {
  1773. if (!(*key_ptr == 0)) {
  1774. *buff++ = NULL_COL_VAL;
  1775. key_length -= key_part->store_length;
  1776. key_ptr += key_part->store_length;
  1777. continue;
  1778. }
  1779. *buff++ = NONNULL_COL_VAL;
  1780. offset = 1; // Data is at key_ptr+1
  1781. }
  1782. buff = key_part->field->pack_key_from_key_image(buff, (uchar *) key_ptr + offset,
  1783. #if MYSQL_VERSION_ID < 50123
  1784. key_part->length);
  1785. #else
  1786. key_part->length, table->s->db_low_byte_first);
  1787. #endif
  1788. key_ptr += key_part->store_length;
  1789. key_length -= key_part->store_length;
  1790. }
  1791. key->size = (buff - (uchar *) key->data);
  1792. DBUG_DUMP("key", (uchar *) key->data, key->size);
  1793. dbug_tmp_restore_column_map(table->write_set, old_map);
  1794. DBUG_RETURN(key);
  1795. }
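//
// Usage sketch (not compiled): index_read() below probes an index by packing
// the MySQL search key with a negative-infinity byte, so unspecified suffix
// columns compare low and DB_SET_RANGE positions the cursor on the first
// candidate row at or after the prefix:
//
#if 0
pack_key(&last_key, active_index, key_buff, key, key_len, COL_NEG_INF);
error = cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE);
#endif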
  1796. int ha_tokudb::read_last() {
  1797. TOKUDB_DBUG_ENTER("ha_tokudb::read_last");
  1798. int do_commit = 0;
  1799. if (transaction == NULL) {
  1800. int r = db_env->txn_begin(db_env, 0, &transaction, 0);
  1801. assert(r == 0);
  1802. do_commit = 1;
  1803. }
  1804. int error = index_init(primary_key, 0);
  1805. if (error == 0)
  1806. error = index_last(table->record[1]);
  1807. index_end();
  1808. if (do_commit) {
  1809. int r = transaction->commit(transaction, 0);
  1810. assert(r == 0);
  1811. transaction = NULL;
  1812. }
  1813. TOKUDB_DBUG_RETURN(error);
  1814. }
  1815. //
  1816. // get max used hidden primary key value
  1817. //
  1818. void ha_tokudb::init_hidden_prim_key_info() {
1819. TOKUDB_DBUG_ENTER("ha_tokudb::init_hidden_prim_key_info");
  1820. pthread_mutex_lock(&share->mutex);
  1821. if (!(share->status & STATUS_PRIMARY_KEY_INIT)) {
  1822. (void) extra(HA_EXTRA_KEYREAD);
  1823. int error = read_last();
  1824. (void) extra(HA_EXTRA_NO_KEYREAD);
  1825. if (error == 0) {
  1826. share->auto_ident = uint5korr(current_ident);
  1827. }
  1828. share->status |= STATUS_PRIMARY_KEY_INIT;
  1829. }
  1830. pthread_mutex_unlock(&share->mutex);
  1831. DBUG_VOID_RETURN;
  1832. }
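//
// Standalone sketch (not compiled): the hidden primary key is a fixed-width
// TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH byte counter, decoded above with MySQL's
// uint5korr(). Assuming uint5korr's usual little-endian layout over 5 bytes,
// a hand-rolled equivalent looks like this:
//
#if 0
static unsigned long long example_uint5korr(const unsigned char *p) {
    return  (unsigned long long) p[0]
         | ((unsigned long long) p[1] << 8)
         | ((unsigned long long) p[2] << 16)
         | ((unsigned long long) p[3] << 24)
         | ((unsigned long long) p[4] << 32);
}
#endif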
  1833. /** @brief
  1834. Get metadata info stored in status.tokudb
  1835. */
  1836. int ha_tokudb::get_status() {
  1837. TOKUDB_DBUG_ENTER("ha_tokudb::get_status");
  1838. DB_TXN* txn = NULL;
  1839. DBT key, value;
  1840. HA_METADATA_KEY curr_key;
  1841. int error;
  1842. //
  1843. // open status.tokudb
  1844. //
  1845. if (!share->status_block) {
  1846. char name_buff[FN_REFLEN];
  1847. char newname[get_name_length(share->table_name) + 32];
  1848. make_name(newname, share->table_name, "status");
  1849. fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME);
  1850. uint open_mode = (((table->db_stat & HA_READ_ONLY) ? DB_RDONLY : 0)
  1851. | DB_THREAD);
  1852. if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
  1853. TOKUDB_TRACE("open:%s\n", newname);
  1854. }
  1855. error = db_create(&share->status_block, db_env, 0);
  1856. if (error) { goto cleanup; }
  1857. error = share->status_block->open(share->status_block, NULL, name_buff, NULL, DB_BTREE, open_mode, 0);
  1858. if (error) { goto cleanup; }
  1859. }
  1860. //
1861. // transaction to be used for reading metadata from status.tokudb
  1862. //
  1863. bzero(&key, sizeof(key));
  1864. bzero(&value, sizeof(value));
  1865. key.data = &curr_key;
  1866. key.size = sizeof(curr_key);
  1867. value.flags = DB_DBT_MALLOC;
  1868. error = db_env->txn_begin(db_env, 0, &txn, 0);
  1869. if (error) { goto cleanup; }
1870. if (share->status_block) {
  1872. //
  1873. // get version
  1874. //
  1875. curr_key = hatoku_version;
  1876. error = share->status_block->get(
  1877. share->status_block,
  1878. txn,
  1879. &key,
  1880. &value,
  1881. 0
  1882. );
  1883. if (error == DB_NOTFOUND) {
  1884. share->version = 0;
  1885. }
  1886. else if (error == 0 && value.size == sizeof(share->version)) {
  1887. share->version = *(uint *)value.data;
  1888. free(value.data);
  1889. value.data = NULL;
  1890. }
  1891. else {
  1892. goto cleanup;
  1893. }
  1894. //
  1895. // get capabilities
  1896. //
  1897. curr_key = hatoku_capabilities;
  1898. error = share->status_block->get(
  1899. share->status_block,
  1900. txn,
  1901. &key,
  1902. &value,
  1903. 0
  1904. );
  1905. if (error == DB_NOTFOUND) {
  1906. share->capabilities= 0;
  1907. }
1908. else if (error == 0 && value.size == sizeof(share->capabilities)) {
  1909. share->capabilities= *(uint *)value.data;
  1910. free(value.data);
  1911. value.data = NULL;
  1912. }
  1913. else {
  1914. goto cleanup;
  1915. }
  1916. }
  1917. error = 0;
  1918. cleanup:
  1919. if (txn) {
  1920. txn->commit(txn,0);
  1921. }
  1922. if (error) {
  1923. if (share->status_block) {
  1924. share->status_block->close(share->status_block, 0);
  1925. share->status_block = NULL;
  1926. }
  1927. }
  1928. TOKUDB_DBUG_RETURN(error);
  1929. }
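//
// Sketch (not compiled) of the read pattern used by get_status() above, with
// the same variable names: DB_DBT_MALLOC makes the engine allocate value.data
// on a successful get, so the caller must free() it, while DB_NOTFOUND simply
// means the metadata slot was never written.
//
#if 0
value.flags = DB_DBT_MALLOC;
error = share->status_block->get(share->status_block, txn, &key, &value, 0);
if (error == 0) {
    // consume value.data / value.size, then release the engine's allocation
    free(value.data);
    value.data = NULL;
}
#endif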
  1930. /** @brief
1931. Return an estimate of the number of rows in the table.
  1932. Used when sorting to allocate buffers and by the optimizer.
  1933. This is used in filesort.cc.
  1934. */
  1935. ha_rows ha_tokudb::estimate_rows_upper_bound() {
  1936. TOKUDB_DBUG_ENTER("ha_tokudb::estimate_rows_upper_bound");
  1937. DBUG_RETURN(share->rows + HA_TOKUDB_EXTRA_ROWS);
  1938. }
  1939. int ha_tokudb::cmp_ref(const uchar * ref1, const uchar * ref2) {
  1940. if (hidden_primary_key) {
  1941. return memcmp(ref1, ref2, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
  1942. }
  1943. int result;
  1944. Field *field;
  1945. KEY *key_info = table->key_info + table_share->primary_key;
  1946. KEY_PART_INFO *key_part = key_info->key_part;
  1947. KEY_PART_INFO *end = key_part + key_info->key_parts;
  1948. //
1949. // HACK until we get refactoring in. Manually move past the infinity byte
  1950. //
  1951. ref1++;
  1952. ref2++;
  1953. for (; key_part != end; key_part++) {
  1954. field = key_part->field;
  1955. result = field->pack_cmp((const uchar *) ref1, (const uchar *) ref2, key_part->length, 0);
  1956. if (result)
  1957. return result;
  1958. ref1 += field->packed_col_length((const uchar *) ref1, key_part->length);
  1959. ref2 += field->packed_col_length((const uchar *) ref2, key_part->length);
  1960. }
  1961. return 0;
  1962. }
  1963. bool ha_tokudb::check_if_incompatible_data(HA_CREATE_INFO * info, uint table_changes) {
  1964. //
  1965. // This is a horrendous hack for now, as copied by InnoDB.
  1966. // This states that if the auto increment create field has changed,
  1967. // via a "alter table foo auto_increment=new_val", that this
  1968. // change is incompatible, and to rebuild the entire table
  1969. // This will need to be fixed
  1970. //
  1971. if ((info->used_fields & HA_CREATE_USED_AUTO) &&
  1972. info->auto_increment_value != 0) {
  1973. return COMPATIBLE_DATA_NO;
  1974. }
  1975. if (table_changes != IS_EQUAL_YES)
  1976. return COMPATIBLE_DATA_NO;
  1977. return COMPATIBLE_DATA_YES;
  1978. }
  1979. //
  1980. // Stores a row in the table, called when handling an INSERT query
  1981. // Parameters:
  1982. // [in] record - a row in MySQL format
  1983. // Returns:
  1984. // 0 on success
  1985. // error otherwise
  1986. //
  1987. int ha_tokudb::write_row(uchar * record) {
  1988. TOKUDB_DBUG_ENTER("ha_tokudb::write_row");
  1989. DBT row, prim_key, key;
  1990. int error;
  1991. THD *thd = NULL;
  1992. u_int32_t put_flags;
  1993. bool has_null;
  1994. DB_TXN* sub_trans = NULL;
  1995. DB_TXN* txn = NULL;
  1996. //
  1997. // some crap that needs to be done because MySQL does not properly abstract
  1998. // this work away from us, namely filling in auto increment and setting auto timestamp
  1999. //
  2000. statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
  2001. if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) {
  2002. table->timestamp_field->set_time();
  2003. }
  2004. if (table->next_number_field && record == table->record[0]) {
  2005. update_auto_increment();
  2006. }
  2007. //
2008. // check to see if some value for the auto increment column that is bigger
2009. // than anything else seen so far is being used. If so, update the metadata to reflect it.
2010. // The goal here is that we never want a dup key error due to a bad increment
2011. // of the auto inc field.
  2012. //
  2013. if (share->has_auto_inc && record == table->record[0]) {
  2014. pthread_mutex_lock(&share->mutex);
  2015. ulonglong curr_auto_inc = retrieve_auto_increment(
  2016. table->field[share->ai_field_index]->key_type(),
  2017. field_offset(table->field[share->ai_field_index]),
  2018. record
  2019. );
  2020. if (curr_auto_inc > share->last_auto_increment) {
  2021. error = update_max_auto_inc(share->status_block, curr_auto_inc);
  2022. if (!error) {
  2023. share->last_auto_increment = curr_auto_inc;
  2024. }
  2025. }
  2026. pthread_mutex_unlock(&share->mutex);
  2027. }
  2028. if ((error = pack_row(&row, (const uchar *) record))){
  2029. goto cleanup;
  2030. }
  2031. if (hidden_primary_key) {
  2032. get_auto_primary_key(current_ident);
  2033. }
  2034. if (using_ignore) {
  2035. error = db_env->txn_begin(db_env, transaction, &sub_trans, 0);
  2036. if (error) {
  2037. goto cleanup;
  2038. }
  2039. }
  2040. txn = using_ignore ? sub_trans : transaction;
  2041. //
  2042. // first the primary key (because it must be unique, has highest chance of failure)
  2043. //
  2044. put_flags = share->key_type[primary_key];
  2045. thd = ha_thd();
  2046. if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) {
  2047. put_flags = DB_YESOVERWRITE;
  2048. }
  2049. error = share->file->put(
  2050. share->file,
  2051. txn,
  2052. create_dbt_key_from_table(&prim_key, primary_key, key_buff, record, &has_null),
  2053. &row,
  2054. put_flags
  2055. );
  2056. if (error) {
  2057. last_dup_key = primary_key;
  2058. goto cleanup;
  2059. }
  2060. //
  2061. // now insertion for rest of indexes
  2062. //
  2063. for (uint keynr = 0; keynr < table_share->keys; keynr++) {
  2064. if (keynr == primary_key) {
  2065. continue;
  2066. }
  2067. put_flags = share->key_type[keynr];
  2068. create_dbt_key_from_table(&key, keynr, key_buff2, record, &has_null);
  2069. if (put_flags == DB_NOOVERWRITE && (has_null || thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS))) {
  2070. put_flags = DB_YESOVERWRITE;
  2071. }
  2072. error = share->key_file[keynr]->put(
  2073. share->key_file[keynr],
  2074. txn,
  2075. &key,
  2076. &prim_key,
  2077. put_flags
  2078. );
  2079. //
  2080. // We break if we hit an error, unless it is a dup key error
  2081. // and MySQL told us to ignore duplicate key errors
  2082. //
  2083. if (error) {
  2084. last_dup_key = keynr;
  2085. goto cleanup;
  2086. }
  2087. }
  2088. if (!error) {
  2089. added_rows++;
  2090. }
  2091. cleanup:
  2092. if (error == DB_KEYEXIST) {
  2093. error = HA_ERR_FOUND_DUPP_KEY;
  2094. }
  2095. if (sub_trans) {
  2096. // no point in recording error value of abort.
  2097. // nothing we can do about it anyway and it is not what
  2098. // we want to return.
  2099. if (error) {
  2100. sub_trans->abort(sub_trans);
  2101. }
  2102. else {
  2103. error = sub_trans->commit(sub_trans, DB_TXN_NOSYNC);
  2104. }
  2105. }
  2106. TOKUDB_DBUG_RETURN(error);
  2107. }
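//
// Sketch (not compiled) of the uniqueness handling in write_row() above:
// an index whose key_type is DB_NOOVERWRITE enforces uniqueness, so a put
// on an existing key fails with DB_KEYEXIST, later mapped to
// HA_ERR_FOUND_DUPP_KEY. The check is relaxed to DB_YESOVERWRITE when the
// key contains a NULL (NULLs do not collide) or when the session enabled
// OPTION_RELAXED_UNIQUE_CHECKS. choose_put_flags is a hypothetical helper:
//
#if 0
static u_int32_t choose_put_flags(u_int32_t key_type, bool has_null, bool relaxed) {
    u_int32_t put_flags = key_type;    // DB_NOOVERWRITE for unique indexes
    if (put_flags == DB_NOOVERWRITE && (has_null || relaxed))
        put_flags = DB_YESOVERWRITE;   // skip the duplicate check
    return put_flags;
}
#endif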
  2108. /* Compare if a key in a row has changed */
  2109. int ha_tokudb::key_cmp(uint keynr, const uchar * old_row, const uchar * new_row) {
  2110. KEY_PART_INFO *key_part = table->key_info[keynr].key_part;
  2111. KEY_PART_INFO *end = key_part + table->key_info[keynr].key_parts;
  2112. for (; key_part != end; key_part++) {
  2113. if (key_part->null_bit) {
  2114. if ((old_row[key_part->null_offset] & key_part->null_bit) != (new_row[key_part->null_offset] & key_part->null_bit))
  2115. return 1;
  2116. }
  2117. if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) {
  2118. if (key_part->field->cmp_binary((uchar *) (old_row + key_part->offset), (uchar *) (new_row + key_part->offset), (ulong) key_part->length))
  2119. return 1;
  2120. } else {
  2121. if (memcmp(old_row + key_part->offset, new_row + key_part->offset, key_part->length))
  2122. return 1;
  2123. }
  2124. }
  2125. return 0;
  2126. }
  2127. /*
  2128. Update a row from one value to another.
  2129. Clobbers key_buff2
  2130. */
  2131. int ha_tokudb::update_primary_key(DB_TXN * trans, bool primary_key_changed, const uchar * old_row, DBT * old_key, const uchar * new_row, DBT * new_key) {
  2132. TOKUDB_DBUG_ENTER("update_primary_key");
  2133. DBT row;
  2134. int error;
  2135. if (primary_key_changed) {
  2136. // Primary key changed or we are updating a key that can have duplicates.
  2137. // Delete the old row and add a new one
  2138. if (!(error = remove_key(trans, primary_key, old_row, old_key))) {
  2139. if (!(error = pack_row(&row, new_row))) {
  2140. if ((error = share->file->put(share->file, trans, new_key, &row, share->key_type[primary_key]))) {
  2141. // Probably a duplicated key; restore old key and row if needed
  2142. last_dup_key = primary_key;
  2143. }
  2144. }
  2145. }
  2146. }
  2147. else {
  2148. // Primary key didn't change; just update the row data
  2149. if (!(error = pack_row(&row, new_row))) {
  2150. error = share->file->put(share->file, trans, new_key, &row, 0);
  2151. }
  2152. }
  2153. TOKUDB_DBUG_RETURN(error);
  2154. }
  2155. //
  2156. // Updates a row in the table, called when handling an UPDATE query
  2157. // Parameters:
  2158. // [in] old_row - row to be updated, in MySQL format
  2159. // [in] new_row - new row, in MySQL format
  2160. // Returns:
  2161. // 0 on success
  2162. // error otherwise
  2163. //
  2164. int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
  2165. TOKUDB_DBUG_ENTER("update_row");
  2166. DBT prim_key, key, old_prim_key;
  2167. int error;
  2168. bool primary_key_changed;
  2169. bool has_null;
  2170. THD* thd = ha_thd();
  2171. DB_TXN* sub_trans = NULL;
  2172. DB_TXN* txn = NULL;
  2173. LINT_INIT(error);
  2174. statistic_increment(table->in_use->status_var.ha_update_count, &LOCK_status);
  2175. if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) {
  2176. table->timestamp_field->set_time();
  2177. }
  2178. //
2179. // check to see if some value for the auto increment column that is bigger
2180. // than anything else seen so far is being used. If so, update the metadata to reflect it.
2181. // The goal here is that we never want a dup key error due to a bad increment
2182. // of the auto inc field.
  2183. //
  2184. if (share->has_auto_inc && new_row == table->record[0]) {
  2185. pthread_mutex_lock(&share->mutex);
  2186. ulonglong curr_auto_inc = retrieve_auto_increment(
  2187. table->field[share->ai_field_index]->key_type(),
  2188. field_offset(table->field[share->ai_field_index]),
  2189. new_row
  2190. );
  2191. if (curr_auto_inc > share->last_auto_increment) {
  2192. error = update_max_auto_inc(share->status_block, curr_auto_inc);
  2193. if (!error) {
  2194. share->last_auto_increment = curr_auto_inc;
  2195. }
  2196. }
  2197. pthread_mutex_unlock(&share->mutex);
  2198. }
  2199. if (hidden_primary_key) {
  2200. primary_key_changed = 0;
  2201. bzero((void *) &prim_key, sizeof(prim_key));
  2202. prim_key.data = (void *) current_ident;
  2203. prim_key.size = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH;
  2204. old_prim_key = prim_key;
  2205. }
  2206. else {
  2207. create_dbt_key_from_table(&prim_key, primary_key, key_buff, new_row, &has_null);
  2208. if ((primary_key_changed = key_cmp(primary_key, old_row, new_row))) {
  2209. create_dbt_key_from_table(&old_prim_key, primary_key, primary_key_buff, old_row, &has_null);
  2210. }
  2211. else {
  2212. old_prim_key = prim_key;
  2213. }
  2214. }
  2215. if (using_ignore) {
  2216. error = db_env->txn_begin(db_env, transaction, &sub_trans, 0);
  2217. if (error) {
  2218. goto cleanup;
  2219. }
  2220. }
  2221. txn = using_ignore ? sub_trans : transaction;
  2222. /* Start by updating the primary key */
  2223. error = update_primary_key(txn, primary_key_changed, old_row, &old_prim_key, new_row, &prim_key);
  2224. if (error) {
  2225. last_dup_key = primary_key;
  2226. goto cleanup;
  2227. }
  2228. // Update all other keys
  2229. for (uint keynr = 0; keynr < table_share->keys; keynr++) {
  2230. if (keynr == primary_key) {
  2231. continue;
  2232. }
  2233. if (key_cmp(keynr, old_row, new_row) || primary_key_changed) {
  2234. u_int32_t put_flags;
  2235. if ((error = remove_key(txn, keynr, old_row, &old_prim_key))) {
  2236. goto cleanup;
  2237. }
2238. create_dbt_key_from_table(&key, keynr, key_buff2, new_row, &has_null);
  2239. put_flags = share->key_type[keynr];
  2240. if (put_flags == DB_NOOVERWRITE && (has_null || thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS))) {
  2241. put_flags = DB_YESOVERWRITE;
  2242. }
  2243. error = share->key_file[keynr]->put(
  2244. share->key_file[keynr],
  2245. txn,
  2246. &key,
  2247. &prim_key,
  2248. put_flags
  2249. );
  2250. //
  2251. // We break if we hit an error, unless it is a dup key error
  2252. // and MySQL told us to ignore duplicate key errors
  2253. //
  2254. if (error) {
  2255. last_dup_key = keynr;
  2256. goto cleanup;
  2257. }
  2258. }
  2259. }
  2260. cleanup:
  2261. if (error == DB_KEYEXIST) {
  2262. error = HA_ERR_FOUND_DUPP_KEY;
  2263. }
  2264. if (sub_trans) {
  2265. // no point in recording error value of abort.
  2266. // nothing we can do about it anyway and it is not what
  2267. // we want to return.
  2268. if (error) {
  2269. sub_trans->abort(sub_trans);
  2270. }
  2271. else {
  2272. error = sub_trans->commit(sub_trans, DB_TXN_NOSYNC);
  2273. }
  2274. }
  2275. TOKUDB_DBUG_RETURN(error);
  2276. }
  2277. //
  2278. //
  2279. // Delete one key in key_file[keynr]
  2280. // This uses key_buff2, when keynr != primary key, so it's important that
  2281. // a function that calls this doesn't use this buffer for anything else.
  2282. // Parameters:
  2283. // [in] trans - transaction to be used for the delete
  2284. // keynr - index for which a key needs to be deleted
  2285. // [in] record - row in MySQL format. Must delete a key for this row
  2286. // [in] prim_key - key for record in primary table
  2287. // Returns:
  2288. // 0 on success
  2289. // error otherwise
  2290. //
  2291. int ha_tokudb::remove_key(DB_TXN * trans, uint keynr, const uchar * record, DBT * prim_key) {
  2292. TOKUDB_DBUG_ENTER("ha_tokudb::remove_key");
  2293. int error;
  2294. DBT key;
  2295. bool has_null;
  2296. DBUG_PRINT("enter", ("index: %d", keynr));
  2297. DBUG_PRINT("primary", ("index: %d", primary_key));
  2298. DBUG_DUMP("prim_key", (uchar *) prim_key->data, prim_key->size);
  2299. if (keynr == active_index && cursor) {
  2300. error = cursor->c_del(cursor, 0);
  2301. }
  2302. else if (keynr == primary_key) { // Unique key
  2303. DBUG_PRINT("Unique key", ("index: %d", keynr));
  2304. error = share->key_file[keynr]->del(share->key_file[keynr], trans, prim_key , 0);
  2305. }
  2306. else {
  2307. create_dbt_key_from_table(&key, keynr, key_buff2, record, &has_null);
  2308. error = share->key_file[keynr]->delboth(
  2309. share->key_file[keynr],
  2310. trans,
  2311. &key,
  2312. prim_key,
  2313. DB_DELETE_ANY
  2314. );
  2315. }
  2316. TOKUDB_DBUG_RETURN(error);
  2317. }
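//
// Sketch (not compiled): write_row() stores each secondary index entry as the
// pair (secondary key, primary key), so deleting one row's entry must name
// both halves via delboth(), while the primary index is keyed on the primary
// key alone and a plain del() suffices, exactly as remove_key() above does:
//
#if 0
error = share->key_file[keynr]->delboth(
    share->key_file[keynr], trans,
    &key,          // secondary key half of the pair
    prim_key,      // primary key half identifies the exact row
    DB_DELETE_ANY
);
#endif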
  2318. //
2319. // Delete all keys for record
  2320. // Parameters:
  2321. // [in] trans - transaction to be used for the delete
  2322. // [in] record - row in MySQL format. Must delete all keys for this row
  2323. // [in] prim_key - key for record in primary table
  2324. // [in] keys - array that states if a key is set, and hence needs
  2325. // removal
  2326. // Returns:
  2327. // 0 on success
  2328. // error otherwise
  2329. //
  2330. int ha_tokudb::remove_keys(DB_TXN * trans, const uchar * record, DBT * prim_key, key_map * keys) {
  2331. int result = 0;
  2332. for (uint keynr = 0; keynr < table_share->keys + test(hidden_primary_key); keynr++) {
  2333. if (keys->is_set(keynr)) {
  2334. int new_error = remove_key(trans, keynr, record, prim_key);
  2335. if (new_error) {
  2336. result = new_error; // Return last error
  2337. break; // Let rollback correct things
  2338. }
  2339. }
  2340. }
  2341. return result;
  2342. }
  2343. //
  2344. // Deletes a row in the table, called when handling a DELETE query
  2345. // Parameters:
  2346. // [in] record - row to be deleted, in MySQL format
  2347. // Returns:
  2348. // 0 on success
  2349. // error otherwise
  2350. //
  2351. int ha_tokudb::delete_row(const uchar * record) {
  2352. TOKUDB_DBUG_ENTER("ha_tokudb::delete_row");
  2353. int error = ENOSYS;
  2354. DBT prim_key;
  2355. key_map keys = table_share->keys_in_use;
  2356. bool has_null;
  2357. statistic_increment(table->in_use->status_var.ha_delete_count, &LOCK_status);
  2358. create_dbt_key_from_table(&prim_key, primary_key, key_buff, record, &has_null);
  2359. if (hidden_primary_key) {
  2360. keys.set_bit(primary_key);
  2361. }
  2362. /* Subtransactions may be used in order to retry the delete in
  2363. case we get a DB_LOCK_DEADLOCK error. */
  2364. DB_TXN *sub_trans = transaction;
  2365. error = remove_keys(sub_trans, record, &prim_key, &keys);
  2366. if (error) {
  2367. DBUG_PRINT("error", ("Got error %d", error));
  2368. }
  2369. else {
  2370. deleted_rows++;
  2371. }
  2372. TOKUDB_DBUG_RETURN(error);
  2373. }
  2374. //
2375. // Notification that a scan of the entire index is about
2376. // to take place. Will pre-acquire a table read lock
  2377. // Returns:
  2378. // 0 on success
  2379. // error otherwise
  2380. //
  2381. int ha_tokudb::prepare_index_scan() {
  2382. int error;
  2383. DB* db = share->key_file[active_index];
  2384. error = db->pre_acquire_read_lock(
  2385. db,
  2386. transaction,
  2387. db->dbt_neg_infty(), db->dbt_neg_infty(),
  2388. db->dbt_pos_infty(), db->dbt_pos_infty()
  2389. );
  2390. if (error) { last_cursor_error = error; goto cleanup; }
  2391. range_lock_grabbed = true;
  2392. error = 0;
  2393. cleanup:
  2394. return error;
  2395. }
  2396. //
  2397. // Initializes local cursor on DB with index keynr
  2398. // Parameters:
  2399. // keynr - key (index) number
  2400. // sorted - 1 if result MUST be sorted according to index
  2401. // Returns:
  2402. // 0 on success
  2403. // error otherwise
  2404. //
  2405. int ha_tokudb::index_init(uint keynr, bool sorted) {
  2406. TOKUDB_DBUG_ENTER("ha_tokudb::index_init %p %d", this, keynr);
  2407. int error;
  2408. DBUG_PRINT("enter", ("table: '%s' key: %d", table_share->table_name.str, keynr));
  2409. /*
  2410. Under some very rare conditions (like full joins) we may already have
  2411. an active cursor at this point
  2412. */
  2413. if (cursor) {
  2414. DBUG_PRINT("note", ("Closing active cursor"));
  2415. cursor->c_close(cursor);
  2416. }
  2417. active_index = keynr;
  2418. last_cursor_error = 0;
  2419. range_lock_grabbed = false;
  2420. DBUG_ASSERT(keynr <= table->s->keys);
  2421. DBUG_ASSERT(share->key_file[keynr]);
  2422. if ((error = share->key_file[keynr]->cursor(share->key_file[keynr], transaction, &cursor, 0))) {
  2423. last_cursor_error = error;
  2424. cursor = NULL; // Safety
  2425. }
  2426. bzero((void *) &last_key, sizeof(last_key));
  2427. TOKUDB_DBUG_RETURN(error);
  2428. }
  2429. //
  2430. // closes the local cursor
  2431. //
  2432. int ha_tokudb::index_end() {
  2433. TOKUDB_DBUG_ENTER("ha_tokudb::index_end %p", this);
  2434. int error = 0;
  2435. range_lock_grabbed = false;
  2436. if (cursor) {
  2437. DBUG_PRINT("enter", ("table: '%s'", table_share->table_name.str));
  2438. error = cursor->c_close(cursor);
  2439. cursor = NULL;
  2440. last_cursor_error = 0;
  2441. }
  2442. active_index = MAX_KEY;
  2443. TOKUDB_DBUG_RETURN(error);
  2444. }
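//
// Usage sketch (not compiled): the cursor lifecycle these two functions manage
// is always index_init() -> cursor operations -> index_end(), as read_last()
// above demonstrates for the primary key:
//
#if 0
error = index_init(primary_key, 0);        // opens 'cursor' on key_file[primary_key]
if (error == 0)
    error = index_last(table->record[1]);  // position at the last row
index_end();                               // closes 'cursor', resets active_index
#endif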
  2445. int ha_tokudb::handle_cursor_error(int error, int err_to_return, uint keynr) {
  2446. TOKUDB_DBUG_ENTER("ha_tokudb::handle_cursor_error");
  2447. if (error) {
  2448. last_cursor_error = error;
  2449. table->status = STATUS_NOT_FOUND;
  2450. cursor->c_close(cursor);
  2451. cursor = NULL;
  2452. if (error == DB_NOTFOUND || error == DB_KEYEMPTY) {
  2453. error = err_to_return;
  2454. if ((share->key_file[keynr]->cursor(share->key_file[keynr], transaction, &cursor, 0))) {
  2455. cursor = NULL; // Safety
  2456. }
  2457. }
  2458. }
  2459. TOKUDB_DBUG_RETURN(error);
  2460. }
  2461. //
  2462. // Helper function for read_row and smart_dbt_callback_xxx functions
  2463. // When using a hidden primary key, upon reading a row,
2464. // we set the current_ident field to the primary key we just retrieved
  2466. //
  2467. void ha_tokudb::extract_hidden_primary_key(uint keynr, DBT const *row, DBT const *found_key) {
  2468. //
  2469. // extract hidden primary key to current_ident
  2470. //
  2471. if (hidden_primary_key) {
  2472. if (keynr == primary_key) {
  2473. memcpy_fixed(current_ident, (char *) found_key->data, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
  2474. }
  2475. else {
  2476. memcpy_fixed(current_ident, (char *) row->data, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
  2477. }
  2478. }
  2479. }
  2480. //
  2481. // Reads the contents of row and found_key, DBT's retrieved from the DB associated to keynr, into buf
  2482. // This function assumes that we are using a covering index, as a result, if keynr is the primary key,
  2483. // we do not read row into buf
  2484. // Parameters:
  2485. // [out] buf - buffer for the row, in MySQL format
  2486. // keynr - index into key_file that represents DB we are currently operating on.
  2487. // [in] row - the row that has been read from the preceding DB call
  2488. // [in] found_key - key used to retrieve the row
  2489. //
  2490. void ha_tokudb::read_key_only(uchar * buf, uint keynr, DBT const *row, DBT const *found_key) {
  2491. TOKUDB_DBUG_ENTER("ha_tokudb::read_key_only");
  2492. table->status = 0;
  2493. unpack_key(buf, found_key, keynr);
  2494. if (!hidden_primary_key && (keynr != primary_key)) {
  2495. unpack_key(buf, row, primary_key);
  2496. }
  2497. DBUG_VOID_RETURN;
  2498. }
  2499. //
  2500. // Helper function used to try to retrieve the entire row
  2501. // If keynr is associated with the main table, reads contents of found_key and row into buf, otherwise,
  2502. // makes copy of primary key and saves it to last_key. This can later be used to retrieve the entire row
  2503. // Parameters:
  2504. // [out] buf - buffer for the row, in MySQL format
  2505. // keynr - index into key_file that represents DB we are currently operating on.
  2506. // [in] row - the row that has been read from the preceding DB call
  2507. // [in] found_key - key used to retrieve the row
  2508. //
  2509. void ha_tokudb::read_primary_key(uchar * buf, uint keynr, DBT const *row, DBT const *found_key) {
  2510. TOKUDB_DBUG_ENTER("ha_tokudb::read_primary_key");
  2511. table->status = 0;
  2512. if (keynr != primary_key) {
  2513. //
  2514. // create a DBT that has the same data as row,
  2515. //
  2516. bzero((void *) &last_key, sizeof(last_key));
  2517. last_key.data = key_buff;
  2518. last_key.size = row->size;
  2519. memcpy(key_buff, row->data, row->size);
  2520. }
  2521. else {
  2522. unpack_row(buf, row, found_key);
  2523. }
  2524. if (found_key) { DBUG_DUMP("read row key", (uchar *) found_key->data, found_key->size); }
  2525. DBUG_VOID_RETURN;
  2526. }
  2527. //
  2528. // This function reads an entire row into buf. This function also assumes that
  2529. // the key needed to retrieve the row is stored in the member variable last_key
  2530. // Parameters:
  2531. // [out] buf - buffer for the row, in MySQL format
  2532. // Returns:
  2533. // 0 on success, error otherwise
  2534. //
  2535. int ha_tokudb::read_full_row(uchar * buf) {
  2536. TOKUDB_DBUG_ENTER("ha_tokudb::read_full_row");
  2537. int error;
  2538. //
  2539. // Read the data into current_row, assumes key is stored in this->last_key
  2540. //
  2541. current_row.flags = DB_DBT_REALLOC;
  2542. if ((error = share->file->get(share->file, transaction, &last_key, &current_row, 0))) {
  2543. table->status = STATUS_NOT_FOUND;
  2544. TOKUDB_DBUG_RETURN(error == DB_NOTFOUND ? HA_ERR_CRASHED : error);
  2545. }
  2546. unpack_row(buf, &current_row, &last_key);
  2547. TOKUDB_DBUG_RETURN(0);
  2548. }
  2549. //
2550. // The function read_row checks whether the row was obtained from the primary table or
2551. // from an index table. If it was obtained from an index table, it performs a further
2552. // lookup in the main table. In the end, the read_row function returns the actual row
2553. // of interest in the buf parameter.
  2554. //
  2555. // Parameters:
  2556. // [out] buf - buffer for the row, in MySQL format
  2557. // keynr - index into key_file that represents DB we are currently operating on.
  2558. // [in] row - the row that has been read from the preceding DB call
  2559. // [in] found_key - key used to retrieve the row
  2560. //
  2561. int ha_tokudb::read_row(uchar * buf, uint keynr, DBT const *row, DBT const *found_key) {
  2562. TOKUDB_DBUG_ENTER("ha_tokudb::read_row");
  2563. int error;
  2564. extract_hidden_primary_key(keynr, row, found_key);
  2565. table->status = 0;
  2566. //
  2567. // if the index shows that the table we read the row from was indexed on the primary key,
  2568. // that means we have our row and can skip
  2569. // this entire if clause. All that is required is to unpack row.
  2570. // if the index shows that what we read was from a table that was NOT indexed on the
  2571. // primary key, then we must still retrieve the row, as the "row" value is indeed just
  2572. // a primary key, whose row we must still read
  2573. //
  2574. if (keynr != primary_key) {
  2575. if (key_read && found_key) {
  2576. // TOKUDB_DBUG_DUMP("key=", found_key->data, found_key->size);
  2577. unpack_key(buf, found_key, keynr);
  2578. if (!hidden_primary_key) {
  2579. // TOKUDB_DBUG_DUMP("row=", row->data, row->size);
  2580. unpack_key(buf, row, primary_key);
  2581. }
  2582. TOKUDB_DBUG_RETURN(0);
  2583. }
  2584. //
  2585. // create a DBT that has the same data as row,
  2586. //
  2587. DBT key;
  2588. bzero((void *) &key, sizeof(key));
  2589. key.data = key_buff;
  2590. key.size = row->size;
  2591. memcpy(key_buff, row->data, row->size);
  2592. //
  2593. // Read the data into current_row
  2594. //
  2595. current_row.flags = DB_DBT_REALLOC;
  2596. if ((error = share->file->get(share->file, transaction, &key, &current_row, 0))) {
  2597. table->status = STATUS_NOT_FOUND;
  2598. TOKUDB_DBUG_RETURN(error == DB_NOTFOUND ? HA_ERR_CRASHED : error);
  2599. }
  2600. unpack_row(buf, &current_row, &key);
  2601. }
  2602. else {
  2603. if (key_read && !hidden_primary_key) {
  2604. unpack_key(buf, found_key, keynr);
  2605. }
  2606. else {
  2607. unpack_row(buf, row, found_key);
  2608. }
  2609. }
  2610. if (found_key) { DBUG_DUMP("read row key", (uchar *) found_key->data, found_key->size); }
  2611. TOKUDB_DBUG_RETURN(0);
  2612. }
  2613. //
  2614. // This is only used to read whole keys
  2615. // According to InnoDB handlerton: Positions an index cursor to the index
  2616. // specified in keynr. Fetches the row if any
  2617. // Parameters:
  2618. // [out] buf - buffer for the returned row
  2619. // keynr - index to use
  2620. // [in] key - key value, according to InnoDB, if NULL,
  2621. // position cursor at start or end of index,
  2622. // not sure if this is done now
  2623. // key_len - length of key
  2624. // find_flag - according to InnoDB, search flags from my_base.h
  2625. // Returns:
  2626. // 0 on success
  2627. // HA_ERR_KEY_NOT_FOUND if not found (per InnoDB),
  2628. // error otherwise
  2629. //
  2630. int ha_tokudb::index_read_idx(uchar * buf, uint keynr, const uchar * key, uint key_len, enum ha_rkey_function find_flag) {
  2631. TOKUDB_DBUG_ENTER("ha_tokudb::index_read_idx");
  2632. int error;
  2633. table->in_use->status_var.ha_read_key_count++;
  2634. current_row.flags = DB_DBT_REALLOC;
  2635. active_index = MAX_KEY;
  2636. error = share->key_file[keynr]->get(share->key_file[keynr], transaction, pack_key(&last_key, keynr, key_buff, key, key_len, COL_NEG_INF), &current_row, 0);
  2637. if (error == DB_NOTFOUND || error == DB_KEYEMPTY) {
  2638. error = HA_ERR_KEY_NOT_FOUND;
  2639. goto cleanup;
  2640. }
  2641. if (!error) {
  2642. error = read_row(buf, keynr, &current_row, &last_key);
  2643. }
  2644. cleanup:
  2645. TOKUDB_DBUG_RETURN(error);
  2646. }
  2647. //
  2648. // context information for the heaviside functions.
  2649. // Context information includes data necessary
  2650. // to perform comparisons
  2651. //
  2652. typedef struct heavi_info {
  2653. DB *db;
  2654. const DBT *key;
  2655. } *HEAVI_INFO;
  2656. //
  2657. // effect:
  2658. // heaviside function used for HA_READ_AFTER_KEY.
  2659. // to use this heaviside function in ha_read_after_key, use direction>0
  2660. // the stored key (in heavi_info) contains a prefix of the columns in the candidate
  2661. // keys. only the columns in the stored key will be used for comparison.
  2662. //
  2663. // parameters:
  2664. // [in] key - candidate key in db that is being compared
  2665. // [in] value - candidate value, unused
  2666. // [in] extra_h - a heavi_info that contains information necessary for
  2667. // the comparison
  2668. // returns:
  2669. // >0 : candidate key > stored key
  2670. // <0 : otherwise
  2671. // examples:
  2672. // columns: (a,b,c,d)
  2673. // stored key = (3,4) (only a,b)
  2674. // candidate keys have (a,b,c,d)
  2675. // (3,2,1,1) < (3,4)
  2676. // (3,4,1,1) == (3,4)
  2677. // (3,5,1,1) > (3,4)
  2678. //
  2679. static int after_key_heavi(const DBT *key, const DBT *value, void *extra_h) {
  2680. HEAVI_INFO info = (HEAVI_INFO)extra_h;
  2681. int cmp = tokudb_prefix_cmp_packed_key(info->db, key, info->key);
  2682. return cmp>0 ? 1 : -1;
  2683. }
  2684. //
  2685. // effect:
  2686. // heaviside function used for HA_READ_PREFIX_LAST_OR_PREV.
  2687. // to use this heaviside function in HA_READ_PREFIX_LAST_OR_PREV, use direction<0
  2688. // the stored key (in heavi_info) contains a prefix of the columns in the candidate
  2689. // keys. only the columns in the stored key will be used for comparison.
  2690. //
  2691. // parameters:
  2692. // [in] key - candidate key in db that is being compared
  2693. // [in] value - candidate value, unused
  2694. // [in] extra_h - a heavi_info that contains information necessary for
  2695. // the comparison
  2696. // returns:
  2697. // >0 : candidate key > stored key
  2698. // 0 : candidate key == stored key
  2699. // <0 : candidate key < stored key
  2700. // examples:
  2701. // columns: (a,b,c,d)
  2702. // stored key = (3,4) (only a,b)
  2703. // candidate keys have (a,b,c,d)
  2704. // (3,2,1,1) < (3,4)
  2705. // (3,4,1,1) == (3,4)
  2706. // (3,5,1,1) > (3,4)
  2707. //
  2708. static int prefix_last_or_prev_heavi(const DBT *key, const DBT *value, void *extra_h) {
  2709. HEAVI_INFO info = (HEAVI_INFO)extra_h;
  2710. int cmp = tokudb_prefix_cmp_packed_key(info->db, key, info->key);
  2711. return cmp;
  2712. }
  2713. //
  2714. // effect:
  2715. // heaviside function used for HA_READ_BEFORE_KEY.
  2716. // to use this heaviside function in HA_READ_BEFORE_KEY, use direction<0
  2717. // the stored key (in heavi_info) contains a prefix of the columns in the candidate
  2718. // keys. only the columns in the stored key will be used for comparison.
  2719. //
  2720. // parameters:
  2721. // [in] key - candidate key in db that is being compared
  2722. // [in] value - candidate value, unused
  2723. // [in] extra_h - a heavi_info that contains information necessary for
  2724. // the comparison
  2725. // returns:
  2726. // <0 : candidate key < stored key
  2727. // >0 : otherwise
  2728. // examples:
  2729. // columns: (a,b,c,d)
  2730. // stored key = (3,4) (only a,b)
  2731. // candidate keys have (a,b,c,d)
  2732. // (3,2,1,1) < (3,4)
  2733. // (3,4,1,1) == (3,4)
  2734. // (3,5,1,1) > (3,4)
  2735. //
  2736. static int before_key_heavi(const DBT *key, const DBT *value, void *extra_h) {
  2737. HEAVI_INFO info = (HEAVI_INFO)extra_h;
  2738. int cmp = tokudb_prefix_cmp_packed_key(info->db, key, info->key);
  2739. return (cmp<0) ? -1 : 1;
  2740. }
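//
// Standalone sketch (not compiled) of a heaviside search, using plain ints in
// place of packed keys. The cursor looks for the sign change of the function
// along key order; with direction > 0 it stops at the smallest key whose
// function value is positive. after_key_heavi() maps "candidate <= stored
// prefix" to -1 and "candidate > stored prefix" to +1, so HA_READ_AFTER_KEY
// lands on the first key strictly greater than the prefix:
//
#if 0
static int example_after_key(int candidate, int stored) {
    int cmp = (candidate > stored) - (candidate < stored); // stand-in comparator
    return cmp > 0 ? 1 : -1;   // no zero: never stop on a merely equal key
}
// keys 1 3 3 5 7, stored prefix = 3, direction > 0:
// f = -1 -1 -1 +1 +1  ->  the search returns key 5
#endif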
  2741. //
  2742. // According to InnoDB handlerton: Positions an index cursor to the index
  2743. // specified in keynr. Fetches the row if any
  2744. // Parameters:
  2745. // [out] buf - buffer for the returned row
  2746. // [in] key - key value, according to InnoDB, if NULL,
  2747. // position cursor at start or end of index,
  2748. // not sure if this is done now
  2749. // key_len - length of key
  2750. // find_flag - according to InnoDB, search flags from my_base.h
  2751. // Returns:
  2752. // 0 on success
  2753. // HA_ERR_KEY_NOT_FOUND if not found (per InnoDB),
  2754. // we seem to return HA_ERR_END_OF_FILE if find_flag != HA_READ_KEY_EXACT
  2755. // TODO: investigate this for correctness
  2756. // error otherwise
  2757. //
  2758. int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_rkey_function find_flag) {
  2759. TOKUDB_DBUG_ENTER("ha_tokudb::index_read %p find %d", this, find_flag);
  2760. // TOKUDB_DBUG_DUMP("key=", key, key_len);
  2761. DBT row;
  2762. int error;
  2763. struct smart_dbt_info info;
  2764. struct heavi_info heavi_info;
  2765. bool do_read_row = true;
  2766. HANDLE_INVALID_CURSOR();
  2767. table->in_use->status_var.ha_read_key_count++;
  2768. bzero((void *) &row, sizeof(row));
  2769. pack_key(&last_key, active_index, key_buff, key, key_len, COL_NEG_INF);
  2770. info.ha = this;
  2771. info.buf = buf;
  2772. info.keynr = active_index;
  2773. heavi_info.db = share->key_file[active_index];
  2774. heavi_info.key = &last_key;
  2775. switch (find_flag) {
  2776. case HA_READ_KEY_EXACT: /* Find first record else error */
  2777. error = cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE);
  2778. if (error == 0) {
  2779. DBT orig_key;
  2780. pack_key(&orig_key, active_index, key_buff2, key, key_len, COL_NEG_INF);
  2781. if (tokudb_prefix_cmp_packed_key(share->key_file[active_index], &orig_key, &last_key)) {
  2782. error = DB_NOTFOUND;
  2783. }
  2784. }
  2785. break;
  2786. case HA_READ_AFTER_KEY: /* Find next rec. after key-record */
  2787. error = cursor->c_getf_heavi(
  2788. cursor, 0,
  2789. key_read ? smart_dbt_callback_keyread_heavi : smart_dbt_callback_rowread_heavi, &info,
  2790. after_key_heavi, &heavi_info,
  2791. 1
  2792. );
  2793. do_read_row = false;
  2794. break;
  2795. case HA_READ_BEFORE_KEY: /* Find next rec. before key-record */
  2796. error = cursor->c_getf_heavi(
  2797. cursor, 0,
  2798. key_read ? smart_dbt_callback_keyread_heavi : smart_dbt_callback_rowread_heavi, &info,
  2799. before_key_heavi, &heavi_info,
  2800. -1
  2801. );
  2802. do_read_row = false;
  2803. break;
  2804. case HA_READ_KEY_OR_NEXT: /* Record or next record */
  2805. error = cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE);
  2806. break;
  2807. case HA_READ_KEY_OR_PREV: /* Record or previous */
  2808. error = cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE);
  2809. if (error == 0) {
  2810. DBT orig_key;
  2811. pack_key(&orig_key, active_index, key_buff2, key, key_len, COL_NEG_INF);
  2812. if (tokudb_prefix_cmp_packed_key(share->key_file[active_index], &orig_key, &last_key) != 0) {
  2813. error = cursor->c_get(cursor, &last_key, &row, DB_PREV);
  2814. }
  2815. }
  2816. else if (error == DB_NOTFOUND)
  2817. error = cursor->c_get(cursor, &last_key, &row, DB_LAST);
  2818. break;
  2819. case HA_READ_PREFIX_LAST_OR_PREV: /* Last or prev key with the same prefix */
  2820. error = cursor->c_getf_heavi(
  2821. cursor, 0,
  2822. key_read ? smart_dbt_callback_keyread_heavi : smart_dbt_callback_rowread_heavi, &info,
  2823. prefix_last_or_prev_heavi, &heavi_info,
  2824. -1
  2825. );
  2826. do_read_row = false;
  2827. break;
  2828. case HA_READ_PREFIX_LAST:
  2829. error = cursor->c_getf_heavi(
  2830. cursor, 0,
  2831. key_read ? smart_dbt_callback_keyread_heavi : smart_dbt_callback_rowread_heavi, &info,
  2832. prefix_last_or_prev_heavi, &heavi_info,
  2833. -1
  2834. );
  2835. if (!error && heavi_ret_val != 0) {
  2836. error = DB_NOTFOUND;
  2837. }
  2838. do_read_row = false;
  2839. break;
  2840. default:
  2841. TOKUDB_TRACE("unsupported:%d\n", find_flag);
  2842. error = HA_ERR_UNSUPPORTED;
  2843. break;
  2844. }
  2845. error = handle_cursor_error(error,HA_ERR_KEY_NOT_FOUND,active_index);
  2846. if (!error && do_read_row) {
  2847. error = read_row(buf, active_index, &row, &last_key);
  2848. }
  2849. else if (!error && !key_read && active_index != primary_key) {
  2850. error = read_full_row(buf);
  2851. }
  2852. if (error && (tokudb_debug & TOKUDB_DEBUG_ERROR)) {
  2853. TOKUDB_TRACE("error:%d:%d\n", error, find_flag);
  2854. }
  2855. cleanup:
  2856. TOKUDB_DBUG_RETURN(error);
  2857. }
  2858. //
  2859. // Reads the next row from the active index (cursor) into buf, and advances cursor
  2860. // Parameters:
  2861. // [out] buf - buffer for the next row, in MySQL format
  2862. // Returns:
  2863. // 0 on success
  2864. // HA_ERR_END_OF_FILE if not found
  2865. // error otherwise
  2866. //
  2867. int ha_tokudb::index_next(uchar * buf) {
  2868. TOKUDB_DBUG_ENTER("ha_tokudb::index_next");
  2869. int error;
  2870. struct smart_dbt_info info;
  2871. u_int32_t flags = SET_READ_FLAG(0);
  2872. HANDLE_INVALID_CURSOR();
  2873. statistic_increment(table->in_use->status_var.ha_read_next_count, &LOCK_status);
  2874. info.ha = this;
  2875. info.buf = buf;
  2876. info.keynr = active_index;
  2877. error = handle_cursor_error(cursor->c_getf_next(cursor, flags, SMART_DBT_CALLBACK, &info), HA_ERR_END_OF_FILE,active_index);
  2878. //
  2879. // still need to get entire contents of the row if operation done on
  2880. // secondary DB and it was NOT a covering index
  2881. //
  2882. if (!error && !key_read && (active_index != primary_key) ) {
  2883. error = read_full_row(buf);
  2884. }
  2885. cleanup:
  2886. TOKUDB_DBUG_RETURN(error);
  2887. }
  2888. //
// Reads the next row matching the key; on success, advances cursor
  2890. // Parameters:
  2891. // [out] buf - buffer for the next row, in MySQL format
  2892. // [in] key - key value
  2893. // keylen - length of key
  2894. // Returns:
  2895. // 0 on success
  2896. // HA_ERR_END_OF_FILE if not found
  2897. // error otherwise
  2898. //
  2899. int ha_tokudb::index_next_same(uchar * buf, const uchar * key, uint keylen) {
  2900. TOKUDB_DBUG_ENTER("ha_tokudb::index_next_same %p", this);
  2901. int error;
  2902. struct smart_dbt_info info;
  2903. HANDLE_INVALID_CURSOR();
  2904. statistic_increment(table->in_use->status_var.ha_read_next_count, &LOCK_status);
  2905. info.ha = this;
  2906. info.buf = buf;
  2907. info.keynr = active_index;
  2908. /* QQQ NEXT_DUP on nodup returns EINVAL for tokudb */
  2909. if (keylen == table->key_info[active_index].key_length &&
  2910. !(table->key_info[active_index].flags & HA_NOSAME) &&
  2911. !(table->key_info[active_index].flags & HA_END_SPACE_KEY)) {
  2912. u_int32_t flags = SET_READ_FLAG(0);
  2913. error = handle_cursor_error(cursor->c_getf_next_dup(cursor, flags, SMART_DBT_CALLBACK, &info),HA_ERR_END_OF_FILE,active_index);
  2914. if (!error && !key_read && active_index != primary_key) {
  2915. error = read_full_row(buf);
  2916. }
  2917. } else {
  2918. u_int32_t flags = SET_READ_FLAG(0);
  2919. error = handle_cursor_error(cursor->c_getf_next(cursor, flags, SMART_DBT_CALLBACK, &info),HA_ERR_END_OF_FILE,active_index);
  2920. if (!error && !key_read && active_index != primary_key) {
  2921. error = read_full_row(buf);
  2922. }
  2923. if (!error &&::key_cmp_if_same(table, key, active_index, keylen))
  2924. error = HA_ERR_END_OF_FILE;
  2925. }
  2926. cleanup:
  2927. TOKUDB_DBUG_RETURN(error);
  2928. }
  2929. //
// Reads the previous row from the active index (cursor) into buf, and moves the cursor back
  2931. // Parameters:
  2932. // [out] buf - buffer for the next row, in MySQL format
  2933. // Returns:
  2934. // 0 on success
  2935. // HA_ERR_END_OF_FILE if not found
  2936. // error otherwise
  2937. //
  2938. int ha_tokudb::index_prev(uchar * buf) {
TOKUDB_DBUG_ENTER("ha_tokudb::index_prev");
  2940. int error;
  2941. struct smart_dbt_info info;
  2942. u_int32_t flags = SET_READ_FLAG(0);
  2943. HANDLE_INVALID_CURSOR();
statistic_increment(table->in_use->status_var.ha_read_prev_count, &LOCK_status);
  2945. info.ha = this;
  2946. info.buf = buf;
  2947. info.keynr = active_index;
  2948. error = handle_cursor_error(cursor->c_getf_prev(cursor, flags, SMART_DBT_CALLBACK, &info),HA_ERR_END_OF_FILE,active_index);
  2949. //
  2950. // still need to get entire contents of the row if operation done on
  2951. // secondary DB and it was NOT a covering index
  2952. //
  2953. if (!error && !key_read && (active_index != primary_key) ) {
  2954. error = read_full_row(buf);
  2955. }
  2956. cleanup:
  2957. TOKUDB_DBUG_RETURN(error);
  2958. }
  2959. //
  2960. // Reads the first row from the active index (cursor) into buf, and advances cursor
  2961. // Parameters:
  2962. // [out] buf - buffer for the next row, in MySQL format
  2963. // Returns:
  2964. // 0 on success
  2965. // HA_ERR_END_OF_FILE if not found
  2966. // error otherwise
  2967. //
  2968. int ha_tokudb::index_first(uchar * buf) {
  2969. TOKUDB_DBUG_ENTER("ha_tokudb::index_first");
  2970. int error;
  2971. DBT row;
  2972. HANDLE_INVALID_CURSOR();
  2973. statistic_increment(table->in_use->status_var.ha_read_first_count, &LOCK_status);
  2974. bzero((void *) &row, sizeof(row));
  2975. error = handle_cursor_error(cursor->c_get(cursor, &last_key, &row, DB_FIRST),HA_ERR_END_OF_FILE,active_index);
  2976. if (!error) {
  2977. error = read_row(buf, active_index, &row, &last_key);
  2978. }
  2979. cleanup:
  2980. TOKUDB_DBUG_RETURN(error);
  2981. }
  2982. //
  2983. // Reads the last row from the active index (cursor) into buf, and advances cursor
  2984. // Parameters:
  2985. // [out] buf - buffer for the next row, in MySQL format
  2986. // Returns:
  2987. // 0 on success
  2988. // HA_ERR_END_OF_FILE if not found
  2989. // error otherwise
  2990. //
  2991. int ha_tokudb::index_last(uchar * buf) {
  2992. TOKUDB_DBUG_ENTER("ha_tokudb::index_last");
  2993. int error;
  2994. DBT row;
  2995. HANDLE_INVALID_CURSOR();
  2996. statistic_increment(table->in_use->status_var.ha_read_last_count, &LOCK_status);
  2997. bzero((void *) &row, sizeof(row));
  2998. error = handle_cursor_error(cursor->c_get(cursor, &last_key, &row, DB_LAST),HA_ERR_END_OF_FILE,active_index);
  2999. if (!error) {
  3000. error = read_row(buf, active_index, &row, &last_key);
  3001. }
  3002. cleanup:
  3003. TOKUDB_DBUG_RETURN(error);
  3004. }
  3005. //
  3006. // Initialize a scan of the table (which is why index_init is called on primary_key)
  3007. // Parameters:
  3008. // scan - unused
  3009. // Returns:
  3010. // 0 on success
  3011. // error otherwise
  3012. //
  3013. int ha_tokudb::rnd_init(bool scan) {
  3014. TOKUDB_DBUG_ENTER("ha_tokudb::rnd_init");
  3015. int error;
  3016. current_row.flags = DB_DBT_REALLOC;
  3017. range_lock_grabbed = false;
  3018. if (scan) {
  3019. DB* db = share->key_file[primary_key];
  3020. error = db->pre_acquire_read_lock(db, transaction, db->dbt_neg_infty(), NULL, db->dbt_pos_infty(), NULL);
  3021. if (error) { last_cursor_error = error; goto cleanup; }
  3022. }
  3023. error = index_init(primary_key, 0);
  3024. if (error) { goto cleanup;}
  3025. //
  3026. // only want to set range_lock_grabbed to true after index_init
  3027. // successfully executed for two reasons:
  3028. // 1) index_init will reset it to false anyway
  3029. // 2) if it fails, we don't want prelocking on,
  3030. //
  3031. if (scan) { range_lock_grabbed = true; }
  3032. error = 0;
  3033. cleanup:
  3034. TOKUDB_DBUG_RETURN(error);
  3035. }
  3036. //
  3037. // End a scan of the table
  3038. //
  3039. int ha_tokudb::rnd_end() {
  3040. TOKUDB_DBUG_ENTER("ha_tokudb::rnd_end");
  3041. range_lock_grabbed = false;
  3042. TOKUDB_DBUG_RETURN(index_end());
  3043. }
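//
// A minimal sketch of how the server drives a full table scan through the
// functions above (illustrative only: error paths, handler setup, and the
// record buffer are assumed to exist, and member access is simplified):
//
#if 0 // QQQ illustrative sketch only, not compiled
static int example_full_scan(ha_tokudb *h, uchar *rec_buf) {
    int error = h->rnd_init(true);              // prelocks the whole PK range, opens cursor
    if (error) return error;
    while ((error = h->rnd_next(rec_buf)) == 0) {
        // consume one row in MySQL row format from rec_buf
    }
    int end_error = h->rnd_end();               // clears range_lock_grabbed, closes cursor
    return (error == HA_ERR_END_OF_FILE) ? end_error : error;
}
#endif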
  3044. //
  3045. // Read the next row in a table scan
  3046. // Parameters:
  3047. // [out] buf - buffer for the next row, in MySQL format
  3048. // Returns:
  3049. // 0 on success
  3050. // HA_ERR_END_OF_FILE if not found
  3051. // error otherwise
  3052. //
  3053. int ha_tokudb::rnd_next(uchar * buf) {
TOKUDB_DBUG_ENTER("ha_tokudb::rnd_next");
  3055. int error;
  3056. u_int32_t flags = SET_READ_FLAG(0);
  3057. struct smart_dbt_info info;
  3058. HANDLE_INVALID_CURSOR();
  3059. //
  3060. // The reason we do not just call index_next is that index_next
  3061. // increments a different variable than we do here
  3062. //
  3063. statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, &LOCK_status);
  3064. info.ha = this;
  3065. info.buf = buf;
  3066. info.keynr = primary_key;
  3067. error = handle_cursor_error(cursor->c_getf_next(cursor, flags, SMART_DBT_CALLBACK, &info),HA_ERR_END_OF_FILE,primary_key);
  3068. cleanup:
  3069. TOKUDB_DBUG_RETURN(error);
  3070. }
  3071. DBT *ha_tokudb::get_pos(DBT * to, uchar * pos) {
  3072. TOKUDB_DBUG_ENTER("ha_tokudb::get_pos");
  3073. /* We don't need to set app_data here */
  3074. bzero((void *) to, sizeof(*to));
  3075. //
  3076. // this should really be done through pack_key functions
  3077. //
  3078. to->data = pos;
  3079. if (share->fixed_length_primary_key)
  3080. to->size = ref_length;
  3081. else {
  3082. //
  3083. // move up infinity byte
  3084. //
  3085. pos++;
  3086. KEY_PART_INFO *key_part = table->key_info[primary_key].key_part;
  3087. KEY_PART_INFO *end = key_part + table->key_info[primary_key].key_parts;
  3088. for (; key_part != end; key_part++) {
  3089. pos += key_part->field->packed_col_length(pos, key_part->length);
  3090. }
  3091. to->size = (uint) (pos - (uchar *) to->data);
  3092. }
  3093. DBUG_DUMP("key", (const uchar *) to->data, to->size);
  3094. DBUG_RETURN(to);
  3095. }
  3096. //
// Retrieves a row based on the primary key saved in pos
  3098. // Returns:
  3099. // 0 on success
  3100. // HA_ERR_KEY_NOT_FOUND if not found
  3101. // error otherwise
  3102. //
  3103. int ha_tokudb::rnd_pos(uchar * buf, uchar * pos) {
  3104. TOKUDB_DBUG_ENTER("ha_tokudb::rnd_pos");
  3105. DBT db_pos;
  3106. int error;
  3107. statistic_increment(table->in_use->status_var.ha_read_rnd_count, &LOCK_status);
  3108. active_index = MAX_KEY;
  3109. DBT* key = get_pos(&db_pos, pos);
  3110. error = share->file->get(share->file, transaction, key, &current_row, 0);
  3111. if (error == DB_NOTFOUND || error == DB_KEYEMPTY) {
  3112. error = HA_ERR_KEY_NOT_FOUND;
  3113. goto cleanup;
  3114. }
  3115. if (!error) {
  3116. error = read_row(buf, primary_key, &current_row, key);
  3117. }
  3118. cleanup:
  3119. TOKUDB_DBUG_RETURN(error);
  3120. }
  3121. int ha_tokudb::read_range_first(
  3122. const key_range *start_key,
  3123. const key_range *end_key,
  3124. bool eq_range,
  3125. bool sorted)
  3126. {
  3127. TOKUDB_DBUG_ENTER("ha_tokudb::read_range_first");
  3128. int error;
  3129. DBT start_dbt_key;
  3130. const DBT* start_dbt_data = NULL;
  3131. DBT end_dbt_key;
  3132. const DBT* end_dbt_data = NULL;
  3133. uchar start_key_buff [table_share->max_key_length + MAX_REF_PARTS * 3 + sizeof(uchar)];
  3134. uchar end_key_buff [table_share->max_key_length + MAX_REF_PARTS * 3 + sizeof(uchar)];
  3135. bzero((void *) &start_dbt_key, sizeof(start_dbt_key));
  3136. bzero((void *) &end_dbt_key, sizeof(end_dbt_key));
  3137. range_lock_grabbed = false;
  3138. if (start_key) {
  3139. switch (start_key->flag) {
  3140. case HA_READ_AFTER_KEY:
  3141. pack_key(&start_dbt_key, active_index, start_key_buff, start_key->key, start_key->length, COL_POS_INF);
  3142. start_dbt_data = share->key_file[active_index]->dbt_pos_infty();
  3143. break;
  3144. default:
  3145. pack_key(&start_dbt_key, active_index, start_key_buff, start_key->key, start_key->length, COL_NEG_INF);
  3146. start_dbt_data = share->key_file[active_index]->dbt_neg_infty();
  3147. break;
  3148. }
  3149. }
  3150. else {
  3151. start_dbt_data = share->key_file[active_index]->dbt_neg_infty();
  3152. }
  3153. if (end_key) {
  3154. switch (end_key->flag) {
  3155. case HA_READ_BEFORE_KEY:
  3156. pack_key(&end_dbt_key, active_index, end_key_buff, end_key->key, end_key->length, COL_NEG_INF);
  3157. end_dbt_data = share->key_file[active_index]->dbt_neg_infty();
  3158. break;
  3159. default:
  3160. pack_key(&end_dbt_key, active_index, end_key_buff, end_key->key, end_key->length, COL_POS_INF);
  3161. end_dbt_data = share->key_file[active_index]->dbt_pos_infty();
  3162. break;
  3163. }
  3164. }
  3165. else {
  3166. end_dbt_data = share->key_file[active_index]->dbt_pos_infty();
  3167. }
  3168. error = share->key_file[active_index]->pre_acquire_read_lock(
  3169. share->key_file[active_index],
  3170. transaction,
  3171. start_key ? &start_dbt_key : share->key_file[active_index]->dbt_neg_infty(),
  3172. start_dbt_data,
  3173. end_key ? &end_dbt_key : share->key_file[active_index]->dbt_pos_infty(),
  3174. end_dbt_data
  3175. );
  3176. if (error){
  3177. last_cursor_error = error;
  3178. //
  3179. // cursor should be initialized here, but in case it is not, we still check
  3180. //
  3181. if (cursor) {
  3182. cursor->c_close(cursor);
  3183. cursor = NULL;
  3184. }
  3185. goto cleanup;
  3186. }
  3187. range_lock_grabbed = true;
  3188. error = handler::read_range_first(start_key, end_key, eq_range, sorted);
  3189. cleanup:
  3190. TOKUDB_DBUG_RETURN(error);
  3191. }
  3192. int ha_tokudb::read_range_next()
  3193. {
  3194. TOKUDB_DBUG_ENTER("ha_tokudb::read_range_next");
  3195. int error;
  3196. error = handler::read_range_next();
  3197. if (error) {
  3198. range_lock_grabbed = false;
  3199. }
  3200. TOKUDB_DBUG_RETURN(error);
  3201. }
  3202. /*
  3203. Set a reference to the current record in (ref,ref_length).
  3204. SYNOPSIS
  3205. ha_tokudb::position()
  3206. record The current record buffer
  3207. DESCRIPTION
  3208. The BDB handler stores the primary key in (ref,ref_length).
  3209. There is either an explicit primary key, or an implicit (hidden)
  3210. primary key.
  3211. During open(), 'ref_length' is calculated as the maximum primary
  3212. key length. When an actual key is shorter than that, the rest of
the buffer must be cleared out. The row cannot be identified if
garbage follows behind the end of the key. There is no length
field for the current key, so the whole ref_length is used
for comparison.
  3217. RETURN
  3218. nothing
  3219. */
  3220. void ha_tokudb::position(const uchar * record) {
  3221. TOKUDB_DBUG_ENTER("ha_tokudb::position");
  3222. DBT key;
  3223. if (hidden_primary_key) {
  3224. DBUG_ASSERT(ref_length == TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
  3225. memcpy_fixed(ref, (char *) current_ident, TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH);
  3226. }
  3227. else {
  3228. bool has_null;
  3229. create_dbt_key_from_table(&key, primary_key, ref, record, &has_null);
  3230. if (key.size < ref_length) {
  3231. bzero(ref + key.size, ref_length - key.size);
  3232. }
  3233. }
  3234. DBUG_VOID_RETURN;
  3235. }
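//
// Sketch of the position()/rnd_pos() round trip (illustrative only: buffer
// management and error handling are elided, and the saved-ref buffer size is
// just an assumed upper bound for this example):
//
#if 0 // QQQ illustrative sketch only, not compiled
static int example_refetch(ha_tokudb *h, uchar *rec_buf) {
    int error = h->rnd_next(rec_buf);       // read some row during a scan
    if (error) return error;
    h->position(rec_buf);                   // primary (or hidden) key copied into h->ref
    uchar saved[MAX_KEY_LENGTH];            // assumption: >= this table's ref_length
    memcpy(saved, h->ref, h->ref_length);
    return h->rnd_pos(rec_buf, saved);      // re-fetch exactly that row by saved ref
}
#endif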
  3236. //
  3237. // Per InnoDB: Returns statistics information of the table to the MySQL interpreter,
  3238. // in various fields of the handle object.
  3239. // Return:
  3240. // 0, always success
  3241. //
  3242. int ha_tokudb::info(uint flag) {
  3243. TOKUDB_DBUG_ENTER("ha_tokudb::info %p %d %lld", this, flag, share->rows);
  3244. if (flag & HA_STATUS_VARIABLE) {
  3245. // Just to get optimizations right
  3246. stats.records = share->rows;
  3247. stats.deleted = 0;
  3248. }
  3249. if ((flag & HA_STATUS_CONST)) {
  3250. for (uint i = 0; i < table_share->keys; i++) {
  3251. table->key_info[i].rec_per_key[table->key_info[i].key_parts - 1] = 0;
  3252. }
  3253. }
  3254. /* Don't return key if we got an error for the internal primary key */
  3255. if (flag & HA_STATUS_ERRKEY && last_dup_key < table_share->keys)
  3256. errkey = last_dup_key;
  3257. TOKUDB_DBUG_RETURN(0);
  3258. }
  3259. //
  3260. // Per InnoDB: Tells something additional to the handler about how to do things.
  3261. //
  3262. int ha_tokudb::extra(enum ha_extra_function operation) {
  3263. TOKUDB_DBUG_ENTER("extra %p %d", this, operation);
  3264. switch (operation) {
  3265. case HA_EXTRA_RESET_STATE:
  3266. reset();
  3267. break;
  3268. case HA_EXTRA_KEYREAD:
  3269. key_read = 1; // Query satisfied with key
  3270. break;
  3271. case HA_EXTRA_NO_KEYREAD:
  3272. key_read = 0;
  3273. break;
  3274. case HA_EXTRA_IGNORE_DUP_KEY:
  3275. using_ignore = 1;
  3276. break;
  3277. case HA_EXTRA_NO_IGNORE_DUP_KEY:
  3278. using_ignore = 0;
  3279. break;
  3280. default:
  3281. break;
  3282. }
  3283. TOKUDB_DBUG_RETURN(0);
  3284. }
  3285. int ha_tokudb::reset(void) {
  3286. TOKUDB_DBUG_ENTER("ha_tokudb::reset");
  3287. key_read = 0;
  3288. using_ignore = 0;
  3289. if (current_row.flags & (DB_DBT_MALLOC | DB_DBT_REALLOC)) {
  3290. current_row.flags = 0;
  3291. if (current_row.data) {
  3292. free(current_row.data);
  3293. current_row.data = 0;
  3294. }
  3295. }
  3296. TOKUDB_DBUG_RETURN(0);
  3297. }
  3298. //
  3299. // helper function that iterates through all DB's
  3300. // and grabs a lock (either read or write, but not both)
  3301. // Parameters:
  3302. // [in] trans - transaction to be used to pre acquire the lock
  3303. // lt - type of lock to get, either lock_read or lock_write
  3304. // Returns:
  3305. // 0 on success
  3306. // error otherwise
  3307. //
  3308. int ha_tokudb::acquire_table_lock (DB_TXN* trans, TABLE_LOCK_TYPE lt) {
  3309. int error = ENOSYS;
  3310. uint curr_num_DBs = table->s->keys + test(hidden_primary_key);
  3311. if (lt == lock_read) {
  3312. for (uint i = 0; i < curr_num_DBs; i++) {
  3313. DB* db = share->key_file[i];
  3314. error = db->pre_acquire_read_lock(
  3315. db,
  3316. trans,
  3317. db->dbt_neg_infty(), db->dbt_neg_infty(),
  3318. db->dbt_pos_infty(), db->dbt_pos_infty()
  3319. );
  3320. if (error) { goto cleanup; }
  3321. }
  3322. }
  3323. else if (lt == lock_write) {
  3324. for (uint i = 0; i < curr_num_DBs; i++) {
  3325. DB* db = share->key_file[i];
  3326. error = db->pre_acquire_table_lock(db, trans);
  3327. if (error) { goto cleanup; }
  3328. }
  3329. }
  3330. else {
  3331. error = ENOSYS;
  3332. goto cleanup;
  3333. }
  3334. error = 0;
  3335. cleanup:
  3336. return error;
  3337. }
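//
// Sketch of a typical use of acquire_table_lock (illustrative only; assumes
// db_env is open and that the caller handles errors and commits):
//
#if 0 // QQQ illustrative sketch only, not compiled
DB_TXN* batch_txn = NULL;
int r = db_env->txn_begin(db_env, NULL, &batch_txn, 0);
if (r == 0) {
    r = acquire_table_lock(batch_txn, lock_read);  // read-locks every dictionary up front
}
// ... bulk scan under batch_txn, then commit ...
#endif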
  3338. /*
As MySQL will execute an external lock for every new table it uses,
we can use this to start the transactions.
  3341. If we are in auto_commit mode we just need to start a transaction
  3342. for the statement to be able to rollback the statement.
  3343. If not, we have to start a master transaction if there doesn't exist
  3344. one from before.
  3345. */
  3346. //
  3347. // Parameters:
  3348. // [in] thd - handle to the user thread
  3349. // lock_type - the type of lock
  3350. // Returns:
  3351. // 0 on success
  3352. // error otherwise
  3353. //
  3354. int ha_tokudb::external_lock(THD * thd, int lock_type) {
  3355. TOKUDB_DBUG_ENTER("ha_tokudb::external_lock %d", thd_sql_command(thd));
  3356. // QQQ this is here to allow experiments without transactions
  3357. int error = 0;
  3358. tokudb_trx_data *trx = NULL;
  3359. trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
  3360. if (!trx) {
  3361. trx = (tokudb_trx_data *)
  3362. my_malloc(sizeof(*trx), MYF(MY_ZEROFILL));
  3363. if (!trx) {
  3364. error = 1;
  3365. goto cleanup;
  3366. }
  3367. thd_data_set(thd, tokudb_hton->slot, trx);
  3368. }
  3369. if (trx->all == 0) {
  3370. trx->sp_level = 0;
  3371. }
  3372. if (lock_type != F_UNLCK) {
  3373. if (!trx->tokudb_lock_count++) {
  3374. DBUG_ASSERT(trx->stmt == 0);
  3375. transaction = NULL; // Safety
  3376. /* First table lock, start transaction */
  3377. if ((thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN | OPTION_TABLE_LOCK)) && !trx->all) {
  3378. /* QQQ We have to start a master transaction */
  3379. DBUG_PRINT("trans", ("starting transaction all: options: 0x%lx", (ulong) thd->options));
  3380. if ((error = db_env->txn_begin(db_env, NULL, &trx->all, 0))) {
  3381. trx->tokudb_lock_count--; // We didn't get the lock
  3382. goto cleanup;
  3383. }
  3384. if (tokudb_debug & TOKUDB_DEBUG_TXN) {
  3385. TOKUDB_TRACE("master:%p\n", trx->all);
  3386. }
  3387. trx->sp_level = trx->all;
  3388. trans_register_ha(thd, TRUE, tokudb_hton);
  3389. if (thd->in_lock_tables) {
  3390. //
  3391. // grab table locks
  3392. // For the command "Lock tables foo read, bar read"
  3393. // This statement is grabbing the locks for the table
  3394. // foo. The locks for bar will be grabbed when
  3395. // trx->tokudb_lock_count has been initialized
  3396. //
  3397. if (lock.type <= TL_READ_NO_INSERT) {
  3398. error = acquire_table_lock(trx->all,lock_read);
  3399. }
  3400. else {
  3401. error = acquire_table_lock(trx->all,lock_write);
  3402. }
  3403. // Don't create stmt trans
  3404. if (error) {trx->tokudb_lock_count--;}
  3405. goto cleanup;
  3406. }
  3407. }
  3408. DBUG_PRINT("trans", ("starting transaction stmt"));
  3409. if (trx->stmt) {
  3410. if (tokudb_debug & TOKUDB_DEBUG_TXN) {
  3411. TOKUDB_TRACE("warning:stmt=%p\n", trx->stmt);
  3412. }
  3413. }
  3414. if ((error = db_env->txn_begin(db_env, trx->sp_level, &trx->stmt, 0))) {
  3415. /* We leave the possible master transaction open */
  3416. trx->tokudb_lock_count--; // We didn't get the lock
  3417. goto cleanup;
  3418. }
  3419. if (tokudb_debug & TOKUDB_DEBUG_TXN) {
  3420. TOKUDB_TRACE("stmt:%p:%p\n", trx->sp_level, trx->stmt);
  3421. }
  3422. trans_register_ha(thd, FALSE, tokudb_hton);
  3423. }
  3424. else {
  3425. if (thd->in_lock_tables) {
  3426. assert(trx->all != NULL);
  3427. //
  3428. // For the command "Lock tables foo read, bar read"
  3429. // This statement is grabbing the locks for the table
  3430. // bar. The locks for foo will be grabbed when
  3431. // trx->tokudb_lock_count is 0 and we are initializing
  3432. // trx->all above
  3433. //
  3434. if (lock.type <= TL_READ_NO_INSERT) {
  3435. error = acquire_table_lock(trx->all,lock_read);
  3436. }
  3437. else {
  3438. error = acquire_table_lock(trx->all,lock_write);
  3439. }
  3440. if (error) {trx->tokudb_lock_count--; goto cleanup;}
  3441. }
  3442. }
  3443. transaction = trx->stmt;
  3444. }
  3445. else {
  3446. lock.type = TL_UNLOCK; // Unlocked
  3447. pthread_mutex_lock(&share->mutex);
// avoid a signed vs. unsigned comparison by checking for underflow explicitly
  3449. if (deleted_rows > added_rows && share->rows < (deleted_rows - added_rows)) {
  3450. share->rows = 0;
  3451. }
  3452. else {
  3453. share->rows += (added_rows - deleted_rows);
  3454. }
  3455. pthread_mutex_unlock(&share->mutex);
  3456. added_rows = 0;
  3457. deleted_rows = 0;
  3458. if (!--trx->tokudb_lock_count) {
  3459. if (trx->stmt) {
  3460. /*
  3461. F_UNLCK is done without a transaction commit / rollback.
  3462. This happens if the thread didn't update any rows
  3463. We must in this case commit the work to keep the row locks
  3464. */
DBUG_PRINT("trans", ("committing non-updating transaction"));
  3466. error = trx->stmt->commit(trx->stmt, 0);
  3467. if (tokudb_debug & TOKUDB_DEBUG_TXN)
  3468. TOKUDB_TRACE("commit:%p:%d\n", trx->stmt, error);
  3469. trx->stmt = NULL;
  3470. }
  3471. }
  3472. transaction = NULL;
  3473. }
  3474. cleanup:
  3475. TOKUDB_DBUG_RETURN(error);
  3476. }
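//
// Transaction nesting maintained above (sketch):
//
//   trx->all        master txn; begun only when autocommit is off
//                   (OPTION_NOT_AUTOCOMMIT / OPTION_BEGIN / OPTION_TABLE_LOCK)
//     trx->sp_level savepoint level; currently always equal to trx->all
//       trx->stmt   per-statement txn; committed or rolled back at statement end
//
// With autocommit on, trx->all stays NULL and trx->stmt is begun with a NULL
// parent, so each statement runs as its own top-level transaction.
//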
  3477. /*
When using LOCK TABLES, external_lock is only called when the actual
TABLE LOCK is done.
Under LOCK TABLES, each used table will force a call to start_stmt.
  3481. */
  3482. int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) {
  3483. TOKUDB_DBUG_ENTER("ha_tokudb::start_stmt");
  3484. int error = 0;
  3485. tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
  3486. DBUG_ASSERT(trx);
  3487. /*
  3488. note that trx->stmt may have been already initialized as start_stmt()
  3489. is called for *each table* not for each storage engine,
  3490. and there could be many bdb tables referenced in the query
  3491. */
  3492. if (!trx->stmt) {
  3493. DBUG_PRINT("trans", ("starting transaction stmt"));
  3494. error = db_env->txn_begin(db_env, trx->sp_level, &trx->stmt, 0);
  3495. trans_register_ha(thd, FALSE, tokudb_hton);
  3496. }
  3497. transaction = trx->stmt;
  3498. TOKUDB_DBUG_RETURN(error);
  3499. }
  3500. /*
  3501. The idea with handler::store_lock() is the following:
The statement decides which locks we need for the table:
for updates/deletes/inserts we get WRITE locks, for SELECT... we get
read locks.
  3505. Before adding the lock into the table lock handler (see thr_lock.c)
  3506. mysqld calls store lock with the requested locks. Store lock can now
  3507. modify a write lock to a read lock (or some other lock), ignore the
  3508. lock (if we don't want to use MySQL table locks at all) or add locks
  3509. for many tables (like we do when we are using a MERGE handler).
TokuDB changes all WRITE locks to TL_WRITE_ALLOW_WRITE (which
signals that we are doing WRITES, but we are still allowing other
readers and writers).
When releasing locks, store_lock() is also called. In this case one
usually doesn't have to do anything.
  3515. In some exceptional cases MySQL may send a request for a TL_IGNORE;
  3516. This means that we are requesting the same lock as last time and this
  3517. should also be ignored. (This may happen when someone does a flush
  3518. table when we have opened a part of the tables, in which case mysqld
closes and reopens the tables and tries to get the same locks as last
time). In the future we will probably try to remove this.
  3521. */
  3522. THR_LOCK_DATA **ha_tokudb::store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_lock_type lock_type) {
  3523. TOKUDB_DBUG_ENTER("ha_tokudb::store_lock, lock_type=%d cmd=%d", lock_type, thd_sql_command(thd));
  3524. if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) {
  3525. /* If we are not doing a LOCK TABLE, then allow multiple writers */
  3526. if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) &&
  3527. !thd->in_lock_tables && thd_sql_command(thd) != SQLCOM_TRUNCATE) {
  3528. lock_type = TL_WRITE_ALLOW_WRITE;
  3529. }
  3530. lock.type = lock_type;
  3531. }
  3532. *to++ = &lock;
  3533. DBUG_RETURN(to);
  3534. }
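// Example of the conversion above: a plain UPDATE arrives with TL_WRITE;
// outside of LOCK TABLES and TRUNCATE it is downgraded to
// TL_WRITE_ALLOW_WRITE, so the MySQL table-lock layer will not serialize it
// against concurrent readers and writers of the same table.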
  3535. static int create_sub_table(const char *table_name, const char *sub_name, DBTYPE type, int flags) {
  3536. TOKUDB_DBUG_ENTER("create_sub_table");
  3537. int error;
  3538. DB *file;
  3539. DBUG_PRINT("enter", ("sub_name: %s flags: %d", sub_name, flags));
  3540. if (!(error = db_create(&file, db_env, 0))) {
  3541. file->set_flags(file, flags);
  3542. error = (file->open(file, NULL, table_name, sub_name, type, DB_THREAD | DB_CREATE, my_umask));
  3543. if (error) {
  3544. DBUG_PRINT("error", ("Got error: %d when opening table '%s'", error, table_name));
  3545. (void) file->remove(file, table_name, NULL, 0);
  3546. } else
  3547. (void) file->close(file, 0);
  3548. } else {
  3549. DBUG_PRINT("error", ("Got error: %d when creating table", error));
  3550. }
  3551. if (error)
  3552. my_errno = error;
  3553. TOKUDB_DBUG_RETURN(error);
  3554. }
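//
// mkdirpath (below) creates directory 'name'; if the immediate parent is
// missing, it creates that parent with mode 0755 and retries. Note that only
// one missing parent level is handled.
//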
  3555. static int mkdirpath(char *name, mode_t mode) {
  3556. int r = mkdir(name, mode);
  3557. if (r == -1 && errno == ENOENT) {
  3558. char parent[strlen(name)+1];
  3559. strcpy(parent, name);
  3560. char *cp = strrchr(parent, '/');
  3561. if (cp) {
  3562. *cp = 0;
  3563. r = mkdir(parent, 0755);
  3564. if (r == 0)
  3565. r = mkdir(name, mode);
  3566. }
  3567. }
  3568. return r;
  3569. }
  3570. #include <dirent.h>
  3571. static int rmall(const char *dname) {
  3572. int error = 0;
  3573. DIR *d = opendir(dname);
  3574. if (d) {
  3575. struct dirent *dirent;
  3576. while ((dirent = readdir(d)) != 0) {
  3577. if (0 == strcmp(dirent->d_name, ".") || 0 == strcmp(dirent->d_name, ".."))
  3578. continue;
  3579. char fname[strlen(dname) + 1 + strlen(dirent->d_name) + 1];
  3580. sprintf(fname, "%s/%s", dname, dirent->d_name);
  3581. if (dirent->d_type == DT_DIR) {
  3582. error = rmall(fname);
  3583. }
  3584. else {
  3585. if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
  3586. TOKUDB_TRACE("removing:%s\n", fname);
  3587. }
  3588. //
  3589. // if clause checks if the file is a .tokudb file
  3590. //
  3591. if (strlen(fname) >= strlen (ha_tokudb_ext) &&
  3592. strcmp(fname + (strlen(fname) - strlen(ha_tokudb_ext)), ha_tokudb_ext) == 0)
  3593. {
  3594. //
  3595. // if this fails under low memory conditions, gracefully exit and return error
  3596. // user will be notified that something went wrong, and he will
  3597. // have to deal with it
  3598. //
  3599. DB* db = NULL;
  3600. error = db_create(&db, db_env, 0);
  3601. if (error) {
  3602. break;
  3603. }
  3604. //
  3605. // it is ok to do db->remove on any .tokudb file, because any such
  3606. // file was created with db->open
  3607. //
  3608. db->remove(db, fname, NULL, 0);
  3609. }
  3610. else {
  3611. //
  3612. // in case we have some file that is not .tokudb, we just delete it
  3613. //
  3614. error = unlink(fname);
  3615. if (error != 0) {
  3616. error = errno;
  3617. break;
  3618. }
  3619. }
  3620. }
  3621. }
  3622. closedir(d);
  3623. if (error == 0) {
  3624. error = rmdir(dname);
  3625. if (error != 0)
  3626. error = errno;
  3627. }
  3628. }
  3629. else {
  3630. error = errno;
  3631. }
  3632. return error;
  3633. }
  3634. //
  3635. // Creates a new table
  3636. // Parameters:
  3637. // [in] name - table name
  3638. // [in] form - info on table, columns and indexes
  3639. // [in] create_info - more info on table, CURRENTLY UNUSED
  3640. // Returns:
  3641. // 0 on success
  3642. // error otherwise
  3643. //
  3644. int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_info) {
  3645. TOKUDB_DBUG_ENTER("ha_tokudb::create");
  3646. char name_buff[FN_REFLEN];
  3647. int error;
  3648. char dirname[get_name_length(name) + 32];
  3649. char newname[get_name_length(name) + 32];
  3650. uint i;
  3651. //
  3652. // tracing information about what type of table we are creating
  3653. //
  3654. if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
  3655. for (i = 0; i < form->s->fields; i++) {
  3656. Field *field = form->s->field[i];
  3657. TOKUDB_TRACE("field:%d:%s:type=%d:flags=%x\n", i, field->field_name, field->type(), field->flags);
  3658. }
  3659. for (i = 0; i < form->s->keys; i++) {
  3660. KEY *key = &form->s->key_info[i];
  3661. TOKUDB_TRACE("key:%d:%s:%d\n", i, key->name, key->key_parts);
  3662. uint p;
  3663. for (p = 0; p < key->key_parts; p++) {
  3664. KEY_PART_INFO *key_part = &key->key_part[p];
  3665. Field *field = key_part->field;
  3666. TOKUDB_TRACE("key:%d:%d:length=%d:%s:type=%d:flags=%x\n",
  3667. i, p, key_part->length, field->field_name, field->type(), field->flags);
  3668. }
  3669. }
  3670. }
  3671. // a table is a directory of dictionaries
  3672. make_name(dirname, name, 0);
  3673. error = mkdirpath(dirname, 0777);
  3674. if (error != 0) {
  3675. TOKUDB_DBUG_RETURN(errno);
  3676. }
  3677. make_name(newname, name, "main");
  3678. fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME);
  3679. /* Create the main table that will hold the real rows */
  3680. error = create_sub_table(name_buff, NULL, DB_BTREE, 0);
  3681. if (tokudb_debug & TOKUDB_DEBUG_OPEN)
  3682. TOKUDB_TRACE("create:%s:error=%d\n", newname, error);
  3683. if (error) {
  3684. rmall(dirname);
  3685. TOKUDB_DBUG_RETURN(error);
  3686. }
  3687. primary_key = form->s->primary_key;
  3688. /* Create the keys */
  3689. char part[MAX_ALIAS_NAME + 10];
  3690. for (uint i = 0; i < form->s->keys; i++) {
  3691. if (i != primary_key) {
  3692. sprintf(part, "key-%s", form->s->key_info[i].name);
  3693. make_name(newname, name, part);
  3694. fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME);
  3695. error = create_sub_table(name_buff, NULL, DB_BTREE, DB_DUP + DB_DUPSORT);
  3696. if (tokudb_debug & TOKUDB_DEBUG_OPEN)
  3697. TOKUDB_TRACE("create:%s:flags=%ld:error=%d\n", newname, form->key_info[i].flags, error);
  3698. if (error) {
  3699. rmall(dirname);
  3700. TOKUDB_DBUG_RETURN(error);
  3701. }
  3702. }
  3703. }
  3704. /* Create status.tokudb and save relevant metadata */
  3705. DB *status_block = NULL;
  3706. if (!(error = (db_create(&status_block, db_env, 0)))) {
  3707. make_name(newname, name, "status");
  3708. fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME);
  3709. if (!(error = (status_block->open(status_block, NULL, name_buff, NULL, DB_BTREE, DB_CREATE, 0)))) {
  3710. uint version = HA_TOKU_VERSION;
  3711. uint capabilities = HA_TOKU_CAP;
  3712. error = write_metadata(status_block, hatoku_version,&version,sizeof(version));
  3713. if (error) { goto quit_status; }
  3714. error = write_metadata(status_block, hatoku_capabilities,&capabilities,sizeof(capabilities));
  3715. if (error) { goto quit_status; }
  3716. error = write_auto_inc_create(status_block, create_info->auto_increment_value);
  3717. if (error) { goto quit_status; }
  3718. quit_status:
  3719. status_block->close(status_block, 0);
  3720. }
  3721. if (tokudb_debug & TOKUDB_DEBUG_OPEN)
  3722. TOKUDB_TRACE("create:%s:error=%d\n", newname, error);
  3723. }
  3724. if (error)
  3725. rmall(dirname);
  3726. TOKUDB_DBUG_RETURN(error);
  3727. }
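//
// Sketched on-disk result of create() for a table t1 with one secondary key
// idx1 (names are approximate; make_name/fn_format decide the exact paths):
//
//   <dir>/t1/          directory made by mkdirpath
//   <dir>/t1/main      dictionary holding the real rows
//   <dir>/t1/key-idx1  one dictionary per non-primary key
//   <dir>/t1/status    metadata: version, capabilities, auto_increment seed
//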
  3728. //
  3729. // Drops table
  3730. // Parameters:
  3731. // [in] name - name of table to be deleted
  3732. // Returns:
  3733. // 0 on success
  3734. // error otherwise
  3735. //
  3736. int ha_tokudb::delete_table(const char *name) {
  3737. TOKUDB_DBUG_ENTER("ha_tokudb::delete_table");
  3738. int error;
  3739. #if 0 // QQQ single file per table
  3740. char name_buff[FN_REFLEN];
  3741. char newname[strlen(name) + 32];
  3742. sprintf(newname, "%s/main", name);
  3743. fn_format(name_buff, newname, "", ha_tokudb_ext, MY_UNPACK_FILENAME | MY_APPEND_EXT);
  3744. error = db_create(&file, db_env, 0);
  3745. if (error != 0)
  3746. goto exit;
  3747. error = file->remove(file, name_buff, NULL, 0);
  3748. sprintf(newname, "%s/status", name);
  3749. fn_format(name_buff, newname, "", ha_tokudb_ext, MY_UNPACK_FILENAME | MY_APPEND_EXT);
  3750. error = db_create(&file, db_env, 0);
  3751. if (error != 0)
  3752. goto exit;
  3753. error = file->remove(file, name_buff, NULL, 0);
  3754. exit:
  3755. file = 0; // Safety
  3756. my_errno = error;
  3757. #else
  3758. // remove all of the dictionaries in the table directory
  3759. char newname[(tokudb_data_dir ? strlen(tokudb_data_dir) : 0) + strlen(name) + 32];
  3760. make_name(newname, name, 0);
  3761. error = rmall(newname);
  3762. my_errno = error;
  3763. #endif
  3764. TOKUDB_DBUG_RETURN(error);
  3765. }
  3766. //
  3767. // renames table from "from" to "to"
  3768. // Parameters:
  3769. // [in] name - old name of table
  3770. // [in] to - new name of table
  3771. // Returns:
  3772. // 0 on success
  3773. // error otherwise
  3774. //
  3775. int ha_tokudb::rename_table(const char *from, const char *to) {
  3776. TOKUDB_DBUG_ENTER("%s %s %s", __FUNCTION__, from, to);
  3777. int error;
  3778. #if 0 // QQQ single file per table
  3779. char from_buff[FN_REFLEN];
  3780. char to_buff[FN_REFLEN];
  3781. if ((error = db_create(&file, db_env, 0)))
  3782. my_errno = error;
  3783. else {
/* One should not do a file->close() after rename returns */
  3785. error = file->rename(file,
  3786. fn_format(from_buff, from, "", ha_tokudb_ext, MY_UNPACK_FILENAME | MY_APPEND_EXT), NULL, fn_format(to_buff, to, "", ha_tokudb_ext, MY_UNPACK_FILENAME | MY_APPEND_EXT), 0);
  3787. }
  3788. #else
  3789. int n = get_name_length(from) + 32;
  3790. char newfrom[n];
  3791. make_name(newfrom, from, 0);
  3792. n = get_name_length(to) + 32;
  3793. char newto[n];
  3794. make_name(newto, to, 0);
  3795. error = rename(newfrom, newto);
  3796. if (error != 0)
  3797. error = my_errno = errno;
  3798. #endif
  3799. TOKUDB_DBUG_RETURN(error);
  3800. }
  3801. /*
Returns an estimate of the number of seeks it will take to read through the table.
  3803. This is to be comparable to the number returned by records_in_range so
  3804. that we can decide if we should scan the table or use keys.
  3805. */
// QQQ why divide by 3
  3807. double ha_tokudb::scan_time() {
  3808. TOKUDB_DBUG_ENTER("ha_tokudb::scan_time");
  3809. double ret_val = stats.records / 3;
  3810. DBUG_RETURN(ret_val);
  3811. }
  3812. //
  3813. // Calculate the time it takes to read a set of ranges through an index
  3814. // This enables us to optimize reads for clustered indexes.
  3815. // Implementation pulled from InnoDB
  3816. // Parameters:
  3817. // index - index to use
  3818. // ranges - number of ranges
  3819. // rows - estimated number of rows in the range
  3820. // Returns:
  3821. // estimated time measured in disk seeks
  3822. //
  3823. double ha_tokudb::read_time(
  3824. uint index,
  3825. uint ranges,
  3826. ha_rows rows
  3827. )
  3828. {
  3829. double total_scan;
  3830. double ret_val;
  3831. if (index != primary_key) {
  3832. ret_val = handler::read_time(index, ranges, rows);
  3833. goto cleanup;
  3834. }
  3835. total_scan = scan_time();
  3836. if (stats.records < rows) {
  3837. ret_val = total_scan;
  3838. goto cleanup;
  3839. }
  3840. //
  3841. // one disk seek per range plus the proportional scan time of the rows
  3842. //
  3843. ret_val = (ranges + (double) rows / (double) stats.records * total_scan);
  3844. cleanup:
  3845. return ret_val;
  3846. }
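//
// Worked example of the formula above: a clustered read of rows = 100 out of
// stats.records = 1000 over ranges = 2, with total_scan = 1000 / 3 ~ 333.3,
// yields 2 + (100 / 1000) * 333.3 ~ 35.3 estimated seeks, falling back to the
// full scan cost whenever rows exceeds stats.records.
//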
  3847. //
// Estimates the number of index records in a range. In case of errors, returns
// HA_TOKUDB_RANGE_COUNT instead of HA_POS_ERROR. This was the behavior
// when we got the handlerton from MySQL.
// Parameters:
// keynr - index to use
  3853. // [in] start_key - low end of the range
  3854. // [in] end_key - high end of the range
  3855. // Returns:
  3856. // 0 - There are no matching keys in the given range
  3857. // number > 0 - There are approximately number matching rows in the range
  3858. // HA_POS_ERROR - Something is wrong with the index tree
  3859. //
  3860. ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range* end_key) {
  3861. TOKUDB_DBUG_ENTER("ha_tokudb::records_in_range");
  3862. DBT key, after_key;
  3863. ha_rows ret_val = HA_TOKUDB_RANGE_COUNT;
  3864. DB *kfile = share->key_file[keynr];
  3865. u_int64_t less, equal, greater;
  3866. u_int64_t start_rows, end_rows, rows;
  3867. int is_exact;
  3868. int error;
  3869. struct heavi_info heavi_info;
  3870. DBC* tmp_cursor = NULL;
  3871. u_int64_t after_key_less, after_key_equal, after_key_greater;
  3872. heavi_info.db = kfile;
  3873. heavi_info.key = &key;
  3874. after_key.data = key_buff2;
  3875. error = kfile->cursor(kfile, transaction, &tmp_cursor, 0);
  3876. if (error) {
  3877. ret_val = HA_TOKUDB_RANGE_COUNT;
  3878. goto cleanup;
  3879. }
  3880. //
  3881. // get start_rows and end_rows values so that we can estimate range
  3882. // when calling key_range64, the only value we can trust is the value for less
  3883. // The reason is that the key being passed in may be a prefix of keys in the DB
  3884. // As a result, equal may be 0 and greater may actually be equal+greater
  3885. // So, we call key_range64 on the key, and the key that is after it.
  3886. //
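// Example: with index keys (1,1) (1,2) (2,0) and a start key that is just
// the prefix (1), key_range64 on the packed (1, -infinity) key reports
// less = 0 and equal = 0, folding both (1,*) rows into 'greater'. Probing
// for the first key after the prefix, here (2,0), and calling key_range64
// again gives after_key_less = 2, the row count actually wanted.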
  3887. if (start_key) {
  3888. pack_key(&key, keynr, key_buff, start_key->key, start_key->length, COL_NEG_INF);
  3889. error = kfile->key_range64(
  3890. kfile,
  3891. transaction,
  3892. &key,
  3893. &less,
  3894. &equal,
  3895. &greater,
  3896. &is_exact
  3897. );
  3898. if (error) {
  3899. ret_val = HA_TOKUDB_RANGE_COUNT;
  3900. goto cleanup;
  3901. }
  3902. if (start_key->flag == HA_READ_KEY_EXACT) {
  3903. start_rows= less;
  3904. }
  3905. else {
  3906. error = tmp_cursor->c_getf_heavi(
  3907. tmp_cursor,
  3908. 0,
  3909. smart_dbt_callback_ror_heavi,
  3910. &after_key,
  3911. after_key_heavi,
  3912. &heavi_info,
  3913. 1
  3914. );
  3915. if (error && error != DB_NOTFOUND) {
  3916. ret_val = HA_TOKUDB_RANGE_COUNT;
  3917. goto cleanup;
  3918. }
  3919. else if (error == DB_NOTFOUND) {
  3920. start_rows = stats.records;
  3921. }
  3922. else {
  3923. error = kfile->key_range64(
  3924. kfile,
  3925. transaction,
  3926. &after_key,
  3927. &after_key_less,
  3928. &after_key_equal,
  3929. &after_key_greater,
  3930. &is_exact
  3931. );
  3932. if (error) {
  3933. ret_val = HA_TOKUDB_RANGE_COUNT;
  3934. goto cleanup;
  3935. }
  3936. start_rows = after_key_less;
  3937. }
  3938. }
  3939. }
  3940. else {
  3941. start_rows= 0;
  3942. }
  3943. if (end_key) {
  3944. pack_key(&key, keynr, key_buff, end_key->key, end_key->length, COL_NEG_INF);
  3945. error = kfile->key_range64(
  3946. kfile,
  3947. transaction,
  3948. &key,
  3949. &less,
  3950. &equal,
  3951. &greater,
  3952. &is_exact
  3953. );
  3954. if (error) {
  3955. ret_val = HA_TOKUDB_RANGE_COUNT;
  3956. goto cleanup;
  3957. }
  3958. if (end_key->flag == HA_READ_BEFORE_KEY) {
  3959. end_rows= less;
  3960. }
  3961. else {
  3962. error = tmp_cursor->c_getf_heavi(
  3963. tmp_cursor,
  3964. 0,
  3965. smart_dbt_callback_ror_heavi,
  3966. &after_key,
  3967. after_key_heavi,
  3968. &heavi_info,
  3969. 1
  3970. );
  3971. if (error && error != DB_NOTFOUND) {
  3972. ret_val = HA_TOKUDB_RANGE_COUNT;
  3973. goto cleanup;
  3974. }
  3975. else if (error == DB_NOTFOUND) {
  3976. end_rows = stats.records;
  3977. }
  3978. else {
  3979. error = kfile->key_range64(
  3980. kfile,
  3981. transaction,
  3982. &after_key,
  3983. &after_key_less,
  3984. &after_key_equal,
  3985. &after_key_greater,
  3986. &is_exact
  3987. );
  3988. if (error) {
  3989. ret_val = HA_TOKUDB_RANGE_COUNT;
  3990. goto cleanup;
  3991. }
  3992. end_rows= after_key_less;
  3993. }
  3994. }
  3995. }
  3996. else {
  3997. end_rows = stats.records;
  3998. }
  3999. rows = (end_rows > start_rows) ? end_rows - start_rows : 1;
  4000. //
  4001. // MySQL thinks a return value of 0 means there are exactly 0 rows
  4002. // Therefore, always return non-zero so this assumption is not made
  4003. //
  4004. ret_val = (ha_rows) (rows <= 1 ? 1 : rows);
  4005. cleanup:
  4006. if (tmp_cursor) {
  4007. tmp_cursor->c_close(tmp_cursor);
  4008. tmp_cursor = NULL;
  4009. }
  4010. TOKUDB_DBUG_RETURN(ret_val);
  4011. }
  4012. //
  4013. // initializes the auto increment data needed
  4014. //
  4015. void ha_tokudb::init_auto_increment() {
  4016. DBT key;
  4017. DBT value;
  4018. int error;
  4019. HA_METADATA_KEY key_val = hatoku_max_ai;
  4020. bzero(&key, sizeof(key));
  4021. bzero(&value, sizeof(value));
  4022. key.data = &key_val;
  4023. key.size = sizeof(key_val);
  4024. value.flags = DB_DBT_MALLOC;
  4025. DB_TXN* txn = NULL;
  4026. error = db_env->txn_begin(db_env, 0, &txn, 0);
  4027. if (error) {
  4028. share->last_auto_increment = 0;
  4029. }
  4030. else {
  4031. //
  4032. // First retrieve hatoku_max_ai, which is max value used by auto increment
  4033. // column so far, the max value could have been auto generated (e.g. insert (NULL))
  4034. // or it could have been manually inserted by user (e.g. insert (345))
  4035. //
  4036. error = share->status_block->get(
  4037. share->status_block,
  4038. txn,
  4039. &key,
  4040. &value,
  4041. 0
  4042. );
  4043. if (error == 0 && value.size == sizeof(share->last_auto_increment)) {
// copy with the field's own size; the old *(uint *) cast could read a truncated value
memcpy(&share->last_auto_increment, value.data, sizeof(share->last_auto_increment));
  4045. free(value.data);
  4046. value.data = NULL;
  4047. }
  4048. else {
  4049. share->last_auto_increment = 0;
  4050. }
  4051. //
  4052. // Now retrieve the initial auto increment value, as specified by create table
  4053. // so if a user does "create table t1 (a int auto_increment, primary key (a)) auto_increment=100",
  4054. // then the value 100 should be stored here
  4055. //
  4056. key_val = hatoku_ai_create_value;
  4057. error = share->status_block->get(
  4058. share->status_block,
  4059. txn,
  4060. &key,
  4061. &value,
  4062. 0
  4063. );
  4064. if (error == 0 && value.size == sizeof(share->auto_inc_create_value)) {
memcpy(&share->auto_inc_create_value, value.data, sizeof(share->auto_inc_create_value));
  4066. free(value.data);
  4067. value.data = NULL;
  4068. }
  4069. else {
  4070. share->auto_inc_create_value = 0;
  4071. }
  4072. txn->commit(txn,DB_TXN_NOSYNC);
  4073. }
  4074. if (tokudb_debug & TOKUDB_DEBUG_AUTO_INCREMENT) {
  4075. TOKUDB_TRACE("init auto increment:%lld\n", share->last_auto_increment);
  4076. }
  4077. }
  4078. void ha_tokudb::get_auto_increment(ulonglong offset, ulonglong increment, ulonglong nb_desired_values, ulonglong * first_value, ulonglong * nb_reserved_values) {
  4079. TOKUDB_DBUG_ENTER("ha_tokudb::get_auto_increment");
  4080. ulonglong nr;
  4081. pthread_mutex_lock(&share->mutex);
  4082. if (share->auto_inc_create_value > share->last_auto_increment) {
  4083. nr = share->auto_inc_create_value;
  4084. share->last_auto_increment = share->auto_inc_create_value;
  4085. }
  4086. else {
  4087. nr = share->last_auto_increment + increment;
  4088. }
  4089. update_max_auto_inc(share->status_block, nr + (nb_desired_values - 1)*increment);
  4090. share->last_auto_increment = nr + (nb_desired_values - 1)*increment;
  4091. if (tokudb_debug & TOKUDB_DEBUG_AUTO_INCREMENT) {
  4092. TOKUDB_TRACE("get_auto_increment(%lld,%lld,%lld):got:%lld:%lld\n",
  4093. offset, increment, nb_desired_values, nr, nb_desired_values);
  4094. }
  4095. *first_value = nr;
  4096. *nb_reserved_values = nb_desired_values;
  4097. pthread_mutex_unlock(&share->mutex);
  4098. DBUG_VOID_RETURN;
  4099. }
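//
// Worked example for the reservation above: with last_auto_increment = 10,
// increment = 5, and nb_desired_values = 3 (and no pending create-time
// value), first_value = 15 and the block 15, 20, 25 is reserved; 25 is
// persisted as the new maximum. Note that the 'offset' argument does not
// participate in this computation.
//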
  4100. bool ha_tokudb::is_auto_inc_singleton(){
  4101. return false;
  4102. }
  4103. //
  4104. // Adds indexes to the table. Takes the array of KEY passed in key_info, and creates
  4105. // DB's that will go at the end of share->key_file. THE IMPLICIT ASSUMPTION HERE is
  4106. // that the table will be modified and that these added keys will be appended to the end
  4107. // of the array table->key_info
  4108. // Parameters:
  4109. // [in] table_arg - table that is being modified, seems to be identical to this->table
  4110. // [in] key_info - array of KEY's to be added
  4111. // num_of_keys - number of keys to be added, number of elements in key_info
  4112. // Returns:
  4113. // 0 on success, error otherwise
  4114. //
  4115. int ha_tokudb::add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys) {
  4116. TOKUDB_DBUG_ENTER("ha_tokudb::add_index");
  4117. char name_buff[FN_REFLEN];
  4118. int error;
  4119. char newname[share->table_name_length + 32];
  4120. uint curr_index = 0;
  4121. DBC* tmp_cursor = NULL;
  4122. int cursor_ret_val = 0;
  4123. DBT current_primary_key;
  4124. DB_TXN* txn = NULL;
  4125. uchar tmp_key_buff[2*table_arg->s->rec_buff_length];
  4126. uchar tmp_prim_key_buff[2*table_arg->s->rec_buff_length];
  4127. THD* thd = ha_thd();
  4128. //
  4129. // number of DB files we have open currently, before add_index is executed
  4130. //
  4131. uint curr_num_DBs = table_arg->s->keys + test(hidden_primary_key);
  4132. //
  4133. // these variables are for error handling
  4134. //
  4135. uint num_files_created = 0;
  4136. uint num_DB_opened = 0;
  4137. //
  4138. // in unpack_row, MySQL passes a buffer that is this long,
  4139. // so this length should be good enough for us as well
  4140. //
  4141. uchar tmp_record[table_arg->s->rec_buff_length];
  4142. bzero((void *) &current_primary_key, sizeof(current_primary_key));
  4143. current_primary_key.data = tmp_prim_key_buff;
  4144. //
  4145. // The files for secondary tables are derived from the name of keys
  4146. // If we try to add a key with the same name as an already existing key,
// we can crash. So here we check whether any of the keys being added has the
// same name as an existing key, and if so, we fail gracefully.
  4149. //
  4150. for (uint i = 0; i < num_of_keys; i++) {
  4151. for (uint j = 0; j < table_arg->s->keys; j++) {
  4152. if (strcmp(key_info[i].name, table_arg->s->key_info[j].name) == 0) {
  4153. error = HA_ERR_WRONG_COMMAND;
  4154. goto cleanup;
  4155. }
  4156. }
  4157. }
  4158. //
  4159. // first create all the DB's files
  4160. //
  4161. char part[MAX_ALIAS_NAME + 10];
  4162. for (uint i = 0; i < num_of_keys; i++) {
  4163. sprintf(part, "key-%s", key_info[i].name);
  4164. make_name(newname, share->table_name, part);
  4165. fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME);
  4166. error = create_sub_table(name_buff, NULL, DB_BTREE, DB_DUP + DB_DUPSORT);
  4167. if (tokudb_debug & TOKUDB_DEBUG_OPEN) {
  4168. TOKUDB_TRACE("create:%s:flags=%ld:error=%d\n", newname, key_info[i].flags, error);
  4169. }
  4170. if (error) { goto cleanup; }
  4171. num_files_created++;
  4172. }
  4173. //
  4174. // open all the DB files and set the appropriate variables in share
  4175. // they go to the end of share->key_file
  4176. //
  4177. curr_index = curr_num_DBs;
  4178. for (uint i = 0; i < num_of_keys; i++, curr_index++) {
  4179. error = open_secondary_table(
  4180. &share->key_file[curr_index],
  4181. &key_info[i],
  4182. share->table_name,
  4183. 0,
  4184. &share->key_type[curr_index]
  4185. );
  4186. if (error) { goto cleanup; }
  4187. num_DB_opened++;
  4188. }
  4189. //
  4190. // scan primary table, create each secondary key, add to each DB
  4191. //
  4192. error = db_env->txn_begin(db_env, 0, &txn, 0);
  4193. assert(error == 0);
  4194. //
  4195. // grab some locks to make this go faster
  4196. // first a global read lock on the main DB, because
  4197. // we intend to scan the entire thing
  4198. //
  4199. error = share->file->pre_acquire_read_lock(
  4200. share->file,
  4201. txn,
  4202. share->file->dbt_neg_infty(),
  4203. NULL,
  4204. share->file->dbt_pos_infty(),
  4205. NULL
  4206. );
  4207. if (error) { txn->commit(txn, 0); goto cleanup; }
  4208. //
  4209. // now grab a table write lock for secondary tables we
  4210. // are creating
  4211. //
  4212. for (uint i = 0; i < num_of_keys; i++) {
  4213. uint curr_index = i + curr_num_DBs;
  4214. error = share->key_file[curr_index]->pre_acquire_table_lock(
  4215. share->key_file[curr_index],
  4216. txn
  4217. );
  4218. if (error) { txn->commit(txn, 0); goto cleanup; }
  4219. }
  4220. if ((error = share->file->cursor(share->file, txn, &tmp_cursor, 0))) {
  4221. tmp_cursor = NULL; // Safety
  4222. goto cleanup;
  4223. }
  4224. //
  4225. // for each element in the primary table, insert the proper key value pair in each secondary table
  4226. // that is created
  4227. //
  4228. struct smart_dbt_ai_info info;
  4229. info.ha = this;
  4230. info.prim_key = &current_primary_key;
  4231. info.buf = tmp_record;
  4232. cursor_ret_val = tmp_cursor->c_getf_next(tmp_cursor, DB_PRELOCKED, smart_dbt_ai_callback, &info);
  4233. while (cursor_ret_val != DB_NOTFOUND) {
  4234. if (cursor_ret_val) {
  4235. error = cursor_ret_val;
  4236. goto cleanup;
  4237. }
  4238. for (uint i = 0; i < num_of_keys; i++) {
  4239. DBT secondary_key;
  4240. bool has_null = false;
  4241. create_dbt_key_from_key(&secondary_key,&key_info[i], tmp_key_buff, tmp_record, &has_null);
  4242. uint curr_index = i + curr_num_DBs;
  4243. u_int32_t put_flags = share->key_type[curr_index];
  4244. if (put_flags == DB_NOOVERWRITE && (has_null || thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS))) {
  4245. put_flags = DB_YESOVERWRITE;
  4246. }
  4247. error = share->key_file[curr_index]->put(share->key_file[curr_index], txn, &secondary_key, &current_primary_key, put_flags);
  4248. if (error) {
  4249. //
// in the case of any error anywhere, we can just nuke all the files created, so we don't need
// to be tricky and try to roll back changes. That is why we commit the transaction,
// which should be fast. The DB is going to go away anyway, so there is no point in trying to keep
// it in a good state.
  4254. //
  4255. txn->commit(txn, 0);
  4256. //
  4257. // found a duplicate in a no_dup DB
  4258. //
  4259. if ( (error == DB_KEYEXIST) && (key_info[i].flags & HA_NOSAME)) {
  4260. error = HA_ERR_FOUND_DUPP_KEY;
  4261. last_dup_key = i;
  4262. memcpy(table_arg->record[0], tmp_record, table_arg->s->rec_buff_length);
  4263. }
  4264. goto cleanup;
  4265. }
  4266. }
  4267. cursor_ret_val = tmp_cursor->c_getf_next(tmp_cursor, DB_PRELOCKED, smart_dbt_ai_callback, &info);
  4268. }
  4269. tmp_cursor->c_close(tmp_cursor);
  4270. tmp_cursor = NULL;
  4271. //
  4272. // Now flatten the new DB's created
  4273. //
  4274. for (uint i = 0; i < num_of_keys; i++) {
  4275. uint curr_index = i + curr_num_DBs;
  4276. if ((error = share->key_file[curr_index]->cursor(share->key_file[curr_index], txn, &tmp_cursor, 0))) {
  4277. tmp_cursor = NULL; // Safety
  4278. goto cleanup;
  4279. }
  4280. error = 0;
  4281. while (error != DB_NOTFOUND) {
  4282. error = tmp_cursor->c_getf_next(tmp_cursor, DB_PRELOCKED, smart_dbt_opt_callback, NULL);
  4283. if (error && error != DB_NOTFOUND) {
  4284. tmp_cursor->c_close(tmp_cursor);
  4285. txn->commit(txn, 0);
  4286. goto cleanup;
  4287. }
  4288. }
  4289. tmp_cursor->c_close(tmp_cursor);
  4290. tmp_cursor = NULL;
  4291. }
  4292. error = txn->commit(txn, 0);
  4293. assert(error == 0);
  4294. error = 0;
  4295. cleanup:
  4296. if (error) {
  4297. //
  4298. // We need to delete all the files that may have been created
  4299. // The DB's must be closed and removed
  4300. //
  4301. for (uint i = curr_num_DBs; i < curr_num_DBs + num_DB_opened; i++) {
  4302. share->key_file[i]->close(share->key_file[i], 0);
  4303. share->key_file[i] = NULL;
  4304. }
  4305. for (uint i = 0; i < num_files_created; i++) {
  4306. DB* tmp;
  4307. sprintf(part, "key-%s", key_info[i].name);
  4308. make_name(newname, share->table_name, part);
  4309. fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME);
  4310. if (!(db_create(&tmp, db_env, 0))) {
  4311. tmp->remove(tmp, name_buff, NULL, 0);
  4312. }
  4313. }
  4314. }
  4315. TOKUDB_DBUG_RETURN(error);
  4316. }
  4317. //
  4318. // Prepares to drop indexes to the table. For each value, i, in the array key_num,
  4319. // table->key_info[i] is a key that is to be dropped.
  4320. // ***********NOTE*******************
  4321. // Although prepare_drop_index is supposed to just get the DB's ready for removal,
  4322. // and not actually do the removal, we are doing it here and not in final_drop_index
  4323. // For the flags we expose in alter_table_flags, namely xxx_NO_WRITES, this is allowed
// Changes to "future-proof" this so that it works when we have the equivalent flags
// that are not NO_WRITES are not worth it at the moment.
  4326. // Parameters:
  4327. // [in] table_arg - table that is being modified, seems to be identical to this->table
  4328. // [in] key_num - array of indexes that specify which keys of the array table->key_info
  4329. // are to be dropped
  4330. // num_of_keys - size of array, key_num
  4331. // Returns:
  4332. // 0 on success, error otherwise
  4333. //
  4334. int ha_tokudb::prepare_drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys) {
  4335. TOKUDB_DBUG_ENTER("ha_tokudb::prepare_drop_index");
  4336. int error;
  4337. char name_buff[FN_REFLEN];
  4338. char newname[share->table_name_length + 32];
  4339. char part[MAX_ALIAS_NAME + 10];
  4340. DB** dbs_to_remove = NULL;
  4341. //
  4342. // we allocate an array of DB's here to get ready for removal
  4343. // We do this so that all potential memory allocation errors that may occur
  4344. // will do so BEFORE we go about dropping any indexes. This way, we
// can fail gracefully without losing integrity of data in such cases. If,
// on the other hand, we started removing DB's and one failed in the middle,
// it is not immediately obvious how one would roll back.
  4348. //
  4349. dbs_to_remove = (DB **)my_malloc(sizeof(*dbs_to_remove)*num_of_keys, MYF(MY_ZEROFILL));
  4350. if (dbs_to_remove == NULL) {
  4351. error = ENOMEM;
  4352. goto cleanup;
  4353. }
  4354. for (uint i = 0; i < num_of_keys; i++) {
  4355. error = db_create(&dbs_to_remove[i], db_env, 0);
  4356. if (error) {
  4357. goto cleanup;
  4358. }
  4359. }
  4360. for (uint i = 0; i < num_of_keys; i++) {
  4361. uint curr_index = key_num[i];
  4362. share->key_file[curr_index]->close(share->key_file[curr_index],0);
  4363. share->key_file[curr_index] = NULL;
  4364. sprintf(part, "key-%s", table_arg->key_info[curr_index].name);
  4365. make_name(newname, share->table_name, part);
  4366. fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME);
  4367. dbs_to_remove[i]->remove(dbs_to_remove[i], name_buff, NULL, 0);
  4368. }
  4369. cleanup:
  4370. my_free(dbs_to_remove, MYF(MY_ALLOW_ZERO_PTR));
  4371. TOKUDB_DBUG_RETURN(error);
  4372. }
  4373. // ***********NOTE*******************
  4374. // Although prepare_drop_index is supposed to just get the DB's ready for removal,
  4375. // and not actually do the removal, we are doing it here and not in final_drop_index
  4376. // For the flags we expose in alter_table_flags, namely xxx_NO_WRITES, this is allowed
  4377. // Changes for "future-proofing" this so that it works when we have the equivalent flags
  4378. // that are not NO_WRITES are not worth it at the moments, therefore, we can make
  4379. // this function just return
  4380. int ha_tokudb::final_drop_index(TABLE *table_arg) {
  4381. TOKUDB_DBUG_ENTER("ha_tokudb::final_drop_index");
  4382. TOKUDB_DBUG_RETURN(0);
  4383. }
void ha_tokudb::print_error(int error, myf errflag) {
    if (error == DB_LOCK_DEADLOCK)
        error = HA_ERR_LOCK_DEADLOCK;
    if (error == DB_LOCK_NOTGRANTED)
        error = HA_ERR_LOCK_WAIT_TIMEOUT;
    handler::print_error(error, errflag);
}
#if 0 // QQQ use default
//
// This function will probably need to be redone from scratch
// if we ever choose to implement it
//
int ha_tokudb::analyze(THD * thd, HA_CHECK_OPT * check_opt) {
    uint i;
    DB_BTREE_STAT *stat = 0;
    DB_TXN_STAT *txn_stat_ptr = 0;
    tokudb_trx_data *trx = (tokudb_trx_data *) thd->ha_data[tokudb_hton->slot];
    DBUG_ASSERT(trx);
    for (i = 0; i < table_share->keys; i++) {
        if (stat) {
            free(stat);
            stat = 0;
        }
        if ((key_file[i]->stat) (key_file[i], trx->all, (void *) &stat, 0))
            goto err;
        share->rec_per_key[i] = (stat->bt_ndata / (stat->bt_nkeys ? stat->bt_nkeys : 1));
    }
    /* A hidden primary key is not in key_file[] */
    if (hidden_primary_key) {
        if (stat) {
            free(stat);
            stat = 0;
        }
        if ((file->stat) (file, trx->all, (void *) &stat, 0))
            goto err;
    }
    pthread_mutex_lock(&share->mutex);
    share->status |= STATUS_TOKUDB_ANALYZE;        // Save status on close
    share->version++;                              // Update stat in table
    pthread_mutex_unlock(&share->mutex);
    update_status(share, table);                   // Write status to file
    if (stat)
        free(stat);
    return ((share->status & STATUS_TOKUDB_ANALYZE) ? HA_ADMIN_FAILED : HA_ADMIN_OK);
  err:
    if (stat)
        free(stat);
    return HA_ADMIN_FAILED;
}
#endif
//
// Flatten all DBs in this table; to do so, just do a full scan of every DB.
// (A sketch of the do-nothing scan callback follows this function.)
//
int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) {
    TOKUDB_DBUG_ENTER("ha_tokudb::optimize");
    int error;
    DBC* tmp_cursor = NULL;
    tokudb_trx_data *trx = NULL;
    DB_TXN* txn = NULL;
    bool do_commit = false;
    uint curr_num_DBs = table->s->keys + test(hidden_primary_key);
    trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
    if (trx == NULL) {
        error = HA_ERR_UNSUPPORTED;
        goto cleanup;
    }
    //
    // optimize may be called without a valid transaction, so we create one
    // here if needed in order to have a valid transaction to scan with.
    // This is a bit hacky, but it is the best we have right now.
    //
    txn = trx->stmt ? trx->stmt : trx->sp_level;
    if (txn == NULL) {
        error = db_env->txn_begin(db_env, NULL, &txn, 0);
        if (error) {
            goto cleanup;
        }
        do_commit = true;
    }
    //
    // prelock so each scan goes faster
    //
    error = acquire_table_lock(txn, lock_read);
    if (error) {
        goto cleanup;
    }
    //
    // for each DB, scan through the entire dictionary and do nothing
    //
    for (uint i = 0; i < curr_num_DBs; i++) {
        DB* db = share->key_file[i];
        error = 0;
        if ((error = db->cursor(db, txn, &tmp_cursor, 0))) {
            tmp_cursor = NULL;
            goto cleanup;
        }
        while (error != DB_NOTFOUND) {
            error = tmp_cursor->c_getf_next(tmp_cursor, DB_PRELOCKED, smart_dbt_opt_callback, NULL);
            if (error && error != DB_NOTFOUND) {
                goto cleanup;
            }
        }
        tmp_cursor->c_close(tmp_cursor);
        tmp_cursor = NULL;
    }
    error = 0;
cleanup:
    if (tmp_cursor) {
        // an error path above may have left the cursor open
        tmp_cursor->c_close(tmp_cursor);
        tmp_cursor = NULL;
    }
    if (do_commit) {
        // commit the transaction we created above, without letting a
        // successful commit mask an earlier error from the scan
        int r = txn->commit(txn, 0);
        if (error == 0) {
            error = r;
        }
    }
    TOKUDB_DBUG_RETURN(error);
}
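#if 0
// Illustrative sketch only (not built): a do-nothing fetch callback in the
// style of smart_dbt_opt_callback, which is defined elsewhere in this file.
// The signature is an assumption based on how c_getf_next is invoked above;
// the scan's only purpose is the side effect of touching every row.
static void example_opt_callback(DBT const *key, DBT const *row, void *context) {
    // intentionally empty: optimize() only needs the cursor to visit each row
}
#endif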
//
// Returns the byte offset of a field within record[0]. The range check
// relies on record[1] being laid out immediately after record[0] in memory,
// using it as an end-of-buffer sentinel to verify that field->ptr actually
// points somewhere inside record[0].
// (See the usage sketch after this function.)
//
ulong ha_tokudb::field_offset(Field *field) {
    if (table->record[0] <= field->ptr && field->ptr < table->record[1])
        return field->offset(table->record[0]);
    assert(0);
    return 0;
}
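#if 0
// Illustrative usage sketch only (not built): a hypothetical caller could use
// the offset to read the same column out of the old row image in record[1]:
//     ulong off = field_offset(field);
//     uchar *old_value = table->record[1] + off;
#endif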
// delete all rows from a table
//
// effects: delete all of the rows in the main dictionary and all of the
// indices. this must be atomic, so we use the statement transaction
// for all of the truncate operations.
// locks: if we have an exclusive table write lock, all of the concurrency
// issues go away.
// returns: 0 if success
int ha_tokudb::delete_all_rows() {
    TOKUDB_DBUG_ENTER("delete_all_rows");
    int error = 0;
    // truncate all dictionaries
    uint curr_num_DBs = table->s->keys + test(hidden_primary_key);
    for (uint i = 0; i < curr_num_DBs; i++) {
        DB *db = share->key_file[i];
        u_int32_t row_count = 0;
        error = db->truncate(db, transaction, &row_count, 0);
        if (error)
            break;
        // do something with the row_count?
        if (tokudb_debug)
            TOKUDB_TRACE("row_count=%u\n", row_count);
    }
    // zap the row count
    if (error == 0)
        share->rows = 0;
    TOKUDB_DBUG_RETURN(error);
}
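#if 0
// Illustrative sketch only (not built): delete_all_rows() assumes the
// statement transaction ("transaction") is already open. For reference, a
// hypothetical single-dictionary truncate under an explicitly created
// transaction would look like this, using the same db_env/truncate APIs as
// the loop above.
static int example_truncate_one(DB *db) {
    DB_TXN *txn = NULL;
    int error = db_env->txn_begin(db_env, NULL, &txn, 0);
    if (error)
        return error;
    u_int32_t row_count = 0;
    error = db->truncate(db, txn, &row_count, 0);
    if (error == 0)
        error = txn->commit(txn, 0);    // make the truncate durable
    else
        txn->abort(txn);                // roll back on failure
    return error;
}
#endif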
struct st_mysql_storage_engine storage_engine_structure = { MYSQL_HANDLERTON_INTERFACE_VERSION };
// options flags
//   PLUGIN_VAR_THDLOCAL   Variable is per-connection
//   PLUGIN_VAR_READONLY   Server variable is read only
//   PLUGIN_VAR_NOSYSVAR   Not a server variable
//   PLUGIN_VAR_NOCMDOPT   Not a command line option
//   PLUGIN_VAR_NOCMDARG   No argument for cmd line
//   PLUGIN_VAR_RQCMDARG   Argument required for cmd line
//   PLUGIN_VAR_OPCMDARG   Argument optional for cmd line
//   PLUGIN_VAR_MEMALLOC   String needs memory allocated
// (a sketch of these flags in use follows below)
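#if 0
// Illustrative sketch only (not built): how the flags above combine in a
// variable declaration, and how a per-connection (PLUGIN_VAR_THDLOCAL)
// variable is read back with THDVAR(). The variable name "example_opt" and
// its bounds are hypothetical.
static MYSQL_THDVAR_UINT(example_opt,
    PLUGIN_VAR_THDLOCAL | PLUGIN_VAR_RQCMDARG,
    "example per-connection option",
    NULL /* check */, NULL /* update */,
    1 /* default */, 0 /* min */, 100 /* max */, 0 /* blocksize */);

static uint get_example_opt(THD *thd) {
    return THDVAR(thd, example_opt);
}
#endif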
// system variables
static MYSQL_SYSVAR_ULONGLONG(cache_size, tokudb_cache_size, PLUGIN_VAR_READONLY, "TokuDB cache table size", NULL, NULL, 0, 0, ~0LL, 0);
static MYSQL_SYSVAR_UINT(cache_memory_percent, tokudb_cache_memory_percent, PLUGIN_VAR_READONLY, "Default percent of physical memory in the TokuDB cache table", NULL, NULL, tokudb_cache_memory_percent, 0, 100, 0);
static MYSQL_SYSVAR_ULONG(max_lock, tokudb_max_lock, PLUGIN_VAR_READONLY, "TokuDB Max Locks", NULL, NULL, 8 * 1024, 0, ~0L, 0);
static MYSQL_SYSVAR_ULONG(debug, tokudb_debug, PLUGIN_VAR_READONLY, "TokuDB Debug", NULL, NULL, 0, 0, ~0L, 0);
static MYSQL_SYSVAR_STR(log_dir, tokudb_log_dir, PLUGIN_VAR_READONLY, "TokuDB Log Directory", NULL, NULL, NULL);
static MYSQL_SYSVAR_STR(data_dir, tokudb_data_dir, PLUGIN_VAR_READONLY, "TokuDB Data Directory", NULL, NULL, NULL);
static MYSQL_SYSVAR_STR(version, tokudb_version, PLUGIN_VAR_READONLY, "TokuDB Version", NULL, NULL, NULL);
static MYSQL_SYSVAR_UINT(init_flags, tokudb_init_flags, PLUGIN_VAR_READONLY, "Sets TokuDB DB_ENV->open flags", NULL, NULL, tokudb_init_flags, 0, ~0, 0);
#if 0
static MYSQL_SYSVAR_ULONG(cache_parts, tokudb_cache_parts, PLUGIN_VAR_READONLY, "Sets TokuDB set_cache_parts", NULL, NULL, 0, 0, ~0L, 0);
// this is really a u_int32_t
// ? use MYSQL_SYSVAR_SET
static MYSQL_SYSVAR_UINT(env_flags, tokudb_env_flags, PLUGIN_VAR_READONLY, "Sets TokuDB env_flags", NULL, NULL, DB_LOG_AUTOREMOVE, 0, ~0, 0);
static MYSQL_SYSVAR_STR(home, tokudb_home, PLUGIN_VAR_READONLY, "Sets TokuDB env->open home", NULL, NULL, NULL);
// this is really a u_int32_t
// ? use MYSQL_SYSVAR_SET
// this looks to be unused
static MYSQL_SYSVAR_LONG(lock_scan_time, tokudb_lock_scan_time, PLUGIN_VAR_READONLY, "Tokudb Lock Scan Time (UNUSED)", NULL, NULL, 0, 0, ~0L, 0);
// this is really a u_int32_t
// ? use MYSQL_SYSVAR_ENUM
static MYSQL_SYSVAR_UINT(lock_type, tokudb_lock_type, PLUGIN_VAR_READONLY, "Sets set_lk_detect", NULL, NULL, DB_LOCK_DEFAULT, 0, ~0, 0);
static MYSQL_SYSVAR_ULONG(log_buffer_size, tokudb_log_buffer_size, PLUGIN_VAR_READONLY, "Tokudb Log Buffer Size", NULL, NULL, 0, 0, ~0L, 0);
static MYSQL_SYSVAR_ULONG(region_size, tokudb_region_size, PLUGIN_VAR_READONLY, "Tokudb Region Size", NULL, NULL, 128 * 1024, 0, ~0L, 0);
static MYSQL_SYSVAR_BOOL(shared_data, tokudb_shared_data, PLUGIN_VAR_READONLY, "Tokudb Shared Data", NULL, NULL, FALSE);
static MYSQL_SYSVAR_STR(tmpdir, tokudb_tmpdir, PLUGIN_VAR_READONLY, "Tokudb Tmp Dir", NULL, NULL, NULL);
#endif
static struct st_mysql_sys_var *tokudb_system_variables[] = {
    MYSQL_SYSVAR(cache_size),
    MYSQL_SYSVAR(cache_memory_percent),
    MYSQL_SYSVAR(max_lock),
    MYSQL_SYSVAR(data_dir),
    MYSQL_SYSVAR(log_dir),
    MYSQL_SYSVAR(debug),
    MYSQL_SYSVAR(commit_sync),
    MYSQL_SYSVAR(version),
    MYSQL_SYSVAR(init_flags),
#if 0
    MYSQL_SYSVAR(cache_parts),
    MYSQL_SYSVAR(env_flags),
    MYSQL_SYSVAR(home),
    MYSQL_SYSVAR(lock_scan_time),
    MYSQL_SYSVAR(lock_type),
    MYSQL_SYSVAR(log_buffer_size),
    MYSQL_SYSVAR(region_size),
    MYSQL_SYSVAR(shared_data),
    MYSQL_SYSVAR(tmpdir),
#endif
    NULL
};
mysql_declare_plugin(tokudb) {
    MYSQL_STORAGE_ENGINE_PLUGIN,
    &storage_engine_structure,
    "TokuDB",
    "Tokutek Inc",
    "Fractal trees, transactions, row level locks",
    PLUGIN_LICENSE_PROPRIETARY,        /* QQQ license? */
    tokudb_init_func,                  /* plugin init */
    tokudb_done_func,                  /* plugin deinit */
    0x0200,                            /* QQQ 2.0 */
    NULL,                              /* status variables */
    tokudb_system_variables,           /* system variables */
    NULL                               /* config options */
}
mysql_declare_plugin_end;