/*****************************************************************************

Copyright (c) 2010, 2013, Oracle and/or its affiliates. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file row/row0ftsort.cc
Create Full Text Index with (parallel) merge sort

Created 10/13/2010 Jimmy Yang
*******************************************************/

#include "dict0dict.h" /* dict_table_stats_lock() */
#include "row0merge.h"
#include "pars0pars.h"
#include "row0ftsort.h"
#include "row0row.h"
#include "btr0cur.h"

/** Read the next record to buffer N.
@param N index into array of merge info structure */
#define ROW_MERGE_READ_GET_NEXT(N)					\
	do {								\
		b[N] = row_merge_read_rec(				\
			block[N], buf[N], b[N], index,			\
			fd[N], &foffs[N], &mrec[N], offsets[N]);	\
		if (UNIV_UNLIKELY(!b[N])) {				\
			if (mrec[N]) {					\
				goto exit;				\
			}						\
		}							\
	} while (0)
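
/* Usage sketch (illustrative only): row_fts_merge_insert() below invokes
this macro as, e.g., ROW_MERGE_READ_GET_NEXT(min_rec). It relies on the
caller having local arrays b[], block[], buf[], fd[], foffs[], mrec[] and
offsets[] indexed by sort-thread number, an "index" variable, and an
"exit" label, exactly as row_fts_merge_insert() declares them. */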
/** Parallel sort degree */
UNIV_INTERN ulong	fts_sort_pll_degree	= 2;

/*********************************************************************//**
Create a temporary "fts sort index" used to merge sort the
tokenized doc string. The index has three "fields":

1) Tokenized word,
2) Doc ID (depending on the number of records to sort, it can be a 4-byte
or 8-byte integer value),
3) The word's position in the original doc.

@return dict_index_t structure for the fts sort index */
UNIV_INTERN
dict_index_t*
row_merge_create_fts_sort_index(
/*============================*/
	dict_index_t*		index,	/*!< in: Original FTS index
					based on which this sort index
					is created */
	const dict_table_t*	table,	/*!< in: table that FTS index
					is being created on */
	ibool*			opt_doc_id_size)
					/*!< out: whether to use 4 bytes
					instead of 8 bytes integer to
					store Doc ID during sort */
{
	dict_index_t*	new_index;
	dict_field_t*	field;
	dict_field_t*	idx_field;
	CHARSET_INFO*	charset;

	// FIXME: This name shouldn't be hard coded here.
	new_index = dict_mem_index_create(
		index->table->name, "tmp_fts_idx", 0, DICT_FTS, 3);

	new_index->id = index->id;
	new_index->table = (dict_table_t*) table;
	new_index->n_uniq = FTS_NUM_FIELDS_SORT;
	new_index->n_def = FTS_NUM_FIELDS_SORT;
	new_index->cached = TRUE;

	idx_field = dict_index_get_nth_field(index, 0);
	charset = fts_index_get_charset(index);

	/* The first field is the tokenized word */
	field = dict_index_get_nth_field(new_index, 0);
	field->name = NULL;
	field->prefix_len = 0;
	field->col = static_cast<dict_col_t*>(
		mem_heap_alloc(new_index->heap, sizeof(dict_col_t)));
	field->col->len = FTS_MAX_WORD_LEN;

	if (strcmp(charset->name, "latin1_swedish_ci") == 0) {
		field->col->mtype = DATA_VARCHAR;
	} else {
		field->col->mtype = DATA_VARMYSQL;
	}

	field->col->prtype = idx_field->col->prtype | DATA_NOT_NULL;
	field->col->mbminmaxlen = idx_field->col->mbminmaxlen;
	field->fixed_len = 0;

	/* The second field is the Doc ID */
	field = dict_index_get_nth_field(new_index, 1);
	field->name = NULL;
	field->prefix_len = 0;
	field->col = static_cast<dict_col_t*>(
		mem_heap_alloc(new_index->heap, sizeof(dict_col_t)));
	field->col->mtype = DATA_INT;
	*opt_doc_id_size = FALSE;

	/* Check whether we can use a 4-byte instead of an 8-byte integer
	field to hold the Doc ID, thus reducing the overall sort size */
	if (DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_ADD_DOC_ID)) {
		/* If the Doc ID column is being added by this create
		index, then just check the number of rows in the table */
		if (dict_table_get_n_rows(table) < MAX_DOC_ID_OPT_VAL) {
			*opt_doc_id_size = TRUE;
		}
	} else {
		doc_id_t	max_doc_id;

		/* If the Doc ID column is supplied by the user, then
		check the maximum Doc ID in the table */
		max_doc_id = fts_get_max_doc_id((dict_table_t*) table);

		if (max_doc_id && max_doc_id < MAX_DOC_ID_OPT_VAL) {
			*opt_doc_id_size = TRUE;
		}
	}

	if (*opt_doc_id_size) {
		field->col->len = sizeof(ib_uint32_t);
		field->fixed_len = sizeof(ib_uint32_t);
	} else {
		field->col->len = FTS_DOC_ID_LEN;
		field->fixed_len = FTS_DOC_ID_LEN;
	}

	field->col->prtype = DATA_NOT_NULL | DATA_BINARY_TYPE;
	field->col->mbminmaxlen = 0;

	/* The third field is the word's position in the original doc */
	field = dict_index_get_nth_field(new_index, 2);
	field->name = NULL;
	field->prefix_len = 0;
	field->col = static_cast<dict_col_t*>(
		mem_heap_alloc(new_index->heap, sizeof(dict_col_t)));
	field->col->mtype = DATA_INT;
	field->col->len = 4;
	field->fixed_len = 4;
	field->col->prtype = DATA_NOT_NULL;
	field->col->mbminmaxlen = 0;

	return(new_index);
}
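
/* Resulting sort tuple layout, as set up above (a sketch; the 4-byte
Doc ID applies only when *opt_doc_id_size is returned as TRUE):

	field 0: tokenized word  DATA_VARCHAR or DATA_VARMYSQL,
				 up to FTS_MAX_WORD_LEN bytes
	field 1: Doc ID          DATA_INT, FTS_DOC_ID_LEN (8) bytes,
				 or sizeof(ib_uint32_t) when optimized
	field 2: word position   DATA_INT, 4 bytes */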
/*********************************************************************//**
Initialize FTS parallel sort structures.
@return TRUE if all successful */
UNIV_INTERN
ibool
row_fts_psort_info_init(
/*====================*/
	trx_t*			trx,	/*!< in: transaction */
	row_merge_dup_t*	dup,	/*!< in,own: descriptor of
					FTS index being created */
	const dict_table_t*	new_table,/*!< in: table on which indexes are
					created */
	ibool			opt_doc_id_size,
					/*!< in: whether to use 4 bytes
					instead of 8 bytes integer to
					store Doc ID during sort */
	fts_psort_t**		psort,	/*!< out: parallel sort info to be
					instantiated */
	fts_psort_t**		merge)	/*!< out: parallel merge info
					to be instantiated */
{
	ulint			i;
	ulint			j;
	fts_psort_common_t*	common_info = NULL;
	fts_psort_t*		psort_info = NULL;
	fts_psort_t*		merge_info = NULL;
	ulint			block_size;
	ibool			ret = TRUE;

	block_size = 3 * srv_sort_buf_size;

	*psort = psort_info = static_cast<fts_psort_t*>(mem_zalloc(
		fts_sort_pll_degree * sizeof *psort_info));

	if (!psort_info) {
		ut_free(dup);
		return(FALSE);
	}

	/* Common info for all sort threads */
	common_info = static_cast<fts_psort_common_t*>(
		mem_alloc(sizeof *common_info));

	if (!common_info) {
		ut_free(dup);
		mem_free(psort_info);
		return(FALSE);
	}

	common_info->dup = dup;
	common_info->new_table = (dict_table_t*) new_table;
	common_info->trx = trx;
	common_info->all_info = psort_info;
	common_info->sort_event = os_event_create();
	common_info->merge_event = os_event_create();
	common_info->opt_doc_id_size = opt_doc_id_size;

	/* There will be FTS_NUM_AUX_INDEX "sort buckets" for
	each parallel sort thread. Each "sort bucket" holds records for
	a particular "FTS index partition" */
	for (j = 0; j < fts_sort_pll_degree; j++) {

		UT_LIST_INIT(psort_info[j].fts_doc_list);

		for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {

			psort_info[j].merge_file[i] =
				static_cast<merge_file_t*>(
					mem_zalloc(sizeof(merge_file_t)));

			if (!psort_info[j].merge_file[i]) {
				ret = FALSE;
				goto func_exit;
			}

			psort_info[j].merge_buf[i] = row_merge_buf_create(
				dup->index);

			if (row_merge_file_create(psort_info[j].merge_file[i])
			    < 0) {
				ret = FALSE;
				goto func_exit;
			}

			/* Need to align memory for O_DIRECT write */
			psort_info[j].block_alloc[i] =
				static_cast<row_merge_block_t*>(ut_malloc(
					block_size + 1024));

			psort_info[j].merge_block[i] =
				static_cast<row_merge_block_t*>(
					ut_align(
						psort_info[j].block_alloc[i],
						1024));

			if (!psort_info[j].merge_block[i]) {
				ret = FALSE;
				goto func_exit;
			}
		}

		psort_info[j].child_status = 0;
		psort_info[j].state = 0;
		psort_info[j].psort_common = common_info;
		psort_info[j].error = DB_SUCCESS;
		psort_info[j].memory_used = 0;
		mutex_create(fts_pll_tokenize_mutex_key,
			     &psort_info[j].mutex, SYNC_FTS_TOKENIZE);
	}

	/* Initialize merge_info structures for parallel merge and
	insert into auxiliary FTS tables (FTS_INDEX_TABLE) */
	*merge = merge_info = static_cast<fts_psort_t*>(
		mem_alloc(FTS_NUM_AUX_INDEX * sizeof *merge_info));

	for (j = 0; j < FTS_NUM_AUX_INDEX; j++) {

		merge_info[j].child_status = 0;
		merge_info[j].state = 0;
		merge_info[j].psort_common = common_info;
	}

func_exit:
	if (!ret) {
		row_fts_psort_info_destroy(psort_info, merge_info);
	}

	return(ret);
}
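
/* Caller lifecycle sketch (hypothetical pseudo-caller; the actual driver
lives in row0merge.cc). The functions named below are all defined in this
file:

	fts_psort_t*	psort;
	fts_psort_t*	merge;

	if (row_fts_psort_info_init(trx, dup, table, opt_doc_id_size,
				    &psort, &merge)) {
		row_fts_start_psort(psort);
		// feed documents into psort[i].fts_doc_list, then set
		// state = FTS_PARENT_COMPLETE and wait on
		// psort_common->sort_event until every child reports
		// FTS_CHILD_COMPLETE
		row_fts_start_parallel_merge(merge);
		// wait on psort_common->merge_event likewise, then:
		row_fts_psort_info_destroy(psort, merge);
	}
*/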
/*********************************************************************//**
Clean up and deallocate FTS parallel sort structures, and close the
merge sort files */
UNIV_INTERN
void
row_fts_psort_info_destroy(
/*=======================*/
	fts_psort_t*	psort_info,	/*!< parallel sort info */
	fts_psort_t*	merge_info)	/*!< parallel merge info */
{
	ulint	i;
	ulint	j;

	if (psort_info) {
		for (j = 0; j < fts_sort_pll_degree; j++) {
			for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {
				if (psort_info[j].merge_file[i]) {
					row_merge_file_destroy(
						psort_info[j].merge_file[i]);
				}

				if (psort_info[j].block_alloc[i]) {
					ut_free(psort_info[j].block_alloc[i]);
				}
				mem_free(psort_info[j].merge_file[i]);
			}

			mutex_free(&psort_info[j].mutex);
		}

		/* psort_common is shared by all threads, so it is safe
		to reach it through psort_info even when merge_info was
		never allocated */
		os_event_free(psort_info[0].psort_common->sort_event);
		os_event_free(psort_info[0].psort_common->merge_event);
		ut_free(psort_info[0].psort_common->dup);
		mem_free(psort_info[0].psort_common);
		mem_free(psort_info);
	}

	if (merge_info) {
		mem_free(merge_info);
	}
}

/*********************************************************************//**
Free up merge buffers when merge sort is done */
UNIV_INTERN
void
row_fts_free_pll_merge_buf(
/*=======================*/
	fts_psort_t*	psort_info)	/*!< in: parallel sort info */
{
	ulint	j;
	ulint	i;

	if (!psort_info) {
		return;
	}

	for (j = 0; j < fts_sort_pll_degree; j++) {
		for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {
			row_merge_buf_free(psort_info[j].merge_buf[i]);
		}
	}

	return;
}
/*********************************************************************//**
Tokenize incoming text data and add to the sort buffer.
@return TRUE if the record passed, FALSE if out of space */
static
ibool
row_merge_fts_doc_tokenize(
/*=======================*/
	row_merge_buf_t**	sort_buf,	/*!< in/out: sort buffer */
	doc_id_t		doc_id,		/*!< in: Doc ID */
	fts_doc_t*		doc,		/*!< in: Doc to be tokenized */
	dtype_t*		word_dtype,	/*!< in: data structure for
						word col */
	merge_file_t**		merge_file,	/*!< in/out: merge file */
	ibool			opt_doc_id_size,/*!< in: whether to use 4 bytes
						instead of 8 bytes integer to
						store Doc ID during sort */
	fts_tokenize_ctx_t*	t_ctx)		/*!< in/out: tokenize context */
{
	ulint		i;
	ulint		inc;
	fts_string_t	str;
	ulint		len;
	row_merge_buf_t* buf;
	dfield_t*	field;
	fts_string_t	t_str;
	ibool		buf_full = FALSE;
	byte		str_buf[FTS_MAX_WORD_LEN + 1];
	ulint		data_size[FTS_NUM_AUX_INDEX];
	ulint		n_tuple[FTS_NUM_AUX_INDEX];

	t_str.f_n_char = 0;
	t_ctx->buf_used = 0;

	memset(n_tuple, 0, FTS_NUM_AUX_INDEX * sizeof(ulint));
	memset(data_size, 0, FTS_NUM_AUX_INDEX * sizeof(ulint));

	/* Tokenize the data and add each word string, its corresponding
	doc id and position to the sort buffer */
	for (i = t_ctx->processed_len; i < doc->text.f_len; i += inc) {
		ib_rbt_bound_t	parent;
		ulint		idx = 0;
		ib_uint32_t	position;
		ulint		offset = 0;
		ulint		cur_len = 0;
		doc_id_t	write_doc_id;

		inc = innobase_mysql_fts_get_token(
			doc->charset, doc->text.f_str + i,
			doc->text.f_str + doc->text.f_len, &str, &offset);

		ut_a(inc > 0);

		/* Ignore strings whose character count is less than
		"fts_min_token_size" or more than "fts_max_token_size" */
		if (str.f_n_char < fts_min_token_size
		    || str.f_n_char > fts_max_token_size) {
			t_ctx->processed_len += inc;
			continue;
		}

		t_str.f_len = innobase_fts_casedn_str(
			doc->charset, (char*) str.f_str, str.f_len,
			(char*) &str_buf, FTS_MAX_WORD_LEN + 1);

		t_str.f_str = (byte*) &str_buf;

		/* If "cached_stopword" is defined, ignore words in the
		stopword list */
		if (t_ctx->cached_stopword
		    && rbt_search(t_ctx->cached_stopword,
				  &parent, &t_str) == 0) {
			t_ctx->processed_len += inc;
			continue;
		}

		/* There are FTS_NUM_AUX_INDEX auxiliary tables; find
		out which sort buffer to put this word record in */
		t_ctx->buf_used = fts_select_index(
			doc->charset, t_str.f_str, t_str.f_len);

		buf = sort_buf[t_ctx->buf_used];

		ut_a(t_ctx->buf_used < FTS_NUM_AUX_INDEX);
		idx = t_ctx->buf_used;

		mtuple_t* mtuple = &buf->tuples[buf->n_tuples + n_tuple[idx]];

		field = mtuple->fields = static_cast<dfield_t*>(
			mem_heap_alloc(buf->heap,
				       FTS_NUM_FIELDS_SORT * sizeof *field));

		/* The first field is the tokenized word */
		dfield_set_data(field, t_str.f_str, t_str.f_len);
		len = dfield_get_len(field);

		field->type.mtype = word_dtype->mtype;
		field->type.prtype = word_dtype->prtype | DATA_NOT_NULL;

		/* Variable length field, set to max size. */
		field->type.len = FTS_MAX_WORD_LEN;
		field->type.mbminmaxlen = word_dtype->mbminmaxlen;

		cur_len += len;
		dfield_dup(field, buf->heap);
		field++;

		/* The second field is the Doc ID */

		ib_uint32_t	doc_id_32_bit;

		if (!opt_doc_id_size) {
			fts_write_doc_id((byte*) &write_doc_id, doc_id);

			dfield_set_data(
				field, &write_doc_id, sizeof(write_doc_id));
		} else {
			mach_write_to_4(
				(byte*) &doc_id_32_bit, (ib_uint32_t) doc_id);

			dfield_set_data(
				field, &doc_id_32_bit, sizeof(doc_id_32_bit));
		}

		len = field->len;
		ut_ad(len == FTS_DOC_ID_LEN || len == sizeof(ib_uint32_t));

		field->type.mtype = DATA_INT;
		field->type.prtype = DATA_NOT_NULL | DATA_BINARY_TYPE;
		field->type.len = len;
		field->type.mbminmaxlen = 0;

		cur_len += len;
		dfield_dup(field, buf->heap);

		++field;

		/* The third field is the position */
		mach_write_to_4(
			(byte*) &position,
			(i + offset + inc - str.f_len + t_ctx->init_pos));

		dfield_set_data(field, &position, sizeof(position));
		len = dfield_get_len(field);
		ut_ad(len == sizeof(ib_uint32_t));

		field->type.mtype = DATA_INT;
		field->type.prtype = DATA_NOT_NULL;
		field->type.len = len;
		field->type.mbminmaxlen = 0;
		cur_len += len;
		dfield_dup(field, buf->heap);

		/* One variable-length column, a word whose length is
		less than fts_max_token_size: add one extra size byte
		and one extra byte */
		cur_len += 2;

		/* Reserve one byte for the end marker of
		row_merge_block_t. */
		if (buf->total_size + data_size[idx] + cur_len
		    >= srv_sort_buf_size - 1) {
			buf_full = TRUE;
			break;
		}

		/* Increment the number of tuples */
		n_tuple[idx]++;
		t_ctx->processed_len += inc;
		data_size[idx] += cur_len;
	}

	/* Update the data length and the number of new word tuples
	added in this round of tokenization */
	for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {
		/* The computation of total_size below assumes that no
		delete-mark flags will be stored and that all fields
		are NOT NULL and fixed-length. */
		sort_buf[i]->total_size += data_size[i];

		sort_buf[i]->n_tuples += n_tuple[i];

		merge_file[i]->n_rec += n_tuple[i];
		t_ctx->rows_added[i] += n_tuple[i];
	}

	if (!buf_full) {
		/* We pad one byte between the text across two fields */
		t_ctx->init_pos += doc->text.f_len + 1;
	}

	return(!buf_full);
}
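
/* Worked example of the position written above, assuming that
innobase_mysql_fts_get_token() returns inc = skipped delimiter bytes plus
token bytes and leaves offset at 0 (hypothetical values): for the text
"abc def" with init_pos == 0, the first token gives i = 0, inc = 3,
str.f_len = 3, so position = 0 + 0 + 3 - 3 + 0 = 0; the second gives
i = 3, inc = 4 (one space plus "def"), so position = 3 + 0 + 4 - 3 + 0
= 4, the byte offset of "def" in the text. */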
/*********************************************************************//**
Get next doc item from fts_doc_list */
UNIV_INLINE
void
row_merge_fts_get_next_doc_item(
/*============================*/
	fts_psort_t*		psort_info,	/*!< in: psort_info */
	fts_doc_item_t**	doc_item)	/*!< in/out: doc item */
{
	if (*doc_item != NULL) {
		ut_free(*doc_item);
	}

	mutex_enter(&psort_info->mutex);

	*doc_item = UT_LIST_GET_FIRST(psort_info->fts_doc_list);
	if (*doc_item != NULL) {
		UT_LIST_REMOVE(doc_list, psort_info->fts_doc_list,
			       *doc_item);

		ut_ad(psort_info->memory_used >= sizeof(fts_doc_item_t)
		      + (*doc_item)->field->len);
		psort_info->memory_used -= sizeof(fts_doc_item_t)
			+ (*doc_item)->field->len;
	}

	mutex_exit(&psort_info->mutex);
}
/*********************************************************************//**
Function performs parallel tokenization of the incoming doc strings.
It also performs the initial in-memory sort of the parsed records.
@return OS_THREAD_DUMMY_RETURN */
UNIV_INTERN
os_thread_ret_t
fts_parallel_tokenization(
/*======================*/
	void*		arg)	/*!< in: psort_info for the thread */
{
	fts_psort_t*		psort_info = (fts_psort_t*) arg;
	ulint			i;
	fts_doc_item_t*		doc_item = NULL;
	row_merge_buf_t**	buf;
	ibool			processed = FALSE;
	merge_file_t**		merge_file;
	row_merge_block_t**	block;
	int			tmpfd[FTS_NUM_AUX_INDEX];
	ulint			mycount[FTS_NUM_AUX_INDEX];
	ib_uint64_t		total_rec = 0;
	ulint			num_doc_processed = 0;
	doc_id_t		last_doc_id = 0;
	ulint			zip_size;
	mem_heap_t*		blob_heap = NULL;
	fts_doc_t		doc;
	dict_table_t*		table = psort_info->psort_common->new_table;
	dtype_t			word_dtype;
	dict_field_t*		idx_field;
	fts_tokenize_ctx_t	t_ctx;
	ulint			retried = 0;
	dberr_t			error = DB_SUCCESS;

	ut_ad(psort_info);

	buf = psort_info->merge_buf;
	merge_file = psort_info->merge_file;
	blob_heap = mem_heap_create(512);
	memset(&doc, 0, sizeof(doc));
	memset(&t_ctx, 0, sizeof(t_ctx));
	memset(mycount, 0, sizeof(mycount));

	doc.charset = fts_index_get_charset(
		psort_info->psort_common->dup->index);

	idx_field = dict_index_get_nth_field(
		psort_info->psort_common->dup->index, 0);
	word_dtype.prtype = idx_field->col->prtype;
	word_dtype.mbminmaxlen = idx_field->col->mbminmaxlen;
	word_dtype.mtype = (strcmp(doc.charset->name, "latin1_swedish_ci") == 0)
		? DATA_VARCHAR : DATA_VARMYSQL;

	block = psort_info->merge_block;
	zip_size = dict_table_zip_size(table);

	row_merge_fts_get_next_doc_item(psort_info, &doc_item);

	t_ctx.cached_stopword = table->fts->cache->stopword_info.cached_stopword;
	processed = TRUE;
loop:
	while (doc_item) {
		dfield_t*	dfield = doc_item->field;

		last_doc_id = doc_item->doc_id;

		ut_ad (dfield->data != NULL
		       && dfield_get_len(dfield) != UNIV_SQL_NULL);

		/* If we have finished processing the last item, update
		"doc" with the strings in the doc_item; otherwise continue
		processing the last item */
		if (processed) {
			byte*	data;
			ulint	data_len;

			dfield = doc_item->field;
			data = static_cast<byte*>(dfield_get_data(dfield));
			data_len = dfield_get_len(dfield);

			if (dfield_is_ext(dfield)) {
				doc.text.f_str =
					btr_copy_externally_stored_field(
						&doc.text.f_len, data,
						zip_size, data_len, blob_heap,
						NULL);
			} else {
				doc.text.f_str = data;
				doc.text.f_len = data_len;
			}

			doc.tokens = 0;
			t_ctx.processed_len = 0;
		} else {
			/* Not yet finished processing the "doc" on hand,
			continue processing it */
			ut_ad(doc.text.f_str);
			ut_ad(t_ctx.processed_len < doc.text.f_len);
		}

		processed = row_merge_fts_doc_tokenize(
			buf, doc_item->doc_id, &doc,
			&word_dtype,
			merge_file, psort_info->psort_common->opt_doc_id_size,
			&t_ctx);

		/* Current sort buffer full, need to recycle */
		if (!processed) {
			ut_ad(t_ctx.processed_len < doc.text.f_len);
			ut_ad(t_ctx.rows_added[t_ctx.buf_used]);
			break;
		}

		num_doc_processed++;

		if (fts_enable_diag_print && num_doc_processed % 10000 == 1) {
			ib_logf(IB_LOG_LEVEL_INFO,
				"number of docs processed %d\n",
				(int) num_doc_processed);
#ifdef FTS_INTERNAL_DIAG_PRINT
			for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {
				ib_logf(IB_LOG_LEVEL_INFO,
					"ID %d, partition %d, word "
					"%d\n", (int) psort_info->psort_id,
					(int) i, (int) mycount[i]);
			}
#endif
		}

		mem_heap_empty(blob_heap);

		row_merge_fts_get_next_doc_item(psort_info, &doc_item);

		if (doc_item && last_doc_id != doc_item->doc_id) {
			t_ctx.init_pos = 0;
		}
	}

	/* If we run out of the current sort buffer, we need to sort
	and flush the sort buffer to disk */
	if (t_ctx.rows_added[t_ctx.buf_used] && !processed) {
		row_merge_buf_sort(buf[t_ctx.buf_used], NULL);
		row_merge_buf_write(buf[t_ctx.buf_used],
				    merge_file[t_ctx.buf_used],
				    block[t_ctx.buf_used]);

		if (!row_merge_write(merge_file[t_ctx.buf_used]->fd,
				     merge_file[t_ctx.buf_used]->offset++,
				     block[t_ctx.buf_used])) {
			error = DB_TEMP_FILE_WRITE_FAILURE;
			goto func_exit;
		}

		UNIV_MEM_INVALID(block[t_ctx.buf_used][0], srv_sort_buf_size);
		buf[t_ctx.buf_used] = row_merge_buf_empty(buf[t_ctx.buf_used]);
		mycount[t_ctx.buf_used] += t_ctx.rows_added[t_ctx.buf_used];
		t_ctx.rows_added[t_ctx.buf_used] = 0;

		ut_a(doc_item);
		goto loop;
	}

	/* The parent is done scanning; if we have finished processing
	all the docs, exit */
	if (psort_info->state == FTS_PARENT_COMPLETE) {
		if (UT_LIST_GET_LEN(psort_info->fts_doc_list) == 0) {
			goto exit;
		} else if (retried > 10000) {
			ut_ad(!doc_item);
			/* Retried too many times and cannot get a new
			record */
			ib_logf(IB_LOG_LEVEL_ERROR,
				"InnoDB: FTS parallel sort processed "
				"%lu records, the sort queue has "
				"%lu records, but the sort cannot get "
				"the next record", num_doc_processed,
				UT_LIST_GET_LEN(
					psort_info->fts_doc_list));
			goto exit;
		}
	} else if (psort_info->state == FTS_PARENT_EXITING) {
		/* Parent abort */
		goto func_exit;
	}

	if (doc_item == NULL) {
		os_thread_yield();
	}

	row_merge_fts_get_next_doc_item(psort_info, &doc_item);

	if (doc_item != NULL) {
		if (last_doc_id != doc_item->doc_id) {
			t_ctx.init_pos = 0;
		}

		retried = 0;
	} else if (psort_info->state == FTS_PARENT_COMPLETE) {
		retried++;
	}

	goto loop;

exit:
	/* Do a final sort of the last (or latest) batch of records
	in block memory. Flush them to the temp file if the records
	cannot all be held in one block of memory */
	for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {
		if (t_ctx.rows_added[i]) {
			row_merge_buf_sort(buf[i], NULL);
			row_merge_buf_write(
				buf[i], merge_file[i], block[i]);

			/* Write to the temp file, but only if records have
			been flushed to the temp file before (offset > 0):
			The pseudo code for the sort is as follows:

			while (there are rows) {
				tokenize rows, put result in block[]
				if (block[] runs out) {
					sort rows;
					write to temp file with
					row_merge_write();
					offset++;
				}
			}

			# write out the last batch
			if (offset > 0) {
				row_merge_write();
				offset++;
			} else {
				# no need to write anything
				offset stays 0
			}

			So if merge_file[i]->offset is 0 when we get
			here for the last batch, the rows have never
			been flushed to the temp file, and they can all
			be held in memory */
			if (merge_file[i]->offset != 0) {
				if (!row_merge_write(merge_file[i]->fd,
						     merge_file[i]->offset++,
						     block[i])) {
					error = DB_TEMP_FILE_WRITE_FAILURE;
					goto func_exit;
				}

				UNIV_MEM_INVALID(block[i][0],
						 srv_sort_buf_size);
			}

			buf[i] = row_merge_buf_empty(buf[i]);
			t_ctx.rows_added[i] = 0;
		}
	}

	if (fts_enable_diag_print) {
		DEBUG_FTS_SORT_PRINT(" InnoDB_FTS: start merge sort\n");
	}

	for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {
		if (!merge_file[i]->offset) {
			continue;
		}

		tmpfd[i] = row_merge_file_create_low();
		if (tmpfd[i] < 0) {
			error = DB_OUT_OF_MEMORY;
			goto func_exit;
		}

		error = row_merge_sort(psort_info->psort_common->trx,
				       psort_info->psort_common->dup,
				       merge_file[i], block[i], &tmpfd[i],
				       false, 0.0 /* pct_progress */,
				       0.0 /* pct_cost */);
		if (error != DB_SUCCESS) {
			close(tmpfd[i]);
			goto func_exit;
		}

		total_rec += merge_file[i]->n_rec;
		close(tmpfd[i]);
	}

func_exit:
	if (fts_enable_diag_print) {
		DEBUG_FTS_SORT_PRINT(" InnoDB_FTS: complete merge sort\n");
	}

	mem_heap_free(blob_heap);

	mutex_enter(&psort_info->mutex);
	psort_info->error = error;
	mutex_exit(&psort_info->mutex);

	if (UT_LIST_GET_LEN(psort_info->fts_doc_list) > 0) {
		/* The child can exit either with an error or when
		told to by the parent. */
		ut_ad(error != DB_SUCCESS
		      || psort_info->state == FTS_PARENT_EXITING);
	}

	/* Free the fts doc list in case of error. */
	do {
		row_merge_fts_get_next_doc_item(psort_info, &doc_item);
	} while (doc_item != NULL);

	psort_info->child_status = FTS_CHILD_COMPLETE;
	os_event_set(psort_info->psort_common->sort_event);
	psort_info->child_status = FTS_CHILD_EXITING;

#ifdef __WIN__
	CloseHandle(psort_info->thread_hdl);
#endif /*__WIN__ */

	os_thread_exit(NULL);

	OS_THREAD_DUMMY_RETURN;
}
/*********************************************************************//**
Start the parallel tokenization and parallel merge sort */
UNIV_INTERN
void
row_fts_start_psort(
/*================*/
	fts_psort_t*	psort_info)	/*!< parallel sort structure */
{
	ulint		i = 0;
	os_thread_id_t	thd_id;

	for (i = 0; i < fts_sort_pll_degree; i++) {
		psort_info[i].psort_id = i;
		psort_info[i].thread_hdl = os_thread_create(
			fts_parallel_tokenization,
			(void*) &psort_info[i], &thd_id);
	}
}

/*********************************************************************//**
Function performs the merge and insertion of the sorted records.
@return OS_THREAD_DUMMY_RETURN */
UNIV_INTERN
os_thread_ret_t
fts_parallel_merge(
/*===============*/
	void*		arg)	/*!< in: parallel merge info */
{
	fts_psort_t*	psort_info = (fts_psort_t*) arg;
	ulint		id;

	ut_ad(psort_info);

	id = psort_info->psort_id;

	row_fts_merge_insert(psort_info->psort_common->dup->index,
			     psort_info->psort_common->new_table,
			     psort_info->psort_common->all_info, id);

	psort_info->child_status = FTS_CHILD_COMPLETE;
	os_event_set(psort_info->psort_common->merge_event);
	psort_info->child_status = FTS_CHILD_EXITING;

#ifdef __WIN__
	CloseHandle(psort_info->thread_hdl);
#endif /*__WIN__ */

	os_thread_exit(NULL);

	OS_THREAD_DUMMY_RETURN;
}

/*********************************************************************//**
Kick off the parallel merge and insert threads */
UNIV_INTERN
void
row_fts_start_parallel_merge(
/*=========================*/
	fts_psort_t*	merge_info)	/*!< in: parallel merge info */
{
	int		i = 0;
	os_thread_id_t	thd_id;

	/* Kick off merge/insert threads */
	for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {
		merge_info[i].psort_id = i;
		merge_info[i].child_status = 0;

		merge_info[i].thread_hdl = os_thread_create(
			fts_parallel_merge, (void*) &merge_info[i], &thd_id);
	}
}
/********************************************************************//**
Insert processed FTS data into the auxiliary index tables.
@return DB_SUCCESS if insertion runs fine */
static __attribute__((nonnull))
dberr_t
row_merge_write_fts_word(
/*=====================*/
	trx_t*		trx,		/*!< in: transaction */
	que_t**		ins_graph,	/*!< in: Insert query graphs */
	fts_tokenizer_word_t* word,	/*!< in: sorted and tokenized
					word */
	fts_table_t*	fts_table,	/*!< in: fts aux table instance */
	CHARSET_INFO*	charset)	/*!< in: charset */
{
	ulint	selected;
	dberr_t	ret = DB_SUCCESS;

	selected = fts_select_index(
		charset, word->text.f_str, word->text.f_len);
	fts_table->suffix = fts_get_suffix(selected);

	/* Pop out each fts_node in word->nodes and write it to the
	auxiliary table */
	while (ib_vector_size(word->nodes) > 0) {
		dberr_t		error;
		fts_node_t*	fts_node;

		fts_node = static_cast<fts_node_t*>(ib_vector_pop(word->nodes));

		error = fts_write_node(
			trx, &ins_graph[selected], fts_table, &word->text,
			fts_node);

		if (error != DB_SUCCESS) {
			fprintf(stderr, "InnoDB: failed to write"
				" word %s to FTS auxiliary index"
				" table, error (%s)\n",
				word->text.f_str, ut_strerr(error));
			ret = error;
		}

		ut_free(fts_node->ilist);
		fts_node->ilist = NULL;
	}

	return(ret);
}
/*********************************************************************//**
Read sorted FTS data files and insert data tuples into the auxiliary
tables */
UNIV_INTERN
void
row_fts_insert_tuple(
/*=================*/
	fts_psort_insert_t*
			ins_ctx,	/*!< in: insert context */
	fts_tokenizer_word_t* word,	/*!< in: last processed
					tokenized word */
	ib_vector_t*	positions,	/*!< in: word position */
	doc_id_t*	in_doc_id,	/*!< in: last item doc id */
	dtuple_t*	dtuple)		/*!< in: entry to insert */
{
	fts_node_t*	fts_node = NULL;
	dfield_t*	dfield;
	doc_id_t	doc_id;
	ulint		position;
	fts_string_t	token_word;
	ulint		i;

	/* Get the fts_node for the FTS auxiliary INDEX table */
	if (ib_vector_size(word->nodes) > 0) {
		fts_node = static_cast<fts_node_t*>(
			ib_vector_last(word->nodes));
	}

	if (fts_node == NULL
	    || fts_node->ilist_size > FTS_ILIST_MAX_SIZE) {
		fts_node = static_cast<fts_node_t*>(
			ib_vector_push(word->nodes, NULL));
		memset(fts_node, 0x0, sizeof(*fts_node));
	}

	/* If dtuple == NULL, this is the last word to be processed */
	if (!dtuple) {
		if (fts_node && ib_vector_size(positions) > 0) {
			fts_cache_node_add_positions(
				NULL, fts_node, *in_doc_id,
				positions);

			/* Write out the current word */
			row_merge_write_fts_word(ins_ctx->trx,
						 ins_ctx->ins_graph, word,
						 &ins_ctx->fts_table,
						 ins_ctx->charset);
		}

		return;
	}

	/* Get the first field for the tokenized word */
	dfield = dtuple_get_nth_field(dtuple, 0);

	token_word.f_n_char = 0;
	token_word.f_len = dfield->len;
	token_word.f_str = static_cast<byte*>(dfield_get_data(dfield));

	if (!word->text.f_str) {
		fts_utf8_string_dup(&word->text, &token_word, ins_ctx->heap);
	}

	/* Compare to the last word, to see if they are the same
	word */
	if (innobase_fts_text_cmp(ins_ctx->charset,
				  &word->text, &token_word) != 0) {
		ulint	num_item;

		/* Getting a new word; flush the last position info
		for the current word in fts_node */
		if (ib_vector_size(positions) > 0) {
			fts_cache_node_add_positions(
				NULL, fts_node, *in_doc_id, positions);
		}

		/* Write out the current word */
		row_merge_write_fts_word(ins_ctx->trx, ins_ctx->ins_graph,
					 word, &ins_ctx->fts_table,
					 ins_ctx->charset);

		/* Copy the new word */
		fts_utf8_string_dup(&word->text, &token_word, ins_ctx->heap);

		num_item = ib_vector_size(positions);

		/* Clean up the position queue */
		for (i = 0; i < num_item; i++) {
			ib_vector_pop(positions);
		}

		/* Reset the Doc ID */
		*in_doc_id = 0;
		memset(fts_node, 0x0, sizeof(*fts_node));
	}

	/* Get the word's Doc ID */
	dfield = dtuple_get_nth_field(dtuple, 1);

	if (!ins_ctx->opt_doc_id_size) {
		doc_id = fts_read_doc_id(
			static_cast<byte*>(dfield_get_data(dfield)));
	} else {
		doc_id = (doc_id_t) mach_read_from_4(
			static_cast<byte*>(dfield_get_data(dfield)));
	}

	/* Get the word's position info */
	dfield = dtuple_get_nth_field(dtuple, 2);
	position = mach_read_from_4(static_cast<byte*>(
		dfield_get_data(dfield)));

	/* If this is the same word as the last word, and they
	have the same Doc ID, we just need to add its position
	info. Otherwise, we will flush the position info to the
	fts_node and initiate a new position vector */
	if (!(*in_doc_id) || *in_doc_id == doc_id) {
		ib_vector_push(positions, &position);
	} else {
		ulint	num_pos = ib_vector_size(positions);

		fts_cache_node_add_positions(NULL, fts_node,
					     *in_doc_id, positions);
		for (i = 0; i < num_pos; i++) {
			ib_vector_pop(positions);
		}
		ib_vector_push(positions, &position);
	}

	/* Record the current Doc ID */
	*in_doc_id = doc_id;
}
/*********************************************************************//**
Propagate a newly added record up one level in the selection tree
@return parent position to which this value was propagated */
static
int
row_fts_sel_tree_propagate(
/*=======================*/
	int		propagated,	/*!< in: tree node propagated */
	int*		sel_tree,	/*!< in: selection tree */
	const mrec_t**	mrec,		/*!< in: sort record */
	ulint**		offsets,	/*!< in: record offsets */
	dict_index_t*	index)		/*!< in/out: FTS index */
{
	ulint	parent;
	int	child_left;
	int	child_right;
	int	selected;

	/* Find which parent this value will be propagated to */
	parent = (propagated - 1) / 2;

	/* Find out which value is smaller, and propagate it */
	child_left = sel_tree[parent * 2 + 1];
	child_right = sel_tree[parent * 2 + 2];

	if (child_left == -1 || mrec[child_left] == NULL) {
		if (child_right == -1
		    || mrec[child_right] == NULL) {
			selected = -1;
		} else {
			selected = child_right;
		}
	} else if (child_right == -1
		   || mrec[child_right] == NULL) {
		selected = child_left;
	} else if (cmp_rec_rec_simple(mrec[child_left], mrec[child_right],
				      offsets[child_left],
				      offsets[child_right],
				      index, NULL) < 0) {
		selected = child_left;
	} else {
		selected = child_right;
	}

	sel_tree[parent] = selected;

	return(static_cast<int>(parent));
}

/*********************************************************************//**
Readjust the selection tree after popping the root and reading a new value
@return the new root */
static
int
row_fts_sel_tree_update(
/*====================*/
	int*		sel_tree,	/*!< in/out: selection tree */
	ulint		propagated,	/*!< in: node to propagate up */
	ulint		height,		/*!< in: tree height */
	const mrec_t**	mrec,		/*!< in: sort record */
	ulint**		offsets,	/*!< in: record offsets */
	dict_index_t*	index)		/*!< in: index dictionary */
{
	ulint	i;

	for (i = 1; i <= height; i++) {
		propagated = static_cast<ulint>(row_fts_sel_tree_propagate(
			static_cast<int>(propagated), sel_tree,
			mrec, offsets, index));
	}

	return(sel_tree[0]);
}
/*********************************************************************//**
Build the selection tree at a specified level */
static
void
row_fts_build_sel_tree_level(
/*=========================*/
	int*		sel_tree,	/*!< in/out: selection tree */
	ulint		level,		/*!< in: selection tree level */
	const mrec_t**	mrec,		/*!< in: sort record */
	ulint**		offsets,	/*!< in: record offsets */
	dict_index_t*	index)		/*!< in: index dictionary */
{
	ulint	start;
	int	child_left;
	int	child_right;
	ulint	i;
	ulint	num_item;

	start = static_cast<ulint>((1 << level) - 1);
	num_item = static_cast<ulint>(1 << level);

	for (i = 0; i < num_item; i++) {
		child_left = sel_tree[(start + i) * 2 + 1];
		child_right = sel_tree[(start + i) * 2 + 2];

		if (child_left == -1) {
			if (child_right == -1) {
				sel_tree[start + i] = -1;
			} else {
				sel_tree[start + i] = child_right;
			}
			continue;
		} else if (child_right == -1) {
			sel_tree[start + i] = child_left;
			continue;
		}

		/* Deal with NULL child conditions */
		if (!mrec[child_left]) {
			if (!mrec[child_right]) {
				sel_tree[start + i] = -1;
			} else {
				sel_tree[start + i] = child_right;
			}
			continue;
		} else if (!mrec[child_right]) {
			sel_tree[start + i] = child_left;
			continue;
		}

		/* Select the smaller one to set the parent pointer */
		int cmp = cmp_rec_rec_simple(
			mrec[child_left], mrec[child_right],
			offsets[child_left], offsets[child_right],
			index, NULL);

		sel_tree[start + i] = cmp < 0 ? child_left : child_right;
	}
}
/*********************************************************************//**
Build a selection tree for the merge. The selection tree is a binary
tree and has ceil(log2(fts_sort_pll_degree)) levels, with the root
at level 0.
@return number of tree levels */
static
ulint
row_fts_build_sel_tree(
/*===================*/
	int*		sel_tree,	/*!< in/out: selection tree */
	const mrec_t**	mrec,		/*!< in: sort record */
	ulint**		offsets,	/*!< in: record offsets */
	dict_index_t*	index)		/*!< in: index dictionary */
{
	ulint	treelevel = 1;
	ulint	num = 2;
	int	i = 0;
	ulint	start;

	/* No need to build the selection tree if we only have two
	merge threads */
	if (fts_sort_pll_degree <= 2) {
		return(0);
	}

	while (num < fts_sort_pll_degree) {
		num = num << 1;
		treelevel++;
	}

	start = (1 << treelevel) - 1;

	for (i = 0; i < (int) fts_sort_pll_degree; i++) {
		sel_tree[i + start] = i;
	}

	for (i = static_cast<int>(treelevel) - 1; i >= 0; i--) {
		row_fts_build_sel_tree_level(
			sel_tree, static_cast<ulint>(i), mrec, offsets, index);
	}

	return(treelevel);
}
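
/* Worked example, following the code above: with fts_sort_pll_degree
== 4, the while loop doubles num from 2 to 4, giving treelevel == 2;
start == (1 << 2) - 1 == 3, so the leaves occupy sel_tree[3..6] =
{0, 1, 2, 3}, and levels 1 and 0 are then filled bottom-up by
row_fts_build_sel_tree_level(). */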
/*********************************************************************//**
Read the sorted files containing index data tuples and insert these data
tuples to the index
@return DB_SUCCESS or error number */
UNIV_INTERN
dberr_t
row_fts_merge_insert(
/*=================*/
	dict_index_t*		index,	/*!< in: index */
	dict_table_t*		table,	/*!< in: new table */
	fts_psort_t*		psort_info, /*!< parallel sort info */
	ulint			id)	/*!< in: which auxiliary table's data
					to insert to */
{
	const byte**		b;
	mem_heap_t*		tuple_heap;
	mem_heap_t*		heap;
	dberr_t			error = DB_SUCCESS;
	ulint*			foffs;
	ulint**			offsets;
	fts_tokenizer_word_t	new_word;
	ib_vector_t*		positions;
	doc_id_t		last_doc_id;
	ib_alloc_t*		heap_alloc;
	ulint			n_bytes;
	ulint			i;
	mrec_buf_t**		buf;
	int*			fd;
	byte**			block;
	const mrec_t**		mrec;
	ulint			count = 0;
	int*			sel_tree;
	ulint			height;
	ulint			start;
	fts_psort_insert_t	ins_ctx;
	ulint			count_diag = 0;

	ut_ad(index);
	ut_ad(table);

	/* We use the insert query graph as the dummy graph
	needed in the row module call */

	ins_ctx.trx = trx_allocate_for_background();

	ins_ctx.trx->op_info = "inserting index entries";

	ins_ctx.opt_doc_id_size = psort_info[0].psort_common->opt_doc_id_size;

	heap = mem_heap_create(500 + sizeof(mrec_buf_t));

	b = (const byte**) mem_heap_alloc(
		heap, sizeof (*b) * fts_sort_pll_degree);
	foffs = (ulint*) mem_heap_alloc(
		heap, sizeof(*foffs) * fts_sort_pll_degree);
	offsets = (ulint**) mem_heap_alloc(
		heap, sizeof(*offsets) * fts_sort_pll_degree);
	buf = (mrec_buf_t**) mem_heap_alloc(
		heap, sizeof(*buf) * fts_sort_pll_degree);
	fd = (int*) mem_heap_alloc(heap, sizeof(*fd) * fts_sort_pll_degree);
	block = (byte**) mem_heap_alloc(
		heap, sizeof(*block) * fts_sort_pll_degree);
	mrec = (const mrec_t**) mem_heap_alloc(
		heap, sizeof(*mrec) * fts_sort_pll_degree);
	sel_tree = (int*) mem_heap_alloc(
		heap, sizeof(*sel_tree) * (fts_sort_pll_degree * 2));

	tuple_heap = mem_heap_create(1000);

	ins_ctx.charset = fts_index_get_charset(index);
	ins_ctx.heap = heap;

	for (i = 0; i < fts_sort_pll_degree; i++) {
		ulint	num;

		num = 1 + REC_OFFS_HEADER_SIZE
			+ dict_index_get_n_fields(index);
		offsets[i] = static_cast<ulint*>(mem_heap_zalloc(
			heap, num * sizeof *offsets[i]));
		offsets[i][0] = num;
		offsets[i][1] = dict_index_get_n_fields(index);
		block[i] = psort_info[i].merge_block[id];
		b[i] = psort_info[i].merge_block[id];
		fd[i] = psort_info[i].merge_file[id]->fd;
		foffs[i] = 0;

		buf[i] = static_cast<unsigned char (*)[65536]>(
			mem_heap_alloc(heap, sizeof *buf[i]));
		count_diag += (int) psort_info[i].merge_file[id]->n_rec;
	}

	if (fts_enable_diag_print) {
		ut_print_timestamp(stderr);
		fprintf(stderr, " InnoDB_FTS: to insert %lu records\n",
			(ulong) count_diag);
	}

	/* Initialize related variables if creating FTS indexes */
	heap_alloc = ib_heap_allocator_create(heap);

	memset(&new_word, 0, sizeof(new_word));

	new_word.nodes = ib_vector_create(heap_alloc, sizeof(fts_node_t), 4);
	positions = ib_vector_create(heap_alloc, sizeof(ulint), 32);
	last_doc_id = 0;

	/* Allocate insert query graphs for the FTS auxiliary
	INDEX tables; note we have FTS_NUM_AUX_INDEX such index tables */
	n_bytes = sizeof(que_t*) * (FTS_NUM_AUX_INDEX + 1);
	ins_ctx.ins_graph = static_cast<que_t**>(mem_heap_alloc(heap, n_bytes));
	memset(ins_ctx.ins_graph, 0x0, n_bytes);

	/* We should set flags2 with the aux_table_name here,
	in order to get the correct aux table names. */
	index->table->flags2 |= DICT_TF2_FTS_AUX_HEX_NAME;
	DBUG_EXECUTE_IF("innodb_test_wrong_fts_aux_table_name",
			index->table->flags2 &= ~DICT_TF2_FTS_AUX_HEX_NAME;);
	ins_ctx.fts_table.type = FTS_INDEX_TABLE;
	ins_ctx.fts_table.index_id = index->id;
	ins_ctx.fts_table.table_id = table->id;
	ins_ctx.fts_table.parent = index->table->name;
	ins_ctx.fts_table.table = index->table;

	for (i = 0; i < fts_sort_pll_degree; i++) {
		if (psort_info[i].merge_file[id]->n_rec == 0) {
			/* No rows to read */
			mrec[i] = b[i] = NULL;
		} else {
			/* Read from the temp file only if it has been
			written to. Otherwise, the block memory holds
			all the sorted records */
			if (psort_info[i].merge_file[id]->offset > 0
			    && (!row_merge_read(
					fd[i], foffs[i],
					(row_merge_block_t*) block[i]))) {
				error = DB_CORRUPTION;
				goto exit;
			}

			ROW_MERGE_READ_GET_NEXT(i);
		}
	}

	height = row_fts_build_sel_tree(sel_tree, (const mrec_t**) mrec,
					offsets, index);

	start = (1 << height) - 1;

	/* Fetch sorted records from the sort buffer and insert them into
	the corresponding FTS index auxiliary tables */
	for (;;) {
		dtuple_t*	dtuple;
		ulint		n_ext;
		int		min_rec = 0;

		if (fts_sort_pll_degree <= 2) {
			while (!mrec[min_rec]) {
				min_rec++;

				if (min_rec >= (int) fts_sort_pll_degree) {
					row_fts_insert_tuple(
						&ins_ctx, &new_word,
						positions, &last_doc_id,
						NULL);

					goto exit;
				}
			}

			for (i = min_rec + 1; i < fts_sort_pll_degree; i++) {
				if (!mrec[i]) {
					continue;
				}

				if (cmp_rec_rec_simple(
					    mrec[i], mrec[min_rec],
					    offsets[i], offsets[min_rec],
					    index, NULL) < 0) {
					min_rec = static_cast<int>(i);
				}
			}
		} else {
			min_rec = sel_tree[0];

			if (min_rec == -1) {
				row_fts_insert_tuple(
					&ins_ctx, &new_word,
					positions, &last_doc_id,
					NULL);

				goto exit;
			}
		}

		dtuple = row_rec_to_index_entry_low(
			mrec[min_rec], index, offsets[min_rec], &n_ext,
			tuple_heap);

		row_fts_insert_tuple(
			&ins_ctx, &new_word, positions,
			&last_doc_id, dtuple);

		ROW_MERGE_READ_GET_NEXT(min_rec);

		if (fts_sort_pll_degree > 2) {
			if (!mrec[min_rec]) {
				sel_tree[start + min_rec] = -1;
			}

			row_fts_sel_tree_update(sel_tree, start + min_rec,
						height, mrec,
						offsets, index);
		}

		count++;

		mem_heap_empty(tuple_heap);
	}

exit:
	fts_sql_commit(ins_ctx.trx);

	ins_ctx.trx->op_info = "";

	mem_heap_free(tuple_heap);

	for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {
		if (ins_ctx.ins_graph[i]) {
			fts_que_graph_free(ins_ctx.ins_graph[i]);
		}
	}

	trx_free_for_background(ins_ctx.trx);

	mem_heap_free(heap);

	if (fts_enable_diag_print) {
		ut_print_timestamp(stderr);
		fprintf(stderr, " InnoDB_FTS: inserted %lu records\n",
			(ulong) count);
	}

	return(error);
}