  1. /*****************************************************************************
  2. Copyright (c) 2005, 2013, Oracle and/or its affiliates. All Rights Reserved.
  3. This program is free software; you can redistribute it and/or modify it under
  4. the terms of the GNU General Public License as published by the Free Software
  5. Foundation; version 2 of the License.
  6. This program is distributed in the hope that it will be useful, but WITHOUT
  7. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  8. FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
  9. You should have received a copy of the GNU General Public License along with
  10. this program; if not, write to the Free Software Foundation, Inc.,
  11. 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
  12. *****************************************************************************/
  13. /**************************************************//**
  14. @file row/row0merge.cc
  15. New index creation routines using a merge sort
  16. Created 12/4/2005 Jan Lindstrom
  17. Completed by Sunny Bains and Marko Makela
  18. *******************************************************/
  19. #include <my_config.h>
  20. #include <log.h>
  21. #include "row0merge.h"
  22. #include "row0ext.h"
  23. #include "row0log.h"
  24. #include "row0ins.h"
  25. #include "row0sel.h"
  26. #include "dict0crea.h"
  27. #include "trx0purge.h"
  28. #include "lock0lock.h"
  29. #include "pars0pars.h"
  30. #include "ut0sort.h"
  31. #include "row0ftsort.h"
  32. #include "row0import.h"
  33. #include "handler0alter.h"
  34. #include "ha_prototypes.h"
  35. #include "math.h" /* log() */
  36. float my_log2f(float n)
  37. {
  38. /* log(n) / log(2) is log2. */
  39. return (float)(log((double)n) / log((double)2));
  40. }
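/* my_log2f() is presumably provided because log2f() (a C99 addition) may be
missing on some supported platforms; log(n)/log(2) needs only the C89 math
library. */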
  41. /* Ignore posix_fadvise() on those platforms where it does not exist */
  42. #if defined __WIN__
  43. # define posix_fadvise(fd, offset, len, advice) /* nothing */
  44. #endif /* __WIN__ */
  45. #ifdef UNIV_DEBUG
  46. /** Set these in order to enable debug printout. */
  47. /* @{ */
  48. /** Log each record read from temporary file. */
  49. static ibool row_merge_print_read;
  50. /** Log each record write to temporary file. */
  51. static ibool row_merge_print_write;
  52. /** Log each row_merge_blocks() call, merging two blocks of records to
  53. a bigger one. */
  54. static ibool row_merge_print_block;
  55. /** Log each block read from temporary file. */
  56. static ibool row_merge_print_block_read;
  57. /** Log each block write to temporary file. */
  58. static ibool row_merge_print_block_write;
  59. /* @} */
  60. #endif /* UNIV_DEBUG */
  61. /* Whether to disable file system cache */
  62. UNIV_INTERN char srv_disable_sort_file_cache;
  63. /* Maximum pending doc memory limit in bytes for an FTS tokenization thread */
  64. #define FTS_PENDING_DOC_MEMORY_LIMIT 1000000
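/* When a tokenization bucket's pending fts_doc_list exceeds this limit,
row_merge_buf_add() below sleeps in 1 ms steps (bounded by a trial count)
so that the parallel sort threads can drain the list. */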
  65. #ifdef UNIV_DEBUG
  66. /******************************************************//**
  67. Display a merge tuple. */
  68. static __attribute__((nonnull))
  69. void
  70. row_merge_tuple_print(
  71. /*==================*/
  72. FILE* f, /*!< in: output stream */
  73. const mtuple_t* entry, /*!< in: tuple to print */
  74. ulint n_fields)/*!< in: number of fields in the tuple */
  75. {
  76. ulint j;
  77. for (j = 0; j < n_fields; j++) {
  78. const dfield_t* field = &entry->fields[j];
  79. if (dfield_is_null(field)) {
  80. fputs("\n NULL;", f);
  81. } else {
  82. ulint field_len = dfield_get_len(field);
  83. ulint len = ut_min(field_len, 20);
  84. if (dfield_is_ext(field)) {
  85. fputs("\nE", f);
  86. } else {
  87. fputs("\n ", f);
  88. }
  89. ut_print_buf(f, dfield_get_data(field), len);
  90. if (len != field_len) {
  91. fprintf(f, " (total %lu bytes)", field_len);
  92. }
  93. }
  94. }
  95. putc('\n', f);
  96. }
  97. #endif /* UNIV_DEBUG */
  98. /******************************************************//**
  99. Encode an index record. */
  100. static __attribute__((nonnull))
  101. void
  102. row_merge_buf_encode(
  103. /*=================*/
  104. byte** b, /*!< in/out: pointer to
  105. current end of output buffer */
  106. const dict_index_t* index, /*!< in: index */
  107. const mtuple_t* entry, /*!< in: index fields
  108. of the record to encode */
  109. ulint n_fields) /*!< in: number of fields
  110. in the entry */
  111. {
  112. ulint size;
  113. ulint extra_size;
  114. size = rec_get_converted_size_temp(
  115. index, entry->fields, n_fields, &extra_size);
  116. ut_ad(size >= extra_size);
  117. /* Encode extra_size + 1 */
  118. if (extra_size + 1 < 0x80) {
  119. *(*b)++ = (byte) (extra_size + 1);
  120. } else {
  121. ut_ad((extra_size + 1) < 0x8000);
  122. *(*b)++ = (byte) (0x80 | ((extra_size + 1) >> 8));
  123. *(*b)++ = (byte) (extra_size + 1);
  124. }
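/* Worked example of the encoding above (illustration only): extra_size = 5
is stored as the single byte 0x06, while extra_size = 200 (extra_size + 1 =
0xC9) is stored as the two bytes 0x80 0xC9, i.e. 0x80 | high byte followed
by the low byte. row_merge_read_rec() reverses this when reading the record
back from the temporary file. */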
  125. rec_convert_dtuple_to_temp(*b + extra_size, index,
  126. entry->fields, n_fields);
  127. *b += size;
  128. }
  129. /******************************************************//**
  130. Allocate a sort buffer.
  131. @return own: sort buffer */
  132. static __attribute__((malloc, nonnull))
  133. row_merge_buf_t*
  134. row_merge_buf_create_low(
  135. /*=====================*/
  136. mem_heap_t* heap, /*!< in: heap where allocated */
  137. dict_index_t* index, /*!< in: secondary index */
  138. ulint max_tuples, /*!< in: maximum number of
  139. data tuples */
  140. ulint buf_size) /*!< in: size of the buffer,
  141. in bytes */
  142. {
  143. row_merge_buf_t* buf;
  144. ut_ad(max_tuples > 0);
  145. ut_ad(max_tuples <= srv_sort_buf_size);
  146. buf = static_cast<row_merge_buf_t*>(mem_heap_zalloc(heap, buf_size));
  147. buf->heap = heap;
  148. buf->index = index;
  149. buf->max_tuples = max_tuples;
  150. buf->tuples = static_cast<mtuple_t*>(
  151. ut_malloc(2 * max_tuples * sizeof *buf->tuples));
  152. buf->tmp_tuples = buf->tuples + max_tuples;
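/* tuples[] and tmp_tuples[] share one allocation of 2 * max_tuples entries;
the second half is the auxiliary work area used by row_merge_tuple_sort()
during the in-memory merge sort. */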
  153. return(buf);
  154. }
  155. /******************************************************//**
  156. Allocate a sort buffer.
  157. @return own: sort buffer */
  158. UNIV_INTERN
  159. row_merge_buf_t*
  160. row_merge_buf_create(
  161. /*=================*/
  162. dict_index_t* index) /*!< in: secondary index */
  163. {
  164. row_merge_buf_t* buf;
  165. ulint max_tuples;
  166. ulint buf_size;
  167. mem_heap_t* heap;
  168. max_tuples = srv_sort_buf_size
  169. / ut_max(1, dict_index_get_min_size(index));
  170. buf_size = (sizeof *buf);
  171. heap = mem_heap_create(buf_size);
  172. buf = row_merge_buf_create_low(heap, index, max_tuples, buf_size);
  173. return(buf);
  174. }
  175. /******************************************************//**
  176. Empty a sort buffer.
  177. @return sort buffer */
  178. UNIV_INTERN
  179. row_merge_buf_t*
  180. row_merge_buf_empty(
  181. /*================*/
  182. row_merge_buf_t* buf) /*!< in,own: sort buffer */
  183. {
  184. ulint buf_size = sizeof *buf;
  185. ulint max_tuples = buf->max_tuples;
  186. mem_heap_t* heap = buf->heap;
  187. dict_index_t* index = buf->index;
  188. mtuple_t* tuples = buf->tuples;
  189. mem_heap_empty(heap);
  190. buf = static_cast<row_merge_buf_t*>(mem_heap_zalloc(heap, buf_size));
  191. buf->heap = heap;
  192. buf->index = index;
  193. buf->max_tuples = max_tuples;
  194. buf->tuples = tuples;
  195. buf->tmp_tuples = buf->tuples + max_tuples;
  196. return(buf);
  197. }
  198. /******************************************************//**
  199. Deallocate a sort buffer. */
  200. UNIV_INTERN
  201. void
  202. row_merge_buf_free(
  203. /*===============*/
  204. row_merge_buf_t* buf) /*!< in,own: sort buffer to be freed */
  205. {
  206. ut_free(buf->tuples);
  207. mem_heap_free(buf->heap);
  208. }
  209. /******************************************************//**
  210. Insert a data tuple into a sort buffer.
  211. @return number of rows added, 0 if out of space */
  212. static
  213. ulint
  214. row_merge_buf_add(
  215. /*==============*/
  216. row_merge_buf_t* buf, /*!< in/out: sort buffer */
  217. dict_index_t* fts_index,/*!< in: fts index to be created */
  218. const dict_table_t* old_table,/*!< in: original table */
  219. fts_psort_t* psort_info, /*!< in: parallel sort info */
  220. const dtuple_t* row, /*!< in: table row */
  221. const row_ext_t* ext, /*!< in: cache of externally stored
  222. column prefixes, or NULL */
  223. doc_id_t* doc_id) /*!< in/out: Doc ID if we are
  224. creating FTS index */
  225. {
  226. ulint i;
  227. const dict_index_t* index;
  228. mtuple_t* entry;
  229. dfield_t* field;
  230. const dict_field_t* ifield;
  231. ulint n_fields;
  232. ulint data_size;
  233. ulint extra_size;
  234. ulint bucket = 0;
  235. doc_id_t write_doc_id;
  236. ulint n_row_added = 0;
  237. DBUG_ENTER("row_merge_buf_add");
  238. if (buf->n_tuples >= buf->max_tuples) {
  239. DBUG_RETURN(0);
  240. }
  241. DBUG_EXECUTE_IF(
  242. "ib_row_merge_buf_add_two",
  243. if (buf->n_tuples >= 2) DBUG_RETURN(0););
  244. UNIV_PREFETCH_R(row->fields);
  245. /* If we are building FTS index, buf->index points to
  246. the 'fts_sort_idx', and real FTS index is stored in
  247. fts_index */
  248. index = (buf->index->type & DICT_FTS) ? fts_index : buf->index;
  249. n_fields = dict_index_get_n_fields(index);
  250. entry = &buf->tuples[buf->n_tuples];
  251. field = entry->fields = static_cast<dfield_t*>(
  252. mem_heap_alloc(buf->heap, n_fields * sizeof *entry->fields));
  253. data_size = 0;
  254. extra_size = UT_BITS_IN_BYTES(index->n_nullable);
  255. ifield = dict_index_get_nth_field(index, 0);
  256. for (i = 0; i < n_fields; i++, field++, ifield++) {
  257. ulint len;
  258. const dict_col_t* col;
  259. ulint col_no;
  260. ulint fixed_len;
  261. const dfield_t* row_field;
  262. col = ifield->col;
  263. col_no = dict_col_get_no(col);
  264. /* Process the Doc ID column */
  265. if (*doc_id > 0
  266. && col_no == index->table->fts->doc_col) {
  267. fts_write_doc_id((byte*) &write_doc_id, *doc_id);
  268. /* Note: field->data now points to a value on the
  269. stack: &write_doc_id after dfield_set_data(). Because
  270. there is only one doc_id per row, it shouldn't matter.
  271. We allocate a new buffer before we leave the function
  272. later below. */
  273. dfield_set_data(
  274. field, &write_doc_id, sizeof(write_doc_id));
  275. field->type.mtype = ifield->col->mtype;
  276. field->type.prtype = ifield->col->prtype;
  277. field->type.mbminmaxlen = DATA_MBMINMAXLEN(0, 0);
  278. field->type.len = ifield->col->len;
  279. } else {
  280. row_field = dtuple_get_nth_field(row, col_no);
  281. dfield_copy(field, row_field);
  282. /* Tokenize and process data for FTS */
  283. if (index->type & DICT_FTS) {
  284. fts_doc_item_t* doc_item;
  285. byte* value;
  286. void* ptr;
  287. const ulint max_trial_count = 10000;
  288. ulint trial_count = 0;
  289. /* Fetch the Doc ID if it already exists
  290. in the row, and was not supplied by the
  291. caller. Even if the value column is
  292. NULL, we still need to get the Doc
  293. ID so as to maintain the correct max
  294. Doc ID */
  295. if (*doc_id == 0) {
  296. const dfield_t* doc_field;
  297. doc_field = dtuple_get_nth_field(
  298. row,
  299. index->table->fts->doc_col);
  300. *doc_id = (doc_id_t) mach_read_from_8(
  301. static_cast<byte*>(
  302. dfield_get_data(doc_field)));
  303. if (*doc_id == 0) {
  304. ib_logf(IB_LOG_LEVEL_WARN,
  305. "FTS Doc ID is zero. "
  306. "Record Skipped");
  307. DBUG_RETURN(0);
  308. }
  309. }
  310. if (dfield_is_null(field)) {
  311. n_row_added = 1;
  312. continue;
  313. }
  314. ptr = ut_malloc(sizeof(*doc_item)
  315. + field->len);
  316. doc_item = static_cast<fts_doc_item_t*>(ptr);
  317. value = static_cast<byte*>(ptr)
  318. + sizeof(*doc_item);
  319. memcpy(value, field->data, field->len);
  320. field->data = value;
  321. doc_item->field = field;
  322. doc_item->doc_id = *doc_id;
  323. bucket = *doc_id % fts_sort_pll_degree;
  324. /* Add doc item to fts_doc_list */
  325. mutex_enter(&psort_info[bucket].mutex);
  326. if (psort_info[bucket].error == DB_SUCCESS) {
  327. UT_LIST_ADD_LAST(
  328. doc_list,
  329. psort_info[bucket].fts_doc_list,
  330. doc_item);
  331. psort_info[bucket].memory_used +=
  332. sizeof(*doc_item) + field->len;
  333. } else {
  334. ut_free(doc_item);
  335. }
  336. mutex_exit(&psort_info[bucket].mutex);
  337. /* Sleep when memory used exceeds the limit */
  338. while (psort_info[bucket].memory_used
  339. > FTS_PENDING_DOC_MEMORY_LIMIT
  340. && trial_count++ < max_trial_count) {
  341. os_thread_sleep(1000);
  342. }
  343. n_row_added = 1;
  344. continue;
  345. }
  346. }
  347. len = dfield_get_len(field);
  348. if (dfield_is_null(field)) {
  349. ut_ad(!(col->prtype & DATA_NOT_NULL));
  350. continue;
  351. } else if (!ext) {
  352. } else if (dict_index_is_clust(index)) {
  353. /* Flag externally stored fields. */
  354. const byte* buf = row_ext_lookup(ext, col_no,
  355. &len);
  356. if (UNIV_LIKELY_NULL(buf)) {
  357. ut_a(buf != field_ref_zero);
  358. if (i < dict_index_get_n_unique(index)) {
  359. dfield_set_data(field, buf, len);
  360. } else {
  361. dfield_set_ext(field);
  362. len = dfield_get_len(field);
  363. }
  364. }
  365. } else {
  366. const byte* buf = row_ext_lookup(ext, col_no,
  367. &len);
  368. if (UNIV_LIKELY_NULL(buf)) {
  369. ut_a(buf != field_ref_zero);
  370. dfield_set_data(field, buf, len);
  371. }
  372. }
  373. /* If a column prefix index, take only the prefix */
  374. if (ifield->prefix_len) {
  375. len = dtype_get_at_most_n_mbchars(
  376. col->prtype,
  377. col->mbminmaxlen,
  378. ifield->prefix_len,
  379. len,
  380. static_cast<char*>(dfield_get_data(field)));
  381. dfield_set_len(field, len);
  382. }
  383. ut_ad(len <= col->len || col->mtype == DATA_BLOB);
  384. fixed_len = ifield->fixed_len;
  385. if (fixed_len && !dict_table_is_comp(index->table)
  386. && DATA_MBMINLEN(col->mbminmaxlen)
  387. != DATA_MBMAXLEN(col->mbminmaxlen)) {
  388. /* CHAR in ROW_FORMAT=REDUNDANT is always
  389. fixed-length, but in the temporary file it is
  390. variable-length for variable-length character
  391. sets. */
  392. fixed_len = 0;
  393. }
  394. if (fixed_len) {
  395. #ifdef UNIV_DEBUG
  396. ulint mbminlen = DATA_MBMINLEN(col->mbminmaxlen);
  397. ulint mbmaxlen = DATA_MBMAXLEN(col->mbminmaxlen);
  398. /* len should be between the sizes calculated
  399. based on mbminlen and mbmaxlen */
  400. ut_ad(len <= fixed_len);
  401. ut_ad(!mbmaxlen || len >= mbminlen
  402. * (fixed_len / mbmaxlen));
  403. ut_ad(!dfield_is_ext(field));
  404. #endif /* UNIV_DEBUG */
  405. } else if (dfield_is_ext(field)) {
  406. extra_size += 2;
  407. } else if (len < 128
  408. || (col->len < 256 && col->mtype != DATA_BLOB)) {
  409. extra_size++;
  410. } else {
  411. /* For variable-length columns, we look up the
  412. maximum length from the column itself. If this
  413. is a prefix index column shorter than 256 bytes,
  414. this will waste one byte. */
  415. extra_size += 2;
  416. }
  417. data_size += len;
  418. }
  419. /* If this is FTS index, we already populated the sort buffer, return
  420. here */
  421. if (index->type & DICT_FTS) {
  422. DBUG_RETURN(n_row_added);
  423. }
  424. #ifdef UNIV_DEBUG
  425. {
  426. ulint size;
  427. ulint extra;
  428. size = rec_get_converted_size_temp(
  429. index, entry->fields, n_fields, &extra);
  430. ut_ad(data_size + extra_size == size);
  431. ut_ad(extra_size == extra);
  432. }
  433. #endif /* UNIV_DEBUG */
  434. /* Add to the total size of the record in row_merge_block_t
  435. the encoded length of extra_size and the extra bytes (extra_size).
  436. See row_merge_buf_write() for the variable-length encoding
  437. of extra_size. */
  438. data_size += (extra_size + 1) + ((extra_size + 1) >= 0x80);
  439. ut_ad(data_size < srv_sort_buf_size);
  440. /* Reserve one byte for the end marker of row_merge_block_t. */
  441. if (buf->total_size + data_size >= srv_sort_buf_size - 1) {
  442. DBUG_RETURN(0);
  443. }
  444. buf->total_size += data_size;
  445. buf->n_tuples++;
  446. n_row_added++;
  447. field = entry->fields;
  448. /* Copy the data fields. */
  449. do {
  450. dfield_dup(field++, buf->heap);
  451. } while (--n_fields);
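/* dfield_dup() copies each field's data into buf->heap, so the stored tuple
no longer points into the clustered index record or into the stack-allocated
write_doc_id above. */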
  452. DBUG_RETURN(n_row_added);
  453. }
  454. /*************************************************************//**
  455. Report a duplicate key. */
  456. UNIV_INTERN
  457. void
  458. row_merge_dup_report(
  459. /*=================*/
  460. row_merge_dup_t* dup, /*!< in/out: for reporting duplicates */
  461. const dfield_t* entry) /*!< in: duplicate index entry */
  462. {
  463. if (!dup->n_dup++) {
  464. /* Only report the first duplicate record,
  465. but count all duplicate records. */
  466. innobase_fields_to_mysql(dup->table, dup->index, entry);
  467. }
  468. }
  469. /*************************************************************//**
  470. Compare two tuples.
  471. @return 1, 0, -1 if a is greater, equal, less, respectively, than b */
  472. static __attribute__((warn_unused_result))
  473. int
  474. row_merge_tuple_cmp(
  475. /*================*/
  476. ulint n_uniq, /*!< in: number of unique fields */
  477. ulint n_field,/*!< in: number of fields */
  478. const mtuple_t& a, /*!< in: first tuple to be compared */
  479. const mtuple_t& b, /*!< in: second tuple to be compared */
  480. row_merge_dup_t* dup) /*!< in/out: for reporting duplicates,
  481. NULL if non-unique index */
  482. {
  483. int cmp;
  484. const dfield_t* af = a.fields;
  485. const dfield_t* bf = b.fields;
  486. ulint n = n_uniq;
  487. ut_ad(n_uniq > 0);
  488. ut_ad(n_uniq <= n_field);
  489. /* Compare the fields of the tuples until a difference is
  490. found or we run out of fields to compare. If !cmp at the
  491. end, the tuples are equal. */
  492. do {
  493. cmp = cmp_dfield_dfield(af++, bf++);
  494. } while (!cmp && --n);
  495. if (cmp) {
  496. return(cmp);
  497. }
  498. if (dup) {
  499. /* Report a duplicate value error if the tuples are
  500. logically equal. NULL columns are logically inequal,
  501. although they are equal in the sorting order. Find
  502. out if any of the fields are NULL. */
  503. for (const dfield_t* df = a.fields; df != af; df++) {
  504. if (dfield_is_null(df)) {
  505. goto no_report;
  506. }
  507. }
  508. row_merge_dup_report(dup, a.fields);
  509. }
  510. no_report:
  511. /* The n_uniq fields were equal, but we compare all fields so
  512. that we will get the same (internal) order as in the B-tree. */
  513. for (n = n_field - n_uniq + 1; --n; ) {
  514. cmp = cmp_dfield_dfield(af++, bf++);
  515. if (cmp) {
  516. return(cmp);
  517. }
  518. }
  519. /* This should never be reached, except in a secondary index
  520. when creating a secondary index and a PRIMARY KEY, and there
  521. is a duplicate in the PRIMARY KEY that has not been detected
  522. yet. Internally, an index must never contain duplicates. */
  523. return(cmp);
  524. }
  525. /** Wrapper for row_merge_tuple_sort() to inject some more context to
  526. UT_SORT_FUNCTION_BODY().
  527. @param tuples array of tuples being sorted
  528. @param aux work area, same size as tuples[]
  529. @param low lower bound of the sorting area, inclusive
  530. @param high upper bound of the sorting area, exclusive */
  531. #define row_merge_tuple_sort_ctx(tuples, aux, low, high) \
  532. row_merge_tuple_sort(n_uniq, n_field, dup, tuples, aux, low, high)
  533. /** Wrapper for row_merge_tuple_cmp() to inject some more context to
  534. UT_SORT_FUNCTION_BODY().
  535. @param a first tuple to be compared
  536. @param b second tuple to be compared
  537. @return 1, 0, -1 if a is greater, equal, less, respectively, than b */
  538. #define row_merge_tuple_cmp_ctx(a,b) \
  539. row_merge_tuple_cmp(n_uniq, n_field, a, b, dup)
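/* The two _ctx wrappers above are needed because UT_SORT_FUNCTION_BODY()
only passes (tuples, aux, low, high) and a two-argument comparator;
n_uniq, n_field and dup are injected from the enclosing scope of
row_merge_tuple_sort(). */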
  540. /**********************************************************************//**
  541. Merge sort the tuple buffer in main memory. */
  542. static __attribute__((nonnull(4,5)))
  543. void
  544. row_merge_tuple_sort(
  545. /*=================*/
  546. ulint n_uniq, /*!< in: number of unique fields */
  547. ulint n_field,/*!< in: number of fields */
  548. row_merge_dup_t* dup, /*!< in/out: reporter of duplicates
  549. (NULL if non-unique index) */
  550. mtuple_t* tuples, /*!< in/out: tuples */
  551. mtuple_t* aux, /*!< in/out: work area */
  552. ulint low, /*!< in: lower bound of the
  553. sorting area, inclusive */
  554. ulint high) /*!< in: upper bound of the
  555. sorting area, exclusive */
  556. {
  557. ut_ad(n_field > 0);
  558. ut_ad(n_uniq <= n_field);
  559. UT_SORT_FUNCTION_BODY(row_merge_tuple_sort_ctx,
  560. tuples, aux, low, high, row_merge_tuple_cmp_ctx);
  561. }
  562. /******************************************************//**
  563. Sort a buffer. */
  564. UNIV_INTERN
  565. void
  566. row_merge_buf_sort(
  567. /*===============*/
  568. row_merge_buf_t* buf, /*!< in/out: sort buffer */
  569. row_merge_dup_t* dup) /*!< in/out: reporter of duplicates
  570. (NULL if non-unique index) */
  571. {
  572. row_merge_tuple_sort(dict_index_get_n_unique(buf->index),
  573. dict_index_get_n_fields(buf->index),
  574. dup,
  575. buf->tuples, buf->tmp_tuples, 0, buf->n_tuples);
  576. }
  577. /******************************************************//**
  578. Write a buffer to a block. */
  579. UNIV_INTERN
  580. void
  581. row_merge_buf_write(
  582. /*================*/
  583. const row_merge_buf_t* buf, /*!< in: sorted buffer */
  584. const merge_file_t* of UNIV_UNUSED,
  585. /*!< in: output file */
  586. row_merge_block_t* block) /*!< out: buffer for writing to file */
  587. {
  588. const dict_index_t* index = buf->index;
  589. ulint n_fields= dict_index_get_n_fields(index);
  590. byte* b = &block[0];
  591. for (ulint i = 0; i < buf->n_tuples; i++) {
  592. const mtuple_t* entry = &buf->tuples[i];
  593. row_merge_buf_encode(&b, index, entry, n_fields);
  594. ut_ad(b < &block[srv_sort_buf_size]);
  595. #ifdef UNIV_DEBUG
  596. if (row_merge_print_write) {
  597. fprintf(stderr, "row_merge_buf_write %p,%d,%lu %lu",
  598. (void*) b, of->fd, (ulong) of->offset,
  599. (ulong) i);
  600. row_merge_tuple_print(stderr, entry, n_fields);
  601. }
  602. #endif /* UNIV_DEBUG */
  603. }
  604. /* Write an "end-of-chunk" marker. */
  605. ut_a(b < &block[srv_sort_buf_size]);
  606. ut_a(b == &block[0] + buf->total_size);
  607. *b++ = 0;
  608. #ifdef UNIV_DEBUG_VALGRIND
  609. /* The rest of the block is uninitialized. Initialize it
  610. to avoid bogus warnings. */
  611. memset(b, 0xff, &block[srv_sort_buf_size] - b);
  612. #endif /* UNIV_DEBUG_VALGRIND */
  613. #ifdef UNIV_DEBUG
  614. if (row_merge_print_write) {
  615. fprintf(stderr, "row_merge_buf_write %p,%d,%lu EOF\n",
  616. (void*) b, of->fd, (ulong) of->offset);
  617. }
  618. #endif /* UNIV_DEBUG */
  619. }
  620. /******************************************************//**
  621. Create a memory heap and allocate space for row_merge_rec_offsets()
  622. and mrec_buf_t[3].
  623. @return memory heap */
  624. static
  625. mem_heap_t*
  626. row_merge_heap_create(
  627. /*==================*/
  628. const dict_index_t* index, /*!< in: record descriptor */
  629. mrec_buf_t** buf, /*!< out: 3 buffers */
  630. ulint** offsets1, /*!< out: offsets */
  631. ulint** offsets2) /*!< out: offsets */
  632. {
  633. ulint i = 1 + REC_OFFS_HEADER_SIZE
  634. + dict_index_get_n_fields(index);
  635. mem_heap_t* heap = mem_heap_create(2 * i * sizeof **offsets1
  636. + 3 * sizeof **buf);
  637. *buf = static_cast<mrec_buf_t*>(
  638. mem_heap_alloc(heap, 3 * sizeof **buf));
  639. *offsets1 = static_cast<ulint*>(
  640. mem_heap_alloc(heap, i * sizeof **offsets1));
  641. *offsets2 = static_cast<ulint*>(
  642. mem_heap_alloc(heap, i * sizeof **offsets2));
  643. (*offsets1)[0] = (*offsets2)[0] = i;
  644. (*offsets1)[1] = (*offsets2)[1] = dict_index_get_n_fields(index);
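/* Per the rec_offs convention, offsets[0] holds the allocated size of the
array and offsets[1] the number of fields; the remaining slots are filled in
later, e.g. by rec_init_offsets_temp(). */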
  645. return(heap);
  646. }
  647. /********************************************************************//**
  648. Read a merge block from the file system.
  649. @return TRUE if request was successful, FALSE if fail */
  650. UNIV_INTERN
  651. ibool
  652. row_merge_read(
  653. /*===========*/
  654. int fd, /*!< in: file descriptor */
  655. ulint offset, /*!< in: offset where to read
  656. in number of row_merge_block_t
  657. elements */
  658. row_merge_block_t* buf) /*!< out: data */
  659. {
  660. os_offset_t ofs = ((os_offset_t) offset) * srv_sort_buf_size;
  661. ibool success;
  662. DBUG_EXECUTE_IF("row_merge_read_failure", return(FALSE););
  663. #ifdef UNIV_DEBUG
  664. if (row_merge_print_block_read) {
  665. fprintf(stderr, "row_merge_read fd=%d ofs=%lu\n",
  666. fd, (ulong) offset);
  667. }
  668. #endif /* UNIV_DEBUG */
  675. success = os_file_read_no_error_handling(OS_FILE_FROM_FD(fd), buf,
  676. ofs, srv_sort_buf_size, FALSE);
  677. #ifdef POSIX_FADV_DONTNEED
  678. /* Each block is read exactly once. Free up the file cache. */
  679. posix_fadvise(fd, ofs, srv_sort_buf_size, POSIX_FADV_DONTNEED);
  680. #endif /* POSIX_FADV_DONTNEED */
  681. if (UNIV_UNLIKELY(!success)) {
  682. ut_print_timestamp(stderr);
  683. fprintf(stderr,
  684. " InnoDB: failed to read merge block at " UINT64PF "\n",
  685. ofs);
  686. }
  687. return(UNIV_LIKELY(success));
  688. }
  689. /********************************************************************//**
  690. Write a merge block to the file system.
  691. @return TRUE if request was successful, FALSE if fail */
  692. UNIV_INTERN
  693. ibool
  694. row_merge_write(
  695. /*============*/
  696. int fd, /*!< in: file descriptor */
  697. ulint offset, /*!< in: offset where to write,
  698. in number of row_merge_block_t elements */
  699. const void* buf) /*!< in: data */
  700. {
  701. size_t buf_len = srv_sort_buf_size;
  702. os_offset_t ofs = buf_len * (os_offset_t) offset;
  703. ibool ret;
  704. DBUG_EXECUTE_IF("row_merge_write_failure", return(FALSE););
  705. ret = os_file_write("(merge)", OS_FILE_FROM_FD(fd), buf, ofs, buf_len);
  706. #ifdef UNIV_DEBUG
  707. if (row_merge_print_block_write) {
  708. fprintf(stderr, "row_merge_write fd=%d ofs=%lu\n",
  709. fd, (ulong) offset);
  710. }
  711. #endif /* UNIV_DEBUG */
  712. #ifdef POSIX_FADV_DONTNEED
  713. /* The block will be needed on the next merge pass,
  714. but it can be evicted from the file cache meanwhile. */
  715. posix_fadvise(fd, ofs, buf_len, POSIX_FADV_DONTNEED);
  716. #endif /* POSIX_FADV_DONTNEED */
  717. return(UNIV_LIKELY(ret));
  718. }
  719. /********************************************************************//**
  720. Read a merge record.
  721. @return pointer to next record, or NULL on I/O error or end of list */
  722. UNIV_INTERN
  723. const byte*
  724. row_merge_read_rec(
  725. /*===============*/
  726. row_merge_block_t* block, /*!< in/out: file buffer */
  727. mrec_buf_t* buf, /*!< in/out: secondary buffer */
  728. const byte* b, /*!< in: pointer to record */
  729. const dict_index_t* index, /*!< in: index of the record */
  730. int fd, /*!< in: file descriptor */
  731. ulint* foffs, /*!< in/out: file offset */
  732. const mrec_t** mrec, /*!< out: pointer to merge record,
  733. or NULL on end of list
  734. (non-NULL on I/O error) */
  735. ulint* offsets)/*!< out: offsets of mrec */
  736. {
  737. ulint extra_size;
  738. ulint data_size;
  739. ulint avail_size;
  740. ut_ad(block);
  741. ut_ad(buf);
  742. ut_ad(b >= &block[0]);
  743. ut_ad(b < &block[srv_sort_buf_size]);
  744. ut_ad(index);
  745. ut_ad(foffs);
  746. ut_ad(mrec);
  747. ut_ad(offsets);
  748. ut_ad(*offsets == 1 + REC_OFFS_HEADER_SIZE
  749. + dict_index_get_n_fields(index));
  750. extra_size = *b++;
  751. if (UNIV_UNLIKELY(!extra_size)) {
  752. /* End of list */
  753. *mrec = NULL;
  754. #ifdef UNIV_DEBUG
  755. if (row_merge_print_read) {
  756. fprintf(stderr, "row_merge_read %p,%p,%d,%lu EOF\n",
  757. (const void*) b, (const void*) block,
  758. fd, (ulong) *foffs);
  759. }
  760. #endif /* UNIV_DEBUG */
  761. return(NULL);
  762. }
  763. if (extra_size >= 0x80) {
  764. /* Read another byte of extra_size. */
  765. if (UNIV_UNLIKELY(b >= &block[srv_sort_buf_size])) {
  766. if (!row_merge_read(fd, ++(*foffs), block)) {
  767. err_exit:
  768. /* Signal I/O error. */
  769. *mrec = b;
  770. return(NULL);
  771. }
  772. /* Wrap around to the beginning of the buffer. */
  773. b = &block[0];
  774. }
  775. extra_size = (extra_size & 0x7f) << 8;
  776. extra_size |= *b++;
  777. }
  778. /* Normalize extra_size. Above, value 0 signals "end of list". */
  779. extra_size--;
  780. /* Read the extra bytes. */
  781. if (UNIV_UNLIKELY(b + extra_size >= &block[srv_sort_buf_size])) {
  782. /* The record spans two blocks. Copy the entire record
  783. to the auxiliary buffer and handle this as a special
  784. case. */
  785. avail_size = &block[srv_sort_buf_size] - b;
  786. ut_ad(avail_size < sizeof *buf);
  787. memcpy(*buf, b, avail_size);
  788. if (!row_merge_read(fd, ++(*foffs), block)) {
  789. goto err_exit;
  790. }
  791. /* Wrap around to the beginning of the buffer. */
  792. b = &block[0];
  793. /* Copy the record. */
  794. memcpy(*buf + avail_size, b, extra_size - avail_size);
  795. b += extra_size - avail_size;
  796. *mrec = *buf + extra_size;
  797. rec_init_offsets_temp(*mrec, index, offsets);
  798. data_size = rec_offs_data_size(offsets);
  799. /* These overflows should be impossible given that
  800. records are much smaller than either buffer, and
  801. the record starts near the beginning of each buffer. */
  802. ut_a(extra_size + data_size < sizeof *buf);
  803. ut_a(b + data_size < &block[srv_sort_buf_size]);
  804. /* Copy the data bytes. */
  805. memcpy(*buf + extra_size, b, data_size);
  806. b += data_size;
  807. goto func_exit;
  808. }
  809. *mrec = b + extra_size;
  810. rec_init_offsets_temp(*mrec, index, offsets);
  811. data_size = rec_offs_data_size(offsets);
  812. ut_ad(extra_size + data_size < sizeof *buf);
  813. b += extra_size + data_size;
  814. if (UNIV_LIKELY(b < &block[srv_sort_buf_size])) {
  815. /* The record fits entirely in the block.
  816. This is the normal case. */
  817. goto func_exit;
  818. }
  819. /* The record spans two blocks. Copy it to buf. */
  820. b -= extra_size + data_size;
  821. avail_size = &block[srv_sort_buf_size] - b;
  822. memcpy(*buf, b, avail_size);
  823. *mrec = *buf + extra_size;
  824. #ifdef UNIV_DEBUG
  825. /* We cannot invoke rec_offs_make_valid() here, because there
  826. are no REC_N_NEW_EXTRA_BYTES between extra_size and data_size.
  827. Similarly, rec_offs_validate() would fail, because it invokes
  828. rec_get_status(). */
  829. offsets[2] = (ulint) *mrec;
  830. offsets[3] = (ulint) index;
  831. #endif /* UNIV_DEBUG */
  832. if (!row_merge_read(fd, ++(*foffs), block)) {
  833. goto err_exit;
  834. }
  835. /* Wrap around to the beginning of the buffer. */
  836. b = &block[0];
  837. /* Copy the rest of the record. */
  838. memcpy(*buf + avail_size, b, extra_size + data_size - avail_size);
  839. b += extra_size + data_size - avail_size;
  840. func_exit:
  841. #ifdef UNIV_DEBUG
  842. if (row_merge_print_read) {
  843. fprintf(stderr, "row_merge_read %p,%p,%d,%lu ",
  844. (const void*) b, (const void*) block,
  845. fd, (ulong) *foffs);
  846. rec_print_comp(stderr, *mrec, offsets);
  847. putc('\n', stderr);
  848. }
  849. #endif /* UNIV_DEBUG */
  850. return(b);
  851. }
  852. /********************************************************************//**
  853. Write a merge record. */
  854. static
  855. void
  856. row_merge_write_rec_low(
  857. /*====================*/
  858. byte* b, /*!< out: buffer */
  859. ulint e, /*!< in: encoded extra_size */
  860. #ifdef UNIV_DEBUG
  861. ulint size, /*!< in: total size to write */
  862. int fd, /*!< in: file descriptor */
  863. ulint foffs, /*!< in: file offset */
  864. #endif /* UNIV_DEBUG */
  865. const mrec_t* mrec, /*!< in: record to write */
  866. const ulint* offsets)/*!< in: offsets of mrec */
  867. #ifndef UNIV_DEBUG
  868. # define row_merge_write_rec_low(b, e, size, fd, foffs, mrec, offsets) \
  869. row_merge_write_rec_low(b, e, mrec, offsets)
  870. #endif /* !UNIV_DEBUG */
  871. {
  872. #ifdef UNIV_DEBUG
  873. const byte* const end = b + size;
  874. ut_ad(e == rec_offs_extra_size(offsets) + 1);
  875. if (row_merge_print_write) {
  876. fprintf(stderr, "row_merge_write %p,%d,%lu ",
  877. (void*) b, fd, (ulong) foffs);
  878. rec_print_comp(stderr, mrec, offsets);
  879. putc('\n', stderr);
  880. }
  881. #endif /* UNIV_DEBUG */
  882. if (e < 0x80) {
  883. *b++ = (byte) e;
  884. } else {
  885. *b++ = (byte) (0x80 | (e >> 8));
  886. *b++ = (byte) e;
  887. }
  888. memcpy(b, mrec - rec_offs_extra_size(offsets), rec_offs_size(offsets));
  889. ut_ad(b + rec_offs_size(offsets) == end);
  890. }
  891. /********************************************************************//**
  892. Write a merge record.
  893. @return pointer to end of block, or NULL on error */
  894. static
  895. byte*
  896. row_merge_write_rec(
  897. /*================*/
  898. row_merge_block_t* block, /*!< in/out: file buffer */
  899. mrec_buf_t* buf, /*!< in/out: secondary buffer */
  900. byte* b, /*!< in: pointer to end of block */
  901. int fd, /*!< in: file descriptor */
  902. ulint* foffs, /*!< in/out: file offset */
  903. const mrec_t* mrec, /*!< in: record to write */
  904. const ulint* offsets)/*!< in: offsets of mrec */
  905. {
  906. ulint extra_size;
  907. ulint size;
  908. ulint avail_size;
  909. ut_ad(block);
  910. ut_ad(buf);
  911. ut_ad(b >= &block[0]);
  912. ut_ad(b < &block[srv_sort_buf_size]);
  913. ut_ad(mrec);
  914. ut_ad(foffs);
  915. ut_ad(mrec < &block[0] || mrec > &block[srv_sort_buf_size]);
  916. ut_ad(mrec < buf[0] || mrec > buf[1]);
  917. /* Normalize extra_size. Value 0 signals "end of list". */
  918. extra_size = rec_offs_extra_size(offsets) + 1;
  919. size = extra_size + (extra_size >= 0x80)
  920. + rec_offs_data_size(offsets);
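/* size = the 1 or 2 length byte(s) + the extra bytes + the data bytes;
this mirrors the encoding written by row_merge_buf_encode() and decoded by
row_merge_read_rec(). */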
  921. if (UNIV_UNLIKELY(b + size >= &block[srv_sort_buf_size])) {
  922. /* The record spans two blocks.
  923. Copy it to the temporary buffer first. */
  924. avail_size = &block[srv_sort_buf_size] - b;
  925. row_merge_write_rec_low(buf[0],
  926. extra_size, size, fd, *foffs,
  927. mrec, offsets);
  928. /* Copy the head of the temporary buffer, write
  929. the completed block, and copy the tail of the
  930. record to the head of the new block. */
  931. memcpy(b, buf[0], avail_size);
  932. if (!row_merge_write(fd, (*foffs)++, block)) {
  933. return(NULL);
  934. }
  935. UNIV_MEM_INVALID(&block[0], srv_sort_buf_size);
  936. /* Copy the rest. */
  937. b = &block[0];
  938. memcpy(b, buf[0] + avail_size, size - avail_size);
  939. b += size - avail_size;
  940. } else {
  941. row_merge_write_rec_low(b, extra_size, size, fd, *foffs,
  942. mrec, offsets);
  943. b += size;
  944. }
  945. return(b);
  946. }
  947. /********************************************************************//**
  948. Write an end-of-list marker.
  949. @return pointer to end of block, or NULL on error */
  950. static
  951. byte*
  952. row_merge_write_eof(
  953. /*================*/
  954. row_merge_block_t* block, /*!< in/out: file buffer */
  955. byte* b, /*!< in: pointer to end of block */
  956. int fd, /*!< in: file descriptor */
  957. ulint* foffs) /*!< in/out: file offset */
  958. {
  959. ut_ad(block);
  960. ut_ad(b >= &block[0]);
  961. ut_ad(b < &block[srv_sort_buf_size]);
  962. ut_ad(foffs);
  963. #ifdef UNIV_DEBUG
  964. if (row_merge_print_write) {
  965. fprintf(stderr, "row_merge_write %p,%p,%d,%lu EOF\n",
  966. (void*) b, (void*) block, fd, (ulong) *foffs);
  967. }
  968. #endif /* UNIV_DEBUG */
  969. *b++ = 0;
  970. UNIV_MEM_ASSERT_RW(&block[0], b - &block[0]);
  971. UNIV_MEM_ASSERT_W(&block[0], srv_sort_buf_size);
  972. #ifdef UNIV_DEBUG_VALGRIND
  973. /* The rest of the block is uninitialized. Initialize it
  974. to avoid bogus warnings. */
  975. memset(b, 0xff, &block[srv_sort_buf_size] - b);
  976. #endif /* UNIV_DEBUG_VALGRIND */
  977. if (!row_merge_write(fd, (*foffs)++, block)) {
  978. return(NULL);
  979. }
  980. UNIV_MEM_INVALID(&block[0], srv_sort_buf_size);
  981. return(&block[0]);
  982. }
  983. /********************************************************************//**
  984. Read the clustered index of the table and create temporary files
  985. containing the index entries for the indexes to be built.
  986. @return DB_SUCCESS or error */
  987. static __attribute__((nonnull(1,2,3,4,6,9,10,16), warn_unused_result))
  988. dberr_t
  989. row_merge_read_clustered_index(
  990. /*===========================*/
  991. trx_t* trx, /*!< in: transaction */
  992. struct TABLE* table, /*!< in/out: MySQL table object,
  993. for reporting erroneous records */
  994. const dict_table_t* old_table,/*!< in: table where rows are
  995. read from */
  996. const dict_table_t* new_table,/*!< in: table where indexes are
  997. created; identical to old_table
  998. unless creating a PRIMARY KEY */
  999. bool online, /*!< in: true if creating indexes
  1000. online */
  1001. dict_index_t** index, /*!< in: indexes to be created */
  1002. dict_index_t* fts_sort_idx,
  1003. /*!< in: full-text index to be created,
  1004. or NULL */
  1005. fts_psort_t* psort_info,
  1006. /*!< in: parallel sort info for
  1007. fts_sort_idx creation, or NULL */
  1008. merge_file_t* files, /*!< in: temporary files */
  1009. const ulint* key_numbers,
  1010. /*!< in: MySQL key numbers to create */
  1011. ulint n_index,/*!< in: number of indexes to create */
  1012. const dtuple_t* add_cols,
  1013. /*!< in: default values of
  1014. added columns, or NULL */
  1015. const ulint* col_map,/*!< in: mapping of old column
  1016. numbers to new ones, or NULL
  1017. if old_table == new_table */
  1018. ulint add_autoinc,
  1019. /*!< in: number of added
  1020. AUTO_INCREMENT column, or
  1021. ULINT_UNDEFINED if none is added */
  1022. ib_sequence_t& sequence,/*!< in/out: autoinc sequence */
  1023. row_merge_block_t* block, /*!< in/out: file buffer */
  1024. float pct_cost) /*!< in: percent of task weight out of total alter job */
  1025. {
  1026. dict_index_t* clust_index; /* Clustered index */
  1027. mem_heap_t* row_heap; /* Heap memory to create
  1028. clustered index tuples */
  1029. row_merge_buf_t** merge_buf; /* Temporary list for records*/
  1030. btr_pcur_t pcur; /* Cursor on the clustered
  1031. index */
  1032. mtr_t mtr; /* Mini transaction */
  1033. dberr_t err = DB_SUCCESS;/* Return code */
  1034. ulint n_nonnull = 0; /* number of columns
  1035. changed to NOT NULL */
  1036. ulint* nonnull = NULL; /* NOT NULL columns */
  1037. dict_index_t* fts_index = NULL;/* FTS index */
  1038. doc_id_t doc_id = 0;
  1039. doc_id_t max_doc_id = 0;
  1040. ibool add_doc_id = FALSE;
  1041. os_event_t fts_parallel_sort_event = NULL;
  1042. ibool fts_pll_sort = FALSE;
  1043. ib_int64_t sig_count = 0;
  1044. float curr_progress;
  1045. ib_int64_t read_rows = 0;
  1046. ib_int64_t table_total_rows;
  1047. DBUG_ENTER("row_merge_read_clustered_index");
  1048. ut_ad((old_table == new_table) == !col_map);
  1049. ut_ad(!add_cols || col_map);
  1050. table_total_rows = dict_table_get_n_rows(old_table);
  1051. if(table_total_rows == 0) {
  1052. /* We don't know total row count */
  1053. table_total_rows = 1;
  1054. }
  1055. trx->op_info = "reading clustered index";
  1056. #ifdef FTS_INTERNAL_DIAG_PRINT
  1057. DEBUG_FTS_SORT_PRINT("FTS_SORT: Start Create Index\n");
  1058. #endif
  1059. /* Create and initialize memory for record buffers */
  1060. merge_buf = static_cast<row_merge_buf_t**>(
  1061. mem_alloc(n_index * sizeof *merge_buf));
  1062. for (ulint i = 0; i < n_index; i++) {
  1063. if (index[i]->type & DICT_FTS) {
  1064. /* We are building a FT index, make sure
  1065. we have the temporary 'fts_sort_idx' */
  1066. ut_a(fts_sort_idx);
  1067. fts_index = index[i];
  1068. merge_buf[i] = row_merge_buf_create(fts_sort_idx);
  1069. add_doc_id = DICT_TF2_FLAG_IS_SET(
  1070. new_table, DICT_TF2_FTS_ADD_DOC_ID);
  1071. /* If Doc ID does not exist in the table itself,
  1072. fetch the first FTS Doc ID */
  1073. if (add_doc_id) {
  1074. fts_get_next_doc_id(
  1075. (dict_table_t*) new_table,
  1076. &doc_id);
  1077. ut_ad(doc_id > 0);
  1078. }
  1079. fts_pll_sort = TRUE;
  1080. row_fts_start_psort(psort_info);
  1081. fts_parallel_sort_event =
  1082. psort_info[0].psort_common->sort_event;
  1083. } else {
  1084. merge_buf[i] = row_merge_buf_create(index[i]);
  1085. }
  1086. }
  1087. mtr_start(&mtr);
  1088. /* Find the clustered index and create a persistent cursor
  1089. based on that. */
  1090. clust_index = dict_table_get_first_index(old_table);
  1091. btr_pcur_open_at_index_side(
  1092. true, clust_index, BTR_SEARCH_LEAF, &pcur, true, 0, &mtr);
  1093. if (old_table != new_table) {
  1094. /* The table is being rebuilt. Identify the columns
  1095. that were flagged NOT NULL in the new table, so that
  1096. we can quickly check that the records in the old table
  1097. do not violate the added NOT NULL constraints. */
  1098. nonnull = static_cast<ulint*>(
  1099. mem_alloc(dict_table_get_n_cols(new_table)
  1100. * sizeof *nonnull));
  1101. for (ulint i = 0; i < dict_table_get_n_cols(old_table); i++) {
  1102. if (dict_table_get_nth_col(old_table, i)->prtype
  1103. & DATA_NOT_NULL) {
  1104. continue;
  1105. }
  1106. const ulint j = col_map[i];
  1107. if (j == ULINT_UNDEFINED) {
  1108. /* The column was dropped. */
  1109. continue;
  1110. }
  1111. if (dict_table_get_nth_col(new_table, j)->prtype
  1112. & DATA_NOT_NULL) {
  1113. nonnull[n_nonnull++] = j;
  1114. }
  1115. }
  1116. if (!n_nonnull) {
  1117. mem_free(nonnull);
  1118. nonnull = NULL;
  1119. }
  1120. }
  1121. row_heap = mem_heap_create(sizeof(mrec_buf_t));
  1122. /* Scan the clustered index. */
  1123. for (;;) {
  1124. const rec_t* rec;
  1125. ulint* offsets;
  1126. const dtuple_t* row;
  1127. row_ext_t* ext;
  1128. page_cur_t* cur = btr_pcur_get_page_cur(&pcur);
  1129. page_cur_move_to_next(cur);
  1130. if (page_cur_is_after_last(cur)) {
  1131. if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
  1132. err = DB_INTERRUPTED;
  1133. trx->error_key_num = 0;
  1134. goto func_exit;
  1135. }
  1136. if (online && old_table != new_table) {
  1137. err = row_log_table_get_error(clust_index);
  1138. if (err != DB_SUCCESS) {
  1139. trx->error_key_num = 0;
  1140. goto func_exit;
  1141. }
  1142. }
  1143. #ifdef DBUG_OFF
  1144. # define dbug_run_purge false
  1145. #else /* DBUG_OFF */
  1146. bool dbug_run_purge = false;
  1147. #endif /* DBUG_OFF */
  1148. DBUG_EXECUTE_IF(
  1149. "ib_purge_on_create_index_page_switch",
  1150. dbug_run_purge = true;);
  1151. if (dbug_run_purge
  1152. || rw_lock_get_waiters(
  1153. dict_index_get_lock(clust_index))) {
  1154. /* There are waiters on the clustered
  1155. index tree lock, likely the purge
  1156. thread. Store and restore the cursor
  1157. position, and yield so that scanning a
  1158. large table will not starve other
  1159. threads. */
  1160. /* Store the cursor position on the last user
  1161. record on the page. */
  1162. btr_pcur_move_to_prev_on_page(&pcur);
  1163. /* Leaf pages must never be empty, unless
  1164. this is the only page in the index tree. */
  1165. ut_ad(btr_pcur_is_on_user_rec(&pcur)
  1166. || buf_block_get_page_no(
  1167. btr_pcur_get_block(&pcur))
  1168. == clust_index->page);
  1169. btr_pcur_store_position(&pcur, &mtr);
  1170. mtr_commit(&mtr);
  1171. if (dbug_run_purge) {
  1172. /* This is for testing
  1173. purposes only (see
  1174. DBUG_EXECUTE_IF above). We
  1175. signal the purge thread and
  1176. hope that the purge batch will
  1177. complete before we execute
  1178. btr_pcur_restore_position(). */
  1179. trx_purge_run();
  1180. os_thread_sleep(1000000);
  1181. }
  1182. /* Give the waiters a chance to proceed. */
  1183. os_thread_yield();
  1184. mtr_start(&mtr);
  1185. /* Restore position on the record, or its
  1186. predecessor if the record was purged
  1187. meanwhile. */
  1188. btr_pcur_restore_position(
  1189. BTR_SEARCH_LEAF, &pcur, &mtr);
  1190. /* Move to the successor of the
  1191. original record. */
  1192. if (!btr_pcur_move_to_next_user_rec(
  1193. &pcur, &mtr)) {
  1194. end_of_index:
  1195. row = NULL;
  1196. mtr_commit(&mtr);
  1197. mem_heap_free(row_heap);
  1198. if (nonnull) {
  1199. mem_free(nonnull);
  1200. }
  1201. goto write_buffers;
  1202. }
  1203. } else {
  1204. ulint next_page_no;
  1205. buf_block_t* block;
  1206. next_page_no = btr_page_get_next(
  1207. page_cur_get_page(cur), &mtr);
  1208. if (next_page_no == FIL_NULL) {
  1209. goto end_of_index;
  1210. }
  1211. block = page_cur_get_block(cur);
  1212. block = btr_block_get(
  1213. buf_block_get_space(block),
  1214. buf_block_get_zip_size(block),
  1215. next_page_no, BTR_SEARCH_LEAF,
  1216. clust_index, &mtr);
  1217. btr_leaf_page_release(page_cur_get_block(cur),
  1218. BTR_SEARCH_LEAF, &mtr);
  1219. page_cur_set_before_first(block, cur);
  1220. page_cur_move_to_next(cur);
  1221. ut_ad(!page_cur_is_after_last(cur));
  1222. }
  1223. }
  1224. rec = page_cur_get_rec(cur);
  1225. offsets = rec_get_offsets(rec, clust_index, NULL,
  1226. ULINT_UNDEFINED, &row_heap);
  1227. if (online) {
  1228. /* Perform a REPEATABLE READ.
  1229. When rebuilding the table online,
  1230. row_log_table_apply() must not see a newer
  1231. state of the table when applying the log.
  1232. This is mainly to prevent false duplicate key
  1233. errors, because the log will identify records
  1234. by the PRIMARY KEY, and also to prevent unsafe
  1235. BLOB access.
  1236. When creating a secondary index online, this
  1237. table scan must not see records that have only
  1238. been inserted to the clustered index, but have
  1239. not been written to the online_log of
  1240. index[]. If we performed READ UNCOMMITTED, it
  1241. could happen that the ADD INDEX reaches
  1242. ONLINE_INDEX_COMPLETE state between the time
  1243. the DML thread has updated the clustered index
  1244. but has not yet accessed secondary index. */
  1245. ut_ad(trx->read_view);
  1246. if (!read_view_sees_trx_id(
  1247. trx->read_view,
  1248. row_get_rec_trx_id(
  1249. rec, clust_index, offsets))) {
  1250. rec_t* old_vers;
  1251. row_vers_build_for_consistent_read(
  1252. rec, &mtr, clust_index, &offsets,
  1253. trx->read_view, &row_heap,
  1254. row_heap, &old_vers);
  1255. rec = old_vers;
  1256. if (!rec) {
  1257. continue;
  1258. }
  1259. }
  1260. if (rec_get_deleted_flag(
  1261. rec,
  1262. dict_table_is_comp(old_table))) {
  1263. /* This record was deleted in the latest
  1264. committed version, or it was deleted and
  1265. then reinserted-by-update before purge
  1266. kicked in. Skip it. */
  1267. continue;
  1268. }
  1269. ut_ad(!rec_offs_any_null_extern(rec, offsets));
  1270. } else if (rec_get_deleted_flag(
  1271. rec, dict_table_is_comp(old_table))) {
  1272. /* Skip delete-marked records.
  1273. Skipping delete-marked records will make the
  1274. created indexes unusable for transactions
  1275. whose read views were created before the index
  1276. creation completed, but preserving the history
  1277. would make it tricky to detect duplicate
  1278. keys. */
  1279. continue;
  1280. }
  1281. /* When !online, we are holding a lock on old_table, preventing
  1282. any inserts that could have written a record 'stub' before
  1283. writing out off-page columns. */
  1284. ut_ad(!rec_offs_any_null_extern(rec, offsets));
  1285. /* Build a row based on the clustered index. */
  1286. row = row_build(ROW_COPY_POINTERS, clust_index,
  1287. rec, offsets, new_table,
  1288. add_cols, col_map, &ext, row_heap);
  1289. ut_ad(row);
  1290. for (ulint i = 0; i < n_nonnull; i++) {
  1291. const dfield_t* field = &row->fields[nonnull[i]];
  1292. ut_ad(dfield_get_type(field)->prtype & DATA_NOT_NULL);
  1293. if (dfield_is_null(field)) {
  1294. err = DB_INVALID_NULL;
  1295. trx->error_key_num = 0;
  1296. goto func_exit;
  1297. }
  1298. }
  1299. /* Get the next Doc ID */
  1300. if (add_doc_id) {
  1301. doc_id++;
  1302. } else {
  1303. doc_id = 0;
  1304. }
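/* A doc_id of 0 tells row_merge_buf_add() to read the Doc ID from the row
itself rather than use a generated one (see the DICT_FTS branch there). */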
  1305. if (add_autoinc != ULINT_UNDEFINED) {
  1306. ut_ad(add_autoinc
  1307. < dict_table_get_n_user_cols(new_table));
  1308. const dfield_t* dfield;
  1309. dfield = dtuple_get_nth_field(row, add_autoinc);
  1310. if (dfield_is_null(dfield)) {
  1311. goto write_buffers;
  1312. }
  1313. const dtype_t* dtype = dfield_get_type(dfield);
  1314. byte* b = static_cast<byte*>(dfield_get_data(dfield));
  1315. if (sequence.eof()) {
  1316. err = DB_ERROR;
  1317. trx->error_key_num = 0;
  1318. ib_errf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
  1319. ER_AUTOINC_READ_FAILED, "[NULL]");
  1320. goto func_exit;
  1321. }
  1322. ulonglong value = sequence++;
  1323. switch (dtype_get_mtype(dtype)) {
  1324. case DATA_INT: {
  1325. ibool usign;
  1326. ulint len = dfield_get_len(dfield);
  1327. usign = dtype_get_prtype(dtype) & DATA_UNSIGNED;
  1328. mach_write_ulonglong(b, value, len, usign);
  1329. break;
  1330. }
  1331. case DATA_FLOAT:
  1332. mach_float_write(
  1333. b, static_cast<float>(value));
  1334. break;
  1335. case DATA_DOUBLE:
  1336. mach_double_write(
  1337. b, static_cast<double>(value));
  1338. break;
  1339. default:
  1340. ut_ad(0);
  1341. }
  1342. }
  1343. write_buffers:
  1344. /* Build all entries for all the indexes to be created
  1345. in a single scan of the clustered index. */
  1346. for (ulint i = 0; i < n_index; i++) {
  1347. row_merge_buf_t* buf = merge_buf[i];
  1348. merge_file_t* file = &files[i];
  1349. ulint rows_added = 0;
  1350. if (UNIV_LIKELY
  1351. (row && (rows_added = row_merge_buf_add(
  1352. buf, fts_index, old_table,
  1353. psort_info, row, ext, &doc_id)))) {
  1354. /* If we are creating an FTS index,
  1355. a single row can generate multiple
  1356. records for the tokenized words */
  1357. file->n_rec += rows_added;
  1358. if (doc_id > max_doc_id) {
  1359. max_doc_id = doc_id;
  1360. }
  1361. if (buf->index->type & DICT_FTS) {
  1362. /* Check if error occurs in child thread */
  1363. for (ulint j = 0; j < fts_sort_pll_degree; j++) {
  1364. if (psort_info[j].error != DB_SUCCESS) {
  1365. err = psort_info[j].error;
  1366. trx->error_key_num = i;
  1367. break;
  1368. }
  1369. }
  1370. if (err != DB_SUCCESS) {
  1371. break;
  1372. }
  1373. }
  1374. continue;
  1375. }
  1376. if (buf->index->type & DICT_FTS) {
  1377. if (!row || !doc_id) {
  1378. continue;
  1379. }
  1380. }
  1381. /* The buffer must be sufficiently large
  1382. to hold at least one record. It may only
  1383. be empty when we reach the end of the
  1384. clustered index. row_merge_buf_add()
  1385. must not have been called in this loop. */
  1386. ut_ad(buf->n_tuples || row == NULL);
  1387. /* We have enough data tuples to form a block.
  1388. Sort them and write to disk. */
  1389. if (buf->n_tuples) {
  1390. if (dict_index_is_unique(buf->index)) {
  1391. row_merge_dup_t dup = {
  1392. buf->index, table, col_map, 0};
  1393. row_merge_buf_sort(buf, &dup);
  1394. if (dup.n_dup) {
  1395. err = DB_DUPLICATE_KEY;
  1396. trx->error_key_num
  1397. = key_numbers[i];
  1398. break;
  1399. }
  1400. } else {
  1401. row_merge_buf_sort(buf, NULL);
  1402. }
  1403. } else if (online && new_table == old_table) {
  1404. /* Note the newest transaction that
  1405. modified this index when the scan was
  1406. completed. We prevent older readers
  1407. from accessing this index, to ensure
  1408. read consistency. */
  1409. trx_id_t max_trx_id;
  1410. ut_a(row == NULL);
  1411. rw_lock_x_lock(
  1412. dict_index_get_lock(buf->index));
  1413. ut_a(dict_index_get_online_status(buf->index)
  1414. == ONLINE_INDEX_CREATION);
  1415. max_trx_id = row_log_get_max_trx(buf->index);
  1416. if (max_trx_id > buf->index->trx_id) {
  1417. buf->index->trx_id = max_trx_id;
  1418. }
  1419. rw_lock_x_unlock(
  1420. dict_index_get_lock(buf->index));
  1421. }
  1422. row_merge_buf_write(buf, file, block);
  1423. if (!row_merge_write(file->fd, file->offset++,
  1424. block)) {
  1425. err = DB_TEMP_FILE_WRITE_FAILURE;
  1426. trx->error_key_num = i;
  1427. break;
  1428. }
  1429. UNIV_MEM_INVALID(&block[0], srv_sort_buf_size);
  1430. merge_buf[i] = row_merge_buf_empty(buf);
  1431. if (UNIV_LIKELY(row != NULL)) {
  1432. /* Try writing the record again, now
  1433. that the buffer has been written out
  1434. and emptied. */
  1435. if (UNIV_UNLIKELY
  1436. (!(rows_added = row_merge_buf_add(
  1437. buf, fts_index, old_table,
  1438. psort_info, row, ext,
  1439. &doc_id)))) {
  1440. /* An empty buffer should have enough
  1441. room for at least one record. */
  1442. ut_error;
  1443. }
  1444. file->n_rec += rows_added;
  1445. }
  1446. }
  1447. if (row == NULL) {
  1448. goto all_done;
  1449. }
  1450. if (err != DB_SUCCESS) {
  1451. goto func_exit;
  1452. }
  1453. mem_heap_empty(row_heap);
  1454. /* Increment innodb_onlineddl_pct_progress status variable */
  1455. read_rows++;
  1456. if(read_rows % 1000 == 0) {
  1457. /* Update progress for each 1000 rows */
  1458. curr_progress = (read_rows >= table_total_rows) ?
  1459. pct_cost :
  1460. ((pct_cost * read_rows) / table_total_rows);
  1461. /* Present 10.12% as the integer 1012 */
  1462. onlineddl_pct_progress = curr_progress * 100;
  1463. }
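/* Example (illustration only): with pct_cost = 50, read_rows = 2500 and
table_total_rows = 10000, curr_progress = 12.5 and the reported value is
1250, i.e. 12.50%. */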
  1464. }
  1465. func_exit:
  1466. mtr_commit(&mtr);
  1467. mem_heap_free(row_heap);
  1468. if (nonnull) {
  1469. mem_free(nonnull);
  1470. }
  1471. all_done:
  1472. #ifdef FTS_INTERNAL_DIAG_PRINT
  1473. DEBUG_FTS_SORT_PRINT("FTS_SORT: Complete Scan Table\n");
  1474. #endif
  1475. if (fts_pll_sort) {
  1476. bool all_exit = false;
  1477. ulint trial_count = 0;
  1478. const ulint max_trial_count = 10000;
  1479. wait_again:
  1480. /* Check if error occurs in child thread */
  1481. for (ulint j = 0; j < fts_sort_pll_degree; j++) {
  1482. if (psort_info[j].error != DB_SUCCESS) {
  1483. err = psort_info[j].error;
  1484. trx->error_key_num = j;
  1485. break;
  1486. }
  1487. }
  1488. /* Tell all children that the parent has finished scanning */
  1489. for (ulint i = 0; i < fts_sort_pll_degree; i++) {
  1490. if (err == DB_SUCCESS) {
  1491. psort_info[i].state = FTS_PARENT_COMPLETE;
  1492. } else {
  1493. psort_info[i].state = FTS_PARENT_EXITING;
  1494. }
  1495. }
  1496. /* Now wait for all children to report back as completed */
  1497. os_event_wait_time_low(fts_parallel_sort_event,
  1498. 1000000, sig_count);
  1499. for (ulint i = 0; i < fts_sort_pll_degree; i++) {
  1500. if (psort_info[i].child_status != FTS_CHILD_COMPLETE
  1501. && psort_info[i].child_status != FTS_CHILD_EXITING) {
  1502. sig_count = os_event_reset(
  1503. fts_parallel_sort_event);
  1504. goto wait_again;
  1505. }
  1506. }
  1507. /* Now all children should have completed; wait a bit until
  1508. they all finish setting the event, before we free everything.
  1509. This has a 10 second timeout */
  1510. do {
  1511. all_exit = true;
  1512. for (ulint j = 0; j < fts_sort_pll_degree; j++) {
  1513. if (psort_info[j].child_status
  1514. != FTS_CHILD_EXITING) {
  1515. all_exit = false;
  1516. os_thread_sleep(1000);
  1517. break;
  1518. }
  1519. }
  1520. trial_count++;
  1521. } while (!all_exit && trial_count < max_trial_count);
  1522. if (!all_exit) {
  1523. ut_ad(0);
  1524. ib_logf(IB_LOG_LEVEL_FATAL,
  1525. "Not all child sort threads exited"
  1526. " when creating FTS index '%s'",
  1527. fts_sort_idx->name);
  1528. }
  1529. }
  1530. #ifdef FTS_INTERNAL_DIAG_PRINT
  1531. DEBUG_FTS_SORT_PRINT("FTS_SORT: Complete Tokenization\n");
  1532. #endif
  1533. for (ulint i = 0; i < n_index; i++) {
  1534. row_merge_buf_free(merge_buf[i]);
  1535. }
  1536. row_fts_free_pll_merge_buf(psort_info);
  1537. mem_free(merge_buf);
  1538. btr_pcur_close(&pcur);
  1539. /* Update the next Doc ID we used. Table should be locked, so
  1540. no concurrent DML */
  1541. if (max_doc_id && err == DB_SUCCESS) {
  1542. /* Sync fts cache for other fts indexes to keep all
  1543. fts indexes consistent in sync_doc_id. */
  1544. err = fts_sync_table(const_cast<dict_table_t*>(new_table));
  1545. if (err == DB_SUCCESS) {
  1546. fts_update_next_doc_id(
  1547. 0, new_table, old_table->name, max_doc_id);
  1548. }
  1549. }
  1550. trx->op_info = "";
  1551. DBUG_RETURN(err);
  1552. }
  1553. /** Write a record via buffer 2 and read the next record to buffer N.
  1554. @param N number of the buffer (0 or 1)
  1555. @param INDEX record descriptor
  1556. @param AT_END statement to execute at end of input */
  1557. #define ROW_MERGE_WRITE_GET_NEXT(N, INDEX, AT_END) \
  1558. do { \
  1559. b2 = row_merge_write_rec(&block[2 * srv_sort_buf_size], \
  1560. &buf[2], b2, \
  1561. of->fd, &of->offset, \
  1562. mrec##N, offsets##N); \
  1563. if (UNIV_UNLIKELY(!b2 || ++of->n_rec > file->n_rec)) { \
  1564. goto corrupt; \
  1565. } \
  1566. b##N = row_merge_read_rec(&block[N * srv_sort_buf_size],\
  1567. &buf[N], b##N, INDEX, \
  1568. file->fd, foffs##N, \
  1569. &mrec##N, offsets##N); \
  1570. if (UNIV_UNLIKELY(!b##N)) { \
  1571. if (mrec##N) { \
  1572. goto corrupt; \
  1573. } \
  1574. AT_END; \
  1575. } \
  1576. } while (0)
  1577. /*************************************************************//**
  1578. Merge two blocks of records on disk and write a bigger block.
  1579. @return DB_SUCCESS or error code */
  1580. static __attribute__((nonnull, warn_unused_result))
  1581. dberr_t
  1582. row_merge_blocks(
  1583. /*=============*/
  1584. const row_merge_dup_t* dup, /*!< in: descriptor of
  1585. index being created */
  1586. const merge_file_t* file, /*!< in: file containing
  1587. index entries */
  1588. row_merge_block_t* block, /*!< in/out: 3 buffers */
  1589. ulint* foffs0, /*!< in/out: offset of first
  1590. source list in the file */
  1591. ulint* foffs1, /*!< in/out: offset of second
  1592. source list in the file */
  1593. merge_file_t* of) /*!< in/out: output file */
  1594. {
  1595. mem_heap_t* heap; /*!< memory heap for offsets0, offsets1 */
  1596. mrec_buf_t* buf; /*!< buffer for handling
  1597. split mrec in block[] */
  1598. const byte* b0; /*!< pointer to block[0] */
  1599. const byte* b1; /*!< pointer to block[srv_sort_buf_size] */
  1600. byte* b2; /*!< pointer to block[2 * srv_sort_buf_size] */
  1601. const mrec_t* mrec0; /*!< merge rec, points to block[0] or buf[0] */
  1602. const mrec_t* mrec1; /*!< merge rec, points to
  1603. block[srv_sort_buf_size] or buf[1] */
  1604. ulint* offsets0;/* offsets of mrec0 */
  1605. ulint* offsets1;/* offsets of mrec1 */
  1606. #ifdef UNIV_DEBUG
  1607. if (row_merge_print_block) {
  1608. fprintf(stderr,
  1609. "row_merge_blocks fd=%d ofs=%lu + fd=%d ofs=%lu"
  1610. " = fd=%d ofs=%lu\n",
  1611. file->fd, (ulong) *foffs0,
  1612. file->fd, (ulong) *foffs1,
  1613. of->fd, (ulong) of->offset);
  1614. }
  1615. #endif /* UNIV_DEBUG */
  1616. heap = row_merge_heap_create(dup->index, &buf, &offsets0, &offsets1);
  1617. /* Write a record and read the next record. Split the output
  1618. file in two halves, which can be merged on the following pass. */
  1619. if (!row_merge_read(file->fd, *foffs0, &block[0])
  1620. || !row_merge_read(file->fd, *foffs1, &block[srv_sort_buf_size])) {
  1621. corrupt:
  1622. mem_heap_free(heap);
  1623. return(DB_CORRUPTION);
  1624. }
  1625. b0 = &block[0];
  1626. b1 = &block[srv_sort_buf_size];
  1627. b2 = &block[2 * srv_sort_buf_size];
  1628. b0 = row_merge_read_rec(
  1629. &block[0], &buf[0], b0, dup->index,
  1630. file->fd, foffs0, &mrec0, offsets0);
  1631. b1 = row_merge_read_rec(
  1632. &block[srv_sort_buf_size],
  1633. &buf[srv_sort_buf_size], b1, dup->index,
  1634. file->fd, foffs1, &mrec1, offsets1);
  1635. if (UNIV_UNLIKELY(!b0 && mrec0)
  1636. || UNIV_UNLIKELY(!b1 && mrec1)) {
  1637. goto corrupt;
  1638. }
  1639. while (mrec0 && mrec1) {
  1640. switch (cmp_rec_rec_simple(
  1641. mrec0, mrec1, offsets0, offsets1,
  1642. dup->index, dup->table)) {
  1643. case 0:
  1644. mem_heap_free(heap);
  1645. return(DB_DUPLICATE_KEY);
  1646. case -1:
  1647. ROW_MERGE_WRITE_GET_NEXT(0, dup->index, goto merged);
  1648. break;
  1649. case 1:
  1650. ROW_MERGE_WRITE_GET_NEXT(1, dup->index, goto merged);
  1651. break;
  1652. default:
  1653. ut_error;
  1654. }
  1655. }
  1656. merged:
  1657. if (mrec0) {
  1658. /* append all mrec0 to output */
  1659. for (;;) {
  1660. ROW_MERGE_WRITE_GET_NEXT(0, dup->index, goto done0);
  1661. }
  1662. }
  1663. done0:
  1664. if (mrec1) {
  1665. /* append all mrec1 to output */
  1666. for (;;) {
  1667. ROW_MERGE_WRITE_GET_NEXT(1, dup->index, goto done1);
  1668. }
  1669. }
  1670. done1:
  1671. mem_heap_free(heap);
  1672. b2 = row_merge_write_eof(&block[2 * srv_sort_buf_size],
  1673. b2, of->fd, &of->offset);
  1674. return(b2 ? DB_SUCCESS : DB_CORRUPTION);
  1675. }
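#if 0
/* Illustrative sketch only (not part of the server build): the control
flow of row_merge_blocks() above, modelled on two sorted int arrays
instead of merge records and file blocks.  Duplicate detection and all
I/O handling are omitted; merge_two_runs() is a hypothetical name. */
static void
merge_two_runs(
	const int*	a,	/* in: first sorted run */
	ulint		na,	/* in: number of entries in a[] */
	const int*	b,	/* in: second sorted run */
	ulint		nb,	/* in: number of entries in b[] */
	int*		out)	/* out: merged run of na + nb entries */
{
	ulint	ia = 0;
	ulint	ib = 0;
	ulint	io = 0;

	while (ia < na && ib < nb) {
		/* Emit the smaller head and advance that input, as
		ROW_MERGE_WRITE_GET_NEXT() does in the loop above. */
		out[io++] = (a[ia] <= b[ib]) ? a[ia++] : b[ib++];
	}

	/* Drain whichever input still has entries; this corresponds
	to the "goto merged" / done0 / done1 blocks above. */
	while (ia < na) {
		out[io++] = a[ia++];
	}
	while (ib < nb) {
		out[io++] = b[ib++];
	}
}
#endif /* 0 */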
  1676. /*************************************************************//**
  1677. Copy a block of index entries.
  1678. @return TRUE on success, FALSE on failure */
  1679. static __attribute__((nonnull, warn_unused_result))
  1680. ibool
  1681. row_merge_blocks_copy(
  1682. /*==================*/
  1683. const dict_index_t* index, /*!< in: index being created */
  1684. const merge_file_t* file, /*!< in: input file */
  1685. row_merge_block_t* block, /*!< in/out: 3 buffers */
  1686. ulint* foffs0, /*!< in/out: input file offset */
  1687. merge_file_t* of) /*!< in/out: output file */
  1688. {
  1689. mem_heap_t* heap; /*!< memory heap for offsets0, offsets1 */
  1690. mrec_buf_t* buf; /*!< buffer for handling
  1691. split mrec in block[] */
  1692. const byte* b0; /*!< pointer to block[0] */
  1693. byte* b2; /*!< pointer to block[2 * srv_sort_buf_size] */
  1694. const mrec_t* mrec0; /*!< merge rec, points to block[0] */
  1695. ulint* offsets0;/* offsets of mrec0 */
  1696. ulint* offsets1;/* dummy offsets */
  1697. #ifdef UNIV_DEBUG
  1698. if (row_merge_print_block) {
  1699. fprintf(stderr,
  1700. "row_merge_blocks_copy fd=%d ofs=%lu"
  1701. " = fd=%d ofs=%lu\n",
1702. file->fd, (ulong) *foffs0,
  1703. of->fd, (ulong) of->offset);
  1704. }
  1705. #endif /* UNIV_DEBUG */
  1706. heap = row_merge_heap_create(index, &buf, &offsets0, &offsets1);
  1707. /* Write a record and read the next record. Split the output
  1708. file in two halves, which can be merged on the following pass. */
  1709. if (!row_merge_read(file->fd, *foffs0, &block[0])) {
  1710. corrupt:
  1711. mem_heap_free(heap);
  1712. return(FALSE);
  1713. }
  1714. b0 = &block[0];
  1715. b2 = &block[2 * srv_sort_buf_size];
  1716. b0 = row_merge_read_rec(&block[0], &buf[0], b0, index,
  1717. file->fd, foffs0, &mrec0, offsets0);
  1718. if (UNIV_UNLIKELY(!b0 && mrec0)) {
  1719. goto corrupt;
  1720. }
  1721. if (mrec0) {
  1722. /* append all mrec0 to output */
  1723. for (;;) {
  1724. ROW_MERGE_WRITE_GET_NEXT(0, index, goto done0);
  1725. }
  1726. }
  1727. done0:
  1728. /* The file offset points to the beginning of the last page
  1729. that has been read. Update it to point to the next block. */
  1730. (*foffs0)++;
  1731. mem_heap_free(heap);
  1732. return(row_merge_write_eof(&block[2 * srv_sort_buf_size],
  1733. b2, of->fd, &of->offset)
  1734. != NULL);
  1735. }
  1736. /*************************************************************//**
  1737. Merge disk files.
  1738. @return DB_SUCCESS or error code */
  1739. static __attribute__((nonnull))
  1740. dberr_t
  1741. row_merge(
  1742. /*======*/
  1743. trx_t* trx, /*!< in: transaction */
  1744. const row_merge_dup_t* dup, /*!< in: descriptor of
  1745. index being created */
  1746. merge_file_t* file, /*!< in/out: file containing
  1747. index entries */
  1748. row_merge_block_t* block, /*!< in/out: 3 buffers */
  1749. int* tmpfd, /*!< in/out: temporary file handle */
  1750. ulint* num_run,/*!< in/out: Number of runs remain
  1751. to be merged */
  1752. ulint* run_offset) /*!< in/out: Array contains the
  1753. first offset number for each merge
  1754. run */
  1755. {
  1756. ulint foffs0; /*!< first input offset */
  1757. ulint foffs1; /*!< second input offset */
  1758. dberr_t error; /*!< error code */
  1759. merge_file_t of; /*!< output file */
  1760. const ulint ihalf = run_offset[*num_run / 2];
  1761. /*!< half the input file */
  1762. ulint n_run = 0;
  1763. /*!< num of runs generated from this merge */
  1764. UNIV_MEM_ASSERT_W(&block[0], 3 * srv_sort_buf_size);
  1765. ut_ad(ihalf < file->offset);
  1766. of.fd = *tmpfd;
  1767. of.offset = 0;
  1768. of.n_rec = 0;
  1769. #ifdef POSIX_FADV_SEQUENTIAL
  1770. /* The input file will be read sequentially, starting from the
  1771. beginning and the middle. In Linux, the POSIX_FADV_SEQUENTIAL
  1772. affects the entire file. Each block will be read exactly once. */
  1773. posix_fadvise(file->fd, 0, 0,
  1774. POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
  1775. #endif /* POSIX_FADV_SEQUENTIAL */
  1776. /* Merge blocks to the output file. */
  1777. foffs0 = 0;
  1778. foffs1 = ihalf;
  1779. UNIV_MEM_INVALID(run_offset, *num_run * sizeof *run_offset);
  1780. for (; foffs0 < ihalf && foffs1 < file->offset; foffs0++, foffs1++) {
  1781. if (trx_is_interrupted(trx)) {
  1782. return(DB_INTERRUPTED);
  1783. }
  1784. /* Remember the offset number for this run */
  1785. run_offset[n_run++] = of.offset;
  1786. error = row_merge_blocks(dup, file, block,
  1787. &foffs0, &foffs1, &of);
  1788. if (error != DB_SUCCESS) {
  1789. return(error);
  1790. }
  1791. }
  1792. /* Copy the last blocks, if there are any. */
  1793. while (foffs0 < ihalf) {
  1794. if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
  1795. return(DB_INTERRUPTED);
  1796. }
  1797. /* Remember the offset number for this run */
  1798. run_offset[n_run++] = of.offset;
  1799. if (!row_merge_blocks_copy(dup->index, file, block,
  1800. &foffs0, &of)) {
  1801. return(DB_CORRUPTION);
  1802. }
  1803. }
  1804. ut_ad(foffs0 == ihalf);
  1805. while (foffs1 < file->offset) {
  1806. if (trx_is_interrupted(trx)) {
  1807. return(DB_INTERRUPTED);
  1808. }
  1809. /* Remember the offset number for this run */
  1810. run_offset[n_run++] = of.offset;
  1811. if (!row_merge_blocks_copy(dup->index, file, block,
  1812. &foffs1, &of)) {
  1813. return(DB_CORRUPTION);
  1814. }
  1815. }
  1816. ut_ad(foffs1 == file->offset);
  1817. if (UNIV_UNLIKELY(of.n_rec != file->n_rec)) {
  1818. return(DB_CORRUPTION);
  1819. }
  1820. ut_ad(n_run <= *num_run);
  1821. *num_run = n_run;
1822. /* Each run can contain one or more offsets. As the merge goes on,
1823. the number of runs still to be merged decreases until only a
1824. single run remains. So the number of runs is never greater than
1825. the number of offsets in the file. */
1826. ut_ad((*num_run) <= file->offset);
1827. /* The number of offsets in the output file is always less than
1828. or equal to that in the input file. */
1829. ut_ad(of.offset <= file->offset);
  1830. /* Swap file descriptors for the next pass. */
  1831. *tmpfd = file->fd;
  1832. *file = of;
  1833. UNIV_MEM_INVALID(&block[0], 3 * srv_sort_buf_size);
  1834. return(DB_SUCCESS);
  1835. }
  1836. /*************************************************************//**
  1837. Merge disk files.
  1838. @return DB_SUCCESS or error code */
  1839. UNIV_INTERN
  1840. dberr_t
  1841. row_merge_sort(
  1842. /*===========*/
  1843. trx_t* trx, /*!< in: transaction */
  1844. const row_merge_dup_t* dup, /*!< in: descriptor of
  1845. index being created */
  1846. merge_file_t* file, /*!< in/out: file containing
  1847. index entries */
  1848. row_merge_block_t* block, /*!< in/out: 3 buffers */
  1849. int* tmpfd, /*!< in/out: temporary file handle
  1850. */
  1851. const bool update_progress,
  1852. /*!< in: update progress
  1853. status variable or not */
  1854. const float pct_progress,
  1855. /*!< in: total progress percent
  1856. until now */
  1857. const float pct_cost) /*!< in: current progress percent */
  1858. {
  1859. const ulint half = file->offset / 2;
  1860. ulint num_runs;
  1861. ulint cur_run = 0;
  1862. ulint* run_offset;
  1863. dberr_t error = DB_SUCCESS;
  1864. ulint merge_count = 0;
  1865. ulint total_merge_sort_count;
  1866. float curr_progress = 0;
  1867. DBUG_ENTER("row_merge_sort");
  1868. /* Record the number of merge runs we need to perform */
  1869. num_runs = file->offset;
1870. /* Find the smallest N for which 2^N is greater than or equal to num_runs. */
1871. /* N is the number of merge-sort passes. */
  1872. total_merge_sort_count = ceil(my_log2f(num_runs));
1873. if (total_merge_sort_count == 0) {
1874. total_merge_sort_count = 1;
1875. }
1876. /* If there is at most one run, there is nothing to merge. */
  1877. if (num_runs <= 1) {
  1878. DBUG_RETURN(error);
  1879. }
  1880. /* "run_offset" records each run's first offset number */
  1881. run_offset = (ulint*) mem_alloc(file->offset * sizeof(ulint));
  1882. /* This tells row_merge() where to start for the first round
  1883. of merge. */
  1884. run_offset[half] = half;
  1885. /* The file should always contain at least one byte (the end
  1886. of file marker). Thus, it must be at least one block. */
  1887. ut_ad(file->offset > 0);
  1888. thd_progress_init(trx->mysql_thd, num_runs);
  1889. sql_print_information("InnoDB: Online DDL : merge-sorting has estimated %lu runs", num_runs);
  1890. /* Merge the runs until we have one big run */
  1891. do {
  1892. cur_run++;
  1893. /* Report progress of merge sort to MySQL for
  1894. show processlist progress field */
  1895. thd_progress_report(trx->mysql_thd, cur_run, num_runs);
  1896. sql_print_information("InnoDB: Online DDL : merge-sorting current run %lu estimated %lu runs", cur_run, num_runs);
  1897. error = row_merge(trx, dup, file, block, tmpfd,
  1898. &num_runs, run_offset);
1899. if (update_progress) {
  1900. merge_count++;
  1901. curr_progress = (merge_count >= total_merge_sort_count) ?
  1902. pct_cost :
  1903. ((pct_cost * merge_count) / total_merge_sort_count);
1904. /* presenting 10.12% as 1012 integer */
  1905. onlineddl_pct_progress = (pct_progress + curr_progress) * 100;
  1906. }
  1907. if (error != DB_SUCCESS) {
  1908. break;
  1909. }
  1910. UNIV_MEM_ASSERT_RW(run_offset, num_runs * sizeof *run_offset);
  1911. } while (num_runs > 1);
  1912. mem_free(run_offset);
  1913. thd_progress_end(trx->mysql_thd);
  1914. DBUG_RETURN(error);
  1915. }
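#if 0
/* Illustrative sketch only (not part of the server build): each call to
row_merge() above pairs up the remaining runs, leaving roughly half of
them, so the total number of passes is ceil(log2(num_runs)); that is
what total_merge_sort_count estimates via my_log2f().  For example,
num_runs = 20 shrinks as 20 -> 10 -> 5 -> 3 -> 2 -> 1, i.e. 5 passes,
and ceil(log2(20)) = 5.  merge_pass_count() is a hypothetical helper. */
static ulint
merge_pass_count(
	ulint	num_runs)	/* in: number of sorted runs to merge */
{
	ulint	passes = 0;

	while (num_runs > 1) {
		/* One pass over the file halves the run count
		(rounding up when the count is odd). */
		num_runs = (num_runs + 1) / 2;
		passes++;
	}

	return(passes);
}
#endif /* 0 */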
  1916. /*************************************************************//**
  1917. Copy externally stored columns to the data tuple. */
  1918. static __attribute__((nonnull))
  1919. void
  1920. row_merge_copy_blobs(
  1921. /*=================*/
  1922. const mrec_t* mrec, /*!< in: merge record */
  1923. const ulint* offsets,/*!< in: offsets of mrec */
  1924. ulint zip_size,/*!< in: compressed page size in bytes, or 0 */
  1925. dtuple_t* tuple, /*!< in/out: data tuple */
  1926. mem_heap_t* heap) /*!< in/out: memory heap */
  1927. {
  1928. ut_ad(rec_offs_any_extern(offsets));
  1929. for (ulint i = 0; i < dtuple_get_n_fields(tuple); i++) {
  1930. ulint len;
  1931. const void* data;
  1932. dfield_t* field = dtuple_get_nth_field(tuple, i);
  1933. if (!dfield_is_ext(field)) {
  1934. continue;
  1935. }
  1936. ut_ad(!dfield_is_null(field));
  1937. /* During the creation of a PRIMARY KEY, the table is
  1938. X-locked, and we skip copying records that have been
  1939. marked for deletion. Therefore, externally stored
  1940. columns cannot possibly be freed between the time the
  1941. BLOB pointers are read (row_merge_read_clustered_index())
  1942. and dereferenced (below). */
  1943. data = btr_rec_copy_externally_stored_field(
  1944. mrec, offsets, zip_size, i, &len, heap, NULL);
  1945. /* Because we have locked the table, any records
  1946. written by incomplete transactions must have been
  1947. rolled back already. There must not be any incomplete
  1948. BLOB columns. */
  1949. ut_a(data);
  1950. dfield_set_data(field, data, len);
  1951. }
  1952. }
  1953. /********************************************************************//**
  1954. Read sorted file containing index data tuples and insert these data
  1955. tuples to the index
  1956. @return DB_SUCCESS or error number */
  1957. static __attribute__((nonnull, warn_unused_result))
  1958. dberr_t
  1959. row_merge_insert_index_tuples(
  1960. /*==========================*/
  1961. trx_id_t trx_id, /*!< in: transaction identifier */
  1962. dict_index_t* index, /*!< in: index */
  1963. const dict_table_t* old_table,/*!< in: old table */
  1964. int fd, /*!< in: file descriptor */
  1965. row_merge_block_t* block, /*!< in/out: file buffer */
  1966. const ib_int64_t table_total_rows, /*!< in: total rows of old table */
  1967. const float pct_progress, /*!< in: total progress percent until now */
  1968. const float pct_cost) /*!< in: current progress percent */
  1969. {
  1970. const byte* b;
  1971. mem_heap_t* heap;
  1972. mem_heap_t* tuple_heap;
  1973. mem_heap_t* ins_heap;
  1974. dberr_t error = DB_SUCCESS;
  1975. ulint foffs = 0;
  1976. ulint* offsets;
  1977. mrec_buf_t* buf;
  1978. ib_int64_t inserted_rows = 0;
  1979. float curr_progress;
  1980. DBUG_ENTER("row_merge_insert_index_tuples");
  1981. ut_ad(!srv_read_only_mode);
  1982. ut_ad(!(index->type & DICT_FTS));
  1983. ut_ad(trx_id);
  1984. tuple_heap = mem_heap_create(1000);
  1985. {
  1986. ulint i = 1 + REC_OFFS_HEADER_SIZE
  1987. + dict_index_get_n_fields(index);
  1988. heap = mem_heap_create(sizeof *buf + i * sizeof *offsets);
  1989. ins_heap = mem_heap_create(sizeof *buf + i * sizeof *offsets);
  1990. offsets = static_cast<ulint*>(
  1991. mem_heap_alloc(heap, i * sizeof *offsets));
  1992. offsets[0] = i;
  1993. offsets[1] = dict_index_get_n_fields(index);
  1994. }
  1995. b = block;
  1996. if (!row_merge_read(fd, foffs, block)) {
  1997. error = DB_CORRUPTION;
  1998. } else {
  1999. buf = static_cast<mrec_buf_t*>(
  2000. mem_heap_alloc(heap, sizeof *buf));
  2001. for (;;) {
  2002. const mrec_t* mrec;
  2003. dtuple_t* dtuple;
  2004. ulint n_ext;
  2005. big_rec_t* big_rec;
  2006. rec_t* rec;
  2007. btr_cur_t cursor;
  2008. mtr_t mtr;
  2009. b = row_merge_read_rec(block, buf, b, index,
  2010. fd, &foffs, &mrec, offsets);
  2011. if (UNIV_UNLIKELY(!b)) {
  2012. /* End of list, or I/O error */
  2013. if (mrec) {
  2014. error = DB_CORRUPTION;
  2015. }
  2016. break;
  2017. }
  2018. dict_index_t* old_index
  2019. = dict_table_get_first_index(old_table);
  2020. if (dict_index_is_clust(index)
  2021. && dict_index_is_online_ddl(old_index)) {
  2022. error = row_log_table_get_error(old_index);
  2023. if (error != DB_SUCCESS) {
  2024. break;
  2025. }
  2026. }
  2027. dtuple = row_rec_to_index_entry_low(
  2028. mrec, index, offsets, &n_ext, tuple_heap);
  2029. if (!n_ext) {
  2030. /* There are no externally stored columns. */
  2031. } else {
  2032. ut_ad(dict_index_is_clust(index));
  2033. /* Off-page columns can be fetched safely
  2034. when concurrent modifications to the table
  2035. are disabled. (Purge can process delete-marked
  2036. records, but row_merge_read_clustered_index()
  2037. would have skipped them.)
  2038. When concurrent modifications are enabled,
  2039. row_merge_read_clustered_index() will
  2040. only see rows from transactions that were
  2041. committed before the ALTER TABLE started
  2042. (REPEATABLE READ).
  2043. Any modifications after the
  2044. row_merge_read_clustered_index() scan
  2045. will go through row_log_table_apply().
  2046. Any modifications to off-page columns
  2047. will be tracked by
  2048. row_log_table_blob_alloc() and
  2049. row_log_table_blob_free(). */
  2050. row_merge_copy_blobs(
  2051. mrec, offsets,
  2052. dict_table_zip_size(old_table),
  2053. dtuple, tuple_heap);
  2054. }
  2055. ut_ad(dtuple_validate(dtuple));
  2056. log_free_check();
  2057. mtr_start(&mtr);
  2058. /* Insert after the last user record. */
  2059. btr_cur_open_at_index_side(
  2060. false, index, BTR_MODIFY_LEAF,
  2061. &cursor, 0, &mtr);
  2062. page_cur_position(
  2063. page_rec_get_prev(btr_cur_get_rec(&cursor)),
  2064. btr_cur_get_block(&cursor),
  2065. btr_cur_get_page_cur(&cursor));
  2066. cursor.flag = BTR_CUR_BINARY;
  2067. #ifdef UNIV_DEBUG
  2068. /* Check that the records are inserted in order. */
  2069. rec = btr_cur_get_rec(&cursor);
  2070. if (!page_rec_is_infimum(rec)) {
  2071. ulint* rec_offsets = rec_get_offsets(
  2072. rec, index, offsets,
  2073. ULINT_UNDEFINED, &tuple_heap);
  2074. ut_ad(cmp_dtuple_rec(dtuple, rec, rec_offsets)
  2075. > 0);
  2076. }
  2077. #endif /* UNIV_DEBUG */
  2078. ulint* ins_offsets = NULL;
  2079. error = btr_cur_optimistic_insert(
  2080. BTR_NO_UNDO_LOG_FLAG | BTR_NO_LOCKING_FLAG
  2081. | BTR_KEEP_SYS_FLAG | BTR_CREATE_FLAG,
  2082. &cursor, &ins_offsets, &ins_heap,
  2083. dtuple, &rec, &big_rec, 0, NULL, &mtr);
  2084. if (error == DB_FAIL) {
  2085. ut_ad(!big_rec);
  2086. mtr_commit(&mtr);
  2087. mtr_start(&mtr);
  2088. btr_cur_open_at_index_side(
  2089. false, index, BTR_MODIFY_TREE,
  2090. &cursor, 0, &mtr);
  2091. page_cur_position(
  2092. page_rec_get_prev(btr_cur_get_rec(
  2093. &cursor)),
  2094. btr_cur_get_block(&cursor),
  2095. btr_cur_get_page_cur(&cursor));
  2096. error = btr_cur_pessimistic_insert(
  2097. BTR_NO_UNDO_LOG_FLAG
  2098. | BTR_NO_LOCKING_FLAG
  2099. | BTR_KEEP_SYS_FLAG | BTR_CREATE_FLAG,
  2100. &cursor, &ins_offsets, &ins_heap,
  2101. dtuple, &rec, &big_rec, 0, NULL, &mtr);
  2102. }
  2103. if (!dict_index_is_clust(index)) {
  2104. page_update_max_trx_id(
  2105. btr_cur_get_block(&cursor),
  2106. btr_cur_get_page_zip(&cursor),
  2107. trx_id, &mtr);
  2108. }
  2109. mtr_commit(&mtr);
  2110. if (UNIV_LIKELY_NULL(big_rec)) {
  2111. /* If the system crashes at this
  2112. point, the clustered index record will
  2113. contain a null BLOB pointer. This
  2114. should not matter, because the copied
  2115. table will be dropped on crash
  2116. recovery anyway. */
  2117. ut_ad(dict_index_is_clust(index));
  2118. ut_ad(error == DB_SUCCESS);
  2119. error = row_ins_index_entry_big_rec(
  2120. dtuple, big_rec,
  2121. ins_offsets, &ins_heap,
  2122. index, NULL, __FILE__, __LINE__);
  2123. dtuple_convert_back_big_rec(
  2124. index, dtuple, big_rec);
  2125. }
  2126. if (error != DB_SUCCESS) {
  2127. goto err_exit;
  2128. }
  2129. mem_heap_empty(tuple_heap);
  2130. mem_heap_empty(ins_heap);
  2131. /* Increment innodb_onlineddl_pct_progress status variable */
  2132. inserted_rows++;
2133. if (inserted_rows % 1000 == 0) {
  2134. /* Update progress for each 1000 rows */
  2135. curr_progress = (inserted_rows >= table_total_rows ||
  2136. table_total_rows <= 0) ?
  2137. pct_cost :
  2138. ((pct_cost * inserted_rows) / table_total_rows);
2139. /* presenting 10.12% as 1012 integer */
  2140. onlineddl_pct_progress = (pct_progress + curr_progress) * 100;
  2141. }
  2142. }
  2143. }
  2144. err_exit:
  2145. mem_heap_free(tuple_heap);
  2146. mem_heap_free(ins_heap);
  2147. mem_heap_free(heap);
  2148. DBUG_RETURN(error);
  2149. }
  2150. /*********************************************************************//**
  2151. Sets an exclusive lock on a table, for the duration of creating indexes.
  2152. @return error code or DB_SUCCESS */
  2153. UNIV_INTERN
  2154. dberr_t
  2155. row_merge_lock_table(
  2156. /*=================*/
  2157. trx_t* trx, /*!< in/out: transaction */
  2158. dict_table_t* table, /*!< in: table to lock */
  2159. enum lock_mode mode) /*!< in: LOCK_X or LOCK_S */
  2160. {
  2161. mem_heap_t* heap;
  2162. que_thr_t* thr;
  2163. dberr_t err;
  2164. sel_node_t* node;
  2165. ut_ad(!srv_read_only_mode);
  2166. ut_ad(mode == LOCK_X || mode == LOCK_S);
  2167. heap = mem_heap_create(512);
  2168. trx->op_info = "setting table lock for creating or dropping index";
  2169. node = sel_node_create(heap);
  2170. thr = pars_complete_graph_for_exec(node, trx, heap);
  2171. thr->graph->state = QUE_FORK_ACTIVE;
  2172. /* We use the select query graph as the dummy graph needed
  2173. in the lock module call */
  2174. thr = static_cast<que_thr_t*>(
  2175. que_fork_get_first_thr(
  2176. static_cast<que_fork_t*>(que_node_get_parent(thr))));
  2177. que_thr_move_to_run_state_for_mysql(thr, trx);
  2178. run_again:
  2179. thr->run_node = thr;
  2180. thr->prev_node = thr->common.parent;
  2181. err = lock_table(0, table, mode, thr);
  2182. trx->error_state = err;
  2183. if (UNIV_LIKELY(err == DB_SUCCESS)) {
  2184. que_thr_stop_for_mysql_no_error(thr, trx);
  2185. } else {
  2186. que_thr_stop_for_mysql(thr);
  2187. if (err != DB_QUE_THR_SUSPENDED) {
  2188. bool was_lock_wait;
  2189. was_lock_wait = row_mysql_handle_errors(
  2190. &err, trx, thr, NULL);
  2191. if (was_lock_wait) {
  2192. goto run_again;
  2193. }
  2194. } else {
  2195. que_thr_t* run_thr;
  2196. que_node_t* parent;
  2197. parent = que_node_get_parent(thr);
  2198. run_thr = que_fork_start_command(
  2199. static_cast<que_fork_t*>(parent));
  2200. ut_a(run_thr == thr);
  2201. /* There was a lock wait but the thread was not
  2202. in a ready to run or running state. */
  2203. trx->error_state = DB_LOCK_WAIT;
  2204. goto run_again;
  2205. }
  2206. }
  2207. que_graph_free(thr->graph);
  2208. trx->op_info = "";
  2209. return(err);
  2210. }
  2211. /*********************************************************************//**
  2212. Drop an index that was created before an error occurred.
  2213. The data dictionary must have been locked exclusively by the caller,
  2214. because the transaction will not be committed. */
  2215. static
  2216. void
  2217. row_merge_drop_index_dict(
  2218. /*======================*/
  2219. trx_t* trx, /*!< in/out: dictionary transaction */
  2220. index_id_t index_id)/*!< in: index identifier */
  2221. {
  2222. static const char sql[] =
  2223. "PROCEDURE DROP_INDEX_PROC () IS\n"
  2224. "BEGIN\n"
  2225. "DELETE FROM SYS_FIELDS WHERE INDEX_ID=:indexid;\n"
  2226. "DELETE FROM SYS_INDEXES WHERE ID=:indexid;\n"
  2227. "END;\n";
  2228. dberr_t error;
  2229. pars_info_t* info;
  2230. ut_ad(!srv_read_only_mode);
  2231. ut_ad(mutex_own(&dict_sys->mutex));
  2232. ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
  2233. ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
  2234. #ifdef UNIV_SYNC_DEBUG
  2235. ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
  2236. #endif /* UNIV_SYNC_DEBUG */
  2237. info = pars_info_create();
  2238. pars_info_add_ull_literal(info, "indexid", index_id);
  2239. trx->op_info = "dropping index from dictionary";
  2240. error = que_eval_sql(info, sql, FALSE, trx);
  2241. if (error != DB_SUCCESS) {
  2242. /* Even though we ensure that DDL transactions are WAIT
  2243. and DEADLOCK free, we could encounter other errors e.g.,
  2244. DB_TOO_MANY_CONCURRENT_TRXS. */
  2245. trx->error_state = DB_SUCCESS;
  2246. ut_print_timestamp(stderr);
  2247. fprintf(stderr, " InnoDB: Error: row_merge_drop_index_dict "
  2248. "failed with error code: %u.\n", (unsigned) error);
  2249. }
  2250. trx->op_info = "";
  2251. }
  2252. /*********************************************************************//**
  2253. Drop indexes that were created before an error occurred.
  2254. The data dictionary must have been locked exclusively by the caller,
  2255. because the transaction will not be committed. */
  2256. UNIV_INTERN
  2257. void
  2258. row_merge_drop_indexes_dict(
  2259. /*========================*/
  2260. trx_t* trx, /*!< in/out: dictionary transaction */
  2261. table_id_t table_id)/*!< in: table identifier */
  2262. {
  2263. static const char sql[] =
  2264. "PROCEDURE DROP_INDEXES_PROC () IS\n"
  2265. "ixid CHAR;\n"
  2266. "found INT;\n"
  2267. "DECLARE CURSOR index_cur IS\n"
  2268. " SELECT ID FROM SYS_INDEXES\n"
  2269. " WHERE TABLE_ID=:tableid AND\n"
  2270. " SUBSTR(NAME,0,1)='" TEMP_INDEX_PREFIX_STR "'\n"
  2271. "FOR UPDATE;\n"
  2272. "BEGIN\n"
  2273. "found := 1;\n"
  2274. "OPEN index_cur;\n"
  2275. "WHILE found = 1 LOOP\n"
  2276. " FETCH index_cur INTO ixid;\n"
  2277. " IF (SQL % NOTFOUND) THEN\n"
  2278. " found := 0;\n"
  2279. " ELSE\n"
  2280. " DELETE FROM SYS_FIELDS WHERE INDEX_ID=ixid;\n"
  2281. " DELETE FROM SYS_INDEXES WHERE CURRENT OF index_cur;\n"
  2282. " END IF;\n"
  2283. "END LOOP;\n"
  2284. "CLOSE index_cur;\n"
  2285. "END;\n";
  2286. dberr_t error;
  2287. pars_info_t* info;
  2288. ut_ad(!srv_read_only_mode);
  2289. ut_ad(mutex_own(&dict_sys->mutex));
  2290. ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
  2291. ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
  2292. #ifdef UNIV_SYNC_DEBUG
  2293. ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
  2294. #endif /* UNIV_SYNC_DEBUG */
  2295. /* It is possible that table->n_ref_count > 1 when
  2296. locked=TRUE. In this case, all code that should have an open
2297. handle to the table should be waiting for the next statement to execute,
  2298. or waiting for a meta-data lock.
  2299. A concurrent purge will be prevented by dict_operation_lock. */
  2300. info = pars_info_create();
  2301. pars_info_add_ull_literal(info, "tableid", table_id);
  2302. trx->op_info = "dropping indexes";
  2303. error = que_eval_sql(info, sql, FALSE, trx);
  2304. if (error != DB_SUCCESS) {
  2305. /* Even though we ensure that DDL transactions are WAIT
  2306. and DEADLOCK free, we could encounter other errors e.g.,
  2307. DB_TOO_MANY_CONCURRENT_TRXS. */
  2308. trx->error_state = DB_SUCCESS;
  2309. ut_print_timestamp(stderr);
  2310. fprintf(stderr, " InnoDB: Error: row_merge_drop_indexes_dict "
  2311. "failed with error code: %u.\n", (unsigned) error);
  2312. }
  2313. trx->op_info = "";
  2314. }
  2315. /*********************************************************************//**
  2316. Drop indexes that were created before an error occurred.
  2317. The data dictionary must have been locked exclusively by the caller,
  2318. because the transaction will not be committed. */
  2319. UNIV_INTERN
  2320. void
  2321. row_merge_drop_indexes(
  2322. /*===================*/
  2323. trx_t* trx, /*!< in/out: dictionary transaction */
  2324. dict_table_t* table, /*!< in/out: table containing the indexes */
  2325. ibool locked) /*!< in: TRUE=table locked,
  2326. FALSE=may need to do a lazy drop */
  2327. {
  2328. dict_index_t* index;
  2329. dict_index_t* next_index;
  2330. ut_ad(!srv_read_only_mode);
  2331. ut_ad(mutex_own(&dict_sys->mutex));
  2332. ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
  2333. ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
  2334. #ifdef UNIV_SYNC_DEBUG
  2335. ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
  2336. #endif /* UNIV_SYNC_DEBUG */
  2337. index = dict_table_get_first_index(table);
  2338. ut_ad(dict_index_is_clust(index));
  2339. ut_ad(dict_index_get_online_status(index) == ONLINE_INDEX_COMPLETE);
  2340. /* the caller should have an open handle to the table */
  2341. ut_ad(table->n_ref_count >= 1);
  2342. /* It is possible that table->n_ref_count > 1 when
  2343. locked=TRUE. In this case, all code that should have an open
2344. handle to the table should be waiting for the next statement to execute,
  2345. or waiting for a meta-data lock.
  2346. A concurrent purge will be prevented by dict_operation_lock. */
  2347. if (!locked && table->n_ref_count > 1) {
  2348. /* We will have to drop the indexes later, when the
  2349. table is guaranteed to be no longer in use. Mark the
  2350. indexes as incomplete and corrupted, so that other
  2351. threads will stop using them. Let dict_table_close()
  2352. or crash recovery or the next invocation of
  2353. prepare_inplace_alter_table() take care of dropping
  2354. the indexes. */
  2355. while ((index = dict_table_get_next_index(index)) != NULL) {
  2356. ut_ad(!dict_index_is_clust(index));
  2357. switch (dict_index_get_online_status(index)) {
  2358. case ONLINE_INDEX_ABORTED_DROPPED:
  2359. continue;
  2360. case ONLINE_INDEX_COMPLETE:
  2361. if (*index->name != TEMP_INDEX_PREFIX) {
  2362. /* Do nothing to already
  2363. published indexes. */
  2364. } else if (index->type & DICT_FTS) {
  2365. /* Drop a completed FULLTEXT
  2366. index, due to a timeout during
  2367. MDL upgrade for
  2368. commit_inplace_alter_table().
  2369. Because only concurrent reads
  2370. are allowed (and they are not
  2371. seeing this index yet) we
  2372. are safe to drop the index. */
  2373. dict_index_t* prev = UT_LIST_GET_PREV(
  2374. indexes, index);
  2375. /* At least there should be
  2376. the clustered index before
  2377. this one. */
  2378. ut_ad(prev);
  2379. ut_a(table->fts);
  2380. fts_drop_index(table, index, trx);
  2381. /* Since
  2382. INNOBASE_SHARE::idx_trans_tbl
  2383. is shared between all open
  2384. ha_innobase handles to this
  2385. table, no thread should be
  2386. accessing this dict_index_t
  2387. object. Also, we should be
  2388. holding LOCK=SHARED MDL on the
  2389. table even after the MDL
  2390. upgrade timeout. */
  2391. /* We can remove a DICT_FTS
  2392. index from the cache, because
  2393. we do not allow ADD FULLTEXT INDEX
  2394. with LOCK=NONE. If we allowed that,
  2395. we should exclude FTS entries from
  2396. prebuilt->ins_node->entry_list
  2397. in ins_node_create_entry_list(). */
  2398. dict_index_remove_from_cache(
  2399. table, index);
  2400. index = prev;
  2401. } else {
  2402. rw_lock_x_lock(
  2403. dict_index_get_lock(index));
  2404. dict_index_set_online_status(
  2405. index, ONLINE_INDEX_ABORTED);
  2406. index->type |= DICT_CORRUPT;
  2407. table->drop_aborted = TRUE;
  2408. goto drop_aborted;
  2409. }
  2410. continue;
  2411. case ONLINE_INDEX_CREATION:
  2412. rw_lock_x_lock(dict_index_get_lock(index));
  2413. ut_ad(*index->name == TEMP_INDEX_PREFIX);
  2414. row_log_abort_sec(index);
  2415. drop_aborted:
  2416. rw_lock_x_unlock(dict_index_get_lock(index));
  2417. DEBUG_SYNC_C("merge_drop_index_after_abort");
  2418. /* covered by dict_sys->mutex */
  2419. MONITOR_INC(MONITOR_BACKGROUND_DROP_INDEX);
  2420. /* fall through */
  2421. case ONLINE_INDEX_ABORTED:
  2422. /* Drop the index tree from the
  2423. data dictionary and free it from
  2424. the tablespace, but keep the object
  2425. in the data dictionary cache. */
  2426. row_merge_drop_index_dict(trx, index->id);
  2427. rw_lock_x_lock(dict_index_get_lock(index));
  2428. dict_index_set_online_status(
  2429. index, ONLINE_INDEX_ABORTED_DROPPED);
  2430. rw_lock_x_unlock(dict_index_get_lock(index));
  2431. table->drop_aborted = TRUE;
  2432. continue;
  2433. }
  2434. ut_error;
  2435. }
  2436. return;
  2437. }
  2438. row_merge_drop_indexes_dict(trx, table->id);
  2439. /* Invalidate all row_prebuilt_t::ins_graph that are referring
  2440. to this table. That is, force row_get_prebuilt_insert_row() to
  2441. rebuild prebuilt->ins_node->entry_list). */
  2442. ut_ad(table->def_trx_id <= trx->id);
  2443. table->def_trx_id = trx->id;
  2444. next_index = dict_table_get_next_index(index);
  2445. while ((index = next_index) != NULL) {
  2446. /* read the next pointer before freeing the index */
  2447. next_index = dict_table_get_next_index(index);
  2448. ut_ad(!dict_index_is_clust(index));
  2449. if (*index->name == TEMP_INDEX_PREFIX) {
  2450. /* If it is FTS index, drop from table->fts
  2451. and also drop its auxiliary tables */
  2452. if (index->type & DICT_FTS) {
  2453. ut_a(table->fts);
  2454. fts_drop_index(table, index, trx);
  2455. }
  2456. switch (dict_index_get_online_status(index)) {
  2457. case ONLINE_INDEX_CREATION:
  2458. /* This state should only be possible
  2459. when prepare_inplace_alter_table() fails
  2460. after invoking row_merge_create_index().
  2461. In inplace_alter_table(),
  2462. row_merge_build_indexes()
  2463. should never leave the index in this state.
  2464. It would invoke row_log_abort_sec() on
  2465. failure. */
  2466. case ONLINE_INDEX_COMPLETE:
  2467. /* In these cases, we are able to drop
  2468. the index straight. The DROP INDEX was
  2469. never deferred. */
  2470. break;
  2471. case ONLINE_INDEX_ABORTED:
  2472. case ONLINE_INDEX_ABORTED_DROPPED:
  2473. /* covered by dict_sys->mutex */
  2474. MONITOR_DEC(MONITOR_BACKGROUND_DROP_INDEX);
  2475. }
  2476. dict_index_remove_from_cache(table, index);
  2477. }
  2478. }
  2479. table->drop_aborted = FALSE;
  2480. ut_d(dict_table_check_for_dup_indexes(table, CHECK_ALL_COMPLETE));
  2481. }
  2482. /*********************************************************************//**
  2483. Drop all partially created indexes during crash recovery. */
  2484. UNIV_INTERN
  2485. void
  2486. row_merge_drop_temp_indexes(void)
  2487. /*=============================*/
  2488. {
  2489. static const char sql[] =
  2490. "PROCEDURE DROP_TEMP_INDEXES_PROC () IS\n"
  2491. "ixid CHAR;\n"
  2492. "found INT;\n"
  2493. "DECLARE CURSOR index_cur IS\n"
  2494. " SELECT ID FROM SYS_INDEXES\n"
  2495. " WHERE SUBSTR(NAME,0,1)='" TEMP_INDEX_PREFIX_STR "'\n"
  2496. "FOR UPDATE;\n"
  2497. "BEGIN\n"
  2498. "found := 1;\n"
  2499. "OPEN index_cur;\n"
  2500. "WHILE found = 1 LOOP\n"
  2501. " FETCH index_cur INTO ixid;\n"
  2502. " IF (SQL % NOTFOUND) THEN\n"
  2503. " found := 0;\n"
  2504. " ELSE\n"
  2505. " DELETE FROM SYS_FIELDS WHERE INDEX_ID=ixid;\n"
  2506. " DELETE FROM SYS_INDEXES WHERE CURRENT OF index_cur;\n"
  2507. " END IF;\n"
  2508. "END LOOP;\n"
  2509. "CLOSE index_cur;\n"
  2510. "END;\n";
  2511. trx_t* trx;
  2512. dberr_t error;
  2513. /* Load the table definitions that contain partially defined
  2514. indexes, so that the data dictionary information can be checked
  2515. when accessing the tablename.ibd files. */
  2516. trx = trx_allocate_for_background();
  2517. trx->op_info = "dropping partially created indexes";
  2518. row_mysql_lock_data_dictionary(trx);
  2519. /* Ensure that this transaction will be rolled back and locks
  2520. will be released, if the server gets killed before the commit
  2521. gets written to the redo log. */
  2522. trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
  2523. trx->op_info = "dropping indexes";
  2524. error = que_eval_sql(NULL, sql, FALSE, trx);
  2525. if (error != DB_SUCCESS) {
  2526. /* Even though we ensure that DDL transactions are WAIT
  2527. and DEADLOCK free, we could encounter other errors e.g.,
  2528. DB_TOO_MANY_CONCURRENT_TRXS. */
  2529. trx->error_state = DB_SUCCESS;
  2530. ut_print_timestamp(stderr);
  2531. fprintf(stderr, " InnoDB: Error: row_merge_drop_temp_indexes "
  2532. "failed with error code: %u.\n", (unsigned) error);
  2533. }
  2534. trx_commit_for_mysql(trx);
  2535. row_mysql_unlock_data_dictionary(trx);
  2536. trx_free_for_background(trx);
  2537. }
  2538. /*********************************************************************//**
2539. Create a temporary merge file and, if UNIV_PFS_IO is defined, register
2540. the file descriptor with Performance Schema.
  2541. @return file descriptor, or -1 on failure */
  2542. UNIV_INTERN
  2543. int
  2544. row_merge_file_create_low(void)
  2545. /*===========================*/
  2546. {
  2547. int fd;
  2548. #ifdef UNIV_PFS_IO
  2549. /* This temp file open does not go through normal
  2550. file APIs, add instrumentation to register with
  2551. performance schema */
  2552. struct PSI_file_locker* locker = NULL;
  2553. PSI_file_locker_state state;
  2554. register_pfs_file_open_begin(&state, locker, innodb_file_temp_key,
  2555. PSI_FILE_OPEN,
  2556. "Innodb Merge Temp File",
  2557. __FILE__, __LINE__);
  2558. #endif
  2559. fd = innobase_mysql_tmpfile();
  2560. #ifdef UNIV_PFS_IO
  2561. register_pfs_file_open_end(locker, fd);
  2562. #endif
  2563. if (fd < 0) {
  2564. ib_logf(IB_LOG_LEVEL_ERROR,
  2565. "Cannot create temporary merge file");
  2566. return (-1);
  2567. }
  2568. return(fd);
  2569. }
  2570. /*********************************************************************//**
  2571. Create a merge file.
  2572. @return file descriptor, or -1 on failure */
  2573. UNIV_INTERN
  2574. int
  2575. row_merge_file_create(
  2576. /*==================*/
  2577. merge_file_t* merge_file) /*!< out: merge file structure */
  2578. {
  2579. merge_file->fd = row_merge_file_create_low();
  2580. merge_file->offset = 0;
  2581. merge_file->n_rec = 0;
  2582. if (merge_file->fd >= 0) {
  2583. if (srv_disable_sort_file_cache) {
  2584. os_file_set_nocache(merge_file->fd,
  2585. "row0merge.cc", "sort");
  2586. }
  2587. }
  2588. return(merge_file->fd);
  2589. }
  2590. /*********************************************************************//**
2591. Destroy a merge file and, if UNIV_PFS_IO is defined, de-register the
2592. file from Performance Schema. */
  2593. UNIV_INTERN
  2594. void
  2595. row_merge_file_destroy_low(
  2596. /*=======================*/
  2597. int fd) /*!< in: merge file descriptor */
  2598. {
  2599. #ifdef UNIV_PFS_IO
  2600. struct PSI_file_locker* locker = NULL;
  2601. PSI_file_locker_state state;
  2602. register_pfs_file_io_begin(&state, locker,
  2603. fd, 0, PSI_FILE_CLOSE,
  2604. __FILE__, __LINE__);
  2605. #endif
  2606. if (fd >= 0) {
  2607. close(fd);
  2608. }
  2609. #ifdef UNIV_PFS_IO
  2610. register_pfs_file_io_end(locker, 0);
  2611. #endif
  2612. }
  2613. /*********************************************************************//**
  2614. Destroy a merge file. */
  2615. UNIV_INTERN
  2616. void
  2617. row_merge_file_destroy(
  2618. /*===================*/
  2619. merge_file_t* merge_file) /*!< in/out: merge file structure */
  2620. {
  2621. ut_ad(!srv_read_only_mode);
  2622. if (merge_file->fd != -1) {
  2623. row_merge_file_destroy_low(merge_file->fd);
  2624. merge_file->fd = -1;
  2625. }
  2626. }
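#if 0
/* Illustrative usage sketch only (not part of the server build):
a merge_file_t wraps a raw OS file descriptor, so every successful
row_merge_file_create() must be paired with row_merge_file_destroy(),
which also resets fd to -1 so that a second destroy is harmless.
merge_file_example() is a hypothetical caller. */
static dberr_t
merge_file_example(void)
{
	merge_file_t	file;

	if (row_merge_file_create(&file) < 0) {
		return(DB_OUT_OF_MEMORY);
	}

	/* ... write and read sorted blocks through file.fd ... */

	row_merge_file_destroy(&file);

	return(DB_SUCCESS);
}
#endif /* 0 */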
  2627. /*********************************************************************//**
  2628. Rename an index in the dictionary that was created. The data
  2629. dictionary must have been locked exclusively by the caller, because
  2630. the transaction will not be committed.
  2631. @return DB_SUCCESS if all OK */
  2632. UNIV_INTERN
  2633. dberr_t
  2634. row_merge_rename_index_to_add(
  2635. /*==========================*/
  2636. trx_t* trx, /*!< in/out: transaction */
  2637. table_id_t table_id, /*!< in: table identifier */
  2638. index_id_t index_id) /*!< in: index identifier */
  2639. {
  2640. dberr_t err = DB_SUCCESS;
  2641. pars_info_t* info = pars_info_create();
  2642. /* We use the private SQL parser of Innobase to generate the
  2643. query graphs needed in renaming indexes. */
  2644. static const char rename_index[] =
  2645. "PROCEDURE RENAME_INDEX_PROC () IS\n"
  2646. "BEGIN\n"
  2647. "UPDATE SYS_INDEXES SET NAME=SUBSTR(NAME,1,LENGTH(NAME)-1)\n"
  2648. "WHERE TABLE_ID = :tableid AND ID = :indexid;\n"
  2649. "END;\n";
  2650. ut_ad(trx);
  2651. ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);
  2652. ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
  2653. trx->op_info = "renaming index to add";
  2654. pars_info_add_ull_literal(info, "tableid", table_id);
  2655. pars_info_add_ull_literal(info, "indexid", index_id);
  2656. err = que_eval_sql(info, rename_index, FALSE, trx);
  2657. if (err != DB_SUCCESS) {
  2658. /* Even though we ensure that DDL transactions are WAIT
  2659. and DEADLOCK free, we could encounter other errors e.g.,
  2660. DB_TOO_MANY_CONCURRENT_TRXS. */
  2661. trx->error_state = DB_SUCCESS;
  2662. ut_print_timestamp(stderr);
  2663. fprintf(stderr,
  2664. " InnoDB: Error: row_merge_rename_index_to_add "
  2665. "failed with error code: %u.\n", (unsigned) err);
  2666. }
  2667. trx->op_info = "";
  2668. return(err);
  2669. }
  2670. /*********************************************************************//**
  2671. Rename an index in the dictionary that is to be dropped. The data
  2672. dictionary must have been locked exclusively by the caller, because
  2673. the transaction will not be committed.
  2674. @return DB_SUCCESS if all OK */
  2675. UNIV_INTERN
  2676. dberr_t
  2677. row_merge_rename_index_to_drop(
  2678. /*===========================*/
  2679. trx_t* trx, /*!< in/out: transaction */
  2680. table_id_t table_id, /*!< in: table identifier */
  2681. index_id_t index_id) /*!< in: index identifier */
  2682. {
  2683. dberr_t err;
  2684. pars_info_t* info = pars_info_create();
  2685. ut_ad(!srv_read_only_mode);
  2686. /* We use the private SQL parser of Innobase to generate the
  2687. query graphs needed in renaming indexes. */
  2688. static const char rename_index[] =
  2689. "PROCEDURE RENAME_INDEX_PROC () IS\n"
  2690. "BEGIN\n"
  2691. "UPDATE SYS_INDEXES SET NAME=CONCAT('"
  2692. TEMP_INDEX_PREFIX_STR "',NAME)\n"
  2693. "WHERE TABLE_ID = :tableid AND ID = :indexid;\n"
  2694. "END;\n";
  2695. ut_ad(trx);
  2696. ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);
  2697. ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
  2698. trx->op_info = "renaming index to drop";
  2699. pars_info_add_ull_literal(info, "tableid", table_id);
  2700. pars_info_add_ull_literal(info, "indexid", index_id);
  2701. err = que_eval_sql(info, rename_index, FALSE, trx);
  2702. if (err != DB_SUCCESS) {
  2703. /* Even though we ensure that DDL transactions are WAIT
  2704. and DEADLOCK free, we could encounter other errors e.g.,
  2705. DB_TOO_MANY_CONCURRENT_TRXS. */
  2706. trx->error_state = DB_SUCCESS;
  2707. ut_print_timestamp(stderr);
  2708. fprintf(stderr,
  2709. " InnoDB: Error: row_merge_rename_index_to_drop "
  2710. "failed with error code: %u.\n", (unsigned) err);
  2711. }
  2712. trx->op_info = "";
  2713. return(err);
  2714. }
  2715. /*********************************************************************//**
  2716. Provide a new pathname for a table that is being renamed if it belongs to
  2717. a file-per-table tablespace. The caller is responsible for freeing the
  2718. memory allocated for the return value.
  2719. @return new pathname of tablespace file, or NULL if space = 0 */
  2720. UNIV_INTERN
  2721. char*
  2722. row_make_new_pathname(
  2723. /*==================*/
  2724. dict_table_t* table, /*!< in: table to be renamed */
  2725. const char* new_name) /*!< in: new name */
  2726. {
  2727. char* new_path;
  2728. char* old_path;
  2729. ut_ad(table->space != TRX_SYS_SPACE);
  2730. old_path = fil_space_get_first_path(table->space);
  2731. ut_a(old_path);
  2732. new_path = os_file_make_new_pathname(old_path, new_name);
  2733. mem_free(old_path);
  2734. return(new_path);
  2735. }
  2736. /*********************************************************************//**
  2737. Rename the tables in the data dictionary. The data dictionary must
  2738. have been locked exclusively by the caller, because the transaction
  2739. will not be committed.
  2740. @return error code or DB_SUCCESS */
  2741. UNIV_INTERN
  2742. dberr_t
  2743. row_merge_rename_tables_dict(
  2744. /*=========================*/
  2745. dict_table_t* old_table, /*!< in/out: old table, renamed to
  2746. tmp_name */
  2747. dict_table_t* new_table, /*!< in/out: new table, renamed to
  2748. old_table->name */
  2749. const char* tmp_name, /*!< in: new name for old_table */
  2750. trx_t* trx) /*!< in/out: dictionary transaction */
  2751. {
  2752. dberr_t err = DB_ERROR;
  2753. pars_info_t* info;
  2754. ut_ad(!srv_read_only_mode);
  2755. ut_ad(old_table != new_table);
  2756. ut_ad(mutex_own(&dict_sys->mutex));
  2757. ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);
  2758. ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_TABLE
  2759. || trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
  2760. trx->op_info = "renaming tables";
  2761. /* We use the private SQL parser of Innobase to generate the query
  2762. graphs needed in updating the dictionary data in system tables. */
  2763. info = pars_info_create();
  2764. pars_info_add_str_literal(info, "new_name", new_table->name);
  2765. pars_info_add_str_literal(info, "old_name", old_table->name);
  2766. pars_info_add_str_literal(info, "tmp_name", tmp_name);
  2767. err = que_eval_sql(info,
  2768. "PROCEDURE RENAME_TABLES () IS\n"
  2769. "BEGIN\n"
  2770. "UPDATE SYS_TABLES SET NAME = :tmp_name\n"
  2771. " WHERE NAME = :old_name;\n"
  2772. "UPDATE SYS_TABLES SET NAME = :old_name\n"
  2773. " WHERE NAME = :new_name;\n"
  2774. "END;\n", FALSE, trx);
  2775. /* Update SYS_TABLESPACES and SYS_DATAFILES if the old
  2776. table is in a non-system tablespace where space > 0. */
  2777. if (err == DB_SUCCESS
  2778. && old_table->space != TRX_SYS_SPACE
  2779. && !old_table->ibd_file_missing) {
  2780. /* Make pathname to update SYS_DATAFILES. */
  2781. char* tmp_path = row_make_new_pathname(old_table, tmp_name);
  2782. info = pars_info_create();
  2783. pars_info_add_str_literal(info, "tmp_name", tmp_name);
  2784. pars_info_add_str_literal(info, "tmp_path", tmp_path);
  2785. pars_info_add_int4_literal(info, "old_space",
  2786. (lint) old_table->space);
  2787. err = que_eval_sql(info,
  2788. "PROCEDURE RENAME_OLD_SPACE () IS\n"
  2789. "BEGIN\n"
  2790. "UPDATE SYS_TABLESPACES"
  2791. " SET NAME = :tmp_name\n"
  2792. " WHERE SPACE = :old_space;\n"
  2793. "UPDATE SYS_DATAFILES"
  2794. " SET PATH = :tmp_path\n"
  2795. " WHERE SPACE = :old_space;\n"
  2796. "END;\n", FALSE, trx);
  2797. mem_free(tmp_path);
  2798. }
  2799. /* Update SYS_TABLESPACES and SYS_DATAFILES if the new
  2800. table is in a non-system tablespace where space > 0. */
  2801. if (err == DB_SUCCESS && new_table->space != TRX_SYS_SPACE) {
  2802. /* Make pathname to update SYS_DATAFILES. */
  2803. char* old_path = row_make_new_pathname(
  2804. new_table, old_table->name);
  2805. info = pars_info_create();
  2806. pars_info_add_str_literal(info, "old_name", old_table->name);
  2807. pars_info_add_str_literal(info, "old_path", old_path);
  2808. pars_info_add_int4_literal(info, "new_space",
  2809. (lint) new_table->space);
  2810. err = que_eval_sql(info,
  2811. "PROCEDURE RENAME_NEW_SPACE () IS\n"
  2812. "BEGIN\n"
  2813. "UPDATE SYS_TABLESPACES"
  2814. " SET NAME = :old_name\n"
  2815. " WHERE SPACE = :new_space;\n"
  2816. "UPDATE SYS_DATAFILES"
  2817. " SET PATH = :old_path\n"
  2818. " WHERE SPACE = :new_space;\n"
  2819. "END;\n", FALSE, trx);
  2820. mem_free(old_path);
  2821. }
  2822. if (err == DB_SUCCESS && dict_table_is_discarded(new_table)) {
  2823. err = row_import_update_discarded_flag(
  2824. trx, new_table->id, true, true);
  2825. }
  2826. trx->op_info = "";
  2827. return(err);
  2828. }
  2829. /*********************************************************************//**
  2830. Create and execute a query graph for creating an index.
  2831. @return DB_SUCCESS or error code */
  2832. static __attribute__((nonnull, warn_unused_result))
  2833. dberr_t
  2834. row_merge_create_index_graph(
  2835. /*=========================*/
  2836. trx_t* trx, /*!< in: trx */
  2837. dict_table_t* table, /*!< in: table */
  2838. dict_index_t* index) /*!< in: index */
  2839. {
  2840. ind_node_t* node; /*!< Index creation node */
  2841. mem_heap_t* heap; /*!< Memory heap */
  2842. que_thr_t* thr; /*!< Query thread */
  2843. dberr_t err;
  2844. ut_ad(trx);
  2845. ut_ad(table);
  2846. ut_ad(index);
  2847. heap = mem_heap_create(512);
  2848. index->table = table;
  2849. node = ind_create_graph_create(index, heap, false);
  2850. thr = pars_complete_graph_for_exec(node, trx, heap);
  2851. ut_a(thr == que_fork_start_command(
  2852. static_cast<que_fork_t*>(que_node_get_parent(thr))));
  2853. que_run_threads(thr);
  2854. err = trx->error_state;
  2855. que_graph_free((que_t*) que_node_get_parent(thr));
  2856. return(err);
  2857. }
  2858. /*********************************************************************//**
  2859. Create the index and load in to the dictionary.
  2860. @return index, or NULL on error */
  2861. UNIV_INTERN
  2862. dict_index_t*
  2863. row_merge_create_index(
  2864. /*===================*/
  2865. trx_t* trx, /*!< in/out: trx (sets error_state) */
  2866. dict_table_t* table, /*!< in: the index is on this table */
  2867. const index_def_t* index_def)
  2868. /*!< in: the index definition */
  2869. {
  2870. dict_index_t* index;
  2871. dberr_t err;
  2872. ulint n_fields = index_def->n_fields;
  2873. ulint i;
  2874. ut_ad(!srv_read_only_mode);
  2875. /* Create the index prototype, using the passed in def, this is not
  2876. a persistent operation. We pass 0 as the space id, and determine at
  2877. a lower level the space id where to store the table. */
  2878. index = dict_mem_index_create(table->name, index_def->name,
  2879. 0, index_def->ind_type, n_fields);
  2880. ut_a(index);
  2881. for (i = 0; i < n_fields; i++) {
  2882. index_field_t* ifield = &index_def->fields[i];
  2883. dict_mem_index_add_field(
  2884. index, dict_table_get_col_name(table, ifield->col_no),
  2885. ifield->prefix_len);
  2886. }
  2887. /* Add the index to SYS_INDEXES, using the index prototype. */
  2888. err = row_merge_create_index_graph(trx, table, index);
  2889. if (err == DB_SUCCESS) {
  2890. index = dict_table_get_index_on_name(table, index_def->name);
  2891. ut_a(index);
  2892. /* Note the id of the transaction that created this
  2893. index, we use it to restrict readers from accessing
  2894. this index, to ensure read consistency. */
  2895. ut_ad(index->trx_id == trx->id);
  2896. } else {
  2897. index = NULL;
  2898. }
  2899. return(index);
  2900. }
  2901. /*********************************************************************//**
  2902. Check if a transaction can use an index. */
  2903. UNIV_INTERN
  2904. ibool
  2905. row_merge_is_index_usable(
  2906. /*======================*/
  2907. const trx_t* trx, /*!< in: transaction */
  2908. const dict_index_t* index) /*!< in: index to check */
  2909. {
  2910. if (!dict_index_is_clust(index)
  2911. && dict_index_is_online_ddl(index)) {
2912. /* Indexes that are being created are not usable. */
  2913. return(FALSE);
  2914. }
  2915. return(!dict_index_is_corrupted(index)
  2916. && (dict_table_is_temporary(index->table)
  2917. || !trx->read_view
  2918. || read_view_sees_trx_id(trx->read_view, index->trx_id)));
  2919. }
  2920. /*********************************************************************//**
  2921. Drop a table. The caller must have ensured that the background stats
  2922. thread is not processing the table. This can be done by calling
  2923. dict_stats_wait_bg_to_stop_using_table() after locking the dictionary and
  2924. before calling this function.
  2925. @return DB_SUCCESS or error code */
  2926. UNIV_INTERN
  2927. dberr_t
  2928. row_merge_drop_table(
  2929. /*=================*/
  2930. trx_t* trx, /*!< in: transaction */
  2931. dict_table_t* table) /*!< in: table to drop */
  2932. {
  2933. ut_ad(!srv_read_only_mode);
  2934. /* There must be no open transactions on the table. */
  2935. ut_a(table->n_ref_count == 0);
  2936. return(row_drop_table_for_mysql(table->name, trx, false, false));
  2937. }

/*********************************************************************//**
Build indexes on a table by reading a clustered index,
creating a temporary file containing index entries, merge sorting
these index entries and inserting sorted index entries to indexes.
@return DB_SUCCESS or error code */
UNIV_INTERN
dberr_t
row_merge_build_indexes(
/*====================*/
	trx_t*		trx,		/*!< in: transaction */
	dict_table_t*	old_table,	/*!< in: table where rows are
					read from */
	dict_table_t*	new_table,	/*!< in: table where indexes are
					created; identical to old_table
					unless creating a PRIMARY KEY */
	bool		online,		/*!< in: true if creating indexes
					online */
	dict_index_t**	indexes,	/*!< in: indexes to be created */
	const ulint*	key_numbers,	/*!< in: MySQL key numbers */
	ulint		n_indexes,	/*!< in: size of indexes[] */
	struct TABLE*	table,		/*!< in/out: MySQL table, for
					reporting erroneous key value
					if applicable */
	const dtuple_t*	add_cols,	/*!< in: default values of
					added columns, or NULL */
	const ulint*	col_map,	/*!< in: mapping of old column
					numbers to new ones, or NULL
					if old_table == new_table */
	ulint		add_autoinc,	/*!< in: number of added
					AUTO_INCREMENT column, or
					ULINT_UNDEFINED if none is added */
	ib_sequence_t&	sequence)	/*!< in: autoinc instance if
					add_autoinc != ULINT_UNDEFINED */
{
	merge_file_t*		merge_files;
	row_merge_block_t*	block;
	ulint			block_size;
	ulint			i;
	ulint			j;
	dberr_t			error;
	int			tmpfd = -1;
	dict_index_t*		fts_sort_idx = NULL;
	fts_psort_t*		psort_info = NULL;
	fts_psort_t*		merge_info = NULL;
	ib_int64_t		sig_count = 0;
	bool			fts_psort_initiated = false;
	float			total_static_cost = 0;
	float			total_dynamic_cost = 0;
	uint			total_index_blocks = 0;
	float			pct_cost = 0;
	float			pct_progress = 0;

	DBUG_ENTER("row_merge_build_indexes");

	ut_ad(!srv_read_only_mode);
	ut_ad((old_table == new_table) == !col_map);
	ut_ad(!add_cols || col_map);

	/* Allocate memory for merge file data structure and initialize
	fields */
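	/* The sort buffer is three times srv_sort_buf_size: during the
	merge passes (see row_merge_blocks()) two of the blocks hold the
	input runs and the third accumulates the merged output. */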
	block_size = 3 * srv_sort_buf_size;
	block = static_cast<row_merge_block_t*>(
		os_mem_alloc_large(&block_size));

	if (block == NULL) {
		DBUG_RETURN(DB_OUT_OF_MEMORY);
	}

	trx_start_if_not_started_xa(trx);

	merge_files = static_cast<merge_file_t*>(
		mem_alloc(n_indexes * sizeof *merge_files));

	/* Initialize all the merge file descriptors, so that we
	don't call row_merge_file_destroy() on uninitialized
	merge file descriptors */

	for (i = 0; i < n_indexes; i++) {
		merge_files[i].fd = -1;
	}

	total_static_cost = COST_BUILD_INDEX_STATIC * n_indexes
		+ COST_READ_CLUSTERED_INDEX;
	total_dynamic_cost = COST_BUILD_INDEX_DYNAMIC * n_indexes;
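	/* The cost figures are only used for the progress percentages
	written to the error log: a fixed (static) cost per index plus
	the clustered index scan, and a dynamic cost that is later split
	between the indexes according to their share of the merge blocks
	written during the scan. */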
	for (i = 0; i < n_indexes; i++) {
		if (row_merge_file_create(&merge_files[i]) < 0) {
			error = DB_OUT_OF_MEMORY;
			goto func_exit;
		}

		if (indexes[i]->type & DICT_FTS) {
			ibool	opt_doc_id_size = FALSE;

			/* To build an FTS index, we need to extract the
			document's words, Doc ID, and word positions, so
			we build a "fts sort index" on these three
			'fields' */
			fts_sort_idx = row_merge_create_fts_sort_index(
				indexes[i], old_table, &opt_doc_id_size);

			row_merge_dup_t* dup = static_cast<row_merge_dup_t*>(
				ut_malloc(sizeof *dup));
			dup->index = fts_sort_idx;
			dup->table = table;
			dup->col_map = col_map;
			dup->n_dup = 0;

			row_fts_psort_info_init(
				trx, dup, new_table, opt_doc_id_size,
				&psort_info, &merge_info);

			/* We need to ensure that we free the resources
			allocated here. */
			fts_psort_initiated = true;
		}
	}
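	/* A single shared temporary file serves as scratch space for
	row_merge_sort(); the per-index output of the clustered index
	scan goes to the merge_files[] created above. */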
	tmpfd = row_merge_file_create_low();

	if (tmpfd < 0) {
		error = DB_OUT_OF_MEMORY;
		goto func_exit;
	}

	/* Reset the MySQL row buffer that is used when reporting
	duplicate keys. */
	innobase_rec_reset(table);

	sql_print_information("InnoDB: Online DDL : Start");
	sql_print_information("InnoDB: Online DDL : Start reading clustered "
			      "index of the table and create temporary files");

	pct_cost = COST_READ_CLUSTERED_INDEX * 100
		/ (total_static_cost + total_dynamic_cost);
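	/* A single pass over the clustered index feeds every index that
	is being built: the scan writes sorted blocks of index entries
	into the per-index merge files and, for fulltext indexes, hands
	the documents to the parallel tokenization threads set up
	above. */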
	/* Read clustered index of the table and create files for
	secondary index entries for merge sort */
	error = row_merge_read_clustered_index(
		trx, table, old_table, new_table, online, indexes,
		fts_sort_idx, psort_info, merge_files, key_numbers,
		n_indexes, add_cols, col_map,
		add_autoinc, sequence, block, pct_cost);

	pct_progress += pct_cost;

	sql_print_information("InnoDB: Online DDL : End of reading "
			      "clustered index of the table and create temporary files");

	for (i = 0; i < n_indexes; i++) {
		total_index_blocks += merge_files[i].offset;
	}

	if (error != DB_SUCCESS) {
		goto func_exit;
	}

	DEBUG_SYNC_C("row_merge_after_scan");

	/* Now we have files containing index entries ready for
	sorting and inserting. */
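	/* Each index is now completed in turn: fulltext indexes are
	finished by the parallel merge threads (or by a direct insert),
	while ordinary indexes go through a merge sort of their file
	followed by a bulk insert of the sorted tuples, and finally the
	online row log, if any, is applied. */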
	for (i = 0; i < n_indexes; i++) {
		dict_index_t*	sort_idx = indexes[i];

		if (indexes[i]->type & DICT_FTS) {
			os_event_t	fts_parallel_merge_event;

			sort_idx = fts_sort_idx;

			fts_parallel_merge_event
				= merge_info[0].psort_common->merge_event;

			if (FTS_PLL_MERGE) {
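				/* Wake up the parallel merge threads and
				poll the merge event until every child
				thread reports FTS_CHILD_COMPLETE or
				FTS_CHILD_EXITING. */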
				ulint	trial_count = 0;
				bool	all_exit = false;

				os_event_reset(fts_parallel_merge_event);
				row_fts_start_parallel_merge(merge_info);
wait_again:
				os_event_wait_time_low(
					fts_parallel_merge_event, 1000000,
					sig_count);

				for (j = 0; j < FTS_NUM_AUX_INDEX; j++) {
					if (merge_info[j].child_status
					    != FTS_CHILD_COMPLETE
					    && merge_info[j].child_status
					    != FTS_CHILD_EXITING) {
						sig_count = os_event_reset(
							fts_parallel_merge_event);
						goto wait_again;
					}
				}

				/* All children should have completed by
				now; wait a little until they have all
				finished using the event. */
				while (!all_exit && trial_count < 10000) {
					all_exit = true;

					for (j = 0; j < FTS_NUM_AUX_INDEX;
					     j++) {
						if (merge_info[j].child_status
						    != FTS_CHILD_EXITING) {
							all_exit = false;
							os_thread_sleep(1000);
							break;
						}
					}
					trial_count++;
				}

				if (!all_exit) {
					ib_logf(IB_LOG_LEVEL_ERROR,
						"Not all child merge threads"
						" exited when creating FTS"
						" index '%s'",
						indexes[i]->name);
				}
			} else {
				/* This cannot report duplicates; an
				assertion would fail in that case. */
				error = row_fts_merge_insert(
					sort_idx, new_table,
					psort_info, 0);
			}
#ifdef FTS_INTERNAL_DIAG_PRINT
			DEBUG_FTS_SORT_PRINT("FTS_SORT: Complete Insert\n");
#endif
		} else {
			row_merge_dup_t	dup = {
				sort_idx, table, col_map, 0};

			pct_cost = (COST_BUILD_INDEX_STATIC +
				(total_dynamic_cost * merge_files[i].offset /
					total_index_blocks)) /
				(total_static_cost + total_dynamic_cost)
				* PCT_COST_MERGESORT_INDEX * 100;

			sql_print_information("InnoDB: Online DDL : Start merge-sorting"
					      " index %s (%lu / %lu), estimated cost : %2.4f",
					      indexes[i]->name, (i+1), n_indexes, pct_cost);

			error = row_merge_sort(
				trx, &dup, &merge_files[i],
				block, &tmpfd, true, pct_progress, pct_cost);

			pct_progress += pct_cost;

			sql_print_information("InnoDB: Online DDL : End of"
					      " merge-sorting index %s (%lu / %lu)",
					      indexes[i]->name, (i+1), n_indexes);

			if (error == DB_SUCCESS) {
				pct_cost = (COST_BUILD_INDEX_STATIC +
					(total_dynamic_cost * merge_files[i].offset /
						total_index_blocks)) /
					(total_static_cost + total_dynamic_cost) *
					PCT_COST_INSERT_INDEX * 100;

				sql_print_information("InnoDB: Online DDL : Start "
						      "building index %s (%lu / %lu), estimated "
						      "cost : %2.4f", indexes[i]->name, (i+1),
						      n_indexes, pct_cost);

				error = row_merge_insert_index_tuples(
					trx->id, sort_idx, old_table,
					merge_files[i].fd, block,
					merge_files[i].n_rec, pct_progress, pct_cost);

				pct_progress += pct_cost;

				sql_print_information("InnoDB: Online DDL : "
						      "End of building index %s (%lu / %lu)",
						      indexes[i]->name, (i+1), n_indexes);
			}
		}

		/* Close the temporary file to free up space. */
		row_merge_file_destroy(&merge_files[i]);

		if (indexes[i]->type & DICT_FTS) {
			row_fts_psort_info_destroy(psort_info, merge_info);
			fts_psort_initiated = false;
		} else if (error != DB_SUCCESS || !online) {
			/* Do not apply any online log. */
		} else if (old_table != new_table) {
			ut_ad(!sort_idx->online_log);
			ut_ad(sort_idx->online_status
			      == ONLINE_INDEX_COMPLETE);
		} else {
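			/* The clustered index was scanned while concurrent
			changes to the table were being written to the row
			log of this index; apply that log now so the new
			index catches up with the table. */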
			sql_print_information("InnoDB: Online DDL : Start applying row log");
			DEBUG_SYNC_C("row_log_apply_before");
			error = row_log_apply(trx, sort_idx, table);
			DEBUG_SYNC_C("row_log_apply_after");
			sql_print_information("InnoDB: Online DDL : End of applying row log");
		}

		sql_print_information("InnoDB: Online DDL : Completed");

		if (error != DB_SUCCESS) {
			trx->error_key_num = key_numbers[i];
			goto func_exit;
		}

		if (indexes[i]->type & DICT_FTS && fts_enable_diag_print) {
			char*	name = (char*) indexes[i]->name;

			if (*name == TEMP_INDEX_PREFIX) {
				name++;
			}

			ut_print_timestamp(stderr);
			fprintf(stderr, " InnoDB: Finished building "
				"full-text index %s\n", name);
		}
	}
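	/* Common exit path for both success and failure: release the
	shared sort buffer, the per-index merge files and any fulltext
	sort resources that are still allocated. */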
func_exit:
	DBUG_EXECUTE_IF(
		"ib_build_indexes_too_many_concurrent_trxs",
		error = DB_TOO_MANY_CONCURRENT_TRXS;
		trx->error_state = error;);

	if (fts_psort_initiated) {
		/* Clean up FTS psort related resources. */
		row_fts_psort_info_destroy(psort_info, merge_info);
		fts_psort_initiated = false;
	}

	row_merge_file_destroy_low(tmpfd);

	for (i = 0; i < n_indexes; i++) {
		row_merge_file_destroy(&merge_files[i]);
	}

	if (fts_sort_idx) {
		dict_mem_index_free(fts_sort_idx);
	}

	mem_free(merge_files);

	os_mem_free_large(block, block_size);
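	/* The DICT_TF2_FTS_ADD_DOC_ID flag is (as far as this code is
	concerned) only needed while the hidden FTS_DOC_ID column and
	the fulltext indexes are being built; clear it now. */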
	DICT_TF2_FLAG_UNSET(new_table, DICT_TF2_FTS_ADD_DOC_ID);

	if (online && old_table == new_table && error != DB_SUCCESS) {
		/* On error, flag all online secondary index creation
		as aborted. */
		for (i = 0; i < n_indexes; i++) {
			ut_ad(!(indexes[i]->type & DICT_FTS));
			ut_ad(*indexes[i]->name == TEMP_INDEX_PREFIX);
			ut_ad(!dict_index_is_clust(indexes[i]));

			/* Completed indexes should be dropped as
			well, and indexes whose creation was aborted
			should be dropped from the persistent
			storage. However, at this point we can only
			set some flags in the not-yet-published
			indexes. These indexes will be dropped later
			in row_merge_drop_indexes(), called by
			rollback_inplace_alter_table(). */
			switch (dict_index_get_online_status(indexes[i])) {
			case ONLINE_INDEX_COMPLETE:
				break;
			case ONLINE_INDEX_CREATION:
				rw_lock_x_lock(
					dict_index_get_lock(indexes[i]));
				row_log_abort_sec(indexes[i]);
				indexes[i]->type |= DICT_CORRUPT;
				rw_lock_x_unlock(
					dict_index_get_lock(indexes[i]));
				new_table->drop_aborted = TRUE;
				/* fall through */
			case ONLINE_INDEX_ABORTED_DROPPED:
			case ONLINE_INDEX_ABORTED:
				MONITOR_MUTEX_INC(
					&dict_sys->mutex,
					MONITOR_BACKGROUND_DROP_INDEX);
			}
		}
	}

	DBUG_RETURN(error);
}