You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2020 lines
54 KiB

  1. /*****************************************************************************
  2. Copyright (c) 2016, Oracle and/or its affiliates. All Rights Reserved.
  3. Copyright (c) 2017, 2018, MariaDB Corporation.
  4. This program is free software; you can redistribute it and/or modify it under
  5. the terms of the GNU General Public License as published by the Free Software
  6. Foundation; version 2 of the License.
  7. This program is distributed in the hope that it will be useful, but WITHOUT
  8. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  9. FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
  10. You should have received a copy of the GNU General Public License along with
  11. this program; if not, write to the Free Software Foundation, Inc.,
  12. 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
  13. *****************************************************************************/
  14. /**************************************************//**
  15. @file gis/gis0sea.cc
  16. InnoDB R-tree search interfaces
  17. Created 2014/01/16 Jimmy Yang
  18. ***********************************************************************/
  19. #include "fsp0fsp.h"
  20. #include "page0page.h"
  21. #include "page0cur.h"
  22. #include "page0zip.h"
  23. #include "gis0rtree.h"
  24. #include "btr0cur.h"
  25. #include "btr0sea.h"
  26. #include "btr0pcur.h"
  27. #include "rem0cmp.h"
  28. #include "lock0lock.h"
  29. #include "ibuf0ibuf.h"
  30. #include "trx0trx.h"
  31. #include "srv0mon.h"
  32. #include "gis0geo.h"
/** Restore the stored position of a persistent cursor, buffer-fixing the page.
Forward declaration; the static definition appears later in this file. */
static
bool
rtr_cur_restore_position(
	ulint		latch_mode,	/*!< in: BTR_SEARCH_LEAF, ... */
	btr_cur_t*	cursor,		/*!< in: detached persistent cursor */
	ulint		level,		/*!< in: index level */
	mtr_t*		mtr);		/*!< in: mtr */
  41. /*************************************************************//**
  42. Pop out used parent path entry, until we find the parent with matching
  43. page number */
  44. static
  45. void
  46. rtr_adjust_parent_path(
  47. /*===================*/
  48. rtr_info_t* rtr_info, /* R-Tree info struct */
  49. ulint page_no) /* page number to look for */
  50. {
  51. while (!rtr_info->parent_path->empty()) {
  52. if (rtr_info->parent_path->back().child_no == page_no) {
  53. break;
  54. } else {
  55. if (rtr_info->parent_path->back().cursor) {
  56. btr_pcur_close(
  57. rtr_info->parent_path->back().cursor);
  58. ut_free(rtr_info->parent_path->back().cursor);
  59. }
  60. rtr_info->parent_path->pop_back();
  61. }
  62. }
  63. }
/*************************************************************//**
Find the next matching record. This function is used by search
or record locating during index delete/update. It pops pages off
rtr_info->path one by one, latches each, and searches it for a match,
maintaining the parent path when tree-modifying latch modes need it.
@return true if there is suitable record found, otherwise false */
static
bool
rtr_pcur_getnext_from_path(
/*=======================*/
	const dtuple_t*	tuple,	/*!< in: data tuple */
	page_cur_mode_t	mode,	/*!< in: cursor search mode */
	btr_cur_t*	btr_cur,/*!< in: persistent cursor; NOTE that the
				function may release the page latch */
	ulint		target_level,
				/*!< in: target level */
	ulint		latch_mode,
				/*!< in: latch_mode */
	bool		index_locked,
				/*!< in: index tree locked */
	mtr_t*		mtr)	/*!< in: mtr */
{
	dict_index_t*	index = btr_cur->index;
	bool		found = false;
	ulint		space = dict_index_get_space(index);
	page_cur_t*	page_cursor;
	ulint		level = 0;
	node_visit_t	next_rec;
	rtr_info_t*	rtr_info = btr_cur->rtr_info;
	node_seq_t	page_ssn;
	ulint		my_latch_mode;
	ulint		skip_parent = false;
	bool		new_split = false;
	bool		need_parent;
	bool		for_delete = false;
	bool		for_undo_ins = false;

	/* exhausted all the pages to be searched */
	if (rtr_info->path->empty()) {
		return(false);
	}

	ut_ad(dtuple_get_n_fields_cmp(tuple));

	my_latch_mode = BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode);

	for_delete = latch_mode & BTR_RTREE_DELETE_MARK;
	for_undo_ins = latch_mode & BTR_RTREE_UNDO_INS;

	/* There should be no insert coming to this function. Only
	mode with BTR_MODIFY_* should be delete */
	ut_ad(mode != PAGE_CUR_RTREE_INSERT);
	ut_ad(my_latch_mode == BTR_SEARCH_LEAF
	      || my_latch_mode == BTR_MODIFY_LEAF
	      || my_latch_mode == BTR_MODIFY_TREE
	      || my_latch_mode == BTR_CONT_MODIFY_TREE);

	/* Whether we need to track parent information. Only needed
	when we do tree altering operations (such as index page merge) */
	need_parent = ((my_latch_mode == BTR_MODIFY_TREE
		        || my_latch_mode == BTR_CONT_MODIFY_TREE)
		       && mode == PAGE_CUR_RTREE_LOCATE);

	if (!index_locked) {
		ut_ad(latch_mode & BTR_SEARCH_LEAF
		      || latch_mode & BTR_MODIFY_LEAF);
		mtr_s_lock(dict_index_get_lock(index), mtr);
	} else {
		ut_ad(mtr_memo_contains_flagged(mtr, &index->lock,
						MTR_MEMO_SX_LOCK
						| MTR_MEMO_S_LOCK
						| MTR_MEMO_X_LOCK));
	}

	const page_size_t&	page_size = dict_table_page_size(index->table);

	/* Pop each node/page to be searched from the "path" structure
	and do a search on it. Please note, any pages that are in
	the "path" structure are protected by "page" lock, so they
	cannot be shrunk away */
	do {
		buf_block_t*	block;
		node_seq_t	path_ssn;
		const page_t*	page;
		ulint		rw_latch = RW_X_LATCH;
		ulint		tree_idx;

		mutex_enter(&rtr_info->rtr_path_mutex);
		next_rec = rtr_info->path->back();
		rtr_info->path->pop_back();
		level = next_rec.level;
		path_ssn = next_rec.seq_no;

		/* tree_blocks[] is indexed from the root downwards:
		index 0 is the root level. */
		tree_idx = btr_cur->tree_height - level - 1;

		/* Maintain the parent path info as well, if needed */
		if (need_parent && !skip_parent && !new_split) {
			ulint	old_level;
			ulint	new_level;

			ut_ad(!rtr_info->parent_path->empty());

			/* Cleanup unused parent info */
			if (rtr_info->parent_path->back().cursor) {
				btr_pcur_close(
					rtr_info->parent_path->back().cursor);
				ut_free(rtr_info->parent_path->back().cursor);
			}

			old_level = rtr_info->parent_path->back().level;

			rtr_info->parent_path->pop_back();

			ut_ad(!rtr_info->parent_path->empty());

			/* check whether there is a level change. If so,
			the current parent path needs to pop enough
			nodes to adjust to the new search page */
			new_level = rtr_info->parent_path->back().level;

			if (old_level < new_level) {
				rtr_adjust_parent_path(
					rtr_info, next_rec.page_no);
			}

			ut_ad(!rtr_info->parent_path->empty());

			ut_ad(next_rec.page_no
			      == rtr_info->parent_path->back().child_no);
		}

		mutex_exit(&rtr_info->rtr_path_mutex);

		skip_parent = false;
		new_split = false;

		/* Once we have pages in "path", these pages are
		predicate page locked, so they can't be shrunk away.
		They also have SSN (split sequence number) to detect
		splits, so we can directly latch a single page while
		getting them. They can be unlatched if not qualified.
		One reason for pre-latch is that we might need to position
		some parent position (requires latch) during search */
		if (level == 0) {
			/* S latched for SEARCH_LEAF, and X latched
			for MODIFY_LEAF */
			if (my_latch_mode <= BTR_MODIFY_LEAF) {
				rw_latch = my_latch_mode;
			}

			if (my_latch_mode == BTR_CONT_MODIFY_TREE
			    || my_latch_mode == BTR_MODIFY_TREE) {
				rw_latch = RW_NO_LATCH;
			}

		} else if (level == target_level) {
			rw_latch = RW_X_LATCH;
		}

		/* Release previously locked blocks: first the per-level
		slots, then the three extra slots starting at
		RTR_MAX_LEVELS (used by leaf latching). */
		if (my_latch_mode != BTR_SEARCH_LEAF) {
			for (ulint idx = 0; idx < btr_cur->tree_height;
			     idx++) {
				if (rtr_info->tree_blocks[idx]) {
					mtr_release_block_at_savepoint(
						mtr,
						rtr_info->tree_savepoints[idx],
						rtr_info->tree_blocks[idx]);
					rtr_info->tree_blocks[idx] = NULL;
				}
			}

			for (ulint idx = RTR_MAX_LEVELS;
			     idx < RTR_MAX_LEVELS + 3;
			     idx++) {
				if (rtr_info->tree_blocks[idx]) {
					mtr_release_block_at_savepoint(
						mtr,
						rtr_info->tree_savepoints[idx],
						rtr_info->tree_blocks[idx]);
					rtr_info->tree_blocks[idx] = NULL;
				}
			}
		}

		/* set up savepoint to record any locks to be taken */
		rtr_info->tree_savepoints[tree_idx] = mtr_set_savepoint(mtr);

#ifdef UNIV_RTR_DEBUG
		ut_ad(!(rw_lock_own(&btr_cur->page_cur.block->lock, RW_LOCK_X)
		      ||
			rw_lock_own(&btr_cur->page_cur.block->lock, RW_LOCK_S))
		      || my_latch_mode == BTR_MODIFY_TREE
		      || my_latch_mode == BTR_CONT_MODIFY_TREE
		      || !page_is_leaf(buf_block_get_frame(
					btr_cur->page_cur.block)));
#endif /* UNIV_RTR_DEBUG */

		page_id_t	page_id(space, next_rec.page_no);
		dberr_t		err = DB_SUCCESS;

		block = buf_page_get_gen(
			page_id, page_size,
			rw_latch, NULL, BUF_GET, __FILE__, __LINE__, mtr, &err);

		if (block == NULL) {
			continue;
		} else if (rw_latch != RW_NO_LATCH) {
			ut_ad(!dict_index_is_ibuf(index));
			buf_block_dbg_add_level(block, SYNC_TREE_NODE);
		}

		rtr_info->tree_blocks[tree_idx] = block;

		page = buf_block_get_frame(block);
		page_ssn = page_get_ssn_id(page);

		/* If the page split since it was pushed onto the path
		(its SSN advanced past the recorded one), also push its
		right sibling so it gets searched too. Note that we have
		SX lock on index->lock, there should not be any
		split/shrink happening here */
		if (page_ssn > path_ssn) {
			ulint next_page_no = btr_page_get_next(page, mtr);
			rtr_non_leaf_stack_push(
				rtr_info->path, next_page_no, path_ssn,
				level, 0, NULL, 0);

			if (!srv_read_only_mode
			    && mode != PAGE_CUR_RTREE_INSERT
			    && mode != PAGE_CUR_RTREE_LOCATE) {
				ut_ad(rtr_info->thr);
				lock_place_prdt_page_lock(
					space, next_page_no, index,
					rtr_info->thr);
			}
			new_split = true;
#if UNIV_GIS_DEBUG
			fprintf(stderr,
				"GIS_DIAG: Splitted page found: %d, %ld\n",
				static_cast<int>(need_parent), next_page_no);
#endif
		}

		page_cursor = btr_cur_get_page_cur(btr_cur);
		page_cursor->rec = NULL;

		if (mode == PAGE_CUR_RTREE_LOCATE) {
			if (level == target_level && level == 0) {
				ulint	low_match;

				found = false;

				low_match = page_cur_search(
					block, index, tuple,
					PAGE_CUR_LE,
					btr_cur_get_page_cur(btr_cur));

				if (low_match == dtuple_get_n_fields_cmp(
							tuple)) {
					rec_t*	rec = btr_cur_get_rec(btr_cur);

					/* A delete-marked record only
					qualifies when we are not doing a
					delete/undo-insert lookup. */
					if (!rec_get_deleted_flag(rec,
					    dict_table_is_comp(index->table))
					    || (!for_delete && !for_undo_ins)) {
						found = true;
						btr_cur->low_match = low_match;
					} else {
						/* mark we found deleted row */
						btr_cur->rtr_info->fd_del
							= true;
					}
				}
			} else {
				page_cur_mode_t	page_mode = mode;

				if (level == target_level
				    && target_level != 0) {
					page_mode = PAGE_CUR_RTREE_GET_FATHER;
				}
				found = rtr_cur_search_with_match(
					block, index, tuple, page_mode,
					page_cursor, btr_cur->rtr_info);

				/* Save the position of parent if needed */
				if (found && need_parent) {
					btr_pcur_t*	r_cursor =
						rtr_get_parent_cursor(
							btr_cur, level, false);

					rec_t*	rec = page_cur_get_rec(
						page_cursor);
					page_cur_position(
						rec, block,
						btr_pcur_get_page_cur(r_cursor));
					r_cursor->pos_state =
						BTR_PCUR_IS_POSITIONED;
					r_cursor->latch_mode = my_latch_mode;
					btr_pcur_store_position(r_cursor, mtr);
#ifdef UNIV_DEBUG
					ulint num_stored =
						rtr_store_parent_path(
							block, btr_cur,
							rw_latch, level, mtr);
					ut_ad(num_stored > 0);
#else
					rtr_store_parent_path(
						block, btr_cur, rw_latch,
						level, mtr);
#endif /* UNIV_DEBUG */
				}
			}
		} else {
			found = rtr_cur_search_with_match(
				block, index, tuple, mode, page_cursor,
				btr_cur->rtr_info);
		}

		/* Attach predicate lock if needed, no matter whether
		there are matched records */
		if (mode != PAGE_CUR_RTREE_INSERT
		    && mode != PAGE_CUR_RTREE_LOCATE
		    && mode >= PAGE_CUR_CONTAIN
		    && btr_cur->rtr_info->need_prdt_lock
		    && found) {
			lock_prdt_t	prdt;

			trx_t*		trx = thr_get_trx(
						btr_cur->rtr_info->thr);
			lock_mutex_enter();
			lock_init_prdt_from_mbr(
				&prdt, &btr_cur->rtr_info->mbr,
				mode, trx->lock.lock_heap);
			lock_mutex_exit();

			/* lock_prdt_lock() needs the block S-latched;
			take it temporarily if we hold no page latch. */
			if (rw_latch == RW_NO_LATCH) {
				rw_lock_s_lock(&(block->lock));
			}

			lock_prdt_lock(block, &prdt, index, LOCK_S,
				       LOCK_PREDICATE, btr_cur->rtr_info->thr,
				       mtr);

			if (rw_latch == RW_NO_LATCH) {
				rw_lock_s_unlock(&(block->lock));
			}
		}

		if (found) {
			if (level == target_level) {
				page_cur_t*	r_cur;;

				if (my_latch_mode == BTR_MODIFY_TREE
				    && level == 0) {
					ut_ad(rw_latch == RW_NO_LATCH);
					page_id_t	my_page_id(
						space, block->page.id.page_no());

					btr_cur_latch_leaves(
						block, my_page_id,
						page_size, BTR_MODIFY_TREE,
						btr_cur, mtr);
				}

				r_cur = btr_cur_get_page_cur(btr_cur);

				page_cur_position(
					page_cur_get_rec(page_cursor),
					page_cur_get_block(page_cursor),
					r_cur);

				btr_cur->low_match = level != 0 ?
					DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1
					: btr_cur->low_match;

				break;
			}

			/* Keep the parent path node, which points to
			last node just located */
			skip_parent = true;
		} else {
			/* Release latch on the current page */
			ut_ad(rtr_info->tree_blocks[tree_idx]);

			mtr_release_block_at_savepoint(
				mtr, rtr_info->tree_savepoints[tree_idx],
				rtr_info->tree_blocks[tree_idx]);
			rtr_info->tree_blocks[tree_idx] = NULL;
		}

	} while (!rtr_info->path->empty());

	const rec_t* rec = btr_cur_get_rec(btr_cur);

	if (page_rec_is_infimum(rec) || page_rec_is_supremum(rec)) {
		mtr_commit(mtr);
		mtr_start(mtr);
	} else if (!index_locked) {
		/* NOTE(review): the !index_locked branch above took an
		S-lock via mtr_s_lock(), but this releases with
		MTR_MEMO_X_LOCK — looks inconsistent; confirm against the
		mtr_memo_release() contract before changing. */
		mtr_memo_release(mtr, dict_index_get_lock(index),
				 MTR_MEMO_X_LOCK);
	}

	return(found);
}
  400. /*************************************************************//**
  401. Find the next matching record. This function will first exhaust
  402. the copied record listed in the rtr_info->matches vector before
  403. moving to the next page
  404. @return true if there is suitable record found, otherwise false */
  405. bool
  406. rtr_pcur_move_to_next(
  407. /*==================*/
  408. const dtuple_t* tuple, /*!< in: data tuple; NOTE: n_fields_cmp in
  409. tuple must be set so that it cannot get
  410. compared to the node ptr page number field! */
  411. page_cur_mode_t mode, /*!< in: cursor search mode */
  412. btr_pcur_t* cursor, /*!< in: persistent cursor; NOTE that the
  413. function may release the page latch */
  414. ulint level, /*!< in: target level */
  415. mtr_t* mtr) /*!< in: mtr */
  416. {
  417. rtr_info_t* rtr_info = cursor->btr_cur.rtr_info;
  418. ut_a(cursor->pos_state == BTR_PCUR_IS_POSITIONED);
  419. mutex_enter(&rtr_info->matches->rtr_match_mutex);
  420. /* First retrieve the next record on the current page */
  421. if (!rtr_info->matches->matched_recs->empty()) {
  422. rtr_rec_t rec;
  423. rec = rtr_info->matches->matched_recs->back();
  424. rtr_info->matches->matched_recs->pop_back();
  425. mutex_exit(&rtr_info->matches->rtr_match_mutex);
  426. cursor->btr_cur.page_cur.rec = rec.r_rec;
  427. cursor->btr_cur.page_cur.block = &rtr_info->matches->block;
  428. DEBUG_SYNC_C("rtr_pcur_move_to_next_return");
  429. return(true);
  430. }
  431. mutex_exit(&rtr_info->matches->rtr_match_mutex);
  432. /* Fetch the next page */
  433. return(rtr_pcur_getnext_from_path(tuple, mode, &cursor->btr_cur,
  434. level, cursor->latch_mode,
  435. false, mtr));
  436. }
  437. /*************************************************************//**
  438. Check if the cursor holds record pointing to the specified child page
  439. @return true if it is (pointing to the child page) false otherwise */
  440. static
  441. bool
  442. rtr_compare_cursor_rec(
  443. /*===================*/
  444. dict_index_t* index, /*!< in: index */
  445. btr_cur_t* cursor, /*!< in: Cursor to check */
  446. ulint page_no, /*!< in: desired child page number */
  447. mem_heap_t** heap) /*!< in: memory heap */
  448. {
  449. const rec_t* rec;
  450. ulint* offsets;
  451. rec = btr_cur_get_rec(cursor);
  452. offsets = rec_get_offsets(
  453. rec, index, NULL, false, ULINT_UNDEFINED, heap);
  454. return(btr_node_ptr_get_child_page_no(rec, offsets) == page_no);
  455. }
/**************************************************************//**
Initializes and opens a persistent cursor to an index tree. It should be
closed with btr_pcur_close. Mainly called by row_search_index_entry().
Performs a first dive via btr_cur_search_to_nth_level(); if that dive
does not land on a fully matching (and acceptable) record, continues
the search over the remaining pages with rtr_pcur_getnext_from_path(). */
void
rtr_pcur_open_low(
/*==============*/
	dict_index_t*	index,	/*!< in: index */
	ulint		level,	/*!< in: level in the rtree */
	const dtuple_t*	tuple,	/*!< in: tuple on which search done */
	page_cur_mode_t	mode,	/*!< in: PAGE_CUR_RTREE_LOCATE, ... */
	ulint		latch_mode,/*!< in: BTR_SEARCH_LEAF, ... */
	btr_pcur_t*	cursor, /*!< in: memory buffer for persistent cursor */
	const char*	file,	/*!< in: file name */
	unsigned	line,	/*!< in: line where called */
	mtr_t*		mtr)	/*!< in: mtr */
{
	btr_cur_t*	btr_cursor;
	ulint		n_fields;
	ulint		low_match;
	rec_t*		rec;
	bool		tree_latched = false;
	bool		for_delete = false;
	bool		for_undo_ins = false;

	/* Only leaf-level locate searches are supported here. */
	ut_ad(level == 0);

	ut_ad(latch_mode & BTR_MODIFY_LEAF || latch_mode & BTR_MODIFY_TREE);
	ut_ad(mode == PAGE_CUR_RTREE_LOCATE);

	/* Initialize the cursor */
	btr_pcur_init(cursor);

	for_delete = latch_mode & BTR_RTREE_DELETE_MARK;
	for_undo_ins = latch_mode & BTR_RTREE_UNDO_INS;

	cursor->latch_mode = BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode);
	cursor->search_mode = mode;

	/* Search with the tree cursor */

	btr_cursor = btr_pcur_get_btr_cur(cursor);

	btr_cursor->rtr_info = rtr_create_rtr_info(false, false,
						   btr_cursor, index);

	/* Purge will SX lock the tree instead of take Page Locks */
	if (btr_cursor->thr) {
		btr_cursor->rtr_info->need_page_lock = true;
		btr_cursor->rtr_info->thr = btr_cursor->thr;
	}

	btr_cur_search_to_nth_level(index, level, tuple, mode, latch_mode,
				    btr_cursor, 0, file, line, mtr);
	cursor->pos_state = BTR_PCUR_IS_POSITIONED;

	cursor->trx_if_known = NULL;

	low_match = btr_pcur_get_low_match(cursor);

	rec = btr_pcur_get_rec(cursor);

	n_fields = dtuple_get_n_fields(tuple);

	/* Record whether the caller already holds the index latch, so
	rtr_pcur_getnext_from_path() does not re-latch it. */
	if (latch_mode & BTR_ALREADY_S_LATCHED) {
		ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
					MTR_MEMO_S_LOCK));
		tree_latched = true;
	}

	if (latch_mode & BTR_MODIFY_TREE) {
		ut_ad(mtr_memo_contains_flagged(mtr, &index->lock,
						MTR_MEMO_X_LOCK
						| MTR_MEMO_SX_LOCK));
		tree_latched = true;
	}

	/* The first dive failed when it hit the infimum, matched fewer
	fields than the tuple compares, or found only a delete-marked
	record while doing a delete / undo-insert lookup. */
	if (page_rec_is_infimum(rec) || low_match != n_fields
	    || (rec_get_deleted_flag(rec, dict_table_is_comp(index->table))
		&& (for_delete || for_undo_ins))) {

		if (rec_get_deleted_flag(rec, dict_table_is_comp(index->table))
		    && for_delete) {
			btr_cursor->rtr_info->fd_del = true;
			btr_cursor->low_match = 0;
		}

		/* Did not find matched row in first dive. Release
		latched block, if any, before searching more pages */
		if (latch_mode & BTR_MODIFY_LEAF) {
			ulint		tree_idx = btr_cursor->tree_height - 1;
			rtr_info_t*	rtr_info = btr_cursor->rtr_info;

			ut_ad(level == 0);

			if (rtr_info->tree_blocks[tree_idx]) {
				mtr_release_block_at_savepoint(
					mtr,
					rtr_info->tree_savepoints[tree_idx],
					rtr_info->tree_blocks[tree_idx]);
				rtr_info->tree_blocks[tree_idx] = NULL;
			}
		}

		bool	ret = rtr_pcur_getnext_from_path(
			tuple, mode, btr_cursor, level, latch_mode,
			tree_latched, mtr);

		if (ret) {
			low_match = btr_pcur_get_low_match(cursor);
			ut_ad(low_match == n_fields);
		}
	}
}
/* Get the rtree page father: position "cursor" on the node pointer
record in the parent page that points to "block".
@param[in]	index	rtree index
@param[in]	block	child page in the index
@param[in]	mtr	mtr
@param[in]	sea_cur	search cursor, contains information
			about parent nodes in search
@param[in]	cursor	cursor on node pointer record,
			its page x-latched */
void
rtr_page_get_father(
	dict_index_t*	index,
	buf_block_t*	block,
	mtr_t*		mtr,
	btr_cur_t*	sea_cur,
	btr_cur_t*	cursor)
{
	mem_heap_t*	heap = mem_heap_create(100);
#ifdef UNIV_DEBUG
	/* In debug builds, additionally verify that the node pointer
	found really points back to the child block. */
	ulint*	offsets;

	offsets = rtr_page_get_father_block(
		NULL, heap, index, block, mtr, sea_cur, cursor);

	ulint	page_no = btr_node_ptr_get_child_page_no(cursor->page_cur.rec,
							 offsets);

	ut_ad(page_no == block->page.id.page_no());
#else
	rtr_page_get_father_block(
		NULL, heap, index, block, mtr, sea_cur, cursor);
#endif

	mem_heap_free(heap);
}
  576. /** Returns the upper level node pointer to a R-Tree page. It is assumed
  577. that mtr holds an SX-latch or X-latch on the tree.
  578. @return rec_get_offsets() of the node pointer record */
  579. static
  580. ulint*
  581. rtr_page_get_father_node_ptr(
  582. ulint* offsets,/*!< in: work area for the return value */
  583. mem_heap_t* heap, /*!< in: memory heap to use */
  584. btr_cur_t* sea_cur,/*!< in: search cursor */
  585. btr_cur_t* cursor, /*!< in: cursor pointing to user record,
  586. out: cursor on node pointer record,
  587. its page x-latched */
  588. mtr_t* mtr) /*!< in: mtr */
  589. {
  590. dtuple_t* tuple;
  591. rec_t* user_rec;
  592. rec_t* node_ptr;
  593. ulint level;
  594. ulint page_no;
  595. dict_index_t* index;
  596. rtr_mbr_t mbr;
  597. page_no = btr_cur_get_block(cursor)->page.id.page_no();
  598. index = btr_cur_get_index(cursor);
  599. ut_ad(srv_read_only_mode
  600. || mtr_memo_contains_flagged(mtr, dict_index_get_lock(index),
  601. MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK));
  602. ut_ad(dict_index_get_page(index) != page_no);
  603. level = btr_page_get_level(btr_cur_get_page(cursor));
  604. user_rec = btr_cur_get_rec(cursor);
  605. ut_a(page_rec_is_user_rec(user_rec));
  606. offsets = rec_get_offsets(user_rec, index, offsets, !level,
  607. ULINT_UNDEFINED, &heap);
  608. rtr_get_mbr_from_rec(user_rec, offsets, &mbr);
  609. tuple = rtr_index_build_node_ptr(
  610. index, &mbr, user_rec, page_no, heap, level);
  611. if (sea_cur && !sea_cur->rtr_info) {
  612. sea_cur = NULL;
  613. }
  614. rtr_get_father_node(index, level + 1, tuple, sea_cur, cursor,
  615. page_no, mtr);
  616. node_ptr = btr_cur_get_rec(cursor);
  617. ut_ad(!page_rec_is_comp(node_ptr)
  618. || rec_get_status(node_ptr) == REC_STATUS_NODE_PTR);
  619. offsets = rec_get_offsets(node_ptr, index, offsets, false,
  620. ULINT_UNDEFINED, &heap);
  621. ulint child_page = btr_node_ptr_get_child_page_no(node_ptr, offsets);
  622. if (child_page != page_no) {
  623. const rec_t* print_rec;
  624. ib::fatal error;
  625. error << "Corruption of index " << index->name
  626. << " of table " << index->table->name
  627. << " parent page " << page_no
  628. << " child page " << child_page;
  629. print_rec = page_rec_get_next(
  630. page_get_infimum_rec(page_align(user_rec)));
  631. offsets = rec_get_offsets(print_rec, index, offsets,
  632. page_rec_is_leaf(user_rec),
  633. ULINT_UNDEFINED, &heap);
  634. error << "; child ";
  635. rec_print(error.m_oss, print_rec,
  636. rec_get_info_bits(print_rec, rec_offs_comp(offsets)),
  637. offsets);
  638. offsets = rec_get_offsets(node_ptr, index, offsets, false,
  639. ULINT_UNDEFINED, &heap);
  640. error << "; parent ";
  641. rec_print(error.m_oss, print_rec,
  642. rec_get_info_bits(print_rec, rec_offs_comp(offsets)),
  643. offsets);
  644. error << ". You should dump + drop + reimport the table to"
  645. " fix the corruption. If the crash happens at"
  646. " database startup, see "
  647. "https://mariadb.com/kb/en/library/xtradbinnodb-recovery-modes/"
  648. " about forcing"
  649. " recovery. Then dump + drop + reimport.";
  650. }
  651. return(offsets);
  652. }
  653. /************************************************************//**
  654. Returns the father block to a page. It is assumed that mtr holds
  655. an X or SX latch on the tree.
  656. @return rec_get_offsets() of the node pointer record */
  657. ulint*
  658. rtr_page_get_father_block(
  659. /*======================*/
  660. ulint* offsets,/*!< in: work area for the return value */
  661. mem_heap_t* heap, /*!< in: memory heap to use */
  662. dict_index_t* index, /*!< in: b-tree index */
  663. buf_block_t* block, /*!< in: child page in the index */
  664. mtr_t* mtr, /*!< in: mtr */
  665. btr_cur_t* sea_cur,/*!< in: search cursor, contains information
  666. about parent nodes in search */
  667. btr_cur_t* cursor) /*!< out: cursor on node pointer record,
  668. its page x-latched */
  669. {
  670. rec_t* rec = page_rec_get_next(
  671. page_get_infimum_rec(buf_block_get_frame(block)));
  672. btr_cur_position(index, rec, block, cursor);
  673. return(rtr_page_get_father_node_ptr(offsets, heap, sea_cur,
  674. cursor, mtr));
  675. }
/********************************************************************//**
Returns the upper level node pointer to a R-Tree page. It is assumed
that mtr holds an x-latch on the tree. First tries to restore the
parent position saved in sea_cur's rtr_info (the fast path); otherwise
re-searches from the root. */
void
rtr_get_father_node(
/*================*/
	dict_index_t*	index,	/*!< in: index */
	ulint		level,	/*!< in: the tree level of search */
	const dtuple_t*	tuple,	/*!< in: data tuple; NOTE: n_fields_cmp in
				tuple must be set so that it cannot get
				compared to the node ptr page number field! */
	btr_cur_t*	sea_cur,/*!< in: search cursor */
	btr_cur_t*	btr_cur,/*!< in/out: tree cursor; the cursor page is
				s- or x-latched, but see also above! */
	ulint		page_no,/*!< Current page no */
	mtr_t*		mtr)	/*!< in: mtr */
{
	mem_heap_t*	heap = NULL;
	bool		ret = false;
	const rec_t*	rec;
	ulint		n_fields;
	bool		new_rtr = false;

	/* Try to optimally locate the parent node. Level should always be
	less than sea_cur->tree_height unless the root is splitting */
	if (sea_cur && sea_cur->tree_height > level) {

		ut_ad(mtr_memo_contains_flagged(mtr,
						dict_index_get_lock(index),
						MTR_MEMO_X_LOCK
						| MTR_MEMO_SX_LOCK));
		ret = rtr_cur_restore_position(
			BTR_CONT_MODIFY_TREE, sea_cur, level, mtr);

		/* Since we block shrinking tree nodes while there are
		active searches on them, this optimal locating should
		always succeed */
		ut_ad(ret);

		if (ret) {
			btr_pcur_t*	r_cursor = rtr_get_parent_cursor(
						sea_cur, level, false);

			rec = btr_pcur_get_rec(r_cursor);

			ut_ad(r_cursor->rel_pos == BTR_PCUR_ON);
			page_cur_position(rec,
					  btr_pcur_get_block(r_cursor),
					  btr_cur_get_page_cur(btr_cur));
			/* btr_cur borrows sea_cur's rtr_info here;
			new_rtr stays false so it is not freed below. */
			btr_cur->rtr_info = sea_cur->rtr_info;
			btr_cur->tree_height = sea_cur->tree_height;
			ut_ad(rtr_compare_cursor_rec(
				index, btr_cur, page_no, &heap));
			goto func_exit;
		}
	}

	/* We arrive here in one of two scenarios
	1) check table and btr_validate
	2) index root page being raised */
	ut_ad(!sea_cur || sea_cur->tree_height == level);

	if (btr_cur->rtr_info) {
		rtr_clean_rtr_info(btr_cur->rtr_info, true);
	} else {
		new_rtr = true;
	}

	btr_cur->rtr_info = rtr_create_rtr_info(false, false, btr_cur, index);

	if (sea_cur && sea_cur->tree_height == level) {
		/* root split, and search the new root */
		btr_cur_search_to_nth_level(
			index, level, tuple, PAGE_CUR_RTREE_LOCATE,
			BTR_CONT_MODIFY_TREE, btr_cur, 0,
			__FILE__, __LINE__, mtr);

	} else {
		/* btr_validate */
		ut_ad(level >= 1);
		ut_ad(!sea_cur);

		btr_cur_search_to_nth_level(
			index, level, tuple, PAGE_CUR_RTREE_LOCATE,
			BTR_CONT_MODIFY_TREE, btr_cur, 0,
			__FILE__, __LINE__, mtr);

		rec = btr_cur_get_rec(btr_cur);
		n_fields = dtuple_get_n_fields_cmp(tuple);

		/* The first dive missed; continue over the remaining
		candidate pages on the search path. */
		if (page_rec_is_infimum(rec)
		    || (btr_cur->low_match != n_fields)) {
			ret = rtr_pcur_getnext_from_path(
				tuple, PAGE_CUR_RTREE_LOCATE, btr_cur,
				level, BTR_CONT_MODIFY_TREE,
				true, mtr);

			ut_ad(ret && btr_cur->low_match == n_fields);
		}
	}

	ret = rtr_compare_cursor_rec(
		index, btr_cur, page_no, &heap);

	ut_ad(ret);

func_exit:
	if (heap) {
		mem_heap_free(heap);
	}

	if (new_rtr && btr_cur->rtr_info) {
		rtr_clean_rtr_info(btr_cur->rtr_info, true);
		btr_cur->rtr_info = NULL;
	}
}
/*******************************************************************//**
Create a RTree search info structure. The structure is zero-allocated,
registered in index->rtr_track->rtr_active, and owns its path vectors
and latches; counterpart cleanup is rtr_clean_rtr_info().
@return the newly allocated rtr_info_t */
rtr_info_t*
rtr_create_rtr_info(
/******************/
	bool		need_prdt,	/*!< in: Whether predicate lock
					is needed */
	bool		init_matches,	/*!< in: Whether to initiate the
					"matches" structure for collecting
					matched leaf records */
	btr_cur_t*	cursor,		/*!< in: tree search cursor */
	dict_index_t*	index)		/*!< in: index struct */
{
	rtr_info_t*	rtr_info;

	/* Fall back to the cursor's index when none was given. */
	index = index ? index : cursor->index;
	ut_ad(index);

	rtr_info = static_cast<rtr_info_t*>(ut_zalloc_nokey(sizeof(*rtr_info)));

	rtr_info->allocated = true;
	rtr_info->cursor = cursor;
	rtr_info->index = index;

	if (init_matches) {
		rtr_info->heap = mem_heap_create(sizeof(*(rtr_info->matches)));
		rtr_info->matches = static_cast<matched_rec_t*>(
					mem_heap_zalloc(
						rtr_info->heap,
						sizeof(*rtr_info->matches)));

		rtr_info->matches->matched_recs
			= UT_NEW_NOKEY(rtr_rec_vector());

		/* bufp is rec_buf rounded up to a page-aligned address
		inside the oversized buffer. */
		rtr_info->matches->bufp = page_align(rtr_info->matches->rec_buf
						     + UNIV_PAGE_SIZE_MAX + 1);
		mutex_create(LATCH_ID_RTR_MATCH_MUTEX,
			     &rtr_info->matches->rtr_match_mutex);
		rw_lock_create(PFS_NOT_INSTRUMENTED,
			       &(rtr_info->matches->block.lock),
			       SYNC_LEVEL_VARYING);
	}

	rtr_info->path = UT_NEW_NOKEY(rtr_node_path_t());
	rtr_info->parent_path = UT_NEW_NOKEY(rtr_node_path_t());
	rtr_info->need_prdt_lock = need_prdt;
	mutex_create(LATCH_ID_RTR_PATH_MUTEX,
		     &rtr_info->rtr_path_mutex);

	/* Register in the index's list of active R-tree searches. */
	mutex_enter(&index->rtr_track->rtr_active_mutex);
	index->rtr_track->rtr_active->push_back(rtr_info);
	mutex_exit(&index->rtr_track->rtr_active_mutex);
	return(rtr_info);
}
  819. /*******************************************************************//**
  820. Update a btr_cur_t with rtr_info */
  821. void
  822. rtr_info_update_btr(
  823. /******************/
  824. btr_cur_t* cursor, /*!< in/out: tree cursor */
  825. rtr_info_t* rtr_info) /*!< in: rtr_info to set to the
  826. cursor */
  827. {
  828. ut_ad(rtr_info);
  829. cursor->rtr_info = rtr_info;
  830. }
/*******************************************************************//**
Initialize a R-Tree Search structure.
When reinit==false the structure is assumed to be raw memory and every
member is reset; when reinit==true it is assumed to have been cleaned by
rtr_clean_rtr_info(free_all=false), which already NULLed path and
parent_path, so the path mutex and other members are preserved. */
void
rtr_init_rtr_info(
/****************/
	rtr_info_t*	rtr_info,	/*!< in: rtr_info to set to the
					cursor */
	bool		need_prdt,	/*!< in: Whether predicate lock is
					needed */
	btr_cur_t*	cursor,		/*!< in: tree search cursor */
	dict_index_t*	index,		/*!< in: index structure */
	bool		reinit)		/*!< in: Whether this is a reinit */
{
	ut_ad(rtr_info);

	if (!reinit) {
		/* Reset all members. */
		rtr_info->path = NULL;
		rtr_info->parent_path = NULL;
		rtr_info->matches = NULL;

		mutex_create(LATCH_ID_RTR_PATH_MUTEX,
			     &rtr_info->rtr_path_mutex);

		memset(rtr_info->tree_blocks, 0x0,
		       sizeof(rtr_info->tree_blocks));
		memset(rtr_info->tree_savepoints, 0x0,
		       sizeof(rtr_info->tree_savepoints));
		rtr_info->mbr.xmin = 0.0;
		rtr_info->mbr.xmax = 0.0;
		rtr_info->mbr.ymin = 0.0;
		rtr_info->mbr.ymax = 0.0;
		rtr_info->thr = NULL;
		rtr_info->heap = NULL;
		rtr_info->cursor = NULL;
		rtr_info->index = NULL;
		rtr_info->need_prdt_lock = false;
		rtr_info->need_page_lock = false;
		rtr_info->allocated = false;
		rtr_info->mbr_adj = false;
		rtr_info->fd_del = false;
		rtr_info->search_tuple = NULL;
		rtr_info->search_mode = PAGE_CUR_UNSUPP;
	}

	/* Any leftover matches from a previous search must have been
	cleared by rtr_clean_rtr_info(). */
	ut_ad(!rtr_info->matches || rtr_info->matches->matched_recs->empty());

	rtr_info->path = UT_NEW_NOKEY(rtr_node_path_t());
	rtr_info->parent_path = UT_NEW_NOKEY(rtr_node_path_t());
	rtr_info->need_prdt_lock = need_prdt;
	rtr_info->cursor = cursor;
	rtr_info->index = index;

	/* Register this search in the index's active-search list (see
	rtr_check_discard_page()). */
	mutex_enter(&index->rtr_track->rtr_active_mutex);
	index->rtr_track->rtr_active->push_back(rtr_info);
	mutex_exit(&index->rtr_track->rtr_active_mutex);
}
/**************************************************************//**
Clean up R-Tree search structure.
Deregisters the search from the index's active list, frees the path
vectors and any stored parent-path cursors, and - when free_all is set -
also tears down the match buffer, heap, mutexes and (if heap-allocated)
the rtr_info itself. With free_all==false the structure stays usable
for rtr_init_rtr_info(..., reinit=true). */
void
rtr_clean_rtr_info(
/*===============*/
	rtr_info_t*	rtr_info,	/*!< in: RTree search info */
	bool		free_all)	/*!< in: need to free rtr_info itself */
{
	dict_index_t*	index;
	bool		initialized = false;

	if (!rtr_info) {
		return;
	}

	index = rtr_info->index;

	/* Hold rtr_active_mutex across the whole cleanup so that
	rtr_check_discard_page() cannot walk this rtr_info while it is
	being dismantled. */
	if (index) {
		mutex_enter(&index->rtr_track->rtr_active_mutex);
	}

	/* Parent-path entries own heap-allocated pcur objects; close and
	free each one. */
	while (rtr_info->parent_path && !rtr_info->parent_path->empty()) {
		btr_pcur_t*	cur = rtr_info->parent_path->back().cursor;
		rtr_info->parent_path->pop_back();

		if (cur) {
			btr_pcur_close(cur);
			ut_free(cur);
		}
	}

	UT_DELETE(rtr_info->parent_path);
	rtr_info->parent_path = NULL;

	if (rtr_info->path != NULL) {
		UT_DELETE(rtr_info->path);
		rtr_info->path = NULL;
		/* A non-NULL path implies rtr_path_mutex was created;
		remember that so it can be destroyed below. */
		initialized = true;
	}

	if (rtr_info->matches) {
		rtr_info->matches->used = false;
		rtr_info->matches->locked = false;
		rtr_info->matches->valid = false;
		rtr_info->matches->matched_recs->clear();
	}

	if (index) {
		index->rtr_track->rtr_active->remove(rtr_info);
		mutex_exit(&index->rtr_track->rtr_active_mutex);
	}

	if (free_all) {
		if (rtr_info->matches) {
			if (rtr_info->matches->matched_recs != NULL) {
				UT_DELETE(rtr_info->matches->matched_recs);
			}

			rw_lock_free(&(rtr_info->matches->block.lock));

			mutex_destroy(&rtr_info->matches->rtr_match_mutex);
		}

		/* matches itself lives in this heap (see
		rtr_create_rtr_info()), so freeing the heap releases it. */
		if (rtr_info->heap) {
			mem_heap_free(rtr_info->heap);
		}

		if (initialized) {
			mutex_destroy(&rtr_info->rtr_path_mutex);
		}

		if (rtr_info->allocated) {
			ut_free(rtr_info);
		}
	}
}
/**************************************************************//**
Rebuild the "path" to exclude the removing page no.
Also prunes parent_path entries whose child is the removed page,
closing and freeing their stored cursors.
Caller must hold rtr_info->rtr_path_mutex. */
static
void
rtr_rebuild_path(
/*=============*/
	rtr_info_t*	rtr_info,	/*!< in: RTree search info */
	ulint		page_no)	/*!< in: page number being
					discarded; exclude it from
					the search path */
{
	rtr_node_path_t*		new_path
		= UT_NEW_NOKEY(rtr_node_path_t());

	rtr_node_path_t::iterator	rit;
#ifdef UNIV_DEBUG
	ulint	before_size = rtr_info->path->size();
#endif /* UNIV_DEBUG */

	/* Copy every visit except the one for the discarded page. */
	for (rit = rtr_info->path->begin();
	     rit != rtr_info->path->end(); ++rit) {
		node_visit_t	next_rec = *rit;

		if (next_rec.page_no == page_no) {
			continue;
		}

		new_path->push_back(next_rec);
#ifdef UNIV_DEBUG
		node_visit_t	rec = new_path->back();
		ut_ad(rec.level < rtr_info->cursor->tree_height
		      && rec.page_no > 0);
#endif /* UNIV_DEBUG */
	}

	UT_DELETE(rtr_info->path);

	/* The discarded page is expected to appear exactly once. */
	ut_ad(new_path->size() == before_size - 1);

	rtr_info->path = new_path;

	if (!rtr_info->parent_path->empty()) {
		rtr_node_path_t*	new_parent_path = UT_NEW_NOKEY(
			rtr_node_path_t());

		for (rit = rtr_info->parent_path->begin();
		     rit != rtr_info->parent_path->end(); ++rit) {
			node_visit_t	next_rec = *rit;

			if (next_rec.child_no == page_no) {
				/* This entry pointed at the discarded
				page; release its stored cursor. */
				btr_pcur_t*	cur = next_rec.cursor;

				if (cur) {
					btr_pcur_close(cur);
					ut_free(cur);
				}

				continue;
			}

			new_parent_path->push_back(next_rec);
		}

		UT_DELETE(rtr_info->parent_path);

		rtr_info->parent_path = new_parent_path;
	}
}
/**************************************************************//**
Check whether a discarding page is in anyone's search path.
If so, rebuild that search's path without the page, invalidate any
matches collected from it, and finally release predicate locks held
on the page. */
void
rtr_check_discard_page(
/*===================*/
	dict_index_t*	index,	/*!< in: index */
	btr_cur_t*	cursor, /*!< in: cursor on the page to discard: not on
				the root page */
	buf_block_t*	block)	/*!< in: block of page to be discarded */
{
	ulint			pageno = block->page.id.page_no();
	rtr_info_t*		rtr_info;
	rtr_info_active::iterator	it;

	mutex_enter(&index->rtr_track->rtr_active_mutex);

	for (it = index->rtr_track->rtr_active->begin();
	     it != index->rtr_track->rtr_active->end(); ++it) {
		rtr_info = *it;
		rtr_node_path_t::iterator	rit;
		bool	found = false;

		/* The discarding cursor's own search info needs no fixup. */
		if (cursor && rtr_info == cursor->rtr_info) {
			continue;
		}

		mutex_enter(&rtr_info->rtr_path_mutex);

		for (rit = rtr_info->path->begin();
		     rit != rtr_info->path->end(); ++rit) {
			node_visit_t	node = *rit;

			if (node.page_no == pageno) {
				found = true;
				break;
			}
		}

		if (found) {
			rtr_rebuild_path(rtr_info, pageno);
		}
		mutex_exit(&rtr_info->rtr_path_mutex);

		/* Matched leaf records copied from this page are stale;
		drop them and mark the shadow block invalid. */
		if (rtr_info->matches) {
			mutex_enter(&rtr_info->matches->rtr_match_mutex);

			if ((&rtr_info->matches->block)->page.id.page_no()
			     == pageno) {
				if (!rtr_info->matches->matched_recs->empty()) {
					rtr_info->matches->matched_recs->clear();
				}
				ut_ad(rtr_info->matches->matched_recs->empty());
				rtr_info->matches->valid = false;
			}

			mutex_exit(&rtr_info->matches->rtr_match_mutex);
		}
	}

	mutex_exit(&index->rtr_track->rtr_active_mutex);

	lock_mutex_enter();
	lock_prdt_page_free_from_discard(block, lock_sys.prdt_hash);
	lock_prdt_page_free_from_discard(block, lock_sys.prdt_page_hash);
	lock_mutex_exit();
}
/** Restore the stored position of a persistent cursor bufferfixing the page.
First tries an optimistic buf_page_optimistic_get() using the stored
modify clock; if the page has changed, re-searches the stored page and,
when the page SSN shows it was split after the position was stored,
follows right siblings until the record is found.
@return true if the position was restored */
static
bool
rtr_cur_restore_position(
	ulint		latch_mode,	/*!< in: BTR_SEARCH_LEAF, ... */
	btr_cur_t*	btr_cur,	/*!< in: detached persistent cursor */
	ulint		level,		/*!< in: index level */
	mtr_t*		mtr)		/*!< in: mtr */
{
	dict_index_t*	index;
	mem_heap_t*	heap;
	btr_pcur_t*	r_cursor = rtr_get_parent_cursor(btr_cur, level, false);
	dtuple_t*	tuple;
	bool		ret = false;

	ut_ad(mtr);
	ut_ad(r_cursor);
	ut_ad(mtr->is_active());

	index = btr_cur_get_index(btr_cur);

	if (r_cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE
	    || r_cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE) {
		return(false);
	}

	DBUG_EXECUTE_IF(
		"rtr_pessimistic_position",
		r_cursor->modify_clock = 100;
	);

	ut_ad(latch_mode == BTR_CONT_MODIFY_TREE);

	/* Optimistic restore: succeeds only if the block was not
	withdrawn and its modify clock is unchanged. */
	if (!buf_pool_is_obsolete(r_cursor->withdraw_clock)
	    && buf_page_optimistic_get(RW_X_LATCH,
				       r_cursor->block_when_stored,
				       r_cursor->modify_clock,
				       __FILE__, __LINE__, mtr)) {
		ut_ad(r_cursor->pos_state == BTR_PCUR_IS_POSITIONED);

		ut_ad(r_cursor->rel_pos == BTR_PCUR_ON);
#ifdef UNIV_DEBUG
		/* Verify the current record still equals the stored copy. */
		do {
			const rec_t*	rec;
			const ulint*	offsets1;
			const ulint*	offsets2;
			ulint		comp;

			rec = btr_pcur_get_rec(r_cursor);

			heap = mem_heap_create(256);
			offsets1 = rec_get_offsets(
				r_cursor->old_rec, index, NULL, !level,
				r_cursor->old_n_fields, &heap);
			offsets2 = rec_get_offsets(
				rec, index, NULL, !level,
				r_cursor->old_n_fields, &heap);

			comp = rec_offs_comp(offsets1);

			if (rec_get_info_bits(r_cursor->old_rec, comp)
			    & REC_INFO_MIN_REC_FLAG) {
				ut_ad(rec_get_info_bits(rec, comp)
					& REC_INFO_MIN_REC_FLAG);
			} else {
				ut_ad(!cmp_rec_rec(r_cursor->old_rec,
						   rec, offsets1, offsets2,
						   index));
			}

			mem_heap_free(heap);
		} while (0);
#endif /* UNIV_DEBUG */

		return(true);
	}

	/* Page has changed, for R-Tree, the page cannot be shrunk away,
	so we search the page and its right siblings */
	buf_block_t*	block;
	node_seq_t	page_ssn;
	const page_t*	page;
	page_cur_t*	page_cursor;
	node_visit_t*	node = rtr_get_parent_node(btr_cur, level, false);
	ulint		space = dict_index_get_space(index);
	node_seq_t	path_ssn = node->seq_no;
	page_size_t	page_size = dict_table_page_size(index->table);

	ulint		page_no = node->page_no;

	heap = mem_heap_create(256);

	tuple = dict_index_build_data_tuple(r_cursor->old_rec, index, !level,
					    r_cursor->old_n_fields, heap);

	page_cursor = btr_pcur_get_page_cur(r_cursor);
	ut_ad(r_cursor == node->cursor);

search_again:
	page_id_t	page_id(space, page_no);
	dberr_t err = DB_SUCCESS;

	block = buf_page_get_gen(
		page_id, page_size, RW_X_LATCH, NULL,
		BUF_GET, __FILE__, __LINE__, mtr, &err);

	ut_ad(block);

	/* Get the page SSN */
	page = buf_block_get_frame(block);
	page_ssn = page_get_ssn_id(page);

	ulint low_match = page_cur_search(
				block, index, tuple, PAGE_CUR_LE, page_cursor);

	if (low_match == r_cursor->old_n_fields) {
		const rec_t*	rec;
		const ulint*	offsets1;
		const ulint*	offsets2;
		ulint		comp;

		rec = btr_pcur_get_rec(r_cursor);

		offsets1 = rec_get_offsets(
			r_cursor->old_rec, index, NULL, !level,
			r_cursor->old_n_fields, &heap);
		offsets2 = rec_get_offsets(
			rec, index, NULL, !level,
			r_cursor->old_n_fields, &heap);

		comp = rec_offs_comp(offsets1);

		/* The position is restored when either both records carry
		the MIN_REC flag or they compare equal. */
		if ((rec_get_info_bits(r_cursor->old_rec, comp)
		     & REC_INFO_MIN_REC_FLAG)
		    && (rec_get_info_bits(rec, comp) & REC_INFO_MIN_REC_FLAG)) {
			r_cursor->pos_state = BTR_PCUR_IS_POSITIONED;
			ret = true;
		} else if (!cmp_rec_rec(r_cursor->old_rec, rec, offsets1, offsets2,
					index)) {
			r_cursor->pos_state = BTR_PCUR_IS_POSITIONED;
			ret = true;
		}
	}

	/* Check the page SSN to see if it has been splitted, if so, search
	the right page */
	if (!ret && page_ssn > path_ssn) {
		page_no = btr_page_get_next(page, mtr);
		goto search_again;
	}

	mem_heap_free(heap);

	return(ret);
}
  1172. /****************************************************************//**
  1173. Copy the leaf level R-tree record, and push it to matched_rec in rtr_info */
  1174. static
  1175. void
  1176. rtr_leaf_push_match_rec(
  1177. /*====================*/
  1178. const rec_t* rec, /*!< in: record to copy */
  1179. rtr_info_t* rtr_info, /*!< in/out: search stack */
  1180. ulint* offsets, /*!< in: offsets */
  1181. bool is_comp) /*!< in: is compact format */
  1182. {
  1183. byte* buf;
  1184. matched_rec_t* match_rec = rtr_info->matches;
  1185. rec_t* copy;
  1186. ulint data_len;
  1187. rtr_rec_t rtr_rec;
  1188. buf = match_rec->block.frame + match_rec->used;
  1189. ut_ad(page_rec_is_leaf(rec));
  1190. copy = rec_copy(buf, rec, offsets);
  1191. if (is_comp) {
  1192. rec_set_next_offs_new(copy, PAGE_NEW_SUPREMUM);
  1193. } else {
  1194. rec_set_next_offs_old(copy, PAGE_OLD_SUPREMUM);
  1195. }
  1196. rtr_rec.r_rec = copy;
  1197. rtr_rec.locked = false;
  1198. match_rec->matched_recs->push_back(rtr_rec);
  1199. match_rec->valid = true;
  1200. data_len = rec_offs_data_size(offsets) + rec_offs_extra_size(offsets);
  1201. match_rec->used += data_len;
  1202. ut_ad(match_rec->used < UNIV_PAGE_SIZE);
  1203. }
/**************************************************************//**
Store the parent path cursor.
Walks parent_path from its back (deepest entry first), storing the
position of every cursor whose level is at or below the given level and
whose block matches the supplied block; stops at the first mismatch.
@return number of cursor stored */
ulint
rtr_store_parent_path(
/*==================*/
	const buf_block_t*	block,	/*!< in: block of the page */
	btr_cur_t*		btr_cur,/*!< in/out: persistent cursor */
	ulint			latch_mode,
					/*!< in: latch_mode */
	ulint			level,	/*!< in: index level */
	mtr_t*			mtr)	/*!< in: mtr */
{
	ulint	num = btr_cur->rtr_info->parent_path->size();
	ulint	num_stored = 0;

	while (num >= 1) {
		node_visit_t*	node = &(*btr_cur->rtr_info->parent_path)[
					num - 1];
		btr_pcur_t*	r_cursor = node->cursor;
		buf_block_t*	cur_block;

		/* Entries above the requested level are left untouched. */
		if (node->level > level) {
			break;
		}

		r_cursor->pos_state = BTR_PCUR_IS_POSITIONED;
		r_cursor->latch_mode = latch_mode;

		cur_block = btr_pcur_get_block(r_cursor);

		if (cur_block == block) {
			btr_pcur_store_position(r_cursor, mtr);
			num_stored++;
		} else {
			break;
		}

		num--;
	}

	return(num_stored);
}
  1240. /**************************************************************//**
  1241. push a nonleaf index node to the search path for insertion */
  1242. static
  1243. void
  1244. rtr_non_leaf_insert_stack_push(
  1245. /*===========================*/
  1246. dict_index_t* index, /*!< in: index descriptor */
  1247. rtr_node_path_t* path, /*!< in/out: search path */
  1248. ulint level, /*!< in: index page level */
  1249. ulint child_no,/*!< in: child page no */
  1250. const buf_block_t* block, /*!< in: block of the page */
  1251. const rec_t* rec, /*!< in: positioned record */
  1252. double mbr_inc)/*!< in: MBR needs to be enlarged */
  1253. {
  1254. node_seq_t new_seq;
  1255. btr_pcur_t* my_cursor;
  1256. ulint page_no = block->page.id.page_no();
  1257. my_cursor = static_cast<btr_pcur_t*>(
  1258. ut_malloc_nokey(sizeof(*my_cursor)));
  1259. btr_pcur_init(my_cursor);
  1260. page_cur_position(rec, block, btr_pcur_get_page_cur(my_cursor));
  1261. (btr_pcur_get_btr_cur(my_cursor))->index = index;
  1262. new_seq = rtr_get_current_ssn_id(index);
  1263. rtr_non_leaf_stack_push(path, page_no, new_seq, level, child_no,
  1264. my_cursor, mbr_inc);
  1265. }
/** Copy a buf_block_t strcuture, except "block->lock" and "block->mutex".
Performed field-by-field (not memcpy of the whole struct) precisely so
the synchronization members are left untouched.
@param[in,out]	matches	copy to match->block
@param[in]	block	block to copy */
static
void
rtr_copy_buf(
	matched_rec_t*		matches,
	const buf_block_t*	block)
{
	/* Copy all members of "block" to "matches->block" except "mutex"
	and "lock". We skip "mutex" and "lock" because they are not used
	from the dummy buf_block_t we create here and because memcpy()ing
	them generates (valid) compiler warnings that the vtable pointer
	will be copied. It is also undefined what will happen with the
	newly memcpy()ed mutex if the source mutex was acquired by
	(another) thread while it was copied. */
	memcpy(&matches->block.page, &block->page, sizeof(buf_page_t));
	matches->block.frame = block->frame;
	matches->block.unzip_LRU = block->unzip_LRU;

	ut_d(matches->block.in_unzip_LRU_list = block->in_unzip_LRU_list);
	ut_d(matches->block.in_withdraw_list = block->in_withdraw_list);

	/* Skip buf_block_t::mutex */
	/* Skip buf_block_t::lock */
	matches->block.lock_hash_val = block->lock_hash_val;
	matches->block.modify_clock = block->modify_clock;
#ifdef BTR_CUR_HASH_ADAPT
	/* Adaptive hash index bookkeeping fields. */
	matches->block.n_hash_helps = block->n_hash_helps;
	matches->block.n_fields = block->n_fields;
	matches->block.left_side = block->left_side;
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	matches->block.n_pointers = block->n_pointers;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	matches->block.curr_n_fields = block->curr_n_fields;
	matches->block.curr_left_side = block->curr_left_side;
	matches->block.index = block->index;
#endif /* BTR_CUR_HASH_ADAPT */
	ut_d(matches->block.debug_latch = block->debug_latch);
}
/****************************************************************//**
Generate a shadow copy of the page block header to save the
matched records.
After this, matches->block.frame points at the private bufp buffer,
initialized with the real page's header up to the supremum, so it can
be searched like a normal btr page. */
static
void
rtr_init_match(
/*===========*/
	matched_rec_t*		matches,/*!< in/out: match to initialize */
	const buf_block_t*	block,	/*!< in: buffer block */
	const page_t*		page)	/*!< in: buffer page */
{
	ut_ad(matches->matched_recs->empty());
	matches->locked = false;
	rtr_copy_buf(matches, block);
	/* Redirect the shadow block's frame away from the real page
	into the private page-aligned buffer. */
	matches->block.frame = matches->bufp;
	matches->valid = false;
	/* We have to copy PAGE_W*_SUPREMUM_END bytes so that we can
	use infimum/supremum of this page as normal btr page for search. */
	memcpy(matches->block.frame, page, page_is_comp(page)
						? PAGE_NEW_SUPREMUM_END
						: PAGE_OLD_SUPREMUM_END);
	/* Matched record copies are appended starting right after the
	copied header (see rtr_leaf_push_match_rec()). */
	matches->used = page_is_comp(page)
				? PAGE_NEW_SUPREMUM_END
				: PAGE_OLD_SUPREMUM_END;
#ifdef RTR_SEARCH_DIAGNOSTIC
	ulint pageno = page_get_page_no(page);
	fprintf(stderr, "INNODB_RTR: Searching leaf page %d\n",
		static_cast<int>(pageno));
#endif /* RTR_SEARCH_DIAGNOSTIC */
}
  1334. /****************************************************************//**
  1335. Get the bounding box content from an index record */
  1336. void
  1337. rtr_get_mbr_from_rec(
  1338. /*=================*/
  1339. const rec_t* rec, /*!< in: data tuple */
  1340. const ulint* offsets,/*!< in: offsets array */
  1341. rtr_mbr_t* mbr) /*!< out MBR */
  1342. {
  1343. ulint rec_f_len;
  1344. const byte* data;
  1345. data = rec_get_nth_field(rec, offsets, 0, &rec_f_len);
  1346. rtr_read_mbr(data, mbr);
  1347. }
  1348. /****************************************************************//**
  1349. Get the bounding box content from a MBR data record */
  1350. void
  1351. rtr_get_mbr_from_tuple(
  1352. /*===================*/
  1353. const dtuple_t* dtuple, /*!< in: data tuple */
  1354. rtr_mbr* mbr) /*!< out: mbr to fill */
  1355. {
  1356. const dfield_t* dtuple_field;
  1357. ulint dtuple_f_len;
  1358. byte* data;
  1359. dtuple_field = dtuple_get_nth_field(dtuple, 0);
  1360. dtuple_f_len = dfield_get_len(dtuple_field);
  1361. ut_a(dtuple_f_len >= 4 * sizeof(double));
  1362. data = static_cast<byte*>(dfield_get_data(dtuple_field));
  1363. rtr_read_mbr(data, mbr);
  1364. }
/****************************************************************//**
Searches the right position in rtree for a page cursor.
Scans all user records on the page, comparing each against the search
tuple under the given mode. On non-leaf pages, matches are pushed to
rtr_info->path (and parent_path for LOCATE/GET_FATHER); on leaf pages,
matched records are copied into rtr_info->matches. For INSERT mode the
child whose MBR needs the least enlargement is chosen instead.
@return true if a matching record was found */
bool
rtr_cur_search_with_match(
/*======================*/
	const buf_block_t*	block,	/*!< in: buffer block */
	dict_index_t*		index,	/*!< in: index descriptor */
	const dtuple_t*		tuple,	/*!< in: data tuple */
	page_cur_mode_t		mode,	/*!< in: PAGE_CUR_RTREE_INSERT,
					PAGE_CUR_RTREE_LOCATE etc. */
	page_cur_t*		cursor,	/*!< in/out: page cursor */
	rtr_info_t*		rtr_info)/*!< in/out: search stack */
{
	bool		found = false;
	const page_t*	page;
	const rec_t*	rec;
	const rec_t*	last_rec;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	mem_heap_t*	heap = NULL;
	int		cmp = 1;
	double		least_inc = DBL_MAX;
	const rec_t*	best_rec;
	const rec_t*	last_match_rec = NULL;
	bool		match_init = false;
	ulint		space = block->page.id.space();
	page_cur_mode_t	orig_mode = mode;
	const rec_t*	first_rec = NULL;

	rec_offs_init(offsets_);

	ut_ad(RTREE_SEARCH_MODE(mode));

	ut_ad(dict_index_is_spatial(index));

	page = buf_block_get_frame(block);

	const ulint level = btr_page_get_level(page);
	const bool is_leaf = !level;

	/* LOCATE is only used on non-leaf levels; on such levels it
	degenerates to a WITHIN comparison. */
	if (mode == PAGE_CUR_RTREE_LOCATE) {
		ut_ad(level != 0);
		mode = PAGE_CUR_WITHIN;
	}

	rec = page_dir_slot_get_rec(page_dir_get_nth_slot(page, 0));

	last_rec = rec;
	best_rec = rec;

	if (page_rec_is_infimum(rec)) {
		rec = page_rec_get_next_const(rec);
	}

	/* Check insert tuple size is larger than first rec, and try to
	avoid it if possible */
	if (mode == PAGE_CUR_RTREE_INSERT && !page_rec_is_supremum(rec)) {

		ulint	new_rec_size = rec_get_converted_size(index, tuple, 0);

		offsets = rec_get_offsets(rec, index, offsets, is_leaf,
					  dtuple_get_n_fields_cmp(tuple),
					  &heap);

		if (rec_offs_size(offsets) < new_rec_size) {
			first_rec = rec;
		}

		/* If this is the left-most page of this index level
		and the table is a compressed table, try to avoid
		first page as much as possible, as there will be problem
		when update MIN_REC rec in compress table */
		if (buf_block_get_page_zip(block)
		    && !page_has_prev(page)
		    && page_get_n_recs(page) >= 2) {

			rec = page_rec_get_next_const(rec);
		}
	}

	/* Main scan over all user records on the page. */
	while (!page_rec_is_supremum(rec)) {
		offsets = rec_get_offsets(rec, index, offsets, is_leaf,
					  dtuple_get_n_fields_cmp(tuple),
					  &heap);
		if (!is_leaf) {
			switch (mode) {
			case PAGE_CUR_CONTAIN:
			case PAGE_CUR_INTERSECT:
			case PAGE_CUR_MBR_EQUAL:
				/* At non-leaf level, we will need to check
				both CONTAIN and INTERSECT for either of
				the search mode */
				cmp = cmp_dtuple_rec_with_gis(
					tuple, rec, offsets, PAGE_CUR_CONTAIN);

				if (cmp != 0) {
					cmp = cmp_dtuple_rec_with_gis(
						tuple, rec, offsets,
						PAGE_CUR_INTERSECT);
				}
				break;
			case PAGE_CUR_DISJOINT:
				cmp = cmp_dtuple_rec_with_gis(
					tuple, rec, offsets, mode);

				if (cmp != 0) {
					cmp = cmp_dtuple_rec_with_gis(
						tuple, rec, offsets,
						PAGE_CUR_INTERSECT);
				}
				break;
			case PAGE_CUR_RTREE_INSERT:
				double	increase;
				double	area;

				cmp = cmp_dtuple_rec_with_gis(
					tuple, rec, offsets, PAGE_CUR_WITHIN);

				if (cmp != 0) {
					increase = rtr_rec_cal_increase(
						tuple, rec, offsets, &area);
					/* Once it goes beyond DBL_MAX,
					it would not make sense to record
					such value, just make it
					DBL_MAX / 2  */
					if (increase >= DBL_MAX) {
						increase = DBL_MAX / 2;
					}

					if (increase < least_inc) {
						least_inc = increase;
						best_rec = rec;
					} else if (best_rec
						   && best_rec == first_rec) {
						/* if first_rec is set,
						we will try to avoid it */
						least_inc = increase;
						best_rec = rec;
					}
				}
				break;
			case PAGE_CUR_RTREE_GET_FATHER:
				cmp = cmp_dtuple_rec_with_gis_internal(
					tuple, rec, offsets);
				break;
			default:
				/* WITHIN etc. */
				cmp = cmp_dtuple_rec_with_gis(
					tuple, rec, offsets, mode);
			}
		} else {
			/* At leaf level, INSERT should translate to LE */
			ut_ad(mode != PAGE_CUR_RTREE_INSERT);

			cmp = cmp_dtuple_rec_with_gis(
				tuple, rec, offsets, mode);
		}

		if (cmp == 0) {
			found = true;

			/* If located, the matching node/rec will be pushed
			to rtr_info->path for non-leaf nodes, or
			rtr_info->matches for leaf nodes */
			if (rtr_info && mode != PAGE_CUR_RTREE_INSERT) {
				if (!is_leaf) {
					ulint		page_no;
					node_seq_t	new_seq;
					bool		is_loc;

					is_loc = (orig_mode
						  == PAGE_CUR_RTREE_LOCATE
						  || orig_mode
						  == PAGE_CUR_RTREE_GET_FATHER);

					offsets = rec_get_offsets(
						rec, index, offsets, false,
						ULINT_UNDEFINED, &heap);

					page_no = btr_node_ptr_get_child_page_no(
						rec, offsets);

					ut_ad(level >= 1);

					/* Get current SSN, before we insert
					it into the path stack */
					new_seq = rtr_get_current_ssn_id(index);

					rtr_non_leaf_stack_push(
						rtr_info->path,
						page_no,
						new_seq, level - 1, 0,
						NULL, 0);

					if (is_loc) {
						rtr_non_leaf_insert_stack_push(
							index,
							rtr_info->parent_path,
							level, page_no, block,
							rec, 0);
					}

					if (!srv_read_only_mode
					    && (rtr_info->need_page_lock
						|| !is_loc)) {

						/* Lock the page, preventing it
						from being shrunk */
						lock_place_prdt_page_lock(
							space, page_no, index,
							rtr_info->thr);
					}
				} else {
					ut_ad(orig_mode
					      != PAGE_CUR_RTREE_LOCATE);

					if (!match_init) {
						rtr_init_match(
							rtr_info->matches,
							block, page);
						match_init = true;
					}

					/* Collect matched records on page */
					offsets = rec_get_offsets(
						rec, index, offsets, true,
						ULINT_UNDEFINED, &heap);
					rtr_leaf_push_match_rec(
						rec, rtr_info, offsets,
						page_is_comp(page));
				}

				last_match_rec = rec;
			} else {
				/* This is the insertion case, it will break
				once it finds the first MBR that can accomodate
				the inserting rec */
				break;
			}
		}

		last_rec = rec;

		rec = page_rec_get_next_const(rec);
	}

	/* All records on page are searched */
	if (page_rec_is_supremum(rec)) {
		if (!is_leaf) {
			if (!found) {
				/* No match case, if it is for insertion,
				then we select the record that result in
				least increased area */
				if (mode == PAGE_CUR_RTREE_INSERT) {
					ulint	child_no;
					ut_ad(least_inc < DBL_MAX);
					offsets = rec_get_offsets(
						best_rec, index, offsets,
						false, ULINT_UNDEFINED, &heap);
					child_no =
					btr_node_ptr_get_child_page_no(
						best_rec, offsets);

					rtr_non_leaf_insert_stack_push(
						index, rtr_info->parent_path,
						level, child_no, block,
						best_rec, least_inc);

					page_cur_position(best_rec, block,
							  cursor);
					rtr_info->mbr_adj = true;
				} else {
					/* Position at the last rec of the
					page, if it is not the leaf page */
					page_cur_position(last_rec, block,
							  cursor);
				}
			} else {
				/* There are matching records, position
				in the last matching records */
				if (rtr_info) {
					rec = last_match_rec;
					page_cur_position(
						rec, block, cursor);
				}
			}
		} else if (rtr_info) {
			/* Leaf level, no match, position at the
			last (supremum) rec */
			if (!last_match_rec) {
				page_cur_position(rec, block, cursor);
				goto func_exit;
			}

			/* There are matched records */
			matched_rec_t*	match_rec = rtr_info->matches;
			rtr_rec_t	test_rec;

			test_rec = match_rec->matched_recs->back();
#ifdef UNIV_DEBUG
			ulint		offsets_2[REC_OFFS_NORMAL_SIZE];
			ulint*		offsets2	= offsets_2;
			rec_offs_init(offsets_2);

			ut_ad(found);

			/* Verify the record to be positioned is the same
			as the last record in matched_rec vector */
			offsets2 = rec_get_offsets(test_rec.r_rec, index,
						   offsets2, true,
						   ULINT_UNDEFINED, &heap);

			offsets = rec_get_offsets(last_match_rec, index,
						  offsets, true,
						  ULINT_UNDEFINED, &heap);

			ut_ad(cmp_rec_rec(test_rec.r_rec, last_match_rec,
					  offsets2, offsets, index) == 0);
#endif /* UNIV_DEBUG */
			/* Pop the last match record and position on it.
			Note: the cursor is positioned on the COPY inside
			the shadow match block, not on the real page. */
			match_rec->matched_recs->pop_back();
			page_cur_position(test_rec.r_rec, &match_rec->block,
					  cursor);
		}
	} else {

		/* The scan broke out early: insertion found an MBR that
		can accommodate the new record without enlargement. */
		if (mode == PAGE_CUR_RTREE_INSERT) {
			ulint	child_no;
			ut_ad(!last_match_rec && rec);

			offsets = rec_get_offsets(
				rec, index, offsets, false,
				ULINT_UNDEFINED, &heap);

			child_no = btr_node_ptr_get_child_page_no(rec, offsets);

			rtr_non_leaf_insert_stack_push(
				index, rtr_info->parent_path, level, child_no,
				block, rec, 0);

		} else if (rtr_info && found && !is_leaf) {
			rec = last_match_rec;
		}

		page_cur_position(rec, block, cursor);
	}

#ifdef UNIV_DEBUG
	/* Verify that we are positioned at the same child page as pushed in
	the path stack */
	if (!is_leaf && (!page_rec_is_supremum(rec) || found)
	    && mode != PAGE_CUR_RTREE_INSERT) {
		ulint		page_no;

		offsets = rec_get_offsets(rec, index, offsets, false,
					  ULINT_UNDEFINED, &heap);
		page_no = btr_node_ptr_get_child_page_no(rec, offsets);

		if (rtr_info && found) {
			rtr_node_path_t*	path = rtr_info->path;
			node_visit_t		last_visit = path->back();

			ut_ad(last_visit.page_no == page_no);
		}
	}
#endif /* UNIV_DEBUG */

func_exit:
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	return(found);
}