/******************************************************
Row versions

(c) 1997 Innobase Oy

Created 2/6/1997 Heikki Tuuri
*******************************************************/

#include "row0vers.h"

#ifdef UNIV_NONINL
#include "row0vers.ic"
#endif

#include "dict0dict.h"
#include "dict0boot.h"
#include "btr0btr.h"
#include "mach0data.h"
#include "trx0rseg.h"
#include "trx0trx.h"
#include "trx0roll.h"
#include "trx0undo.h"
#include "trx0purge.h"
#include "trx0rec.h"
#include "que0que.h"
#include "row0row.h"
#include "row0upd.h"
#include "rem0cmp.h"
#include "read0read.h"
#include "lock0lock.h"
/*********************************************************************
Finds out if an active transaction has inserted or modified a secondary
index record. NOTE: the kernel mutex is temporarily released in this
function! */

trx_t*
row_vers_impl_x_locked_off_kernel(
/*==============================*/
				/* out: NULL if committed, else the active
				transaction; NOTE that the kernel mutex is
				temporarily released! */
	const rec_t*	rec,	/* in: record in a secondary index */
	dict_index_t*	index,	/* in: the secondary index */
	const ulint*	offsets)/* in: rec_get_offsets(rec, index) */
{
	dict_index_t*	clust_index;
	rec_t*		clust_rec;
	ulint*		clust_offsets;
	rec_t*		version;
	rec_t*		prev_version;
	dulint		trx_id;
	dulint		prev_trx_id;
	mem_heap_t*	heap;
	mem_heap_t*	heap2;
	dtuple_t*	row;
	dtuple_t*	entry	= NULL; /* assignment to eliminate compiler
					warning */
	trx_t*		trx;
	ulint		vers_del;
	ulint		rec_del;
	ulint		err;
	mtr_t		mtr;
	ulint		comp;

	ut_ad(mutex_own(&kernel_mutex));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	mutex_exit(&kernel_mutex);

	mtr_start(&mtr);

	/* Search for the clustered index record: this is a time-consuming
	operation: therefore we release the kernel mutex; also, the release
	is required by the latching order convention. The latch on the
	clustered index locks the top of the stack of versions. We also
	reserve purge_latch to lock the bottom of the version stack. */

	clust_rec = row_get_clust_rec(BTR_SEARCH_LEAF, rec, index,
				      &clust_index, &mtr);
	if (!clust_rec) {
		/* In a rare case it is possible that no clust rec is found
		for a secondary index record: if in row0umod.c
		row_undo_mod_remove_clust_low() we have already removed the
		clust rec, while purge is still cleaning and removing
		secondary index records associated with earlier versions of
		the clustered index record. In that case there cannot be
		any implicit lock on the secondary index record, because
		an active transaction which has modified the secondary index
		record has also modified the clustered index record. And in
		a rollback we always undo the modifications to secondary index
		records before the clustered index record. */

		mutex_enter(&kernel_mutex);
		mtr_commit(&mtr);

		return(NULL);
	}

	heap = mem_heap_create(1024);
	clust_offsets = rec_get_offsets(clust_rec, clust_index, NULL,
					ULINT_UNDEFINED, &heap);
	trx_id = row_get_rec_trx_id(clust_rec, clust_index, clust_offsets);

	mtr_s_lock(&(purge_sys->latch), &mtr);

	mutex_enter(&kernel_mutex);

	trx = NULL;
	if (!trx_is_active(trx_id)) {
		/* The transaction that modified or inserted clust_rec is no
		longer active: no implicit lock on rec */

		goto exit_func;
	}

	if (!lock_check_trx_id_sanity(trx_id, clust_rec, clust_index,
				      clust_offsets, TRUE)) {
		/* Corruption noticed: try to avoid a crash by returning */

		goto exit_func;
	}

	comp = page_rec_is_comp(rec);
	ut_ad(index->table == clust_index->table);
	ut_ad(!!comp == dict_table_is_comp(index->table));
	ut_ad(!comp == !page_rec_is_comp(clust_rec));

	/* We check whether some earlier version of the clustered index
	record, modified by the trx_id transaction, would require rec to be
	in a different state (delete marked or unmarked, or have different
	field values, or not existing). If there is such a version, then rec
	was modified by the trx_id transaction, and it has an implicit x-lock
	on rec. Note that if clust_rec itself would require rec to be in a
	different state, then the trx_id transaction has not yet had time to
	modify rec, and does not necessarily have an implicit x-lock on rec. */

	rec_del = rec_get_deleted_flag(rec, comp);
	trx = NULL;

	version = clust_rec;

	for (;;) {
		mutex_exit(&kernel_mutex);

		/* While we retrieve an earlier version of clust_rec, we
		release the kernel mutex, because it may take time to access
		the disk. After the release, we have to check if the trx_id
		transaction is still active. We keep the semaphore in mtr on
		the clust_rec page, so that no other transaction can update
		it and get an implicit x-lock on rec. */

		heap2 = heap;
		heap = mem_heap_create(1024);
		err = trx_undo_prev_version_build(clust_rec, &mtr, version,
						  clust_index, clust_offsets,
						  heap, &prev_version);
		mem_heap_free(heap2); /* free version and clust_offsets */

		if (prev_version) {
			row_ext_t*	ext;

			clust_offsets = rec_get_offsets(
				prev_version, clust_index, NULL,
				ULINT_UNDEFINED, &heap);
			row = row_build(ROW_COPY_POINTERS, clust_index,
					prev_version, clust_offsets,
					&ext, heap);
			entry = row_build_index_entry(row, ext, index, heap);
		}

		mutex_enter(&kernel_mutex);

		if (!trx_is_active(trx_id)) {
			/* Transaction no longer active: no implicit x-lock */

			break;
		}

		/* If the transaction is still active, the previous version
		of clust_rec must be accessible, unless it was a fresh
		insert; thus we may assert the following: */

		ut_ad(err == DB_SUCCESS);

		if (prev_version == NULL) {
			/* It was a freshly inserted version: there is an
			implicit x-lock on rec */

			trx = trx_get_on_id(trx_id);

			break;
		}

		/* If we get here, we know that the trx_id transaction is
		still active and it has modified prev_version. Let us check
		if prev_version would require rec to be in a different
		state. */

		vers_del = rec_get_deleted_flag(prev_version, comp);

		/* We check if entry and rec compare equal in the
		alphabetical ordering */

		if (0 == cmp_dtuple_rec(entry, rec, offsets)) {
			/* The delete marks of rec and prev_version should be
			equal for rec to be in the state required by
			prev_version */

			if (rec_del != vers_del) {
				trx = trx_get_on_id(trx_id);

				break;
			}

			/* It is possible that the row was updated so that the
			secondary index record remained the same in
			alphabetical ordering, but the field values still
			changed. For example, 'abc' -> 'ABC'. Check also
			that. */

			dtuple_set_types_binary(entry,
						dtuple_get_n_fields(entry));
			if (0 != cmp_dtuple_rec(entry, rec, offsets)) {

				trx = trx_get_on_id(trx_id);

				break;
			}
		} else if (!rec_del) {
			/* The delete mark should be set in rec for it to be
			in the state required by prev_version */

			trx = trx_get_on_id(trx_id);

			break;
		}

		prev_trx_id = row_get_rec_trx_id(prev_version, clust_index,
						 clust_offsets);

		if (0 != ut_dulint_cmp(trx_id, prev_trx_id)) {
			/* The versions modified by the trx_id transaction
			end at prev_version: no implicit x-lock */

			break;
		}

		version = prev_version;
	}/* for (;;) */

exit_func:
	mtr_commit(&mtr);
	mem_heap_free(heap);

	return(trx);
}
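
/* Illustrative caller sketch (not part of the original file): roughly how
the lock system might turn the implicit x-lock reported above into an
explicit one. The helper name and the final locking step are hypothetical;
only row_vers_impl_x_locked_off_kernel() is real here. */
#if 0 /* sketch only, excluded from compilation */
static
void
lock_rec_convert_impl_to_expl_sketch(
/*=================================*/
	const rec_t*	rec,	/* in: secondary index record */
	dict_index_t*	index,	/* in: secondary index of rec */
	const ulint*	offsets)/* in: rec_get_offsets(rec, index) */
{
	trx_t*	impl_trx;

	ut_ad(mutex_own(&kernel_mutex));

	/* NOTE: the kernel mutex is temporarily released inside */
	impl_trx = row_vers_impl_x_locked_off_kernel(rec, index, offsets);

	if (impl_trx) {
		/* An active transaction modified or inserted rec and
		thus holds an implicit x-lock on it: grant the same
		transaction an explicit x-lock so that conflicting
		transactions queue behind it. */

		/* ... add an explicit record lock for impl_trx ... */
	}
}
#endif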
/*********************************************************************
Finds out if we must preserve a delete marked earlier version of a clustered
index record, because it is >= the purge view. */

ibool
row_vers_must_preserve_del_marked(
/*==============================*/
			/* out: TRUE if earlier version should be preserved */
	dulint	trx_id,	/* in: transaction id in the version */
	mtr_t*	mtr)	/* in: mtr holding the latch on the clustered index
			record; it will also hold the latch on purge_view */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	mtr_s_lock(&(purge_sys->latch), mtr);

	if (trx_purge_update_undo_must_exist(trx_id)) {

		/* A purge operation is not yet allowed to remove this
		delete marked record */

		return(TRUE);
	}

	return(FALSE);
}
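
/* Illustrative usage sketch (hypothetical): purge might consult the
function above before removing a delete marked clustered index record.
The surrounding variables (trx_id, mtr) are assumed to come from the
caller's context. */
#if 0 /* sketch only, excluded from compilation */
	if (row_vers_must_preserve_del_marked(trx_id, &mtr)) {
		/* Some read view >= the purge view may still need the
		earlier version: keep the delete marked record. */
	} else {
		/* ... the record may be removed by purge ... */
	}
#endif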
/*********************************************************************
Finds out if a version of the record, where the version >= the current
purge view, should have ientry as its secondary index entry. We check
if there is any not delete marked version of the record where the trx
id >= purge view, and the secondary index entry and ientry compare equal
in the alphabetical ordering; exactly in this case we return TRUE. */

ibool
row_vers_old_has_index_entry(
/*=========================*/
				/* out: TRUE if earlier version should have
				the index entry */
	ibool		also_curr,/* in: TRUE if also rec is included in the
				versions to search; otherwise only versions
				prior to it are searched */
	rec_t*		rec,	/* in: record in the clustered index; the
				caller must have a latch on the page */
	mtr_t*		mtr,	/* in: mtr holding the latch on rec; it will
				also hold the latch on purge_view */
	dict_index_t*	index,	/* in: the secondary index */
	const dtuple_t*	ientry)	/* in: the secondary index entry */
{
	rec_t*		version;
	rec_t*		prev_version;
	dict_index_t*	clust_index;
	ulint*		clust_offsets;
	mem_heap_t*	heap;
	mem_heap_t*	heap2;
	const dtuple_t*	row;
	const dtuple_t*	entry;
	ulint		err;
	ulint		comp;

	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
	mtr_s_lock(&(purge_sys->latch), mtr);

	clust_index = dict_table_get_first_index(index->table);

	comp = page_rec_is_comp(rec);
	ut_ad(!dict_table_is_comp(index->table) == !comp);
	heap = mem_heap_create(1024);
	clust_offsets = rec_get_offsets(rec, clust_index, NULL,
					ULINT_UNDEFINED, &heap);

	if (also_curr && !rec_get_deleted_flag(rec, comp)) {
		row_ext_t*	ext;

		row = row_build(ROW_COPY_POINTERS, clust_index,
				rec, clust_offsets, &ext, heap);
		entry = row_build_index_entry(row, ext, index, heap);

		/* NOTE that we cannot do the comparison as binary
		fields because the row may be being modified so that
		the clustered index record has already been updated
		to a different binary value in a char field, but the
		collation identifies the old and new value anyway! */

		if (!dtuple_coll_cmp(ientry, entry)) {

			mem_heap_free(heap);

			return(TRUE);
		}
	}

	version = rec;

	for (;;) {
		heap2 = heap;
		heap = mem_heap_create(1024);
		err = trx_undo_prev_version_build(rec, mtr, version,
						  clust_index, clust_offsets,
						  heap, &prev_version);
		mem_heap_free(heap2); /* free version and clust_offsets */

		if (err != DB_SUCCESS || !prev_version) {
			/* Versions end here */

			mem_heap_free(heap);

			return(FALSE);
		}

		clust_offsets = rec_get_offsets(prev_version, clust_index,
						NULL, ULINT_UNDEFINED, &heap);

		if (!rec_get_deleted_flag(prev_version, comp)) {
			row_ext_t*	ext;

			row = row_build(ROW_COPY_POINTERS, clust_index,
					prev_version, clust_offsets,
					&ext, heap);
			entry = row_build_index_entry(row, ext, index, heap);

			/* NOTE that we cannot do the comparison as binary
			fields because maybe the secondary index record has
			already been updated to a different binary value in
			a char field, but the collation identifies the old
			and new value anyway! */

			if (!dtuple_coll_cmp(ientry, entry)) {

				mem_heap_free(heap);

				return(TRUE);
			}
		}

		version = prev_version;
	}
}
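
/* Illustrative usage sketch (hypothetical): this is roughly how purge
could decide whether a delete marked secondary index entry is still
needed by some version of the row. The variables (clust_pcur, mtr,
ientry) are assumed to come from the caller's context. */
#if 0 /* sketch only, excluded from compilation */
	rec = btr_pcur_get_rec(&clust_pcur);	/* clustered index record */

	if (row_vers_old_has_index_entry(TRUE, rec, &mtr, index, ientry)) {
		/* Some version >= the purge view still produces this
		secondary index entry: it must not be removed. */
	} else {
		/* ... the secondary index entry can be purged ... */
	}
#endif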
/*********************************************************************
Constructs the version of a clustered index record which a consistent
read should see. We assume that the trx id stored in rec is such that
the consistent read should not see rec in its present version. */

ulint
row_vers_build_for_consistent_read(
/*===============================*/
				/* out: DB_SUCCESS or DB_MISSING_HISTORY */
	rec_t*		rec,	/* in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/* in: mtr holding the latch on rec */
	dict_index_t*	index,	/* in: the clustered index */
	ulint**		offsets,/* in/out: offsets returned by
				rec_get_offsets(rec, index) */
	read_view_t*	view,	/* in: the consistent read view */
	mem_heap_t**	offset_heap,/* in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/* in: memory heap from which the memory for
				old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	rec_t**		old_vers)/* out, own: old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	rec_t*		version;
	rec_t*		prev_version;
	dulint		trx_id;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	trx_id = row_get_rec_trx_id(rec, index, *offsets);

	ut_ad(!read_view_sees_trx_id(view, trx_id));

	rw_lock_s_lock(&(purge_sys->latch));
	version = rec;

	for (;;) {
		mem_heap_t*	heap2	= heap;
		trx_undo_rec_t* undo_rec;
		dulint		roll_ptr;
		dulint		undo_no;
		heap = mem_heap_create(1024);

		/* If we have a high-granularity consistent read view and
		the creating transaction of the view is the same as trx_id
		in the record, we see this record only if the undo_no of
		the record is < the undo_no in the view. */

		if (view->type == VIEW_HIGH_GRANULARITY
		    && ut_dulint_cmp(view->creator_trx_id, trx_id) == 0) {

			roll_ptr = row_get_rec_roll_ptr(version, index,
							*offsets);
			undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
			undo_no = trx_undo_rec_get_undo_no(undo_rec);
			mem_heap_empty(heap);

			if (ut_dulint_cmp(view->undo_no, undo_no) > 0) {
				/* The view already sees this version: we can
				copy it to in_heap and return */

				buf = mem_heap_alloc(in_heap,
						     rec_offs_size(*offsets));
				*old_vers = rec_copy(buf, version, *offsets);
				rec_offs_make_valid(*old_vers, index,
						    *offsets);
				err = DB_SUCCESS;

				break;
			}
		}

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (err != DB_SUCCESS) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		*offsets = rec_get_offsets(prev_version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);

		trx_id = row_get_rec_trx_id(prev_version, index, *offsets);

		if (read_view_sees_trx_id(view, trx_id)) {

			/* The view already sees this version: we can copy
			it to in_heap and return */

			buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets));
			*old_vers = rec_copy(buf, prev_version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
	}/* for (;;) */

	mem_heap_free(heap);
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}
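
/* Illustrative usage sketch (hypothetical): a consistent read might call
the function above when the read view cannot see the current version of a
clustered index record. The variable names and the surrounding control
flow are assumptions, not code from this file. */
#if 0 /* sketch only, excluded from compilation */
	err = row_vers_build_for_consistent_read(
		rec, &mtr, index, &offsets, view,
		&offset_heap, in_heap, &old_vers);

	if (err == DB_SUCCESS && old_vers == NULL) {
		/* The record was inserted after the read view was
		created: the read does not see it at all. */
	} else if (err == DB_SUCCESS) {
		/* old_vers is the version the consistent read sees. */
	}
#endif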
/*********************************************************************
Constructs the last committed version of a clustered index record,
which should be seen by a semi-consistent read. */

ulint
row_vers_build_for_semi_consistent_read(
/*====================================*/
				/* out: DB_SUCCESS or DB_MISSING_HISTORY */
	rec_t*		rec,	/* in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/* in: mtr holding the latch on rec */
	dict_index_t*	index,	/* in: the clustered index */
	ulint**		offsets,/* in/out: offsets returned by
				rec_get_offsets(rec, index) */
	mem_heap_t**	offset_heap,/* in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/* in: memory heap from which the memory for
				old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	rec_t**		old_vers)/* out, own: rec, old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	rec_t*		version;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;
	dulint		rec_trx_id	= ut_dulint_zero;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	rw_lock_s_lock(&(purge_sys->latch));
	/* The S-latch on purge_sys prevents the purge view from
	changing. Thus, if we have an uncommitted transaction at
	this point, then purge cannot remove its undo log even if
	the transaction could commit now. */

	version = rec;

	for (;;) {
		trx_t*		version_trx;
		mem_heap_t*	heap2;
		rec_t*		prev_version;
		dulint		version_trx_id;

		version_trx_id = row_get_rec_trx_id(version, index, *offsets);
		if (rec == version) {
			rec_trx_id = version_trx_id;
		}

		mutex_enter(&kernel_mutex);
		version_trx = trx_get_on_id(version_trx_id);
		mutex_exit(&kernel_mutex);

		if (!version_trx
		    || version_trx->conc_state == TRX_NOT_STARTED
		    || version_trx->conc_state == TRX_COMMITTED_IN_MEMORY) {

			/* We found a version that belongs to a
			committed transaction: return it. */

			if (rec == version) {
				*old_vers = rec;
				err = DB_SUCCESS;
				break;
			}

			/* We assume that a rolled-back transaction stays in
			TRX_ACTIVE state until all the changes have been
			rolled back and the transaction is removed from
			the global list of transactions. */

			if (!ut_dulint_cmp(rec_trx_id, version_trx_id)) {
				/* The transaction was committed while
				we searched for earlier versions.
				Return the current version as a
				semi-consistent read. */

				version = rec;
				*offsets = rec_get_offsets(version,
							   index, *offsets,
							   ULINT_UNDEFINED,
							   offset_heap);
			}

			buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets));
			*old_vers = rec_copy(buf, version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		heap2 = heap;
		heap = mem_heap_create(1024);

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
		*offsets = rec_get_offsets(version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);
	}/* for (;;) */

	if (heap) {
		mem_heap_free(heap);
	}
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}
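
/* Illustrative usage sketch (hypothetical): under semi-consistent read
(for example, an UPDATE at a low isolation level), a row search might
fetch the last committed version instead of waiting for a conflicting
row lock. Variable names here are assumptions. */
#if 0 /* sketch only, excluded from compilation */
	err = row_vers_build_for_semi_consistent_read(
		rec, &mtr, index, &offsets, &offset_heap,
		in_heap, &old_vers);

	if (err == DB_SUCCESS && old_vers != NULL) {
		/* Evaluate the search condition on the last committed
		version; if it does not match, the row can be skipped
		without waiting for the conflicting lock. */
	}
#endif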