Browse Source

MDEV-12219 Discard temporary undo logs at transaction commit

Starting with MySQL 5.7, temporary tables in InnoDB are handled
differently from persistent tables. Because temporary tables are
private to a connection, concurrency control and multi-versioning
(MVCC) are not applicable. For performance reasons, purge is
disabled as well. Rollback is supported for temporary tables;
that is why we have the temporary undo logs in the first place.

Because MVCC and purge are disabled for temporary tables, we should
discard all temporary undo logs already at transaction commit,
just like we discard the persistent insert_undo logs. Before this
change, update_undo logs were being preserved.

trx_temp_undo_t: A wrapper for temporary undo logs, comprising
a rollback segment and a single temporary undo log.

trx_rsegs_t::m_noredo: Use trx_temp_undo_t.
(Instead of separate insert_undo and update_undo, there will be a single undo log.)

trx_is_noredo_rseg_updated(), trx_is_rseg_assigned(): Remove.

trx_undo_add_page(): Remove the parameter undo_ptr.
Acquire and release the rollback segment mutex inside the function.

trx_undo_free_last_page(): Remove the parameter trx.

trx_undo_truncate_end(): Remove the parameter trx, and add the
parameter is_temp. Clean up the code a bit.

trx_undo_assign_undo(): Split the parameter undo_ptr into rseg, undo.

trx_undo_commit_cleanup(): Renamed from trx_undo_insert_cleanup().
Replace the parameter undo_ptr with undo.
This will discard the temporary undo or insert_undo log at
commit/rollback.

trx_purge_add_update_undo_to_history(), trx_undo_update_cleanup():
Remove 3 parameters. Always operate on the persistent update_undo.

trx_serialise(): Renamed from trx_serialisation_number_get().

trx_write_serialisation_history(): Simplify the code flow.
If there are no persistent changes, do not update MONITOR_TRX_COMMIT_UNDO.

trx_commit_in_memory(): Simplify the logic, and add assertions.

trx_undo_page_report_modify(): Keep a direct reference to the
persistent update_undo log.

trx_undo_report_row_operation(): Simplify some code.
Always assign TRX_UNDO_INSERT for temporary undo logs.

trx_prepare_low(): Keep only one parameter. Prepare all 3 undo logs.

trx_roll_try_truncate(): Remove the parameter undo_ptr.
Try to truncate all 3 undo logs of the transaction.

trx_roll_pop_top_rec_of_trx_low(): Remove.

trx_roll_pop_top_rec_of_trx(): Remove the redundant limit parameter
(it always equaled trx->roll_limit). Clear roll_limit when exhausting the undo logs.
Consider all 3 undo logs at once, prioritizing the persistent
undo logs.

row_undo(): Minor cleanup. Let trx_roll_pop_top_rec_of_trx()
reset the trx->roll_limit.
pull/339/head
Marko Mäkelä 9 years ago
parent
commit
13e5c9de80
  1. 5
      storage/innobase/include/trx0purge.h
  2. 35
      storage/innobase/include/trx0roll.h
  3. 15
      storage/innobase/include/trx0trx.h
  4. 27
      storage/innobase/include/trx0trx.ic
  5. 117
      storage/innobase/include/trx0undo.h
  6. 6
      storage/innobase/include/trx0undo.ic
  7. 7
      storage/innobase/row/row0import.cc
  8. 14
      storage/innobase/row/row0trunc.cc
  9. 36
      storage/innobase/row/row0undo.cc
  10. 28
      storage/innobase/trx/trx0purge.cc
  11. 176
      storage/innobase/trx/trx0rec.cc
  12. 175
      storage/innobase/trx/trx0roll.cc
  13. 324
      storage/innobase/trx/trx0trx.cc
  14. 338
      storage/innobase/trx/trx0undo.cc

5
storage/innobase/include/trx0purge.h

@ -58,13 +58,8 @@ void
trx_purge_add_update_undo_to_history(
/*=================================*/
trx_t* trx, /*!< in: transaction */
trx_undo_ptr_t* undo_ptr, /*!< in: update undo log. */
page_t* undo_page, /*!< in: update undo log header page,
x-latched */
bool update_rseg_history_len,
/*!< in: if true: update rseg history
len else skip updating it. */
ulint n_added_logs, /*!< in: number of logs added */
mtr_t* mtr); /*!< in: mtr */
/*******************************************************************//**
This function runs a purge batch.

35
storage/innobase/include/trx0roll.h

@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2015, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -50,32 +51,16 @@ trx_savept_t
trx_savept_take(
/*============*/
trx_t* trx); /*!< in: transaction */
/********************************************************************//**
Pops the topmost record when the two undo logs of a transaction are seen
as a single stack of records ordered by their undo numbers.
@return undo log record copied to heap, NULL if none left, or if the
undo number of the top record would be less than the limit */
trx_undo_rec_t*
trx_roll_pop_top_rec_of_trx_low(
/*============================*/
trx_t* trx, /*!< in/out: transaction */
trx_undo_ptr_t* undo_ptr, /*!< in: rollback segment to look
for next undo log record. */
undo_no_t limit, /*!< in: least undo number we need */
roll_ptr_t* roll_ptr, /*!< out: roll pointer to undo record */
mem_heap_t* heap); /*!< in/out: memory heap where copied */
/********************************************************************//**
Get next undo log record from redo and noredo rollback segments.
@return undo log record copied to heap, NULL if none left, or if the
undo number of the top record would be less than the limit */
/** Get the last undo log record of a transaction (for rollback).
@param[in,out] trx transaction
@param[out] roll_ptr DB_ROLL_PTR to the undo record
@param[in,out] heap memory heap for allocation
@return undo log record copied to heap
@retval NULL if none left or the roll_limit (savepoint) was reached */
trx_undo_rec_t*
trx_roll_pop_top_rec_of_trx(
/*========================*/
trx_t* trx, /*!< in: transaction */
undo_no_t limit, /*!< in: least undo number we need */
roll_ptr_t* roll_ptr, /*!< out: roll pointer to undo record */
mem_heap_t* heap); /*!< in: memory heap where copied */
trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap)
MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Rollback or clean up any incomplete transactions which were

15
storage/innobase/include/trx0trx.h

@ -856,6 +856,14 @@ struct trx_undo_ptr_t {
NULL if no update performed yet */
};
/** An instance of temporary rollback segment. */
struct trx_temp_undo_t {
/** temporary rollback segment, or NULL if not assigned yet */
trx_rseg_t* rseg;
/** pointer to the undo log, or NULL if nothing logged yet */
trx_undo_t* undo;
};
/** Rollback segments assigned to a transaction for undo logging. */
struct trx_rsegs_t {
/** undo log ptr holding reference to a rollback segment that resides in
@ -863,10 +871,9 @@ struct trx_rsegs_t {
to be recovered on crash. */
trx_undo_ptr_t m_redo;
/** undo log ptr holding reference to a rollback segment that resides in
temp tablespace used for undo logging of tables that doesn't need
to be recovered on crash. */
trx_undo_ptr_t m_noredo;
/** undo log for temporary tables; discarded immediately after
transaction commit/rollback */
trx_temp_undo_t m_noredo;
};
enum trx_rseg_type_t {

27
storage/innobase/include/trx0trx.ic

@ -225,18 +225,6 @@ trx_is_redo_rseg_updated(
|| trx->rsegs.m_redo.update_undo != 0);
}
/********************************************************************//**
Check if noredo rseg is modified for insert/update. */
UNIV_INLINE
bool
trx_is_noredo_rseg_updated(
/*=======================*/
const trx_t* trx) /*!< in: transaction */
{
return(trx->rsegs.m_noredo.insert_undo != 0
|| trx->rsegs.m_noredo.update_undo != 0);
}
/********************************************************************//**
Check if redo/noredo rseg is modified for insert/update. */
UNIV_INLINE
@ -245,20 +233,7 @@ trx_is_rseg_updated(
/*================*/
const trx_t* trx) /*!< in: transaction */
{
return(trx_is_redo_rseg_updated(trx)
|| trx_is_noredo_rseg_updated(trx));
}
/********************************************************************//**
Check if redo/nonredo rseg is valid. */
UNIV_INLINE
bool
trx_is_rseg_assigned(
/*=================*/
const trx_t* trx) /*!< in: transaction */
{
return(trx->rsegs.m_redo.rseg != NULL
|| trx->rsegs.m_noredo.rseg != NULL);
return(trx_is_redo_rseg_updated(trx) || trx->rsegs.m_noredo.undo);
}
/**

117
storage/innobase/include/trx0undo.h

@ -35,6 +35,15 @@ Created 3/26/1996 Heikki Tuuri
#include "page0types.h"
#include "trx0xa.h"
/** The LSB of the "is insert" flag in DB_ROLL_PTR */
#define ROLL_PTR_INSERT_FLAG_POS 55
/** The LSB of the 7-bit trx_rseg_t::id in DB_ROLL_PTR */
#define ROLL_PTR_RSEG_ID_POS 48
/** The LSB of the 32-bit undo log page number in DB_ROLL_PTR */
#define ROLL_PTR_PAGE_POS 16
/** The LSB of the 16-bit byte offset within an undo log page in DB_ROLL_PTR */
#define ROLL_PTR_BYTE_POS 0
/***********************************************************************//**
Builds a roll pointer.
@return roll pointer */
@ -194,57 +203,31 @@ trx_undo_get_first_rec(
ulint mode,
mtr_t* mtr);
/********************************************************************//**
Tries to add a page to the undo log segment where the undo log is placed.
@return X-latched block if success, else NULL */
/** Allocate an undo log page.
@param[in,out] trx transaction
@param[in,out] undo undo log
@param[in,out] mtr mini-transaction that does not hold any page latch
@return X-latched block if success
@retval NULL on failure */
buf_block_t*
trx_undo_add_page(
/*==============*/
trx_t* trx, /*!< in: transaction */
trx_undo_t* undo, /*!< in: undo log memory object */
trx_undo_ptr_t* undo_ptr, /*!< in: assign undo log from
referred rollback segment. */
mtr_t* mtr) /*!< in: mtr which does not have
a latch to any undo log page;
the caller must have reserved
the rollback segment mutex */
trx_undo_add_page(trx_t* trx, trx_undo_t* undo, mtr_t* mtr)
MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Frees the last undo log page.
The caller must hold the rollback segment mutex. */
/** Free the last undo log page. The caller must hold the rseg mutex.
@param[in,out] undo undo log
@param[in,out] mtr mini-transaction that does not hold any undo log page
or that has allocated the undo log page */
void
trx_undo_free_last_page_func(
/*==========================*/
#ifdef UNIV_DEBUG
const trx_t* trx, /*!< in: transaction */
#endif /* UNIV_DEBUG */
trx_undo_t* undo, /*!< in/out: undo log memory copy */
mtr_t* mtr) /*!< in/out: mini-transaction which does not
have a latch to any undo log page or which
has allocated the undo log page */
trx_undo_free_last_page(trx_undo_t* undo, mtr_t* mtr)
MY_ATTRIBUTE((nonnull));
#ifdef UNIV_DEBUG
# define trx_undo_free_last_page(trx,undo,mtr) \
trx_undo_free_last_page_func(trx,undo,mtr)
#else /* UNIV_DEBUG */
# define trx_undo_free_last_page(trx,undo,mtr) \
trx_undo_free_last_page_func(undo,mtr)
#endif /* UNIV_DEBUG */
/***********************************************************************//**
Truncates an undo log from the end. This function is used during a rollback
to free space from an undo log. */
/** Truncate the tail of an undo log during rollback.
@param[in,out] undo undo log
@param[in] limit all undo logs after this limit will be discarded
@param[in] is_temp whether this is temporary undo log */
void
trx_undo_truncate_end_func(
/*=======================*/
trx_t* trx, /*!< in: transaction whose undo log it is */
trx_undo_t* undo, /*!< in/out: undo log */
undo_no_t limit) /*!< in: all undo records with undo number
>= this value should be truncated */
MY_ATTRIBUTE((nonnull(1,2)));
#define trx_undo_truncate_end(trx, undo, limit) \
trx_undo_truncate_end_func(trx, undo, limit)
trx_undo_truncate_end(trx_undo_t* undo, undo_no_t limit, bool is_temp)
MY_ATTRIBUTE((nonnull));
/** Truncate the head of an undo log.
NOTE that only whole pages are freed; the header page is not
@ -269,20 +252,23 @@ ulint
trx_undo_lists_init(
/*================*/
trx_rseg_t* rseg); /*!< in: rollback segment memory object */
/**********************************************************************//**
Assigns an undo log for a transaction. A new undo log is created or a cached
undo log reused.
@return DB_SUCCESS if undo log assign successful, possible error codes
are: DB_TOO_MANY_CONCURRENT_TRXS DB_OUT_OF_FILE_SPACE DB_READ_ONLY
DB_OUT_OF_MEMORY */
/** Assign an undo log for a transaction.
A new undo log is created or a cached undo log reused.
@param[in,out] trx transaction
@param[in] rseg rollback segment
@param[out] undo the undo log
@param[in] type TRX_UNDO_INSERT or TRX_UNDO_UPDATE
@retval DB_SUCCESS on success
@retval DB_TOO_MANY_CONCURRENT_TRXS
@retval DB_OUT_OF_FILE_SPACE
@retval DB_READ_ONLY
@retval DB_OUT_OF_MEMORY */
dberr_t
trx_undo_assign_undo(
/*=================*/
trx_t* trx, /*!< in: transaction */
trx_undo_ptr_t* undo_ptr, /*!< in: assign undo log from
referred rollback segment. */
ulint type) /*!< in: TRX_UNDO_INSERT or
TRX_UNDO_UPDATE */
trx_t* trx,
trx_rseg_t* rseg,
trx_undo_t** undo,
ulint type)
MY_ATTRIBUTE((nonnull, warn_unused_result));
/******************************************************************//**
Sets the state of the undo log segment at a transaction finish.
@ -315,24 +301,17 @@ trx_undo_update_cleanup(
/*====================*/
trx_t* trx, /*!< in: trx owning the update
undo log */
trx_undo_ptr_t* undo_ptr, /*!< in: update undo log. */
page_t* undo_page, /*!< in: update undo log header page,
x-latched */
bool update_rseg_history_len,
/*!< in: if true: update rseg history
len else skip updating it. */
ulint n_added_logs, /*!< in: number of logs added */
mtr_t* mtr); /*!< in: mtr */
/** Frees an insert undo log after a transaction commit or rollback.
Knowledge of inserts is not needed after a commit or rollback, therefore
/** Free an insert or temporary undo log after commit or rollback.
The information is not needed after a commit or rollback, therefore
the data can be discarded.
@param[in,out] undo_ptr undo log to clean up
@param[in] noredo whether the undo tablespace is redo logged */
@param[in,out] undo undo log
@param[in] is_temp whether this is temporary undo log */
void
trx_undo_insert_cleanup(
trx_undo_ptr_t* undo_ptr,
bool noredo);
trx_undo_commit_cleanup(trx_undo_t* undo, bool is_temp);
/********************************************************************//**
At shutdown, frees the undo logs of a PREPARED transaction. */

6
storage/innobase/include/trx0undo.ic

@ -47,9 +47,9 @@ trx_undo_build_roll_ptr(
ut_ad(rseg_id < TRX_SYS_N_RSEGS);
ut_ad(offset < 65536);
roll_ptr = (roll_ptr_t) is_insert << 55
| (roll_ptr_t) rseg_id << 48
| (roll_ptr_t) page_no << 16
roll_ptr = (roll_ptr_t) is_insert << ROLL_PTR_INSERT_FLAG_POS
| (roll_ptr_t) rseg_id << ROLL_PTR_RSEG_ID_POS
| (roll_ptr_t) page_no << ROLL_PTR_PAGE_POS
| offset;
return(roll_ptr);
}

7
storage/innobase/row/row0import.cc

@ -3388,9 +3388,10 @@ row_import_for_mysql(
mutex_enter(&trx->undo_mutex);
/* IMPORT tablespace is blocked for temp-tables and so we don't
need to assign temporary rollback segment for this trx. */
err = trx_undo_assign_undo(trx, &trx->rsegs.m_redo, TRX_UNDO_UPDATE);
/* TODO: Do not write any undo log for the IMPORT cleanup. */
trx_undo_t** pundo = &trx->rsegs.m_redo.update_undo;
err = trx_undo_assign_undo(trx, trx->rsegs.m_redo.rseg, pundo,
TRX_UNDO_UPDATE);
mutex_exit(&trx->undo_mutex);

14
storage/innobase/row/row0trunc.cc

@ -38,6 +38,13 @@ Created 2013-04-12 Sunny Bains
#include "os0file.h"
#include <vector>
/* FIXME: For temporary tables, use a simple approach of btr_free()
and btr_create() of each index tree. */
/* FIXME: For persistent tables, remove this code in MDEV-11655
and use a combination of the transactional DDL log to make atomic the
low-level operations ha_innobase::delete_table(), ha_innobase::create(). */
bool truncate_t::s_fix_up_active = false;
truncate_t::tables_t truncate_t::s_tables;
truncate_t::truncated_tables_t truncate_t::s_truncated_tables;
@ -1830,14 +1837,11 @@ row_truncate_table_for_mysql(
/* Step-6: Truncate operation can be rolled back in case of error
till some point. Associate rollback segment to record undo log. */
if (!dict_table_is_temporary(table)) {
/* Temporary tables don't need undo logging for autocommit stmt.
On crash (i.e. mysql restart) temporary tables are anyway not
accessible. */
mutex_enter(&trx->undo_mutex);
trx_undo_t** pundo = &trx->rsegs.m_redo.update_undo;
err = trx_undo_assign_undo(
trx, &trx->rsegs.m_redo, TRX_UNDO_UPDATE);
trx, trx->rsegs.m_redo.rseg, pundo, TRX_UNDO_UPDATE);
mutex_exit(&trx->undo_mutex);

36
storage/innobase/row/row0undo.cc

@ -264,47 +264,23 @@ row_undo(
undo_node_t* node, /*!< in: row undo node */
que_thr_t* thr) /*!< in: query thread */
{
dberr_t err;
trx_t* trx;
roll_ptr_t roll_ptr;
ibool locked_data_dict;
ut_ad(node != NULL);
ut_ad(thr != NULL);
trx = node->trx;
trx_t* trx = node->trx;
ut_ad(trx->in_rollback);
if (node->state == UNDO_NODE_FETCH_NEXT) {
node->undo_rec = trx_roll_pop_top_rec_of_trx(
trx, trx->roll_limit, &roll_ptr, node->heap);
trx, &node->roll_ptr, node->heap);
if (!node->undo_rec) {
/* Rollback completed for this query thread */
thr->run_node = que_node_get_parent(node);
/* Mark any partial rollback completed, so
that if the transaction object is committed
and reused later, the roll_limit will remain
at 0. trx->roll_limit will be nonzero during a
partial rollback only. */
trx->roll_limit = 0;
ut_d(trx->in_rollback = false);
return(DB_SUCCESS);
}
node->roll_ptr = roll_ptr;
node->undo_no = trx_undo_rec_get_undo_no(node->undo_rec);
if (trx_undo_roll_ptr_is_insert(roll_ptr)) {
node->state = UNDO_NODE_INSERT;
} else {
node->state = UNDO_NODE_MODIFY;
}
node->state = trx_undo_roll_ptr_is_insert(node->roll_ptr)
? UNDO_NODE_INSERT : UNDO_NODE_MODIFY;
}
/* Prevent DROP TABLE etc. while we are rolling back this row.
@ -312,13 +288,15 @@ row_undo(
then we already have dict_operation_lock locked in x-mode. Do not
try to lock again, because that would cause a hang. */
locked_data_dict = (trx->dict_operation_lock_mode == 0);
const bool locked_data_dict = (trx->dict_operation_lock_mode == 0);
if (locked_data_dict) {
row_mysql_freeze_data_dictionary(trx);
}
dberr_t err;
if (node->state == UNDO_NODE_INSERT) {
err = row_undo_ins(node, thr);

28
storage/innobase/trx/trx0purge.cc

@ -252,26 +252,15 @@ void
trx_purge_add_update_undo_to_history(
/*=================================*/
trx_t* trx, /*!< in: transaction */
trx_undo_ptr_t* undo_ptr, /*!< in/out: update undo log. */
page_t* undo_page, /*!< in: update undo log header page,
x-latched */
bool update_rseg_history_len,
/*!< in: if true: update rseg history
len else skip updating it. */
ulint n_added_logs, /*!< in: number of logs added */
mtr_t* mtr) /*!< in: mtr */
{
trx_undo_t* undo;
trx_rseg_t* rseg;
trx_rsegf_t* rseg_header;
trx_ulogf_t* undo_header;
undo = undo_ptr->update_undo;
rseg = undo->rseg;
rseg_header = trx_rsegf_get(rseg->space, rseg->page_no, mtr);
undo_header = undo_page + undo->hdr_offset;
trx_undo_t* undo = trx->rsegs.m_redo.update_undo;
trx_rseg_t* rseg = undo->rseg;
trx_rsegf_t* rseg_header = trx_rsegf_get(
rseg->space, rseg->page_no, mtr);
trx_ulogf_t* undo_header = undo_page + undo->hdr_offset;
if (undo->state != TRX_UNDO_CACHED) {
ulint hist_size;
@ -306,11 +295,8 @@ trx_purge_add_update_undo_to_history(
flst_add_first(rseg_header + TRX_RSEG_HISTORY,
undo_header + TRX_UNDO_HISTORY_NODE, mtr);
if (update_rseg_history_len) {
my_atomic_addlint(
&trx_sys->rseg_history_len, n_added_logs);
srv_wake_purge_thread_if_not_active();
}
my_atomic_addlint(&trx_sys->rseg_history_len, 1);
srv_wake_purge_thread_if_not_active();
/* Write the trx number to the undo log header */
mlog_write_ull(undo_header + TRX_UNDO_TRX_NO, trx->no, mtr);

176
storage/innobase/trx/trx0rec.cc

@ -862,7 +862,7 @@ trx_undo_page_report_modify(
virtual column info */
mtr_t* mtr) /*!< in: mtr */
{
dict_table_t* table;
dict_table_t* table = index->table;
ulint first_free;
byte* ptr;
const byte* field;
@ -872,7 +872,6 @@ trx_undo_page_report_modify(
byte* type_cmpl_ptr;
ulint i;
trx_id_t trx_id;
trx_undo_ptr_t* undo_ptr;
ibool ignore_prefix = FALSE;
byte ext_buf[REC_VERSION_56_MAX_INDEX_COL_LEN
+ BTR_EXTERN_FIELD_REF_SIZE];
@ -880,15 +879,13 @@ trx_undo_page_report_modify(
ut_a(dict_index_is_clust(index));
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
+ TRX_UNDO_PAGE_TYPE) == TRX_UNDO_UPDATE);
table = index->table;
/* If table instance is temporary then select noredo rseg as changes
to undo logs don't need REDO logging given that they are not
restored on restart as corresponding object doesn't exist on restart.*/
undo_ptr = dict_table_is_temporary(index->table)
? &trx->rsegs.m_noredo : &trx->rsegs.m_redo;
ut_ad(mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE
+ undo_page) == TRX_UNDO_UPDATE
|| (dict_table_is_temporary(table)
&& mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE
+ undo_page) == TRX_UNDO_INSERT));
trx_undo_t* update_undo = dict_table_is_temporary(table)
? NULL : trx->rsegs.m_redo.update_undo;
first_free = mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
+ TRX_UNDO_PAGE_FREE);
@ -1006,8 +1003,7 @@ trx_undo_page_report_modify(
need to double check if there are any non-indexed columns
being registered in update vector in case they will be indexed
in new table */
if (dict_index_is_online_ddl(index)
&& index->table->n_v_cols > 0) {
if (dict_index_is_online_ddl(index) && table->n_v_cols > 0) {
for (i = 0; i < upd_get_n_fields(update); i++) {
upd_field_t* fld = upd_get_nth_field(
update, i);
@ -1113,7 +1109,9 @@ trx_undo_page_report_modify(
/* Notify purge that it eventually has to
free the old externally stored field */
undo_ptr->update_undo->del_marks = TRUE;
if (update_undo) {
update_undo->del_marks = TRUE;
}
*type_cmpl_ptr |= TRX_UNDO_UPD_EXTERN;
} else {
@ -1182,7 +1180,9 @@ trx_undo_page_report_modify(
double mbr[SPDIMS * 2];
mem_heap_t* row_heap = NULL;
undo_ptr->update_undo->del_marks = TRUE;
if (update_undo) {
update_undo->del_marks = TRUE;
}
if (trx_undo_left(undo_page, ptr) < 5) {
@ -1866,18 +1866,19 @@ trx_undo_report_row_operation(
flag was specified */
{
trx_t* trx;
trx_undo_t* undo;
ulint page_no;
buf_block_t* undo_block;
trx_undo_ptr_t* undo_ptr;
mtr_t mtr;
dberr_t err = DB_SUCCESS;
#ifdef UNIV_DEBUG
int loop_count = 0;
#endif /* UNIV_DEBUG */
ut_a(dict_index_is_clust(index));
ut_ad(!rec || rec_offs_validate(rec, index, offsets));
ut_ad(!srv_read_only_mode);
ut_ad(op_type == TRX_UNDO_INSERT_OP || op_type == TRX_UNDO_MODIFY_OP);
ut_ad((op_type != TRX_UNDO_INSERT_OP)
|| (clust_entry && !update && !rec));
if (flags & BTR_NO_UNDO_LOG_FLAG) {
@ -1886,87 +1887,53 @@ trx_undo_report_row_operation(
return(DB_SUCCESS);
}
ut_ad(thr);
ut_ad(!srv_read_only_mode);
ut_ad((op_type != TRX_UNDO_INSERT_OP)
|| (clust_entry && !update && !rec));
trx = thr_get_trx(thr);
bool is_temp_table = dict_table_is_temporary(index->table);
/* Temporary tables do not go into INFORMATION_SCHEMA.TABLES,
so do not bother adding it to the list of modified tables by
the transaction - this list is only used for maintaining
INFORMATION_SCHEMA.TABLES.UPDATE_TIME. */
if (!is_temp_table) {
trx->mod_tables.insert(index->table);
}
/* If trx is read-only then only temp-tables can be written.
If trx is read-write and involves temp-table only then we
assign temporary rseg. */
if (trx->read_only || is_temp_table) {
mtr.start();
trx_undo_t** pundo;
trx_rseg_t* rseg;
const bool is_temp = dict_table_is_temporary(index->table);
ut_ad(!srv_read_only_mode || is_temp_table);
if (is_temp) {
mtr.set_log_mode(MTR_LOG_NO_REDO);
/* MySQL should block writes to non-temporary tables. */
ut_a(is_temp_table);
rseg = trx->rsegs.m_noredo.rseg;
if (trx->rsegs.m_noredo.rseg == 0) {
if (!rseg) {
trx_assign_rseg(trx);
rseg = trx->rsegs.m_noredo.rseg;
}
}
/* If object is temporary, disable REDO logging that is done to track
changes done to UNDO logs. This is feasible given that temporary tables
are not restored on restart. */
mtr_start(&mtr);
dict_disable_redo_if_temporary(index->table, &mtr);
mutex_enter(&trx->undo_mutex);
/* If object is temp-table then select noredo rseg as changes
to undo logs don't need REDO logging given that they are not
restored on restart as corresponding object doesn't exist on restart.*/
undo_ptr = is_temp_table ? &trx->rsegs.m_noredo : &trx->rsegs.m_redo;
switch (op_type) {
case TRX_UNDO_INSERT_OP:
undo = undo_ptr->insert_undo;
if (undo == NULL) {
err = trx_undo_assign_undo(
trx, undo_ptr, TRX_UNDO_INSERT);
undo = undo_ptr->insert_undo;
if (undo == NULL) {
/* Did not succeed */
ut_ad(err != DB_SUCCESS);
goto err_exit;
}
pundo = &trx->rsegs.m_noredo.undo;
} else {
ut_ad(!trx->read_only);
/* Keep INFORMATION_SCHEMA.TABLES.UPDATE_TIME
up-to-date for persistent tables. Temporary tables are
not listed there. */
trx->mod_tables.insert(index->table);
ut_ad(err == DB_SUCCESS);
}
break;
default:
ut_ad(op_type == TRX_UNDO_MODIFY_OP);
pundo = op_type == TRX_UNDO_INSERT_OP
? &trx->rsegs.m_redo.insert_undo
: &trx->rsegs.m_redo.update_undo;
rseg = trx->rsegs.m_redo.rseg;
}
undo = undo_ptr->update_undo;
mutex_enter(&trx->undo_mutex);
dberr_t err;
if (undo == NULL) {
err = trx_undo_assign_undo(
trx, undo_ptr, TRX_UNDO_UPDATE);
undo = undo_ptr->update_undo;
if (*pundo) {
err = DB_SUCCESS;
} else if (op_type == TRX_UNDO_INSERT_OP || is_temp) {
err = trx_undo_assign_undo(trx, rseg, pundo, TRX_UNDO_INSERT);
} else {
err = trx_undo_assign_undo(trx, rseg, pundo, TRX_UNDO_UPDATE);
}
if (undo == NULL) {
/* Did not succeed */
ut_ad(err != DB_SUCCESS);
goto err_exit;
}
}
trx_undo_t* undo = *pundo;
ut_ad(err == DB_SUCCESS);
ut_ad((err == DB_SUCCESS) == (undo != NULL));
if (undo == NULL) {
goto err_exit;
}
page_no = undo->last_page_no;
@ -2020,13 +1987,14 @@ trx_undo_report_row_operation(
latches, such as SYNC_FSP and SYNC_FSP_PAGE. */
mtr_commit(&mtr);
mtr_start_trx(&mtr, trx);
dict_disable_redo_if_temporary(
index->table, &mtr);
mtr.start(trx);
if (is_temp) {
mtr.set_log_mode(MTR_LOG_NO_REDO);
}
mutex_enter(&undo_ptr->rseg->mutex);
trx_undo_free_last_page(trx, undo, &mtr);
mutex_exit(&undo_ptr->rseg->mutex);
mutex_enter(&rseg->mutex);
trx_undo_free_last_page(undo, &mtr);
mutex_exit(&rseg->mutex);
err = DB_UNDO_RECORD_TOO_BIG;
goto err_exit;
@ -2045,13 +2013,13 @@ trx_undo_report_row_operation(
undo->guess_block = undo_block;
trx->undo_no++;
trx->undo_rseg_space = undo_ptr->rseg->space;
trx->undo_rseg_space = rseg->space;
mutex_exit(&trx->undo_mutex);
*roll_ptr = trx_undo_build_roll_ptr(
op_type == TRX_UNDO_INSERT_OP,
undo_ptr->rseg->id, page_no, offset);
rseg->id, page_no, offset);
return(DB_SUCCESS);
}
@ -2060,17 +2028,13 @@ trx_undo_report_row_operation(
/* We have to extend the undo log by one page */
ut_ad(++loop_count < 2);
mtr_start_trx(&mtr, trx);
dict_disable_redo_if_temporary(index->table, &mtr);
mtr.start(trx);
/* When we add a page to an undo log, this is analogous to
a pessimistic insert in a B-tree, and we must reserve the
counterpart of the tree latch, which is the rseg mutex. */
mutex_enter(&undo_ptr->rseg->mutex);
undo_block = trx_undo_add_page(trx, undo, undo_ptr, &mtr);
mutex_exit(&undo_ptr->rseg->mutex);
if (is_temp) {
mtr.set_log_mode(MTR_LOG_NO_REDO);
}
undo_block = trx_undo_add_page(trx, undo, &mtr);
page_no = undo->last_page_no;
DBUG_EXECUTE_IF("ib_err_ins_undo_page_add_failure",
@ -2084,10 +2048,8 @@ trx_undo_report_row_operation(
" log pages. Please add new data file to the tablespace or"
" check if filesystem is full or enable auto-extension for"
" the tablespace",
((undo->space == srv_sys_space.space_id())
? "system" :
((fsp_is_system_temporary(undo->space))
? "temporary" : "undo")));
undo->space == TRX_SYS_SPACE
? "system" : is_temp ? "temporary" : "undo");
/* Did not succeed: out of space */
err = DB_OUT_OF_FILE_SPACE;

175
storage/innobase/trx/trx0roll.cc

@ -217,8 +217,7 @@ trx_rollback_low(
case TRX_STATE_PREPARED:
ut_ad(!trx_is_autocommit_non_locking(trx));
if (trx->rsegs.m_redo.rseg != NULL
&& trx_is_redo_rseg_updated(trx)) {
if (trx_is_redo_rseg_updated(trx)) {
/* Change the undo log state back from
TRX_UNDO_PREPARED to TRX_UNDO_ACTIVE
so that if the system gets killed,
@ -891,27 +890,38 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)(
OS_THREAD_DUMMY_RETURN;
}
/***********************************************************************//**
Tries truncate the undo logs. */
/** Try to truncate the undo logs.
@param[in,out] trx transaction */
static
void
trx_roll_try_truncate(
/*==================*/
trx_t* trx, /*!< in/out: transaction */
trx_undo_ptr_t* undo_ptr) /*!< in: rollback segment to look
for next undo log record. */
trx_roll_try_truncate(trx_t* trx)
{
ut_ad(mutex_own(&trx->undo_mutex));
ut_ad(mutex_own(&undo_ptr->rseg->mutex));
trx->pages_undone = 0;
if (undo_ptr->insert_undo) {
trx_undo_truncate_end(trx, undo_ptr->insert_undo, trx->undo_no);
undo_no_t undo_no = trx->undo_no;
trx_undo_t* insert_undo = trx->rsegs.m_redo.insert_undo;
trx_undo_t* update_undo = trx->rsegs.m_redo.update_undo;
if (insert_undo || update_undo) {
mutex_enter(&trx->rsegs.m_redo.rseg->mutex);
if (insert_undo) {
ut_ad(insert_undo->rseg == trx->rsegs.m_redo.rseg);
trx_undo_truncate_end(insert_undo, undo_no, false);
}
if (update_undo) {
ut_ad(update_undo->rseg == trx->rsegs.m_redo.rseg);
trx_undo_truncate_end(update_undo, undo_no, false);
}
mutex_exit(&trx->rsegs.m_redo.rseg->mutex);
}
if (undo_ptr->update_undo) {
trx_undo_truncate_end(trx, undo_ptr->update_undo, trx->undo_no);
if (trx_undo_t* undo = trx->rsegs.m_noredo.undo) {
ut_ad(undo->rseg == trx->rsegs.m_noredo.rseg);
mutex_enter(&undo->rseg->mutex);
trx_undo_truncate_end(undo, undo_no, true);
mutex_exit(&undo->rseg->mutex);
}
#ifdef WITH_WSREP_OUT
@ -963,75 +973,73 @@ trx_roll_pop_top_rec(
return(undo_page + offset);
}
/********************************************************************//**
Pops the topmost record when the two undo logs of a transaction are seen
as a single stack of records ordered by their undo numbers.
@return undo log record copied to heap, NULL if none left, or if the
undo number of the top record would be less than the limit */
/** Get the last undo log record of a transaction (for rollback).
@param[in,out] trx transaction
@param[out] roll_ptr DB_ROLL_PTR to the undo record
@param[in,out] heap memory heap for allocation
@return undo log record copied to heap
@retval NULL if none left or the roll_limit (savepoint) was reached */
trx_undo_rec_t*
trx_roll_pop_top_rec_of_trx_low(
/*============================*/
trx_t* trx, /*!< in/out: transaction */
trx_undo_ptr_t* undo_ptr, /*!< in: rollback segment to look
for next undo log record. */
undo_no_t limit, /*!< in: least undo number we need */
roll_ptr_t* roll_ptr, /*!< out: roll pointer to undo record */
mem_heap_t* heap) /*!< in/out: memory heap where copied */
trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap)
{
trx_undo_t* undo;
trx_undo_t* ins_undo;
trx_undo_t* upd_undo;
trx_undo_rec_t* undo_rec;
trx_undo_rec_t* undo_rec_copy;
undo_no_t undo_no;
ibool is_insert;
trx_rseg_t* rseg;
mtr_t mtr;
rseg = undo_ptr->rseg;
mutex_enter(&trx->undo_mutex);
if (trx->pages_undone >= TRX_ROLL_TRUNC_THRESHOLD) {
mutex_enter(&rseg->mutex);
trx_roll_try_truncate(trx, undo_ptr);
mutex_exit(&rseg->mutex);
trx_roll_try_truncate(trx);
}
ins_undo = undo_ptr->insert_undo;
upd_undo = undo_ptr->update_undo;
if (!ins_undo || ins_undo->empty) {
undo = upd_undo;
} else if (!upd_undo || upd_undo->empty) {
undo = ins_undo;
} else if (upd_undo->top_undo_no > ins_undo->top_undo_no) {
undo = upd_undo;
trx_undo_t* undo;
trx_undo_t* insert = trx->rsegs.m_redo.insert_undo;
trx_undo_t* update = trx->rsegs.m_redo.update_undo;
trx_undo_t* temp = trx->rsegs.m_noredo.undo;
const undo_no_t limit = trx->roll_limit;
ut_ad(!insert || !update || !insert->top_undo_no
|| insert->top_undo_no != update->top_undo_no);
ut_ad(!insert || !temp || !insert->top_undo_no
|| insert->top_undo_no != temp->top_undo_no);
ut_ad(!update || !temp || !update->top_undo_no
|| update->top_undo_no != temp->top_undo_no);
if (insert && !insert->empty && limit <= insert->top_undo_no) {
if (update && !update->empty
&& update->top_undo_no > insert->top_undo_no) {
undo = update;
} else {
undo = insert;
}
} else if (update && !update->empty && limit <= update->top_undo_no) {
undo = update;
} else if (temp && !temp->empty && limit <= temp->top_undo_no) {
undo = temp;
} else {
undo = ins_undo;
}
if (!undo || undo->empty || limit > undo->top_undo_no) {
mutex_enter(&rseg->mutex);
trx_roll_try_truncate(trx, undo_ptr);
mutex_exit(&rseg->mutex);
trx_roll_try_truncate(trx);
/* Mark any ROLLBACK TO SAVEPOINT completed, so that
if the transaction object is committed and reused
later, we will default to a full ROLLBACK. */
trx->roll_limit = 0;
ut_d(trx->in_rollback = false);
mutex_exit(&trx->undo_mutex);
return(NULL);
}
is_insert = (undo == ins_undo);
ut_ad(!undo->empty);
ut_ad(limit <= undo->top_undo_no);
*roll_ptr = trx_undo_build_roll_ptr(
is_insert, undo->rseg->id, undo->top_page_no, undo->top_offset);
mtr_start(&mtr);
false, undo->rseg->id, undo->top_page_no, undo->top_offset);
undo_rec = trx_roll_pop_top_rec(trx, undo, &mtr);
mtr_t mtr;
mtr.start();
undo_no = trx_undo_rec_get_undo_no(undo_rec);
trx_undo_rec_t* undo_rec = trx_roll_pop_top_rec(trx, undo, &mtr);
const undo_no_t undo_no = trx_undo_rec_get_undo_no(undo_rec);
if (trx_undo_rec_get_type(undo_rec) == TRX_UNDO_INSERT_REC) {
ut_ad(undo == insert || undo == temp);
*roll_ptr |= 1ULL << ROLL_PTR_INSERT_FLAG_POS;
} else {
ut_ad(undo == update || undo == temp);
}
ut_ad(trx_roll_check_undo_rec_ordering(
undo_no, undo->rseg->space, trx));
@ -1059,43 +1067,14 @@ trx_roll_pop_top_rec_of_trx_low(
trx->undo_no = undo_no;
trx->undo_rseg_space = undo->rseg->space;
undo_rec_copy = trx_undo_rec_copy(undo_rec, heap);
mutex_exit(&trx->undo_mutex);
mtr_commit(&mtr);
trx_undo_rec_t* undo_rec_copy = trx_undo_rec_copy(undo_rec, heap);
mtr.commit();
return(undo_rec_copy);
}
/********************************************************************//**
Get next undo log record from redo and noredo rollback segments.
@return undo log record copied to heap, NULL if none left, or if the
undo number of the top record would be less than the limit */
trx_undo_rec_t*
trx_roll_pop_top_rec_of_trx(
/*========================*/
trx_t* trx, /*!< in: transaction */
undo_no_t limit, /*!< in: least undo number we need */
roll_ptr_t* roll_ptr, /*!< out: roll pointer to undo record */
mem_heap_t* heap) /*!< in: memory heap where copied */
{
trx_undo_rec_t* undo_rec = 0;
if (trx_is_redo_rseg_updated(trx)) {
undo_rec = trx_roll_pop_top_rec_of_trx_low(
trx, &trx->rsegs.m_redo, limit, roll_ptr, heap);
}
if (undo_rec == 0 && trx_is_noredo_rseg_updated(trx)) {
undo_rec = trx_roll_pop_top_rec_of_trx_low(
trx, &trx->rsegs.m_noredo, limit, roll_ptr, heap);
}
return(undo_rec);
}
/****************************************************************//**
Builds an undo 'query' graph for a transaction. The actual rollback is
performed by executing this query graph like a query subprocedure call.

324
storage/innobase/trx/trx0trx.cc

@ -1487,65 +1487,29 @@ trx_start_low(
MONITOR_INC(MONITOR_TRX_ACTIVE);
}
/****************************************************************//**
Set the transaction serialisation number.
@return true if the transaction number was added to the serialisation_list. */
/** Set the serialisation number for a persistent committed transaction.
@param[in,out] trx committed transaction with persistent changes
@param[in,out] rseg rollback segment for update_undo, or NULL */
static
bool
trx_serialisation_number_get(
/*=========================*/
trx_t* trx, /*!< in/out: transaction */
trx_undo_ptr_t* redo_rseg_undo_ptr, /*!< in/out: Set trx
serialisation number in
referred undo rseg. */
trx_undo_ptr_t* noredo_rseg_undo_ptr) /*!< in/out: Set trx
serialisation number in
referred undo rseg. */
void
trx_serialise(trx_t* trx, trx_rseg_t* rseg)
{
bool added_trx_no;
trx_rseg_t* redo_rseg = 0;
trx_rseg_t* noredo_rseg = 0;
if (redo_rseg_undo_ptr != NULL) {
ut_ad(mutex_own(&redo_rseg_undo_ptr->rseg->mutex));
redo_rseg = redo_rseg_undo_ptr->rseg;
}
if (noredo_rseg_undo_ptr != NULL) {
ut_ad(mutex_own(&noredo_rseg_undo_ptr->rseg->mutex));
noredo_rseg = noredo_rseg_undo_ptr->rseg;
}
ut_ad(!rseg || rseg == trx->rsegs.m_redo.rseg);
trx_sys_mutex_enter();
trx->no = trx_sys_get_new_trx_id();
/* Track the minimum serialisation number. */
if (!trx->read_only) {
UT_LIST_ADD_LAST(trx_sys->serialisation_list, trx);
added_trx_no = true;
} else {
added_trx_no = false;
}
UT_LIST_ADD_LAST(trx_sys->serialisation_list, trx);
/* If the rollack segment is not empty then the
new trx_t::no can't be less than any trx_t::no
already in the rollback segment. User threads only
produce events when a rollback segment is empty. */
if ((redo_rseg != NULL && redo_rseg->last_page_no == FIL_NULL)
|| (noredo_rseg != NULL && noredo_rseg->last_page_no == FIL_NULL)) {
if (rseg && rseg->last_page_no == FIL_NULL) {
TrxUndoRsegs elem(trx->no);
if (redo_rseg != NULL && redo_rseg->last_page_no == FIL_NULL) {
elem.push_back(redo_rseg);
}
if (noredo_rseg != NULL
&& noredo_rseg->last_page_no == FIL_NULL) {
elem.push_back(noredo_rseg);
}
elem.push_back(rseg);
mutex_enter(&purge_sys->pq_mutex);
@ -1562,8 +1526,6 @@ trx_serialisation_number_get(
} else {
trx_sys_mutex_exit();
}
return(added_trx_no);
}
/****************************************************************//**
@ -1586,112 +1548,57 @@ trx_write_serialisation_history(
UNDO trx number. This is required for the purge in-memory data
structures too. */
bool own_redo_rseg_mutex = false;
bool own_noredo_rseg_mutex = false;
/* Get rollback segment mutex. */
if (trx->rsegs.m_redo.rseg != NULL && trx_is_redo_rseg_updated(trx)) {
if (trx_undo_t* undo = trx->rsegs.m_noredo.undo) {
/* Undo log for temporary tables is discarded at transaction
commit. There is no purge for temporary tables, and also no
MVCC, because they are private to a session. */
mutex_enter(&trx->rsegs.m_redo.rseg->mutex);
own_redo_rseg_mutex = true;
}
mtr_t temp_mtr;
if (trx->rsegs.m_noredo.rseg != NULL
&& trx_is_noredo_rseg_updated(trx)) {
mutex_enter(&trx->rsegs.m_noredo.rseg->mutex);
own_noredo_rseg_mutex = true;
mtr_start(&temp_mtr);
mtr_t temp_mtr;
temp_mtr.start();
temp_mtr.set_log_mode(MTR_LOG_NO_REDO);
}
/* If transaction involves insert then truncate undo logs. */
if (trx->rsegs.m_redo.insert_undo != NULL) {
trx_undo_set_state_at_finish(
trx->rsegs.m_redo.insert_undo, mtr);
mutex_enter(&trx->rsegs.m_noredo.rseg->mutex);
trx_undo_set_state_at_finish(undo, &temp_mtr);
mutex_exit(&trx->rsegs.m_noredo.rseg->mutex);
temp_mtr.commit();
}
if (trx->rsegs.m_noredo.insert_undo != NULL) {
trx_undo_set_state_at_finish(
trx->rsegs.m_noredo.insert_undo, &temp_mtr);
if (!trx->rsegs.m_redo.rseg) {
ut_ad(!trx->rsegs.m_redo.insert_undo);
ut_ad(!trx->rsegs.m_redo.update_undo);
return false;
}
bool serialised = false;
/* If transaction involves update then add rollback segments
to purge queue. */
if (trx->rsegs.m_redo.update_undo != NULL
|| trx->rsegs.m_noredo.update_undo != NULL) {
/* Assign the transaction serialisation number and add these
rollback segments to purge trx-no sorted priority queue
if this is the first UNDO log being written to assigned
rollback segments. */
trx_undo_ptr_t* redo_rseg_undo_ptr =
trx->rsegs.m_redo.update_undo != NULL
? &trx->rsegs.m_redo : NULL;
trx_undo_ptr_t* noredo_rseg_undo_ptr =
trx->rsegs.m_noredo.update_undo != NULL
? &trx->rsegs.m_noredo : NULL;
/* Will set trx->no and will add rseg to purge queue. */
serialised = trx_serialisation_number_get(
trx, redo_rseg_undo_ptr, noredo_rseg_undo_ptr);
/* It is not necessary to obtain trx->undo_mutex here because
only a single OS thread is allowed to do the transaction commit
for this transaction. */
if (trx->rsegs.m_redo.update_undo != NULL) {
page_t* undo_hdr_page;
undo_hdr_page = trx_undo_set_state_at_finish(
trx->rsegs.m_redo.update_undo, mtr);
/* Delay update of rseg_history_len if we plan to add
non-redo update_undo too. This is to avoid immediate
invocation of purge as we need to club these 2 segments
with same trx-no as single unit. */
bool update_rseg_len =
!(trx->rsegs.m_noredo.update_undo != NULL);
trx_undo_update_cleanup(
trx, &trx->rsegs.m_redo, undo_hdr_page,
update_rseg_len, (update_rseg_len ? 1 : 0),
mtr);
}
DBUG_EXECUTE_IF("ib_trx_crash_during_commit", DBUG_SUICIDE(););
trx_undo_t* insert = trx->rsegs.m_redo.insert_undo;
trx_undo_t* update = trx->rsegs.m_redo.update_undo;
if (trx->rsegs.m_noredo.update_undo != NULL) {
page_t* undo_hdr_page;
if (!insert && !update) {
return false;
}
undo_hdr_page = trx_undo_set_state_at_finish(
trx->rsegs.m_noredo.update_undo, &temp_mtr);
ut_ad(!trx->read_only);
trx_rseg_t* update_rseg = update ? trx->rsegs.m_redo.rseg : NULL;
mutex_enter(&trx->rsegs.m_redo.rseg->mutex);
ulint n_added_logs =
(redo_rseg_undo_ptr != NULL) ? 2 : 1;
/* Assign the transaction serialisation number and add any
update_undo log to the purge queue. */
trx_serialise(trx, update_rseg);
trx_undo_update_cleanup(
trx, &trx->rsegs.m_noredo, undo_hdr_page,
true, n_added_logs, &temp_mtr);
}
/* It is not necessary to acquire trx->undo_mutex here because
only a single OS thread is allowed to commit this transaction. */
if (insert) {
trx_undo_set_state_at_finish(insert, mtr);
}
if (update) {
/* The undo logs and possible delete-marked records
for updates and deletes will be purged later. */
page_t* undo_hdr_page = trx_undo_set_state_at_finish(
update, mtr);
if (own_redo_rseg_mutex) {
mutex_exit(&trx->rsegs.m_redo.rseg->mutex);
own_redo_rseg_mutex = false;
trx_undo_update_cleanup(trx, undo_hdr_page, mtr);
}
if (own_noredo_rseg_mutex) {
mutex_exit(&trx->rsegs.m_noredo.rseg->mutex);
own_noredo_rseg_mutex = false;
mtr_commit(&temp_mtr);
}
mutex_exit(&trx->rsegs.m_redo.rseg->mutex);
MONITOR_INC(MONITOR_TRX_COMMIT_UNDO);
@ -1720,7 +1627,7 @@ trx_write_serialisation_history(
trx->mysql_log_file_name = NULL;
}
return(serialised);
return(true);
}
/********************************************************************
@ -2005,21 +1912,28 @@ trx_commit_in_memory(
}
}
if (trx->rsegs.m_redo.rseg != NULL) {
trx_rseg_t* rseg = trx->rsegs.m_redo.rseg;
ut_ad(!trx->rsegs.m_redo.update_undo);
if (trx_rseg_t* rseg = trx->rsegs.m_redo.rseg) {
mutex_enter(&rseg->mutex);
ut_ad(rseg->trx_ref_count > 0);
--rseg->trx_ref_count;
mutex_exit(&rseg->mutex);
}
if (mtr != NULL) {
if (trx->rsegs.m_redo.insert_undo != NULL) {
trx_undo_insert_cleanup(&trx->rsegs.m_redo, false);
if (trx_undo_t*& insert = trx->rsegs.m_redo.insert_undo) {
ut_ad(insert->rseg == rseg);
trx_undo_commit_cleanup(insert, false);
insert = NULL;
}
}
ut_ad(!trx->rsegs.m_redo.insert_undo);
if (trx->rsegs.m_noredo.insert_undo != NULL) {
trx_undo_insert_cleanup(&trx->rsegs.m_noredo, true);
if (mtr != NULL) {
if (trx_undo_t*& undo = trx->rsegs.m_noredo.undo) {
ut_ad(undo->rseg == trx->rsegs.m_noredo.rseg);
trx_undo_commit_cleanup(undo, true);
undo = NULL;
}
/* NOTE that we could possibly make a group commit more
@ -2074,6 +1988,8 @@ trx_commit_in_memory(
srv_active_wake_master_thread();
}
ut_ad(!trx->rsegs.m_noredo.undo);
/* Free all savepoints, starting from the first. */
trx_named_savept_t* savep = UT_LIST_GET_FIRST(trx->trx_savepoints);
@ -2242,13 +2158,13 @@ trx_cleanup_at_db_startup(
trx_t* trx) /*!< in: transaction */
{
ut_ad(trx->is_recovered);
ut_ad(!trx->rsegs.m_noredo.undo);
ut_ad(!trx->rsegs.m_redo.update_undo);
/* At db start-up there shouldn't be any active trx on temp-table
that needs insert_cleanup as temp-table are not visible on
restart and temporary rseg is re-created. */
if (trx->rsegs.m_redo.insert_undo != NULL) {
trx_undo_insert_cleanup(&trx->rsegs.m_redo, false);
if (trx_undo_t*& undo = trx->rsegs.m_redo.insert_undo) {
ut_ad(undo->rseg == trx->rsegs.m_redo.rseg);
trx_undo_commit_cleanup(undo, false);
undo = NULL;
}
memset(&trx->rsegs, 0x0, sizeof(trx->rsegs));
@ -2765,66 +2681,67 @@ trx_weight_ge(
return(TRX_WEIGHT(a) >= TRX_WEIGHT(b));
}
/****************************************************************//**
Prepares a transaction for given rollback segment.
@return lsn_t: lsn assigned for commit of scheduled rollback segment */
/** Prepare a transaction.
@return log sequence number that makes the XA PREPARE durable
@retval 0 if no changes needed to be made durable */
static
lsn_t
trx_prepare_low(
/*============*/
trx_t* trx, /*!< in/out: transaction */
trx_undo_ptr_t* undo_ptr, /*!< in/out: pointer to rollback
segment scheduled for prepare. */
bool noredo_logging) /*!< in: turn-off redo logging. */
trx_prepare_low(trx_t* trx)
{
lsn_t lsn;
mtr_t mtr;
if (undo_ptr->insert_undo != NULL || undo_ptr->update_undo != NULL) {
mtr_t mtr;
trx_rseg_t* rseg = undo_ptr->rseg;
/* It is not necessary to acquire trx->undo_mutex here because
only the owning (connection) thread of the transaction is
allowed to perform XA PREPARE. */
mtr_start_sync(&mtr);
if (trx_undo_t* undo = trx->rsegs.m_noredo.undo) {
ut_ad(undo->rseg == trx->rsegs.m_noredo.rseg);
if (noredo_logging) {
mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
}
mtr.start();
mtr.set_log_mode(MTR_LOG_NO_REDO);
/* Change the undo log segment states from TRX_UNDO_ACTIVE to
TRX_UNDO_PREPARED: these modifications to the file data
structure define the transaction as prepared in the file-based
world, at the serialization point of lsn. */
mutex_enter(&undo->rseg->mutex);
trx_undo_set_state_at_prepare(trx, undo, false, &mtr);
mutex_exit(&undo->rseg->mutex);
mutex_enter(&rseg->mutex);
mtr.commit();
}
if (undo_ptr->insert_undo != NULL) {
trx_undo_t* insert = trx->rsegs.m_redo.insert_undo;
trx_undo_t* update = trx->rsegs.m_redo.update_undo;
/* It is not necessary to obtain trx->undo_mutex here
because only a single OS thread is allowed to do the
transaction prepare for this transaction. */
trx_undo_set_state_at_prepare(
trx, undo_ptr->insert_undo, false, &mtr);
}
if (!insert && !update) {
/* There were no changes to persistent tables. */
return(0);
}
if (undo_ptr->update_undo != NULL) {
trx_undo_set_state_at_prepare(
trx, undo_ptr->update_undo, false, &mtr);
}
trx_rseg_t* rseg = trx->rsegs.m_redo.rseg;
mutex_exit(&rseg->mutex);
mtr.start(true);
/*--------------*/
/* This mtr commit makes the transaction prepared in
file-based world. */
mtr_commit(&mtr);
/*--------------*/
/* Change the undo log segment states from TRX_UNDO_ACTIVE to
TRX_UNDO_PREPARED: these modifications to the file data
structure define the transaction as prepared in the file-based
world, at the serialization point of lsn. */
lsn = mtr.commit_lsn();
ut_ad(noredo_logging || lsn > 0);
} else {
lsn = 0;
mutex_enter(&rseg->mutex);
if (insert) {
ut_ad(insert->rseg == rseg);
trx_undo_set_state_at_prepare(trx, insert, false, &mtr);
}
return(lsn);
if (update) {
ut_ad(update->rseg == rseg);
trx_undo_set_state_at_prepare(trx, update, false, &mtr);
}
mutex_exit(&rseg->mutex);
/* Make the XA PREPARE durable. */
mtr.commit();
ut_ad(mtr.commit_lsn() > 0);
return(mtr.commit_lsn());
}
/****************************************************************//**
@ -2839,25 +2756,14 @@ trx_prepare(
be rolled back asynchronously now. It must commit or rollback
synhronously. */
lsn_t lsn = 0;
/* Only fresh user transactions can be prepared.
Recovered transactions cannot. */
ut_a(!trx->is_recovered);
if (trx->rsegs.m_redo.rseg != NULL && trx_is_redo_rseg_updated(trx)) {
lsn = trx_prepare_low(trx, &trx->rsegs.m_redo, false);
}
lsn_t lsn = trx_prepare_low(trx);
DBUG_EXECUTE_IF("ib_trx_crash_during_xa_prepare_step", DBUG_SUICIDE(););
if (trx->rsegs.m_noredo.rseg != NULL
&& trx_is_noredo_rseg_updated(trx)) {
trx_prepare_low(trx, &trx->rsegs.m_noredo, true);
}
/*--------------------------------------*/
ut_a(trx->state == TRX_STATE_ACTIVE);
trx_sys_mutex_enter();

338
storage/innobase/trx/trx0undo.cc

@ -862,35 +862,29 @@ trx_undo_discard_latest_update_undo(
trx_undo_discard_latest_log(undo_page, mtr);
}
/********************************************************************//**
Tries to add a page to the undo log segment where the undo log is placed.
@return X-latched block if success, else NULL */
/** Allocate an undo log page.
@param[in,out] trx transaction
@param[in,out] undo undo log
@param[in,out] mtr mini-transaction that does not hold any page latch
@return X-latched block if success
@retval NULL on failure */
buf_block_t*
trx_undo_add_page(
/*==============*/
trx_t* trx, /*!< in: transaction */
trx_undo_t* undo, /*!< in: undo log memory object */
trx_undo_ptr_t* undo_ptr, /*!< in: assign undo log from
referred rollback segment. */
mtr_t* mtr) /*!< in: mtr which does not have
a latch to any undo log page;
the caller must have reserved
the rollback segment mutex */
trx_undo_add_page(trx_t* trx, trx_undo_t* undo, mtr_t* mtr)
{
page_t* header_page;
buf_block_t* new_block;
page_t* new_page;
trx_rseg_t* rseg;
ulint n_reserved;
ut_ad(mutex_own(&trx->undo_mutex));
ut_ad(mutex_own(&(trx->undo_mutex)));
ut_ad(mutex_own(&(undo_ptr->rseg->mutex)));
trx_rseg_t* rseg = undo->rseg;
buf_block_t* new_block = NULL;
ulint n_reserved;
page_t* header_page;
rseg = undo_ptr->rseg;
/* When we add a page to an undo log, this is analogous to
a pessimistic insert in a B-tree, and we must reserve the
counterpart of the tree latch, which is the rseg mutex. */
mutex_enter(&rseg->mutex);
if (rseg->curr_size == rseg->max_size) {
return(NULL);
goto func_exit;
}
header_page = trx_undo_page_get(
@ -898,8 +892,7 @@ trx_undo_add_page(
if (!fsp_reserve_free_extents(&n_reserved, undo->space, 1,
FSP_UNDO, mtr)) {
return(NULL);
goto func_exit;
}
new_block = fseg_alloc_free_page_general(
@ -909,26 +902,26 @@ trx_undo_add_page(
fil_space_release_free_extents(undo->space, n_reserved);
if (new_block == NULL) {
/* No space left */
return(NULL);
if (!new_block) {
goto func_exit;
}
ut_ad(rw_lock_get_x_lock_count(&new_block->lock) == 1);
buf_block_dbg_add_level(new_block, SYNC_TRX_UNDO_PAGE);
undo->last_page_no = new_block->page.id.page_no();
new_page = buf_block_get_frame(new_block);
trx_undo_page_init(new_page, undo->type, mtr);
trx_undo_page_init(new_block->frame, undo->type, mtr);
flst_add_last(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
new_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr);
flst_add_last(TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST
+ header_page,
TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE
+ new_block->frame,
mtr);
undo->size++;
rseg->curr_size++;
func_exit:
mutex_exit(&rseg->mutex);
return(new_block);
}
@ -986,21 +979,13 @@ trx_undo_free_page(
return(last_addr.page);
}
/********************************************************************//**
Frees the last undo log page.
The caller must hold the rollback segment mutex. */
/** Free the last undo log page. The caller must hold the rseg mutex.
@param[in,out] undo undo log
@param[in,out] mtr mini-transaction that does not hold any undo log page
or that has allocated the undo log page */
void
trx_undo_free_last_page_func(
/*==========================*/
#ifdef UNIV_DEBUG
const trx_t* trx, /*!< in: transaction */
#endif /* UNIV_DEBUG */
trx_undo_t* undo, /*!< in/out: undo log memory copy */
mtr_t* mtr) /*!< in/out: mini-transaction which does not
have a latch to any undo log page or which
has allocated the undo log page */
trx_undo_free_last_page(trx_undo_t* undo, mtr_t* mtr)
{
ut_ad(mutex_own(&trx->undo_mutex));
ut_ad(undo->hdr_page_no != undo->last_page_no);
ut_ad(undo->size > 0);
@ -1039,46 +1024,28 @@ trx_undo_empty_header_page(
mlog_write_ulint(log_hdr + TRX_UNDO_LOG_START, end, MLOG_2BYTES, mtr);
}
/***********************************************************************//**
Truncates an undo log from the end. This function is used during a rollback
to free space from an undo log. */
/** Truncate the tail of an undo log during rollback.
@param[in,out] undo undo log
@param[in] limit all undo logs after this limit will be discarded
@param[in] is_temp whether this is temporary undo log */
void
trx_undo_truncate_end_func(
/*=======================*/
trx_t* trx, /*!< in: transaction whose undo log it is */
trx_undo_t* undo, /*!< in: undo log */
undo_no_t limit) /*!< in: all undo records with undo number
>= this value should be truncated */
trx_undo_truncate_end(trx_undo_t* undo, undo_no_t limit, bool is_temp)
{
page_t* undo_page;
ulint last_page_no;
trx_undo_rec_t* rec;
trx_undo_rec_t* trunc_here;
mtr_t mtr;
const bool noredo = trx_sys_is_noredo_rseg_slot(undo->rseg->id);
ut_ad(mutex_own(&(trx->undo_mutex)));
ut_ad(mutex_own(&undo->rseg->mutex));
ut_ad(is_temp == trx_sys_is_noredo_rseg_slot(undo->rseg->id));
for (;;) {
mtr_start_trx(&mtr, trx);
if (noredo) {
mtr_t mtr;
mtr.start();
if (is_temp) {
mtr.set_log_mode(MTR_LOG_NO_REDO);
ut_ad(trx->rsegs.m_noredo.rseg == undo->rseg);
} else {
ut_ad(trx->rsegs.m_redo.rseg == undo->rseg);
}
trunc_here = NULL;
last_page_no = undo->last_page_no;
undo_page = trx_undo_page_get(
page_id_t(undo->space, last_page_no), &mtr);
rec = trx_undo_page_get_last_rec(undo_page, undo->hdr_page_no,
undo->hdr_offset);
trx_undo_rec_t* trunc_here = NULL;
page_t* undo_page = trx_undo_page_get(
page_id_t(undo->space, undo->last_page_no), &mtr);
trx_undo_rec_t* rec = trx_undo_page_get_last_rec(
undo_page, undo->hdr_page_no, undo->hdr_offset);
while (rec) {
if (trx_undo_rec_get_undo_no(rec) >= limit) {
/* Truncate at least this record off, maybe
@ -1093,25 +1060,22 @@ trx_undo_truncate_end_func(
undo->hdr_offset);
}
if (last_page_no == undo->hdr_page_no) {
if (undo->last_page_no == undo->hdr_page_no) {
function_exit:
if (trunc_here) {
mlog_write_ulint(undo_page + TRX_UNDO_PAGE_HDR
+ TRX_UNDO_PAGE_FREE,
trunc_here - undo_page,
MLOG_2BYTES, &mtr);
}
goto function_exit;
mtr.commit();
return;
}
ut_ad(last_page_no == undo->last_page_no);
trx_undo_free_last_page(trx, undo, &mtr);
mtr_commit(&mtr);
}
function_exit:
if (trunc_here) {
mlog_write_ulint(undo_page + TRX_UNDO_PAGE_HDR
+ TRX_UNDO_PAGE_FREE,
trunc_here - undo_page, MLOG_2BYTES, &mtr);
trx_undo_free_last_page(undo, &mtr);
mtr.commit();
}
mtr_commit(&mtr);
}
/** Truncate the head of an undo log.
@ -1688,90 +1652,70 @@ trx_undo_mark_as_dict_operation(
undo->dict_operation = TRUE;
}
/**********************************************************************//**
Assigns an undo log for a transaction. A new undo log is created or a cached
undo log reused.
@return DB_SUCCESS if undo log assign successful, possible error codes
are: DB_TOO_MANY_CONCURRENT_TRXS DB_OUT_OF_FILE_SPACE DB_READ_ONLY
DB_OUT_OF_MEMORY */
/** Assign an undo log for a transaction.
A new undo log is created or a cached undo log reused.
@param[in,out] trx transaction
@param[in] rseg rollback segment
@param[out] undo the undo log
@param[in] type TRX_UNDO_INSERT or TRX_UNDO_UPDATE
@retval DB_SUCCESS on success
@retval DB_TOO_MANY_CONCURRENT_TRXS
@retval DB_OUT_OF_FILE_SPACE
@retval DB_READ_ONLY
@retval DB_OUT_OF_MEMORY */
dberr_t
trx_undo_assign_undo(
/*=================*/
trx_t* trx, /*!< in: transaction */
trx_undo_ptr_t* undo_ptr, /*!< in: assign undo log from
referred rollback segment. */
ulint type) /*!< in: TRX_UNDO_INSERT or
TRX_UNDO_UPDATE */
trx_t* trx,
trx_rseg_t* rseg,
trx_undo_t** undo,
ulint type)
{
trx_rseg_t* rseg;
trx_undo_t* undo;
const bool is_temp = rseg == trx->rsegs.m_noredo.rseg;
mtr_t mtr;
dberr_t err = DB_SUCCESS;
ut_ad(trx);
/* In case of read-only scenario trx->rsegs.m_redo.rseg can be NULL but
still request for assigning undo logs is valid as temporary tables
can be updated in read-only mode.
If there is no rollback segment assigned to trx and still there is
object being updated there is something wrong and so this condition
check. */
ut_ad(trx_is_rseg_assigned(trx));
rseg = undo_ptr->rseg;
ut_ad(mutex_own(&(trx->undo_mutex)));
ut_ad(mutex_own(&trx->undo_mutex));
ut_ad(rseg == trx->rsegs.m_redo.rseg
|| rseg == trx->rsegs.m_noredo.rseg);
ut_ad(type == TRX_UNDO_INSERT || type == TRX_UNDO_UPDATE);
mtr_start_trx(&mtr, trx);
if (&trx->rsegs.m_noredo == undo_ptr) {
mtr.set_log_mode(MTR_LOG_NO_REDO);;
} else {
ut_ad(&trx->rsegs.m_redo == undo_ptr);
}
mtr.start(trx);
if (trx_sys_is_noredo_rseg_slot(rseg->id)) {
mtr.set_log_mode(MTR_LOG_NO_REDO);;
ut_ad(undo_ptr == &trx->rsegs.m_noredo);
if (is_temp) {
mtr.set_log_mode(MTR_LOG_NO_REDO);
ut_ad(undo == &trx->rsegs.m_noredo.undo);
} else {
ut_ad(undo_ptr == &trx->rsegs.m_redo);
ut_ad(undo == (type == TRX_UNDO_INSERT
? &trx->rsegs.m_redo.insert_undo
: &trx->rsegs.m_redo.update_undo));
}
mutex_enter(&rseg->mutex);
DBUG_EXECUTE_IF(
"ib_create_table_fail_too_many_trx",
err = DB_TOO_MANY_CONCURRENT_TRXS;
goto func_exit;
);
undo = trx_undo_reuse_cached(trx, rseg, type, trx->id, trx->xid,
*undo = trx_undo_reuse_cached(trx, rseg, type, trx->id, trx->xid,
&mtr);
if (undo == NULL) {
if (*undo == NULL) {
err = trx_undo_create(trx, rseg, type, trx->id, trx->xid,
&undo, &mtr);
undo, &mtr);
if (err != DB_SUCCESS) {
goto func_exit;
}
}
if (type == TRX_UNDO_INSERT) {
UT_LIST_ADD_FIRST(rseg->insert_undo_list, undo);
ut_ad(undo_ptr->insert_undo == NULL);
undo_ptr->insert_undo = undo;
if (is_temp) {
UT_LIST_ADD_FIRST(rseg->insert_undo_list, *undo);
} else {
UT_LIST_ADD_FIRST(rseg->update_undo_list, undo);
ut_ad(undo_ptr->update_undo == NULL);
undo_ptr->update_undo = undo;
}
if (trx_get_dict_operation(trx) != TRX_DICT_OP_NONE) {
trx_undo_mark_as_dict_operation(trx, undo, &mtr);
UT_LIST_ADD_FIRST(type == TRX_UNDO_INSERT
? rseg->insert_undo_list
: rseg->update_undo_list, *undo);
if (trx_get_dict_operation(trx) != TRX_DICT_OP_NONE) {
trx_undo_mark_as_dict_operation(trx, *undo, &mtr);
}
}
func_exit:
mutex_exit(&(rseg->mutex));
mtr_commit(&mtr);
mutex_exit(&rseg->mutex);
mtr.commit();
return(err);
}
@ -1881,30 +1825,20 @@ trx_undo_update_cleanup(
/*====================*/
trx_t* trx, /*!< in: trx owning the update
undo log */
trx_undo_ptr_t* undo_ptr, /*!< in: update undo log. */
page_t* undo_page, /*!< in: update undo log header page,
x-latched */
bool update_rseg_history_len,
/*!< in: if true: update rseg history
len else skip updating it. */
ulint n_added_logs, /*!< in: number of logs added */
mtr_t* mtr) /*!< in: mtr */
{
trx_rseg_t* rseg;
trx_undo_t* undo;
undo = undo_ptr->update_undo;
rseg = undo_ptr->rseg;
trx_undo_t* undo = trx->rsegs.m_redo.update_undo;
trx_rseg_t* rseg = undo->rseg;
ut_ad(mutex_own(&(rseg->mutex)));
ut_ad(mutex_own(&rseg->mutex));
trx_purge_add_update_undo_to_history(
trx, undo_ptr, undo_page,
update_rseg_history_len, n_added_logs, mtr);
trx_purge_add_update_undo_to_history(trx, undo_page, mtr);
UT_LIST_REMOVE(rseg->update_undo_list, undo);
undo_ptr->update_undo = NULL;
trx->rsegs.m_redo.update_undo = NULL;
if (undo->state == TRX_UNDO_CACHED) {
@ -1918,55 +1852,39 @@ trx_undo_update_cleanup(
}
}
/** Frees an insert undo log after a transaction commit or rollback.
Knowledge of inserts is not needed after a commit or rollback, therefore
/** Free an insert or temporary undo log after commit or rollback.
The information is not needed after a commit or rollback, therefore
the data can be discarded.
@param[in,out] undo_ptr undo log to clean up
@param[in] noredo whether the undo tablespace is redo logged */
@param[in,out] undo undo log
@param[in] is_temp whether this is temporary undo log */
void
trx_undo_insert_cleanup(
trx_undo_ptr_t* undo_ptr,
bool noredo)
trx_undo_commit_cleanup(trx_undo_t* undo, bool is_temp)
{
trx_undo_t* undo;
trx_rseg_t* rseg;
undo = undo_ptr->insert_undo;
ut_ad(undo != NULL);
rseg = undo_ptr->rseg;
ut_ad(noredo == trx_sys_is_noredo_rseg_slot(rseg->id));
trx_rseg_t* rseg = undo->rseg;
ut_ad(is_temp == trx_sys_is_noredo_rseg_slot(rseg->id));
mutex_enter(&(rseg->mutex));
mutex_enter(&rseg->mutex);
UT_LIST_REMOVE(rseg->insert_undo_list, undo);
undo_ptr->insert_undo = NULL;
if (undo->state == TRX_UNDO_CACHED) {
UT_LIST_ADD_FIRST(rseg->insert_undo_cached, undo);
MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
} else {
ut_ad(undo->state == TRX_UNDO_TO_FREE);
/* Delete first the undo log segment in the file */
mutex_exit(&(rseg->mutex));
trx_undo_seg_free(undo, noredo);
mutex_enter(&(rseg->mutex));
mutex_exit(&rseg->mutex);
trx_undo_seg_free(undo, is_temp);
mutex_enter(&rseg->mutex);
ut_ad(rseg->curr_size > undo->size);
rseg->curr_size -= undo->size;
trx_undo_mem_free(undo);
}
mutex_exit(&(rseg->mutex));
mutex_exit(&rseg->mutex);
}
/********************************************************************//**
@ -2022,27 +1940,13 @@ trx_undo_free_prepared(
trx->rsegs.m_redo.insert_undo = NULL;
}
if (trx->rsegs.m_noredo.update_undo) {
ut_a(trx->rsegs.m_noredo.update_undo->state
== TRX_UNDO_PREPARED);
UT_LIST_REMOVE(trx->rsegs.m_noredo.rseg->update_undo_list,
trx->rsegs.m_noredo.update_undo);
trx_undo_mem_free(trx->rsegs.m_noredo.update_undo);
trx->rsegs.m_noredo.update_undo = NULL;
}
if (trx->rsegs.m_noredo.insert_undo) {
ut_a(trx->rsegs.m_noredo.insert_undo->state
== TRX_UNDO_PREPARED);
if (trx_undo_t*& undo = trx->rsegs.m_noredo.undo) {
ut_a(undo->state == TRX_UNDO_PREPARED);
UT_LIST_REMOVE(trx->rsegs.m_noredo.rseg->insert_undo_list,
trx->rsegs.m_noredo.insert_undo);
trx_undo_mem_free(trx->rsegs.m_noredo.insert_undo);
trx->rsegs.m_noredo.insert_undo = NULL;
undo);
trx_undo_mem_free(undo);
undo = NULL;
}
}

Loading…
Cancel
Save