Browse Source

MDEV-24167: Replace fil_space::latch

We must avoid acquiring a latch while we are already holding one.
The tablespace latch was being acquired recursively in some
operations that allocate or free pages.
bb-10.6-MDEV-16437
Marko Mäkelä 5 years ago
parent
commit
edbde4a11f
  1. 1
      mysql-test/suite/perfschema/r/sxlock_func.result
  2. 11
      storage/innobase/btr/btr0btr.cc
  3. 13
      storage/innobase/btr/btr0cur.cc
  4. 12
      storage/innobase/fil/fil0fil.cc
  5. 32
      storage/innobase/fsp/fsp0fsp.cc
  6. 18
      storage/innobase/handler/ha_innodb.cc
  7. 6
      storage/innobase/ibuf/ibuf0ibuf.cc
  8. 5
      storage/innobase/include/btr0btr.h
  9. 56
      storage/innobase/include/fil0fil.h
  10. 6
      storage/innobase/include/fsp0fsp.h
  11. 45
      storage/innobase/include/mtr0mtr.h
  12. 2
      storage/innobase/include/mtr0mtr.ic
  13. 6
      storage/innobase/include/mtr0types.h
  14. 3
      storage/innobase/include/sync0types.h
  15. 48
      storage/innobase/lock/lock0lock.cc
  16. 70
      storage/innobase/mtr/mtr0mtr.cc
  17. 2
      storage/innobase/srv/srv0srv.cc
  18. 3
      storage/innobase/srv/srv0start.cc
  19. 27
      storage/innobase/sync/sync0debug.cc
  20. 2
      storage/innobase/trx/trx0purge.cc
  21. 2
      storage/innobase/trx/trx0rec.cc
  22. 4
      storage/innobase/trx/trx0rseg.cc
  23. 2
      storage/innobase/trx/trx0sys.cc

1
mysql-test/suite/perfschema/r/sxlock_func.result

@ -7,7 +7,6 @@ TRUNCATE TABLE performance_schema.events_waits_current;
select name from performance_schema.setup_instruments
where name like "wait/synch/sxlock/%" order by name;
name
wait/synch/sxlock/innodb/fil_space_latch
wait/synch/sxlock/innodb/index_tree_rw_lock
select name from performance_schema.rwlock_instances
where name in

11
storage/innobase/btr/btr0btr.cc

@ -604,7 +604,7 @@ btr_get_size(
if (!root) {
return ULINT_UNDEFINED;
}
mtr_x_lock_space(index->table->space, mtr);
mtr->x_lock_space(index->table->space);
if (flag == BTR_N_LEAF_PAGES) {
fseg_n_reserved_pages(*root, PAGE_HEADER + PAGE_BTR_SEG_LEAF
+ root->frame, &n, mtr);
@ -652,7 +652,7 @@ btr_get_size_and_reserved(
return ULINT_UNDEFINED;
}
mtr_x_lock_space(index->table->space, mtr);
mtr->x_lock_space(index->table->space);
ulint n = fseg_n_reserved_pages(*root, PAGE_HEADER + PAGE_BTR_SEG_LEAF
+ root->frame, used, mtr);
@ -691,9 +691,10 @@ btr_page_free_for_ibuf(
@param[in,out] index index tree
@param[in,out] block block to be freed
@param[in,out] mtr mini-transaction
@param[in] blob whether this is freeing a BLOB page */
@param[in] blob whether this is freeing a BLOB page
@param[in] space_latched whether index->table->space->x_lock() was called */
void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
bool blob)
bool blob, bool space_latched)
{
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
#ifdef BTR_CUR_HASH_ADAPT
@ -726,7 +727,7 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
? PAGE_HEADER + PAGE_BTR_SEG_LEAF
: PAGE_HEADER + PAGE_BTR_SEG_TOP];
fseg_free_page(seg_header,
index->table->space, id.page_no(), mtr);
index->table->space, id.page_no(), mtr, space_latched);
buf_page_free(id, mtr, __FILE__, __LINE__);
/* The page was marked free in the allocation bitmap, but it

13
storage/innobase/btr/btr0cur.cc

@ -7638,8 +7638,8 @@ btr_free_externally_stored_field(
MTR_MEMO_PAGE_X_FIX));
ut_ad(!rec || rec_offs_validate(rec, index, offsets));
ut_ad(!rec || field_ref == btr_rec_get_field_ref(rec, offsets, i));
ut_ad(local_mtr->is_named_space(
page_get_space_id(page_align(field_ref))));
ut_ad(index->table->space_id == index->table->space->id);
ut_ad(local_mtr->is_named_space(index->table->space));
if (UNIV_UNLIKELY(!memcmp(field_ref, field_ref_zero,
BTR_EXTERN_FIELD_REF_SIZE))) {
@ -7653,7 +7653,6 @@ btr_free_externally_stored_field(
ut_ad(!(mach_read_from_4(field_ref + BTR_EXTERN_LEN)
& ~((BTR_EXTERN_OWNER_FLAG
| BTR_EXTERN_INHERITED_FLAG) << 24)));
ut_ad(space_id == index->table->space->id);
ut_ad(space_id == index->table->space_id);
const ulint ext_zip_size = index->table->space->zip_size();
@ -7727,7 +7726,9 @@ btr_free_externally_stored_field(
}
next_page_no = mach_read_from_4(page + FIL_PAGE_NEXT);
btr_page_free(index, ext_block, &mtr, true);
btr_page_free(index, ext_block, &mtr, true,
local_mtr->memo_contains(
*index->table->space));
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
mach_write_to_4(field_ref + BTR_EXTERN_PAGE_NO,
@ -7751,7 +7752,9 @@ btr_free_externally_stored_field(
next_page_no = mach_read_from_4(
page + FIL_PAGE_DATA
+ BTR_BLOB_HDR_NEXT_PAGE_NO);
btr_page_free(index, ext_block, &mtr, true);
btr_page_free(index, ext_block, &mtr, true,
local_mtr->memo_contains(
*index->table->space));
mtr.write<4>(*block, BTR_EXTERN_PAGE_NO + field_ref,
next_page_no);

12
storage/innobase/fil/fil0fil.cc

@ -853,7 +853,6 @@ fil_space_free_low(
ut_ad(space->size == 0);
rw_lock_free(&space->latch);
fil_space_destroy_crypt_data(&space->crypt_data);
space->~fil_space_t();
@ -885,7 +884,7 @@ fil_space_free(
if (space != NULL) {
if (x_latched) {
rw_lock_x_unlock(&space->latch);
space->x_unlock();
}
if (!recv_recovery_is_on()) {
@ -958,7 +957,7 @@ fil_space_t *fil_space_t::create(const char *name, ulint id, ulint flags,
<< " " << fil_crypt_get_type(crypt_data));
}
rw_lock_create(fil_space_latch_key, &space->latch, SYNC_FSP);
space->latch.init(fil_space_latch_key);
if (space->purpose == FIL_TYPE_TEMPORARY) {
/* SysTablespace::open_or_create() would pass
@ -978,7 +977,6 @@ fil_space_t *fil_space_t::create(const char *name, ulint id, ulint flags,
<< " to the tablespace memory cache, but tablespace '"
<< old_space->name << "' already exists in the cache!";
mutex_exit(&fil_system.mutex);
rw_lock_free(&space->latch);
space->~fil_space_t();
ut_free(space->name);
ut_free(space);
@ -1797,7 +1795,7 @@ void fil_close_tablespace(ulint id)
return;
}
rw_lock_x_lock(&space->latch);
space->x_lock();
/* Invalidate in the buffer pool all pages belonging to the
tablespace. Since we have invoked space->set_stopping(), readahead
@ -1809,11 +1807,11 @@ void fil_close_tablespace(ulint id)
os_aio_wait_until_no_pending_writes();
ut_ad(space->is_stopping());
/* If the free is successful, the X lock will be released before
/* If the free is successful, the wrlock will be released before
the space memory data structure is freed. */
if (!fil_space_free(id, true)) {
rw_lock_x_unlock(&space->latch);
space->x_unlock();
}
/* If it is a delete then also delete any generated files, otherwise

32
storage/innobase/fsp/fsp0fsp.cc

@ -318,7 +318,7 @@ xdes_get_descriptor_with_space_hdr(
mtr_t* mtr,
bool init_space = false)
{
ut_ad(mtr->memo_contains(*space));
ut_ad(space->is_owner());
ut_ad(mtr->memo_contains_flagged(header, MTR_MEMO_PAGE_SX_FIX
| MTR_MEMO_PAGE_X_FIX));
/* Read free limit and space size */
@ -404,7 +404,7 @@ xdes_get_descriptor_const(
page_no_t offset,
mtr_t* mtr)
{
ut_ad(mtr->memo_contains(space->latch, MTR_MEMO_S_LOCK));
ut_ad(mtr->memo_contains(*space, true));
ut_ad(offset < space->free_limit);
ut_ad(offset < space->size_in_header);
@ -549,7 +549,7 @@ void fsp_header_init(fil_space_t* space, uint32_t size, mtr_t* mtr)
buf_block_t *free_block = buf_LRU_get_free_block(false);
mtr_x_lock_space(space, mtr);
mtr->x_lock_space(space);
buf_block_t* block = buf_page_create(space, 0, zip_size, mtr,
free_block);
@ -1262,7 +1262,7 @@ static void fsp_free_page(fil_space_t* space, page_no_t offset, mtr_t* mtr)
@param[in,out] mtr mini-transaction */
static void fsp_free_extent(fil_space_t* space, page_no_t offset, mtr_t* mtr)
{
ut_ad(mtr->memo_contains(*space));
ut_ad(space->is_owner());
buf_block_t *block= fsp_get_header(space, mtr);
buf_block_t *xdes= 0;
@ -1650,7 +1650,7 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr,
ut_ad(byte_offset + FSEG_HEADER_SIZE
<= srv_page_size - FIL_PAGE_DATA_END);
mtr_x_lock_space(space, mtr);
mtr->x_lock_space(space);
ut_d(space->modify_check(*mtr));
if (block) {
@ -2199,7 +2199,7 @@ fseg_alloc_free_page_general(
uint32_t n_reserved;
space_id = page_get_space_id(page_align(seg_header));
space = mtr_x_lock_space(space_id, mtr);
space = mtr->x_lock_space(space_id);
inode = fseg_inode_get(seg_header, space_id, space->zip_size(),
mtr, &iblock);
if (!space->full_crc32()) {
@ -2323,7 +2323,7 @@ fsp_reserve_free_extents(
const uint32_t extent_size = FSP_EXTENT_SIZE;
mtr_x_lock_space(space, mtr);
mtr->x_lock_space(space);
const unsigned physical_size = space->physical_size();
buf_block_t* header = fsp_get_header(space, mtr);
@ -2528,18 +2528,24 @@ fseg_free_page_low(
@param[in,out] seg_header file segment header
@param[in,out] space tablespace
@param[in] offset page number
@param[in,out] mtr mini-transaction */
@param[in,out] mtr mini-transaction
@param[in] have_latch whether space->x_lock() was already called */
void
fseg_free_page(
fseg_header_t* seg_header,
fil_space_t* space,
uint32_t offset,
mtr_t* mtr)
mtr_t* mtr,
bool have_latch)
{
DBUG_ENTER("fseg_free_page");
fseg_inode_t* seg_inode;
buf_block_t* iblock;
mtr_x_lock_space(space, mtr);
if (have_latch) {
ut_ad(space->is_owner());
} else {
mtr->x_lock_space(space);
}
DBUG_LOG("fseg_free_page", "space_id: " << space->id
<< ", page_no: " << offset);
@ -2569,7 +2575,7 @@ fseg_page_is_free(fil_space_t* space, unsigned page)
page);
mtr.start();
mtr_s_lock_space(space, &mtr);
mtr.s_lock_space(space);
if (page >= space->free_limit || page >= space->size_in_header) {
is_free = true;
@ -2666,7 +2672,7 @@ fseg_free_step(
const uint32_t space_id = page_get_space_id(page_align(header));
const uint32_t header_page = page_get_page_no(page_align(header));
fil_space_t* space = mtr_x_lock_space(space_id, mtr);
fil_space_t* space = mtr->x_lock_space(space_id);
buf_block_t* xdes;
xdes_t* descr = xdes_get_descriptor(space, header_page, &xdes, mtr);
@ -2740,7 +2746,7 @@ fseg_free_step_not_header(
const uint32_t space_id = page_get_space_id(page_align(header));
ut_ad(mtr->is_named_space(space_id));
fil_space_t* space = mtr_x_lock_space(space_id, mtr);
fil_space_t* space = mtr->x_lock_space(space_id);
buf_block_t* iblock;
inode = fseg_inode_get(header, space_id, space->zip_size(), mtr,

18
storage/innobase/handler/ha_innodb.cc

@ -493,10 +493,7 @@ const struct _ft_vft_ext ft_vft_ext_result = {innobase_fts_get_version,
#ifdef HAVE_PSI_INTERFACE
# define PSI_KEY(n) {&n##_key, #n, 0}
/* All RWLOCK used in Innodb are SX-locks */
# define PSI_RWLOCK_KEY(n) {&n##_key, #n, PSI_RWLOCK_FLAG_SX}
/* Keys to register pthread mutexes/cond in the current file with
/** Keys to register pthread mutexes/cond in the current file with
performance schema */
static mysql_pfs_key_t commit_cond_mutex_key;
static mysql_pfs_key_t commit_cond_key;
@ -562,15 +559,16 @@ static PSI_mutex_info all_innodb_mutexes[] = {
/* all_innodb_rwlocks array contains rwlocks that are
performance schema instrumented if "UNIV_PFS_RWLOCK"
is defined */
static PSI_rwlock_info all_innodb_rwlocks[] = {
static PSI_rwlock_info all_innodb_rwlocks[] =
{
# ifdef BTR_CUR_HASH_ADAPT
{ &btr_search_latch_key, "btr_search_latch", 0 },
# endif
{ &dict_operation_lock_key, "dict_operation_lock", 0 },
PSI_RWLOCK_KEY(fil_space_latch),
{ &trx_i_s_cache_lock_key, "trx_i_s_cache_lock", 0 },
{ &trx_purge_latch_key, "trx_purge_latch", 0 },
PSI_RWLOCK_KEY(index_tree_rw_lock),
{ &dict_operation_lock_key, "dict_operation_lock", 0 },
{ &fil_space_latch_key, "fil_space_latch", 0 },
{ &trx_i_s_cache_lock_key, "trx_i_s_cache_lock", 0 },
{ &trx_purge_latch_key, "trx_purge_latch", 0 },
{ &index_tree_rw_lock_key, "index_tree_rw_lock", PSI_RWLOCK_FLAG_SX }
};
# endif /* UNIV_PFS_RWLOCK */

6
storage/innobase/ibuf/ibuf0ibuf.cc

@ -417,7 +417,7 @@ ibuf_init_at_db_start(void)
mtr.start();
compile_time_assert(IBUF_SPACE_ID == TRX_SYS_SPACE);
compile_time_assert(IBUF_SPACE_ID == 0);
mtr_x_lock_space(fil_system.sys_space, &mtr);
mtr.x_lock_space(fil_system.sys_space);
buf_block_t* header_page = buf_page_get(
page_id_t(IBUF_SPACE_ID, FSP_IBUF_HEADER_PAGE_NO),
0, RW_X_LATCH, &mtr);
@ -1834,7 +1834,7 @@ static bool ibuf_add_free_page()
mtr.start();
/* Acquire the fsp latch before the ibuf header, obeying the latching
order */
mtr_x_lock_space(fil_system.sys_space, &mtr);
mtr.x_lock_space(fil_system.sys_space);
header_page = ibuf_header_page_get(&mtr);
/* Allocate a new page: NOTE that if the page has been a part of a
@ -1908,7 +1908,7 @@ ibuf_remove_free_page(void)
/* Acquire the fsp latch before the ibuf header, obeying the latching
order */
mtr_x_lock_space(fil_system.sys_space, &mtr);
mtr.x_lock_space(fil_system.sys_space);
header_page = ibuf_header_page_get(&mtr);
/* Prevent pessimistic inserts to insert buffer trees for a while */

5
storage/innobase/include/btr0btr.h

@ -648,10 +648,11 @@ btr_page_create(
@param[in,out] index index tree
@param[in,out] block block to be freed
@param[in,out] mtr mini-transaction
@param[in] blob whether this is freeing a BLOB page */
@param[in] blob whether this is freeing a BLOB page
@param[in] space_latched whether index->table->space->x_lock() was called */
MY_ATTRIBUTE((nonnull))
void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
bool blob = false);
bool blob = false, bool space_latched = false);
/**************************************************************//**
Gets the root node of a tree and x- or s-latches it.

56
storage/innobase/include/fil0fil.h

@ -33,6 +33,7 @@ Created 10/25/1995 Heikki Tuuri
#ifndef UNIV_INNOCHECKSUM
#include "srw_lock.h"
#include "buf0dblwr.h"
#include "hash0hash.h"
#include "log0recv.h"
@ -340,6 +341,12 @@ struct fil_space_t final
{
#ifndef UNIV_INNOCHECKSUM
friend fil_node_t;
/** Destructor: free the allocation latch.
Debug builds assert that no thread is registered as the exclusive
holder (latch_owner) and that no holders remain (latch_count == 0)
before the latch is destroyed. */
~fil_space_t()
{
ut_ad(!latch_owner);
ut_ad(!latch_count);
latch.destroy();
}
ulint id; /*!< space id */
hash_node_t hash; /*!< hash chain node */
char* name; /*!< Tablespace name */
@ -389,9 +396,11 @@ private:
static constexpr uint32_t NEEDS_FSYNC= 1U << 29;
/** The reference count */
static constexpr uint32_t PENDING= ~(STOPPING | CLOSING | NEEDS_FSYNC);
/** latch protecting all page allocation bitmap pages */
srw_lock latch;
ut_d(os_thread_id_t latch_owner;)
ut_d(Atomic_relaxed<uint32_t> latch_count;)
public:
rw_lock_t latch; /*!< latch protecting the file space storage
allocation */
UT_LIST_NODE_T(fil_space_t) named_spaces;
/*!< list of spaces for which FILE_MODIFY
records have been issued */
@ -458,7 +467,6 @@ public:
@return whether the reservation succeeded */
bool reserve_free_extents(uint32_t n_free_now, uint32_t n_to_reserve)
{
ut_ad(rw_lock_own(&latch, RW_LOCK_X));
if (n_reserved_extents + n_to_reserve > n_free_now) {
return false;
}
@ -472,7 +480,6 @@ public:
void release_free_extents(uint32_t n_reserved)
{
if (!n_reserved) return;
ut_ad(rw_lock_own(&latch, RW_LOCK_X));
ut_a(n_reserved_extents >= n_reserved);
n_reserved_extents -= n_reserved;
}
@ -950,11 +957,7 @@ public:
}
/** Update committed_size in mtr_t::commit() */
void set_committed_size()
{
ut_ad(rw_lock_own(&latch, RW_LOCK_X));
committed_size= size;
}
void set_committed_size() { committed_size= size; }
/** @return the last persisted page number */
uint32_t last_page_number() const { return committed_size - 1; }
@ -990,6 +993,41 @@ public:
static inline fil_space_t *next(fil_space_t *space, bool recheck,
bool encrypt);
#ifdef UNIV_DEBUG
/** @return whether the allocation latch has any registered holder
(shared or exclusive); debug-only accounting */
bool is_latched() const { return latch_count != 0; }
/** @return whether the current thread is registered as the
exclusive holder of the allocation latch; debug-only accounting */
bool is_owner() const { return latch_owner == os_thread_get_curr_id(); }
#endif
/** Acquire the allocation latch in exclusive mode */
void x_lock()
{
latch.wr_lock();
/* debug bookkeeping: no holder may be registered before the
write lock was granted; record this thread as the owner */
ut_ad(!latch_owner);
ut_d(latch_owner= os_thread_get_curr_id());
/* fetch_add() returns the previous holder count, which must be 0
when the latch is acquired exclusively */
ut_ad(!latch_count.fetch_add(1));
}
/** Release the allocation latch from exclusive mode */
void x_unlock()
{
/* fetch_sub() returns the previous count: exactly one holder
(this thread, the exclusive owner) must be registered */
ut_ad(latch_count.fetch_sub(1) == 1);
ut_ad(latch_owner == os_thread_get_curr_id());
/* clear the owner record before actually releasing the lock */
ut_d(latch_owner= 0);
latch.wr_unlock();
}
/** Acquire the allocation latch in shared mode */
void s_lock()
{
latch.rd_lock();
/* no thread may be registered as exclusive owner while shared
latches are being granted; count this shared holder (debug) */
ut_ad(!latch_owner);
ut_d(latch_count.fetch_add(1));
}
/** Release the allocation latch from shared mode */
void s_unlock()
{
/* fetch_sub() returns the previous count; at least one shared
holder must be registered, and no exclusive owner may exist */
ut_ad(latch_count.fetch_sub(1));
ut_ad(!latch_owner);
latch.rd_unlock();
}
private:
/** @return whether the file is usable for io() */
ATTRIBUTE_COLD bool prepare(bool have_mutex= false);

6
storage/innobase/include/fsp0fsp.h

@ -477,13 +477,15 @@ fsp_reserve_free_extents(
@param[in,out] seg_header file segment header
@param[in,out] space tablespace
@param[in] offset page number
@param[in,out] mtr mini-transaction */
@param[in,out] mtr mini-transaction
@param[in] have_latch whether space->x_lock() was already called */
void
fseg_free_page(
fseg_header_t* seg_header,
fil_space_t* space,
uint32_t offset,
mtr_t* mtr);
mtr_t* mtr,
bool have_latch = false);
/** Determine whether a page is free.
@param[in,out] space tablespace
@param[in] page page number

45
storage/innobase/include/mtr0mtr.h

@ -65,9 +65,6 @@ savepoint. */
/** Push an object to an mtr memo stack. */
#define mtr_memo_push(m, o, t) (m)->memo_push(o, t)
#define mtr_s_lock_space(s, m) (m)->s_lock_space((s), __FILE__, __LINE__)
#define mtr_x_lock_space(s, m) (m)->x_lock_space((s), __FILE__, __LINE__)
#define mtr_s_lock_index(i, m) (m)->s_lock(&(i)->lock, __FILE__, __LINE__)
#define mtr_x_lock_index(i, m) (m)->x_lock(&(i)->lock, __FILE__, __LINE__)
#define mtr_sx_lock_index(i, m) (m)->sx_lock(&(i)->lock, __FILE__, __LINE__)
@ -214,13 +211,8 @@ struct mtr_t {
/** Acquire a tablespace X-latch.
@param[in] space_id tablespace ID
@param[in] file file name from where called
@param[in] line line number in file
@return the tablespace object (never NULL) */
fil_space_t* x_lock_space(
ulint space_id,
const char* file,
unsigned line);
fil_space_t* x_lock_space(ulint space_id);
/** Acquire a shared rw-latch.
@param[in] lock rw-latch
@ -253,30 +245,19 @@ struct mtr_t {
}
/** Acquire a tablespace S-latch.
@param[in] space tablespace
@param[in] file file name from where called
@param[in] line line number in file */
void s_lock_space(fil_space_t* space, const char* file, unsigned line)
@param[in] space tablespace */
void s_lock_space(fil_space_t* space)
{
ut_ad(space->purpose == FIL_TYPE_TEMPORARY
|| space->purpose == FIL_TYPE_IMPORT
|| space->purpose == FIL_TYPE_TABLESPACE);
s_lock(&space->latch, file, line);
memo_push(space, MTR_MEMO_SPACE_S_LOCK);
space->s_lock();
}
/** Acquire a tablespace X-latch.
@param[in] space tablespace
@param[in] file file name from where called
@param[in] line line number in file */
void x_lock_space(fil_space_t* space, const char* file, unsigned line)
{
ut_ad(space->purpose == FIL_TYPE_TEMPORARY
|| space->purpose == FIL_TYPE_IMPORT
|| space->purpose == FIL_TYPE_TABLESPACE);
memo_push(space, MTR_MEMO_SPACE_X_LOCK);
rw_lock_x_lock_inline(&space->latch, 0, file, line);
}
@param[in] space tablespace */
void x_lock_space(fil_space_t* space);
/** Release an object in the memo stack.
@param object object
@param type object type
@ -334,6 +315,12 @@ public:
return false;
#endif
}
/** Check if we are holding tablespace latch
@param space tablespace to search for
@param shared whether to look for shared latch, instead of exclusive
@return whether space.latch is being held */
bool memo_contains(const fil_space_t& space, bool shared= false)
MY_ATTRIBUTE((warn_unused_result));
#ifdef UNIV_DEBUG
/** Check if we are holding an rw-latch in this mini-transaction
@param lock latch to search for
@ -341,12 +328,6 @@ public:
@return whether (lock,type) is contained */
bool memo_contains(const rw_lock_t &lock, mtr_memo_type_t type)
MY_ATTRIBUTE((warn_unused_result));
/** Check if we are holding exclusive tablespace latch
@param space tablespace to search for
@return whether space.latch is being held */
bool memo_contains(const fil_space_t& space)
MY_ATTRIBUTE((warn_unused_result));
/** Check if memo contains the given item.
@param object object to search

2
storage/innobase/include/mtr0mtr.ic

@ -43,7 +43,7 @@ mtr_t::memo_push(void* object, mtr_memo_type_t type)
ut_ad(is_active());
ut_ad(object != NULL);
ut_ad(type >= MTR_MEMO_PAGE_S_FIX);
ut_ad(type <= MTR_MEMO_SPACE_X_LOCK);
ut_ad(type <= MTR_MEMO_SPACE_S_LOCK);
ut_ad(ut_is_2pow(type));
/* If this mtr has x-fixed a clean page then we set

6
storage/innobase/include/mtr0types.h

@ -339,8 +339,10 @@ enum mtr_memo_type_t {
MTR_MEMO_SX_LOCK = RW_SX_LATCH << 5,
/** acquire X-latch on fil_space_t::latch */
MTR_MEMO_SPACE_X_LOCK = MTR_MEMO_SX_LOCK << 1
/** wr_lock() on fil_space_t::latch */
MTR_MEMO_SPACE_X_LOCK = MTR_MEMO_SX_LOCK << 1,
/** rd_lock() on fil_space_t::latch */
MTR_MEMO_SPACE_S_LOCK = MTR_MEMO_SX_LOCK << 2
};
#endif /* !UNIV_CHECKSUM */

3
storage/innobase/include/sync0types.h

@ -216,7 +216,6 @@ enum latch_level_t {
SYNC_IBUF_MUTEX,
SYNC_FSP_PAGE,
SYNC_FSP,
SYNC_EXTERN_STORAGE,
SYNC_TRX_UNDO_PAGE,
SYNC_RSEG_HEADER,
@ -287,7 +286,6 @@ enum latch_id_t {
LATCH_ID_WORK_QUEUE,
LATCH_ID_BUF_BLOCK_LOCK,
LATCH_ID_BUF_BLOCK_DEBUG,
LATCH_ID_FIL_SPACE,
LATCH_ID_IBUF_INDEX_TREE,
LATCH_ID_INDEX_TREE,
LATCH_ID_DICT_TABLE_STATS,
@ -951,7 +949,6 @@ struct sync_checker : public sync_check_functor_t
{
if (some_allowed) {
switch (level) {
case SYNC_FSP:
case SYNC_DICT:
case SYNC_NO_ORDER_CHECK:
return(false);

48
storage/innobase/lock/lock0lock.cc

@ -292,15 +292,12 @@ bool
lock_validate();
/*============*/
/*********************************************************************//**
Validates the record lock queues on a page.
@return TRUE if ok */
static
ibool
lock_rec_validate_page(
/*===================*/
const buf_block_t* block) /*!< in: buffer block */
MY_ATTRIBUTE((warn_unused_result));
/** Validate the record lock queues on a page.
@param block buffer pool block
@param latched whether the tablespace latch may be held
@return true if ok */
static bool lock_rec_validate_page(const buf_block_t *block, bool latched)
MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
/* The lock system */
@ -2379,7 +2376,10 @@ lock_move_reorganize_page(
mem_heap_free(heap);
#ifdef UNIV_DEBUG_LOCK_VALIDATE
ut_ad(lock_rec_validate_page(block));
if (fil_space_t* space = fil_space_t::get(page_id.space())) {
ut_ad(lock_rec_validate_page(block, space->is_latched()));
space->release();
}
#endif
}
@ -2491,8 +2491,12 @@ lock_move_rec_list_end(
lock_mutex_exit();
#ifdef UNIV_DEBUG_LOCK_VALIDATE
ut_ad(lock_rec_validate_page(block));
ut_ad(lock_rec_validate_page(new_block));
if (fil_space_t* space = fil_space_t::get(page_id.space())) {
const bool is_latched{space->is_latched()};
ut_ad(lock_rec_validate_page(block, is_latched));
ut_ad(lock_rec_validate_page(new_block, is_latched));
space->release();
}
#endif
}
@ -4536,14 +4540,11 @@ func_exit:
goto func_exit;
}
/*********************************************************************//**
Validates the record lock queues on a page.
@return TRUE if ok */
static
ibool
lock_rec_validate_page(
/*===================*/
const buf_block_t* block) /*!< in: buffer block */
/** Validate the record lock queues on a page.
@param block buffer pool block
@param latched whether the tablespace latch may be held
@return true if ok */
static bool lock_rec_validate_page(const buf_block_t *block, bool latched)
{
const lock_t* lock;
const rec_t* rec;
@ -4577,8 +4578,8 @@ loop:
ut_ad(!trx_is_ac_nl_ro(lock->trx));
/* Only validate the record queues when this thread is not
holding a space->latch. */
if (!sync_check_find(SYNC_FSP))
holding a tablespace latch. */
if (!latched)
for (i = nth_bit; i < lock_rec_get_n_bits(lock); i++) {
if (i == PAGE_HEAP_NO_SUPREMUM
@ -4690,7 +4691,8 @@ static void lock_rec_block_validate(const page_id_t page_id)
if (block) {
buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
ut_ad(lock_rec_validate_page(block));
ut_ad(lock_rec_validate_page(block,
space->is_latched()));
}
mtr_commit(&mtr);

70
storage/innobase/mtr/mtr0mtr.cc

@ -212,11 +212,11 @@ static void memo_slot_release(mtr_memo_slot_t *slot)
rw_lock_sx_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
break;
case MTR_MEMO_SPACE_X_LOCK:
{
fil_space_t *space= static_cast<fil_space_t*>(slot->object);
space->set_committed_size();
rw_lock_x_unlock(&space->latch);
}
static_cast<fil_space_t*>(slot->object)->set_committed_size();
static_cast<fil_space_t*>(slot->object)->x_unlock();
break;
case MTR_MEMO_SPACE_S_LOCK:
static_cast<fil_space_t*>(slot->object)->s_unlock();
break;
case MTR_MEMO_X_LOCK:
rw_lock_x_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
@ -254,11 +254,11 @@ struct ReleaseLatches {
rw_lock_s_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
break;
case MTR_MEMO_SPACE_X_LOCK:
{
fil_space_t *space= static_cast<fil_space_t*>(slot->object);
space->set_committed_size();
rw_lock_x_unlock(&space->latch);
}
static_cast<fil_space_t*>(slot->object)->set_committed_size();
static_cast<fil_space_t*>(slot->object)->x_unlock();
break;
case MTR_MEMO_SPACE_S_LOCK:
static_cast<fil_space_t*>(slot->object)->s_unlock();
break;
case MTR_MEMO_X_LOCK:
rw_lock_x_unlock(reinterpret_cast<rw_lock_t*>(slot->object));
@ -426,7 +426,7 @@ void mtr_t::commit()
freed_space= fil_system.sys_space;
}
ut_ad(memo_contains(*freed_space));
ut_ad(freed_space->is_owner());
/* Update the last freed lsn */
freed_space->update_last_freed_lsn(m_commit_lsn);
@ -545,13 +545,10 @@ bool mtr_t::is_named_space(const fil_space_t* space) const
#endif /* UNIV_DEBUG */
/** Acquire a tablespace X-latch.
NOTE: use mtr_x_lock_space().
@param[in] space_id tablespace ID
@param[in] file file name from where called
@param[in] line line number in file
@return the tablespace object (never NULL) */
fil_space_t*
mtr_t::x_lock_space(ulint space_id, const char* file, unsigned line)
mtr_t::x_lock_space(ulint space_id)
{
fil_space_t* space;
@ -569,10 +566,24 @@ mtr_t::x_lock_space(ulint space_id, const char* file, unsigned line)
ut_ad(space);
ut_ad(space->id == space_id);
x_lock_space(space, file, line);
x_lock_space(space);
return(space);
}
/** Acquire a tablespace X-latch and register it in the mini-transaction
memo. The latch is taken at most once per mini-transaction: if the memo
already records it, nothing is done. It is released (after updating the
committed size) when the memo slot is released at commit.
@param[in] space tablespace */
void mtr_t::x_lock_space(fil_space_t *space)
{
/* only file-backed tablespaces may be latched here */
ut_ad(space->purpose == FIL_TYPE_TEMPORARY ||
space->purpose == FIL_TYPE_IMPORT ||
space->purpose == FIL_TYPE_TABLESPACE);
if (!memo_contains(*space))
{
memo_push(space, MTR_MEMO_SPACE_X_LOCK);
space->x_lock();
}
}
/** Release an object in the memo stack.
@return true if released */
bool
@ -937,6 +948,21 @@ bool mtr_t::have_x_latch(const buf_block_t &block) const
return true;
}
/** Check if we are holding a tablespace latch in this mini-transaction
@param space tablespace to search for
@param shared whether to look for shared latch, instead of exclusive
@return whether space.latch is being held */
bool mtr_t::memo_contains(const fil_space_t& space, bool shared)
{
Iterate<Find> iteration(Find(&space, shared
? MTR_MEMO_SPACE_S_LOCK
: MTR_MEMO_SPACE_X_LOCK));
/* NOTE(review): presumably Find aborts the traversal on a match, so a
completed traversal (true) means the latch is not in the memo — confirm */
if (m_memo.for_each_block_in_reverse(iteration))
return false;
/* debug: an exclusive memo entry implies this thread owns the latch */
ut_ad(shared || space.is_owner());
return true;
}
#ifdef UNIV_DEBUG
/** Check if we are holding an rw-latch in this mini-transaction
@param lock latch to search for
@ -965,18 +991,6 @@ bool mtr_t::memo_contains(const rw_lock_t &lock, mtr_memo_type_t type)
return true;
}
/** Check if we are holding exclusive tablespace latch
@param space tablespace to search for
@return whether space.latch is being held */
bool mtr_t::memo_contains(const fil_space_t& space)
{
Iterate<Find> iteration(Find(&space, MTR_MEMO_SPACE_X_LOCK));
if (m_memo.for_each_block_in_reverse(iteration))
return false;
ut_ad(rw_lock_own(const_cast<rw_lock_t*>(&space.latch), RW_LOCK_X));
return true;
}
/** Debug check for flags */
struct FlaggedCheck {
FlaggedCheck(const void* ptr, ulint flags)

2
storage/innobase/srv/srv0srv.cc

@ -708,7 +708,7 @@ static void srv_init()
ut_d(srv_master_thread_disabled_event = os_event_create(0));
/* page_zip_stat_per_index_mutex is acquired from:
1. page_zip_compress() (after SYNC_FSP)
1. page_zip_compress()
2. page_zip_decompress()
3. i_s_cmp_per_index_fill_low() (where SYNC_DICT is acquired)
4. innodb_cmp_per_index_update(), no other latches

3
storage/innobase/srv/srv0start.cc

@ -1512,8 +1512,7 @@ file_checked:
if (sum_of_new_sizes > 0) {
/* New data file(s) were added */
mtr.start();
mtr.x_lock_space(fil_system.sys_space,
__FILE__, __LINE__);
mtr.x_lock_space(fil_system.sys_space);
buf_block_t* block = buf_page_get(
page_id_t(0, 0), 0,
RW_SX_LATCH, &mtr);

27
storage/innobase/sync/sync0debug.cc

@ -475,7 +475,6 @@ LatchDebug::LatchDebug()
LEVEL_MAP_INSERT(SYNC_IBUF_INDEX_TREE);
LEVEL_MAP_INSERT(SYNC_IBUF_MUTEX);
LEVEL_MAP_INSERT(SYNC_FSP_PAGE);
LEVEL_MAP_INSERT(SYNC_FSP);
LEVEL_MAP_INSERT(SYNC_EXTERN_STORAGE);
LEVEL_MAP_INSERT(SYNC_TRX_UNDO_PAGE);
LEVEL_MAP_INSERT(SYNC_RSEG_HEADER);
@ -798,13 +797,6 @@ LatchDebug::check_order(
break;
case SYNC_FSP_PAGE:
ut_a(find(latches, SYNC_FSP) != 0);
break;
case SYNC_FSP:
ut_a(find(latches, SYNC_FSP) != 0
|| basic_check(latches, level, SYNC_FSP));
break;
case SYNC_TRX_UNDO_PAGE:
@ -830,9 +822,7 @@ LatchDebug::check_order(
break;
case SYNC_TREE_NODE:
ut_a(find(latches, SYNC_FSP) == &fil_system.temp_space->latch
|| find(latches, SYNC_INDEX_TREE)
ut_a(find(latches, SYNC_INDEX_TREE)
|| basic_check(latches, level, SYNC_TREE_NODE - 1));
break;
@ -859,13 +849,12 @@ LatchDebug::check_order(
pre-allocated new pages may only be used while holding
ibuf_mutex, in btr_page_alloc_for_ibuf(). */
ut_a(find(latches, SYNC_IBUF_MUTEX) != 0
|| find(latches, SYNC_FSP) != 0);
ut_ad(find(latches, SYNC_IBUF_MUTEX) != 0
|| fil_system.sys_space->is_owner());
break;
case SYNC_IBUF_INDEX_TREE:
if (find(latches, SYNC_FSP) != 0) {
if (fil_system.sys_space->is_owner()) {
basic_check(latches, level, level - 1);
} else {
basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1);
@ -873,14 +862,12 @@ LatchDebug::check_order(
break;
case SYNC_IBUF_PESS_INSERT_MUTEX:
basic_check(latches, level, SYNC_FSP - 1);
basic_check(latches, level, SYNC_FSP_PAGE);
ut_a(find(latches, SYNC_IBUF_MUTEX) == 0);
break;
case SYNC_IBUF_HEADER:
basic_check(latches, level, SYNC_FSP - 1);
basic_check(latches, level, SYNC_FSP_PAGE);
ut_a(find(latches, SYNC_IBUF_MUTEX) == NULL);
ut_a(find(latches, SYNC_IBUF_PESS_INSERT_MUTEX) == NULL);
break;
@ -1299,8 +1286,6 @@ sync_latch_meta_init()
PFS_NOT_INSTRUMENTED);
#endif /* UNIV_DEBUG */
LATCH_ADD_RWLOCK(FIL_SPACE, SYNC_FSP, fil_space_latch_key);
LATCH_ADD_RWLOCK(IBUF_INDEX_TREE, SYNC_IBUF_INDEX_TREE,
index_tree_rw_lock_key);

2
storage/innobase/trx/trx0purge.cc

@ -697,7 +697,7 @@ not_free:
mtr_t mtr;
const ulint size = SRV_UNDO_TABLESPACE_SIZE_IN_PAGES;
mtr.start();
mtr_x_lock_space(purge_sys.truncate.current, &mtr);
mtr.x_lock_space(purge_sys.truncate.current);
/* Associate the undo tablespace with mtr.
During mtr::commit(), InnoDB can use the undo
tablespace object to clear all freed ranges */

2
storage/innobase/trx/trx0rec.cc

@ -2039,7 +2039,7 @@ trx_undo_report_row_operation(
tree latch, which is the rseg
mutex. We must commit the mini-transaction
first, because it may be holding lower-level
latches, such as SYNC_FSP and SYNC_FSP_PAGE. */
latches, such as SYNC_FSP_PAGE. */
mtr.commit();
mtr.start();

4
storage/innobase/trx/trx0rseg.cc

@ -671,7 +671,7 @@ trx_rseg_create(ulint space_id)
mtr.start();
fil_space_t* space = mtr_x_lock_space(space_id, &mtr);
fil_space_t* space = mtr.x_lock_space(space_id);
ut_ad(space->purpose == FIL_TYPE_TABLESPACE);
if (buf_block_t* sys_header = trx_sysf_get(&mtr)) {
@ -706,7 +706,7 @@ trx_temp_rseg_create()
for (ulong i = 0; i < TRX_SYS_N_RSEGS; i++) {
mtr.start();
mtr.set_log_mode(MTR_LOG_NO_REDO);
mtr_x_lock_space(fil_system.temp_space, &mtr);
mtr.x_lock_space(fil_system.temp_space);
buf_block_t* rblock = trx_rseg_header_create(
fil_system.temp_space, i, NULL, &mtr);

2
storage/innobase/trx/trx0sys.cc

@ -154,7 +154,7 @@ trx_sysf_create(
then enter the kernel: we must do it in this order to conform
to the latching order rules. */
mtr_x_lock_space(fil_system.sys_space, mtr);
mtr->x_lock_space(fil_system.sys_space);
compile_time_assert(TRX_SYS_SPACE == 0);
/* Create the trx sys file block in a new allocated file segment */

Loading…
Cancel
Save