Browse Source

MDEV-35049: btr_search_check_free_space_in_heap() is a bottleneck

Let us implement a simple fixed-size allocator for the adaptive hash
index, instead of complicating mem_heap_t or mem_block_info_t.

MEM_HEAP_BTR_SEARCH: Remove.

mem_block_info_t::free_block(), mem_heap_free_block_free(): Remove.

mem_heap_free_top(), mem_heap_get_top(): Remove.

btr_sea::partition::spare: Replaces mem_block_info_t::free_block.
This keeps one spare block per adaptive hash index partition, to
process an insert.

We must not wait for buf_pool.mutex while holding
any btr_sea::partition::latch. That is why we cache one block for
future allocations. This is protected by a new
btr_sea::partition::blocks_mutex in order to relieve pressure on
btr_sea::partition::latch.

btr_sea::partition::prepare_insert(): Replaces
btr_search_check_free_space_in_heap().

btr_sea::partition::erase(): Replaces ha_search_and_delete_if_found().

btr_sea::partition::cleanup_after_erase(): Replaces most of
ha_delete_hash_node(). Unlike the previous implementation, we will
retain a spare block for prepare_insert().
This should reduce some contention on buf_pool.mutex.

btr_search.n_parts: Replaces btr_ahi_parts.

btr_search.enabled: Replaces btr_search_enabled. This must hold
whenever buf_block_t::index is set while a thread is holding a
btr_sea::partition::latch.

dict_index_t::search_info: Remove pointer indirection, and use
Atomic_relaxed or Atomic_counter for most fields.

btr_search_guess_on_hash(): Let the caller ensure that latch_mode is
BTR_MODIFY_LEAF or BTR_SEARCH_LEAF. Release btr_sea::partition::latch
before buffer-fixing the block. The page latch that we already acquired
is preventing buffer pool eviction. We must validate both
block->index and block->page.state while holding part.latch
in order to avoid race conditions with buffer page relocation
or buf_pool_t::resize().

btr_search_check_guess(): Remove the constant parameter
can_only_compare_to_cursor_rec=false.

ahi_node: Replaces ha_node_t.

This has been tested by running the regression test suite
with the adaptive hash index enabled:
./mtr --mysqld=--loose-innodb-adaptive-hash-index=ON

Reviewed by: Vladislav Lesin
bb-11.8-all-builders
Marko Mäkelä 9 months ago
parent
commit
9c8bdc6c15
  1. 6
      extra/mariabackup/xtrabackup.cc
  2. 3
      storage/innobase/CMakeLists.txt
  3. 14
      storage/innobase/btr/btr0btr.cc
  4. 59
      storage/innobase/btr/btr0cur.cc
  5. 1202
      storage/innobase/btr/btr0sea.cc
  6. 8
      storage/innobase/buf/buf0buf.cc
  7. 11
      storage/innobase/buf/buf0lru.cc
  8. 2
      storage/innobase/dict/dict0crea.cc
  9. 15
      storage/innobase/dict/dict0dict.cc
  10. 16
      storage/innobase/dict/dict0mem.cc
  11. 5
      storage/innobase/gis/gis0sea.cc
  12. 13
      storage/innobase/handler/ha_innodb.cc
  13. 6
      storage/innobase/handler/handler0alter.cc
  14. 6
      storage/innobase/ibuf/ibuf0ibuf.cc
  15. 7
      storage/innobase/include/btr0cur.h
  16. 396
      storage/innobase/include/btr0sea.h
  17. 117
      storage/innobase/include/btr0sea.inl
  18. 11
      storage/innobase/include/btr0types.h
  19. 4
      storage/innobase/include/buf0buf.h
  20. 66
      storage/innobase/include/dict0mem.h
  21. 60
      storage/innobase/include/ha0ha.h
  22. 154
      storage/innobase/include/ha0ha.inl
  23. 68
      storage/innobase/include/mem0mem.h
  24. 93
      storage/innobase/include/mem0mem.inl
  25. 52
      storage/innobase/mem/mem0mem.cc
  26. 3
      storage/innobase/mtr/mtr0mtr.cc
  27. 7
      storage/innobase/row/row0import.cc
  28. 11
      storage/innobase/row/row0ins.cc
  29. 2
      storage/innobase/row/row0merge.cc
  30. 2
      storage/innobase/row/row0mysql.cc
  31. 2
      storage/innobase/row/row0sel.cc
  32. 35
      storage/innobase/srv/srv0srv.cc
  33. 2
      storage/innobase/srv/srv0start.cc

6
extra/mariabackup/xtrabackup.cc

@ -1845,8 +1845,8 @@ struct my_option xb_server_options[] =
#ifdef BTR_CUR_HASH_ADAPT
{"innodb_adaptive_hash_index", OPT_INNODB_ADAPTIVE_HASH_INDEX,
"Enable InnoDB adaptive hash index (disabled by default).",
&btr_search_enabled,
&btr_search_enabled,
&btr_search.enabled,
&btr_search.enabled,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
#endif /* BTR_CUR_HASH_ADAPT */
{"innodb_autoextend_increment", OPT_INNODB_AUTOEXTEND_INCREMENT,
@ -2402,7 +2402,7 @@ static bool innodb_init_param()
srv_page_size = 0;
srv_page_size_shift = 0;
#ifdef BTR_CUR_HASH_ADAPT
btr_ahi_parts = 1;
btr_search.n_parts = 1;
#endif /* BTR_CUR_HASH_ADAPT */
if (innobase_page_size != (1LL << 14)) {

3
storage/innobase/CMakeLists.txt

@ -193,7 +193,6 @@ SET(INNOBASE_SOURCES
include/btr0pcur.h
include/btr0pcur.inl
include/btr0sea.h
include/btr0sea.inl
include/btr0types.h
include/buf0buddy.h
include/buf0buf.h
@ -259,8 +258,6 @@ SET(INNOBASE_SOURCES
include/gis0rtree.inl
include/gis0type.h
include/ha_prototypes.h
include/ha0ha.h
include/ha0ha.inl
include/ha0storage.h
include/ha0storage.inl
include/handler0alter.h

14
storage/innobase/btr/btr0btr.cc

@ -280,7 +280,7 @@ btr_root_block_get(
#ifndef BTR_CUR_ADAPT
static constexpr buf_block_t *guess= nullptr;
#else
buf_block_t *&guess= btr_search_get_info(index)->root_guess;
buf_block_t *&guess= index->search_info.root_guess;
guess=
#endif
block=
@ -1085,7 +1085,7 @@ dberr_t dict_index_t::clear(que_thr_t *thr)
#ifndef BTR_CUR_ADAPT
static constexpr buf_block_t *guess= nullptr;
#else
buf_block_t *&guess= btr_search_get_info(this)->root_guess;
buf_block_t *&guess= search_info.root_guess;
guess=
#endif
root_block= buf_page_get_gen({table->space_id, page},
@ -1095,14 +1095,12 @@ dberr_t dict_index_t::clear(que_thr_t *thr)
{
btr_free_but_not_root(root_block, mtr.get_log_mode()
#ifdef BTR_CUR_HASH_ADAPT
,n_ahi_pages() != 0
,any_ahi_pages()
#endif
);
btr_search_drop_page_hash_index(root_block, false);
#ifdef BTR_CUR_HASH_ADAPT
if (root_block->index)
btr_search_drop_page_hash_index(root_block, false);
ut_ad(n_ahi_pages() == 0);
ut_ad(!any_ahi_pages());
#endif
mtr.memset(root_block, PAGE_HEADER + PAGE_BTR_SEG_LEAF,
FSEG_HEADER_SIZE, 0);
@ -1147,7 +1145,7 @@ void btr_drop_temporary_table(const dict_table_t &table)
#ifndef BTR_CUR_ADAPT
static constexpr buf_block_t *guess= nullptr;
#else
buf_block_t *guess= index->search_info->root_guess;
buf_block_t *guess= index->search_info.root_guess;
#endif
if (buf_block_t *block= buf_page_get_gen({SRV_TMP_SPACE_ID, index->page},
0, RW_X_LATCH, guess, BUF_GET,

59
storage/innobase/btr/btr0cur.cc

@ -1122,18 +1122,19 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
#ifndef BTR_CUR_ADAPT
guess= nullptr;
#else
btr_search_t *info= btr_search_get_info(index());
auto info= &index()->search_info;
guess= info->root_guess;
# ifdef BTR_CUR_HASH_ADAPT
# ifdef UNIV_SEARCH_PERF_STAT
info->n_searches++;
# endif
bool ahi_enabled= btr_search_enabled;
/* We do a dirty read of btr_search_enabled below,
and btr_search_guess_on_hash() will have to check it again. */
if (!ahi_enabled);
else if (btr_search_guess_on_hash(index(), info, tuple, mode,
if (latch_mode > BTR_MODIFY_LEAF)
/* The adaptive hash index cannot be useful for these searches. */;
/* We do a dirty read of btr_search.enabled below,
and btr_search_guess_on_hash() will have to check it again. */
else if (!btr_search.enabled);
else if (btr_search_guess_on_hash(index(), tuple, mode,
latch_mode, this, mtr))
{
/* Search using the hash index succeeded */
@ -1397,7 +1398,7 @@ release_tree:
reached_latched_leaf:
#ifdef BTR_CUR_HASH_ADAPT
if (ahi_enabled && !(tuple->info_bits & REC_INFO_MIN_REC_FLAG))
if (btr_search.enabled && !(tuple->info_bits & REC_INFO_MIN_REC_FLAG))
{
if (page_cur_search_with_match_bytes(tuple, mode,
&up_match, &up_bytes,
@ -1422,18 +1423,18 @@ release_tree:
goto need_opposite_intention;
#ifdef BTR_CUR_HASH_ADAPT
/* We do a dirty read of btr_search_enabled here. We will
properly check btr_search_enabled again in
/* We do a dirty read of btr_search.enabled here. We will recheck in
btr_search_build_page_hash_index() before building a page hash
index, while holding search latch. */
if (!btr_search_enabled);
if (!btr_search.enabled);
else if (tuple->info_bits & REC_INFO_MIN_REC_FLAG)
/* This may be a search tuple for btr_pcur_t::restore_position(). */
ut_ad(tuple->is_metadata() ||
(tuple->is_metadata(tuple->info_bits ^ REC_STATUS_INSTANT)));
else if (index()->table->is_temporary());
else if (!rec_is_metadata(page_cur.rec, *index()))
btr_search_info_update(index(), this);
else if (!rec_is_metadata(page_cur.rec, *index()) &&
index()->search_info.hash_analysis_useful())
search_info_update();
#endif /* BTR_CUR_HASH_ADAPT */
goto func_exit;
@ -1660,18 +1661,18 @@ dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
ut_ad(low_match != ULINT_UNDEFINED || mode != PAGE_CUR_LE);
#ifdef BTR_CUR_HASH_ADAPT
/* We do a dirty read of btr_search_enabled here. We will
properly check btr_search_enabled again in
/* We do a dirty read of btr_search.enabled here. We will recheck in
btr_search_build_page_hash_index() before building a page hash
index, while holding search latch. */
if (!btr_search_enabled);
if (!btr_search.enabled);
else if (tuple->info_bits & REC_INFO_MIN_REC_FLAG)
/* This may be a search tuple for btr_pcur_t::restore_position(). */
ut_ad(tuple->is_metadata() ||
(tuple->is_metadata(tuple->info_bits ^ REC_STATUS_INSTANT)));
else if (index()->table->is_temporary());
else if (!rec_is_metadata(page_cur.rec, *index()))
btr_search_info_update(index(), this);
else if (!rec_is_metadata(page_cur.rec, *index()) &&
index()->search_info.hash_analysis_useful())
search_info_update();
#endif /* BTR_CUR_HASH_ADAPT */
err= DB_SUCCESS;
}
@ -1774,8 +1775,7 @@ dberr_t btr_cur_search_to_nth_level(ulint level,
#ifndef BTR_CUR_ADAPT
buf_block_t *block= nullptr;
#else
btr_search_t *info= btr_search_get_info(index);
buf_block_t *block= info->root_guess;
buf_block_t *block= index->search_info.root_guess;
#endif /* BTR_CUR_ADAPT */
ut_ad(mtr->memo_contains_flagged(&index->lock,
@ -2520,12 +2520,10 @@ fail_err:
ut_ad(flags == BTR_NO_LOCKING_FLAG);
} else if (index->table->is_temporary()) {
} else {
srw_spin_lock* ahi_latch = btr_search_sys.get_latch(*index);
if (!reorg && cursor->flag == BTR_CUR_HASH) {
btr_search_update_hash_node_on_insert(
cursor, ahi_latch);
btr_search_update_hash_node_on_insert(cursor);
} else {
btr_search_update_hash_on_insert(cursor, ahi_latch);
btr_search_update_hash_on_insert(cursor);
}
}
#endif /* BTR_CUR_HASH_ADAPT */
@ -2698,8 +2696,7 @@ btr_cur_pessimistic_insert(
ut_ad(!(flags & BTR_CREATE_FLAG));
} else if (index->table->is_temporary()) {
} else {
btr_search_update_hash_on_insert(
cursor, btr_search_sys.get_latch(*index));
btr_search_update_hash_on_insert(cursor);
}
#endif /* BTR_CUR_HASH_ADAPT */
if (inherit && !(flags & BTR_NO_LOCKING_FLAG)) {
@ -3249,9 +3246,9 @@ btr_cur_update_in_place(
#ifdef BTR_CUR_HASH_ADAPT
{
srw_spin_lock* ahi_latch = block->index
? btr_search_sys.get_latch(*index) : NULL;
if (ahi_latch) {
auto part = block->index
? &btr_search.get_part(*index) : nullptr;
if (part) {
/* TO DO: Can we skip this if none of the fields
index->search_info->curr_n_fields
are being updated? */
@ -3269,7 +3266,7 @@ btr_cur_update_in_place(
btr_search_update_hash_on_delete(cursor);
}
ahi_latch->wr_lock(SRW_LOCK_CALL);
part->latch.wr_lock(SRW_LOCK_CALL);
}
assert_block_ahi_valid(block);
@ -3279,8 +3276,8 @@ btr_cur_update_in_place(
mtr);
#ifdef BTR_CUR_HASH_ADAPT
if (ahi_latch) {
ahi_latch->wr_unlock();
if (part) {
part->latch.wr_unlock();
}
}
#endif /* BTR_CUR_HASH_ADAPT */

1202
storage/innobase/btr/btr0sea.cc
File diff suppressed because it is too large
View File

8
storage/innobase/buf/buf0buf.cc

@ -1793,11 +1793,9 @@ inline void buf_pool_t::resize()
/* disable AHI if needed */
buf_resize_status("Disabling adaptive hash index.");
btr_search_s_lock_all();
const bool btr_search_disabled = btr_search_enabled;
btr_search_s_unlock_all();
const bool btr_search_disabled = btr_search.enabled;
btr_search_disable();
btr_search.disable();
if (btr_search_disabled) {
ib::info() << "disabled adaptive hash index.";
@ -2098,7 +2096,7 @@ calc_buf_pool_size:
#ifdef BTR_CUR_HASH_ADAPT
/* enable AHI if needed */
if (btr_search_disabled) {
btr_search_enable(true);
btr_search.enable(true);
ib::info() << "Re-enabled adaptive hash index.";
}
#endif /* BTR_CUR_HASH_ADAPT */

11
storage/innobase/buf/buf0lru.cc

@ -273,6 +273,10 @@ buf_block_t* buf_LRU_get_free_only()
while (block != NULL) {
ut_ad(block->page.in_free_list);
ut_d(block->page.in_free_list = FALSE);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!block->n_pointers);
ut_ad(!block->index);
#endif
ut_ad(!block->page.oldest_modification());
ut_ad(!block->page.in_LRU_list);
ut_a(!block->page.in_file());
@ -282,10 +286,6 @@ buf_block_t* buf_LRU_get_free_only()
|| UT_LIST_GET_LEN(buf_pool.withdraw)
>= buf_pool.withdraw_target
|| !buf_pool.will_be_withdrawn(block->page)) {
/* No adaptive hash index entries may point to
a free block. */
assert_block_ahi_empty(block);
block->page.set_state(buf_page_t::MEMORY);
block->page.set_os_used();
break;
@ -981,7 +981,10 @@ buf_LRU_block_free_non_file_page(
void* data;
ut_ad(block->page.state() == buf_page_t::MEMORY);
#ifdef BTR_CUR_HASH_ADAPT
assert_block_ahi_empty(block);
block->n_hash_helps = 0;
#endif
ut_ad(!block->page.in_free_list);
ut_ad(!block->page.oldest_modification());
ut_ad(!block->page.in_LRU_list);

2
storage/innobase/dict/dict0crea.cc

@ -1280,7 +1280,7 @@ dict_create_index_step(
}
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!node->index->search_info->ref_count);
ut_ad(!node->index->search_info.ref_count);
#endif /* BTR_CUR_HASH_ADAPT */
dict_index_remove_from_cache(table, node->index);
node->index = NULL;

15
storage/innobase/dict/dict0dict.cc

@ -1227,7 +1227,7 @@ static bool dict_table_can_be_evicted(dict_table_t *table)
for (const dict_index_t* index
= dict_table_get_first_index(table);
index; index = dict_table_get_next_index(index)) {
if (index->n_ahi_pages()) {
if (index->any_ahi_pages()) {
return false;
}
}
@ -1254,9 +1254,6 @@ dict_index_t *dict_index_t::clone() const
ut_ad(!rtr_track);
const size_t size= sizeof *this + n_fields * sizeof(*fields) +
#ifdef BTR_CUR_ADAPT
sizeof *search_info +
#endif
1 + strlen(name) +
n_uniq * (sizeof *stat_n_diff_key_vals +
sizeof *stat_n_sample_sizes +
@ -1271,9 +1268,6 @@ dict_index_t *dict_index_t::clone() const
index->name= mem_heap_strdup(heap, name);
index->fields= static_cast<dict_field_t*>
(mem_heap_dup(heap, fields, n_fields * sizeof *fields));
#ifdef BTR_CUR_ADAPT
index->search_info= btr_search_info_create(index->heap);
#endif /* BTR_CUR_ADAPT */
index->stat_n_diff_key_vals= static_cast<ib_uint64_t*>
(mem_heap_zalloc(heap, n_uniq * sizeof *stat_n_diff_key_vals));
index->stat_n_sample_sizes= static_cast<ib_uint64_t*>
@ -1288,7 +1282,7 @@ dict_index_t *dict_index_t::clone() const
@return this or a clone */
dict_index_t *dict_index_t::clone_if_needed()
{
if (!search_info->ref_count)
if (!search_info.ref_count)
return this;
dict_index_t *prev= UT_LIST_GET_PREV(indexes, this);
@ -2022,9 +2016,6 @@ dict_index_add_to_cache(
/* Add the new index as the last index for the table */
UT_LIST_ADD_LAST(new_index->table->indexes, new_index);
#ifdef BTR_CUR_ADAPT
new_index->search_info = btr_search_info_create(new_index->heap);
#endif /* BTR_CUR_ADAPT */
new_index->page = unsigned(page_no);
new_index->lock.SRW_LOCK_INIT(index_tree_rw_lock_key);
@ -2090,7 +2081,7 @@ dict_index_remove_from_cache_low(
only free the dict_index_t struct when this count drops to
zero. See also: dict_table_can_be_evicted() */
if (index->n_ahi_pages()) {
if (index->any_ahi_pages()) {
table->autoinc_mutex.wr_lock();
index->set_freed();
UT_LIST_ADD_LAST(table->freed_indexes, index);

16
storage/innobase/dict/dict0mem.cc

@ -807,6 +807,22 @@ dict_mem_foreign_create(void)
DBUG_RETURN(foreign);
}
/** Duplicate a string to a memory heap, with lower-case conversion
@param heap memory heap where string is allocated
@param cs the character set of the string
@param str the source string
@return own: a NUL-terminated lower-cased copy of str */
static LEX_STRING mem_heap_alloc_casedn_z(mem_heap_t *heap,
CHARSET_INFO *cs,
const LEX_CSTRING &str) noexcept
{
size_t nbytes= str.length * cs->casedn_multiply() + 1;
LEX_STRING res;
res.str= static_cast<char*>(mem_heap_alloc(heap, nbytes));
res.length= cs->casedn_z(str.str, str.length, res.str, nbytes);
return res;
}
/**********************************************************************//**
Sets the foreign_table_name_lookup pointer based on the value of
lower_case_table_names. If that is 0 or 1, foreign_table_name_lookup

5
storage/innobase/gis/gis0sea.cc

@ -582,8 +582,7 @@ dberr_t rtr_search_to_nth_level(btr_cur_t *cur, que_thr_t *thr,
#ifndef BTR_CUR_ADAPT
buf_block_t *guess= nullptr;
#else
btr_search_t *const info= btr_search_get_info(index);
buf_block_t *guess= info->root_guess;
buf_block_t *&guess= index->search_info.root_guess;
#endif
/* Store the position of the tree latch we push to mtr so that we
@ -731,7 +730,7 @@ dberr_t rtr_search_to_nth_level(btr_cur_t *cur, que_thr_t *thr,
rtr_get_mbr_from_tuple(tuple, &cur->rtr_info->mbr);
#ifdef BTR_CUR_ADAPT
info->root_guess= block;
guess= block;
#endif
}

13
storage/innobase/handler/ha_innodb.cc

@ -17584,9 +17584,9 @@ innodb_adaptive_hash_index_update(THD*, st_mysql_sys_var*, void*,
{
mysql_mutex_unlock(&LOCK_global_system_variables);
if (*(my_bool*) save) {
btr_search_enable();
btr_search.enable();
} else {
btr_search_disable();
btr_search.disable();
}
mysql_mutex_lock(&LOCK_global_system_variables);
}
@ -19189,18 +19189,15 @@ static MYSQL_SYSVAR_BOOL(stats_traditional, srv_stats_sample_traditional,
NULL, NULL, TRUE);
#ifdef BTR_CUR_HASH_ADAPT
static MYSQL_SYSVAR_BOOL(adaptive_hash_index, btr_search_enabled,
static MYSQL_SYSVAR_BOOL(adaptive_hash_index, *(my_bool*) &btr_search.enabled,
PLUGIN_VAR_OPCMDARG,
"Enable InnoDB adaptive hash index (disabled by default)",
NULL, innodb_adaptive_hash_index_update, false);
/** Number of distinct partitions of AHI.
Each partition is protected by its own latch and so we have parts number
of latches protecting complete search system. */
static MYSQL_SYSVAR_ULONG(adaptive_hash_index_parts, btr_ahi_parts,
static MYSQL_SYSVAR_ULONG(adaptive_hash_index_parts, btr_search.n_parts,
PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
"Number of InnoDB Adaptive Hash Index Partitions (default 8)",
NULL, NULL, 8, 1, 512, 0);
NULL, NULL, 8, 1, array_elements(btr_search.parts), 0);
#endif /* BTR_CUR_HASH_ADAPT */
static MYSQL_SYSVAR_UINT(compression_level, page_zip_level,

6
storage/innobase/handler/handler0alter.cc

@ -5968,12 +5968,12 @@ static bool innobase_instant_try(
#ifdef BTR_CUR_HASH_ADAPT
/* Acquire the ahi latch to avoid a race condition
between ahi access and instant alter table */
srw_spin_lock* ahi_latch = btr_search_sys.get_latch(*index);
ahi_latch->wr_lock(SRW_LOCK_CALL);
btr_sea::partition& part = btr_search.get_part(*index);
part.latch.wr_lock(SRW_LOCK_CALL);
#endif /* BTR_CUR_HASH_ADAPT */
const bool metadata_changed = ctx->instant_column();
#ifdef BTR_CUR_HASH_ADAPT
ahi_latch->wr_unlock();
part.latch.wr_unlock();
#endif /* BTR_CUR_HASH_ADAPT */
DBUG_ASSERT(index->n_fields >= n_old_fields);

6
storage/innobase/ibuf/ibuf0ibuf.cc

@ -900,9 +900,9 @@ ATTRIBUTE_COLD dberr_t ibuf_upgrade()
sql_print_information("InnoDB: Upgrading the change buffer");
#ifdef BTR_CUR_HASH_ADAPT
const bool ahi= btr_search_enabled;
const bool ahi= btr_search.enabled;
if (ahi)
btr_search_disable();
btr_search.disable();
#endif
dict_table_t *ibuf_table= dict_table_t::create({C_STRING_WITH_LEN("ibuf")},
@ -1007,7 +1007,7 @@ ATTRIBUTE_COLD dberr_t ibuf_upgrade()
#ifdef BTR_CUR_HASH_ADAPT
if (ahi)
btr_search_enable();
btr_search.enable();
#endif
ibuf_index->lock.free();

7
storage/innobase/include/btr0cur.h

@ -33,9 +33,6 @@ Created 10/16/1994 Heikki Tuuri
#include "rem0types.h"
#include "gis0type.h"
#include "my_base.h"
#ifdef BTR_CUR_HASH_ADAPT
# include "srw_lock.h"
#endif
/** Mode flags for btr_cur operations; these can be ORed */
enum {
@ -745,6 +742,10 @@ struct btr_cur_t {
@return error code */
inline dberr_t open_random_leaf(rec_offs *&offsets, mem_heap_t *& heap,
mtr_t &mtr);
#ifdef BTR_CUR_HASH_ADAPT
void search_info_update() const noexcept;
#endif
};
/** Modify the delete-mark flag of a record.

396
storage/innobase/include/btr0sea.h

@ -24,43 +24,24 @@ The index tree adaptive search
Created 2/17/1996 Heikki Tuuri
*************************************************************************/
#ifndef btr0sea_h
#define btr0sea_h
#pragma once
#include "dict0dict.h"
#ifdef BTR_CUR_HASH_ADAPT
#include "ha0ha.h"
#include "srw_lock.h"
#include "buf0buf.h"
#ifdef UNIV_PFS_RWLOCK
extern mysql_pfs_key_t btr_search_latch_key;
#endif /* UNIV_PFS_RWLOCK */
#define btr_search_sys_create() btr_search_sys.create()
#define btr_search_sys_free() btr_search_sys.free()
/** Disable the adaptive hash search system and empty the index. */
void btr_search_disable();
/** Enable the adaptive hash search system.
@param resize whether buf_pool_t::resize() is the caller */
void btr_search_enable(bool resize= false);
/*********************************************************************//**
Updates the search info. */
UNIV_INLINE
void
btr_search_info_update(
/*===================*/
dict_index_t* index, /*!< in: index of the cursor */
btr_cur_t* cursor);/*!< in: cursor which was just positioned */
#define btr_search_sys_create() btr_search.create()
#define btr_search_sys_free() btr_search.free()
/** Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
both have sensible values.
@param[in,out] index index
@param[in,out] info index search info
@param[in] tuple logical record
@param[in] mode PAGE_CUR_L, ....
@param[in] latch_mode BTR_SEARCH_LEAF, ...
@ -70,12 +51,11 @@ both have sensible values.
bool
btr_search_guess_on_hash(
dict_index_t* index,
btr_search_t* info,
const dtuple_t* tuple,
ulint mode,
ulint latch_mode,
btr_cur_t* cursor,
mtr_t* mtr);
mtr_t* mtr) noexcept;
/** Move or delete hash entries for moved records, usually in a page split.
If new_block is already hashed, then any hash index for block is dropped.
@ -83,10 +63,8 @@ If new_block is not hashed, and block is hashed, then a new hash index is
built to new_block with the same parameters as block.
@param[in,out] new_block destination page
@param[in,out] block source page (subject to deletion later) */
void
btr_search_move_or_delete_hash_entries(
buf_block_t* new_block,
buf_block_t* block);
void btr_search_move_or_delete_hash_entries(buf_block_t *new_block,
buf_block_t *block) noexcept;
/** Drop any adaptive hash index entries that point to an index page.
@param[in,out] block block containing index page, s- or x-latched, or an
@ -97,307 +75,143 @@ btr_search_move_or_delete_hash_entries(
@param[in] garbage_collect drop ahi only if the index is marked
as freed */
void btr_search_drop_page_hash_index(buf_block_t* block,
bool garbage_collect);
bool garbage_collect) noexcept;
/** Drop possible adaptive hash index entries when a page is evicted
from the buffer pool or freed in a file, or the index is being dropped.
@param[in] page_id page id */
void btr_search_drop_page_hash_when_freed(const page_id_t page_id);
/** Updates the page hash index when a single record is inserted on a page.
@param[in] cursor cursor which was positioned to the place to insert
using btr_cur_search_, and the new record has been
inserted next to the cursor.
@param[in] ahi_latch the adaptive hash index latch */
void btr_search_update_hash_node_on_insert(btr_cur_t *cursor,
srw_spin_lock *ahi_latch);
/** Updates the page hash index when a single record is inserted on a page.
@param[in,out] cursor cursor which was positioned to the
place to insert using btr_cur_search_...,
and the new record has been inserted next
to the cursor
@param[in] ahi_latch the adaptive hash index latch */
void btr_search_update_hash_on_insert(btr_cur_t *cursor,
srw_spin_lock *ahi_latch);
/** Updates the page hash index when a single record is deleted from a page.
@param[in] cursor cursor which was positioned on the record to delete
using btr_cur_search_, the record is not yet deleted.*/
void btr_search_update_hash_on_delete(btr_cur_t *cursor);
/** Validates the search system.
@param thd connection, for checking if CHECK TABLE has been killed
@return true if ok */
bool btr_search_validate(THD *thd);
void btr_search_drop_page_hash_when_freed(const page_id_t page_id) noexcept;
/** Lock all search latches in exclusive mode. */
static inline void btr_search_x_lock_all();
/** Update the page hash index after a single record is inserted on a page.
@param cursor cursor which was positioned before the inserted record */
void btr_search_update_hash_node_on_insert(btr_cur_t *cursor) noexcept;
/** Unlock all search latches from exclusive mode. */
static inline void btr_search_x_unlock_all();
/** Update the page hash index after a single record is inserted on a page.
@param cursor cursor which was positioned before the inserted record */
void btr_search_update_hash_on_insert(btr_cur_t *cursor) noexcept;
/** Lock all search latches in shared mode. */
static inline void btr_search_s_lock_all();
/** Updates the page hash index before a single record is deleted from a page.
@param cursor cursor positioned on the to-be-deleted record */
void btr_search_update_hash_on_delete(btr_cur_t *cursor) noexcept;
/** Unlock all search latches from shared mode. */
static inline void btr_search_s_unlock_all();
/** Validates the search system.
@param thd connection, for checking if CHECK TABLE has been killed
@return true if ok */
bool btr_search_validate(THD *thd) noexcept;
# ifdef UNIV_DEBUG
/** @return if the index is marked as freed */
bool btr_search_check_marked_free_index(const buf_block_t *block);
# endif /* UNIV_DEBUG */
#else /* BTR_CUR_HASH_ADAPT */
# define btr_search_sys_create()
# define btr_search_sys_free()
# define btr_search_drop_page_hash_index(block, garbage_collect)
# define btr_search_s_lock_all(index)
# define btr_search_s_unlock_all(index)
# define btr_search_info_update(index, cursor)
# define btr_search_move_or_delete_hash_entries(new_block, block)
# define btr_search_update_hash_on_insert(cursor, ahi_latch)
# define btr_search_update_hash_on_delete(cursor)
# ifdef UNIV_DEBUG
# define btr_search_check_marked_free_index(block)
bool btr_search_check_marked_free_index(const buf_block_t *block) noexcept;
# endif /* UNIV_DEBUG */
#endif /* BTR_CUR_HASH_ADAPT */
#ifdef BTR_CUR_ADAPT
/** Create and initialize search info.
@param[in,out] heap heap where created
@return own: search info struct */
static inline btr_search_t* btr_search_info_create(mem_heap_t* heap)
MY_ATTRIBUTE((nonnull, warn_unused_result));
struct ahi_node;
/** @return the search info of an index */
static inline btr_search_t* btr_search_get_info(dict_index_t* index)
{
return(index->search_info);
}
#endif /* BTR_CUR_ADAPT */
/** The search info struct in an index */
struct btr_search_t{
/* @{ The following fields are not protected by any latch.
Unfortunately, this means that they must be aligned to
the machine word, i.e., they cannot be turned into bit-fields. */
buf_block_t* root_guess;/*!< the root page frame when it was last time
fetched, or NULL */
#ifdef BTR_CUR_HASH_ADAPT
ulint hash_analysis; /*!< when this exceeds
BTR_SEARCH_HASH_ANALYSIS, the hash
analysis starts; this is reset if no
success noticed */
ibool last_hash_succ; /*!< TRUE if the last search would have
succeeded, or did succeed, using the hash
index; NOTE that the value here is not exact:
it is not calculated for every search, and the
calculation itself is not always accurate! */
ulint n_hash_potential;
/*!< number of consecutive searches
which would have succeeded, or did succeed,
using the hash index;
the range is 0 .. BTR_SEARCH_BUILD_LIMIT + 5 */
/* @} */
ulint ref_count; /*!< Number of blocks in this index tree
that have search index built
i.e. block->index points to this index.
Protected by search latch except
when during initialization in
btr_search_info_create(). */
/*---------------------- @{ */
uint16_t n_fields; /*!< recommended prefix length for hash search:
number of full fields */
uint16_t n_bytes; /*!< recommended prefix: number of bytes in
an incomplete field
@see BTR_PAGE_MAX_REC_SIZE */
bool left_side; /*!< true or false, depending on whether
the leftmost record of several records with
the same prefix should be indexed in the
hash index */
/*---------------------- @} */
#ifdef UNIV_SEARCH_PERF_STAT
ulint n_hash_succ; /*!< number of successful hash searches thus
far */
ulint n_hash_fail; /*!< number of failed hash searches */
ulint n_patt_succ; /*!< number of successful pattern searches thus
far */
ulint n_searches; /*!< number of searches */
#endif /* UNIV_SEARCH_PERF_STAT */
#endif /* BTR_CUR_HASH_ADAPT */
#ifdef UNIV_DEBUG
ulint magic_n; /*!< magic number @see BTR_SEARCH_MAGIC_N */
/** value of btr_search_t::magic_n, used in assertions */
# define BTR_SEARCH_MAGIC_N 1112765
#endif /* UNIV_DEBUG */
};
#ifdef BTR_CUR_HASH_ADAPT
/** The hash index system */
struct btr_search_sys_t
struct btr_sea
{
/** the actual value of innodb_adaptive_hash_index */
Atomic_relaxed<bool> enabled;
/** Disable the adaptive hash search system and empty the index. */
void disable() noexcept;
/** Enable the adaptive hash search system.
@param resize whether buf_pool_t::resize() is the caller */
void enable(bool resize= false) noexcept;
/** Partition of the hash table */
struct partition
{
/** latches protecting hash_table */
srw_spin_lock latch;
/** mapping of dtuple_fold() to rec_t* in buf_block_t::frame */
/** latch protecting the hash table */
alignas(CPU_LEVEL1_DCACHE_LINESIZE) srw_spin_lock latch;
/** map of dtuple_fold() or rec_fold() to rec_t* in buf_page_t::frame */
hash_table_t table;
/** memory heap for table */
mem_heap_t *heap;
#ifdef _MSC_VER
#pragma warning(push)
// nonstandard extension - zero sized array, if perfschema is not compiled
#pragma warning(disable : 4200)
#endif
char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof latch -
sizeof table - sizeof heap) &
(CPU_LEVEL1_DCACHE_LINESIZE - 1)];
#ifdef _MSC_VER
#pragma warning(pop)
#endif
void init()
{
memset((void*) this, 0, sizeof *this);
latch.SRW_LOCK_INIT(btr_search_latch_key);
}
void alloc(ulint hash_size)
{
table.create(hash_size);
heap= mem_heap_create_typed(std::min<ulong>(4096,
MEM_MAX_ALLOC_IN_BUF / 2
- MEM_BLOCK_HEADER_SIZE
- MEM_SPACE_NEEDED(0)),
MEM_HEAP_FOR_BTR_SEARCH);
}
void clear()
{
mem_heap_free(heap);
heap= nullptr;
ut_free(table.array);
}
void free()
{
latch.destroy();
if (heap)
clear();
}
/** latch protecting blocks, spare; may be acquired while holding latch */
srw_mutex blocks_mutex;
/** allocated blocks */
UT_LIST_BASE_NODE_T(buf_page_t) blocks;
/** a cached block to extend blocks */
Atomic_relaxed<buf_block_t*> spare;
inline void init() noexcept;
inline void alloc(ulint hash_size) noexcept;
inline void clear() noexcept;
inline void free() noexcept;
/** Ensure that there is a spare block for a future insert() */
void prepare_insert() noexcept;
/** Clean up after erasing an AHI node
@param erase node being erased
@return buffer block to be freed
@retval nullptr if no buffer block was freed */
buf_block_t *cleanup_after_erase(ahi_node *erase) noexcept;
__attribute__((nonnull))
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Insert or replace an entry into the hash table.
@param fold hash value of rec
@param rec B-tree leaf page record
@param block the buffer block that contains rec */
void insert(ulint fold, const rec_t *rec, buf_block_t *block) noexcept;
# else
/** Insert or replace an entry into the hash table.
@param fold hash value of data
@param rec B-tree leaf page record */
void insert(ulint fold, const rec_t *rec) noexcept;
# endif
/** Delete a pointer to a record if it exists.
@param fold hash value of rec
@param rec B-tree leaf page record
@return whether a record existed and was removed */
inline bool erase(ulint fold, const rec_t *rec) noexcept;
};
/** innodb_adaptive_hash_index_parts */
ulong n_parts;
/** Partitions of the adaptive hash index */
partition *parts;
partition parts[512];
/** Get an adaptive hash index partition */
partition *get_part(index_id_t id, ulint space_id) const
{
return parts + ut_fold_ulint_pair(ulint(id), space_id) % btr_ahi_parts;
}
partition &get_part(index_id_t id) noexcept { return parts[id % n_parts]; }
/** Get an adaptive hash index partition */
partition *get_part(const dict_index_t &index) const
{
ut_ad(!index.table->space ||
index.table->space->id == index.table->space_id);
return get_part(ulint(index.id), index.table->space_id);
}
/** Get the search latch for the adaptive hash index partition */
srw_spin_lock *get_latch(const dict_index_t &index) const
{ return &get_part(index)->latch; }
partition &get_part(const dict_index_t &index) noexcept
{ return get_part(index.id); }
/** Create and initialize at startup */
void create()
{
parts= static_cast<partition*>(ut_malloc(btr_ahi_parts * sizeof *parts,
mem_key_ahi));
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].init();
if (btr_search_enabled)
btr_search_enable();
}
void alloc(ulint hash_size)
{
hash_size/= btr_ahi_parts;
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].alloc(hash_size);
}
void create() noexcept;
void alloc(ulint hash_size) noexcept;
/** Clear when disabling the adaptive hash index */
void clear() { for (ulong i= 0; i < btr_ahi_parts; ++i) parts[i].clear(); }
inline void clear() noexcept;
/** Free at shutdown */
void free()
{
if (parts)
{
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].free();
ut_free(parts);
parts= nullptr;
}
}
void free() noexcept;
};
/** The adaptive hash index */
extern btr_search_sys_t btr_search_sys;
extern btr_sea btr_search;
/** @return number of leaf pages pointed to by the adaptive hash index */
TRANSACTIONAL_INLINE inline ulint dict_index_t::n_ahi_pages() const
{
if (!btr_search_enabled)
return 0;
srw_spin_lock *latch= &btr_search_sys.get_part(*this)->latch;
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
if (xbegin())
{
if (latch->is_locked())
xabort();
ulint ref_count= search_info->ref_count;
xend();
return ref_count;
}
#endif
latch->rd_lock(SRW_LOCK_CALL);
ulint ref_count= search_info->ref_count;
latch->rd_unlock();
return ref_count;
}
#ifdef UNIV_SEARCH_PERF_STAT
# ifdef UNIV_SEARCH_PERF_STAT
/** Number of successful adaptive hash index lookups */
extern ulint btr_search_n_succ;
/** Number of failed adaptive hash index lookups */
extern ulint btr_search_n_hash_fail;
#endif /* UNIV_SEARCH_PERF_STAT */
/** After change in n_fields or n_bytes in info, this many rounds are waited
before starting the hash analysis again: this is to save CPU time when there
is no hope in building a hash index. */
#define BTR_SEARCH_HASH_ANALYSIS 17
/** Limit of consecutive searches for trying a search shortcut on the search
pattern */
#define BTR_SEARCH_ON_PATTERN_LIMIT 3
/** Limit of consecutive searches for trying a search shortcut using
the hash index */
#define BTR_SEARCH_ON_HASH_LIMIT 3
/** We do this many searches before trying to keep the search latch
over calls from MySQL. If we notice someone waiting for the latch, we
again set this much timeout. This is to reduce contention. */
#define BTR_SEA_TIMEOUT 10000
# endif /* UNIV_SEARCH_PERF_STAT */
#else /* BTR_CUR_HASH_ADAPT */
# define btr_search_sys_create()
# define btr_search_sys_free()
# define btr_search_drop_page_hash_index(block, garbage_collect)
# define btr_search_move_or_delete_hash_entries(new_block, block)
# define btr_search_update_hash_on_insert(cursor, ahi_latch)
# define btr_search_update_hash_on_delete(cursor)
# ifdef UNIV_DEBUG
# define btr_search_check_marked_free_index(block)
# endif /* UNIV_DEBUG */
#endif /* BTR_CUR_HASH_ADAPT */
#include "btr0sea.inl"
#endif

117
storage/innobase/include/btr0sea.inl

@ -1,117 +0,0 @@
/*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/********************************************************************//**
@file include/btr0sea.ic
The index tree adaptive search
Created 2/17/1996 Heikki Tuuri
*************************************************************************/
#include "dict0mem.h"
#include "btr0cur.h"
#include "buf0buf.h"
/** Create and initialize search info.
@param[in,out] heap heap where created
@return own: search info struct */
static inline btr_search_t* btr_search_info_create(mem_heap_t* heap)
{
btr_search_t* info = static_cast<btr_search_t*>(
mem_heap_zalloc(heap, sizeof(btr_search_t)));
ut_d(info->magic_n = BTR_SEARCH_MAGIC_N);
#ifdef BTR_CUR_HASH_ADAPT
info->n_fields = 1;
info->left_side = TRUE;
#endif /* BTR_CUR_HASH_ADAPT */
return(info);
}
#ifdef BTR_CUR_HASH_ADAPT
/** Updates the search info.
@param[in,out] info search info
@param[in,out] cursor cursor which was just positioned */
void btr_search_info_update_slow(btr_search_t *info, btr_cur_t *cursor);
/*********************************************************************//**
Updates the search info. */
static inline
void
btr_search_info_update(
/*===================*/
dict_index_t* index, /*!< in: index of the cursor */
btr_cur_t* cursor) /*!< in: cursor which was just positioned */
{
ut_ad(!index->is_spatial());
ut_ad(!index->table->is_temporary());
if (!btr_search_enabled) {
return;
}
btr_search_t* info;
info = btr_search_get_info(index);
info->hash_analysis++;
if (info->hash_analysis < BTR_SEARCH_HASH_ANALYSIS) {
/* Do nothing */
return;
}
ut_ad(cursor->flag != BTR_CUR_HASH);
btr_search_info_update_slow(info, cursor);
}
/** Lock all search latches in exclusive mode. */
static inline void btr_search_x_lock_all()
{
for (ulint i = 0; i < btr_ahi_parts; ++i) {
btr_search_sys.parts[i].latch.wr_lock(SRW_LOCK_CALL);
}
}
/** Unlock all search latches from exclusive mode. */
static inline void btr_search_x_unlock_all()
{
for (ulint i = 0; i < btr_ahi_parts; ++i) {
btr_search_sys.parts[i].latch.wr_unlock();
}
}
/** Lock all search latches in shared mode. */
static inline void btr_search_s_lock_all()
{
for (ulint i = 0; i < btr_ahi_parts; ++i) {
btr_search_sys.parts[i].latch.rd_lock(SRW_LOCK_CALL);
}
}
/** Unlock all search latches from shared mode. */
static inline void btr_search_s_unlock_all()
{
for (ulint i = 0; i < btr_ahi_parts; ++i) {
btr_search_sys.parts[i].latch.rd_unlock();
}
}
#endif /* BTR_CUR_HASH_ADAPT */

11
storage/innobase/include/btr0types.h

@ -33,17 +33,6 @@ Created 2/17/1996 Heikki Tuuri
struct btr_pcur_t;
/** B-tree cursor */
struct btr_cur_t;
/** B-tree search information for the adaptive hash index */
struct btr_search_t;
#ifdef BTR_CUR_HASH_ADAPT
/** Is search system enabled.
Search system is protected by array of latches. */
extern char btr_search_enabled;
/** Number of adaptive hash index partition. */
extern ulong btr_ahi_parts;
#endif /* BTR_CUR_HASH_ADAPT */
/** The size of a reference to data stored on a different page.
The reference is stored at the end of the prefix of the field

4
storage/innobase/include/buf0buf.h

@ -890,12 +890,12 @@ struct buf_block_t{
Another exception is that ha_insert_for_fold() may
decrement n_pointers without holding the appropriate latch
in btr_search_latches[]. Thus, n_pointers must be
in btr_search.parts. Thus, n_pointers must be
protected by atomic memory access.
This implies that the fields may be read without race
condition whenever any of the following hold:
- the btr_search_sys.partition[].latch is being held, or
- the btr_search.parts.latch is being held, or
- state() == NOT_USED || state() == MEMORY,
and holding some latch prevents the state from changing to that.

66
storage/innobase/include/dict0mem.h

@ -1051,8 +1051,66 @@ struct dict_index_t {
UT_LIST_NODE_T(dict_index_t)
indexes;/*!< list of indexes of the table */
#ifdef BTR_CUR_ADAPT
btr_search_t* search_info;
/*!< info used in optimistic searches */
/** The search info struct in an index */
struct ahi {
ahi()= default;
ahi(const ahi&)= default;
~ahi()= default;
/** Dummy assignment operator for dict_index_t::clone(), which
will return a clone where these fields are reset to default values
(because no AHI entries exist yet for the clone) */
ahi &operator=(const ahi&) { new(this) ahi(); return *this; }
/** the root page when it was last time fetched, or nullptr */
buf_block_t *root_guess= nullptr;
# ifdef BTR_CUR_HASH_ADAPT
private:
/** After change in n_fields or n_bytes, this many rounds are
waited before starting the hash analysis again: this is to save
CPU time when there is no hope in building a hash index. */
static constexpr uint8_t HASH_ANALYSIS= 16;
/** the number of calls to hash_analysis_useful() */
Atomic_relaxed<uint8_t> hash_analysis{0};
public:
bool hash_analysis_useful() noexcept
{
return hash_analysis > HASH_ANALYSIS ||
hash_analysis.fetch_add(1) >= HASH_ANALYSIS;
}
void hash_analysis_reset() noexcept { hash_analysis= 0; }
/** number of consecutive searches which would have succeeded, or
did succeed, using the hash index; the range is 0
.. BTR_SEARCH_BUILD_LIMIT + 5 */
Atomic_relaxed<uint8_t> n_hash_potential{0};
/** whether the last search would have succeeded, or
did succeed, using the hash index; NOTE that the value
here is not exact: it is not calculated for every
search, and the calculation itself is not always accurate! */
Atomic_relaxed<bool> last_hash_succ{false};
/** recommended full field prefix */
uint16_t n_fields= 1;
/** recommended number of bytes in an incomplete field */
uint16_t n_bytes= 0;
/** whether the leftmost record of several records with the same
prefix should be indexed */
bool left_side= true;
/** number of buf_block_t::index pointers to this index */
Atomic_counter<size_t> ref_count{0};
# ifdef UNIV_SEARCH_PERF_STAT
/** number of successful hash searches */
size_t n_hash_succ{0};
/** number of failed hash searches */
size_t n_hash_fail{0};
/** number of searches */
size_t n_searches{0};
# endif /* UNIV_SEARCH_PERF_STAT */
# endif /* BTR_CUR_HASH_ADAPT */
} search_info;
#endif /* BTR_CUR_ADAPT */
row_log_t* online_log;
/*!< the log of modifications
@ -1319,8 +1377,8 @@ public:
/** Clone this index for lazy dropping of the adaptive hash index.
@return this or a clone */
dict_index_t* clone_if_needed();
/** @return number of leaf pages pointed to by the adaptive hash index */
inline ulint n_ahi_pages() const;
/** @return whether any leaf pages may be in the adaptive hash index */
bool any_ahi_pages() const noexcept { return search_info.ref_count; }
/** @return whether mark_freed() had been invoked */
bool freed() const { return UNIV_UNLIKELY(page == 1); }
/** Note that the index is waiting for btr_search_lazy_free() */

60
storage/innobase/include/ha0ha.h

@ -1,60 +0,0 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************//**
@file include/ha0ha.h
The hash table interface for the adaptive hash index
Created 8/18/1994 Heikki Tuuri
*******************************************************/
#ifndef ha0ha_h
#define ha0ha_h
#include "hash0hash.h"
#include "page0types.h"
#include "buf0types.h"
#include "rem0types.h"
#ifdef BTR_CUR_HASH_ADAPT
/*************************************************************//**
Looks for an element in a hash table.
@return pointer to the data of the first hash table node in chain
having the fold number, NULL if not found */
UNIV_INLINE
const rec_t*
ha_search_and_get_data(
/*===================*/
hash_table_t* table, /*!< in: hash table */
ulint fold); /*!< in: folded value of the searched data */
/** The hash table external chain node */
struct ha_node_t {
ulint fold; /*!< fold value for the data */
ha_node_t* next; /*!< next chain node or NULL if none */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block; /*!< buffer block containing the data, or NULL */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
const rec_t* data; /*!< pointer to the data */
};
#include "ha0ha.inl"
#endif /* BTR_CUR_HASH_ADAPT */
#endif

154
storage/innobase/include/ha0ha.inl

@ -1,154 +0,0 @@
/*****************************************************************************
Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*****************************************************************************/
/********************************************************************//**
@file include/ha0ha.ic
The hash table interface for the adaptive hash index
Created 8/18/1994 Heikki Tuuri
*************************************************************************/
#ifdef BTR_CUR_HASH_ADAPT
#include "btr0types.h"
/******************************************************************//**
Gets a hash node data.
@return pointer to the data */
UNIV_INLINE
const rec_t*
ha_node_get_data(
/*=============*/
const ha_node_t* node) /*!< in: hash chain node */
{
return(node->data);
}
/******************************************************************//**
Sets hash node data. */
UNIV_INLINE
void
ha_node_set_data_func(
/*==================*/
ha_node_t* node, /*!< in: hash chain node */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
const rec_t* data) /*!< in: pointer to the data */
{
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
node->block = block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
node->data = data;
}
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Sets hash node data.
@param n in: hash chain node
@param b in: buffer block containing the data
@param d in: pointer to the data */
# define ha_node_set_data(n,b,d) ha_node_set_data_func(n,b,d)
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/** Sets hash node data.
@param n in: hash chain node
@param b in: buffer block containing the data
@param d in: pointer to the data */
# define ha_node_set_data(n,b,d) ha_node_set_data_func(n,d)
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/******************************************************************//**
Gets the next node in a hash chain.
@return next node, NULL if none */
UNIV_INLINE
ha_node_t*
ha_chain_get_next(
/*==============*/
const ha_node_t* node) /*!< in: hash chain node */
{
return(node->next);
}
/******************************************************************//**
Gets the first node in a hash chain.
@return first node, NULL if none */
UNIV_INLINE
ha_node_t*
ha_chain_get_first(
/*===============*/
hash_table_t* table, /*!< in: hash table */
ulint fold) /*!< in: fold value determining the chain */
{
return static_cast<ha_node_t*>(table->array[table->calc_hash(fold)].node);
}
/*************************************************************//**
Looks for an element in a hash table.
@return pointer to the data of the first hash table node in chain
having the fold number, NULL if not found */
UNIV_INLINE
const rec_t*
ha_search_and_get_data(
/*===================*/
hash_table_t* table, /*!< in: hash table */
ulint fold) /*!< in: folded value of the searched data */
{
ut_ad(btr_search_enabled);
for (const ha_node_t* node = ha_chain_get_first(table, fold);
node != NULL;
node = ha_chain_get_next(node)) {
if (node->fold == fold) {
return(node->data);
}
}
return(NULL);
}
/*********************************************************//**
Looks for an element when we know the pointer to the data.
@return pointer to the hash table node, NULL if not found in the table */
UNIV_INLINE
ha_node_t*
ha_search_with_data(
/*================*/
hash_table_t* table, /*!< in: hash table */
ulint fold, /*!< in: folded value of the searched data */
const rec_t* data) /*!< in: pointer to the data */
{
ha_node_t* node;
ut_ad(btr_search_enabled);
node = ha_chain_get_first(table, fold);
while (node) {
if (node->data == data) {
return(node);
}
node = ha_chain_get_next(node);
}
return(NULL);
}
#endif /* BTR_CUR_HASH_ADAPT */

68
storage/innobase/include/mem0mem.h

@ -28,8 +28,6 @@ Created 6/9/1994 Heikki Tuuri
#define mem0mem_h
#include "ut0mem.h"
#include "ut0rnd.h"
#include "mach0data.h"
#include <memory>
@ -42,22 +40,14 @@ typedef struct mem_block_info_t mem_block_t;
/** A memory heap is a nonempty linear list of memory blocks */
typedef mem_block_t mem_heap_t;
struct buf_block_t;
/** Types of allocation for memory heaps: DYNAMIC means allocation from the
dynamic memory pool of the C compiler, BUFFER means allocation from the
buffer pool; the latter method is used for very big heaps */
#define MEM_HEAP_DYNAMIC 0 /* the most common type */
#define MEM_HEAP_BUFFER 1
#define MEM_HEAP_BTR_SEARCH 2 /* this flag can optionally be
ORed to MEM_HEAP_BUFFER, in which
case heap->free_block is used in
some cases for memory allocations,
and if it's NULL, the memory
allocation functions can return
NULL. */
/** Different type of heaps in terms of which datastructure is using them */
#define MEM_HEAP_FOR_BTR_SEARCH (MEM_HEAP_BTR_SEARCH | MEM_HEAP_BUFFER)
#define MEM_HEAP_FOR_LOCK_HEAP (MEM_HEAP_BUFFER)
/** The following start size is used for the first block in the memory heap if
@ -110,8 +100,7 @@ A single user buffer of 'size' will fit in the block.
@param[in] file_name File name where created
@param[in] line Line where created
@param[in] type Heap type
@return own: memory heap, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap */
UNIV_INLINE
mem_heap_t*
mem_heap_create_func(
@ -145,8 +134,7 @@ mem_heap_zalloc(
@param[in] heap memory heap
@param[in] n number of bytes; if the heap is allowed to grow into
the buffer pool, this must be <= MEM_MAX_ALLOC_IN_BUF
@return allocated storage, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return allocated storage */
UNIV_INLINE
void*
mem_heap_alloc(
@ -180,26 +168,6 @@ void
mem_heap_empty(
mem_heap_t* heap);
/** Returns a pointer to the topmost element in a memory heap.
The size of the element must be given.
@param[in] heap memory heap
@param[in] n size of the topmost element
@return pointer to the topmost element */
UNIV_INLINE
void*
mem_heap_get_top(
mem_heap_t* heap,
ulint n);
/*****************************************************************//**
Frees the topmost element in a memory heap.
The size of the element must be given. */
UNIV_INLINE
void
mem_heap_free_top(
/*==============*/
mem_heap_t* heap, /*!< in: memory heap */
ulint n); /*!< in: size of the topmost element */
/*****************************************************************//**
Returns the space in bytes occupied by a memory heap. */
UNIV_INLINE
@ -266,24 +234,6 @@ mem_heap_strdupl(mem_heap_t* heap, const char* str, size_t len)
return(static_cast<char*>(memcpy(s, str, len)));
}
/** Duplicate a string to a memory heap, with lower-case conversion
@param[in] heap memory heap where string is allocated
@param[in] cs the character set of the string
@param[in] str the source string
@return own: a NUL-terminated lower-cased copy of str */
inline
LEX_STRING
mem_heap_alloc_casedn_z(mem_heap_t *heap,
CHARSET_INFO *cs,
const LEX_CSTRING &str)
{
size_t nbytes= str.length * cs->casedn_multiply() + 1;
LEX_STRING res;
res.str= static_cast<char*>(mem_heap_alloc(heap, nbytes));
res.length= cs->casedn_z(str.str, str.length, res.str, nbytes);
return res;
}
/**********************************************************************//**
Concatenate two strings and return the result, using a memory heap.
@return own: the result */
@ -337,19 +287,13 @@ struct mem_block_info_t {
in the heap. This is defined only in the base
node and is set to ULINT_UNDEFINED in others. */
ulint type; /*!< type of heap: MEM_HEAP_DYNAMIC, or
MEM_HEAP_BUF possibly ORed to MEM_HEAP_BTR_SEARCH */
MEM_HEAP_BUFFER */
ulint free; /*!< offset in bytes of the first free position for
user data in the block */
ulint start; /*!< the value of the struct field 'free' at the
creation of the block */
void* free_block;
/* if the MEM_HEAP_BTR_SEARCH bit is set in type,
and this is the heap root, this can contain an
allocated buffer frame, which can be appended as a
free block to the heap, if we need more space;
otherwise, this is NULL */
void* buf_block;
buf_block_t* buf_block;
/* if this block has been allocated from the buffer
pool, this contains the buf_block_t handle;
otherwise, this is NULL */

93
storage/innobase/include/mem0mem.inl

@ -39,8 +39,7 @@ Created 6/8/1994 Heikki Tuuri
#endif /* UNIV_DEBUG */
/***************************************************************//**
Creates a memory heap block where data can be allocated.
@return own: memory heap block, NULL if did not succeed (only possible
for MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap block */
mem_block_t*
mem_heap_create_block_func(
/*=======================*/
@ -62,19 +61,11 @@ mem_heap_block_free(
mem_heap_t* heap, /*!< in: heap */
mem_block_t* block); /*!< in: block to free */
/******************************************************************//**
Frees the free_block field from a memory heap. */
void
mem_heap_free_block_free(
/*=====================*/
mem_heap_t* heap); /*!< in: heap */
/***************************************************************//**
Adds a new block to a memory heap.
@param[in] heap memory heap
@param[in] n number of bytes needed
@return created block, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return created block */
mem_block_t*
mem_heap_add_block(
mem_heap_t* heap,
@ -100,9 +91,7 @@ UNIV_INLINE
void
mem_block_set_type(mem_block_t* block, ulint type)
{
ut_ad((type == MEM_HEAP_DYNAMIC) || (type == MEM_HEAP_BUFFER)
|| (type == MEM_HEAP_BUFFER + MEM_HEAP_BTR_SEARCH));
ut_ad(type == MEM_HEAP_DYNAMIC || type == MEM_HEAP_BUFFER);
block->type = type;
}
@ -157,8 +146,6 @@ mem_heap_zalloc(
mem_heap_t* heap,
ulint n)
{
ut_ad(heap);
ut_ad(!(heap->type & MEM_HEAP_BTR_SEARCH));
return(memset(mem_heap_alloc(heap, n), 0, n));
}
@ -166,8 +153,7 @@ mem_heap_zalloc(
@param[in] heap memory heap
@param[in] n number of bytes; if the heap is allowed to grow into
the buffer pool, this must be <= MEM_MAX_ALLOC_IN_BUF
@return allocated storage, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return allocated storage */
UNIV_INLINE
void*
mem_heap_alloc(
@ -290,62 +276,6 @@ mem_heap_empty(
mem_heap_t* heap)
{
mem_heap_free_heap_top(heap, (byte*) heap + mem_block_get_start(heap));
if (heap->free_block) {
mem_heap_free_block_free(heap);
}
}
/** Returns a pointer to the topmost element in a memory heap.
The size of the element must be given.
@param[in] heap memory heap
@param[in] n size of the topmost element
@return pointer to the topmost element */
UNIV_INLINE
void*
mem_heap_get_top(
mem_heap_t* heap,
ulint n)
{
mem_block_t* block;
byte* buf;
block = UT_LIST_GET_LAST(heap->base);
buf = (byte*) block + mem_block_get_free(block) - MEM_SPACE_NEEDED(n);
return((void*) buf);
}
/*****************************************************************//**
Frees the topmost element in a memory heap. The size of the element must be
given. */
UNIV_INLINE
void
mem_heap_free_top(
/*==============*/
mem_heap_t* heap, /*!< in: memory heap */
ulint n) /*!< in: size of the topmost element */
{
mem_block_t* block;
n += REDZONE_SIZE;
block = UT_LIST_GET_LAST(heap->base);
/* Subtract the free field of block */
mem_block_set_free(block, mem_block_get_free(block)
- MEM_SPACE_NEEDED(n));
/* If free == start, we may free the block if it is not the first
one */
if ((heap != block) && (mem_block_get_free(block)
== mem_block_get_start(block))) {
mem_heap_block_free(heap, block);
} else {
MEM_NOACCESS((byte*) block + mem_block_get_free(block), n);
}
}
/** Creates a memory heap.
@ -356,8 +286,7 @@ A single user buffer of 'size' will fit in the block.
@param[in] file_name File name where created
@param[in] line Line where created
@param[in] type Heap type
@return own: memory heap, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap */
UNIV_INLINE
mem_heap_t*
mem_heap_create_func(
@ -406,10 +335,6 @@ mem_heap_free(
block = UT_LIST_GET_LAST(heap->base);
if (heap->free_block) {
mem_heap_free_block_free(heap);
}
while (block != NULL) {
/* Store the contents of info before freeing current block
(it is erased in freeing) */
@ -430,13 +355,7 @@ mem_heap_get_size(
/*==============*/
mem_heap_t* heap) /*!< in: heap */
{
ulint size = heap->total_size;
if (heap->free_block) {
size += srv_page_size;
}
return(size);
return heap->total_size;
}
/**********************************************************************//**

52
storage/innobase/mem/mem0mem.cc

@ -215,7 +215,6 @@ mem_heap_validate(
case MEM_HEAP_DYNAMIC:
break;
case MEM_HEAP_BUFFER:
case MEM_HEAP_BUFFER | MEM_HEAP_BTR_SEARCH:
ut_ad(block->len <= srv_page_size);
break;
default:
@ -242,8 +241,7 @@ static void ut_strlcpy_rev(char* dst, const char* src, ulint size)
/***************************************************************//**
Creates a memory heap block where data can be allocated.
@return own: memory heap block, NULL if did not succeed (only possible
for MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap block */
mem_block_t*
mem_heap_create_block_func(
/*=======================*/
@ -257,12 +255,11 @@ mem_heap_create_block_func(
ulint type) /*!< in: type of heap: MEM_HEAP_DYNAMIC or
MEM_HEAP_BUFFER */
{
buf_block_t* buf_block = NULL;
buf_block_t* buf_block;
mem_block_t* block;
ulint len;
ut_ad((type == MEM_HEAP_DYNAMIC) || (type == MEM_HEAP_BUFFER)
|| (type == MEM_HEAP_BUFFER + MEM_HEAP_BTR_SEARCH));
ut_ad(type == MEM_HEAP_DYNAMIC || type == MEM_HEAP_BUFFER);
if (heap != NULL) {
ut_d(mem_heap_validate(heap));
@ -276,24 +273,11 @@ mem_heap_create_block_func(
ut_ad(type == MEM_HEAP_DYNAMIC || n <= MEM_MAX_ALLOC_IN_BUF);
block = static_cast<mem_block_t*>(ut_malloc_nokey(len));
buf_block = nullptr;
} else {
len = srv_page_size;
if ((type & MEM_HEAP_BTR_SEARCH) && heap) {
/* We cannot allocate the block from the
buffer pool, but must get the free block from
the heap header free block field */
buf_block = static_cast<buf_block_t*>(heap->free_block);
heap->free_block = NULL;
if (UNIV_UNLIKELY(!buf_block)) {
return(NULL);
}
} else {
buf_block = buf_block_alloc();
}
buf_block = buf_block_alloc();
block = (mem_block_t*) buf_block->page.frame;
}
@ -304,7 +288,6 @@ mem_heap_create_block_func(
}
block->buf_block = buf_block;
block->free_block = NULL;
ut_d(ut_strlcpy_rev(block->file_name, file_name,
sizeof(block->file_name)));
@ -340,8 +323,7 @@ mem_heap_create_block_func(
/***************************************************************//**
Adds a new block to a memory heap.
@return created block, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return created block */
mem_block_t*
mem_heap_add_block(
/*===============*/
@ -400,9 +382,6 @@ mem_heap_block_free(
{
ulint type;
ulint len;
buf_block_t* buf_block;
buf_block = static_cast<buf_block_t*>(block->buf_block);
UT_LIST_REMOVE(heap->base, block);
@ -413,25 +392,10 @@ mem_heap_block_free(
len = block->len;
if (type == MEM_HEAP_DYNAMIC || len < srv_page_size / 2) {
ut_ad(!buf_block);
ut_ad(!block->buf_block);
ut_free(block);
} else {
ut_ad(type & MEM_HEAP_BUFFER);
buf_block_free(buf_block);
}
}
/******************************************************************//**
Frees the free_block field from a memory heap. */
void
mem_heap_free_block_free(
/*=====================*/
mem_heap_t* heap) /*!< in: heap */
{
if (UNIV_LIKELY_NULL(heap->free_block)) {
buf_block_free(static_cast<buf_block_t*>(heap->free_block));
heap->free_block = NULL;
buf_block_free(block->buf_block);
}
}

3
storage/innobase/mtr/mtr0mtr.cc

@ -31,9 +31,8 @@ Created 11/26/1995 Heikki Tuuri
#include "log0crypt.h"
#ifdef BTR_CUR_HASH_ADAPT
# include "btr0sea.h"
#else
# include "btr0cur.h"
#endif
#include "btr0cur.h"
#include "srv0start.h"
#include "log.h"
#include "mariadb_stats.h"

7
storage/innobase/row/row0import.cc

@ -3224,10 +3224,6 @@ static void add_fts_index(dict_table_t *table)
for (ulint i= 0; i < clust_index->n_uniq; i++)
dict_index_add_col(fts_index, table, clust_index->fields[i].col,
clust_index->fields[i].prefix_len);
#ifdef BTR_CUR_HASH_ADAPT
fts_index->search_info= btr_search_info_create(fts_index->heap);
fts_index->search_info->ref_count= 0;
#endif /* BTR_CUR_HASH_ADAPT */
UT_LIST_ADD_LAST(fts_index->table->indexes, fts_index);
}
@ -3330,9 +3326,6 @@ static dict_table_t *build_fts_hidden_table(
new_index->fields[old_index->n_fields].fixed_len= sizeof(doc_id_t);
}
#ifdef BTR_CUR_HASH_ADAPT
new_index->search_info= btr_search_info_create(new_index->heap);
#endif /* BTR_CUR_HASH_ADAPT */
UT_LIST_ADD_LAST(new_index->table->indexes, new_index);
old_index= UT_LIST_GET_NEXT(indexes, old_index);
if (UT_LIST_GET_LEN(new_table->indexes)

11
storage/innobase/row/row0ins.cc

@ -2760,13 +2760,10 @@ avoid_bulk:
#endif /* WITH_WSREP */
#ifdef BTR_CUR_HASH_ADAPT
if (btr_search_enabled) {
btr_search_x_lock_all();
index->table->bulk_trx_id = trx->id;
btr_search_x_unlock_all();
} else {
index->table->bulk_trx_id = trx->id;
}
auto &part = btr_search.get_part(*index);
part.latch.wr_lock(SRW_LOCK_CALL);
index->table->bulk_trx_id = trx->id;
part.latch.wr_unlock();
#else /* BTR_CUR_HASH_ADAPT */
index->table->bulk_trx_id = trx->id;
#endif /* BTR_CUR_HASH_ADAPT */

2
storage/innobase/row/row0merge.cc

@ -4088,7 +4088,7 @@ row_merge_drop_indexes(
prebuilt->ins_node->entry_list
in ins_node_create_entry_list(). */
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!index->search_info->ref_count);
ut_ad(!index->search_info.ref_count);
#endif /* BTR_CUR_HASH_ADAPT */
dict_index_remove_from_cache(
table, index);

2
storage/innobase/row/row0mysql.cc

@ -2171,7 +2171,7 @@ row_create_index_for_mysql(
err = dict_create_index_tree_in_mem(index, trx);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!index->search_info->ref_count);
ut_ad(!index->search_info.ref_count);
#endif /* BTR_CUR_HASH_ADAPT */
if (err != DB_SUCCESS) {

2
storage/innobase/row/row0sel.cc

@@ -4540,7 +4540,7 @@ early_not_found:
if (UNIV_UNLIKELY(direction == 0)
&& unique_search
&& btr_search_enabled
&& btr_search.enabled
&& dict_index_is_clust(index)
&& !index->table->is_temporary()
&& !prebuilt->templ_contains_blob

35
storage/innobase/srv/srv0srv.cc

@@ -743,20 +743,17 @@ srv_printf_innodb_monitor(
os_aio_print(file);
#ifdef BTR_CUR_HASH_ADAPT
if (btr_search_enabled) {
if (btr_search.enabled) {
fputs("-------------------\n"
"ADAPTIVE HASH INDEX\n"
"-------------------\n", file);
for (ulint i = 0; i < btr_ahi_parts; ++i) {
const auto part= &btr_search_sys.parts[i];
part->latch.rd_lock(SRW_LOCK_CALL);
ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
for (ulong i = 0; i < btr_search.n_parts; ++i) {
btr_sea::partition& part= btr_search.parts[i];
part.blocks_mutex.wr_lock();
fprintf(file, "Hash table size " ULINTPF
", node heap has " ULINTPF " buffer(s)\n",
part->table.n_cells,
part->heap->base.count
- !part->heap->free_block);
part->latch.rd_unlock();
part.table.n_cells, part.blocks.count + !!part.spare);
part.blocks_mutex.wr_unlock();
}
const ulint with_ahi = btr_cur_n_sea;
@@ -836,17 +833,17 @@ srv_export_innodb_status(void)
export_vars.innodb_ahi_miss = btr_cur_n_non_sea;
ulint mem_adaptive_hash = 0;
for (ulong i = 0; i < btr_ahi_parts; i++) {
const auto part= &btr_search_sys.parts[i];
part->latch.rd_lock(SRW_LOCK_CALL);
if (part->heap) {
ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
mem_adaptive_hash += mem_heap_get_size(part->heap)
+ part->table.n_cells * sizeof(hash_cell_t);
}
part->latch.rd_unlock();
for (ulong i = 0; i < btr_search.n_parts; i++) {
btr_sea::partition& part= btr_search.parts[i];
part.blocks_mutex.wr_lock();
mem_adaptive_hash += part.blocks.count + !!part.spare;
part.blocks_mutex.wr_unlock();
}
mem_adaptive_hash <<= srv_page_size_shift;
btr_search.parts[0].latch.rd_lock(SRW_LOCK_CALL);
mem_adaptive_hash += btr_search.parts[0].table.n_cells
* sizeof *btr_search.parts[0].table.array * btr_search.n_parts;
btr_search.parts[0].latch.rd_unlock();
export_vars.innodb_mem_adaptive_hash = mem_adaptive_hash;
#endif

2
storage/innobase/srv/srv0start.cc

@@ -2123,7 +2123,7 @@ void innodb_shutdown()
#ifdef BTR_CUR_HASH_ADAPT
if (dict_sys.is_initialised()) {
btr_search_disable();
btr_search.disable();
}
#endif /* BTR_CUR_HASH_ADAPT */
log_sys.close();

Loading…
Cancel
Save