/*****************************************************************************

Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA

*****************************************************************************/

/**************************************************//**
@file buf/buf0buf.c
The database buffer buf_pool

Created 11/5/1995 Heikki Tuuri
*******************************************************/

#include "buf0buf.h"

#ifdef UNIV_NONINL
#include "buf0buf.ic"
#endif

#include "mem0mem.h"
#include "btr0btr.h"
#include "fil0fil.h"
#ifndef UNIV_HOTBACKUP
#include "buf0buddy.h"
#include "lock0lock.h"
#include "btr0sea.h"
#include "ibuf0ibuf.h"
#include "trx0undo.h"
#include "log0log.h"
#endif /* !UNIV_HOTBACKUP */
#include "srv0srv.h"
#include "dict0dict.h"
#include "log0recv.h"
#include "page0zip.h"
#include "trx0trx.h"
#include "srv0start.h"

/* prototypes for new functions added to ha_innodb.cc */
trx_t* innobase_get_trx();

static inline void _increment_page_get_statistics(buf_block_t* block, trx_t* trx)
{
        ulint   block_hash;
        ulint   block_hash_byte;
        byte    block_hash_offset;

        ut_ad(block);

        if (!innobase_get_slow_log() || !trx || !trx->take_stats)
                return;

        if (!trx->distinct_page_access_hash) {
                trx->distinct_page_access_hash = mem_alloc(DPAH_SIZE);
                memset(trx->distinct_page_access_hash, 0, DPAH_SIZE);
        }

        block_hash = ut_hash_ulint((block->page.space << 20) + block->page.space +
                                   block->page.offset, DPAH_SIZE << 3);
        block_hash_byte = block_hash >> 3;
        block_hash_offset = (byte) block_hash & 0x07;

        if (block_hash_byte >= DPAH_SIZE)
                fprintf(stderr, "!!! block_hash_byte = %lu block_hash_offset = %d !!!\n", block_hash_byte, block_hash_offset);
        if (block_hash_offset > 7)
                fprintf(stderr, "!!! block_hash_byte = %lu block_hash_offset = %d !!!\n", block_hash_byte, block_hash_offset);

        if ((trx->distinct_page_access_hash[block_hash_byte] & ((byte) 0x01 << block_hash_offset)) == 0)
                trx->distinct_page_access++;
        trx->distinct_page_access_hash[block_hash_byte] |= (byte) 0x01 << block_hash_offset;
        return;
}
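
/* Illustrative sketch (not part of the build): the distinct-page-access
bookkeeping above is a test-and-set on a per-transaction bit array. The
sketch below uses a made-up 64-byte bitmap and precomputed hash values in
place of DPAH_SIZE and ut_hash_ulint(); only the bit arithmetic mirrors the
function above. */
#if 0
#include <stdio.h>
#include <string.h>

#define BITMAP_BYTES 64         /* stand-in for DPAH_SIZE */

/* Return 1 the first time a hash value is seen, 0 on repeats. */
static int count_if_first_access(unsigned char* bitmap, unsigned long hash)
{
        unsigned long byte_no = (hash >> 3) % BITMAP_BYTES;
        unsigned char bit = (unsigned char) (0x01 << (hash & 0x07));
        int first = !(bitmap[byte_no] & bit);

        bitmap[byte_no] |= bit;
        return(first);
}

int main(void)
{
        unsigned char bitmap[BITMAP_BYTES];
        unsigned long hashes[] = {17, 42, 17, 99, 42};  /* made-up values */
        unsigned long distinct = 0;
        size_t i;

        memset(bitmap, 0, sizeof(bitmap));
        for (i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++) {
                distinct += count_if_first_access(bitmap, hashes[i]);
        }
        printf("distinct page accesses: %lu\n", distinct);      /* prints 3 */
        return(0);
}
#endif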
/*
                IMPLEMENTATION OF THE BUFFER POOL
                =================================

Performance improvement:
------------------------

Thread scheduling in NT may be so slow that the OS wait mechanism should
not be used even in waiting for disk reads to complete.
Rather, we should put waiting query threads to the queue of
waiting jobs, and let the OS thread do something useful while the i/o
is processed. In this way we could remove most OS thread switches in
an i/o-intensive benchmark like TPC-C.

A possibility is to put a user space thread library between the database
and NT. User space thread libraries might be very fast.

SQL Server 7.0 can be configured to use 'fibers' which are lightweight
threads in NT. These should be studied.

                Buffer frames and blocks
                ------------------------

Following the terminology of Gray and Reuter, we call the memory
blocks where file pages are loaded buffer frames. For each buffer
frame there is a control block, or shortly, a block, in the buffer
control array. The control info which does not need to be stored
in the file along with the file page, resides in the control block.

                Buffer pool struct
                ------------------

The buffer buf_pool contains a single mutex which protects all the
control data structures of the buf_pool. The content of a buffer frame is
protected by a separate read-write lock in its control block, though.
These locks can be locked and unlocked without owning the buf_pool->mutex.
The OS events in the buf_pool struct can be waited for without owning the
buf_pool->mutex.

The buf_pool->mutex is a hot-spot in main memory, causing a lot of
memory bus traffic on multiprocessor systems when processors
alternately access the mutex. On our Pentium, the mutex is accessed
maybe every 10 microseconds. We gave up the solution to have mutexes
for each control block, for instance, because it seemed to be
complicated.

A solution to reduce mutex contention of the buf_pool->mutex is to
create a separate mutex for the page hash table. On Pentium,
accessing the hash table takes 2 microseconds, about half
of the total buf_pool->mutex hold time.

                Control blocks
                --------------

The control block contains, for instance, the bufferfix count
which is incremented when a thread wants a file page to be fixed
in a buffer frame. The bufferfix operation does not lock the
contents of the frame, however. For this purpose, the control
block contains a read-write lock.

The buffer frames have to be aligned so that the start memory
address of a frame is divisible by the universal page size, which
is a power of two.

We intend to make the buffer buf_pool size on-line reconfigurable,
that is, the buf_pool size can be changed without closing the database.
Then the database administrator may adjust it to be bigger
at night, for example. The control block array must
contain enough control blocks for the maximum buffer buf_pool size
which is used in the particular database.
If the buf_pool size is cut, we exploit the virtual memory mechanism of
the OS, and just refrain from using frames at high addresses. Then the OS
can swap them to disk.

The control blocks containing file pages are put to a hash table
according to the file address of the page.
We could speed up the access to an individual page by using
"pointer swizzling": we could replace the page references on
non-leaf index pages by direct pointers to the page, if it exists
in the buf_pool. We could make a separate hash table where we could
chain all the page references in non-leaf pages residing in the buf_pool,
using the page reference as the hash key,
and at the time of reading of a page update the pointers accordingly.
Drawbacks of this solution are added complexity and,
possibly, extra space required on non-leaf pages for memory pointers.
A simpler solution is just to speed up the hash table mechanism
in the database, using tables whose size is a power of 2.

                Lists of blocks
                ---------------

There are several lists of control blocks.

The free list (buf_pool->free) contains blocks which are currently not
used.

The common LRU list contains all the blocks holding a file page
except those for which the bufferfix count is non-zero.
The pages are in the LRU list roughly in the order of the last
access to the page, so that the oldest pages are at the end of the
list. We also keep a pointer to near the end of the LRU list,
which we can use when we want to artificially age a page in the
buf_pool. This is used if we know that some page is not needed
again for some time: we insert the block right after the pointer,
causing it to be replaced sooner than would normally be the case.
Currently this aging mechanism is used for the read-ahead of pages,
and it can also be used when there is a scan of a full
table which cannot fit in the memory. Putting the pages near the
end of the LRU list, we make sure that most of the buf_pool stays
in the main memory, undisturbed.

The unzip_LRU list contains a subset of the common LRU list. The
blocks on the unzip_LRU list hold a compressed file page and the
corresponding uncompressed page frame. A block is in unzip_LRU if and
only if the predicate buf_page_belongs_to_unzip_LRU(&block->page)
holds. The blocks in unzip_LRU will be in same order as they are in
the common LRU list. That is, each manipulation of the common LRU
list will result in the same manipulation of the unzip_LRU list.

The chain of modified blocks (buf_pool->flush_list) contains the blocks
holding file pages that have been modified in the memory
but not written to disk yet. The block with the oldest modification
which has not yet been written to disk is at the end of the chain.
The access to this list is protected by buf_pool->flush_list_mutex.

The chain of unmodified compressed blocks (buf_pool->zip_clean)
contains the control blocks (buf_page_t) of those compressed pages
that are not in buf_pool->flush_list and for which no uncompressed
page has been allocated in the buffer pool. The control blocks for
uncompressed pages are accessible via buf_block_t objects that are
reachable via buf_pool->chunks[].

The chains of free memory blocks (buf_pool->zip_free[]) are used by
the buddy allocator (buf0buddy.c) to keep track of currently unused
memory blocks of size sizeof(buf_page_t)..UNIV_PAGE_SIZE / 2. These
blocks are inside the UNIV_PAGE_SIZE-sized memory blocks of type
BUF_BLOCK_MEMORY that the buddy allocator requests from the buffer
pool. The buddy allocator is solely used for allocating control
blocks for compressed pages (buf_page_t) and compressed page frames.

                Loading a file page
                -------------------

First, a victim block for replacement has to be found in the
buf_pool. It is taken from the free list or searched for from the
end of the LRU-list. An exclusive lock is reserved for the frame,
the io_fix field is set in the block fixing the block in buf_pool,
and the io-operation for loading the page is queued. The io-handler thread
releases the X-lock on the frame and resets the io_fix field
when the io operation completes.

A thread may request the above operation using the function
buf_page_get(). It may then continue to request a lock on the frame.
The lock is granted when the io-handler releases the x-lock.

                Read-ahead
                ----------

The read-ahead mechanism is intended to be intelligent and
isolated from the semantically higher levels of the database
index management. From the higher level we only need the
information if a file page has a natural successor or
predecessor page. On the leaf level of a B-tree index,
these are the next and previous pages in the natural
order of the pages.

Let us first explain the read-ahead mechanism when the leaves
of a B-tree are scanned in an ascending or descending order.
When a page is referenced for the first time in the buf_pool,
the buffer manager checks if it is at the border of a so-called
linear read-ahead area. The tablespace is divided into these
areas of size 64 blocks, for example. So if the page is at the
border of such an area, the read-ahead mechanism checks if
all the other blocks in the area have been accessed in an
ascending or descending order. If this is the case, the system
looks at the natural successor or predecessor of the page,
checks if that is at the border of another area, and in this case
issues read-requests for all the pages in that area. Maybe
we could relax the condition that all the pages in the area
have to be accessed: if data is deleted from a table, there may
appear holes of unused pages in the area.

A different read-ahead mechanism is used when there appears
to be a random access pattern to a file.
If a new page is referenced in the buf_pool, and several pages
of its random access area (for instance, 32 consecutive pages
in a tablespace) have recently been referenced, we may predict
that the whole area may be needed in the near future, and issue
the read requests for the whole area.
*/
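
/* Illustrative sketch (not part of the build): the linear read-ahead
description above divides the tablespace into fixed-size areas and only
reacts when the requested page sits at an area border. The area size of
64 pages below is the example figure from the comment, not the server's
configured value. */
#if 0
#include <stdio.h>

#define READ_AHEAD_AREA 64      /* example area size from the comment above */

int main(void)
{
        unsigned long page_no = 127;    /* made-up page number */
        unsigned long low = (page_no / READ_AHEAD_AREA) * READ_AHEAD_AREA;
        unsigned long high = low + READ_AHEAD_AREA;
        int at_border = (page_no == low) || (page_no == high - 1);

        /* Page 127 lies in the area [64, 128) and is its last page, so an
        ascending scan that has touched the rest of the area would now
        trigger read requests for the next area, [128, 192). */
        printf("area [%lu, %lu), at border: %d\n", low, high, at_border);
        return(0);
}
#endif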
#ifndef UNIV_HOTBACKUP
/** Value in microseconds */
static const int WAIT_FOR_READ = 5000;
/** Number of attempts made to read in a page in the buffer pool */
static const ulint BUF_PAGE_READ_MAX_RETRIES = 100;

/** The buffer pools of the database */
UNIV_INTERN buf_pool_t* buf_pool_ptr;

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
static ulint buf_dbg_counter = 0; /*!< This is used to insert validation
                                  operations in execution in the
                                  debug version */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#ifdef UNIV_DEBUG
/** If this is set TRUE, the program prints info whenever
read-ahead or flush occurs */
UNIV_INTERN ibool buf_debug_prints = FALSE;
#endif /* UNIV_DEBUG */

#ifdef UNIV_PFS_RWLOCK
/* Keys to register buffer block related rwlocks and mutexes with
performance schema */
UNIV_INTERN mysql_pfs_key_t buf_pool_page_hash_key;
UNIV_INTERN mysql_pfs_key_t buf_block_lock_key;
# ifdef UNIV_SYNC_DEBUG
UNIV_INTERN mysql_pfs_key_t buf_block_debug_latch_key;
# endif /* UNIV_SYNC_DEBUG */
#endif /* UNIV_PFS_RWLOCK */

#ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t buffer_block_mutex_key;
UNIV_INTERN mysql_pfs_key_t buf_pool_mutex_key;
UNIV_INTERN mysql_pfs_key_t buf_pool_zip_mutex_key;
UNIV_INTERN mysql_pfs_key_t buf_pool_LRU_list_mutex_key;
UNIV_INTERN mysql_pfs_key_t buf_pool_free_list_mutex_key;
UNIV_INTERN mysql_pfs_key_t buf_pool_zip_free_mutex_key;
UNIV_INTERN mysql_pfs_key_t buf_pool_zip_hash_mutex_key;
UNIV_INTERN mysql_pfs_key_t flush_list_mutex_key;
#endif /* UNIV_PFS_MUTEX */

#if defined UNIV_PFS_MUTEX || defined UNIV_PFS_RWLOCK
# ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK

/* Buffer block mutexes and rwlocks can be registered
in one group rather than individually. If PFS_GROUP_BUFFER_SYNC
is defined, register buffer block mutex and rwlock
in one group after their initialization. */
# define PFS_GROUP_BUFFER_SYNC

/* This define caps the number of mutexes/rwlocks that can
be registered with performance schema. Developers can
modify this define if necessary. Please note, this would
be effective only if PFS_GROUP_BUFFER_SYNC is defined. */
# define PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER ULINT_MAX

# endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */
#endif /* UNIV_PFS_MUTEX || UNIV_PFS_RWLOCK */

/** A chunk of buffers. The buffer pool is allocated in chunks.
(moved to buf0buf.h) */
//struct buf_chunk_struct{
//      ulint           mem_size;       /*!< allocated size of the chunk */
//      ulint           size;           /*!< size of frames[] and blocks[] */
//      void*           mem;            /*!< pointer to the memory area which
//                                      was allocated for the frames */
//      buf_block_t*    blocks;         /*!< array of buffer control blocks */
//};
#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
Gets the smallest oldest_modification lsn for any page in the pool. Returns
zero if all modified pages have been flushed to disk.
@return oldest modification in pool, zero if none */
UNIV_INTERN
ib_uint64_t
buf_pool_get_oldest_modification(void)
/*==================================*/
{
        ulint           i;
        buf_page_t*     bpage;
        ib_uint64_t     lsn = 0;
        ib_uint64_t     oldest_lsn = 0;

        /* When we traverse all the flush lists we don't want another
        thread to add a dirty page to any flush list. */
        if (srv_buf_pool_instances > 1)
                log_flush_order_mutex_enter();

        for (i = 0; i < srv_buf_pool_instances; i++) {
                buf_pool_t*     buf_pool;

                buf_pool = buf_pool_from_array(i);

                buf_flush_list_mutex_enter(buf_pool);

                bpage = UT_LIST_GET_LAST(buf_pool->flush_list);

                if (bpage != NULL) {
                        ut_ad(bpage->in_flush_list);
                        lsn = bpage->oldest_modification;
                }

                buf_flush_list_mutex_exit(buf_pool);

                if (!oldest_lsn || oldest_lsn > lsn) {
                        oldest_lsn = lsn;
                }
        }

        if (srv_buf_pool_instances > 1)
                log_flush_order_mutex_exit();

        /* The returned answer may be out of date: the flush_list can
        change after the mutex has been released. */

        return(oldest_lsn);
}
/********************************************************************//**
Get total buffer pool statistics. */
UNIV_INTERN
void
buf_get_total_list_len(
/*===================*/
        ulint*  LRU_len,        /*!< out: length of all LRU lists */
        ulint*  free_len,       /*!< out: length of all free lists */
        ulint*  flush_list_len) /*!< out: length of all flush lists */
{
        ulint   i;

        *LRU_len = 0;
        *free_len = 0;
        *flush_list_len = 0;

        for (i = 0; i < srv_buf_pool_instances; i++) {
                buf_pool_t*     buf_pool;

                buf_pool = buf_pool_from_array(i);
                *LRU_len += UT_LIST_GET_LEN(buf_pool->LRU);
                *free_len += UT_LIST_GET_LEN(buf_pool->free);
                *flush_list_len += UT_LIST_GET_LEN(buf_pool->flush_list);
        }
}

/********************************************************************//**
Get total buffer pool statistics. */
UNIV_INTERN
void
buf_get_total_stat(
/*===============*/
        buf_pool_stat_t*        tot_stat)       /*!< out: buffer pool stats */
{
        ulint   i;

        memset(tot_stat, 0, sizeof(*tot_stat));

        for (i = 0; i < srv_buf_pool_instances; i++) {
                buf_pool_stat_t*        buf_stat;
                buf_pool_t*             buf_pool;

                buf_pool = buf_pool_from_array(i);

                buf_stat = &buf_pool->stat;
                tot_stat->n_page_gets += buf_stat->n_page_gets;
                tot_stat->n_pages_read += buf_stat->n_pages_read;
                tot_stat->n_pages_written += buf_stat->n_pages_written;
                tot_stat->n_pages_created += buf_stat->n_pages_created;
                tot_stat->n_ra_pages_read_rnd += buf_stat->n_ra_pages_read_rnd;
                tot_stat->n_ra_pages_read += buf_stat->n_ra_pages_read;
                tot_stat->n_ra_pages_evicted += buf_stat->n_ra_pages_evicted;
                tot_stat->n_pages_made_young += buf_stat->n_pages_made_young;

                tot_stat->n_pages_not_made_young +=
                        buf_stat->n_pages_not_made_young;
        }
}
/********************************************************************//**
Allocates a buffer block.
@return own: the allocated block, in state BUF_BLOCK_MEMORY */
UNIV_INTERN
buf_block_t*
buf_block_alloc(
/*============*/
        buf_pool_t*     buf_pool)       /*!< in/out: buffer pool instance,
                                        or NULL for round-robin selection
                                        of the buffer pool */
{
        buf_block_t*    block;
        ulint           index;
        static ulint    buf_pool_index;

        if (buf_pool == NULL) {
                /* We are allocating memory from any buffer pool, ensure
                we spread the grace on all buffer pool instances. */
                index = buf_pool_index++ % srv_buf_pool_instances;
                buf_pool = buf_pool_from_array(index);
        }

        block = buf_LRU_get_free_block(buf_pool);

        buf_block_set_state(block, BUF_BLOCK_MEMORY);

        return(block);
}
/********************************************************************//**
Calculates a page checksum which is stored to the page when it is written
to a file. Note that we must be careful to calculate the same value on
32-bit and 64-bit architectures.
@return checksum */
UNIV_INTERN
ulint
buf_calc_page_new_checksum(
/*=======================*/
        const byte*     page)   /*!< in: buffer page */
{
        ulint checksum;

        /* Since the field FIL_PAGE_FILE_FLUSH_LSN, and in versions <= 4.1.x
        ..._ARCH_LOG_NO, are written outside the buffer pool to the first
        pages of data files, we have to skip them in the page checksum
        calculation.
        We must also skip the field FIL_PAGE_SPACE_OR_CHKSUM where the
        checksum is stored, and also the last 8 bytes of page because
        there we store the old formula checksum. */

        checksum = ut_fold_binary(page + FIL_PAGE_OFFSET,
                                  FIL_PAGE_FILE_FLUSH_LSN - FIL_PAGE_OFFSET)
                + ut_fold_binary(page + FIL_PAGE_DATA,
                                 UNIV_PAGE_SIZE - FIL_PAGE_DATA
                                 - FIL_PAGE_END_LSN_OLD_CHKSUM);
        checksum = checksum & 0xFFFFFFFFUL;

        return(checksum);
}
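
/* Illustrative sketch (not part of the build): the two byte ranges folded
by buf_calc_page_new_checksum() above, shown with a toy fold function and
stand-in offsets. The real constants live in fil0fil.h and univ.i; the
values below are assumptions for a 16 KiB page. */
#if 0
#include <stdio.h>

#define PAGE_SIZE               16384   /* stand-in for UNIV_PAGE_SIZE */
#define PAGE_OFFSET             4       /* stand-in for FIL_PAGE_OFFSET */
#define PAGE_FILE_FLUSH_LSN     26      /* stand-in for FIL_PAGE_FILE_FLUSH_LSN */
#define PAGE_DATA               38      /* stand-in for FIL_PAGE_DATA */
#define PAGE_END_OLD_CHKSUM     8       /* stand-in for FIL_PAGE_END_LSN_OLD_CHKSUM */

/* Toy stand-in for ut_fold_binary(); any deterministic fold works here. */
static unsigned long fold(const unsigned char* buf, unsigned long len)
{
        unsigned long hash = 0;

        while (len--) {
                hash = hash * 131 + *buf++;
        }
        return(hash);
}

int main(void)
{
        static unsigned char page[PAGE_SIZE];   /* zero-filled toy page */
        unsigned long checksum;

        /* Fold the header bytes after the checksum slot and before the
        flush-LSN field, then the body up to the trailing old-formula
        checksum: the two ranges named in the comment above. */
        checksum = fold(page + PAGE_OFFSET, PAGE_FILE_FLUSH_LSN - PAGE_OFFSET)
                + fold(page + PAGE_DATA,
                       PAGE_SIZE - PAGE_DATA - PAGE_END_OLD_CHKSUM);
        checksum &= 0xFFFFFFFFUL;

        printf("toy new-formula checksum: %lu\n", checksum);
        return(0);
}
#endif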
/********************************************************************//**
Calculates the fast-checksum (32-bit fold) variant of the new-formula
page checksum.
@return checksum */
UNIV_INTERN
ulint
buf_calc_page_new_checksum_32(
/*==========================*/
        const byte*     page)   /*!< in: buffer page */
{
        ulint checksum;

        checksum = ut_fold_binary(page + FIL_PAGE_OFFSET,
                                  FIL_PAGE_FILE_FLUSH_LSN - FIL_PAGE_OFFSET)
                + ut_fold_binary(page + FIL_PAGE_DATA,
                                 FIL_PAGE_DATA_ALIGN_32 - FIL_PAGE_DATA)
                + ut_fold_binary_32(page + FIL_PAGE_DATA_ALIGN_32,
                                    UNIV_PAGE_SIZE - FIL_PAGE_DATA_ALIGN_32
                                    - FIL_PAGE_END_LSN_OLD_CHKSUM);
        checksum = checksum & 0xFFFFFFFFUL;

        return(checksum);
}
/********************************************************************//**
In versions < 4.0.14 and < 4.1.1 there was a bug that the checksum only
looked at the first few bytes of the page. This calculates that old
checksum.
NOTE: we must first store the new formula checksum to
FIL_PAGE_SPACE_OR_CHKSUM before calculating and storing this old checksum
because this takes that field as an input!
@return checksum */
UNIV_INTERN
ulint
buf_calc_page_old_checksum(
/*=======================*/
        const byte*     page)   /*!< in: buffer page */
{
        ulint checksum;

        checksum = ut_fold_binary(page, FIL_PAGE_FILE_FLUSH_LSN);

        checksum = checksum & 0xFFFFFFFFUL;

        return(checksum);
}
/********************************************************************//**
Checks if a page is corrupt.
@return TRUE if corrupted */
UNIV_INTERN
ibool
buf_page_is_corrupted(
/*==================*/
        const byte*     read_buf,       /*!< in: a database page */
        ulint           zip_size)       /*!< in: size of compressed page;
                                        0 for uncompressed pages */
{
        ulint   checksum_field;
        ulint   old_checksum_field;

        if (UNIV_LIKELY(!zip_size)
            && memcmp(read_buf + FIL_PAGE_LSN + 4,
                      read_buf + UNIV_PAGE_SIZE
                      - FIL_PAGE_END_LSN_OLD_CHKSUM + 4, 4)) {

                /* Stored log sequence numbers at the start and the end
                of page do not match */

                return(TRUE);
        }

#ifndef UNIV_HOTBACKUP
        if (recv_lsn_checks_on) {
                ib_uint64_t     current_lsn;

                if (log_peek_lsn(&current_lsn)
                    && UNIV_UNLIKELY
                    (current_lsn
                     < mach_read_from_8(read_buf + FIL_PAGE_LSN))) {
                        ut_print_timestamp(stderr);

                        fprintf(stderr,
                                " InnoDB: Error: page %lu log sequence number"
                                " %llu\n"
                                "InnoDB: is in the future! Current system "
                                "log sequence number %llu.\n"
                                "InnoDB: Your database may be corrupt or "
                                "you may have copied the InnoDB\n"
                                "InnoDB: tablespace but not the InnoDB "
                                "log files. See\n"
                                "InnoDB: " REFMAN "forcing-innodb-recovery.html\n"
                                "InnoDB: for more information.\n",
                                (ulong) mach_read_from_4(read_buf
                                                         + FIL_PAGE_OFFSET),
                                mach_read_from_8(read_buf + FIL_PAGE_LSN),
                                current_lsn);
                }
        }
#endif

        /* If we use checksums validation, make additional check before
        returning TRUE to ensure that the checksum is not equal to
        BUF_NO_CHECKSUM_MAGIC which might be stored by InnoDB with checksums
        disabled. Otherwise, skip checksum calculation and return FALSE */

        if (UNIV_LIKELY(srv_use_checksums)) {
                checksum_field = mach_read_from_4(read_buf
                                                  + FIL_PAGE_SPACE_OR_CHKSUM);

                if (UNIV_UNLIKELY(zip_size)) {
                        return(checksum_field != BUF_NO_CHECKSUM_MAGIC
                               && checksum_field
                               != page_zip_calc_checksum(read_buf, zip_size));
                }

                old_checksum_field = mach_read_from_4(
                        read_buf + UNIV_PAGE_SIZE
                        - FIL_PAGE_END_LSN_OLD_CHKSUM);

                /* There are 2 valid formulas for old_checksum_field:
                1. Very old versions of InnoDB only stored 8 byte lsn to the
                start and the end of the page.
                2. Newer InnoDB versions store the old formula checksum
                there. */

                if (old_checksum_field != mach_read_from_4(read_buf
                                                           + FIL_PAGE_LSN)
                    && old_checksum_field != BUF_NO_CHECKSUM_MAGIC
                    && old_checksum_field
                    != buf_calc_page_old_checksum(read_buf)) {

                        return(TRUE);
                }

                /* InnoDB versions < 4.0.14 and < 4.1.1 stored the space id
                (always equal to 0), to FIL_PAGE_SPACE_OR_CHKSUM */

                if (!srv_fast_checksum
                    && checksum_field != 0
                    && checksum_field != BUF_NO_CHECKSUM_MAGIC
                    && checksum_field
                    != buf_calc_page_new_checksum(read_buf)) {

                        return(TRUE);
                }

                if (srv_fast_checksum
                    && checksum_field != 0
                    && checksum_field != BUF_NO_CHECKSUM_MAGIC
                    && checksum_field
                    != buf_calc_page_new_checksum_32(read_buf)
                    && checksum_field
                    != buf_calc_page_new_checksum(read_buf)) {

                        return(TRUE);
                }
        }

        return(FALSE);
}
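
/* Illustrative sketch (not part of the build): the decision order of the
uncompressed-page branch above, with the byte-offset details and the fast
checksum variant elided. The magic value and the toy checksum functions are
stand-ins, not the real BUF_NO_CHECKSUM_MAGIC or fold formulas. */
#if 0
#define NO_CHECKSUM_MAGIC 0xDEADBEEFUL  /* stand-in magic value */

/* Trivial stand-ins for the old and new checksum formulas. */
static unsigned long toy_old_checksum(const unsigned char* page)
{
        return(page[0] + 17UL);
}

static unsigned long toy_new_checksum(const unsigned char* page)
{
        return(page[0] * 131UL + page[1]);
}

/* Returns nonzero if the page looks corrupt: 1) the LSN words stored at
the page start and end must agree, 2) the trailing field must be one of
its historical encodings (an LSN word, the magic, or the old formula),
3) the leading field must be 0, the magic, or the new formula. */
static int toy_page_is_corrupted(const unsigned char* page,
                                 unsigned long lsn_word_start,
                                 unsigned long lsn_word_end,
                                 unsigned long stored_new,
                                 unsigned long stored_old)
{
        if (lsn_word_start != lsn_word_end) {
                return(1);
        }

        if (stored_old != lsn_word_start
            && stored_old != NO_CHECKSUM_MAGIC
            && stored_old != toy_old_checksum(page)) {
                return(1);
        }

        if (stored_new != 0
            && stored_new != NO_CHECKSUM_MAGIC
            && stored_new != toy_new_checksum(page)) {
                return(1);
        }

        return(0);
}
#endif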
/********************************************************************//**
Prints a page to stderr. */
UNIV_INTERN
void
buf_page_print(
/*===========*/
        const byte*     read_buf,       /*!< in: a database page */
        ulint           zip_size)       /*!< in: compressed page size, or
                                        0 for uncompressed pages */
{
#ifndef UNIV_HOTBACKUP
        dict_index_t*   index;
#endif /* !UNIV_HOTBACKUP */
        ulint           checksum;
        ulint           checksum_32;
        ulint           old_checksum;
        ulint           size = zip_size;

        if (!size) {
                size = UNIV_PAGE_SIZE;
        }

        ut_print_timestamp(stderr);
        fprintf(stderr, " InnoDB: Page dump in ascii and hex (%lu bytes):\n",
                (ulong) size);
        ut_print_buf(stderr, read_buf, size);
        fputs("\nInnoDB: End of page dump\n", stderr);

        if (zip_size) {
                /* Print compressed page. */

                switch (fil_page_get_type(read_buf)) {
                case FIL_PAGE_TYPE_ZBLOB:
                case FIL_PAGE_TYPE_ZBLOB2:
                        checksum = srv_use_checksums
                                ? page_zip_calc_checksum(read_buf, zip_size)
                                : BUF_NO_CHECKSUM_MAGIC;
                        ut_print_timestamp(stderr);
                        fprintf(stderr,
                                " InnoDB: Compressed BLOB page"
                                " checksum %lu, stored %lu\n"
                                "InnoDB: Page lsn %lu %lu\n"
                                "InnoDB: Page number (if stored"
                                " to page already) %lu,\n"
                                "InnoDB: space id (if stored"
                                " to page already) %lu\n",
                                (ulong) checksum,
                                (ulong) mach_read_from_4(
                                        read_buf + FIL_PAGE_SPACE_OR_CHKSUM),
                                (ulong) mach_read_from_4(
                                        read_buf + FIL_PAGE_LSN),
                                (ulong) mach_read_from_4(
                                        read_buf + (FIL_PAGE_LSN + 4)),
                                (ulong) mach_read_from_4(
                                        read_buf + FIL_PAGE_OFFSET),
                                (ulong) mach_read_from_4(
                                        read_buf
                                        + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID));
                        return;
                default:
                        ut_print_timestamp(stderr);
                        fprintf(stderr,
                                " InnoDB: unknown page type %lu,"
                                " assuming FIL_PAGE_INDEX\n",
                                fil_page_get_type(read_buf));
                        /* fall through */
                case FIL_PAGE_INDEX:
                        checksum = srv_use_checksums
                                ? page_zip_calc_checksum(read_buf, zip_size)
                                : BUF_NO_CHECKSUM_MAGIC;

                        ut_print_timestamp(stderr);
                        fprintf(stderr,
                                " InnoDB: Compressed page checksum %lu,"
                                " stored %lu\n"
                                "InnoDB: Page lsn %lu %lu\n"
                                "InnoDB: Page number (if stored"
                                " to page already) %lu,\n"
                                "InnoDB: space id (if stored"
                                " to page already) %lu\n",
                                (ulong) checksum,
                                (ulong) mach_read_from_4(
                                        read_buf + FIL_PAGE_SPACE_OR_CHKSUM),
                                (ulong) mach_read_from_4(
                                        read_buf + FIL_PAGE_LSN),
                                (ulong) mach_read_from_4(
                                        read_buf + (FIL_PAGE_LSN + 4)),
                                (ulong) mach_read_from_4(
                                        read_buf + FIL_PAGE_OFFSET),
                                (ulong) mach_read_from_4(
                                        read_buf
                                        + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID));
                        return;
                case FIL_PAGE_TYPE_XDES:
                        /* This is an uncompressed page. */
                        break;
                }
        }
        checksum = srv_use_checksums
                ? buf_calc_page_new_checksum(read_buf) : BUF_NO_CHECKSUM_MAGIC;
        checksum_32 = srv_use_checksums
                ? buf_calc_page_new_checksum_32(read_buf) : BUF_NO_CHECKSUM_MAGIC;
        old_checksum = srv_use_checksums
                ? buf_calc_page_old_checksum(read_buf) : BUF_NO_CHECKSUM_MAGIC;

        ut_print_timestamp(stderr);
        fprintf(stderr,
                " InnoDB: Page checksum %lu (32bit_calc: %lu), prior-to-4.0.14-form"
                " checksum %lu\n"
                "InnoDB: stored checksum %lu, prior-to-4.0.14-form"
                " stored checksum %lu\n"
                "InnoDB: Page lsn %lu %lu, low 4 bytes of lsn"
                " at page end %lu\n"
                "InnoDB: Page number (if stored to page already) %lu,\n"
                "InnoDB: space id (if created with >= MySQL-4.1.1"
                " and stored already) %lu\n",
                (ulong) checksum, (ulong) checksum_32, (ulong) old_checksum,
                (ulong) mach_read_from_4(read_buf + FIL_PAGE_SPACE_OR_CHKSUM),
                (ulong) mach_read_from_4(read_buf + UNIV_PAGE_SIZE
                                         - FIL_PAGE_END_LSN_OLD_CHKSUM),
                (ulong) mach_read_from_4(read_buf + FIL_PAGE_LSN),
                (ulong) mach_read_from_4(read_buf + FIL_PAGE_LSN + 4),
                (ulong) mach_read_from_4(read_buf + UNIV_PAGE_SIZE
                                         - FIL_PAGE_END_LSN_OLD_CHKSUM + 4),
                (ulong) mach_read_from_4(read_buf + FIL_PAGE_OFFSET),
                (ulong) mach_read_from_4(read_buf
                                         + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID));

#ifndef UNIV_HOTBACKUP
        if (mach_read_from_2(read_buf + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE)
            == TRX_UNDO_INSERT) {
                fprintf(stderr,
                        "InnoDB: Page may be an insert undo log page\n");
        } else if (mach_read_from_2(read_buf + TRX_UNDO_PAGE_HDR
                                    + TRX_UNDO_PAGE_TYPE)
                   == TRX_UNDO_UPDATE) {
                fprintf(stderr,
                        "InnoDB: Page may be an update undo log page\n");
        }
#endif /* !UNIV_HOTBACKUP */

        switch (fil_page_get_type(read_buf)) {
        index_id_t      index_id;
        case FIL_PAGE_INDEX:
                index_id = btr_page_get_index_id(read_buf);
                fprintf(stderr,
                        "InnoDB: Page may be an index page where"
                        " index id is %llu\n",
                        (ullint) index_id);
#ifndef UNIV_HOTBACKUP
                index = dict_index_find_on_id_low(index_id);
                if (index) {
                        fputs("InnoDB: (", stderr);
                        dict_index_name_print(stderr, NULL, index);
                        fputs(")\n", stderr);
                }
#endif /* !UNIV_HOTBACKUP */
                break;
        case FIL_PAGE_INODE:
                fputs("InnoDB: Page may be an 'inode' page\n", stderr);
                break;
        case FIL_PAGE_IBUF_FREE_LIST:
                fputs("InnoDB: Page may be an insert buffer free list page\n",
                      stderr);
                break;
        case FIL_PAGE_TYPE_ALLOCATED:
                fputs("InnoDB: Page may be a freshly allocated page\n",
                      stderr);
                break;
        case FIL_PAGE_IBUF_BITMAP:
                fputs("InnoDB: Page may be an insert buffer bitmap page\n",
                      stderr);
                break;
        case FIL_PAGE_TYPE_SYS:
                fputs("InnoDB: Page may be a system page\n",
                      stderr);
                break;
        case FIL_PAGE_TYPE_TRX_SYS:
                fputs("InnoDB: Page may be a transaction system page\n",
                      stderr);
                break;
        case FIL_PAGE_TYPE_FSP_HDR:
                fputs("InnoDB: Page may be a file space header page\n",
                      stderr);
                break;
        case FIL_PAGE_TYPE_XDES:
                fputs("InnoDB: Page may be an extent descriptor page\n",
                      stderr);
                break;
        case FIL_PAGE_TYPE_BLOB:
                fputs("InnoDB: Page may be a BLOB page\n",
                      stderr);
                break;
        case FIL_PAGE_TYPE_ZBLOB:
        case FIL_PAGE_TYPE_ZBLOB2:
                fputs("InnoDB: Page may be a compressed BLOB page\n",
                      stderr);
                break;
        }
}
#ifndef UNIV_HOTBACKUP
# ifdef PFS_GROUP_BUFFER_SYNC
/********************************************************************//**
This function registers mutexes and rwlocks in buffer blocks with
performance schema. If PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER is
defined to be a value less than chunk->size, then only mutexes
and rwlocks in the first PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER
blocks are registered. */
static
void
pfs_register_buffer_block(
/*======================*/
        buf_chunk_t*    chunk)  /*!< in/out: chunk of buffers */
{
        ulint           i;
        ulint           num_to_register;
        buf_block_t*    block;

        block = chunk->blocks;

        num_to_register = ut_min(chunk->size,
                                 PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER);

        for (i = 0; i < num_to_register; i++) {
                mutex_t*        mutex;
                rw_lock_t*      rwlock;

# ifdef UNIV_PFS_MUTEX
                mutex = &block->mutex;
                ut_a(!mutex->pfs_psi);
                mutex->pfs_psi = (PSI_server)
                        ? PSI_server->init_mutex(buffer_block_mutex_key, mutex)
                        : NULL;
# endif /* UNIV_PFS_MUTEX */

# ifdef UNIV_PFS_RWLOCK
                rwlock = &block->lock;
                ut_a(!rwlock->pfs_psi);
                rwlock->pfs_psi = (PSI_server)
                        ? PSI_server->init_rwlock(buf_block_lock_key, rwlock)
                        : NULL;

# ifdef UNIV_SYNC_DEBUG
                rwlock = &block->debug_latch;
                ut_a(!rwlock->pfs_psi);
                rwlock->pfs_psi = (PSI_server)
                        ? PSI_server->init_rwlock(buf_block_debug_latch_key,
                                                  rwlock)
                        : NULL;
# endif /* UNIV_SYNC_DEBUG */
# endif /* UNIV_PFS_RWLOCK */
                block++;
        }
}
# endif /* PFS_GROUP_BUFFER_SYNC */
/********************************************************************//**
Initializes a buffer control block when the buf_pool is created. */
static
void
buf_block_init(
/*===========*/
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
        buf_block_t*    block,          /*!< in: pointer to control block */
        byte*           frame)          /*!< in: pointer to buffer frame */
{
        UNIV_MEM_DESC(frame, UNIV_PAGE_SIZE, block);

        block->frame = frame;

        block->page.buf_pool_index = buf_pool_index(buf_pool);
        block->page.state = BUF_BLOCK_NOT_USED;
        block->page.buf_fix_count = 0;
        block->page.io_fix = BUF_IO_NONE;

        block->modify_clock = 0;

#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
        block->page.file_page_was_freed = FALSE;
#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */

        block->check_index_page_at_flush = FALSE;
        block->index = NULL;
        block->btr_search_latch = NULL;

#ifdef UNIV_DEBUG
        block->page.in_page_hash = FALSE;
        block->page.in_zip_hash = FALSE;
        block->page.in_flush_list = FALSE;
        block->page.in_free_list = FALSE;
#endif /* UNIV_DEBUG */
        block->page.flush_list.prev = NULL;
        block->page.flush_list.next = NULL;
        block->page.zip_list.prev = NULL;
        block->page.zip_list.next = NULL;
        block->page.in_LRU_list = FALSE;
        block->in_unzip_LRU_list = FALSE;
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
        block->n_pointers = 0;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
        page_zip_des_init(&block->page.zip);

#if defined PFS_SKIP_BUFFER_MUTEX_RWLOCK || defined PFS_GROUP_BUFFER_SYNC
        /* If PFS_SKIP_BUFFER_MUTEX_RWLOCK is defined, skip registration
        of buffer block mutex/rwlock with performance schema. If
        PFS_GROUP_BUFFER_SYNC is defined, skip the registration
        since buffer block mutex/rwlock will be registered later in
        pfs_register_buffer_block() */

        mutex_create(PFS_NOT_INSTRUMENTED, &block->mutex, SYNC_BUF_BLOCK);
        rw_lock_create(PFS_NOT_INSTRUMENTED, &block->lock, SYNC_LEVEL_VARYING);

# ifdef UNIV_SYNC_DEBUG
        rw_lock_create(PFS_NOT_INSTRUMENTED,
                       &block->debug_latch, SYNC_NO_ORDER_CHECK);
# endif /* UNIV_SYNC_DEBUG */

#else /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */
        mutex_create(buffer_block_mutex_key, &block->mutex, SYNC_BUF_BLOCK);
        rw_lock_create(buf_block_lock_key, &block->lock, SYNC_LEVEL_VARYING);

# ifdef UNIV_SYNC_DEBUG
        rw_lock_create(buf_block_debug_latch_key,
                       &block->debug_latch, SYNC_NO_ORDER_CHECK);
# endif /* UNIV_SYNC_DEBUG */
#endif /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */

        ut_ad(rw_lock_validate(&(block->lock)));
}
/********************************************************************//**
Allocates a chunk of buffer frames.
@return chunk, or NULL on failure */
static
buf_chunk_t*
buf_chunk_init(
/*===========*/
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
        buf_chunk_t*    chunk,          /*!< out: chunk of buffers */
        ulint           mem_size)       /*!< in: requested size in bytes */
{
        buf_block_t*    block;
        byte*           frame;
        ulint           i;
        ulint           size_target;

        /* Round down to a multiple of page size,
        although it already should be. */
        mem_size = ut_2pow_round(mem_size, UNIV_PAGE_SIZE);
        size_target = (mem_size / UNIV_PAGE_SIZE) - 1;
        /* Reserve space for the block descriptors. */
        mem_size += ut_2pow_round((mem_size / UNIV_PAGE_SIZE) * (sizeof *block)
                                  + (UNIV_PAGE_SIZE - 1), UNIV_PAGE_SIZE);

        chunk->mem_size = mem_size;
        chunk->mem = os_mem_alloc_large(&chunk->mem_size);

        if (UNIV_UNLIKELY(chunk->mem == NULL)) {

                return(NULL);
        }

        /* Allocate the block descriptors from
        the start of the memory block. */
        chunk->blocks = chunk->mem;

        /* Align a pointer to the first frame. Note that when
        os_large_page_size is smaller than UNIV_PAGE_SIZE,
        we may allocate one fewer block than requested. When
        it is bigger, we may allocate more blocks than requested. */

        frame = ut_align(chunk->mem, UNIV_PAGE_SIZE);
        chunk->size = chunk->mem_size / UNIV_PAGE_SIZE
                - (frame != chunk->mem);

        /* Subtract the space needed for block descriptors. */
        {
                ulint   size = chunk->size;

                while (frame < (byte*) (chunk->blocks + size)) {
                        frame += UNIV_PAGE_SIZE;
                        size--;
                }

                chunk->size = size;
        }

        if (chunk->size > size_target) {
                chunk->size = size_target;
        }

        /* Init block structs and assign frames for them. Then we
        assign the frames to the first blocks (we already mapped the
        memory above). */

        block = chunk->blocks;

        for (i = chunk->size; i--; ) {

                buf_block_init(buf_pool, block, frame);

#ifdef HAVE_valgrind
                /* Wipe contents of frame to eliminate a Purify warning */
                memset(block->frame, '\0', UNIV_PAGE_SIZE);
#endif
                /* Add the block to the free list */
                mutex_enter(&buf_pool->free_list_mutex);
                UT_LIST_ADD_LAST(free, buf_pool->free, (&block->page));

                ut_d(block->page.in_free_list = TRUE);
                mutex_exit(&buf_pool->free_list_mutex);

                ut_ad(buf_pool_from_block(block) == buf_pool);

                block++;
                frame += UNIV_PAGE_SIZE;
        }

#ifdef PFS_GROUP_BUFFER_SYNC
        pfs_register_buffer_block(chunk);
#endif
        return(chunk);
}
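
/* Illustrative sketch (not part of the build): the descriptor/frame
trade-off handled by the subtraction loop in buf_chunk_init() above. The
page size matches a 16 KiB UNIV_PAGE_SIZE, but the per-block descriptor
size of 400 bytes and the 512-frame chunk are made-up figures. */
#if 0
#include <stdio.h>

#define PAGE_SIZE       16384UL /* stand-in for UNIV_PAGE_SIZE */
#define BLOCK_DESC_SIZE 400UL   /* assumed sizeof(buf_block_t) */

int main(void)
{
        unsigned long size = 512;       /* frames the chunk could hold */
        unsigned long frame = 0;        /* byte offset of the first frame */

        /* Mirror of the loop above: keep pushing the first frame forward
        by one page while it still overlaps the block-descriptor array
        placed at the start of the chunk; each such page costs one frame. */
        while (frame < size * BLOCK_DESC_SIZE) {
                frame += PAGE_SIZE;
                size--;
        }

        printf("usable frames: %lu, first frame at byte offset %lu\n",
               size, frame);    /* 499 frames, offset 212992 */
        return(0);
}
#endif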
#ifdef UNIV_DEBUG
/*********************************************************************//**
Finds a block in the given buffer chunk that points to a
given compressed page.
@return buffer block pointing to the compressed page, or NULL */
static
buf_block_t*
buf_chunk_contains_zip(
/*===================*/
        buf_chunk_t*    chunk,  /*!< in: chunk being checked */
        const void*     data)   /*!< in: pointer to compressed page */
{
        buf_block_t*    block;
        ulint           i;

        block = chunk->blocks;

        for (i = chunk->size; i--; block++) {
                if (block->page.zip.data == data) {

                        return(block);
                }
        }

        return(NULL);
}

/*********************************************************************//**
Finds a block in the buffer pool that points to a
given compressed page.
@return buffer block pointing to the compressed page, or NULL */
UNIV_INTERN
buf_block_t*
buf_pool_contains_zip(
/*==================*/
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
        const void*     data)           /*!< in: pointer to compressed page */
{
        ulint           n;
        buf_chunk_t*    chunk = buf_pool->chunks;

        ut_ad(buf_pool);
        //ut_ad(buf_pool_mutex_own(buf_pool));
        ut_ad(mutex_own(&buf_pool->zip_free_mutex));
        for (n = buf_pool->n_chunks; n--; chunk++) {

                buf_block_t* block = buf_chunk_contains_zip(chunk, data);

                if (block) {

                        return(block);
                }
        }

        return(NULL);
}
#endif /* UNIV_DEBUG */
/*********************************************************************//**
Checks that all file pages in the buffer chunk are in a replaceable state.
@return address of a non-free block, or NULL if all freed */
static
const buf_block_t*
buf_chunk_not_freed(
/*================*/
        buf_chunk_t*    chunk)  /*!< in: chunk being checked */
{
        buf_block_t*    block;
        ulint           i;

        block = chunk->blocks;

        for (i = chunk->size; i--; block++) {
                ibool   ready;

                switch (buf_block_get_state(block)) {
                case BUF_BLOCK_ZIP_FREE:
                case BUF_BLOCK_ZIP_PAGE:
                case BUF_BLOCK_ZIP_DIRTY:
                        /* The uncompressed buffer pool should never
                        contain compressed block descriptors. */
                        ut_error;
                        break;
                case BUF_BLOCK_NOT_USED:
                case BUF_BLOCK_READY_FOR_USE:
                case BUF_BLOCK_MEMORY:
                case BUF_BLOCK_REMOVE_HASH:
                        /* Skip blocks that are not being used for
                        file pages. */
                        break;
                case BUF_BLOCK_FILE_PAGE:
                        mutex_enter(&block->mutex);
                        ready = buf_flush_ready_for_replace(&block->page);
                        mutex_exit(&block->mutex);

                        if (block->page.is_corrupt) {
                                /* corrupt page may remain, it can be skipped */
                                break;
                        }

                        if (!ready) {

                                return(block);
                        }

                        break;
                }
        }

        return(NULL);
}
/********************************************************************//**
Set buffer pool size variables after resizing it */
static
void
buf_pool_set_sizes(void)
/*====================*/
{
        ulint   i;
        ulint   curr_size = 0;

        buf_pool_mutex_enter_all();

        for (i = 0; i < srv_buf_pool_instances; i++) {
                buf_pool_t*     buf_pool;

                buf_pool = buf_pool_from_array(i);
                curr_size += buf_pool->curr_pool_size;
        }

        srv_buf_pool_curr_size = curr_size;
        srv_buf_pool_old_size = srv_buf_pool_size;

        buf_pool_mutex_exit_all();
}
/********************************************************************//**
Initialize a buffer pool instance.
@return DB_SUCCESS if all goes well. */
UNIV_INTERN
ulint
buf_pool_init_instance(
/*===================*/
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
        ulint           buf_pool_size,  /*!< in: size in bytes */
        ulint           instance_no)    /*!< in: id of the instance */
{
        ulint           i;
        buf_chunk_t*    chunk;

        /* 1. Initialize general fields
        ------------------------------- */
        mutex_create(buf_pool_mutex_key,
                     &buf_pool->mutex, SYNC_BUF_POOL);
        mutex_create(buf_pool_LRU_list_mutex_key,
                     &buf_pool->LRU_list_mutex, SYNC_BUF_LRU_LIST);
        rw_lock_create(buf_pool_page_hash_key,
                       &buf_pool->page_hash_latch, SYNC_BUF_PAGE_HASH);
        mutex_create(buf_pool_free_list_mutex_key,
                     &buf_pool->free_list_mutex, SYNC_BUF_FREE_LIST);
        mutex_create(buf_pool_zip_free_mutex_key,
                     &buf_pool->zip_free_mutex, SYNC_BUF_ZIP_FREE);
        mutex_create(buf_pool_zip_hash_mutex_key,
                     &buf_pool->zip_hash_mutex, SYNC_BUF_ZIP_HASH);
        mutex_create(buf_pool_zip_mutex_key,
                     &buf_pool->zip_mutex, SYNC_BUF_BLOCK);

        mutex_enter(&buf_pool->LRU_list_mutex);
        rw_lock_x_lock(&buf_pool->page_hash_latch);
        buf_pool_mutex_enter(buf_pool);

        if (buf_pool_size > 0) {
                buf_pool->n_chunks = 1;
                buf_pool->chunks = chunk = mem_zalloc(sizeof *chunk);

                UT_LIST_INIT(buf_pool->free);

                if (!buf_chunk_init(buf_pool, chunk, buf_pool_size)) {
                        mem_free(chunk);
                        mem_free(buf_pool);

                        mutex_exit(&buf_pool->LRU_list_mutex);
                        rw_lock_x_unlock(&buf_pool->page_hash_latch);
                        buf_pool_mutex_exit(buf_pool);

                        return(DB_ERROR);
                }

                buf_pool->instance_no = instance_no;
                buf_pool->old_pool_size = buf_pool_size;
                buf_pool->curr_size = chunk->size;
                buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE;

                buf_pool->page_hash = hash_create(2 * buf_pool->curr_size);
                buf_pool->zip_hash = hash_create(2 * buf_pool->curr_size);

                buf_pool->last_printout_time = ut_time();
        }
        /* 2. Initialize flushing fields
        -------------------------------- */

        mutex_create(flush_list_mutex_key, &buf_pool->flush_list_mutex,
                     SYNC_BUF_FLUSH_LIST);

        for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
                buf_pool->no_flush[i] = os_event_create(NULL);
        }

        /* 3. Initialize LRU fields
        --------------------------- */

        /* All fields are initialized by mem_zalloc(). */

        mutex_exit(&buf_pool->LRU_list_mutex);
        rw_lock_x_unlock(&buf_pool->page_hash_latch);
        buf_pool_mutex_exit(buf_pool);

        return(DB_SUCCESS);
}
/********************************************************************//**
free one buffer pool instance */
static
void
buf_pool_free_instance(
/*===================*/
        buf_pool_t*     buf_pool)       /* in,own: buffer pool instance
                                        to free */
{
        buf_chunk_t*    chunk;
        buf_chunk_t*    chunks;
        buf_page_t*     bpage;

        bpage = UT_LIST_GET_LAST(buf_pool->LRU);
        while (bpage != NULL) {
                buf_page_t*     prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
                enum buf_page_state     state = buf_page_get_state(bpage);

                ut_ad(buf_page_in_file(bpage));
                ut_ad(bpage->in_LRU_list);

                if (state != BUF_BLOCK_FILE_PAGE) {
                        /* We must not have any dirty block except
                        when doing a fast shutdown. */
                        ut_ad(state == BUF_BLOCK_ZIP_PAGE
                              || srv_fast_shutdown == 2);
                        buf_page_free_descriptor(bpage);
                }

                bpage = prev_bpage;
        }

        chunks = buf_pool->chunks;
        chunk = chunks + buf_pool->n_chunks;

        while (--chunk >= chunks) {
                os_mem_free_large(chunk->mem, chunk->mem_size);
        }

        mem_free(buf_pool->chunks);
        hash_table_free(buf_pool->page_hash);
        hash_table_free(buf_pool->zip_hash);
}
/********************************************************************//**
Creates the buffer pool.
@return DB_SUCCESS if success, DB_ERROR if not enough memory or error */
UNIV_INTERN
ulint
buf_pool_init(
/*==========*/
	ulint	total_size,	/*!< in: size of the total pool in bytes */
	ulint	n_instances)	/*!< in: number of instances */
{
	ulint		i;
	const ulint	size	= total_size / n_instances;

	ut_ad(n_instances > 0);
	ut_ad(n_instances <= MAX_BUFFER_POOLS);
	ut_ad(n_instances == srv_buf_pool_instances);

	/* Allocate the array of buffer pool instances; each instance
	receives an equal share of the total pool size. */
	buf_pool_ptr = mem_zalloc(n_instances * sizeof *buf_pool_ptr);

	for (i = 0; i < n_instances; i++) {
		buf_pool_t*	ptr	= &buf_pool_ptr[i];

		if (buf_pool_init_instance(ptr, size, i) != DB_SUCCESS) {

			/* Free all the instances created so far. */
			buf_pool_free(i);

			return(DB_ERROR);
		}
	}

	buf_pool_set_sizes();
	buf_LRU_old_ratio_update(100 * 3 / 8, FALSE);

	btr_search_sys_create(buf_pool_get_curr_size() / sizeof(void*) / 64);

	return(DB_SUCCESS);
}
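
/* Usage sketch (illustrative, not from the original code): at startup the
server passes the global settings, roughly

	if (buf_pool_init(srv_buf_pool_size, srv_buf_pool_instances)
	    != DB_SUCCESS) {
		... fail startup: not enough memory ...
	}

With a hypothetical srv_buf_pool_size of 2 GB and 4 instances, each
instance manages 512 MB, since size = total_size / n_instances above. */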
/********************************************************************//**
Frees the buffer pool at shutdown. This must not be invoked before
freeing all mutexes. */
UNIV_INTERN
void
buf_pool_free(
/*==========*/
	ulint	n_instances)	/*!< in: number of instances to free */
{
	ulint	i;

	for (i = 0; i < n_instances; i++) {
		buf_pool_free_instance(buf_pool_from_array(i));
	}

	mem_free(buf_pool_ptr);
	buf_pool_ptr = NULL;
}

/********************************************************************//**
Clears the adaptive hash index on all pages in the buffer pool. */
UNIV_INTERN
void
buf_pool_clear_hash_index(void)
/*===========================*/
{
	ulint	p;

#ifdef UNIV_SYNC_DEBUG
	ulint	j;

	for (j = 0; j < btr_search_index_num; j++) {
		ut_ad(rw_lock_own(btr_search_latch_part[j], RW_LOCK_EX));
	}
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(!btr_search_enabled);

	for (p = 0; p < srv_buf_pool_instances; p++) {
		buf_pool_t*	buf_pool = buf_pool_from_array(p);
		buf_chunk_t*	chunks	= buf_pool->chunks;
		buf_chunk_t*	chunk	= chunks + buf_pool->n_chunks;

		while (--chunk >= chunks) {
			buf_block_t*	block	= chunk->blocks;
			ulint		i	= chunk->size;

			for (; i--; block++) {
				dict_index_t*	index	= block->index;

				/* We can set block->index = NULL
				when we have an x-latch on btr_search_latch;
				see the comment in buf0buf.h */

				if (!index) {
					/* Not hashed */

					continue;
				}

				block->index = NULL;
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
				block->n_pointers = 0;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
			}
		}
	}
}
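
/* Illustration (not part of the original code): the nested loops above
are the standard way this file visits every block of every chunk:

	for each chunk, scanned in reverse:
		block = chunk->blocks;
		i = chunk->size;
		for (; i--; block++)
			... inspect one buf_block_t ...

Because the caller has already disabled the adaptive hash index
(ut_ad(!btr_search_enabled) above), resetting block->index here cannot
race with new hash entries being created. */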
/********************************************************************//**
Relocate a buffer control block.  Relocates the block on the LRU list
and in buf_pool->page_hash.  Does not relocate bpage->list.
The caller must take care of relocating bpage->list. */
UNIV_INTERN
void
buf_relocate(
/*=========*/
	buf_page_t*	bpage,	/*!< in/out: control block being relocated;
				buf_page_get_state(bpage) must be
				BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */
	buf_page_t*	dpage)	/*!< in/out: destination control block */
{
	buf_page_t*	b;
	ulint		fold;
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	//ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_EX));
#endif
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
	ut_a(bpage->buf_fix_count == 0);
	ut_ad(bpage->in_LRU_list);
	ut_ad(!bpage->in_zip_hash);
	ut_ad(bpage->in_page_hash);
	ut_ad(bpage == buf_page_hash_get(buf_pool,
					 bpage->space, bpage->offset));
	ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
#ifdef UNIV_DEBUG
	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_ZIP_FREE:
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_FILE_PAGE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		ut_error;
	case BUF_BLOCK_ZIP_DIRTY:
	case BUF_BLOCK_ZIP_PAGE:
		break;
	}
#endif /* UNIV_DEBUG */

	memcpy(dpage, bpage, sizeof *dpage);

	bpage->in_LRU_list = FALSE;
	ut_d(bpage->in_page_hash = FALSE);

	/* relocate buf_pool->LRU */
	b = UT_LIST_GET_PREV(LRU, bpage);
	UT_LIST_REMOVE(LRU, buf_pool->LRU, bpage);

	if (b) {
		UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, b, dpage);
	} else {
		UT_LIST_ADD_FIRST(LRU, buf_pool->LRU, dpage);
	}

	if (UNIV_UNLIKELY(buf_pool->LRU_old == bpage)) {
		buf_pool->LRU_old = dpage;
#ifdef UNIV_LRU_DEBUG
		/* buf_pool->LRU_old must be the first item in the LRU list
		whose "old" flag is set. */
		ut_a(buf_pool->LRU_old->old);
		ut_a(!UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)
		     || !UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)->old);
		ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)
		     || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
	} else {
		/* Check that the "old" flag is consistent in
		the block and its neighbours. */
		buf_page_set_old(dpage, buf_page_is_old(dpage));
#endif /* UNIV_LRU_DEBUG */
	}

	ut_d(UT_LIST_VALIDATE(LRU, buf_page_t, buf_pool->LRU,
			      ut_ad(ut_list_node_313->in_LRU_list)));

	/* relocate buf_pool->page_hash */
	fold = buf_page_address_fold(bpage->space, bpage->offset);

	HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage);
	HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold, dpage);
}
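
/* Usage note (illustrative, not from the original code): after
buf_relocate() the caller still owns bpage->list.  For a dirty page the
caller is expected to follow up along these lines:

	buf_relocate(bpage, &block->page);
	buf_flush_relocate_on_flush_list(bpage, &block->page);

as buf_page_get_gen() below does when inflating a compressed-only page. */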
/********************************************************************//**
Determine if a block is a sentinel for a buffer pool watch.
@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
UNIV_INTERN
ibool
buf_pool_watch_is_sentinel(
/*=======================*/
	buf_pool_t*		buf_pool,	/*!< buffer pool instance */
	const buf_page_t*	bpage)		/*!< in: block */
{
	ut_ad(buf_page_in_file(bpage));

	if (bpage < &buf_pool->watch[0]
	    || bpage >= &buf_pool->watch[BUF_POOL_WATCH_SIZE]) {

		ut_ad(buf_page_get_state(bpage) != BUF_BLOCK_ZIP_PAGE
		      || bpage->zip.data != NULL);

		return(FALSE);
	}

	ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
	ut_ad(!bpage->in_zip_hash);
	ut_ad(bpage->in_page_hash);
	ut_ad(bpage->zip.data == NULL);
	ut_ad(bpage->buf_fix_count > 0);

	return(TRUE);
}
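
/* Illustration (not part of the original code): sentinel detection is a
plain address-range test against the watch array embedded in buf_pool_t.
Schematically:

	is_sentinel = (bpage >= &buf_pool->watch[0]
		       && bpage < &buf_pool->watch[BUF_POOL_WATCH_SIZE]);

so no extra flag in buf_page_t is needed to mark a watch sentinel. */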
/****************************************************************//**
Add watch for the given page to be read in.  The caller must not hold
buf_pool->page_hash_latch; it is acquired and released here.
@return NULL if watch set, block if the page is in the buffer pool */
UNIV_INTERN
buf_page_t*
buf_pool_watch_set(
/*===============*/
	ulint	space,	/*!< in: space id */
	ulint	offset,	/*!< in: page number */
	ulint	fold)	/*!< in: buf_page_address_fold(space, offset) */
{
	buf_page_t*	bpage;
	ulint		i;
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);
	mutex_t*	block_mutex;

	//ut_ad(buf_pool_mutex_own(buf_pool));
	rw_lock_x_lock(&buf_pool->page_hash_latch);

	bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);

	if (UNIV_LIKELY_NULL(bpage)) {
		block_mutex = buf_page_get_mutex_enter(bpage);
		ut_a(block_mutex);

		if (!buf_pool_watch_is_sentinel(buf_pool, bpage)) {
			/* The page was loaded meanwhile. */
			rw_lock_x_unlock(&buf_pool->page_hash_latch);
			return(bpage);
		}
		/* Add to an existing watch. */
		bpage->buf_fix_count++;
		rw_lock_x_unlock(&buf_pool->page_hash_latch);
		mutex_exit(block_mutex);
		return(NULL);
	}

	/* buf_pool->watch is protected by zip_mutex for now */
	mutex_enter(&buf_pool->zip_mutex);
	for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) {
		bpage = &buf_pool->watch[i];

		ut_ad(bpage->access_time == 0);
		ut_ad(bpage->newest_modification == 0);
		ut_ad(bpage->oldest_modification == 0);
		ut_ad(bpage->zip.data == NULL);
		ut_ad(!bpage->in_zip_hash);

		switch (bpage->state) {
		case BUF_BLOCK_POOL_WATCH:
			ut_ad(!bpage->in_page_hash);
			ut_ad(bpage->buf_fix_count == 0);

			/* bpage is pointing to buf_pool->watch[],
			which is protected by buf_pool->mutex.
			Normally, buf_page_t objects are protected by
			buf_block_t::mutex or buf_pool->zip_mutex or both. */

			bpage->state = BUF_BLOCK_ZIP_PAGE;
			bpage->space = space;
			bpage->offset = offset;
			bpage->buf_fix_count = 1;
			bpage->buf_pool_index = buf_pool_index(buf_pool);

			ut_d(bpage->in_page_hash = TRUE);
			HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
				    fold, bpage);

			rw_lock_x_unlock(&buf_pool->page_hash_latch);
			mutex_exit(&buf_pool->zip_mutex);
			return(NULL);
		case BUF_BLOCK_ZIP_PAGE:
			ut_ad(bpage->in_page_hash);
			ut_ad(bpage->buf_fix_count > 0);
			break;
		default:
			ut_error;
		}
	}

	/* Allocation failed.  Either the maximum number of purge
	threads should never exceed BUF_POOL_WATCH_SIZE, or this code
	should be modified to return a special non-NULL value and the
	caller should purge the record directly. */
	ut_error;

	/* Fix compiler warning */
	rw_lock_x_unlock(&buf_pool->page_hash_latch);
	mutex_exit(&buf_pool->zip_mutex);
	return(NULL);
}

/****************************************************************//**
Remove the sentinel block for the watch before replacing it with a real
block.  buf_pool_watch_unset() or buf_pool_watch_occurred() will notice
that the block has been replaced with the real block. */
static
void
buf_pool_watch_remove(
/*==================*/
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	ulint		fold,		/*!< in: buf_page_address_fold(
					space, offset) */
	buf_page_t*	watch)		/*!< in/out: sentinel for watch */
{
	//ut_ad(buf_pool_mutex_own(buf_pool));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_EX));
#endif
	ut_ad(mutex_own(&buf_pool->zip_mutex));	/* for now */

	HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, watch);
	ut_d(watch->in_page_hash = FALSE);
	watch->buf_fix_count = 0;
	watch->state = BUF_BLOCK_POOL_WATCH;
}

/****************************************************************//**
Stop watching if the page has been read in.
buf_pool_watch_set(space,offset) must have returned NULL before. */
UNIV_INTERN
void
buf_pool_watch_unset(
/*=================*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: page number */
{
	buf_page_t*	bpage;
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);
	ulint		fold = buf_page_address_fold(space, offset);

	//buf_pool_mutex_enter(buf_pool);
	rw_lock_x_lock(&buf_pool->page_hash_latch);
	bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);
	/* The page must exist because buf_pool_watch_set()
	increments buf_fix_count. */
	ut_a(bpage);

	if (UNIV_UNLIKELY(!buf_pool_watch_is_sentinel(buf_pool, bpage))) {
		mutex_t* mutex = buf_page_get_mutex_enter(bpage);

		ut_a(bpage->buf_fix_count > 0);
		bpage->buf_fix_count--;
		mutex_exit(mutex);
	} else {
		mutex_enter(&buf_pool->zip_mutex);
		ut_a(bpage->buf_fix_count > 0);

		if (UNIV_LIKELY(!--bpage->buf_fix_count)) {
			buf_pool_watch_remove(buf_pool, fold, bpage);
		}
		mutex_exit(&buf_pool->zip_mutex);
	}

	//buf_pool_mutex_exit(buf_pool);
	rw_lock_x_unlock(&buf_pool->page_hash_latch);
}

/****************************************************************//**
Check if the page has been read in.
This may only be called after buf_pool_watch_set(space,offset)
has returned NULL and before invoking buf_pool_watch_unset(space,offset).
@return FALSE if the given page was not read in, TRUE if it was */
UNIV_INTERN
ibool
buf_pool_watch_occurred(
/*====================*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: page number */
{
	ibool		ret;
	buf_page_t*	bpage;
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);
	ulint		fold	= buf_page_address_fold(space, offset);

	//buf_pool_mutex_enter(buf_pool);
	rw_lock_s_lock(&buf_pool->page_hash_latch);

	bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);
	/* The page must exist because buf_pool_watch_set()
	increments buf_fix_count. */
	ut_a(bpage);
	ret = !buf_pool_watch_is_sentinel(buf_pool, bpage);
	//buf_pool_mutex_exit(buf_pool);
	rw_lock_s_unlock(&buf_pool->page_hash_latch);

	return(ret);
}
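
/* Usage sketch (illustrative, not from the original code): the three
watch calls above compose into a protocol for detecting whether a page
was read in while the caller worked without it:

	if (buf_pool_watch_set(space, offset, fold) == NULL) {
		... operate without the page ...
		if (buf_pool_watch_occurred(space, offset)) {
			... someone read the page in; redo the work ...
		}
		buf_pool_watch_unset(space, offset);
	}

A non-NULL return from buf_pool_watch_set() means the page is already in
the pool; in that case it is returned with its block mutex still held,
as buf_page_get_gen() below relies on. */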
/********************************************************************//**
Moves a page to the start of the buffer pool LRU list. This high-level
function can be used to prevent an important page from slipping out of
the buffer pool. */
UNIV_INTERN
void
buf_page_make_young(
/*================*/
	buf_page_t*	bpage)	/*!< in: buffer block of a file page */
{
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	//buf_pool_mutex_enter(buf_pool);
	mutex_enter(&buf_pool->LRU_list_mutex);

	ut_a(buf_page_in_file(bpage));

	buf_LRU_make_block_young(bpage);

	//buf_pool_mutex_exit(buf_pool);
	mutex_exit(&buf_pool->LRU_list_mutex);
}

/********************************************************************//**
Sets the time of the first access of a page and moves a page to the
start of the buffer pool LRU list if it is too old.  This high-level
function can be used to prevent an important page from slipping
out of the buffer pool. */
static
void
buf_page_set_accessed_make_young(
/*=============================*/
	buf_page_t*	bpage,		/*!< in/out: buffer block of a
					file page */
	unsigned	access_time)	/*!< in: bpage->access_time
					read under mutex protection,
					or 0 if unknown */
{
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	ut_ad(!buf_pool_mutex_own(buf_pool));
	ut_a(buf_page_in_file(bpage));

	if (buf_page_peek_if_too_old(bpage)) {
		//buf_pool_mutex_enter(buf_pool);
		mutex_enter(&buf_pool->LRU_list_mutex);
		buf_LRU_make_block_young(bpage);
		//buf_pool_mutex_exit(buf_pool);
		mutex_exit(&buf_pool->LRU_list_mutex);
	} else if (!access_time) {
		ulint		time_ms = ut_time_ms();
		mutex_t*	block_mutex = buf_page_get_mutex_enter(bpage);
		//buf_pool_mutex_enter(buf_pool);
		if (block_mutex) {
			buf_page_set_accessed(bpage, time_ms);
			mutex_exit(block_mutex);
		}
		//buf_pool_mutex_exit(buf_pool);
	}
}

/********************************************************************//**
Resets the check_index_page_at_flush field of a page if found in the buffer
pool. */
UNIV_INTERN
void
buf_reset_check_index_page_at_flush(
/*================================*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: page number */
{
	buf_block_t*	block;
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);

	//buf_pool_mutex_enter(buf_pool);
	rw_lock_s_lock(&buf_pool->page_hash_latch);

	block = (buf_block_t*) buf_page_hash_get(buf_pool, space, offset);

	if (block && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE) {
		ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
		block->check_index_page_at_flush = FALSE;
	}

	//buf_pool_mutex_exit(buf_pool);
	rw_lock_s_unlock(&buf_pool->page_hash_latch);
}

#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
/********************************************************************//**
Sets file_page_was_freed TRUE if the page is found in the buffer pool.
This function should be called when we free a file page and want the
debug version to check that it is not accessed any more unless
reallocated.
@return control block if found in page hash table, otherwise NULL */
UNIV_INTERN
buf_page_t*
buf_page_set_file_page_was_freed(
/*=============================*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: page number */
{
	buf_page_t*	bpage;
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);

	//buf_pool_mutex_enter(buf_pool);
	rw_lock_s_lock(&buf_pool->page_hash_latch);

	bpage = buf_page_hash_get(buf_pool, space, offset);

	if (bpage) {
		ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
		/* bpage->file_page_was_freed can already hold
		when this code is invoked from dict_drop_index_tree() */
		bpage->file_page_was_freed = TRUE;
	}

	//buf_pool_mutex_exit(buf_pool);
	rw_lock_s_unlock(&buf_pool->page_hash_latch);

	return(bpage);
}

/********************************************************************//**
Sets file_page_was_freed FALSE if the page is found in the buffer pool.
This function should be called when a previously freed file page is
reallocated, so that the debug version stops treating accesses to the
page as errors.
@return control block if found in page hash table, otherwise NULL */
UNIV_INTERN
buf_page_t*
buf_page_reset_file_page_was_freed(
/*===============================*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: page number */
{
	buf_page_t*	bpage;
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);

	//buf_pool_mutex_enter(buf_pool);
	rw_lock_s_lock(&buf_pool->page_hash_latch);

	bpage = buf_page_hash_get(buf_pool, space, offset);

	if (bpage) {
		ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
		bpage->file_page_was_freed = FALSE;
	}

	//buf_pool_mutex_exit(buf_pool);
	rw_lock_s_unlock(&buf_pool->page_hash_latch);

	return(bpage);
}
#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */

/********************************************************************//**
Get read access to a compressed page (usually of type
FIL_PAGE_TYPE_ZBLOB or FIL_PAGE_TYPE_ZBLOB2).
The page must be released with buf_page_release_zip().
NOTE: the page is not protected by any latch.  Mutual exclusion has to
be implemented at a higher level.  In other words, all possible
accesses to a given page through this function must be protected by
the same set of mutexes or latches.
@return pointer to the block */
UNIV_INTERN
buf_page_t*
buf_page_get_zip(
/*=============*/
	ulint	space,	/*!< in: space id */
	ulint	zip_size,/*!< in: compressed page size */
	ulint	offset)	/*!< in: page number */
{
	buf_page_t*	bpage;
	mutex_t*	block_mutex;
	ibool		must_read;
	unsigned	access_time;
	trx_t*		trx = NULL;
	ulint		sec;
	ulint		ms;
	ib_uint64_t	start_time;
	ib_uint64_t	finish_time;
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);

	if (innobase_get_slow_log()) {
		trx = innobase_get_trx();
	}
	buf_pool->stat.n_page_gets++;

	for (;;) {
		//buf_pool_mutex_enter(buf_pool);
lookup:
		rw_lock_s_lock(&buf_pool->page_hash_latch);
		bpage = buf_page_hash_get(buf_pool, space, offset);
		if (bpage) {
			ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
			break;
		}

		/* Page not in buf_pool: needs to be read from file */

		//buf_pool_mutex_exit(buf_pool);
		rw_lock_s_unlock(&buf_pool->page_hash_latch);

		buf_read_page(space, zip_size, offset, trx);

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
		ut_a(++buf_dbg_counter % 37 || buf_validate());
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
	}

	if (UNIV_UNLIKELY(bpage->space_was_being_deleted)) {
		/* This page is obsolete: discard it and retry. */
		rw_lock_s_unlock(&buf_pool->page_hash_latch);
		mutex_enter(&buf_pool->LRU_list_mutex);
		block_mutex = buf_page_get_mutex_enter(bpage);

		if (UNIV_UNLIKELY(!block_mutex)) {
			mutex_exit(&buf_pool->LRU_list_mutex);
			goto lookup;
		}

		buf_LRU_free_block(bpage, TRUE, TRUE);
		mutex_exit(&buf_pool->LRU_list_mutex);
		mutex_exit(block_mutex);
		block_mutex = NULL;

		goto lookup;
	}

	if (UNIV_UNLIKELY(!bpage->zip.data)) {
		/* There is no compressed page. */
err_exit:
		//buf_pool_mutex_exit(buf_pool);
		rw_lock_s_unlock(&buf_pool->page_hash_latch);
		return(NULL);
	}

	if (srv_pass_corrupt_table <= 1) {
		if (bpage->is_corrupt) {
			rw_lock_s_unlock(&buf_pool->page_hash_latch);
			return(NULL);
		}
	}

	block_mutex = buf_page_get_mutex_enter(bpage);

	rw_lock_s_unlock(&buf_pool->page_hash_latch);

	ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));

	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
	case BUF_BLOCK_ZIP_FREE:
		if (block_mutex)
			mutex_exit(block_mutex);
		break;
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
		ut_a(block_mutex == &buf_pool->zip_mutex);
		bpage->buf_fix_count++;
		goto got_block;
	case BUF_BLOCK_FILE_PAGE:
		ut_a(block_mutex == &((buf_block_t*) bpage)->mutex);

		/* release mutex to obey the latch order */
		mutex_exit(block_mutex);
		/* get LRU_list_mutex for buf_LRU_free_block() */
		mutex_enter(&buf_pool->LRU_list_mutex);
		mutex_enter(block_mutex);

		if (UNIV_UNLIKELY(bpage->space != space
				  || bpage->offset != offset
				  || !bpage->in_LRU_list
				  || !bpage->zip.data)) {
			/* Someone else changed the block while the
			mutexes were released; retry. */
			mutex_exit(&buf_pool->LRU_list_mutex);
			mutex_exit(block_mutex);
			goto lookup;
		}

		/* Discard the uncompressed page frame if possible. */
		if (buf_LRU_free_block(bpage, FALSE, TRUE)) {
			mutex_exit(&buf_pool->LRU_list_mutex);
			mutex_exit(block_mutex);
			goto lookup;
		}

		mutex_exit(&buf_pool->LRU_list_mutex);

		buf_block_buf_fix_inc((buf_block_t*) bpage,
				      __FILE__, __LINE__);
		goto got_block;
	}

	ut_error;
	goto err_exit;

got_block:
	must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ;
	access_time = buf_page_is_accessed(bpage);

	//buf_pool_mutex_exit(buf_pool);

	mutex_exit(block_mutex);

	buf_page_set_accessed_make_young(bpage, access_time);

#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
	ut_a(!bpage->file_page_was_freed);
#endif

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(++buf_dbg_counter % 5771 || buf_validate());
	ut_a(bpage->buf_fix_count > 0);
	ut_a(buf_page_in_file(bpage));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

	if (must_read) {
		/* Let us wait until the read operation
		completes */

		if (innobase_get_slow_log() && trx && trx->take_stats)
		{
			ut_usectime(&sec, &ms);
			start_time = (ib_uint64_t)sec * 1000000 + ms;
		} else {
			start_time = 0;
		}
		for (;;) {
			enum buf_io_fix	io_fix;

			mutex_enter(block_mutex);
			io_fix = buf_page_get_io_fix(bpage);
			mutex_exit(block_mutex);

			if (io_fix == BUF_IO_READ) {

				os_thread_sleep(WAIT_FOR_READ);
			} else {
				break;
			}
		}
		if (innobase_get_slow_log() && trx && trx->take_stats
		    && start_time)
		{
			ut_usectime(&sec, &ms);
			finish_time = (ib_uint64_t)sec * 1000000 + ms;
			trx->io_reads_wait_timer
				+= (ulint)(finish_time - start_time);
		}
	}

#ifdef UNIV_IBUF_COUNT_DEBUG
	ut_a(ibuf_count_get(buf_page_get_space(bpage),
			    buf_page_get_page_no(bpage)) == 0);
#endif
	return(bpage);
}
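
/* Usage sketch (illustrative, not from the original code): callers pair
this function with buf_page_release_zip(), as the header comment above
requires, roughly:

	bpage = buf_page_get_zip(space, zip_size, offset);
	if (bpage != NULL) {
		... read from bpage->zip.data ...
		buf_page_release_zip(bpage);
	}

No page latch is taken here, so mutual exclusion on the page contents
must be guaranteed by the caller. */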
/********************************************************************//**
Initialize some fields of a control block. */
UNIV_INLINE
void
buf_block_init_low(
/*===============*/
	buf_block_t*	block)	/*!< in: block to init */
{
	block->check_index_page_at_flush = FALSE;
	block->index		= NULL;
	block->btr_search_latch	= NULL;

	block->n_hash_helps	= 0;
	block->n_fields		= 1;
	block->n_bytes		= 0;
	block->left_side	= TRUE;
}
#endif /* !UNIV_HOTBACKUP */

/********************************************************************//**
Decompress a block.
@return TRUE if successful */
UNIV_INTERN
ibool
buf_zip_decompress(
/*===============*/
	buf_block_t*	block,	/*!< in/out: block */
	ibool		check)	/*!< in: TRUE=verify the page checksum */
{
	const byte*	frame		= block->page.zip.data;
	ulint		stamp_checksum	= mach_read_from_4(
		frame + FIL_PAGE_SPACE_OR_CHKSUM);

	ut_ad(buf_block_get_zip_size(block));
	ut_a(buf_block_get_space(block) != 0);

	if (UNIV_LIKELY(check && stamp_checksum != BUF_NO_CHECKSUM_MAGIC)) {
		ulint	calc_checksum	= page_zip_calc_checksum(
			frame, page_zip_get_size(&block->page.zip));

		if (UNIV_UNLIKELY(stamp_checksum != calc_checksum)) {
			ut_print_timestamp(stderr);
			fprintf(stderr,
				"  InnoDB: compressed page checksum mismatch"
				" (space %u page %u): %lu != %lu\n",
				block->page.space, block->page.offset,
				stamp_checksum, calc_checksum);
			return(FALSE);
		}
	}

	switch (fil_page_get_type(frame)) {
	case FIL_PAGE_INDEX:
		if (page_zip_decompress(&block->page.zip,
					block->frame, TRUE)) {
			return(TRUE);
		}

		fprintf(stderr,
			"InnoDB: unable to decompress space %lu page %lu\n",
			(ulong) block->page.space,
			(ulong) block->page.offset);
		return(FALSE);

	case FIL_PAGE_TYPE_ALLOCATED:
	case FIL_PAGE_INODE:
	case FIL_PAGE_IBUF_BITMAP:
	case FIL_PAGE_TYPE_FSP_HDR:
	case FIL_PAGE_TYPE_XDES:
	case FIL_PAGE_TYPE_ZBLOB:
	case FIL_PAGE_TYPE_ZBLOB2:
		/* Copy to uncompressed storage. */
		memcpy(block->frame, frame,
		       buf_block_get_zip_size(block));
		return(TRUE);
	}

	ut_print_timestamp(stderr);
	fprintf(stderr,
		"  InnoDB: unknown compressed page"
		" type %lu\n",
		fil_page_get_type(frame));
	return(FALSE);
}
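
/* Illustration (not part of the original code): the checksum test above
is skipped either when the caller passes check == FALSE or when the page
was stamped with BUF_NO_CHECKSUM_MAGIC, i.e. effectively

	do_verify = check && stamp_checksum != BUF_NO_CHECKSUM_MAGIC;

Only FIL_PAGE_INDEX pages are actually decompressed; the other page types
handled above are stored uncompressed inside the compressed frame and are
simply copied out. */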
#ifndef UNIV_HOTBACKUP
/*******************************************************************//**
Gets the block whose frame the pointer points to, if the frame is found
in this buffer pool instance.
@return pointer to block */
UNIV_INTERN
buf_block_t*
buf_block_align_instance(
/*=====================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer in which the block
					resides */
	const byte*	ptr)		/*!< in: pointer to a frame */
{
	buf_chunk_t*	chunk;
	ulint		i;

	/* TODO: protect buf_pool->chunks with a mutex (it will
	currently remain constant after buf_pool_init()) */
	for (chunk = buf_pool->chunks, i = buf_pool->n_chunks; i--; chunk++) {
		ulint	offs;

		if (UNIV_UNLIKELY(ptr < chunk->blocks->frame)) {

			continue;
		}
		/* else */

		offs = ptr - chunk->blocks->frame;

		offs >>= UNIV_PAGE_SIZE_SHIFT;

		if (UNIV_LIKELY(offs < chunk->size)) {
			buf_block_t*	block = &chunk->blocks[offs];

			/* The function buf_chunk_init() invokes
			buf_block_init() so that block[n].frame ==
			block->frame + n * UNIV_PAGE_SIZE.  Check it. */
			ut_ad(block->frame == page_align(ptr));
#ifdef UNIV_DEBUG
			/* A thread that updates these fields must
			hold buf_pool->mutex and block->mutex.  Acquire
			only the latter. */
			mutex_enter(&block->mutex);

			switch (buf_block_get_state(block)) {
			case BUF_BLOCK_ZIP_FREE:
			case BUF_BLOCK_ZIP_PAGE:
			case BUF_BLOCK_ZIP_DIRTY:
				/* These types should only be used in
				the compressed buffer pool, whose
				memory is allocated from
				buf_pool->chunks, in UNIV_PAGE_SIZE
				blocks flagged as BUF_BLOCK_MEMORY. */
				ut_error;
				break;
			case BUF_BLOCK_NOT_USED:
			case BUF_BLOCK_READY_FOR_USE:
			case BUF_BLOCK_MEMORY:
				/* Some data structures contain
				"guess" pointers to file pages.  The
				file pages may have been freed and
				reused.  Do not complain. */
				break;
			case BUF_BLOCK_REMOVE_HASH:
				/* buf_LRU_block_remove_hashed_page()
				will overwrite the FIL_PAGE_OFFSET and
				FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID with
				0xff and set the state to
				BUF_BLOCK_REMOVE_HASH. */
				ut_ad(page_get_space_id(page_align(ptr))
				      == 0xffffffff);
				ut_ad(page_get_page_no(page_align(ptr))
				      == 0xffffffff);
				break;
			case BUF_BLOCK_FILE_PAGE:
				ut_ad(block->page.space
				      == page_get_space_id(page_align(ptr)));
				ut_ad(block->page.offset
				      == page_get_page_no(page_align(ptr)));
				break;
			}

			mutex_exit(&block->mutex);
#endif /* UNIV_DEBUG */

			return(block);
		}
	}

	return(NULL);
}

/*******************************************************************//**
Gets the block whose frame the pointer points to.
@return pointer to block, never NULL */
UNIV_INTERN
buf_block_t*
buf_block_align(
/*============*/
	const byte*	ptr)	/*!< in: pointer to a frame */
{
	ulint		i;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_block_t*	block;

		block = buf_block_align_instance(
			buf_pool_from_array(i), ptr);
		if (block) {
			return(block);
		}
	}

	/* The block should always be found. */
	ut_error;
	return(NULL);
}
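
/* Illustration (not part of the original code): within a chunk, the
block index of an arbitrary in-frame pointer is recovered by pure
arithmetic, because frames are contiguous and UNIV_PAGE_SIZE aligned:

	offs  = (ptr - chunk->blocks->frame) >> UNIV_PAGE_SIZE_SHIFT;
	block = &chunk->blocks[offs];

which is why buf_block_align() needs no hash lookup, only a linear scan
over the (small, constant) set of chunks per instance. */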
/********************************************************************//**
Find out if a pointer belongs to a buf_block_t.  It can be a pointer to
the buf_block_t itself or a member of it.  This function checks one
buffer pool instance.
@return TRUE if ptr belongs to a buf_block_t struct */
static
ibool
buf_pointer_is_block_field_instance(
/*================================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	const void*	ptr)		/*!< in: pointer not dereferenced */
{
	const buf_chunk_t*		chunk	= buf_pool->chunks;
	const buf_chunk_t* const	echunk	= chunk + buf_pool->n_chunks;

	/* TODO: protect buf_pool->chunks with a mutex (it will
	currently remain constant after buf_pool_init()) */
	while (chunk < echunk) {
		if (ptr >= (void *)chunk->blocks
		    && ptr < (void *)(chunk->blocks + chunk->size)) {

			return(TRUE);
		}

		chunk++;
	}

	return(FALSE);
}

/********************************************************************//**
Find out if a pointer belongs to a buf_block_t.  It can be a pointer to
the buf_block_t itself or a member of it.
@return TRUE if ptr belongs to a buf_block_t struct */
UNIV_INTERN
ibool
buf_pointer_is_block_field(
/*=======================*/
	const void*	ptr)	/*!< in: pointer not dereferenced */
{
	ulint	i;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		ibool	found;

		found = buf_pointer_is_block_field_instance(
			buf_pool_from_array(i), ptr);
		if (found) {
			return(TRUE);
		}
	}

	return(FALSE);
}

/********************************************************************//**
Find out if a buffer block was created by buf_chunk_init().
@return TRUE if "block" has been added to buf_pool->free by buf_chunk_init() */
static
ibool
buf_block_is_uncompressed(
/*======================*/
	buf_pool_t*		buf_pool,	/*!< in: buffer pool instance */
	const buf_block_t*	block)		/*!< in: pointer to block,
						not dereferenced */
{
	//ut_ad(buf_pool_mutex_own(buf_pool));

	if (UNIV_UNLIKELY((((ulint) block) % sizeof *block) != 0)) {
		/* The pointer should be aligned. */
		return(FALSE);
	}

	return(buf_pointer_is_block_field_instance(buf_pool, (void *)block));
}
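
/* Illustration (not part of the original code): the alignment test above
is a cheap heuristic filter.  Every valid block pointer lies a multiple
of sizeof(buf_block_t) from the start of chunk->blocks, so a misaligned
guess such as

	(const buf_block_t*) ((const byte*) block + 1)

fails the "% sizeof *block" test and is rejected without being
dereferenced; only aligned candidates reach the range check. */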
/********************************************************************//**
This is the general function used to get access to a database page.
@return pointer to the block or NULL */
UNIV_INTERN
buf_block_t*
buf_page_get_gen(
/*=============*/
	ulint		space,	/*!< in: space id */
	ulint		zip_size,/*!< in: compressed page size in bytes
				or 0 for uncompressed pages */
	ulint		offset,	/*!< in: page number */
	ulint		rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */
	buf_block_t*	guess,	/*!< in: guessed block or NULL */
	ulint		mode,	/*!< in: BUF_GET, BUF_GET_IF_IN_POOL,
				BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH, or
				BUF_GET_IF_IN_POOL_OR_WATCH */
	const char*	file,	/*!< in: file name */
	ulint		line,	/*!< in: line where called */
	mtr_t*		mtr)	/*!< in: mini-transaction */
{
	buf_block_t*	block;
	ulint		fold;
	unsigned	access_time;
	ulint		fix_type;
	ibool		must_read;
	ulint		retries = 0;
	mutex_t*	block_mutex = NULL;
	trx_t*		trx = NULL;
	ulint		sec;
	ulint		ms;
	ib_uint64_t	start_time;
	ib_uint64_t	finish_time;
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);

	ut_ad(mtr);
	ut_ad(mtr->state == MTR_ACTIVE);
	ut_ad((rw_latch == RW_S_LATCH)
	      || (rw_latch == RW_X_LATCH)
	      || (rw_latch == RW_NO_LATCH));
#ifdef UNIV_DEBUG
	switch (mode) {
	case BUF_GET_NO_LATCH:
		ut_ad(rw_latch == RW_NO_LATCH);
		break;
	case BUF_GET:
	case BUF_GET_IF_IN_POOL:
	case BUF_PEEK_IF_IN_POOL:
	case BUF_GET_IF_IN_POOL_OR_WATCH:
	case BUF_GET_POSSIBLY_FREED:
		break;
	default:
		ut_error;
	}
#endif /* UNIV_DEBUG */
	ut_ad(zip_size == fil_space_get_zip_size(space));
	ut_ad(ut_is_2pow(zip_size));
#ifndef UNIV_LOG_DEBUG
	ut_ad(!ibuf_inside(mtr)
	      || ibuf_page_low(space, zip_size, offset,
			       FALSE, file, line, NULL));
#endif
	if (innobase_get_slow_log()) {
		trx = innobase_get_trx();
	}
	buf_pool->stat.n_page_gets++;
	fold = buf_page_address_fold(space, offset);
loop:
	block = guess;
	//buf_pool_mutex_enter(buf_pool);

	if (block) {
		block_mutex = buf_page_get_mutex_enter((buf_page_t*)block);

		/* If the guess is a compressed page descriptor that
		has been allocated by buf_page_alloc_descriptor(),
		it may have been freed by buf_relocate(). */

		if (!block_mutex) {
			block = guess = NULL;
		} else if (!buf_block_is_uncompressed(buf_pool, block)
			   || offset != block->page.offset
			   || space != block->page.space
			   || buf_block_get_state(block)
			      != BUF_BLOCK_FILE_PAGE) {

			mutex_exit(block_mutex);

			block = guess = NULL;
		} else {
			ut_ad(!block->page.in_zip_hash);
			ut_ad(block->page.in_page_hash);
		}
	}

	if (block == NULL) {
		rw_lock_s_lock(&buf_pool->page_hash_latch);
		block = (buf_block_t*) buf_page_hash_get_low(
			buf_pool, space, offset, fold);
		if (block) {
			if (UNIV_UNLIKELY(
				    block->page.space_was_being_deleted)) {
				/* This page is obsolete: discard it
				and retry. */
				rw_lock_s_unlock(&buf_pool->page_hash_latch);
				mutex_enter(&buf_pool->LRU_list_mutex);
				block_mutex = buf_page_get_mutex_enter(
					(buf_page_t*)block);

				if (UNIV_UNLIKELY(!block_mutex)) {
					mutex_exit(&buf_pool->LRU_list_mutex);
					goto loop;
				}

				buf_LRU_free_block((buf_page_t*)block,
						   TRUE, TRUE);
				mutex_exit(&buf_pool->LRU_list_mutex);
				mutex_exit(block_mutex);
				block_mutex = NULL;

				goto loop;
			}

			block_mutex = buf_page_get_mutex_enter(
				(buf_page_t*)block);
			ut_a(block_mutex);
		}
		rw_lock_s_unlock(&buf_pool->page_hash_latch);
	}

loop2:
	if (block && buf_pool_watch_is_sentinel(buf_pool, &block->page)) {
		mutex_exit(block_mutex);
		block = NULL;
	}

	if (block == NULL) {
		/* Page not in buf_pool: needs to be read from file */

		if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
			block = (buf_block_t*) buf_pool_watch_set(
				space, offset, fold);

			if (UNIV_LIKELY_NULL(block)) {
				block_mutex = buf_page_get_mutex(
					(buf_page_t*)block);
				ut_a(block_mutex);
				ut_ad(mutex_own(block_mutex));
				goto got_block;
			}
		}

		//buf_pool_mutex_exit(buf_pool);

		if (mode == BUF_GET_IF_IN_POOL
		    || mode == BUF_PEEK_IF_IN_POOL
		    || mode == BUF_GET_IF_IN_POOL_OR_WATCH) {

			return(NULL);
		}

		if (buf_read_page(space, zip_size, offset, trx)) {
			buf_read_ahead_random(space, zip_size, offset,
					      ibuf_inside(mtr), trx);

			retries = 0;
		} else if (retries < BUF_PAGE_READ_MAX_RETRIES) {
			++retries;
		} else {
			fprintf(stderr, "InnoDB: Error: Unable"
				" to read tablespace %lu page no"
				" %lu into the buffer pool after"
				" %lu attempts\n"
				"InnoDB: The most probable cause"
				" of this error may be that the"
				" table has been corrupted.\n"
				"InnoDB: You can try to fix this"
				" problem by using"
				" innodb_force_recovery.\n"
				"InnoDB: Please see reference manual"
				" for more details.\n"
				"InnoDB: Aborting...\n",
				space, offset,
				BUF_PAGE_READ_MAX_RETRIES);

			ut_error;
		}

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
		ut_a(++buf_dbg_counter % 37 || buf_validate());
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
		goto loop;
	}

got_block:
	ut_ad(page_zip_get_size(&block->page.zip) == zip_size);

	must_read = buf_block_get_io_fix(block) == BUF_IO_READ;

	if (must_read && (mode == BUF_GET_IF_IN_POOL
			  || mode == BUF_PEEK_IF_IN_POOL)) {

		/* The page is being read to buffer pool,
		but we cannot wait around for the read to
		complete. */
		//buf_pool_mutex_exit(buf_pool);
		mutex_exit(block_mutex);

		return(NULL);
	}

	if (srv_pass_corrupt_table <= 1) {
		if (block->page.is_corrupt) {
			mutex_exit(block_mutex);
			return(NULL);
		}
	}

	switch (buf_block_get_state(block)) {
		buf_page_t*	bpage;
		ibool		success;

	case BUF_BLOCK_FILE_PAGE:
		if (block_mutex == &buf_pool->zip_mutex) {
			/* The state changed while the mutexes were
			released: we hold the wrong mutex.  Retry. */
			mutex_exit(block_mutex);
			goto loop;
		}
		break;

	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
		ut_ad(block_mutex == &buf_pool->zip_mutex);
		bpage = &block->page;
		/* Protect bpage->buf_fix_count. */
		//mutex_enter(&buf_pool->zip_mutex);

		if (bpage->buf_fix_count
		    || buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
			/* This condition often occurs when the buffer
			is not buffer-fixed, but I/O-fixed by
			buf_page_init_for_read(). */
			//mutex_exit(&buf_pool->zip_mutex);
wait_until_unfixed:
			/* The block is buffer-fixed or I/O-fixed.
			Try again later. */
			//buf_pool_mutex_exit(buf_pool);
			mutex_exit(block_mutex);
			os_thread_sleep(WAIT_FOR_READ);

			goto loop;
		}

		/* Allocate an uncompressed page. */
		//buf_pool_mutex_exit(buf_pool);
		//mutex_exit(&buf_pool->zip_mutex);
		mutex_exit(block_mutex);

		block = buf_LRU_get_free_block(buf_pool);
		ut_a(block);
		block_mutex = &block->mutex;

		//buf_pool_mutex_enter(buf_pool);
		mutex_enter(&buf_pool->LRU_list_mutex);
		rw_lock_x_lock(&buf_pool->page_hash_latch);
		mutex_enter(block_mutex);

		{
			buf_page_t*	hash_bpage;

			hash_bpage = buf_page_hash_get_low(
				buf_pool, space, offset, fold);

			if (UNIV_UNLIKELY(bpage != hash_bpage)) {
				/* The buf_pool->page_hash was modified
				while buf_pool->mutex was released.
				Free the block that was allocated. */

				buf_LRU_block_free_non_file_page(block, TRUE);
				mutex_exit(block_mutex);
				block = (buf_block_t*) hash_bpage;
				if (block) {
					block_mutex
						= buf_page_get_mutex_enter(
							(buf_page_t*)block);
					ut_a(block_mutex);
				}
				rw_lock_x_unlock(&buf_pool->page_hash_latch);
				mutex_exit(&buf_pool->LRU_list_mutex);
				goto loop2;
			}
		}

		mutex_enter(&buf_pool->zip_mutex);

		if (UNIV_UNLIKELY
		    (bpage->buf_fix_count
		     || buf_page_get_io_fix(bpage) != BUF_IO_NONE)) {

			mutex_exit(&buf_pool->zip_mutex);
			/* The block was buffer-fixed or I/O-fixed
			while buf_pool->mutex was not held by this thread.
			Free the block that was allocated and try again.
			This should be extremely unlikely. */

			buf_LRU_block_free_non_file_page(block, TRUE);
			//mutex_exit(&block->mutex);
			rw_lock_x_unlock(&buf_pool->page_hash_latch);
			mutex_exit(&buf_pool->LRU_list_mutex);

			goto wait_until_unfixed;
		}

		/* Move the compressed page from bpage to block,
		and uncompress it. */

		buf_relocate(bpage, &block->page);

		rw_lock_x_unlock(&buf_pool->page_hash_latch);

		buf_block_init_low(block);
		block->lock_hash_val = lock_rec_hash(space, offset);

		UNIV_MEM_DESC(&block->page.zip.data,
			      page_zip_get_size(&block->page.zip), block);

		if (buf_page_get_state(&block->page)
		    == BUF_BLOCK_ZIP_PAGE) {
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
			UT_LIST_REMOVE(zip_list, buf_pool->zip_clean,
				       &block->page);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
			ut_ad(!block->page.in_flush_list);
		} else {
			/* Relocate buf_pool->flush_list. */
			buf_flush_relocate_on_flush_list(bpage,
							 &block->page);
		}

		/* Buffer-fix, I/O-fix, and X-latch the block
		for the duration of the decompression.
		Also add the block to the unzip_LRU list. */
		block->page.state = BUF_BLOCK_FILE_PAGE;

		/* Insert at the front of unzip_LRU list */
		buf_unzip_LRU_add_block(block, FALSE);

		mutex_exit(&buf_pool->LRU_list_mutex);

		block->page.buf_fix_count = 1;
		buf_block_set_io_fix(block, BUF_IO_READ);
		rw_lock_x_lock_func(&block->lock, 0, file, line);

		UNIV_MEM_INVALID(bpage, sizeof *bpage);

		mutex_exit(block_mutex);
		mutex_exit(&buf_pool->zip_mutex);

		buf_pool_mutex_enter(buf_pool);
		buf_pool->n_pend_unzip++;
		buf_pool_mutex_exit(buf_pool);
		//buf_pool_mutex_exit(buf_pool);

		buf_page_free_descriptor(bpage);

		/* Decompress the page and apply buffered operations
		while not holding buf_pool->mutex or block->mutex. */
		success = buf_zip_decompress(block, srv_use_checksums);
		ut_a(success);

		if (UNIV_LIKELY(!recv_no_ibuf_operations)) {
			ibuf_merge_or_delete_for_page(block, space, offset,
						      zip_size, TRUE);
		}

		/* Unfix and unlatch the block. */
		//buf_pool_mutex_enter(buf_pool);
		block_mutex = &block->mutex;
		mutex_enter(block_mutex);
		block->page.buf_fix_count--;
		buf_block_set_io_fix(block, BUF_IO_NONE);

		buf_pool_mutex_enter(buf_pool);
		buf_pool->n_pend_unzip--;
		buf_pool_mutex_exit(buf_pool);

		rw_lock_x_unlock(&block->lock);

		break;

	case BUF_BLOCK_ZIP_FREE:
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		ut_error;
		break;
	}

	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);

	//mutex_enter(&block->mutex);
#if UNIV_WORD_SIZE == 4
	/* On 32-bit systems, there is no padding in buf_page_t.  On
	other systems, Valgrind could complain about uninitialized pad
	bytes. */
	UNIV_MEM_ASSERT_RW(&block->page, sizeof block->page);
#endif
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
	if ((mode == BUF_GET_IF_IN_POOL || mode == BUF_GET_IF_IN_POOL_OR_WATCH)
	    && ibuf_debug) {
		/* Try to evict the block from the buffer pool, to use the
		insert buffer (change buffer) as much as possible. */

		if (buf_LRU_free_block(&block->page, TRUE, FALSE)) {
			mutex_exit(block_mutex);
			if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
				/* Set the watch, as it would have
				been set if the page were not in the
				buffer pool in the first place. */
				block = (buf_block_t*) buf_pool_watch_set(
					space, offset, fold);

				if (UNIV_LIKELY_NULL(block)) {
					block_mutex = buf_page_get_mutex(
						(buf_page_t*)block);
					ut_a(block_mutex);
					ut_ad(mutex_own(block_mutex));

					/* The page entered the buffer
					pool for some reason.  Try to
					evict it again. */
					goto got_block;
				}
			}
			//buf_pool_mutex_exit(buf_pool);
			fprintf(stderr,
				"innodb_change_buffering_debug evict %u %u\n",
				(unsigned) space, (unsigned) offset);
			return(NULL);
		} else if (buf_flush_page_try(buf_pool, block)) {
			fprintf(stderr,
				"innodb_change_buffering_debug flush %u %u\n",
				(unsigned) space, (unsigned) offset);
			guess = block;
			goto loop;
		}

		/* Failed to evict the page; change it directly */
	}
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */

	buf_block_buf_fix_inc(block, file, line);
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
	ut_a(mode == BUF_GET_POSSIBLY_FREED
	     || !block->page.file_page_was_freed);
#endif
	//mutex_exit(&block->mutex);

	/* Check if this is the first access to the page */

	access_time = buf_page_is_accessed(&block->page);

	//buf_pool_mutex_exit(buf_pool);
	mutex_exit(block_mutex);

	if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL)) {
		buf_page_set_accessed_make_young(&block->page, access_time);
	}

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(++buf_dbg_counter % 5771 || buf_validate());
	ut_a(block->page.buf_fix_count > 0);
	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

	switch (rw_latch) {
	case RW_NO_LATCH:
		if (must_read) {
			/* Let us wait until the read operation
			completes */

			if (innobase_get_slow_log() && trx
			    && trx->take_stats)
			{
				ut_usectime(&sec, &ms);
				start_time = (ib_uint64_t)sec * 1000000 + ms;
			} else {
				start_time = 0;
			}
			for (;;) {
				enum buf_io_fix	io_fix;

				mutex_enter(&block->mutex);
				io_fix = buf_block_get_io_fix(block);
				mutex_exit(&block->mutex);

				if (io_fix == BUF_IO_READ) {

					os_thread_sleep(WAIT_FOR_READ);
				} else {
					break;
				}
			}
			if (innobase_get_slow_log() && trx
			    && trx->take_stats && start_time)
			{
				ut_usectime(&sec, &ms);
				finish_time
					= (ib_uint64_t)sec * 1000000 + ms;
				trx->io_reads_wait_timer
					+= (ulint)(finish_time - start_time);
			}
		}

		fix_type = MTR_MEMO_BUF_FIX;
		break;

	case RW_S_LATCH:
		rw_lock_s_lock_func(&(block->lock), 0, file, line);

		fix_type = MTR_MEMO_PAGE_S_FIX;
		break;

	default:
		ut_ad(rw_latch == RW_X_LATCH);
		rw_lock_x_lock_func(&(block->lock), 0, file, line);

		fix_type = MTR_MEMO_PAGE_X_FIX;
		break;
	}

	mtr_memo_push(mtr, block, fix_type);

	if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL) && !access_time) {
		/* In the case of a first access, try to apply linear
		read-ahead */

		buf_read_ahead_linear(space, zip_size, offset,
				      ibuf_inside(mtr), trx);
	}

#ifdef UNIV_IBUF_COUNT_DEBUG
	ut_a(ibuf_count_get(buf_block_get_space(block),
			    buf_block_get_page_no(block)) == 0);
#endif
	if (innobase_get_slow_log()) {
		_increment_page_get_statistics(block, trx);
	}

	return(block);
}
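
/* Usage sketch (illustrative, not from the original code): a typical
latched read inside a mini-transaction looks roughly like

	mtr_start(&mtr);
	block = buf_page_get_gen(space, zip_size, offset, RW_S_LATCH,
				 NULL, BUF_GET,
				 __FILE__, __LINE__, &mtr);
	if (block != NULL) {
		... read block->frame ...
	}
	mtr_commit(&mtr);

where mtr_commit() releases the S-latch recorded in the mtr memo.  With
BUF_GET the call waits until the page is read in; the *_IF_IN_POOL modes
return NULL instead, as handled above. */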
/********************************************************************//**
This is the general function used to get optimistic access to a database
page.
@return TRUE if success */
UNIV_INTERN
ibool
buf_page_optimistic_get(
/*====================*/
	ulint		rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH */
	buf_block_t*	block,	/*!< in: guessed buffer block */
	ib_uint64_t	modify_clock,/*!< in: modify clock value if mode is
				..._GUESS_ON_CLOCK */
	const char*	file,	/*!< in: file name */
	ulint		line,	/*!< in: line where called */
	mtr_t*		mtr)	/*!< in: mini-transaction */
{
	buf_pool_t*	buf_pool;
	unsigned	access_time;
	ibool		success;
	ulint		fix_type;
	trx_t*		trx = NULL;

	ut_ad(block);
	ut_ad(mtr);
	ut_ad(mtr->state == MTR_ACTIVE);
	ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH));

	mutex_enter(&block->mutex);

	if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) {

		mutex_exit(&block->mutex);

		return(FALSE);
	}

	buf_block_buf_fix_inc(block, file, line);

	mutex_exit(&block->mutex);

	/* Check if this is the first access to the page.
	We do a dirty read on purpose, to avoid mutex contention.
	This field is only used for heuristic purposes; it does not
	affect correctness. */

	access_time = buf_page_is_accessed(&block->page);
	buf_page_set_accessed_make_young(&block->page, access_time);

	ut_ad(!ibuf_inside(mtr)
	      || ibuf_page(buf_block_get_space(block),
			   buf_block_get_zip_size(block),
			   buf_block_get_page_no(block), NULL));

	if (rw_latch == RW_S_LATCH) {
		success = rw_lock_s_lock_nowait(&(block->lock),
						file, line);
		fix_type = MTR_MEMO_PAGE_S_FIX;
	} else {
		success = rw_lock_x_lock_func_nowait(&(block->lock),
						     file, line);
		fix_type = MTR_MEMO_PAGE_X_FIX;
	}

	if (UNIV_UNLIKELY(!success)) {
		mutex_enter(&block->mutex);
		buf_block_buf_fix_dec(block);
		mutex_exit(&block->mutex);

		return(FALSE);
	}

	if (UNIV_UNLIKELY(modify_clock != block->modify_clock)) {
		buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);

		if (rw_latch == RW_S_LATCH) {
			rw_lock_s_unlock(&(block->lock));
		} else {
			rw_lock_x_unlock(&(block->lock));
		}

		mutex_enter(&block->mutex);
		buf_block_buf_fix_dec(block);
		mutex_exit(&block->mutex);

		return(FALSE);
	}

	mtr_memo_push(mtr, block, fix_type);

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(++buf_dbg_counter % 5771 || buf_validate());
	ut_a(block->page.buf_fix_count > 0);
	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
	ut_a(block->page.file_page_was_freed == FALSE);
#endif
	if (innobase_get_slow_log()) {
		trx = innobase_get_trx();
	}

	if (UNIV_UNLIKELY(!access_time)) {
		/* In the case of a first access, try to apply linear
		read-ahead */

		buf_read_ahead_linear(buf_block_get_space(block),
				      buf_block_get_zip_size(block),
				      buf_block_get_page_no(block),
				      ibuf_inside(mtr), trx);
	}

#ifdef UNIV_IBUF_COUNT_DEBUG
	ut_a(ibuf_count_get(buf_block_get_space(block),
			    buf_block_get_page_no(block)) == 0);
#endif
	buf_pool = buf_pool_from_block(block);
	buf_pool->stat.n_page_gets++;

	if (innobase_get_slow_log()) {
		_increment_page_get_statistics(block, trx);
	}

	return(TRUE);
}
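
/* Usage sketch (illustrative, not from the original code; assumes the
buf_block_get_modify_clock() accessor from buf0buf.ic): remember the
modify clock while the block is latched, release it, and later retry
optimistically:

	modify_clock = buf_block_get_modify_clock(block);
	... release the latch, do other work ...
	if (!buf_page_optimistic_get(RW_S_LATCH, block, modify_clock,
				     __FILE__, __LINE__, &mtr)) {
		... the guess went stale; fall back to buf_page_get_gen() ...
	}

A changed modify clock means the page was modified in between, so the
cached position within the page can no longer be trusted. */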
  2588. /********************************************************************//**
  2589. This is used to get access to a known database page, when no waiting can be
  2590. done. For example, if a search in an adaptive hash index leads us to this
  2591. frame.
  2592. @return TRUE if success */
  2593. UNIV_INTERN
  2594. ibool
  2595. buf_page_get_known_nowait(
  2596. /*======================*/
  2597. ulint rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH */
  2598. buf_block_t* block, /*!< in: the known page */
  2599. ulint mode, /*!< in: BUF_MAKE_YOUNG or BUF_KEEP_OLD */
  2600. const char* file, /*!< in: file name */
  2601. ulint line, /*!< in: line where called */
  2602. mtr_t* mtr) /*!< in: mini-transaction */
  2603. {
  2604. buf_pool_t* buf_pool;
  2605. ibool success;
  2606. ulint fix_type;
  2607. trx_t* trx = NULL;
  2608. ut_ad(mtr);
  2609. ut_ad(mtr->state == MTR_ACTIVE);
  2610. ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH));
  2611. mutex_enter(&block->mutex);
  2612. if (buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH) {
  2613. /* Another thread is just freeing the block from the LRU list
  2614. of the buffer pool: do not try to access this page; this
  2615. attempt to access the page can only come through the hash
  2616. index because when the buffer block state is ..._REMOVE_HASH,
  2617. we have already removed it from the page address hash table
  2618. of the buffer pool. */
  2619. mutex_exit(&block->mutex);
  2620. return(FALSE);
  2621. }
  2622. ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
  2623. buf_block_buf_fix_inc(block, file, line);
  2624. mutex_exit(&block->mutex);
  2625. buf_pool = buf_pool_from_block(block);
  2626. if (mode == BUF_MAKE_YOUNG && buf_page_peek_if_too_old(&block->page)) {
  2627. //buf_pool_mutex_enter(buf_pool);
  2628. mutex_enter(&buf_pool->LRU_list_mutex);
  2629. buf_LRU_make_block_young(&block->page);
  2630. //buf_pool_mutex_exit(buf_pool);
  2631. mutex_exit(&buf_pool->LRU_list_mutex);
  2632. } else if (!buf_page_is_accessed(&block->page)) {
  2633. /* Above, we do a dirty read on purpose, to avoid
  2634. mutex contention. The field buf_page_t::access_time
  2635. is only used for heuristic purposes. Writes to the
  2636. field must be protected by mutex, however. */
  2637. ulint time_ms = ut_time_ms();
  2638. //buf_pool_mutex_enter(buf_pool);
  2639. mutex_enter(&block->mutex);
  2640. buf_page_set_accessed(&block->page, time_ms);
  2641. //buf_pool_mutex_exit(buf_pool);
  2642. mutex_exit(&block->mutex);
  2643. }
  2644. ut_ad(!ibuf_inside(mtr) || mode == BUF_KEEP_OLD);
  2645. if (rw_latch == RW_S_LATCH) {
  2646. success = rw_lock_s_lock_nowait(&(block->lock),
  2647. file, line);
  2648. fix_type = MTR_MEMO_PAGE_S_FIX;
  2649. } else {
  2650. success = rw_lock_x_lock_func_nowait(&(block->lock),
  2651. file, line);
  2652. fix_type = MTR_MEMO_PAGE_X_FIX;
  2653. }
  2654. if (!success) {
  2655. mutex_enter(&block->mutex);
  2656. buf_block_buf_fix_dec(block);
  2657. mutex_exit(&block->mutex);
  2658. return(FALSE);
  2659. }
  2660. mtr_memo_push(mtr, block, fix_type);
  2661. #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
  2662. ut_a(++buf_dbg_counter % 5771 || buf_validate());
  2663. ut_a(block->page.buf_fix_count > 0);
  2664. ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
  2665. #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
  2666. #if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
  2667. ut_a(block->page.file_page_was_freed == FALSE);
  2668. #endif
  2669. #ifdef UNIV_IBUF_COUNT_DEBUG
  2670. ut_a((mode == BUF_KEEP_OLD)
  2671. || (ibuf_count_get(buf_block_get_space(block),
  2672. buf_block_get_page_no(block)) == 0));
  2673. #endif
  2674. buf_pool->stat.n_page_gets++;
  2675. if (innobase_get_slow_log()) {
  2676. trx = innobase_get_trx();
  2677. _increment_page_get_statistics(block, trx);
  2678. }
  2679. return(TRUE);
  2680. }
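/* Editor's note: an illustrative sketch, not part of the original
source. A typical caller already holds a pointer to the block (e.g.
from the adaptive hash index) and must not wait for the latch:

	if (buf_page_get_known_nowait(RW_S_LATCH, block,
				      BUF_MAKE_YOUNG,
				      __FILE__, __LINE__, &mtr)) {
		... read block->frame under the s-latch ...
	}

The buffer-fix and the latch registered with mtr_memo_push() above
are released when the mini-transaction is committed. */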
  2681. /*******************************************************************//**
2682. Given a tablespace id and page number, tries to get that page. If the
2683. page is not in the buffer pool, it is not loaded and NULL is returned.
2684. Suitable for use when holding the kernel mutex.
  2685. @return pointer to a page or NULL */
  2686. UNIV_INTERN
  2687. const buf_block_t*
  2688. buf_page_try_get_func(
  2689. /*==================*/
  2690. ulint space_id,/*!< in: tablespace id */
  2691. ulint page_no,/*!< in: page number */
  2692. const char* file, /*!< in: file name */
  2693. ulint line, /*!< in: line where called */
  2694. mtr_t* mtr) /*!< in: mini-transaction */
  2695. {
  2696. buf_block_t* block;
  2697. ibool success;
  2698. ulint fix_type;
  2699. buf_pool_t* buf_pool = buf_pool_get(space_id, page_no);
  2700. ut_ad(mtr);
  2701. ut_ad(mtr->state == MTR_ACTIVE);
  2702. //buf_pool_mutex_enter(buf_pool);
  2703. rw_lock_s_lock(&buf_pool->page_hash_latch);
  2704. block = buf_block_hash_get(buf_pool, space_id, page_no);
  2705. if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
  2706. //buf_pool_mutex_exit(buf_pool);
  2707. rw_lock_s_unlock(&buf_pool->page_hash_latch);
  2708. return(NULL);
  2709. }
  2710. ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
  2711. mutex_enter(&block->mutex);
  2712. //buf_pool_mutex_exit(buf_pool);
  2713. rw_lock_s_unlock(&buf_pool->page_hash_latch);
  2714. #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
  2715. ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
  2716. ut_a(buf_block_get_space(block) == space_id);
  2717. ut_a(buf_block_get_page_no(block) == page_no);
  2718. #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
  2719. buf_block_buf_fix_inc(block, file, line);
  2720. mutex_exit(&block->mutex);
  2721. fix_type = MTR_MEMO_PAGE_S_FIX;
  2722. success = rw_lock_s_lock_nowait(&block->lock, file, line);
  2723. if (!success) {
  2724. /* Let us try to get an X-latch. If the current thread
  2725. is holding an X-latch on the page, we cannot get an
  2726. S-latch. */
  2727. fix_type = MTR_MEMO_PAGE_X_FIX;
  2728. success = rw_lock_x_lock_func_nowait(&block->lock,
  2729. file, line);
  2730. }
  2731. if (!success) {
  2732. mutex_enter(&block->mutex);
  2733. buf_block_buf_fix_dec(block);
  2734. mutex_exit(&block->mutex);
  2735. return(NULL);
  2736. }
  2737. mtr_memo_push(mtr, block, fix_type);
  2738. #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
  2739. ut_a(++buf_dbg_counter % 5771 || buf_validate());
  2740. ut_a(block->page.buf_fix_count > 0);
  2741. ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
  2742. #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
  2743. #if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
  2744. ut_a(block->page.file_page_was_freed == FALSE);
  2745. #endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */
  2746. buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
  2747. buf_pool->stat.n_page_gets++;
  2748. #ifdef UNIV_IBUF_COUNT_DEBUG
  2749. ut_a(ibuf_count_get(buf_block_get_space(block),
  2750. buf_block_get_page_no(block)) == 0);
  2751. #endif
  2752. return(block);
  2753. }
  2754. /********************************************************************//**
  2755. Initialize some fields of a control block. */
  2756. UNIV_INLINE
  2757. void
  2758. buf_page_init_low(
  2759. /*==============*/
  2760. buf_page_t* bpage) /*!< in: block to init */
  2761. {
  2762. bpage->flush_type = BUF_FLUSH_LRU;
  2763. bpage->io_fix = BUF_IO_NONE;
  2764. bpage->buf_fix_count = 0;
  2765. bpage->freed_page_clock = 0;
  2766. bpage->access_time = 0;
  2767. bpage->newest_modification = 0;
  2768. bpage->oldest_modification = 0;
  2769. HASH_INVALIDATE(bpage, hash);
  2770. bpage->is_corrupt = FALSE;
  2771. #if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
  2772. bpage->file_page_was_freed = FALSE;
  2773. #endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */
  2774. }
  2775. /********************************************************************//**
2776. Initializes a page in the buffer pool buf_pool. */
  2777. static __attribute__((nonnull))
  2778. void
  2779. buf_page_init(
  2780. /*==========*/
  2781. buf_pool_t* buf_pool,/*!< in/out: buffer pool */
  2782. ulint space, /*!< in: space id */
  2783. ulint offset, /*!< in: offset of the page within space
  2784. in units of a page */
  2785. ulint fold, /*!< in: buf_page_address_fold(space,offset) */
  2786. buf_block_t* block) /*!< in/out: block to init */
  2787. {
  2788. buf_page_t* hash_page;
  2789. ut_ad(buf_pool == buf_pool_get(space, offset));
  2790. //ut_ad(buf_pool_mutex_own(buf_pool));
  2791. #ifdef UNIV_SYNC_DEBUG
  2792. ut_ad(rw_lock_own(&buf_pool->page_hash_latch, RW_LOCK_EX));
  2793. #endif
  2794. ut_ad(mutex_own(&(block->mutex)));
  2795. ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
  2796. /* Set the state of the block */
  2797. buf_block_set_file_page(block, space, offset);
  2798. #ifdef UNIV_DEBUG_VALGRIND
  2799. if (!space) {
  2800. /* Silence valid Valgrind warnings about uninitialized
  2801. data being written to data files. There are some unused
  2802. bytes on some pages that InnoDB does not initialize. */
  2803. UNIV_MEM_VALID(block->frame, UNIV_PAGE_SIZE);
  2804. }
  2805. #endif /* UNIV_DEBUG_VALGRIND */
  2806. buf_block_init_low(block);
  2807. block->lock_hash_val = lock_rec_hash(space, offset);
  2808. buf_page_init_low(&block->page);
  2809. /* Insert into the hash table of file pages */
  2810. hash_page = buf_page_hash_get_low(buf_pool, space, offset, fold);
  2811. if (UNIV_LIKELY(!hash_page)) {
  2812. } else if (buf_pool_watch_is_sentinel(buf_pool, hash_page)) {
  2813. /* Preserve the reference count. */
  2814. ulint buf_fix_count;
  2815. mutex_enter(&buf_pool->zip_mutex);
  2816. buf_fix_count = hash_page->buf_fix_count;
  2817. ut_a(buf_fix_count > 0);
  2818. block->page.buf_fix_count += buf_fix_count;
  2819. buf_pool_watch_remove(buf_pool, fold, hash_page);
  2820. mutex_exit(&buf_pool->zip_mutex);
  2821. } else {
  2822. fprintf(stderr,
  2823. "InnoDB: Error: page %lu %lu already found"
  2824. " in the hash table: %p, %p\n",
  2825. (ulong) space,
  2826. (ulong) offset,
  2827. (const void*) hash_page, (const void*) block);
  2828. #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
  2829. mutex_exit(&block->mutex);
  2830. //buf_pool_mutex_exit(buf_pool);
  2831. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  2832. buf_print();
  2833. buf_LRU_print();
  2834. buf_validate();
  2835. buf_LRU_validate();
  2836. #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
  2837. ut_error;
  2838. }
  2839. ut_ad(!block->page.in_zip_hash);
  2840. ut_ad(!block->page.in_page_hash);
  2841. ut_d(block->page.in_page_hash = TRUE);
  2842. HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
  2843. fold, &block->page);
  2844. }
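/* Editor's note (illustrative, not in the original source): the fold
argument must be derived from the same (space, offset) pair, so that
the hash lookup and the HASH_INSERT above address the same hash cell:

	fold = buf_page_address_fold(space, offset);
	buf_page_init(buf_pool, space, offset, fold, block);

This is how buf_page_init_for_read() and buf_page_create() below
invoke this function. */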
  2845. /********************************************************************//**
2846. Initializes a page for reading into the buffer pool. If the page is
  2847. (1) already in buf_pool, or
  2848. (2) if we specify to read only ibuf pages and the page is not an ibuf page, or
  2849. (3) if the space is deleted or being deleted,
  2850. then this function does nothing.
  2851. Sets the io_fix flag to BUF_IO_READ and sets a non-recursive exclusive lock
  2852. on the buffer frame. The io-handler must take care that the flag is cleared
  2853. and the lock released later.
  2854. @return pointer to the block or NULL */
  2855. UNIV_INTERN
  2856. buf_page_t*
  2857. buf_page_init_for_read(
  2858. /*===================*/
  2859. ulint* err, /*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED */
  2860. ulint mode, /*!< in: BUF_READ_IBUF_PAGES_ONLY, ... */
  2861. ulint space, /*!< in: space id */
  2862. ulint zip_size,/*!< in: compressed page size, or 0 */
  2863. ibool unzip, /*!< in: TRUE=request uncompressed page */
  2864. ib_int64_t tablespace_version,
  2865. /*!< in: prevents reading from a wrong
  2866. version of the tablespace in case we have done
  2867. DISCARD + IMPORT */
  2868. ulint offset) /*!< in: page number */
  2869. {
  2870. buf_block_t* block;
  2871. buf_page_t* bpage = NULL;
  2872. buf_page_t* watch_page;
  2873. mtr_t mtr;
  2874. ulint fold;
  2875. ibool lru = FALSE;
  2876. void* data;
  2877. buf_pool_t* buf_pool = buf_pool_get(space, offset);
  2878. ut_ad(buf_pool);
  2879. *err = DB_SUCCESS;
  2880. if (mode == BUF_READ_IBUF_PAGES_ONLY) {
  2881. /* It is a read-ahead within an ibuf routine */
  2882. ut_ad(!ibuf_bitmap_page(zip_size, offset));
  2883. ibuf_mtr_start(&mtr);
  2884. if (!recv_no_ibuf_operations
  2885. && !ibuf_page(space, zip_size, offset, &mtr)) {
  2886. ibuf_mtr_commit(&mtr);
  2887. return(NULL);
  2888. }
  2889. } else {
  2890. ut_ad(mode == BUF_READ_ANY_PAGE);
  2891. }
  2892. if (zip_size && UNIV_LIKELY(!unzip)
  2893. && UNIV_LIKELY(!recv_recovery_is_on())) {
  2894. block = NULL;
  2895. } else {
  2896. block = buf_LRU_get_free_block(buf_pool);
  2897. ut_ad(block);
  2898. ut_ad(buf_pool_from_block(block) == buf_pool);
  2899. }
  2900. fold = buf_page_address_fold(space, offset);
  2901. retry:
  2902. //buf_pool_mutex_enter(buf_pool);
  2903. mutex_enter(&buf_pool->LRU_list_mutex);
  2904. rw_lock_x_lock(&buf_pool->page_hash_latch);
  2905. watch_page = buf_page_hash_get_low(buf_pool, space, offset, fold);
  2906. if (UNIV_UNLIKELY(watch_page && watch_page->space_was_being_deleted)) {
  2907. mutex_t* block_mutex = buf_page_get_mutex_enter(watch_page);
2908. /* This page is obsolete: discard it and retry */
  2909. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  2910. ut_a(block_mutex);
  2911. buf_LRU_free_block(watch_page, TRUE, TRUE);
  2912. mutex_exit(&buf_pool->LRU_list_mutex);
  2913. mutex_exit(block_mutex);
  2914. goto retry;
  2915. }
  2916. if (watch_page && !buf_pool_watch_is_sentinel(buf_pool, watch_page)) {
  2917. /* The page is already in the buffer pool. */
  2918. watch_page = NULL;
  2919. err_exit:
  2920. if (block) {
  2921. mutex_enter(&block->mutex);
  2922. mutex_exit(&buf_pool->LRU_list_mutex);
  2923. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  2924. buf_LRU_block_free_non_file_page(block, FALSE);
  2925. mutex_exit(&block->mutex);
  2926. }
  2927. else {
  2928. mutex_exit(&buf_pool->LRU_list_mutex);
  2929. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  2930. }
  2931. bpage = NULL;
  2932. goto func_exit;
  2933. }
  2934. if (fil_tablespace_deleted_or_being_deleted_in_mem(
  2935. space, tablespace_version)) {
  2936. /* The page belongs to a space which has been
  2937. deleted or is being deleted. */
  2938. *err = DB_TABLESPACE_DELETED;
  2939. goto err_exit;
  2940. }
  2941. if (block) {
  2942. bpage = &block->page;
  2943. mutex_enter(&block->mutex);
  2944. ut_ad(buf_pool_from_bpage(bpage) == buf_pool);
  2945. buf_page_init(buf_pool, space, offset, fold, block);
  2946. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  2947. /* The block must be put to the LRU list, to the old blocks */
  2948. buf_LRU_add_block(bpage, TRUE/* to old blocks */);
  2949. /* We set a pass-type x-lock on the frame because then
  2950. the same thread which called for the read operation
  2951. (and is running now at this point of code) can wait
  2952. for the read to complete by waiting for the x-lock on
  2953. the frame; if the x-lock were recursive, the same
  2954. thread would illegally get the x-lock before the page
  2955. read is completed. The x-lock is cleared by the
  2956. io-handler thread. */
  2957. rw_lock_x_lock_gen(&block->lock, BUF_IO_READ);
  2958. buf_page_set_io_fix(bpage, BUF_IO_READ);
  2959. if (UNIV_UNLIKELY(zip_size)) {
  2960. page_zip_set_size(&block->page.zip, zip_size);
  2961. /* buf_pool->mutex may be released and
  2962. reacquired by buf_buddy_alloc(). Thus, we
  2963. must release block->mutex in order not to
  2964. break the latching order in the reacquisition
  2965. of buf_pool->mutex. We also must defer this
  2966. operation until after the block descriptor has
  2967. been added to buf_pool->LRU and
  2968. buf_pool->page_hash. */
  2969. mutex_exit(&block->mutex);
  2970. data = buf_buddy_alloc(buf_pool, zip_size, &lru, FALSE);
  2971. mutex_enter(&block->mutex);
  2972. block->page.zip.data = data;
  2973. /* To maintain the invariant
  2974. block->in_unzip_LRU_list
  2975. == buf_page_belongs_to_unzip_LRU(&block->page)
  2976. we have to add this block to unzip_LRU
  2977. after block->page.zip.data is set. */
  2978. ut_ad(buf_page_belongs_to_unzip_LRU(&block->page));
  2979. buf_unzip_LRU_add_block(block, TRUE);
  2980. }
  2981. mutex_exit(&buf_pool->LRU_list_mutex);
  2982. mutex_exit(&block->mutex);
  2983. } else {
  2984. /* The compressed page must be allocated before the
  2985. control block (bpage), in order to avoid the
  2986. invocation of buf_buddy_relocate_block() on
  2987. uninitialized data. */
  2988. data = buf_buddy_alloc(buf_pool, zip_size, &lru, TRUE);
  2989. /* If buf_buddy_alloc() allocated storage from the LRU list,
  2990. it released and reacquired buf_pool->mutex. Thus, we must
  2991. check the page_hash again, as it may have been modified. */
  2992. if (UNIV_UNLIKELY(lru)) {
  2993. watch_page = buf_page_hash_get_low(
  2994. buf_pool, space, offset, fold);
  2995. if (watch_page
  2996. && !buf_pool_watch_is_sentinel(buf_pool,
  2997. watch_page)) {
  2998. /* The block was added by some other thread. */
  2999. watch_page = NULL;
  3000. buf_buddy_free(buf_pool, data, zip_size, TRUE);
  3001. mutex_exit(&buf_pool->LRU_list_mutex);
  3002. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  3003. bpage = NULL;
  3004. goto func_exit;
  3005. }
  3006. }
  3007. bpage = buf_page_alloc_descriptor();
  3008. /* Initialize the buf_pool pointer. */
  3009. bpage->buf_pool_index = buf_pool_index(buf_pool);
  3010. page_zip_des_init(&bpage->zip);
  3011. page_zip_set_size(&bpage->zip, zip_size);
  3012. bpage->zip.data = data;
  3013. mutex_enter(&buf_pool->zip_mutex);
  3014. UNIV_MEM_DESC(bpage->zip.data,
  3015. page_zip_get_size(&bpage->zip), bpage);
  3016. buf_page_init_low(bpage);
  3017. bpage->state = BUF_BLOCK_ZIP_PAGE;
  3018. bpage->space = space;
  3019. bpage->offset = offset;
  3020. bpage->space_was_being_deleted = FALSE;
  3021. #ifdef UNIV_DEBUG
  3022. bpage->in_page_hash = FALSE;
  3023. bpage->in_zip_hash = FALSE;
  3024. bpage->in_flush_list = FALSE;
  3025. bpage->in_free_list = FALSE;
  3026. #endif /* UNIV_DEBUG */
  3027. bpage->in_LRU_list = FALSE;
  3028. ut_d(bpage->in_page_hash = TRUE);
  3029. if (UNIV_LIKELY_NULL(watch_page)) {
  3030. /* Preserve the reference count. */
  3031. ulint buf_fix_count = watch_page->buf_fix_count;
  3032. ut_a(buf_fix_count > 0);
  3033. bpage->buf_fix_count += buf_fix_count;
  3034. ut_ad(buf_pool_watch_is_sentinel(buf_pool, watch_page));
  3035. buf_pool_watch_remove(buf_pool, fold, watch_page);
  3036. }
  3037. HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold,
  3038. bpage);
  3039. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  3040. /* The block must be put to the LRU list, to the old blocks */
  3041. buf_LRU_add_block(bpage, TRUE/* to old blocks */);
  3042. #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
  3043. buf_LRU_insert_zip_clean(bpage);
  3044. #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
  3045. mutex_exit(&buf_pool->LRU_list_mutex);
  3046. buf_page_set_io_fix(bpage, BUF_IO_READ);
  3047. mutex_exit(&buf_pool->zip_mutex);
  3048. }
  3049. buf_pool_mutex_enter(buf_pool);
  3050. buf_pool->n_pend_reads++;
  3051. buf_pool_mutex_exit(buf_pool);
  3052. func_exit:
  3053. //buf_pool_mutex_exit(buf_pool);
  3054. if (mode == BUF_READ_IBUF_PAGES_ONLY) {
  3055. ibuf_mtr_commit(&mtr);
  3056. }
  3057. ut_ad(!bpage || buf_page_in_file(bpage));
  3058. return(bpage);
  3059. }
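/* Editor's note: an illustrative sketch of the intended call pattern
(simplified; error handling omitted). The read-ahead code is expected
to use this function roughly as

	bpage = buf_page_init_for_read(&err, BUF_READ_ANY_PAGE, space,
				       zip_size, FALSE,
				       tablespace_version, offset);
	if (bpage != NULL) {
		... post the asynchronous file read ...
	}

and when the read completes, the i/o handler calls
buf_page_io_complete(bpage), which clears the BUF_IO_READ fix and
releases the x-lock set on the frame above. */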
  3060. /********************************************************************//**
3061. Initializes a page in the buffer pool. The page is usually not read
3062. from a file, even if it cannot be found in the buffer pool. This is one
3063. of the functions which perform the state transition NOT_USED =>
3064. FILE_PAGE on a block (the other is buf_page_get_gen).
  3065. @return pointer to the block, page bufferfixed */
  3066. UNIV_INTERN
  3067. buf_block_t*
  3068. buf_page_create(
  3069. /*============*/
  3070. ulint space, /*!< in: space id */
  3071. ulint offset, /*!< in: offset of the page within space in units of
  3072. a page */
  3073. ulint zip_size,/*!< in: compressed page size, or 0 */
  3074. mtr_t* mtr) /*!< in: mini-transaction handle */
  3075. {
  3076. buf_frame_t* frame;
  3077. buf_block_t* block;
  3078. ulint fold;
  3079. buf_block_t* free_block = NULL;
  3080. ulint time_ms = ut_time_ms();
  3081. buf_pool_t* buf_pool = buf_pool_get(space, offset);
  3082. ut_ad(mtr);
  3083. ut_ad(mtr->state == MTR_ACTIVE);
  3084. ut_ad(space || !zip_size);
  3085. free_block = buf_LRU_get_free_block(buf_pool);
  3086. fold = buf_page_address_fold(space, offset);
  3087. retry:
  3088. //buf_pool_mutex_enter(buf_pool);
  3089. mutex_enter(&buf_pool->LRU_list_mutex);
  3090. rw_lock_x_lock(&buf_pool->page_hash_latch);
  3091. block = (buf_block_t*) buf_page_hash_get_low(
  3092. buf_pool, space, offset, fold);
  3093. if (UNIV_UNLIKELY(block && block->page.space_was_being_deleted)) {
  3094. mutex_t* block_mutex = buf_page_get_mutex_enter((buf_page_t*)block);
3095. /* This page is obsolete: discard it and retry */
  3096. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  3097. ut_a(block_mutex);
  3098. buf_LRU_free_block((buf_page_t*)block, TRUE, TRUE);
  3099. mutex_exit(&buf_pool->LRU_list_mutex);
  3100. mutex_exit(block_mutex);
  3101. goto retry;
  3102. }
  3103. if (block
  3104. && buf_page_in_file(&block->page)
  3105. && !buf_pool_watch_is_sentinel(buf_pool, &block->page)) {
  3106. #ifdef UNIV_IBUF_COUNT_DEBUG
  3107. ut_a(ibuf_count_get(space, offset) == 0);
  3108. #endif
  3109. #if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
  3110. block->page.file_page_was_freed = FALSE;
  3111. #endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */
  3112. /* Page can be found in buf_pool */
  3113. //buf_pool_mutex_exit(buf_pool);
  3114. mutex_exit(&buf_pool->LRU_list_mutex);
  3115. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  3116. buf_block_free(free_block);
  3117. return(buf_page_get_with_no_latch(space, zip_size,
  3118. offset, mtr));
  3119. }
  3120. /* If we get here, the page was not in buf_pool: init it there */
  3121. #ifdef UNIV_DEBUG
  3122. if (buf_debug_prints) {
  3123. fprintf(stderr, "Creating space %lu page %lu to buffer\n",
  3124. (ulong) space, (ulong) offset);
  3125. }
  3126. #endif /* UNIV_DEBUG */
  3127. block = free_block;
  3128. mutex_enter(&block->mutex);
  3129. buf_page_init(buf_pool, space, offset, fold, block);
  3130. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  3131. /* The block must be put to the LRU list */
  3132. buf_LRU_add_block(&block->page, FALSE);
  3133. buf_block_buf_fix_inc(block, __FILE__, __LINE__);
  3134. buf_pool->stat.n_pages_created++;
  3135. if (zip_size) {
  3136. void* data;
  3137. ibool lru;
  3138. /* Prevent race conditions during buf_buddy_alloc(),
  3139. which may release and reacquire buf_pool->mutex,
  3140. by IO-fixing and X-latching the block. */
  3141. buf_page_set_io_fix(&block->page, BUF_IO_READ);
  3142. rw_lock_x_lock(&block->lock);
  3143. page_zip_set_size(&block->page.zip, zip_size);
  3144. mutex_exit(&block->mutex);
  3145. /* buf_pool->mutex may be released and reacquired by
  3146. buf_buddy_alloc(). Thus, we must release block->mutex
  3147. in order not to break the latching order in
  3148. the reacquisition of buf_pool->mutex. We also must
  3149. defer this operation until after the block descriptor
  3150. has been added to buf_pool->LRU and buf_pool->page_hash. */
  3151. data = buf_buddy_alloc(buf_pool, zip_size, &lru, FALSE);
  3152. mutex_enter(&block->mutex);
  3153. block->page.zip.data = data;
  3154. /* To maintain the invariant
  3155. block->in_unzip_LRU_list
  3156. == buf_page_belongs_to_unzip_LRU(&block->page)
  3157. we have to add this block to unzip_LRU after
  3158. block->page.zip.data is set. */
  3159. ut_ad(buf_page_belongs_to_unzip_LRU(&block->page));
  3160. buf_unzip_LRU_add_block(block, FALSE);
  3161. buf_page_set_io_fix(&block->page, BUF_IO_NONE);
  3162. rw_lock_x_unlock(&block->lock);
  3163. }
  3164. buf_page_set_accessed(&block->page, time_ms);
  3165. //buf_pool_mutex_exit(buf_pool);
  3166. mutex_exit(&buf_pool->LRU_list_mutex);
  3167. mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
  3168. mutex_exit(&block->mutex);
  3169. /* Delete possible entries for the page from the insert buffer:
  3170. such can exist if the page belonged to an index which was dropped */
  3171. ibuf_merge_or_delete_for_page(NULL, space, offset, zip_size, TRUE);
  3172. /* Flush pages from the end of the LRU list if necessary */
  3173. buf_flush_free_margin(buf_pool, FALSE);
  3174. frame = block->frame;
  3175. memset(frame + FIL_PAGE_PREV, 0xff, 4);
  3176. memset(frame + FIL_PAGE_NEXT, 0xff, 4);
  3177. mach_write_to_2(frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED);
3178. /* Reset to zero the file flush lsn field in the page; if the first
3179. page of an ibdata file is 'created' into the buffer pool by this
3180. function, then we lose the original contents of the file flush lsn stamp.
3181. Then InnoDB could, during crash recovery, print a big, false corruption
3182. warning if the stamp contains an lsn bigger than the ib_logfile lsn. */
  3183. memset(frame + FIL_PAGE_FILE_FLUSH_LSN, 0, 8);
  3184. #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
  3185. ut_a(++buf_dbg_counter % 357 || buf_validate());
  3186. #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
  3187. #ifdef UNIV_IBUF_COUNT_DEBUG
  3188. ut_a(ibuf_count_get(buf_block_get_space(block),
  3189. buf_block_get_page_no(block)) == 0);
  3190. #endif
  3191. return(block);
  3192. }
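/* Editor's note: an illustrative sketch, not part of the original
source. Creating a brand-new page, e.g. when a file segment grows,
looks roughly like

	mtr_start(&mtr);
	block = buf_page_create(space, offset, zip_size, &mtr);
	... initialize the frame contents, writing redo log ...
	mtr_commit(&mtr);

The block is returned buffer-fixed (MTR_MEMO_BUF_FIX is pushed
above), and the fix is released at mtr_commit(). */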
  3193. /********************************************************************//**
3194. Marks the table whose tablespace is identified by bpage->space as corrupted.
3195. Also removes bpage from the LRU list.
  3196. @return TRUE if successful */
  3197. static
  3198. ibool
  3199. buf_mark_space_corrupt(
  3200. /*===================*/
  3201. buf_page_t* bpage) /*!< in: pointer to the block in question */
  3202. {
  3203. buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
  3204. const ibool uncompressed = (buf_page_get_state(bpage)
  3205. == BUF_BLOCK_FILE_PAGE);
  3206. ulint space = bpage->space;
  3207. ibool ret = TRUE;
  3208. /* First unfix and release lock on the bpage */
  3209. //buf_pool_mutex_enter(buf_pool);
  3210. mutex_enter(&buf_pool->LRU_list_mutex);
  3211. rw_lock_x_lock(&buf_pool->page_hash_latch);
  3212. mutex_enter(buf_page_get_mutex(bpage));
  3213. ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_READ);
  3214. ut_ad(bpage->buf_fix_count == 0);
  3215. /* Set BUF_IO_NONE before we remove the block from LRU list */
  3216. buf_page_set_io_fix(bpage, BUF_IO_NONE);
  3217. if (uncompressed) {
  3218. rw_lock_x_unlock_gen(
  3219. &((buf_block_t*) bpage)->lock,
  3220. BUF_IO_READ);
  3221. }
  3222. /* Find the table with specified space id, and mark it corrupted */
  3223. if (dict_set_corrupted_by_space(space)) {
  3224. buf_LRU_free_one_page(bpage);
  3225. } else {
  3226. ret = FALSE;
  3227. }
  3228. buf_pool_mutex_enter(buf_pool);
  3229. ut_ad(buf_pool->n_pend_reads > 0);
  3230. buf_pool->n_pend_reads--;
  3231. buf_pool_mutex_exit(buf_pool);
  3232. mutex_exit(buf_page_get_mutex(bpage));
  3233. //buf_pool_mutex_exit(buf_pool);
  3234. mutex_exit(&buf_pool->LRU_list_mutex);
  3235. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  3236. return(ret);
  3237. }
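/* Editor's note: for the intended use of the return value, see the
read path of buf_page_io_complete() below, which does roughly

	if (bpage->space > TRX_SYS_SPACE
	    && buf_mark_space_corrupt(bpage)) {
		return;
	}

i.e. if the table can be marked corrupt, the server survives the bad
page; otherwise processing ends with ut_error as before. */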
  3238. /********************************************************************//**
  3239. Completes an asynchronous read or write request of a file page to or from
  3240. the buffer pool. */
  3241. UNIV_INTERN
  3242. void
  3243. buf_page_io_complete(
  3244. /*=================*/
  3245. buf_page_t* bpage) /*!< in: pointer to the block in question */
  3246. {
  3247. enum buf_io_fix io_type;
  3248. buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
  3249. const ibool uncompressed = (buf_page_get_state(bpage)
  3250. == BUF_BLOCK_FILE_PAGE);
  3251. ibool have_LRU_mutex = FALSE;
  3252. mutex_t* block_mutex;
  3253. ut_a(buf_page_in_file(bpage));
3254. /* We do not need to protect io_fix here with a mutex when reading
3255. it, because this is the only function where we can change the value
  3256. from BUF_IO_READ or BUF_IO_WRITE to some other value, and our code
  3257. ensures that this is the only thread that handles the i/o for this
  3258. block. */
  3259. io_type = buf_page_get_io_fix(bpage);
  3260. ut_ad(io_type == BUF_IO_READ || io_type == BUF_IO_WRITE);
  3261. if (io_type == BUF_IO_READ) {
  3262. ulint read_page_no;
  3263. ulint read_space_id;
  3264. byte* frame;
  3265. if (buf_page_get_zip_size(bpage)) {
  3266. frame = bpage->zip.data;
  3267. buf_pool->n_pend_unzip++;
  3268. if (uncompressed
  3269. && !buf_zip_decompress((buf_block_t*) bpage,
  3270. FALSE)) {
  3271. buf_pool->n_pend_unzip--;
  3272. goto corrupt;
  3273. }
  3274. buf_pool->n_pend_unzip--;
  3275. } else {
  3276. ut_a(uncompressed);
  3277. frame = ((buf_block_t*) bpage)->frame;
  3278. }
  3279. /* If this page is not uninitialized and not in the
  3280. doublewrite buffer, then the page number and space id
  3281. should be the same as in block. */
  3282. read_page_no = mach_read_from_4(frame + FIL_PAGE_OFFSET);
  3283. read_space_id = mach_read_from_4(
  3284. frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
  3285. if ((bpage->space == TRX_SYS_SPACE
  3286. || (srv_doublewrite_file && bpage->space == TRX_DOUBLEWRITE_SPACE))
  3287. && trx_doublewrite_page_inside(bpage->offset)) {
  3288. ut_print_timestamp(stderr);
  3289. fprintf(stderr,
  3290. " InnoDB: Error: reading page %lu\n"
  3291. "InnoDB: which is in the"
  3292. " doublewrite buffer!\n",
  3293. (ulong) bpage->offset);
  3294. } else if (!read_space_id && !read_page_no) {
  3295. /* This is likely an uninitialized page. */
  3296. } else if ((bpage->space
  3297. && bpage->space != read_space_id)
  3298. || bpage->offset != read_page_no) {
  3299. /* We did not compare space_id to read_space_id
  3300. if bpage->space == 0, because the field on the
  3301. page may contain garbage in MySQL < 4.1.1,
  3302. which only supported bpage->space == 0. */
  3303. ut_print_timestamp(stderr);
  3304. fprintf(stderr,
  3305. " InnoDB: Error: space id and page n:o"
  3306. " stored in the page\n"
  3307. "InnoDB: read in are %lu:%lu,"
  3308. " should be %lu:%lu!\n",
  3309. (ulong) read_space_id, (ulong) read_page_no,
  3310. (ulong) bpage->space,
  3311. (ulong) bpage->offset);
  3312. }
  3313. if (!srv_pass_corrupt_table || !bpage->is_corrupt) {
  3314. /* From version 3.23.38 up we store the page checksum
3315. in the first 4 bytes of the page end lsn field */
  3316. if (buf_page_is_corrupted(frame,
  3317. buf_page_get_zip_size(bpage))) {
  3318. corrupt:
  3319. fprintf(stderr,
  3320. "InnoDB: Database page corruption on disk"
  3321. " or a failed\n"
  3322. "InnoDB: file read of page %lu.\n"
  3323. "InnoDB: You may have to recover"
  3324. " from a backup.\n",
  3325. (ulong) bpage->offset);
  3326. buf_page_print(frame, buf_page_get_zip_size(bpage));
  3327. fprintf(stderr,
  3328. "InnoDB: Database page corruption on disk"
  3329. " or a failed\n"
  3330. "InnoDB: file read of page %lu.\n"
  3331. "InnoDB: You may have to recover"
  3332. " from a backup.\n",
  3333. (ulong) bpage->offset);
  3334. fputs("InnoDB: It is also possible that"
  3335. " your operating\n"
  3336. "InnoDB: system has corrupted its"
  3337. " own file cache\n"
  3338. "InnoDB: and rebooting your computer"
  3339. " removes the\n"
  3340. "InnoDB: error.\n"
  3341. "InnoDB: If the corrupt page is an index page\n"
  3342. "InnoDB: you can also try to"
  3343. " fix the corruption\n"
  3344. "InnoDB: by dumping, dropping,"
  3345. " and reimporting\n"
  3346. "InnoDB: the corrupt table."
  3347. " You can use CHECK\n"
  3348. "InnoDB: TABLE to scan your"
  3349. " table for corruption.\n"
  3350. "InnoDB: See also "
  3351. REFMAN "forcing-innodb-recovery.html\n"
  3352. "InnoDB: about forcing recovery.\n", stderr);
  3353. if (srv_pass_corrupt_table && !trx_sys_sys_space(bpage->space)
  3354. && bpage->space < SRV_LOG_SPACE_FIRST_ID) {
  3355. trx_t* trx;
  3356. fprintf(stderr,
  3357. "InnoDB: space %u will be treated as corrupt.\n",
  3358. bpage->space);
  3359. fil_space_set_corrupt(bpage->space);
  3360. trx = innobase_get_trx();
  3361. if (trx && trx->dict_operation_lock_mode == RW_X_LATCH) {
  3362. dict_table_set_corrupt_by_space(bpage->space, FALSE);
  3363. } else {
  3364. dict_table_set_corrupt_by_space(bpage->space, TRUE);
  3365. }
  3366. bpage->is_corrupt = TRUE;
  3367. } else
  3368. if (srv_force_recovery < SRV_FORCE_IGNORE_CORRUPT) {
  3369. /* If page space id is larger than TRX_SYS_SPACE
  3370. (0), we will attempt to mark the corresponding
  3371. table as corrupted instead of crashing server */
  3372. if (bpage->space > TRX_SYS_SPACE
  3373. && buf_mark_space_corrupt(bpage)) {
  3374. return;
  3375. } else {
  3376. fputs("InnoDB: Ending processing"
  3377. " because of"
  3378. " a corrupt database page.\n",
  3379. stderr);
  3380. ut_error;
  3381. }
  3382. }
  3383. }
3384. }
  3385. if (recv_recovery_is_on()) {
  3386. /* Pages must be uncompressed for crash recovery. */
  3387. ut_a(uncompressed);
  3388. recv_recover_page(TRUE, (buf_block_t*) bpage);
  3389. }
  3390. if (uncompressed && !recv_no_ibuf_operations) {
  3391. ibuf_merge_or_delete_for_page(
  3392. /* Delete possible entries, if bpage is_corrupt */
  3393. (srv_pass_corrupt_table && bpage->is_corrupt) ? NULL :
  3394. (buf_block_t*) bpage, bpage->space,
  3395. bpage->offset, buf_page_get_zip_size(bpage),
  3396. (srv_pass_corrupt_table && bpage->is_corrupt) ? FALSE :
  3397. TRUE);
  3398. }
  3399. }
  3400. if (io_type == BUF_IO_WRITE
  3401. && (
  3402. #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
  3403. buf_page_get_state(bpage) == BUF_BLOCK_ZIP_DIRTY ||
  3404. #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
  3405. buf_page_get_flush_type(bpage) == BUF_FLUSH_LRU)) {
  3406. /* to keep consistency at buf_LRU_insert_zip_clean() */
  3407. have_LRU_mutex = TRUE; /* optimistic */
  3408. }
  3409. retry_mutex:
  3410. if (have_LRU_mutex)
  3411. mutex_enter(&buf_pool->LRU_list_mutex);
  3412. block_mutex = buf_page_get_mutex_enter(bpage);
  3413. ut_a(block_mutex);
  3414. if (io_type == BUF_IO_WRITE
  3415. && (buf_page_get_state(bpage) == BUF_BLOCK_ZIP_DIRTY
  3416. || buf_page_get_flush_type(bpage) == BUF_FLUSH_LRU)
  3417. && !have_LRU_mutex) {
  3418. mutex_exit(block_mutex);
  3419. have_LRU_mutex = TRUE;
  3420. goto retry_mutex;
  3421. }
  3422. buf_pool_mutex_enter(buf_pool);
  3423. #ifdef UNIV_IBUF_COUNT_DEBUG
  3424. if (io_type == BUF_IO_WRITE || uncompressed) {
  3425. /* For BUF_IO_READ of compressed-only blocks, the
  3426. buffered operations will be merged by buf_page_get_gen()
  3427. after the block has been uncompressed. */
  3428. ut_a(ibuf_count_get(bpage->space, bpage->offset) == 0);
  3429. }
  3430. #endif
  3431. /* Because this thread which does the unlocking is not the same that
  3432. did the locking, we use a pass value != 0 in unlock, which simply
  3433. removes the newest lock debug record, without checking the thread
  3434. id. */
  3435. buf_page_set_io_fix(bpage, BUF_IO_NONE);
  3436. switch (io_type) {
  3437. case BUF_IO_READ:
  3438. /* NOTE that the call to ibuf may have moved the ownership of
  3439. the x-latch to this OS thread: do not let this confuse you in
  3440. debugging! */
  3441. ut_a(!have_LRU_mutex);
  3442. ut_ad(buf_pool->n_pend_reads > 0);
  3443. buf_pool->n_pend_reads--;
  3444. buf_pool->stat.n_pages_read++;
  3445. if (uncompressed) {
  3446. rw_lock_x_unlock_gen(&((buf_block_t*) bpage)->lock,
  3447. BUF_IO_READ);
  3448. }
  3449. break;
  3450. case BUF_IO_WRITE:
  3451. /* Write means a flush operation: call the completion
  3452. routine in the flush system */
  3453. buf_flush_write_complete(bpage);
  3454. if (have_LRU_mutex)
  3455. mutex_exit(&buf_pool->LRU_list_mutex);
  3456. if (uncompressed) {
  3457. rw_lock_s_unlock_gen(&((buf_block_t*) bpage)->lock,
  3458. BUF_IO_WRITE);
  3459. }
  3460. buf_pool->stat.n_pages_written++;
  3461. break;
  3462. default:
  3463. ut_error;
  3464. }
  3465. #ifdef UNIV_DEBUG
  3466. if (buf_debug_prints) {
  3467. fprintf(stderr, "Has %s page space %lu page no %lu\n",
  3468. io_type == BUF_IO_READ ? "read" : "written",
  3469. (ulong) buf_page_get_space(bpage),
  3470. (ulong) buf_page_get_page_no(bpage));
  3471. }
  3472. #endif /* UNIV_DEBUG */
  3473. buf_pool_mutex_exit(buf_pool);
  3474. mutex_exit(block_mutex);
  3475. }
  3476. /********************************************************************//**
3477. Returns the block at position n_block in the chunks of the given buffer pool instance. */
  3478. UNIV_INTERN
  3479. buf_block_t*
  3480. buf_page_from_array(
  3481. /*================*/
3482. buf_pool_t* buf_pool,/*!< in: buffer pool instance */
3483. ulint n_block) /*!< in: block position within the pool */
  3484. {
  3485. ulint n_chunks, offset;
  3486. buf_chunk_t* chunk;
  3487. ut_a(n_block < buf_pool->curr_size);
  3488. chunk = buf_pool->chunks;
  3489. offset = n_block;
  3490. for (n_chunks = buf_pool->n_chunks; n_chunks--; chunk++) {
  3491. if (offset < chunk->size) {
  3492. return(&chunk->blocks[offset]);
  3493. }
  3494. offset -= chunk->size;
  3495. }
  3496. ut_error;
  3497. return(NULL);
  3498. }
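/* Editor's note, a worked example with hypothetical numbers: with
two chunks of 512 blocks each, n_block == 700 resolves as follows:
700 >= 512, so the first chunk is skipped and offset becomes
700 - 512 = 188; since 188 < 512, &chunks[1].blocks[188] is
returned. */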
  3499. /*********************************************************************//**
  3500. Asserts that all file pages in the buffer are in a replaceable state.
  3501. @return TRUE */
  3502. static
  3503. ibool
  3504. buf_all_freed_instance(
  3505. /*===================*/
3506. buf_pool_t* buf_pool) /*!< in: buffer pool instance */
  3507. {
  3508. ulint i;
  3509. buf_chunk_t* chunk;
  3510. ut_ad(buf_pool);
  3511. //buf_pool_mutex_enter(buf_pool);
  3512. mutex_enter(&buf_pool->LRU_list_mutex);
  3513. rw_lock_x_lock(&buf_pool->page_hash_latch);
  3514. chunk = buf_pool->chunks;
  3515. for (i = buf_pool->n_chunks; i--; chunk++) {
  3516. const buf_block_t* block = buf_chunk_not_freed(chunk);
  3517. if (UNIV_LIKELY_NULL(block)) {
  3518. fprintf(stderr,
  3519. "Page %lu %lu still fixed or dirty\n",
  3520. (ulong) block->page.space,
  3521. (ulong) block->page.offset);
  3522. ut_error;
  3523. }
  3524. }
  3525. //buf_pool_mutex_exit(buf_pool);
  3526. mutex_exit(&buf_pool->LRU_list_mutex);
  3527. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  3528. return(TRUE);
  3529. }
  3530. /*********************************************************************//**
  3531. Invalidates file pages in one buffer pool instance */
  3532. static
  3533. void
  3534. buf_pool_invalidate_instance(
  3535. /*=========================*/
  3536. buf_pool_t* buf_pool) /*!< in: buffer pool instance */
  3537. {
  3538. ibool freed;
  3539. enum buf_flush i;
  3540. buf_pool_mutex_enter(buf_pool);
  3541. for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
3542. /* As this function is called during startup and
3543. during the redo application phase of recovery, InnoDB
3544. is single-threaded (apart from the I/O helper threads) at
3545. this stage. No new write batch can be in the initialization
3546. stage at this point. */
  3547. ut_ad(buf_pool->init_flush[i] == FALSE);
  3548. /* However, it is possible that a write batch that has
  3549. been posted earlier is still not complete. For buffer
  3550. pool invalidation to proceed we must ensure there is NO
  3551. write activity happening. */
  3552. if (buf_pool->n_flush[i] > 0) {
  3553. buf_pool_mutex_exit(buf_pool);
  3554. buf_flush_wait_batch_end(buf_pool, i);
  3555. buf_pool_mutex_enter(buf_pool);
  3556. }
  3557. }
  3558. buf_pool_mutex_exit(buf_pool);
  3559. ut_ad(buf_all_freed_instance(buf_pool));
  3560. freed = TRUE;
  3561. while (freed) {
  3562. freed = buf_LRU_search_and_free_block(buf_pool, 100);
  3563. }
  3564. //buf_pool_mutex_enter(buf_pool);
  3565. mutex_enter(&buf_pool->LRU_list_mutex);
  3566. ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
  3567. ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);
  3568. buf_pool->freed_page_clock = 0;
  3569. buf_pool->LRU_old = NULL;
  3570. buf_pool->LRU_old_len = 0;
  3571. buf_pool->LRU_flush_ended = 0;
  3572. memset(&buf_pool->stat, 0x00, sizeof(buf_pool->stat));
  3573. buf_refresh_io_stats(buf_pool);
  3574. //buf_pool_mutex_exit(buf_pool);
  3575. mutex_exit(&buf_pool->LRU_list_mutex);
  3576. }
  3577. /*********************************************************************//**
  3578. Invalidates the file pages in the buffer pool when an archive recovery is
  3579. completed. All the file pages buffered must be in a replaceable state when
  3580. this function is called: not latched and not modified. */
  3581. UNIV_INTERN
  3582. void
  3583. buf_pool_invalidate(void)
  3584. /*=====================*/
  3585. {
  3586. ulint i;
  3587. for (i = 0; i < srv_buf_pool_instances; i++) {
  3588. buf_pool_invalidate_instance(buf_pool_from_array(i));
  3589. }
  3590. }
  3591. #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
  3592. /*********************************************************************//**
  3593. Validates data in one buffer pool instance
  3594. @return TRUE */
  3595. static
  3596. ibool
  3597. buf_pool_validate_instance(
  3598. /*=======================*/
  3599. buf_pool_t* buf_pool) /*!< in: buffer pool instance */
  3600. {
  3601. buf_page_t* b;
  3602. buf_chunk_t* chunk;
  3603. ulint i;
  3604. ulint n_single_flush = 0;
  3605. ulint n_lru_flush = 0;
  3606. ulint n_list_flush = 0;
  3607. ulint n_lru = 0;
  3608. ulint n_flush = 0;
  3609. ulint n_free = 0;
  3610. ulint n_zip = 0;
  3611. ut_ad(buf_pool);
  3612. //buf_pool_mutex_enter(buf_pool);
  3613. mutex_enter(&buf_pool->LRU_list_mutex);
  3614. rw_lock_x_lock(&buf_pool->page_hash_latch);
3615. /* Because of the new latching order, not everything can be validated consistently here; see the disabled checks at the end of this function. */
  3616. chunk = buf_pool->chunks;
  3617. /* Check the uncompressed blocks. */
  3618. for (i = buf_pool->n_chunks; i--; chunk++) {
  3619. ulint j;
  3620. buf_block_t* block = chunk->blocks;
  3621. for (j = chunk->size; j--; block++) {
  3622. mutex_enter(&block->mutex);
  3623. switch (buf_block_get_state(block)) {
  3624. case BUF_BLOCK_ZIP_FREE:
  3625. case BUF_BLOCK_ZIP_PAGE:
  3626. case BUF_BLOCK_ZIP_DIRTY:
  3627. /* These should only occur on
  3628. zip_clean, zip_free[], or flush_list. */
  3629. ut_error;
  3630. break;
  3631. case BUF_BLOCK_FILE_PAGE:
  3632. ut_a(buf_page_hash_get(buf_pool,
  3633. buf_block_get_space(
  3634. block),
  3635. buf_block_get_page_no(
  3636. block))
  3637. == &block->page);
  3638. #ifdef UNIV_IBUF_COUNT_DEBUG
  3639. ut_a(buf_page_get_io_fix(&block->page)
  3640. == BUF_IO_READ
  3641. || !ibuf_count_get(buf_block_get_space(
  3642. block),
  3643. buf_block_get_page_no(
  3644. block)));
  3645. #endif
  3646. switch (buf_page_get_io_fix(&block->page)) {
  3647. case BUF_IO_NONE:
  3648. break;
  3649. case BUF_IO_WRITE:
  3650. switch (buf_page_get_flush_type(
  3651. &block->page)) {
  3652. case BUF_FLUSH_LRU:
  3653. n_lru_flush++;
  3654. ut_a(rw_lock_is_locked(
  3655. &block->lock,
  3656. RW_LOCK_SHARED));
  3657. break;
  3658. case BUF_FLUSH_LIST:
  3659. n_list_flush++;
  3660. break;
  3661. case BUF_FLUSH_SINGLE_PAGE:
  3662. n_single_flush++;
  3663. break;
  3664. default:
  3665. ut_error;
  3666. }
  3667. break;
  3668. case BUF_IO_READ:
  3669. ut_a(rw_lock_is_locked(&block->lock,
  3670. RW_LOCK_EX));
  3671. break;
  3672. case BUF_IO_PIN:
  3673. break;
  3674. }
  3675. n_lru++;
  3676. break;
  3677. case BUF_BLOCK_NOT_USED:
  3678. n_free++;
  3679. break;
  3680. case BUF_BLOCK_READY_FOR_USE:
  3681. case BUF_BLOCK_MEMORY:
  3682. case BUF_BLOCK_REMOVE_HASH:
  3683. /* do nothing */
  3684. break;
  3685. }
  3686. mutex_exit(&block->mutex);
  3687. }
  3688. }
  3689. mutex_enter(&buf_pool->zip_mutex);
  3690. /* Check clean compressed-only blocks. */
  3691. for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
  3692. b = UT_LIST_GET_NEXT(zip_list, b)) {
  3693. ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
  3694. switch (buf_page_get_io_fix(b)) {
  3695. case BUF_IO_NONE:
  3696. case BUF_IO_PIN:
  3697. /* All clean blocks should be I/O-unfixed. */
  3698. break;
  3699. case BUF_IO_READ:
  3700. /* In buf_LRU_free_block(), we temporarily set
  3701. b->io_fix = BUF_IO_READ for a newly allocated
  3702. control block in order to prevent
  3703. buf_page_get_gen() from decompressing the block. */
  3704. break;
  3705. default:
  3706. ut_error;
  3707. break;
  3708. }
  3709. /* It is OK to read oldest_modification here because
  3710. we have acquired buf_pool->zip_mutex above which acts
  3711. as the 'block->mutex' for these bpages. */
  3712. ut_a(!b->oldest_modification);
  3713. ut_a(buf_page_hash_get(buf_pool, b->space, b->offset) == b);
  3714. n_lru++;
  3715. n_zip++;
  3716. }
  3717. /* Check dirty blocks. */
  3718. buf_flush_list_mutex_enter(buf_pool);
  3719. for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
  3720. b = UT_LIST_GET_NEXT(flush_list, b)) {
  3721. ut_ad(b->in_flush_list);
  3722. ut_a(b->oldest_modification);
  3723. n_flush++;
  3724. switch (buf_page_get_state(b)) {
  3725. case BUF_BLOCK_ZIP_DIRTY:
  3726. n_lru++;
  3727. n_zip++;
  3728. switch (buf_page_get_io_fix(b)) {
  3729. case BUF_IO_NONE:
  3730. case BUF_IO_READ:
  3731. case BUF_IO_PIN:
  3732. break;
  3733. case BUF_IO_WRITE:
  3734. switch (buf_page_get_flush_type(b)) {
  3735. case BUF_FLUSH_LRU:
  3736. n_lru_flush++;
  3737. break;
  3738. case BUF_FLUSH_LIST:
  3739. n_list_flush++;
  3740. break;
  3741. case BUF_FLUSH_SINGLE_PAGE:
  3742. n_single_flush++;
  3743. break;
  3744. default:
  3745. ut_error;
  3746. }
  3747. break;
  3748. }
  3749. break;
  3750. case BUF_BLOCK_FILE_PAGE:
  3751. /* uncompressed page */
  3752. break;
  3753. case BUF_BLOCK_ZIP_FREE:
  3754. case BUF_BLOCK_ZIP_PAGE:
  3755. case BUF_BLOCK_NOT_USED:
  3756. case BUF_BLOCK_READY_FOR_USE:
  3757. case BUF_BLOCK_MEMORY:
  3758. case BUF_BLOCK_REMOVE_HASH:
  3759. ut_error;
  3760. break;
  3761. }
  3762. ut_a(buf_page_hash_get(buf_pool, b->space, b->offset) == b);
  3763. }
  3764. ut_a(UT_LIST_GET_LEN(buf_pool->flush_list) == n_flush);
  3765. buf_flush_list_mutex_exit(buf_pool);
  3766. mutex_exit(&buf_pool->zip_mutex);
  3767. if (n_lru + n_free > buf_pool->curr_size + n_zip) {
  3768. fprintf(stderr, "n LRU %lu, n free %lu, pool %lu zip %lu\n",
  3769. (ulong) n_lru, (ulong) n_free,
  3770. (ulong) buf_pool->curr_size, (ulong) n_zip);
  3771. ut_error;
  3772. }
  3773. ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == n_lru);
3774. /* Because of the latching order with block->mutex, we cannot acquire the mutexes needed for the following checks, so they are disabled: */
  3775. /*
  3776. if (UT_LIST_GET_LEN(buf_pool->free) != n_free) {
  3777. fprintf(stderr, "Free list len %lu, free blocks %lu\n",
  3778. (ulong) UT_LIST_GET_LEN(buf_pool->free),
  3779. (ulong) n_free);
  3780. ut_error;
  3781. }
  3782. ut_a(buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE] == n_single_flush);
  3783. ut_a(buf_pool->n_flush[BUF_FLUSH_LIST] == n_list_flush);
  3784. ut_a(buf_pool->n_flush[BUF_FLUSH_LRU] == n_lru_flush);
  3785. */
  3786. //buf_pool_mutex_exit(buf_pool);
  3787. mutex_exit(&buf_pool->LRU_list_mutex);
  3788. rw_lock_x_unlock(&buf_pool->page_hash_latch);
  3789. ut_a(buf_LRU_validate());
  3790. ut_a(buf_flush_validate(buf_pool));
  3791. return(TRUE);
  3792. }
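/* Editor's note on the n_lru + n_free check above: the sum may
legitimately exceed buf_pool->curr_size by up to n_zip, because
compressed-only blocks (BUF_BLOCK_ZIP_PAGE, BUF_BLOCK_ZIP_DIRTY) own
a page descriptor but no uncompressed frame. For example (hypothetical
numbers), n_lru = 1000, n_free = 50, curr_size = 1024 and n_zip = 30
passes, since 1050 <= 1024 + 30. */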
  3793. /*********************************************************************//**
  3794. Validates the buffer buf_pool data structure.
  3795. @return TRUE */
  3796. UNIV_INTERN
  3797. ibool
  3798. buf_validate(void)
  3799. /*==============*/
  3800. {
  3801. ulint i;
  3802. for (i = 0; i < srv_buf_pool_instances; i++) {
  3803. buf_pool_t* buf_pool;
  3804. buf_pool = buf_pool_from_array(i);
  3805. buf_pool_validate_instance(buf_pool);
  3806. }
  3807. return(TRUE);
  3808. }
  3809. #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
  3810. #if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
  3811. /*********************************************************************//**
  3812. Prints info of the buffer buf_pool data structure for one instance. */
  3813. static
  3814. void
  3815. buf_print_instance(
  3816. /*===============*/
3817. buf_pool_t* buf_pool) /*!< in: buffer pool instance */
  3818. {
  3819. index_id_t* index_ids;
  3820. ulint* counts;
  3821. ulint size;
  3822. ulint i;
  3823. ulint j;
  3824. index_id_t id;
  3825. ulint n_found;
  3826. buf_chunk_t* chunk;
  3827. dict_index_t* index;
  3828. ut_ad(buf_pool);
  3829. size = buf_pool->curr_size;
  3830. index_ids = mem_alloc(size * sizeof *index_ids);
  3831. counts = mem_alloc(sizeof(ulint) * size);
  3832. //buf_pool_mutex_enter(buf_pool);
  3833. mutex_enter(&buf_pool->LRU_list_mutex);
  3834. mutex_enter(&buf_pool->free_list_mutex);
  3835. buf_flush_list_mutex_enter(buf_pool);
  3836. fprintf(stderr,
  3837. "buf_pool size %lu\n"
  3838. "database pages %lu\n"
  3839. "free pages %lu\n"
  3840. "modified database pages %lu\n"
  3841. "n pending decompressions %lu\n"
  3842. "n pending reads %lu\n"
  3843. "n pending flush LRU %lu list %lu single page %lu\n"
  3844. "pages made young %lu, not young %lu\n"
  3845. "pages read %lu, created %lu, written %lu\n",
  3846. (ulong) size,
  3847. (ulong) UT_LIST_GET_LEN(buf_pool->LRU),
  3848. (ulong) UT_LIST_GET_LEN(buf_pool->free),
  3849. (ulong) UT_LIST_GET_LEN(buf_pool->flush_list),
  3850. (ulong) buf_pool->n_pend_unzip,
  3851. (ulong) buf_pool->n_pend_reads,
  3852. (ulong) buf_pool->n_flush[BUF_FLUSH_LRU],
  3853. (ulong) buf_pool->n_flush[BUF_FLUSH_LIST],
  3854. (ulong) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE],
  3855. (ulong) buf_pool->stat.n_pages_made_young,
  3856. (ulong) buf_pool->stat.n_pages_not_made_young,
  3857. (ulong) buf_pool->stat.n_pages_read,
  3858. (ulong) buf_pool->stat.n_pages_created,
  3859. (ulong) buf_pool->stat.n_pages_written);
  3860. buf_flush_list_mutex_exit(buf_pool);
  3861. /* Count the number of blocks belonging to each index in the buffer */
  3862. n_found = 0;
  3863. chunk = buf_pool->chunks;
  3864. for (i = buf_pool->n_chunks; i--; chunk++) {
  3865. buf_block_t* block = chunk->blocks;
  3866. ulint n_blocks = chunk->size;
  3867. for (; n_blocks--; block++) {
  3868. const buf_frame_t* frame = block->frame;
  3869. if (fil_page_get_type(frame) == FIL_PAGE_INDEX) {
  3870. id = btr_page_get_index_id(frame);
  3871. /* Look for the id in the index_ids array */
  3872. j = 0;
  3873. while (j < n_found) {
  3874. if (index_ids[j] == id) {
  3875. counts[j]++;
  3876. break;
  3877. }
  3878. j++;
  3879. }
  3880. if (j == n_found) {
  3881. n_found++;
  3882. index_ids[j] = id;
  3883. counts[j] = 1;
  3884. }
  3885. }
  3886. }
  3887. }
  3888. //buf_pool_mutex_exit(buf_pool);
  3889. mutex_exit(&buf_pool->LRU_list_mutex);
  3890. mutex_exit(&buf_pool->free_list_mutex);
  3891. for (i = 0; i < n_found; i++) {
  3892. index = dict_index_get_if_in_cache(index_ids[i]);
  3893. fprintf(stderr,
  3894. "Block count for index %llu in buffer is about %lu",
  3895. (ullint) index_ids[i],
  3896. (ulong) counts[i]);
  3897. if (index) {
  3898. putc(' ', stderr);
  3899. dict_index_name_print(stderr, NULL, index);
  3900. }
  3901. putc('\n', stderr);
  3902. }
  3903. mem_free(index_ids);
  3904. mem_free(counts);
  3905. ut_a(buf_pool_validate_instance(buf_pool));
  3906. }
  3907. /*********************************************************************//**
  3908. Prints info of the buffer buf_pool data structure. */
  3909. UNIV_INTERN
  3910. void
  3911. buf_print(void)
  3912. /*===========*/
  3913. {
  3914. ulint i;
  3915. for (i = 0; i < srv_buf_pool_instances; i++) {
  3916. buf_pool_t* buf_pool;
  3917. buf_pool = buf_pool_from_array(i);
  3918. buf_print_instance(buf_pool);
  3919. }
  3920. }
  3921. #endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
  3922. #ifdef UNIV_DEBUG
  3923. /*********************************************************************//**
  3924. Returns the number of latched pages in the buffer pool.
  3925. @return number of latched pages */
  3926. UNIV_INTERN
  3927. ulint
  3928. buf_get_latched_pages_number_instance(
  3929. /*==================================*/
  3930. buf_pool_t* buf_pool) /*!< in: buffer pool instance */
  3931. {
  3932. buf_page_t* b;
  3933. ulint i;
  3934. buf_chunk_t* chunk;
  3935. ulint fixed_pages_number = 0;
  3936. //buf_pool_mutex_enter(buf_pool);
  3937. chunk = buf_pool->chunks;
  3938. for (i = buf_pool->n_chunks; i--; chunk++) {
  3939. buf_block_t* block;
  3940. ulint j;
  3941. block = chunk->blocks;
  3942. for (j = chunk->size; j--; block++) {
  3943. if (buf_block_get_state(block)
  3944. != BUF_BLOCK_FILE_PAGE) {
  3945. continue;
  3946. }
  3947. mutex_enter(&block->mutex);
  3948. if (block->page.buf_fix_count != 0
  3949. || buf_page_get_io_fix(&block->page)
  3950. != BUF_IO_NONE) {
  3951. fixed_pages_number++;
  3952. }
  3953. mutex_exit(&block->mutex);
  3954. }
  3955. }
  3956. mutex_enter(&buf_pool->zip_mutex);
  3957. /* Traverse the lists of clean and dirty compressed-only blocks. */
  3958. for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
  3959. b = UT_LIST_GET_NEXT(zip_list, b)) {
  3960. ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
  3961. ut_a(buf_page_get_io_fix(b) != BUF_IO_WRITE);
  3962. if (b->buf_fix_count != 0
  3963. || buf_page_get_io_fix(b) != BUF_IO_NONE) {
  3964. fixed_pages_number++;
  3965. }
  3966. }
  3967. buf_flush_list_mutex_enter(buf_pool);
  3968. for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
  3969. b = UT_LIST_GET_NEXT(flush_list, b)) {
  3970. ut_ad(b->in_flush_list);
  3971. switch (buf_page_get_state(b)) {
  3972. case BUF_BLOCK_ZIP_DIRTY:
  3973. if (b->buf_fix_count != 0
  3974. || buf_page_get_io_fix(b) != BUF_IO_NONE) {
  3975. fixed_pages_number++;
  3976. }
  3977. break;
  3978. case BUF_BLOCK_FILE_PAGE:
  3979. /* uncompressed page */
  3980. break;
  3981. case BUF_BLOCK_ZIP_FREE:
  3982. case BUF_BLOCK_ZIP_PAGE:
  3983. case BUF_BLOCK_NOT_USED:
  3984. case BUF_BLOCK_READY_FOR_USE:
  3985. case BUF_BLOCK_MEMORY:
  3986. case BUF_BLOCK_REMOVE_HASH:
  3987. ut_error;
  3988. break;
  3989. }
  3990. }
  3991. buf_flush_list_mutex_exit(buf_pool);
  3992. mutex_exit(&buf_pool->zip_mutex);
  3993. //buf_pool_mutex_exit(buf_pool);
  3994. return(fixed_pages_number);
  3995. }
  3996. /*********************************************************************//**
  3997. Returns the number of latched pages in all the buffer pools.
  3998. @return number of latched pages */
  3999. UNIV_INTERN
  4000. ulint
  4001. buf_get_latched_pages_number(void)
  4002. /*==============================*/
  4003. {
  4004. ulint i;
  4005. ulint total_latched_pages = 0;
  4006. for (i = 0; i < srv_buf_pool_instances; i++) {
  4007. buf_pool_t* buf_pool;
  4008. buf_pool = buf_pool_from_array(i);
  4009. total_latched_pages += buf_get_latched_pages_number_instance(
  4010. buf_pool);
  4011. }
  4012. return(total_latched_pages);
  4013. }
  4014. #endif /* UNIV_DEBUG */
  4015. /*********************************************************************//**
  4016. Returns the number of pending buf pool ios.
  4017. @return number of pending I/O operations */
  4018. UNIV_INTERN
  4019. ulint
  4020. buf_get_n_pending_ios(void)
  4021. /*=======================*/
  4022. {
  4023. ulint i;
  4024. ulint pend_ios = 0;
  4025. for (i = 0; i < srv_buf_pool_instances; i++) {
  4026. buf_pool_t* buf_pool;
  4027. buf_pool = buf_pool_from_array(i);
  4028. pend_ios +=
  4029. buf_pool->n_pend_reads
  4030. + buf_pool->n_flush[BUF_FLUSH_LRU]
  4031. + buf_pool->n_flush[BUF_FLUSH_LIST]
  4032. + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE];
  4033. }
  4034. return(pend_ios);
  4035. }
  4036. /*********************************************************************//**
4037. Returns the percentage of modified (dirty) pages among all database
4038. pages in the buffer pool.
  4039. @return modified page percentage ratio */
  4040. UNIV_INTERN
  4041. ulint
  4042. buf_get_modified_ratio_pct(void)
  4043. /*============================*/
  4044. {
  4045. ulint ratio;
  4046. ulint lru_len = 0;
  4047. ulint free_len = 0;
  4048. ulint flush_list_len = 0;
  4049. buf_get_total_list_len(&lru_len, &free_len, &flush_list_len);
  4050. ratio = (100 * flush_list_len) / (1 + lru_len + free_len);
  4051. /* 1 + is there to avoid division by zero */
  4052. return(ratio);
  4053. }
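/* Editor's note, a worked example with hypothetical numbers:
flush_list_len = 300, lru_len = 900 and free_len = 99 give
ratio = (100 * 300) / (1 + 900 + 99) = 30000 / 1000 = 30,
i.e. 30 per cent of the buffered database pages are modified. */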
  4054. /*******************************************************************//**
4055. Aggregates one buffer pool's stats into the total buffer pool stats */
  4056. static
  4057. void
  4058. buf_stats_aggregate_pool_info(
  4059. /*==========================*/
  4060. buf_pool_info_t* total_info, /*!< in/out: the buffer pool
  4061. info to store aggregated
  4062. result */
  4063. const buf_pool_info_t* pool_info) /*!< in: individual buffer pool
  4064. stats info */
  4065. {
  4066. ut_a(total_info && pool_info);
  4067. /* Nothing to copy if total_info is the same as pool_info */
  4068. if (total_info == pool_info) {
  4069. return;
  4070. }
  4071. total_info->pool_size += pool_info->pool_size;
  4072. total_info->pool_size_bytes += pool_info->pool_size_bytes;
  4073. total_info->lru_len += pool_info->lru_len;
  4074. total_info->old_lru_len += pool_info->old_lru_len;
  4075. total_info->free_list_len += pool_info->free_list_len;
  4076. total_info->flush_list_len += pool_info->flush_list_len;
  4077. total_info->n_pend_unzip += pool_info->n_pend_unzip;
  4078. total_info->n_pend_reads += pool_info->n_pend_reads;
  4079. total_info->n_pending_flush_lru += pool_info->n_pending_flush_lru;
  4080. total_info->n_pending_flush_list += pool_info->n_pending_flush_list;
  4081. total_info->n_pending_flush_single_page +=
  4082. pool_info->n_pending_flush_single_page;
  4083. total_info->n_pages_made_young += pool_info->n_pages_made_young;
  4084. total_info->n_pages_not_made_young += pool_info->n_pages_not_made_young;
  4085. total_info->n_pages_read += pool_info->n_pages_read;
  4086. total_info->n_pages_created += pool_info->n_pages_created;
  4087. total_info->n_pages_written += pool_info->n_pages_written;
  4088. total_info->n_page_gets += pool_info->n_page_gets;
  4089. total_info->n_ra_pages_read_rnd += pool_info->n_ra_pages_read_rnd;
  4090. total_info->n_ra_pages_read += pool_info->n_ra_pages_read;
  4091. total_info->n_ra_pages_evicted += pool_info->n_ra_pages_evicted;
  4092. total_info->page_made_young_rate += pool_info->page_made_young_rate;
  4093. total_info->page_not_made_young_rate +=
  4094. pool_info->page_not_made_young_rate;
  4095. total_info->pages_read_rate += pool_info->pages_read_rate;
  4096. total_info->pages_created_rate += pool_info->pages_created_rate;
  4097. total_info->pages_written_rate += pool_info->pages_written_rate;
  4098. total_info->n_page_get_delta += pool_info->n_page_get_delta;
  4099. total_info->page_read_delta += pool_info->page_read_delta;
  4100. total_info->young_making_delta += pool_info->young_making_delta;
  4101. total_info->not_young_making_delta += pool_info->not_young_making_delta;
  4102. total_info->pages_readahead_rnd_rate += pool_info->pages_readahead_rnd_rate;
  4103. total_info->pages_readahead_rate += pool_info->pages_readahead_rate;
  4104. total_info->pages_evicted_rate += pool_info->pages_evicted_rate;
  4105. total_info->unzip_lru_len += pool_info->unzip_lru_len;
  4106. total_info->io_sum += pool_info->io_sum;
  4107. total_info->io_cur += pool_info->io_cur;
  4108. total_info->unzip_sum += pool_info->unzip_sum;
  4109. total_info->unzip_cur += pool_info->unzip_cur;
  4110. }
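
/* Illustrative note (not part of the original source): the *_rate
fields are summed above exactly like the plain counters, so the
aggregate entry reports the combined per-second rate across all buffer
pool instances rather than an average per instance. */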

/*******************************************************************//**
Collect buffer pool stats information for a buffer pool. Also
record aggregated stats if there is more than one buffer pool
in the server */
static
void
buf_stats_get_pool_info(
/*====================*/
	buf_pool_t*		buf_pool,	/*!< in: buffer pool */
	ulint			pool_id,	/*!< in: buffer pool ID */
	buf_pool_info_t*	all_pool_info)	/*!< in/out: buffer pool info
						to fill */
{
	buf_pool_info_t*	pool_info;
	time_t			current_time;
	double			time_elapsed;

	/* Find appropriate pool_info to store stats for this buffer pool */
	pool_info = &all_pool_info[pool_id];

	mutex_enter(&buf_pool->LRU_list_mutex);
	mutex_enter(&buf_pool->free_list_mutex);
	buf_pool_mutex_enter(buf_pool);
	buf_flush_list_mutex_enter(buf_pool);

	pool_info->pool_unique_id = pool_id;

	pool_info->pool_size = buf_pool->curr_size;
	pool_info->pool_size_bytes = buf_pool->curr_pool_size;

	pool_info->lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
	pool_info->old_lru_len = buf_pool->LRU_old_len;
	pool_info->free_list_len = UT_LIST_GET_LEN(buf_pool->free);
	pool_info->flush_list_len = UT_LIST_GET_LEN(buf_pool->flush_list);

	pool_info->n_pend_unzip = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
	pool_info->n_pend_reads = buf_pool->n_pend_reads;

	pool_info->n_pending_flush_lru =
		(buf_pool->n_flush[BUF_FLUSH_LRU]
		 + buf_pool->init_flush[BUF_FLUSH_LRU]);

	pool_info->n_pending_flush_list =
		(buf_pool->n_flush[BUF_FLUSH_LIST]
		 + buf_pool->init_flush[BUF_FLUSH_LIST]);

	pool_info->n_pending_flush_single_page =
		buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE];

	buf_flush_list_mutex_exit(buf_pool);

	current_time = time(NULL);
	time_elapsed = 0.001 + difftime(current_time,
					buf_pool->last_printout_time);

	pool_info->n_pages_made_young = buf_pool->stat.n_pages_made_young;
	pool_info->n_pages_not_made_young =
		buf_pool->stat.n_pages_not_made_young;
	pool_info->n_pages_read = buf_pool->stat.n_pages_read;
	pool_info->n_pages_created = buf_pool->stat.n_pages_created;
	pool_info->n_pages_written = buf_pool->stat.n_pages_written;
	pool_info->n_page_gets = buf_pool->stat.n_page_gets;

	pool_info->n_ra_pages_read_rnd = buf_pool->stat.n_ra_pages_read_rnd;
	pool_info->n_ra_pages_read = buf_pool->stat.n_ra_pages_read;
	pool_info->n_ra_pages_evicted = buf_pool->stat.n_ra_pages_evicted;

	pool_info->page_made_young_rate =
		(buf_pool->stat.n_pages_made_young
		 - buf_pool->old_stat.n_pages_made_young) / time_elapsed;

	pool_info->page_not_made_young_rate =
		(buf_pool->stat.n_pages_not_made_young
		 - buf_pool->old_stat.n_pages_not_made_young) / time_elapsed;

	pool_info->pages_read_rate =
		(buf_pool->stat.n_pages_read
		 - buf_pool->old_stat.n_pages_read) / time_elapsed;

	pool_info->pages_created_rate =
		(buf_pool->stat.n_pages_created
		 - buf_pool->old_stat.n_pages_created) / time_elapsed;

	pool_info->pages_written_rate =
		(buf_pool->stat.n_pages_written
		 - buf_pool->old_stat.n_pages_written) / time_elapsed;

	pool_info->n_page_get_delta = buf_pool->stat.n_page_gets
				      - buf_pool->old_stat.n_page_gets;

	if (pool_info->n_page_get_delta) {
		pool_info->page_read_delta = buf_pool->stat.n_pages_read
					     - buf_pool->old_stat.n_pages_read;

		pool_info->young_making_delta =
			buf_pool->stat.n_pages_made_young
			- buf_pool->old_stat.n_pages_made_young;

		pool_info->not_young_making_delta =
			buf_pool->stat.n_pages_not_made_young
			- buf_pool->old_stat.n_pages_not_made_young;
	}

	pool_info->pages_readahead_rnd_rate =
		(buf_pool->stat.n_ra_pages_read_rnd
		 - buf_pool->old_stat.n_ra_pages_read_rnd) / time_elapsed;

	pool_info->pages_readahead_rate =
		(buf_pool->stat.n_ra_pages_read
		 - buf_pool->old_stat.n_ra_pages_read) / time_elapsed;

	pool_info->pages_evicted_rate =
		(buf_pool->stat.n_ra_pages_evicted
		 - buf_pool->old_stat.n_ra_pages_evicted) / time_elapsed;

	pool_info->unzip_lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
	pool_info->io_sum = buf_LRU_stat_sum.io;
	pool_info->io_cur = buf_LRU_stat_cur.io;
	pool_info->unzip_sum = buf_LRU_stat_sum.unzip;
	pool_info->unzip_cur = buf_LRU_stat_cur.unzip;

	buf_refresh_io_stats(buf_pool);
	mutex_exit(&buf_pool->LRU_list_mutex);
	mutex_exit(&buf_pool->free_list_mutex);
	buf_pool_mutex_exit(buf_pool);
}
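
/* Illustrative note (not part of the original source): every *_rate
field above is a per-second average over the interval since the last
call to buf_refresh_io_stats().  For example, if n_pages_read grew from
10000 to 13000 over a 30 second interval, pages_read_rate is roughly
3000 / 30.001, i.e. about 100 reads/s.  The 0.001 added to time_elapsed
only prevents division by zero; the numbers here are hypothetical. */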

/*********************************************************************//**
Prints info of the buffer i/o. */
UNIV_INTERN
void
buf_print_io_instance(
/*==================*/
	buf_pool_info_t*pool_info,	/*!< in: buffer pool info */
	FILE*		file)		/*!< in/out: buffer where to print */
{
	ut_ad(pool_info);

	fprintf(file,
		"Buffer pool size %lu\n"
		"Buffer pool size, bytes %lu\n"
		"Free buffers %lu\n"
		"Database pages %lu\n"
		"Old database pages %lu\n"
		"Modified db pages %lu\n"
		"Pending reads %lu\n"
		"Pending writes: LRU %lu, flush list %lu, single page %lu\n",
		pool_info->pool_size,
		pool_info->pool_size_bytes,
		pool_info->free_list_len,
		pool_info->lru_len,
		pool_info->old_lru_len,
		pool_info->flush_list_len,
		pool_info->n_pend_reads,
		pool_info->n_pending_flush_lru,
		pool_info->n_pending_flush_list,
		pool_info->n_pending_flush_single_page);

	fprintf(file,
		"Pages made young %lu, not young %lu\n"
		"%.2f youngs/s, %.2f non-youngs/s\n"
		"Pages read %lu, created %lu, written %lu\n"
		"%.2f reads/s, %.2f creates/s, %.2f writes/s\n",
		pool_info->n_pages_made_young,
		pool_info->n_pages_not_made_young,
		pool_info->page_made_young_rate,
		pool_info->page_not_made_young_rate,
		pool_info->n_pages_read,
		pool_info->n_pages_created,
		pool_info->n_pages_written,
		pool_info->pages_read_rate,
		pool_info->pages_created_rate,
		pool_info->pages_written_rate);

	if (pool_info->n_page_get_delta) {
		fprintf(file,
			"Buffer pool hit rate %lu / 1000,"
			" young-making rate %lu / 1000 not %lu / 1000\n",
			(ulong) (1000 - (1000 * pool_info->page_read_delta
					 / pool_info->n_page_get_delta)),
			(ulong) (1000 * pool_info->young_making_delta
				 / pool_info->n_page_get_delta),
			(ulong) (1000 * pool_info->not_young_making_delta
				 / pool_info->n_page_get_delta));
	} else {
		fputs("No buffer pool page gets since the last printout\n",
		      file);
	}

	/* Statistics about read ahead algorithm */
	fprintf(file, "Pages read ahead %.2f/s,"
		" evicted without access %.2f/s,"
		" Random read ahead %.2f/s\n",
		pool_info->pages_readahead_rate,
		pool_info->pages_evicted_rate,
		pool_info->pages_readahead_rnd_rate);

	/* Print some values to help us with visualizing what is
	happening with LRU eviction. */
	fprintf(file,
		"LRU len: %lu, unzip_LRU len: %lu\n"
		"I/O sum[%lu]:cur[%lu], unzip sum[%lu]:cur[%lu]\n",
		pool_info->lru_len, pool_info->unzip_lru_len,
		pool_info->io_sum, pool_info->io_cur,
		pool_info->unzip_sum, pool_info->unzip_cur);
}
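
/* Illustrative note (not part of the original source): the hit rate
above is derived from the deltas since the last printout.  If, say,
50000 page gets were served and 500 of them required a page read, the
printed value is 1000 - (1000 * 500 / 50000) = 990 / 1000, i.e. a 99%
buffer pool hit rate.  The numbers are hypothetical and only meant to
show how the ratio is scaled to a per-mille figure. */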

/*********************************************************************//**
Prints info of the buffer i/o. */
UNIV_INTERN
void
buf_print_io(
/*=========*/
	FILE*	file)	/*!< in/out: buffer where to print */
{
	ulint			i;
	buf_pool_info_t*	pool_info;
	buf_pool_info_t*	pool_info_total;

	/* If srv_buf_pool_instances is greater than 1, allocate
	one extra buf_pool_info_t, the last one stores
	aggregated/total values from all pools */
	if (srv_buf_pool_instances > 1) {
		pool_info = (buf_pool_info_t*) mem_zalloc((
			srv_buf_pool_instances + 1) * sizeof *pool_info);

		pool_info_total = &pool_info[srv_buf_pool_instances];
	} else {
		ut_a(srv_buf_pool_instances == 1);
		pool_info_total = pool_info = (buf_pool_info_t*) mem_zalloc(
			sizeof *pool_info);
	}

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);

		/* Fetch individual buffer pool info and calculate
		aggregated stats along the way */
		buf_stats_get_pool_info(buf_pool, i, pool_info);

		/* If we have more than one buffer pool, store
		the aggregated stats */
		if (srv_buf_pool_instances > 1) {
			buf_stats_aggregate_pool_info(pool_info_total,
						      &pool_info[i]);
		}
	}

	/* Print the aggregate buffer pool info */
	buf_print_io_instance(pool_info_total, file);

	/* If there is more than one buffer pool, print each individual
	pool's info */
	if (srv_buf_pool_instances > 1) {
		fputs("----------------------\n"
		      "INDIVIDUAL BUFFER POOL INFO\n"
		      "----------------------\n", file);

		for (i = 0; i < srv_buf_pool_instances; i++) {
			fprintf(file, "---BUFFER POOL %lu\n", i);
			buf_print_io_instance(&pool_info[i], file);
		}
	}

	mem_free(pool_info);
}
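
#if 0
/* Minimal illustrative sketch, not part of the server source: a
monitoring hook could dump the aggregate and per-instance buffer pool
I/O statistics to the error log like this.  Only buf_print_io() above
and the standard stderr stream are assumed; the function name is
hypothetical. */
static
void
buf_print_io_example(void)
{
	/* Writes the aggregate section first, then one section per
	buffer pool instance when srv_buf_pool_instances > 1. */
	buf_print_io(stderr);
}
#endif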

/**********************************************************************//**
Refreshes the statistics used to print per-second averages. */
UNIV_INTERN
void
buf_refresh_io_stats(
/*=================*/
	buf_pool_t*	buf_pool)	/*!< in: buffer pool instance */
{
	buf_pool->last_printout_time = ut_time();
	buf_pool->old_stat = buf_pool->stat;
}

/**********************************************************************//**
Refreshes the statistics used to print per-second averages. */
UNIV_INTERN
void
buf_refresh_io_stats_all(void)
/*==========================*/
{
	ulint	i;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);

		buf_refresh_io_stats(buf_pool);
	}
}

/**********************************************************************//**
Check if all pages in all buffer pools are in a replaceable state.
@return FALSE if not */
UNIV_INTERN
ibool
buf_all_freed(void)
/*===============*/
{
	ulint	i;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);

		if (!buf_all_freed_instance(buf_pool)) {
			return(FALSE);
		}
	}

	return(TRUE);
}

/*********************************************************************//**
Checks that there currently are no pending i/o-operations for the buffer
pool.
@return TRUE if there is no pending i/o */
UNIV_INTERN
ibool
buf_pool_check_no_pending_io(void)
/*==============================*/
{
	ulint	i;
	ibool	ret = TRUE;

	buf_pool_mutex_enter_all();

	for (i = 0; i < srv_buf_pool_instances && ret; i++) {
		const buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);

		if (buf_pool->n_pend_reads
		    + buf_pool->n_flush[BUF_FLUSH_LRU]
		    + buf_pool->n_flush[BUF_FLUSH_LIST]
		    + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]) {

			ret = FALSE;
		}
	}

	buf_pool_mutex_exit_all();

	return(ret);
}
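
#if 0
/* Minimal illustrative sketch, not part of the server source: a caller
that needs all buffer pool I/O to drain (for example during shutdown)
could poll buf_pool_check_no_pending_io() like this.  The helper name
and the 100 ms back-off are hypothetical; os_thread_sleep() takes its
argument in microseconds. */
static
void
buf_wait_for_no_pending_io_example(void)
{
	while (!buf_pool_check_no_pending_io()) {
		/* Some read or flush I/O is still outstanding;
		back off briefly and re-check. */
		os_thread_sleep(100000);
	}
}
#endif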

#if 0
Code currently not used
/*********************************************************************//**
Gets the current length of the free list of buffer blocks.
@return length of the free list */
UNIV_INTERN
ulint
buf_get_free_list_len(void)
/*=======================*/
{
	ulint	len;

	//buf_pool_mutex_enter(buf_pool);
	mutex_enter(&buf_pool->free_list_mutex);

	len = UT_LIST_GET_LEN(buf_pool->free);

	//buf_pool_mutex_exit(buf_pool);
	mutex_exit(&buf_pool->free_list_mutex);

	return(len);
}
#endif

#else /* !UNIV_HOTBACKUP */
/********************************************************************//**
Inits a page to the buffer buf_pool, for use in ibbackup --restore. */
UNIV_INTERN
void
buf_page_init_for_backup_restore(
/*=============================*/
	ulint		space,	/*!< in: space id */
	ulint		offset,	/*!< in: offset of the page within space
				in units of a page */
	ulint		zip_size,/*!< in: compressed page size in bytes
				or 0 for uncompressed pages */
	buf_block_t*	block)	/*!< in: block to init */
{
	block->page.state = BUF_BLOCK_FILE_PAGE;
	block->page.space = space;
	block->page.offset = offset;

	page_zip_des_init(&block->page.zip);

	/* We assume that block->page.data has been allocated
	with zip_size == UNIV_PAGE_SIZE. */
	ut_ad(zip_size <= UNIV_PAGE_SIZE);
	ut_ad(ut_is_2pow(zip_size));
	page_zip_set_size(&block->page.zip, zip_size);
	if (zip_size) {
		block->page.zip.data = block->frame + UNIV_PAGE_SIZE;
	}
}
#endif /* !UNIV_HOTBACKUP */
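
/* Illustrative note (not part of the original source): in
buf_page_init_for_backup_restore() above, a valid zip_size is either 0
for an uncompressed page or a power of two no larger than
UNIV_PAGE_SIZE, e.g. 1024, 2048, 4096, 8192 or 16384 bytes; the two
ut_ad() assertions enforce exactly that before the size is recorded
with page_zip_set_size(). */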