@@ -22,6 +22,8 @@ Created 1/8/1996 Heikki Tuuri
# include "btr0btr.h"
# include "btr0cur.h"
# include "btr0sea.h"
# include "page0zip.h"
# include "page0page.h"
# include "pars0pars.h"
# include "pars0sym.h"
# include "que0que.h"
@@ -1262,6 +1264,156 @@ is_ord_part:
return ( undo_page_len > = UNIV_PAGE_SIZE ) ;
}
/********************************************************************
If a record of this index might not fit on a single B-tree page,
return TRUE.  Used (with innodb_strict_mode) to refuse creation of
indexes whose worst-case record size exceeds the page limits. */
static
ibool
dict_index_too_big_for_tree(
/*========================*/
					/* out: TRUE if the index
					record could become too big */
	const dict_table_t*	table,		/* in: table */
	const dict_index_t*	new_index)	/* in: index */
{
	ulint	zip_size;
	ulint	comp;
	ulint	i;
	/* maximum possible storage size of a record */
	ulint	rec_max_size;
	/* maximum allowed size of a record on a leaf page */
	ulint	page_rec_max;
	/* maximum allowed size of a node pointer record */
	ulint	page_ptr_max;

	comp = dict_table_is_comp(table);
	zip_size = dict_table_zip_size(table);

	if (zip_size && zip_size < UNIV_PAGE_SIZE) {
		/* On a compressed page, two records must fit in the
		uncompressed page modification log.  On compressed
		pages with zip_size == UNIV_PAGE_SIZE, this limit will
		never be reached. */
		ut_ad(comp);
		/* The maximum allowed record size is the size of
		an empty page, minus a byte for recording the heap
		number in the page modification log.  The maximum
		allowed node pointer size is half that. */
		page_rec_max = page_zip_empty_size(new_index->n_fields,
						   zip_size) - 1;
		page_ptr_max = page_rec_max / 2;
		/* On a compressed page, there is a two-byte entry in
		the dense page directory for every record.  But there
		is no record header. */
		rec_max_size = 2;
	} else {
		/* The maximum allowed record size is half a B-tree
		page.  No additional sparse page directory entry will
		be generated for the first few user records. */
		page_rec_max = page_get_free_space_of_empty(comp) / 2;
		page_ptr_max = page_rec_max;
		/* Each record has a header. */
		rec_max_size = comp
			? REC_N_NEW_EXTRA_BYTES
			: REC_N_OLD_EXTRA_BYTES;
	}

	if (comp) {
		/* Include the "null" flags in the
		maximum possible record size. */
		rec_max_size += UT_BITS_IN_BYTES(new_index->n_nullable);
	} else {
		/* For each column, include a 2-byte offset and a
		"null" flag.  The 1-byte format is only used in short
		records that do not contain externally stored columns.
		Such records could never exceed the page limit, even
		when using the 2-byte format. */
		rec_max_size += 2 * new_index->n_fields;
	}

	/* Compute the maximum possible record size, accumulating
	the worst-case contribution of each index field.  We may
	return early from inside the loop as soon as either page
	limit is provably exceeded. */
	for (i = 0; i < new_index->n_fields; i++) {
		const dict_field_t*	field
			= dict_index_get_nth_field(new_index, i);
		const dict_col_t*	col
			= dict_field_get_col(field);
		ulint			field_max_size;
		ulint			field_ext_max_size;

		/* In dtuple_convert_big_rec(), variable-length columns
		that are longer than BTR_EXTERN_FIELD_REF_SIZE * 2
		may be chosen for external storage.

		Fixed-length columns, and all columns of secondary
		index records are always stored inline. */

		/* Determine the maximum length of the index field.
		The field_ext_max_size should be computed as the worst
		case in rec_get_converted_size_comp() for
		REC_STATUS_ORDINARY records. */

		field_max_size = dict_col_get_fixed_size(col);
		if (field_max_size) {
			/* dict_index_add_col() should guarantee this */
			ut_ad(!field->prefix_len
			      || field->fixed_len == field->prefix_len);
			/* Fixed lengths are not encoded
			in ROW_FORMAT=COMPACT.  Note that the goto
			deliberately skips the "if (comp)" extra-size
			addition below. */
			field_ext_max_size = 0;
			goto add_field_size;
		}

		field_max_size = dict_col_get_max_size(col);
		/* Variable-length columns need 1 length byte if the
		maximum length fits in one byte, otherwise 2. */
		field_ext_max_size = field_max_size < 256 ? 1 : 2;

		if (field->prefix_len) {
			/* Only the declared prefix of the column is
			stored in the index. */
			if (field->prefix_len < field_max_size) {
				field_max_size = field->prefix_len;
			}
		} else if (field_max_size > BTR_EXTERN_FIELD_REF_SIZE * 2
			   && dict_index_is_clust(new_index)) {
			/* In the worst case, we have a locally stored
			column of BTR_EXTERN_FIELD_REF_SIZE * 2 bytes.
			The length can be stored in one byte.  If the
			column were stored externally, the lengths in
			the clustered index page would be
			BTR_EXTERN_FIELD_REF_SIZE and 2. */
			field_max_size = BTR_EXTERN_FIELD_REF_SIZE * 2;
			field_ext_max_size = 1;
		}

		if (comp) {
			/* Add the extra size for ROW_FORMAT=COMPACT.
			For ROW_FORMAT=REDUNDANT, these bytes were
			added to rec_max_size before this loop. */
			rec_max_size += field_ext_max_size;
		}
add_field_size:
		rec_max_size += field_max_size;

		/* Check the size limit on leaf pages. */
		if (UNIV_UNLIKELY(rec_max_size >= page_rec_max)) {

			return(TRUE);
		}

		/* Check the size limit on non-leaf pages.  Records
		stored in non-leaf B-tree pages consist of the unique
		columns of the record (the key columns of the B-tree)
		and a node pointer field.  When we have processed the
		unique columns, rec_max_size equals the size of the
		node pointer record minus the node pointer column. */
		if (i + 1 == dict_index_get_n_unique_in_tree(new_index)
		    && rec_max_size + REC_NODE_PTR_SIZE >= page_ptr_max) {

			return(TRUE);
		}
	}

	return(FALSE);
}
/**************************************************************************
Adds an index to the dictionary cache . */
UNIV_INTERN
@@ -1272,7 +1424,10 @@ dict_index_add_to_cache(
dict_table_t * table , /* in: table on which the index is */
dict_index_t * index , /* in, own: index; NOTE! The index memory
object is freed in this function ! */
ulint page_no ) /* in: root page number of the index */
ulint page_no , /* in: root page number of the index */
ibool strict ) /* in: TRUE=refuse to create the index
if records could be too big to fit in
an B - tree page */
{
dict_index_t * new_index ;
ulint n_ord ;
@@ -1303,6 +1458,13 @@ dict_index_add_to_cache(
new_index - > n_fields = new_index - > n_def ;
if ( strict & & dict_index_too_big_for_tree ( table , new_index ) ) {
too_big :
dict_mem_index_free ( new_index ) ;
dict_mem_index_free ( index ) ;
return ( DB_TOO_BIG_RECORD ) ;
}
if ( UNIV_UNLIKELY ( index - > type & DICT_UNIVERSAL ) ) {
n_ord = new_index - > n_fields ;
} else {
@@ -1334,9 +1496,8 @@ dict_index_add_to_cache(
if ( dict_index_too_big_for_undo ( table , new_index ) ) {
/* An undo log record might not fit in
a single page . Refuse to create this index . */
dict_mem_index_free ( new_index ) ;
dict_mem_index_free ( index ) ;
return ( DB_TOO_BIG_RECORD ) ;
goto too_big ;
}
break ;