[Commits] c850799967c: MDEV-10963 Fragmented BINLOG query
revision-id: c850799967c561d08242987269d94af6ae4c7c5e (mariadb-10.1.35-59-gc850799967c)
parent(s): d3a8b5aa9cee24a6397662e30df2e915f45460e0
author: Andrei Elkin
committer: Andrei Elkin
timestamp: 2018-09-21 18:48:06 +0300
message:

MDEV-10963 Fragmented BINLOG query

The problem was originally reported in http://bugs.mysql.com/bug.php?id=82212

The base64-encoded form of a Rows_log_event is about 4/3 the size of its raw
byte representation, so when a binlogged event approaches 1GB mysqlbinlog
generates a BINLOG query that cannot be sent out because of its size.

This is fixed by fragmenting the BINLOG argument C-string into (approximate)
halves when the base64-encoded event exceeds 1GB. In that case mysqlbinlog
puts out

  SET @binlog_fragment_0='base64-encoded-fragment_0';
  SET @binlog_fragment_1='base64-encoded-fragment_1';
  BINLOG DEFRAGMENT(@binlog_fragment_0, @binlog_fragment_1);

to represent a single big BINLOG 'base64-encoded-"total"'. Two more statements
are composed to promptly release memory:

  SET @binlog_fragment_0=NULL;
  SET @binlog_fragment_1=NULL;

Two fragments are enough, though the client and server may still need to tweak
their @@max_allowed_packet to accommodate the fragment size (which they would
have to do anyway with a greater number of fragments, should that ever be
desired).

On the lower level the following changes are made:

Log_event::print_base64()
  still calls the encoder and stores the encoded data in a cache, but now
  *without* doing any formatting. Formatting is deferred until the cache is
  copied to an output file (e.g. the mysqlbinlog output). The no-formatting
  behavior is also reflected in the changed meaning of the last argument,
  which now specifies whether to cache the encoded data at all.

my_b_copy_to_file()
  is turned into my_b_copy_to_file_frag(), which accepts format-specifier
  arguments to build a syntactically correct BINLOG query in both the
  fragmented (n_frag > 1) and non-fragmented (n_frag == 1) cases.

Rows_log_event::print_helper()
  decides whether to fragment, prepares the respective format specifiers and
  invokes the cache-to-file copying function, which is now

copy_cache_frag_to_file_and_reinit()
  replacing the original copy_event_cache_to_file_and_reinit() in order to
  pass the extra arguments down to

my_b_copy_to_file_frag()
  the successor of my_b_copy_to_file(), which is no longer a pure copier but
  also wraps the encoded data according to the format specifiers. With its
  'n_frag' argument equal to 1 and the rest of the arguments NULL it works
  like the original function.
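For illustration only (not part of the patch), below is a minimal standalone
sketch of the size arithmetic behind the fragmentation decision described
above: standard base64 grows the payload by 4/3 (rounded up to 4-byte groups),
so an event somewhat under 1GB on disk already overflows a 1GB query limit.
The helper names encoded_size_of(), choose_n_frag() and the fixed 1GB
threshold are illustrative; in the patch the threshold is the (debug-only)
opt_binlog_rows_event_max_encoded_size option and the measured quantity is the
size of the already-encoded body cache.

  #include <cstddef>
  #include <cstdio>

  /* Standard base64: every 3 input bytes become 4 output characters,
     with the last group padded up to a full 4 characters. */
  static size_t encoded_size_of(size_t raw_bytes)
  {
    return ((raw_bytes + 2) / 3) * 4;
  }

  /* Illustrative rule: keep a single BINLOG '...' statement while the
     encoded event fits under the limit, otherwise split it in two. */
  static unsigned choose_n_frag(size_t raw_event_bytes, size_t limit)
  {
    return encoded_size_of(raw_event_bytes) > limit ? 2 : 1;
  }

  int main()
  {
    const size_t one_gb= 1024UL * 1024 * 1024;
    /* A ~900MB row event encodes to ~1.2GB of base64 text, so it is split. */
    size_t raw= 900UL * 1024 * 1024;
    printf("encoded size: %zu bytes, fragments: %u\n",
           encoded_size_of(raw), choose_n_frag(raw, one_gb));
    return 0;
  }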
--- client/mysqlbinlog.cc | 21 ++- include/my_sys.h | 10 +- .../suite/binlog/r/binlog_base64_flag.result | 16 +++ .../binlog/r/binlog_mysqlbinlog_row_frag.result | 26 ++++ mysql-test/suite/binlog/t/binlog_base64_flag.test | 22 ++++ .../binlog/t/binlog_mysqlbinlog_row_frag.test | 43 +++++++ mysys/mf_iocache2.c | 135 +++++++++++++++++--- sql/lex.h | 1 + sql/log_event.cc | 142 ++++++++++++++++++--- sql/log_event.h | 34 ++++- sql/log_event_old.cc | 15 ++- sql/sql_binlog.cc | 106 +++++++++++++-- sql/sql_lex.cc | 2 + sql/sql_lex.h | 10 ++ sql/sql_yacc.yy | 12 +- unittest/sql/mf_iocache-t.cc | 51 +++++++- 16 files changed, 587 insertions(+), 59 deletions(-) diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index bc13aa6c2cc..2c05bb806a9 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -72,6 +72,7 @@ ulong mysqld_net_retry_count = 10L; ulong open_files_limit; ulong opt_binlog_rows_event_max_size; ulonglong test_flags = 0; +ulong opt_binlog_rows_event_max_encoded_size= MAX_MAX_ALLOWED_PACKET; static uint opt_protocol= 0; static FILE *result_file; static char *result_file_name= 0; @@ -813,7 +814,12 @@ write_event_header_and_base64(Log_event *ev, FILE *result_file, /* Write header and base64 output to cache */ ev->print_header(head, print_event_info, FALSE); - ev->print_base64(body, print_event_info, FALSE); + + DBUG_ASSERT(print_event_info->base64_output_mode == BASE64_OUTPUT_ALWAYS); + + ev->print_base64(body, print_event_info, + print_event_info->base64_output_mode != + BASE64_OUTPUT_DECODE_ROWS); /* Read data from cache and write to result file */ if (copy_event_cache_to_file_and_reinit(head, result_file) || @@ -852,7 +858,9 @@ static bool print_base64(PRINT_EVENT_INFO *print_event_info, Log_event *ev) return 1; } ev->print(result_file, print_event_info); - return print_event_info->head_cache.error == -1; + return + print_event_info->head_cache.error == -1 || + print_event_info->body_cache.error == -1; } @@ -1472,6 +1480,15 @@ that may lead to an endless loop.", "This value must be a multiple of 256.", &opt_binlog_rows_event_max_size, &opt_binlog_rows_event_max_size, 0, GET_ULONG, REQUIRED_ARG, UINT_MAX, 256, ULONG_MAX, 0, 256, 0}, +#ifndef DBUG_OFF + {"debug-binlog-row-event-max-encoded-size", 0, + "The maximum size of base64-encoded rows-event in one BINLOG pseudo-query " + "instance. 
When the computed actual size exceeds the limit " + "the BINLOG's argument string is fragmented in two.", + &opt_binlog_rows_event_max_encoded_size, + &opt_binlog_rows_event_max_encoded_size, 0, + GET_ULONG, REQUIRED_ARG, UINT_MAX/4, 256, ULONG_MAX, 0, 256, 0}, +#endif {"verify-binlog-checksum", 'c', "Verify checksum binlog events.", (uchar**) &opt_verify_binlog_checksum, (uchar**) &opt_verify_binlog_checksum, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, diff --git a/include/my_sys.h b/include/my_sys.h index 110a2ee9af3..f201486137c 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -602,7 +602,15 @@ static inline size_t my_b_bytes_in_cache(const IO_CACHE *info) return *info->current_end - *info->current_pos; } -int my_b_copy_to_file(IO_CACHE *cache, FILE *file); +int +my_b_copy_to_file_frag(IO_CACHE *cache, FILE *file, + uint n_frag, + const char* before_frag, + const char* after_frag, + const char* after_last, + const char* final_per_frag, + char* buf); + my_off_t my_b_append_tell(IO_CACHE* info); my_off_t my_b_safe_tell(IO_CACHE* info); /* picks the correct tell() */ int my_b_pread(IO_CACHE *info, uchar *Buffer, size_t Count, my_off_t pos); diff --git a/mysql-test/suite/binlog/r/binlog_base64_flag.result b/mysql-test/suite/binlog/r/binlog_base64_flag.result index d13e13c97b0..a1d39f9ef7b 100644 --- a/mysql-test/suite/binlog/r/binlog_base64_flag.result +++ b/mysql-test/suite/binlog/r/binlog_base64_flag.result @@ -28,6 +28,22 @@ a 1 1 3 +DELETE FROM t1 WHERE a=3; +BINLOG ' +ODdYRw8BAAAAZgAAAGoAAAABAAQANS4xLjIzLXJjLWRlYnVnLWxvZwAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAA4N1hHEzgNAAgAEgAEBAQEEgAAUwAEGggAAAAICAgC +'; +SET @binlog_fragment_0=' +TFtYRxMBAAAAKQAAAH8BAAAAABAAAAAAAAAABHRlc3QAAnQxAAEDAAE= +TFtYRxcBAAAAIgAAAKEBAAAQABAAAAAAAAEAAf/+AwAAAA== +'; +SET @binlog_fragment_1=''; +BINLOG DEFRAGMENT(@binlog_fragment_0, @binlog_fragment_1); +select * from t1; +a +1 +1 +3 ==== Test --base64-output=never on a binlog with row events ==== /*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/; /*!40019 SET @@session.max_insert_delayed_threads=0*/; diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_frag.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_frag.result new file mode 100644 index 00000000000..d4e1d3b70c9 --- /dev/null +++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_frag.result @@ -0,0 +1,26 @@ +CREATE TABLE t (a TEXT); +RESET MASTER; +INSERT INTO t SET a=repeat('a', 1024); +SELECT a from t into @a; +FLUSH LOGS; +DELETE FROM t; +SELECT a LIKE @a as 'true' FROM t; +true +1 +SELECT @binlog_fragment_0, @binlog_fragment_1 as 'NULL'; +@binlog_fragment_0 NULL +NULL NULL +BINLOG number-of-fragments must be exactly two +BINLOG DEFRAGMENT(@binlog_fragment); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1 +BINLOG DEFRAGMENT(@binlog_fragment, @binlog_fragment, @binlog_fragment); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ' @binlog_fragment)' at line 1 +SET @binlog_fragment_0='012345'; +SET @binlog_fragment_1='012345'; +BINLOG DEFRAGMENT(@binlog_fragment_0, @binlog_fragment_1); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use +SET @binlog_fragment_0='012345'; +BINLOG DEFRAGMENT(@binlog_fragment_0, @binlog_fragment_not_exist); +ERROR HY000: Decoding of base64 string 
failed: BINLOG fragment user variable 'binlog_fragment_not_exist' has unexpectedly no value +# Cleanup +DROP TABLE t; diff --git a/mysql-test/suite/binlog/t/binlog_base64_flag.test b/mysql-test/suite/binlog/t/binlog_base64_flag.test index f8333315088..19e8ccc1905 100644 --- a/mysql-test/suite/binlog/t/binlog_base64_flag.test +++ b/mysql-test/suite/binlog/t/binlog_base64_flag.test @@ -67,6 +67,28 @@ TFtYRxcBAAAAIgAAAKEBAAAQABAAAAAAAAEAAf/+AwAAAA== # The above line should succeed and 3 should be in the table select * from t1; +# The same as above with one-fragment BINLOG to prove +# BINLOG 'base64-encoded-data' is equivalent to the pair of +# SET @uservar='base64-encoded-data'; +# BINLOG 1, @uservar; +DELETE FROM t1 WHERE a=3; +# This is a binlog statement containing a Format_description_log_event +# from the same version as the Table_map and Write_rows_log_event. +BINLOG ' +ODdYRw8BAAAAZgAAAGoAAAABAAQANS4xLjIzLXJjLWRlYnVnLWxvZwAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAA4N1hHEzgNAAgAEgAEBAQEEgAAUwAEGggAAAAICAgC +'; + +# This is a Table_map_log_event+Write_rows_log_event corresponding to: +# INSERT INTO TABLE test.t1 VALUES (3) +SET @binlog_fragment_0=' +TFtYRxMBAAAAKQAAAH8BAAAAABAAAAAAAAAABHRlc3QAAnQxAAEDAAE= +TFtYRxcBAAAAIgAAAKEBAAAQABAAAAAAAAEAAf/+AwAAAA== +'; +SET @binlog_fragment_1=''; +BINLOG DEFRAGMENT(@binlog_fragment_0, @binlog_fragment_1); +# The above line should succeed and 3 should be in the table +select * from t1; # Test that mysqlbinlog stops with an error message when the # --base64-output=never flag is used on a binlog with base64 events. diff --git a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_row_frag.test b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_row_frag.test new file mode 100644 index 00000000000..5318f8acec6 --- /dev/null +++ b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_row_frag.test @@ -0,0 +1,43 @@ +--source include/have_debug.inc +--source include/have_log_bin.inc +--source include/have_binlog_format_row.inc + +--let $MYSQLD_DATADIR= `select @@datadir` +--let $max_size=1024 + +CREATE TABLE t (a TEXT); +# events of interest are guaranteed to stay in 000001 log +RESET MASTER; +--eval INSERT INTO t SET a=repeat('a', $max_size) +SELECT a from t into @a; +FLUSH LOGS; +DELETE FROM t; + +--exec $MYSQL_BINLOG --debug-binlog-row-event-max-encoded-size=256 $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql + +--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql + +SELECT a LIKE @a as 'true' FROM t; +SELECT @binlog_fragment_0, @binlog_fragment_1 as 'NULL'; + +# improper syntax error +--echo BINLOG number-of-fragments must be exactly two +--error ER_PARSE_ERROR +BINLOG DEFRAGMENT(@binlog_fragment); +--error ER_PARSE_ERROR +BINLOG DEFRAGMENT(@binlog_fragment, @binlog_fragment, @binlog_fragment); + +# corrupted fragments error check (to the expected error code notice, +# the same error code occurs in a similar unfragmented case) +SET @binlog_fragment_0='012345'; +SET @binlog_fragment_1='012345'; +--error ER_SYNTAX_ERROR +BINLOG DEFRAGMENT(@binlog_fragment_0, @binlog_fragment_1); + +# Not existing fragment is not allowed +SET @binlog_fragment_0='012345'; +--error ER_BASE64_DECODE_ERROR +BINLOG DEFRAGMENT(@binlog_fragment_0, @binlog_fragment_not_exist); + +--echo # Cleanup +DROP TABLE t; diff --git a/mysys/mf_iocache2.c b/mysys/mf_iocache2.c index 2499094037d..6b7ff8a7568 100644 --- a/mysys/mf_iocache2.c +++ b/mysys/mf_iocache2.c @@ -22,13 +22,23 @@ #include <stdarg.h> #include <m_ctype.h> -/* +/** Copy contents of an IO_CACHE to a file. 
SYNOPSIS - my_b_copy_to_file() - cache IO_CACHE to copy from - file File to copy to + my_b_copy_to_file_frag + + cache IO_CACHE to copy from + file File to copy to + n_frag # of fragments + + Other arguments represent format strings to enable wrapping + of the fragments and their total, including + + before_frag before a fragment + after_frag after a fragment + after_last_frag after all the fragments + final_per_frag in the end per each fragment DESCRIPTION Copy the contents of the cache to the file. The cache will be @@ -38,33 +48,120 @@ If a failure to write fully occurs, the cache is only copied partially. - TODO - Make this function solid by handling partial reads from the cache - in a correct manner: it should be atomic. + The copying is made in so many steps as the number of fragments as + specified by the parameter 'n_frag'. Each step is wrapped with + writing to the file 'before_frag' and 'after_frag' formated + strings, unless the parameters are NULL. In the end, optionally, + first 'after_last_frag' string is appended to 'file' followed by + 'final_per_frag' per each fragment. + final item. RETURN VALUE 0 All OK 1 An error occurred + + TODO + Make this function solid by handling partial reads from the cache + in a correct manner: it should be atomic. */ int -my_b_copy_to_file(IO_CACHE *cache, FILE *file) +my_b_copy_to_file_frag(IO_CACHE *cache, FILE *file, + uint n_frag, + const char* before_frag, + const char* after_frag, + const char* after_last, + const char* final_per_frag, + char* buf) { - size_t bytes_in_cache; - DBUG_ENTER("my_b_copy_to_file"); + size_t bytes_in_cache; // block, may have short size in the last one + size_t written_off_last_block; // consumed part of the block by last fragment + size_t total_size= my_b_tell(cache); + size_t frag_size= total_size / n_frag + 1; + size_t total_written= 0; + size_t frag_written; // bytes collected in the current fragment + uint i; + + DBUG_ENTER("my_b_copy_to_file_frag"); + + DBUG_ASSERT(cache->type == WRITE_CACHE); /* Reinit the cache to read from the beginning of the cache */ if (reinit_io_cache(cache, READ_CACHE, 0L, FALSE, FALSE)) DBUG_RETURN(1); - bytes_in_cache= my_b_bytes_in_cache(cache); - do + + for (i= 0, written_off_last_block= 0, bytes_in_cache= my_b_bytes_in_cache(cache); + i < n_frag; + i++, total_written += frag_written) { - if (my_fwrite(file, cache->read_pos, bytes_in_cache, - MYF(MY_WME | MY_NABP)) == (size_t) -1) - DBUG_RETURN(1); - } while ((bytes_in_cache= my_b_fill(cache))); - if(cache->error == -1) - DBUG_RETURN(1); - DBUG_RETURN(0); + frag_written= 0; + if (before_frag) + { + sprintf(buf, before_frag, i); + my_fwrite(file, (uchar*) buf, strlen(buf), MYF(MY_WME | MY_NABP)); + } + do + { + /* + Either the current block is the last (L) in making the + current fragment (and possibly has some extra not to fit (LG) into + the fragment), or (I) the current (whole then) block is + intermediate. + */ + size_t block_to_write= (frag_written + bytes_in_cache >= frag_size) ? 
+ frag_size - frag_written : bytes_in_cache; + + DBUG_ASSERT(n_frag != 1 || + (block_to_write == bytes_in_cache && + written_off_last_block == 0)); + + if (my_fwrite(file, cache->read_pos + written_off_last_block, + block_to_write, + MYF(MY_WME | MY_NABP)) == (size_t) -1) + /* no cache->error is set here */ + DBUG_RETURN(1); + + frag_written += block_to_write; + if (frag_written == frag_size) // (L) + { + DBUG_ASSERT(block_to_write <= bytes_in_cache); + written_off_last_block= block_to_write; + bytes_in_cache -= written_off_last_block; // (LG) when bytes>0 + /* + Nothing should be left in cache at the end of the + last fragment construction. + */ + DBUG_ASSERT(i != n_frag - 1 || bytes_in_cache == 0); + + break; + } + else + { + written_off_last_block= 0; // (I) + } + } while ((bytes_in_cache= my_b_fill(cache))); + + if (after_frag) + { + sprintf(buf, after_frag, NULL); + my_fwrite(file, (uchar*) buf, strlen(buf), MYF(MY_WME | MY_NABP)); + } + } + + DBUG_ASSERT(total_written == total_size); // output == input + + if (after_last) + { + sprintf(buf, after_last, n_frag); + my_fwrite(file, (uchar*) buf, strlen(buf), MYF(MY_WME | MY_NABP)); + } + + for (i= 0; final_per_frag && i < n_frag ; i++) + { + sprintf(buf, final_per_frag, i); + my_fwrite(file, (uchar*) buf, strlen(buf), MYF(MY_WME | MY_NABP)); + } + + DBUG_RETURN(cache->error == -1); } diff --git a/sql/lex.h b/sql/lex.h index 87c87d03fb3..cd3f3803b80 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -133,6 +133,7 @@ static SYMBOL symbols[] = { { "COMPACT", SYM(COMPACT_SYM)}, { "COMPLETION", SYM(COMPLETION_SYM)}, { "COMPRESSED", SYM(COMPRESSED_SYM)}, + { "DEFRAGMENT", SYM(DEFRAGMENT_SYM)}, { "CONCURRENT", SYM(CONCURRENT)}, { "CONDITION", SYM(CONDITION_SYM)}, { "CONNECTION", SYM(CONNECTION_SYM)}, diff --git a/sql/log_event.cc b/sql/log_event.cc index e1912ad4620..0c39200148f 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -2762,9 +2762,17 @@ void free_table_map_log_event(Table_map_log_event *event) delete event; } +/* + Encode the event, optionally per 'do_print_encoded_base64' store the result + into the argument cache; optionally per 'verbose' print into the cache + a verbose represenation of the event. + Note, no extra wrapping is done to the being io-cached data, like + procuding a BINLOG query. It's left for a routine that extracts from + the cache. 
+*/ void Log_event::print_base64(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info, - bool more) + bool do_print_encoded) { const uchar *ptr= (const uchar *)temp_buf; uint32 size= uint4korr(ptr + EVENT_LEN_OFFSET); @@ -2783,17 +2791,9 @@ void Log_event::print_base64(IO_CACHE* file, DBUG_ASSERT(0); } - if (print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS) - { - if (my_b_tell(file) == 0) - my_b_write_string(file, "\nBINLOG '\n"); - + if (do_print_encoded) my_b_printf(file, "%s\n", tmp_str); - if (!more) - my_b_printf(file, "'%s\n", print_event_info->delimiter); - } - if (print_event_info->verbose) { Rows_log_event *ev= NULL; @@ -4833,9 +4833,17 @@ void Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info) print_event_info->base64_output_mode != BASE64_OUTPUT_NEVER && !print_event_info->short_form) { - if (print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS) + /* BINLOG is matched with the delimiter below on the same level */ + bool do_print_encoded= + print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS; + if (do_print_encoded) my_b_printf(&cache, "BINLOG '\n"); - print_base64(&cache, print_event_info, FALSE); + + print_base64(&cache, print_event_info, do_print_encoded); + + if (do_print_encoded) + my_b_printf(&cache, "'%s\n", print_event_info->delimiter); + print_event_info->printed_fd_event= TRUE; } DBUG_VOID_RETURN; @@ -10474,12 +10482,108 @@ void Rows_log_event::pack_info(Protocol *protocol) #endif #ifdef MYSQL_CLIENT +void copy_cache_to_file_wrapped(FILE *file, + PRINT_EVENT_INFO *print_event_info, + IO_CACHE *body, + bool do_print_encoded) +{ + uint n_frag= 1; + const char* before_frag= NULL; + char* after_frag= NULL; + char* after_last= NULL; + char* final_per_frag= NULL; + /* + 2 fragments can always represent near 1GB row-based + base64-encoded event as two strings each of size less than + max(max_allowed_packet). Greater number of fragments does not + save from potential need to tweak (increase) @@max_allowed_packet + before to process the fragments. So 2 is safe and enough. + */ + const char fmt_last_frag2[]= + "\nBINLOG DEFRAGMENT(@binlog_fragment_0, @binlog_fragment_1)%s\n"; + const char fmt_last_per_frag[]= "\nSET @binlog_fragment_%%d = NULL%s\n"; + const char fmt_before_frag[]= "\nSET @binlog_fragment_%d ='\n"; + /* + Buffer to pass to copy_cache_frag_to_file_and_reinit to + compute formatted strings according to specifiers. + The sizes may depend on an actual fragment number size in terms of decimal + signs so its maximum is estimated (not precisely yet safely) below. + */ + char buf[(sizeof(fmt_last_frag2) + sizeof(fmt_last_per_frag)) + + ((sizeof(n_frag) * 8)/3 + 1) // decimal index + + sizeof(print_event_info->delimiter) + 3]; // delim, \n and 0. 
+ + if (do_print_encoded) + { + after_frag= (char*) my_malloc(sizeof(buf), MYF(MY_WME)); + sprintf(after_frag, "'%s\n", print_event_info->delimiter); + if (my_b_tell(body) > opt_binlog_rows_event_max_encoded_size) + n_frag= 2; + if (n_frag > 1) + { + before_frag= fmt_before_frag; + after_last= (char*) my_malloc(sizeof(buf), MYF(MY_WME)); + sprintf(after_last, fmt_last_frag2, (char*) print_event_info->delimiter); + final_per_frag= (char*) my_malloc(sizeof(buf), MYF(MY_WME)); + sprintf(final_per_frag, fmt_last_per_frag, + (char*) print_event_info->delimiter); + } + else + { + before_frag= "\nBINLOG '\n"; + } + } + if (copy_cache_frag_to_file_and_reinit(body, file, n_frag, + before_frag, after_frag, + after_last, final_per_frag, buf)) + { + body->error= -1; + goto err; + } + +err: + my_free(after_frag); + my_free(after_last); + my_free(final_per_frag); +} + +/* + The function invokes base64 encoder to run on the current + event string and store the result into two caches. + When the event ends the current statement the caches are is copied into + the argument file. + Copying is also concerned how to wrap the event, specifically to produce + a valid SQL syntax. + When the encoded data size is within max(MAX_ALLOWED_PACKET) + a regular BINLOG query is composed. Otherwise it is build as fragmented + + SET @binlog_fragment_0='...'; + SET @binlog_fragment_1='...'; + BINLOG DEFRAGMENT(@binlog_fragment_0, @binlog_fragment_1); + + where fragments are represented by a sequence of "indexed" user + variables. + Two more statements are composed as well + + SET @binlog_fragment_0=NULL; + SET @binlog_fragment_1=NULL; + + to promptly release memory. + + NOTE. + If any changes made don't forget to duplicate them to + Old_rows_log_event as long as it's supported. +*/ void Rows_log_event::print_helper(FILE *file, PRINT_EVENT_INFO *print_event_info, char const *const name) { IO_CACHE *const head= &print_event_info->head_cache; IO_CACHE *const body= &print_event_info->body_cache; + bool do_print_encoded= + print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS && + !print_event_info->short_form; + if (!print_event_info->short_form) { bool const last_stmt_event= get_flags(STMT_END_F); @@ -10487,13 +10591,17 @@ void Rows_log_event::print_helper(FILE *file, my_b_printf(head, "\t%s: table id %lu%s\n", name, m_table_id, last_stmt_event ? " flags: STMT_END_F" : ""); - print_base64(body, print_event_info, !last_stmt_event); + print_base64(body, print_event_info, do_print_encoded); } if (get_flags(STMT_END_F)) { - copy_event_cache_to_file_and_reinit(head, file); - copy_event_cache_to_file_and_reinit(body, file); + if (copy_event_cache_to_file_and_reinit(head, file)) + { + head->error= -1; + return; + } + copy_cache_to_file_wrapped(file, print_event_info, body, do_print_encoded); } } #endif @@ -11352,7 +11460,9 @@ void Table_map_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info) m_dbnam, m_tblnam, m_table_id, ((m_flags & TM_BIT_HAS_TRIGGERS_F) ? 
" (has triggers)" : "")); - print_base64(&print_event_info->body_cache, print_event_info, TRUE); + print_base64(&print_event_info->body_cache, print_event_info, + print_event_info->base64_output_mode != + BASE64_OUTPUT_DECODE_ROWS); copy_event_cache_to_file_and_reinit(&print_event_info->head_cache, file); } } diff --git a/sql/log_event.h b/sql/log_event.h index 90900f63533..3c58ab8e1a9 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -1157,7 +1157,7 @@ class Log_event void print_header(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info, bool is_more); void print_base64(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info, - bool is_more); + bool do_print_encoded_base64); #endif /* read_log_event() functions read an event from a binlog or relay @@ -4891,15 +4891,43 @@ class Ignorable_log_event : public Log_event { virtual int get_data_size() { return IGNORABLE_HEADER_LEN; } }; +#ifdef MYSQL_CLIENT +void copy_cache_to_file_wrapped(FILE *file, + PRINT_EVENT_INFO *print_event_info, + IO_CACHE *body, + bool do_print_encoded); +#endif static inline bool copy_event_cache_to_file_and_reinit(IO_CACHE *cache, FILE *file) { - return - my_b_copy_to_file(cache, file) || + return + my_b_copy_to_file_frag(cache, file, 1, NULL, NULL, NULL, NULL, NULL) || reinit_io_cache(cache, WRITE_CACHE, 0, FALSE, TRUE); } + +/** + Copying of 'cache' content to 'file' in steps of the number of + fragments as specified by 'n_frag'. Other arguments enables wrapping + of the fragments and total. See more in my_b_copy_to_file_frag() header comments. +*/ +inline bool copy_cache_frag_to_file_and_reinit(IO_CACHE *cache, + FILE *file, + uint n_frag, + const char* before_frag, + const char* after_frag, + const char* after_last, + const char* final_per_frag, + char* buf) +{ + return + my_b_copy_to_file_frag(cache, file, n_frag, before_frag, after_frag, + after_last, final_per_frag, buf) || + reinit_io_cache(cache, WRITE_CACHE, 0, FALSE, TRUE); +} + + #ifdef MYSQL_SERVER /***************************************************************************** diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index d2b4470bbf9..66b3ebca7ea 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -1850,12 +1850,17 @@ void Old_rows_log_event::pack_info(Protocol *protocol) #ifdef MYSQL_CLIENT +/* Method duplicates Rows_log_event's one */ void Old_rows_log_event::print_helper(FILE *file, PRINT_EVENT_INFO *print_event_info, char const *const name) { IO_CACHE *const head= &print_event_info->head_cache; IO_CACHE *const body= &print_event_info->body_cache; + bool do_print_encoded= + print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS && + !print_event_info->short_form; + if (!print_event_info->short_form) { bool const last_stmt_event= get_flags(STMT_END_F); @@ -1863,13 +1868,17 @@ void Old_rows_log_event::print_helper(FILE *file, my_b_printf(head, "\t%s: table id %lu%s\n", name, m_table_id, last_stmt_event ? 
" flags: STMT_END_F" : ""); - print_base64(body, print_event_info, !last_stmt_event); + print_base64(body, print_event_info, do_print_encoded); } if (get_flags(STMT_END_F)) { - copy_event_cache_to_file_and_reinit(head, file); - copy_event_cache_to_file_and_reinit(body, file); + if (copy_event_cache_to_file_and_reinit(head, file)) + { + head->error= -1; + return; + } + copy_cache_to_file_wrapped(file, print_event_info, body, do_print_encoded); } } #endif diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index 91cf038907e..b15d72e036a 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -28,6 +28,73 @@ // START_EVENT_V3, // Log_event_type, // Log_event + +/* + Copy fragments into the standard placeholder thd->lex->comment.str. + + Compute the size of the (still) encoded total, + allocate and then copy fragments one after another. + The size can exceed max(max_allowed_packet) which is not a + problem as no String instance is created off this char array. + + Return 0 at success, -1 otherwise. +*/ +int binlog_defragment(THD *thd) +{ + LEX_STRING *curr_frag_name; + + thd->lex->comment.length= 0; + thd->lex->comment.str= NULL; + /* compute the total size */ + for (uint i= 0; i < thd->lex->fragmented_binlog_event.n_frag; i++) + { + user_var_entry *entry; + + curr_frag_name= &thd->lex->fragmented_binlog_event.frag_name[i]; + entry= + (user_var_entry*) my_hash_search(&thd->user_vars, + (uchar*) curr_frag_name->str, + curr_frag_name->length); + if (!entry || entry->type != STRING_RESULT) + { + my_printf_error(ER_BASE64_DECODE_ERROR, + "%s: BINLOG fragment user " + "variable '%s' has unexpectedly no value", MYF(0), + ER_THD(thd, ER_BASE64_DECODE_ERROR), curr_frag_name->str); + return -1; + } + thd->lex->comment.length += entry->length; + } + thd->lex->comment.str= // to be freed by the caller + (char *) my_malloc(thd->lex->comment.length, MYF(MY_WME)); + if (!thd->lex->comment.str) + { + my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1); + return -1; + } + + /* fragments are merged into allocated buf */ + size_t gathered_length= 0; + for (uint i= 0; i < thd->lex->fragmented_binlog_event.n_frag; i++) + { + user_var_entry *entry; + + curr_frag_name= &thd->lex->fragmented_binlog_event.frag_name[i]; + entry= + (user_var_entry*) my_hash_search(&thd->user_vars, + (uchar*) curr_frag_name->str, + curr_frag_name->length); + memcpy(thd->lex->comment.str + gathered_length, + entry->value, entry->length); + + gathered_length += entry->length; + } + DBUG_ASSERT(gathered_length == thd->lex->comment.length); + + return 0; +} + + /** Execute a BINLOG statement. @@ -53,14 +120,6 @@ void mysql_client_binlog_statement(THD* thd) if (check_global_access(thd, SUPER_ACL)) DBUG_VOID_RETURN; - size_t coded_len= thd->lex->comment.length; - if (!coded_len) - { - my_error(ER_SYNTAX_ERROR, MYF(0)); - DBUG_VOID_RETURN; - } - size_t decoded_len= base64_needed_decoded_length(coded_len); - /* option_bits will be changed when applying the event. But we don't expect it be changed permanently after BINLOG statement, so backup it first. 
@@ -81,7 +140,8 @@ void mysql_client_binlog_statement(THD* thd) int err; Relay_log_info *rli; rpl_group_info *rgi; - + char *buf= NULL; + size_t coded_len= 0, decoded_len= 0; rli= thd->rli_fake; if (!rli) { @@ -102,15 +162,12 @@ void mysql_client_binlog_statement(THD* thd) rgi->thd= thd; const char *error= 0; - char *buf= (char *) my_malloc(decoded_len, MYF(MY_WME)); Log_event *ev = 0; /* Out of memory check */ - if (!(rli && - rli->relay_log.description_event_for_exec && - buf)) + if (!(rli && rli->relay_log.description_event_for_exec)) { my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1); /* needed 1 bytes */ goto end; @@ -119,6 +176,25 @@ void mysql_client_binlog_statement(THD* thd) rli->sql_driver_thd= thd; rli->no_storage= TRUE; + if (thd->lex->fragmented_binlog_event.n_frag > 0) + { + if (binlog_defragment(thd)) + goto end; + } + + if (!(coded_len= thd->lex->comment.length)) + { + my_error(ER_SYNTAX_ERROR, MYF(0)); + DBUG_VOID_RETURN; + } + + decoded_len= base64_needed_decoded_length(coded_len); + if (!(buf= (char *) my_malloc(decoded_len, MYF(MY_WME)))) + { + my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1); + goto end; + } + for (char const *strptr= thd->lex->comment.str ; strptr < thd->lex->comment.str + thd->lex->comment.length ; ) { @@ -272,6 +348,10 @@ void mysql_client_binlog_statement(THD* thd) my_ok(thd); end: + if (thd->lex->fragmented_binlog_event.n_frag > 0) + { + my_free(thd->lex->comment.str); + } thd->variables.option_bits= thd_options; rgi->slave_close_thread_tables(thd); my_free(buf); diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 085ad1a4b3b..806da72c4fa 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -746,6 +746,7 @@ void lex_start(THD *thd) lex->var_list.empty(); lex->stmt_var_list.empty(); lex->proc_list.elements=0; + lex->fragmented_binlog_event.n_frag= 0; lex->is_lex_started= TRUE; DBUG_VOID_RETURN; @@ -2886,6 +2887,7 @@ LEX::LEX() gtid_domain_static_buffer, initial_gtid_domain_buffer_size, initial_gtid_domain_buffer_size, 0); + fragmented_binlog_event.n_frag= 0; } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 05e31c28277..76929c424e1 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -2748,6 +2748,16 @@ struct LEX: public Query_tables_list Item *limit_rows_examined; ulonglong limit_rows_examined_cnt; /** + Describes fragmented version of BINLOG query: the total number of + the fragments and their names as ones of the user variables. 
+ */ + struct st_fragmented_binlog_event + { + static const uint max_frags=2; + uint n_frag; + LEX_STRING frag_name[max_frags]; + } fragmented_binlog_event; + /** Holds a set of domain_ids for deletion at FLUSH..DELETE_DOMAIN_ID */ DYNAMIC_ARRAY delete_gtid_domain; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 6ae65e0c50f..c15a3e98110 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1179,6 +1179,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token DECLARE_SYM /* SQL-2003-R */ %token DEFAULT /* SQL-2003-R */ %token DEFINER_SYM +%token DEFRAGMENT_SYM /* MYSQL */ %token DELAYED_SYM %token DELAY_KEY_WRITE_SYM %token DELETE_SYM /* SQL-2003-R */ @@ -7951,7 +7952,15 @@ binlog_base64_event: Lex->sql_command = SQLCOM_BINLOG_BASE64_EVENT; Lex->comment= $2; } - ; + | + BINLOG_SYM DEFRAGMENT_SYM '(' '@' ident_or_text ',' '@' ident_or_text ')' + { + Lex->sql_command = SQLCOM_BINLOG_BASE64_EVENT; + Lex->fragmented_binlog_event.n_frag= 2; + Lex->fragmented_binlog_event.frag_name[0]= $5; + Lex->fragmented_binlog_event.frag_name[1]= $8; + } + ; check_view_or_table: table_or_tables table_list opt_mi_check_type @@ -14038,6 +14047,7 @@ keyword_sp: | DATE_SYM {} | DAY_SYM {} | DEFINER_SYM {} + | DEFRAGMENT_SYM {} | DELAY_KEY_WRITE_SYM {} | DES_KEY_FILE {} | DIAGNOSTICS_SYM {} diff --git a/unittest/sql/mf_iocache-t.cc b/unittest/sql/mf_iocache-t.cc index 8f97745f0fc..520446a7a34 100644 --- a/unittest/sql/mf_iocache-t.cc +++ b/unittest/sql/mf_iocache-t.cc @@ -286,10 +286,58 @@ void mdev14014() } +void mdev10963() +{ + int res; + int n_frag_max= 64; + int n_checks= 8; + uchar buf[1024 * 512]; + FILE *file; + myf my_flags= MYF(MY_WME); + const char *file_name="cache.log"; + + memset(buf, FILL, sizeof(buf)); + diag("MDEV-10963 Fragmented BINLOG query"); + + init_io_cache_encryption(); + + /* copying source */ + res= open_cached_file(&info, 0, 0, CACHE_SIZE, 0); + ok(res == 0, "open_cached_file" INFO_TAIL); + res= my_b_write(&info, buf, sizeof(buf)); + + ulong saved_pos= my_b_tell(&info); + ok(res == 0 && saved_pos == sizeof(buf), "cache is written"); + + /* destination */ + file= my_fopen(file_name, O_RDWR | O_TRUNC | O_CREAT, my_flags); + ok(my_fileno(file) > 0, "opened file fd = %d", my_fileno(file)); + + /* + For number of 'n_checks' times verify copying with random fragment + quantity which also cover cases when the fragment size is less than the + cache read buffer size. + */ + for (; n_checks; n_checks--, rewind(file)) + { + int c_frag= rand() % n_frag_max + 1; + + res= my_b_copy_to_file_frag(&info, file, c_frag, + NULL, NULL, NULL, NULL, NULL); + ok(res == 0, "cache copied to file" INFO_TAIL); + ok(my_ftell(file, my_flags) == sizeof(buf), "file written in %d fragments", c_frag); + res= reinit_io_cache(&info, WRITE_CACHE, saved_pos, 0, 0); + ok(res == 0 && my_b_tell(&info) == sizeof(buf), "write cache state restored"); + } + close_cached_file(&info); + my_fclose(file, my_flags); + my_delete(file_name, MYF(MY_WME)); +} + int main(int argc __attribute__((unused)),char *argv[]) { MY_INIT(argv[0]); - plan(51); + plan(78); /* temp files with and without encryption */ encrypt_tmp_files= 1; @@ -306,6 +354,7 @@ int main(int argc __attribute__((unused)),char *argv[]) encrypt_tmp_files= 0; mdev14014(); + mdev10963(); my_end(0); return exit_status();
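As a reading aid (not from the patch), the server-side binlog_defragment()
added in sql/sql_binlog.cc above concatenates the named user-variable
fragments back into one base64 string before decoding, in two passes: first
summing the lengths, then copying into a single allocation. A simplified
standalone sketch of that step, using std::map and std::string in place of the
server's user-variable hash and error reporting (all names below are
illustrative):

  #include <cstdio>
  #include <map>
  #include <optional>
  #include <string>

  /* Stand-in for the session's @user_variable table. */
  using user_vars_t = std::map<std::string, std::string>;

  /* Concatenate the named fragments in order, mirroring what the server
     does before handing the combined base64 text to the decoder. Returns
     nothing if a fragment variable is missing (the server raises
     ER_BASE64_DECODE_ERROR in that case). */
  static std::optional<std::string>
  defragment(const user_vars_t &vars, const std::string names[], size_t n_frag)
  {
    std::string total;
    size_t total_len= 0;
    for (size_t i= 0; i < n_frag; i++)          /* first pass: compute size */
    {
      auto it= vars.find(names[i]);
      if (it == vars.end())
        return std::nullopt;
      total_len += it->second.size();
    }
    total.reserve(total_len);                   /* single allocation, as in the patch */
    for (size_t i= 0; i < n_frag; i++)          /* second pass: copy fragments */
      total += vars.at(names[i]);
    return total;
  }

  int main()
  {
    user_vars_t vars= { {"binlog_fragment_0", "AAAA"},
                        {"binlog_fragment_1", "BBBB"} };
    const std::string names[]= { "binlog_fragment_0", "binlog_fragment_1" };
    if (auto total= defragment(vars, names, 2))
      printf("defragmented payload: %s\n", total->c_str());
    return 0;
  }

In the server the missing-fragment case raises ER_BASE64_DECODE_ERROR, and the
combined buffer (thd->lex->comment.str) is freed by the caller once the BINLOG
statement has been applied.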