Re: ca64ddcc709: MDEV-31646 Online alter applies binlog cache limit to cache writes
Hi, Nikita, This is a review of all the three commits On Jul 20, Nikita Malyavin wrote:
revision-id: ca64ddcc709 (mariadb-11.0.1-144-gca64ddcc709) parent(s): f34c7419cb8 author: Nikita Malyavin committer: Nikita Malyavin timestamp: 2023-07-19 02:31:32 +0400 message:
MDEV-31646 Online alter applies binlog cache limit to cache writes
diff --git a/sql/log.cc b/sql/log.cc index afd0643fe82..7466d63d325 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2291,8 +2294,14 @@ int binlog_log_row_online_alter(TABLE* table, const uchar *before_record, before_record, after_record);
table->rpl_write_set= old_rpl_write_set; + thd->pop_internal_handler(); + + if (unlikely(error)) + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_DISK_FULL, + "Broken online alter log. " + "ALTER TABLE will finish with error.");
I don't think so. Why would a DML statement get a warning about the concurrently running ALTER TABLE? It didn't do anything wrong, I don't think there should be a warning here Like, someone is running a series of DML statements, or the same statement many times. At some point another connection runs ALTER TABLE and suddenly those perfectly good DML statements start spewing out warnings. ALTER ends and all is clear again - looks wrong to me
- return unlikely(error) ? HA_ERR_RBR_LOGGING_FAILED : 0; + return 0; }
static void @@ -2592,7 +2601,7 @@ void Event_log::set_write_error(THD *thd, bool is_transactional) DBUG_VOID_RETURN; }
-bool Event_log::check_write_error(THD *thd) +bool MYSQL_BIN_LOG::check_write_error(THD *thd)
What's the difference? it's a static method anyway, FWIW it could be outside of any class. What's the point of moving it from one class to another?
{ DBUG_ENTER("MYSQL_BIN_LOG::check_write_error");
@@ -3806,9 +3815,9 @@ bool MYSQL_BIN_LOG::open_index_file(const char *index_file_name_arg, }
-bool Event_log::open(enum cache_type io_cache_type_arg) +bool Event_log::open(enum cache_type io_cache_type_arg, size_t buffer_size) { - bool error= init_io_cache(&log_file, -1, LOG_BIN_IO_SIZE, io_cache_type_arg, + bool error= init_io_cache(&log_file, -1, buffer_size, io_cache_type_arg,
why? it's only used by online alter, where would you need a different buffer size? only in your DBUG_EXECUTE_IF?
0, 0, MYF(MY_WME | MY_NABP | MY_WAIT_IF_FULL));
log_state= LOG_OPENED; @@ -6544,7 +6562,8 @@ Event_log::flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event, if (writer.write(pending)) { set_write_error(thd, is_transactional); - if (check_write_error(thd) && cache_data && + if (dynamic_cast<MYSQL_BIN_LOG*>(this) &&
why?
+ MYSQL_BIN_LOG::check_write_error(thd) && cache_data && stmt_has_updated_non_trans_table(thd)) cache_data->set_incident(); delete pending; @@ -7721,8 +7749,11 @@ static int binlog_online_alter_end_trans(THD *thd, bool all, bool commit) { DBUG_ASSERT(cache.cache_log.type != READ_CACHE); mysql_mutex_lock(binlog->get_log_lock()); - error= binlog->write_cache(thd, &cache.cache_log); + error= binlog->write_cache_raw(thd, &cache.cache_log); mysql_mutex_unlock(binlog->get_log_lock()); + + if (unlikely(error)) + binlog->set_write_error(my_errno);
shouldn't binlog->write_cache() call set_write_error() internally? why would the caller have to do it?
} } else if (!commit) // rollback @@ -7736,14 +7767,14 @@ static int binlog_online_alter_end_trans(THD *thd, bool all, bool commit) cache.store_prev_position(); }
+ thd->pop_internal_handler();
if (error) { - my_error(ER_ERROR_ON_WRITE, MYF(ME_ERROR_LOG), - binlog->get_name(), errno); - binlog_online_alter_cleanup(thd->online_alter_cache_list, - is_ending_transaction); - DBUG_RETURN(error); + push_warning_printf(thd, Sql_state_errno_level::WARN_LEVEL_WARN, + ER_ERROR_ON_WRITE, ER_THD(thd, ER_ERROR_ON_WRITE), + binlog->get_name(), errno); + DBUG_ASSERT(binlog->get_write_error());
same comment as above about not showing ALTER problems in concurrent threads
} }
diff --git a/sql/log.h b/sql/log.h index 83e323cb6fb..39512b01991 100644 --- a/sql/log.h +++ b/sql/log.h @@ -444,17 +445,20 @@ class Event_log: public MYSQL_LOG */ class Cache_flip_event_log: public Event_log { IO_CACHE alt_buf; +IF_DBUG(public:,)
yuck I don't see you using online_write_error or ref_count in DBUG macros
IO_CACHE *current, *alt; std::atomic<uint> ref_count; + std::atomic<int> online_write_error; public:
Cache_flip_event_log() : Event_log(), alt_buf{}, - current(&log_file), alt(&alt_buf), ref_count(1) {} - bool open(enum cache_type io_cache_type_arg) + current(&log_file), alt(&alt_buf), ref_count(1), + online_write_error(0) {} + bool open(size_t buffer_size)
looks like Cache_flip_event_log::open() doesn't need any arguments at all
{ log_file.dir= mysql_tmpdir; alt_buf.dir= log_file.dir; - bool res= Event_log::open(io_cache_type_arg); + bool res= Event_log::open(WRITE_CACHE); if (res) return res;
diff --git a/sql/log_event.cc b/sql/log_event.cc index 10180a95d99..0c4c284a40f 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -761,16 +761,21 @@ Log_event::Log_event(const uchar *buf,
int Log_event::read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, - enum enum_binlog_checksum_alg checksum_alg_arg) + enum enum_binlog_checksum_alg checksum_alg_arg, + size_t max_allowed_packet) { ulong data_len; char buf[LOG_EVENT_MINIMAL_HEADER_LEN]; uchar ev_offset= packet->length(); #if !defined(MYSQL_CLIENT) - THD *thd=current_thd; - ulong max_allowed_packet= thd ? thd->slave_thread ? slave_max_allowed_packet - : thd->variables.max_allowed_packet - : ~(uint)0; + if (max_allowed_packet == 0)
may be better pass THD as an argument? or even correct value of max_allowed_packet. As ulong max_allowed_packet(THD *thd) { return thd ? ... : ~(uint)0; } on the second thought, do you need to ignore max_allowed_packet here for online ALTER? Is there no event size limit when the event is written into binlog?
+ { + THD *thd=current_thd; + max_allowed_packet= thd ? thd->slave_thread + ? slave_max_allowed_packet + : thd->variables.max_allowed_packet + : ~(uint)0; + } #endif DBUG_ENTER("Log_event::read_log_event(IO_CACHE*,String*...)");
Regards, Sergei VP of MariaDB Server Engineering and security@mariadb.org
Hi! Hope you did it with piña colada under the palm tree;-) Regarding MDEV-31646 preserve DMLs in case of online binlog fault: Its changes are shown there as a part of ca64ddcc709, but it's important to point out that it's a separate thing: It is the commit that introduces error reporting to the ALTER thread, and it's practically important only for a 32-bit build. I made it, but not sure if it's worth it. It complicates the architecture and brings many ad-hoc checks into the code. What do you think? Though if some multi-writer IO_CACHE replacement is finally deployed, it may remove it all, since the 4GB restriction will not take place anymore (the transaction cache will still be an issue though). As an alternative, we could just fix IO_CACHE to support files larger than SIZE_T_MAX, by making end_of_file ulonglong, and all the corresponding changes. On Thu, 20 Jul 2023 at 17:59, Sergei Golubchik <serg@mariadb.org> wrote:
Hi, Nikita,
This is a review of all the three commits
On Jul 20, Nikita Malyavin wrote:
revision-id: ca64ddcc709 (mariadb-11.0.1-144-gca64ddcc709) parent(s): f34c7419cb8 author: Nikita Malyavin committer: Nikita Malyavin timestamp: 2023-07-19 02:31:32 +0400 message:
MDEV-31646 Online alter applies binlog cache limit to cache writes
diff --git a/sql/log.cc b/sql/log.cc index afd0643fe82..7466d63d325 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2291,8 +2294,14 @@ int binlog_log_row_online_alter(TABLE* table, const uchar *before_record, before_record, after_record);
table->rpl_write_set= old_rpl_write_set; + thd->pop_internal_handler(); + + if (unlikely(error)) + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_DISK_FULL, + "Broken online alter log. " + "ALTER TABLE will finish with error.");
I don't think so. Why would a DML statement get a warning about the concurrently running ALTER TABLE? It didn't do anything wrong, I don't think there should be a warning here
Like, someone is running a series of DML statements, or the same statement many times. At some point another connection runs ALTER TABLE and suddenly those perfectly good DML statements start spewing out warnings. ALTER ends and all is clear again - looks wrong to me
Honestly, I added this printf only to pass buildbot (unused `error`), and I don't want to leave the result unhandled. But you see, if, for example, the server has the binlog enabled, this perfectly good DML will simply fail! And this warning indicates that some system limit has been reached.
- return unlikely(error) ? HA_ERR_RBR_LOGGING_FAILED : 0; + return 0; }
static void @@ -2592,7 +2601,7 @@ void Event_log::set_write_error(THD *thd, bool is_transactional) DBUG_VOID_RETURN; }
-bool Event_log::check_write_error(THD *thd) +bool MYSQL_BIN_LOG::check_write_error(THD *thd)
What's the difference? it's a static method anyway, FWIW it could be outside of any class. What's the point of moving it from one class to another?
check_write_error is only relevant to MYSQL_BIN_LOG (in the way it's written). It ensures that a write error is set in thd (so it's closer to ensure_write_error). I have returned the declaration to where it was, maybe I should even remove the change from the initial refactoring commit. Also I think it's very much binlog-specific, and it's creepily designed, so I want it to be kept closer to its specifics.
@@ -6544,7 +6562,8 @@ Event_log::flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event,
if (writer.write(pending)) { set_write_error(thd, is_transactional); - if (check_write_error(thd) && cache_data && + if (dynamic_cast<MYSQL_BIN_LOG*>(this) &&
why?
+ MYSQL_BIN_LOG::check_write_error(thd) && cache_data &&
stmt_has_updated_non_trans_table(thd)) cache_data->set_incident();
Yeah, probably it's better to make a virtual function... but the reason was that incident events are also a binlog-specific thing. And that check_write_error is moved to MYSQL_BIN_LOG as static. I'll improve it.
{ DBUG_ENTER("MYSQL_BIN_LOG::check_write_error");
@@ -3806,9 +3815,9 @@ bool MYSQL_BIN_LOG::open_index_file(const char *index_file_name_arg, }
-bool Event_log::open(enum cache_type io_cache_type_arg) +bool Event_log::open(enum cache_type io_cache_type_arg, size_t buffer_size) { - bool error= init_io_cache(&log_file, -1, LOG_BIN_IO_SIZE, io_cache_type_arg, + bool error= init_io_cache(&log_file, -1, buffer_size, io_cache_type_arg,
why? it's only used by online alter, where would you need a different buffer size? only in your DBUG_EXECUTE_IF?
Yes, only there.
0, 0, MYF(MY_WME | MY_NABP |
MY_WAIT_IF_FULL));
log_state= LOG_OPENED;
@@ -7721,8 +7749,11 @@ static int binlog_online_alter_end_trans(THD *thd, bool all, bool commit) { DBUG_ASSERT(cache.cache_log.type != READ_CACHE); mysql_mutex_lock(binlog->get_log_lock()); - error= binlog->write_cache(thd, &cache.cache_log); + error= binlog->write_cache_raw(thd, &cache.cache_log); mysql_mutex_unlock(binlog->get_log_lock()); + + if (unlikely(error)) + binlog->set_write_error(my_errno);
shouldn't binlog->write_cache() call set_write_error() internally? why would the caller have to do it?
Would it bother you if I move it to mf_iocache as something like my_b_copy_cache?
diff --git a/sql/log.h b/sql/log.h index 83e323cb6fb..39512b01991 100644 --- a/sql/log.h +++ b/sql/log.h @@ -444,17 +445,20 @@ class Event_log: public MYSQL_LOG */ class Cache_flip_event_log: public Event_log { IO_CACHE alt_buf; +IF_DBUG(public:,)
yuck I don't see you using online_write_error or ref_count in DBUG macros
IO_CACHE *current, *alt;
You mean, you want "private" set back here? Sure, I don't access either of the following outside.
std::atomic<uint> ref_count; + std::atomic<int> online_write_error; public:
Cache_flip_event_log() : Event_log(), alt_buf{}, - current(&log_file), alt(&alt_buf), ref_count(1) {} - bool open(enum cache_type io_cache_type_arg) + current(&log_file), alt(&alt_buf), ref_count(1), + online_write_error(0) {} + bool open(size_t buffer_size)
looks like Cache_flip_event_log::open() doesn't need any arguments at all
I don't want to put DBUG_EXECUTE_IF inside, because I'll have to either #include it in log.h, or move Cache_flip_event_log::open in some .cc.
diff --git a/sql/log_event.cc b/sql/log_event.cc index 10180a95d99..0c4c284a40f 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -761,16 +761,21 @@ Log_event::Log_event(const uchar *buf,
int Log_event::read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, - enum enum_binlog_checksum_alg checksum_alg_arg) + enum enum_binlog_checksum_alg checksum_alg_arg, + size_t max_allowed_packet) { ulong data_len; char buf[LOG_EVENT_MINIMAL_HEADER_LEN]; uchar ev_offset= packet->length(); #if !defined(MYSQL_CLIENT) - THD *thd=current_thd; - ulong max_allowed_packet= thd ? thd->slave_thread ? slave_max_allowed_packet - : thd->variables.max_allowed_packet - : ~(uint)0; + if (max_allowed_packet == 0)
may be better pass THD as an argument? or even correct value of max_allowed_packet. As
ulong max_allowed_packet(THD *thd) { return thd ? ... : ~(uint)0; }
Makes sense. That'll also remove the above ifdef, which I was unsure of, whether to remove it, or not.
on the second thought, do you need to ignore max_allowed_packet here for online ALTER? Is there no event size limit when the event is written into binlog?
I haven't seen such. If it's there, better to ignore it as well. -- Yours truly, Nikita Malyavin
Hi, Nikita, On Jul 25, Nikita Malyavin wrote:
Regarding MDEV-31646 preserve DMLs in case of online binlog fault: Its changes are shown there as a part of ca64ddcc709, but it's important to point out that it's a separate thing: It is the commit that introduces error reporting to the ALTER thread, and it's practically important only for a 32-bit build. I made it, but not sure if it's worth it. It complicates the architecture and brings many ad-hoc checks into the code.
What do you think?
let's drop this code then, please create a new MDEV about IO_CACHE on 32-bit.
On Thu, 20 Jul 2023 at 17:59, Sergei Golubchik <serg@mariadb.org> wrote:
On Jul 20, Nikita Malyavin wrote:
revision-id: ca64ddcc709 (mariadb-11.0.1-144-gca64ddcc709) parent(s): f34c7419cb8 author: Nikita Malyavin committer: Nikita Malyavin timestamp: 2023-07-19 02:31:32 +0400 message:
MDEV-31646 Online alter applies binlog cache limit to cache writes
diff --git a/sql/log.cc b/sql/log.cc index afd0643fe82..7466d63d325 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2291,8 +2294,14 @@ int binlog_log_row_online_alter(TABLE* table, const uchar *before_record, before_record, after_record);
table->rpl_write_set= old_rpl_write_set; + thd->pop_internal_handler(); + + if (unlikely(error)) + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_DISK_FULL, + "Broken online alter log. " + "ALTER TABLE will finish with error.");
I don't think so. Why would a DML statement get a warning about the concurrently running ALTER TABLE? It didn't do anything wrong, I don't think there should be a warning here
Like, someone is running a series of DML statements, or the same statement many times. At some point another connection runs ALTER TABLE and suddenly those perfectly good DML statements start spewing out warnings. ALTER ends and all is clear again - looks wrong to me
Honestly I added this printf only to pass buildbot (unused `error`), and I don't want to leave the result unhandled.
The result should be handled, yes. One possible way to react on such a failure in the DML thread is to stop writing to table->online_alter_cache. May be even destroy it right away.
But you see, if for example server has binlog enabled, this perfect DML will simply fail! And this warning is about that some system limit is reached.
Yes. It's a warning that some other statement in a different thread, started by a different user or application, has failed. This user or application cannot do anything about it; likely they cannot even use this information in any sensible way. In this case I don't see a need to warn the user about what some other user is doing.
- return unlikely(error) ? HA_ERR_RBR_LOGGING_FAILED : 0; + return 0; }
static void @@ -3806,9 +3815,9 @@ bool MYSQL_BIN_LOG::open_index_file(const char *index_file_name_arg, }
-bool Event_log::open(enum cache_type io_cache_type_arg) +bool Event_log::open(enum cache_type io_cache_type_arg, size_t buffer_size) { - bool error= init_io_cache(&log_file, -1, LOG_BIN_IO_SIZE, io_cache_type_arg, + bool error= init_io_cache(&log_file, -1, buffer_size, io_cache_type_arg,
why? it's only used by online alter, where would you need a different buffer size? only in your DBUG_EXECUTE_IF?
Yes, only there.
in that case, it'd be cleaner not to add a new, rather unused, argument everywhere. if you need it for debugging, you can add DBUG_EXECUTE_IF() directly into Event_log::open().
0, 0, MYF(MY_WME | MY_NABP | MY_WAIT_IF_FULL));
log_state= LOG_OPENED;
@@ -7721,8 +7749,11 @@ static int binlog_online_alter_end_trans(THD *thd, bool all, bool commit) { DBUG_ASSERT(cache.cache_log.type != READ_CACHE); mysql_mutex_lock(binlog->get_log_lock()); - error= binlog->write_cache(thd, &cache.cache_log); + error= binlog->write_cache_raw(thd, &cache.cache_log); mysql_mutex_unlock(binlog->get_log_lock()); + + if (unlikely(error)) + binlog->set_write_error(my_errno);
shouldn't binlog->write_cache() call set_write_error() internally? why would the caller have to do it?
Would it bother you if I move it to mf_iocache as something like my_b_copy_cache?
Move what?
diff --git a/sql/log.h b/sql/log.h index 83e323cb6fb..39512b01991 100644 --- a/sql/log.h +++ b/sql/log.h @@ -444,17 +445,20 @@ class Event_log: public MYSQL_LOG std::atomic<uint> ref_count; + std::atomic<int> online_write_error; public:
Cache_flip_event_log() : Event_log(), alt_buf{}, - current(&log_file), alt(&alt_buf), ref_count(1) {} - bool open(enum cache_type io_cache_type_arg) + current(&log_file), alt(&alt_buf), ref_count(1), + online_write_error(0) {} + bool open(size_t buffer_size)
looks like Cache_flip_event_log::open() doesn't need any arguments at all
I don't want to put DBUG_EXECUTE_IF inside, because I'll have to either #include it in log.h, or move Cache_flip_event_log::open in some .cc.
Event_log::open() is already in some .cc, and that's the only place where you actually need this value, isn't it so? Regards, Sergei VP of MariaDB Server Engineering and security@mariadb.org
On Tue, 8 Aug 2023 at 22:35, Sergei Golubchik <serg@mariadb.org> wrote:
Hi, Nikita,
On Jul 25, Nikita Malyavin wrote:
Regarding MDEV-31646 preserve DMLs in case of online binlog fault: Its changes are shown there as a part of ca64ddcc709, but it's important to point out that it's a separate thing: It is the commit that introduces error reporting to the ALTER thread, and it's practically important only for a 32-bit build. I made it, but not sure if it's worth it. It complicates the architecture and brings many ad-hoc checks into the code.
What do you think?
let's drop this code then, please create a new MDEV about IO_CACHE on 32-bit.
And by the way, it's only applicable to systems with SIZEOF_OFF_T=4. I didn't know that yet when I wrote the response. Thus it makes even more sense. The rest of the comments are obsolete then. -- Yours truly, Nikita Malyavin
I found that one issue was left unaddressed: On Thu, 20 Jul 2023 at 17:59, Sergei Golubchik <serg@mariadb.org> wrote:
diff --git a/sql/log_event.cc b/sql/log_event.cc index 10180a95d99..0c4c284a40f 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -761,16 +761,21 @@ Log_event::Log_event(const uchar *buf,
int Log_event::read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, - enum enum_binlog_checksum_alg checksum_alg_arg) + enum enum_binlog_checksum_alg checksum_alg_arg, + size_t max_allowed_packet) { ulong data_len; char buf[LOG_EVENT_MINIMAL_HEADER_LEN]; uchar ev_offset= packet->length(); #if !defined(MYSQL_CLIENT) - THD *thd=current_thd; - ulong max_allowed_packet= thd ? thd->slave_thread ? slave_max_allowed_packet - : thd->variables.max_allowed_packet - : ~(uint)0; + if (max_allowed_packet == 0)
may be better pass THD as an argument? or even correct value of max_allowed_packet. As
ulong max_allowed_packet(THD *thd) { return thd ? ... : ~(uint)0; }
I thought about it, but let me avoid it this time. There are ~15 calls of both read_log_event functions, and it's hard to say what's called from where. I'd better leave this insane knot as it is now. Also as a bonus mysql_bin_log will not depend on an external max_allowed_packet, which is fixed as 1GiB.
on the second thought, do you need to ignore max_allowed_packet here for online ALTER? Is there no event size limit when the event is written into binlog?
+ { + THD *thd=current_thd; + max_allowed_packet= thd ? thd->slave_thread + ? slave_max_allowed_packet + : thd->variables.max_allowed_packet + : ~(uint)0; + } #endif DBUG_ENTER("Log_event::read_log_event(IO_CACHE*,String*...)");
Regards, Sergei VP of MariaDB Server Engineering and security@mariadb.org
-- Yours truly, Nikita Malyavin
Hi, Nikita, On Aug 09, Nikita Malyavin wrote:
diff --git a/sql/log_event.cc b/sql/log_event.cc index 10180a95d99..0c4c284a40f 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -761,16 +761,21 @@ Log_event::Log_event(const uchar *buf,
int Log_event::read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, - enum enum_binlog_checksum_alg checksum_alg_arg) + enum enum_binlog_checksum_alg checksum_alg_arg, + size_t max_allowed_packet) { ulong data_len; char buf[LOG_EVENT_MINIMAL_HEADER_LEN]; uchar ev_offset= packet->length(); #if !defined(MYSQL_CLIENT) - THD *thd=current_thd; - ulong max_allowed_packet= thd ? thd->slave_thread ? slave_max_allowed_packet - : thd->variables.max_allowed_packet - : ~(uint)0; + if (max_allowed_packet == 0)
may be better pass THD as an argument? or even correct value of max_allowed_packet. As
ulong max_allowed_packet(THD *thd) { return thd ? ... : ~(uint)0; }
I thought about it, but let me avoid it this time. There are ~15 calls of both read_log_event functions, and it's hard to say what's called from where. I'd better leave this insane knot as it is now.
Also as a bonus mysql_bin_log will not depend on an external max_allowed_packet, which is fixed as 1GiB.
still, I don't like a run-time condition for what should be a compile-time thing. Another way of solving it is static int read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, enum enum_binlog_checksum_alg checksum_alg_arg) { size_t max_packet= thd ? ... : ~0U; return read_log_event(file, packet, fdle, checksum_alg_arg, max_packet); } static int read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, enum enum_binlog_checksum_alg checksum_alg_arg, size_t max_allowed_packet);
on the second thought, do you need to ignore max_allowed_packet here for online ALTER? Is there no event size limit when the event is written into binlog?
+ { + THD *thd=current_thd; + max_allowed_packet= thd ? thd->slave_thread + ? slave_max_allowed_packet + : thd->variables.max_allowed_packet + : ~(uint)0; + } #endif DBUG_ENTER("Log_event::read_log_event(IO_CACHE*,String*...)");
Regards, Sergei VP of MariaDB Server Engineering and security@mariadb.org
Oh! I like this one more, too. Alright! On Thu, 10 Aug 2023 at 18:53, Sergei Golubchik <serg@mariadb.org> wrote:
Hi, Nikita,
diff --git a/sql/log_event.cc b/sql/log_event.cc index 10180a95d99..0c4c284a40f 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -761,16 +761,21 @@ Log_event::Log_event(const uchar *buf,
int Log_event::read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, - enum enum_binlog_checksum_alg checksum_alg_arg) + enum enum_binlog_checksum_alg checksum_alg_arg, + size_t max_allowed_packet) { ulong data_len; char buf[LOG_EVENT_MINIMAL_HEADER_LEN]; uchar ev_offset= packet->length(); #if !defined(MYSQL_CLIENT) - THD *thd=current_thd; - ulong max_allowed_packet= thd ? thd->slave_thread ? slave_max_allowed_packet - :
On Aug 09, Nikita Malyavin wrote: thd->variables.max_allowed_packet
- : ~(uint)0; + if (max_allowed_packet == 0)
may be better pass THD as an argument? or even correct value of max_allowed_packet. As
ulong max_allowed_packet(THD *thd) { return thd ? ... : ~(uint)0; }
I thought about it, but let me avoid it this time. There are ~15 calls of both read_log_event functions, and it's hard to say what's called from where. I'd better leave this insane knot as it is now.
Also as a bonus mysql_bin_log will not depend on an external max_allowed_packet, which is fixed as 1GiB.
still, I don't like a run-time condition for what should be a compile-time thing. Another way of solving it is
static int read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, enum enum_binlog_checksum_alg checksum_alg_arg) { size_t max_packet= thd ? ... : ~0U; return read_log_event(file, packet, fdle, checksum_alg_arg, max_packet); }
static int read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, enum enum_binlog_checksum_alg checksum_alg_arg, size_t max_allowed_packet);
on the second thought, do you need to ignore max_allowed_packet here for online ALTER? Is there no event size limit when the event is written into binlog?
+ { + THD *thd=current_thd; + max_allowed_packet= thd ? thd->slave_thread + ? slave_max_allowed_packet + : thd->variables.max_allowed_packet + : ~(uint)0; + } #endif DBUG_ENTER("Log_event::read_log_event(IO_CACHE*,String*...)");
Regards, Sergei VP of MariaDB Server Engineering and security@mariadb.org
-- Yours truly, Nikita Malyavin
Done here: https://github.com/MariaDB/server/commit/4835ca3d5800ce981e3454fd9ce1c9b1706... On Thu, 10 Aug 2023 at 19:08, Nikita Malyavin <nikitamalyavin@gmail.com> wrote:
Oh! I like this one more, too. Alright!
On Thu, 10 Aug 2023 at 18:53, Sergei Golubchik <serg@mariadb.org> wrote:
Hi, Nikita,
diff --git a/sql/log_event.cc b/sql/log_event.cc index 10180a95d99..0c4c284a40f 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -761,16 +761,21 @@ Log_event::Log_event(const uchar *buf,
int Log_event::read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, - enum enum_binlog_checksum_alg checksum_alg_arg) + enum enum_binlog_checksum_alg checksum_alg_arg, + size_t max_allowed_packet) { ulong data_len; char buf[LOG_EVENT_MINIMAL_HEADER_LEN]; uchar ev_offset= packet->length(); #if !defined(MYSQL_CLIENT) - THD *thd=current_thd; - ulong max_allowed_packet= thd ? thd->slave_thread ? slave_max_allowed_packet - :
On Aug 09, Nikita Malyavin wrote: thd->variables.max_allowed_packet
- : ~(uint)0; + if (max_allowed_packet == 0)
may be better pass THD as an argument? or even correct value of max_allowed_packet. As
ulong max_allowed_packet(THD *thd) { return thd ? ... : ~(uint)0; }
I thought about it, but let me avoid it this time. There are ~15 calls of both read_log_event functions, and it's hard to say what's called from where. I'd better leave this insane knot as it is now.
Also as a bonus mysql_bin_log will not depend on an external max_allowed_packet, which is fixed as 1GiB.
still, I don't like a run-time condition for what should be a compile-time thing. Another way of solving it is
static int read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, enum enum_binlog_checksum_alg checksum_alg_arg) { size_t max_packet= thd ? ... : ~0U; return read_log_event(file, packet, fdle, checksum_alg_arg, max_packet); }
static int read_log_event(IO_CACHE* file, String* packet, const Format_description_log_event *fdle, enum enum_binlog_checksum_alg checksum_alg_arg, size_t max_allowed_packet);
on the second thought, do you need to ignore max_allowed_packet here for online ALTER? Is there no event size limit when the event is written into binlog?
+ { + THD *thd=current_thd; + max_allowed_packet= thd ? thd->slave_thread + ? slave_max_allowed_packet + : thd->variables.max_allowed_packet + : ~(uint)0; + } #endif DBUG_ENTER("Log_event::read_log_event(IO_CACHE*,String*...)");
Regards, Sergei VP of MariaDB Server Engineering and security@mariadb.org
-- Yours truly, Nikita Malyavin
-- Yours truly, Nikita Malyavin
participants (2)
-
Nikita Malyavin
-
Sergei Golubchik