revision-id: d1e945e8a82b0e1730e8357f448f386e7274fca6 (mariadb-10.1.35-42-gd1e945e8a82)
parent(s): f01c4a10d74397220d7b7ffb724e6f52d3ab42a5
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-08 16:57:31 +0300
message:
MDEV-17143: Galera test failure on galera.MW-44
Start the general log OFF, then truncate mysql.general_log and
use proper wait conditions to make sure it is really empty.
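
The essence of the new synchronization, in mysqltest syntax (a condensed
sketch paraphrasing the test changes below; the real test performs the
truncate/wait pair on both nodes before enabling the log on node_1):

  # server started with --general-log=OFF (see MW-44-master.opt)
  TRUNCATE TABLE mysql.general_log;
  --let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log;
  --source include/wait_condition.inc
  # only after both nodes observe an empty log table:
  SET GLOBAL general_log='ON';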
---
mysql-test/suite/galera/r/MW-44.result | 14 ++++----------
mysql-test/suite/galera/t/MW-44-master.opt | 1 +
mysql-test/suite/galera/t/MW-44.test | 14 ++++++++------
3 files changed, 13 insertions(+), 16 deletions(-)
diff --git a/mysql-test/suite/galera/r/MW-44.result b/mysql-test/suite/galera/r/MW-44.result
index e5cc05057ce..a07719daca1 100644
--- a/mysql-test/suite/galera/r/MW-44.result
+++ b/mysql-test/suite/galera/r/MW-44.result
@@ -2,12 +2,6 @@ TRUNCATE TABLE mysql.general_log;
TRUNCATE TABLE mysql.general_log;
SELECT Argument FROM mysql.general_log;
Argument
-SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument LIKE 'TRUNCATE%'
-SELECT Argument FROM mysql.general_log
-SELECT Argument FROM mysql.general_log;
-Argument
-SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument LIKE 'TRUNCATE%'
-SELECT Argument FROM mysql.general_log
SET GLOBAL general_log='ON';
SET SESSION wsrep_osu_method=TOI;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
@@ -18,8 +12,8 @@ SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument
argument
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB
ALTER TABLE t1 ADD COLUMN f2 INTEGER
-SET GLOBAL general_log='ON';
-SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%';
-COUNT(*) = 0
-0
+SELECT Argument FROM mysql.general_log;
+Argument
DROP TABLE t1;
+SET GLOBAL general_log='OFF';
+SET GLOBAL general_log='OFF';
diff --git a/mysql-test/suite/galera/t/MW-44-master.opt b/mysql-test/suite/galera/t/MW-44-master.opt
index a15aa0a99d9..9b086195e8a 100644
--- a/mysql-test/suite/galera/t/MW-44-master.opt
+++ b/mysql-test/suite/galera/t/MW-44-master.opt
@@ -1 +1,2 @@
--log-output=TABLE
+--general-log=OFF
diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test
index 5bc5fa9dab8..0f8b1319b2c 100644
--- a/mysql-test/suite/galera/t/MW-44.test
+++ b/mysql-test/suite/galera/t/MW-44.test
@@ -9,13 +9,12 @@
TRUNCATE TABLE mysql.general_log;
--connection node_2
-TRUNCATE TABLE mysql.general_log;
---let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument LIKE 'TRUNCATE%';
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log;
--source include/wait_condition.inc
-SELECT Argument FROM mysql.general_log;
+TRUNCATE TABLE mysql.general_log;
--connection node_1
---let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument LIKE 'TRUNCATE%';
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log;
--source include/wait_condition.inc
SELECT Argument FROM mysql.general_log;
@@ -32,7 +31,10 @@ SET SESSION wsrep_osu_method=TOI;
SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
--connection node_2
-SET GLOBAL general_log='ON';
-SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%';
+SELECT Argument FROM mysql.general_log;
DROP TABLE t1;
+SET GLOBAL general_log='OFF';
+
+--connection node_1
+SET GLOBAL general_log='OFF';

[Commits] eec96f6: MDEV-15890 Strange error message if you try to FLUSH TABLES <view> after LOCK TABLES <view>.
by holyfoot@askmonty.org 08 Sep '18
revision-id: eec96f689616740b5296c28fbc1e264574c4557c (mariadb-10.1.35-42-geec96f6)
parent(s): f01c4a10d74397220d7b7ffb724e6f52d3ab42a5
committer: Alexey Botchkov
timestamp: 2018-09-08 14:18:51 +0400
message:
MDEV-15890 Strange error message if you try to FLUSH TABLES <view> after LOCK TABLES <view>.
LOCK view WRITE shouldn't block FLUSH view.
So we set the view's mdl_request type on its underlying tables.
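
The user-visible effect, distilled from the flush.test change below (a
sketch; table and view names follow the flush.test scenario):

  CREATE TABLE t1 (i INT);
  CREATE VIEW v1 AS SELECT * FROM t1;
  LOCK TABLES v1 WRITE;
  FLUSH TABLES v1;   -- failed with ER_TABLE_NOT_LOCKED_FOR_WRITE before, succeeds now
  UNLOCK TABLES;
  DROP VIEW v1;
  DROP TABLE t1;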
---
mysql-test/r/flush.result | 1 -
mysql-test/r/lock.result | 4 ++--
mysql-test/r/lock_multi.result | 9 ++++-----
mysql-test/r/lock_sync.result | 6 +-----
.../suite/sys_vars/r/delayed_insert_limit_func.result | 4 ++--
.../suite/sys_vars/r/sql_low_priority_updates_func.result | 4 ++--
mysql-test/suite/sys_vars/t/delayed_insert_limit_func.test | 4 ++--
.../suite/sys_vars/t/sql_low_priority_updates_func.test | 4 ++--
mysql-test/t/flush.test | 1 -
mysql-test/t/lock.test | 4 ++--
mysql-test/t/lock_multi.test | 14 +++++++++-----
mysql-test/t/lock_sync.test | 8 ++------
sql/sql_view.cc | 3 +--
13 files changed, 29 insertions(+), 37 deletions(-)
diff --git a/mysql-test/r/flush.result b/mysql-test/r/flush.result
index 2d7b81b..4e4aec4 100644
--- a/mysql-test/r/flush.result
+++ b/mysql-test/r/flush.result
@@ -508,7 +508,6 @@ ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
UNLOCK TABLES;
LOCK TABLES v1 WRITE;
FLUSH TABLES v1;
-ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
UNLOCK TABLES;
LOCK TABLES v1 READ;
FLUSH TABLES t1;
diff --git a/mysql-test/r/lock.result b/mysql-test/r/lock.result
index 0dcc0de..b6f19d1 100644
--- a/mysql-test/r/lock.result
+++ b/mysql-test/r/lock.result
@@ -136,7 +136,7 @@ select * from t1;
ERROR HY000: Table 't1' was not locked with LOCK TABLES
unlock tables;
create or replace view v_bug5719 as select * from t1;
-lock tables v_bug5719 write;
+lock tables v_bug5719 read;
select * from v_bug5719;
a
@@ -299,7 +299,7 @@ create table t2 (j int);
#
# Try to perform DDL on table which is locked through view.
create view v1 as select * from t2;
-lock tables t1 write, v1 write;
+lock tables t1 write, v1 read;
flush table t2;
ERROR HY000: Table 't2' was locked with a READ lock and can't be updated
drop table t2;
diff --git a/mysql-test/r/lock_multi.result b/mysql-test/r/lock_multi.result
index 12960a4..4df415b 100644
--- a/mysql-test/r/lock_multi.result
+++ b/mysql-test/r/lock_multi.result
@@ -276,15 +276,14 @@ DROP VIEW IF EXISTS v1;
#
# Test 1: LOCK TABLES v1 WRITE, t1 READ;
#
-# Thanks to the fact that we no longer allow DDL on tables
-# which are locked for write implicitly, the exact scenario
-# in which assert was failing is no longer repeatable.
CREATE TABLE t1 ( f1 integer );
CREATE VIEW v1 AS SELECT f1 FROM t1 ;
+# Connection 2
LOCK TABLES v1 WRITE, t1 READ;
FLUSH TABLE t1;
-ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
-UNLOCK TABLES;
+# Connection 1
+LOCK TABLES t1 WRITE;
+FLUSH TABLE t1;
DROP TABLE t1;
DROP VIEW v1;
#
diff --git a/mysql-test/r/lock_sync.result b/mysql-test/r/lock_sync.result
index 219cc08..8143f3f 100644
--- a/mysql-test/r/lock_sync.result
+++ b/mysql-test/r/lock_sync.result
@@ -648,9 +648,6 @@ set debug_sync= 'RESET';
set @old_general_log = @@global.general_log;
set @@global.general_log= OFF;
create table t1 (i int) engine=InnoDB;
-# We have to use view in order to make LOCK TABLES avoid
-# acquiring SNRW metadata lock on table.
-create view v1 as select * from t1;
insert into t1 values (1);
# Prepare user lock which will be used for resuming execution of
# the first statement after it acquires TL_WRITE_ALLOW_WRITE lock.
@@ -673,7 +670,7 @@ select count(*) > 0 from t1 as a, t1 as b for update;;
# acquiring lock for the the first instance of 't1'.
set debug_sync= 'now WAIT_FOR parked';
# Send LOCK TABLE statement which will try to get TL_WRITE lock on 't1':
-lock table v1 write;;
+lock table t1 write concurrent;;
# Switch to connection 'default'.
# Wait until this LOCK TABLES statement starts waiting for table lock.
# Allow SELECT ... FOR UPDATE to resume.
@@ -703,7 +700,6 @@ unlock tables;
# Do clean-up.
set debug_sync= 'RESET';
set @@global.general_log= @old_general_log;
-drop view v1;
drop table t1;
#
# Bug#50821 Deadlock between LOCK TABLES and ALTER TABLE
diff --git a/mysql-test/suite/sys_vars/r/delayed_insert_limit_func.result b/mysql-test/suite/sys_vars/r/delayed_insert_limit_func.result
index eeb7a28..6076d02 100644
--- a/mysql-test/suite/sys_vars/r/delayed_insert_limit_func.result
+++ b/mysql-test/suite/sys_vars/r/delayed_insert_limit_func.result
@@ -13,7 +13,7 @@ INSERT INTO t1 VALUES('3','1','1');
INSERT INTO t1 VALUES('4','1','1');
INSERT INTO t1 VALUES('5','1','1');
INSERT INTO t1 VALUES('6','1','1');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
** Connection con1 **
INSERT DELAYED INTO t1 VALUES('7','1','1');
INSERT DELAYED INTO t1 VALUES('8','1','1');
@@ -82,7 +82,7 @@ INSERT INTO t1 VALUES('3');
INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
** Connection con1 **
Asynchronous execute
INSERT DELAYED INTO t1 VALUES('7');
diff --git a/mysql-test/suite/sys_vars/r/sql_low_priority_updates_func.result b/mysql-test/suite/sys_vars/r/sql_low_priority_updates_func.result
index fe76c2c..e9758e2 100644
--- a/mysql-test/suite/sys_vars/r/sql_low_priority_updates_func.result
+++ b/mysql-test/suite/sys_vars/r/sql_low_priority_updates_func.result
@@ -20,7 +20,7 @@ INSERT INTO t1 VALUES('3');
INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 WRITE CONCURRENT;
** Connection con1 **
** Asynchronous Execution **
UPDATE t1 SET a = CONCAT(a,"-updated");|
@@ -56,7 +56,7 @@ INSERT INTO t1 VALUES('3');
INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
** Connection con1 **
** Asynchronous Execution **
UPDATE t1 SET a = CONCAT(a,"-updated");|
diff --git a/mysql-test/suite/sys_vars/t/delayed_insert_limit_func.test b/mysql-test/suite/sys_vars/t/delayed_insert_limit_func.test
index 427f273..8ad4978 100644
--- a/mysql-test/suite/sys_vars/t/delayed_insert_limit_func.test
+++ b/mysql-test/suite/sys_vars/t/delayed_insert_limit_func.test
@@ -61,7 +61,7 @@ INSERT INTO t1 VALUES('4','1','1');
INSERT INTO t1 VALUES('5','1','1');
INSERT INTO t1 VALUES('6','1','1');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
--echo ** Connection con1 **
connection con1;
@@ -173,7 +173,7 @@ INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
--echo ** Connection con1 **
connection con1;
diff --git a/mysql-test/suite/sys_vars/t/sql_low_priority_updates_func.test b/mysql-test/suite/sys_vars/t/sql_low_priority_updates_func.test
index ba13558..5d7d6cb 100644
--- a/mysql-test/suite/sys_vars/t/sql_low_priority_updates_func.test
+++ b/mysql-test/suite/sys_vars/t/sql_low_priority_updates_func.test
@@ -70,7 +70,7 @@ INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 WRITE CONCURRENT;
--echo ** Connection con1 **
connection con1;
@@ -144,7 +144,7 @@ INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
--echo ** Connection con1 **
connection con1;
diff --git a/mysql-test/t/flush.test b/mysql-test/t/flush.test
index 7736574..11a1ac2 100644
--- a/mysql-test/t/flush.test
+++ b/mysql-test/t/flush.test
@@ -724,7 +724,6 @@ FLUSH TABLES v1;
UNLOCK TABLES;
LOCK TABLES v1 WRITE;
---error ER_TABLE_NOT_LOCKED_FOR_WRITE
FLUSH TABLES v1;
UNLOCK TABLES;
diff --git a/mysql-test/t/lock.test b/mysql-test/t/lock.test
index 6cfaf9f..734fcd8 100644
--- a/mysql-test/t/lock.test
+++ b/mysql-test/t/lock.test
@@ -192,7 +192,7 @@ drop view v_bug5719;
select * from t1;
unlock tables;
create or replace view v_bug5719 as select * from t1;
-lock tables v_bug5719 write;
+lock tables v_bug5719 read;
select * from v_bug5719;
--echo
--echo Allowed to use an underlying table under LOCK TABLES <view>
@@ -370,7 +370,7 @@ create table t2 (j int);
--echo #
--echo # Try to perform DDL on table which is locked through view.
create view v1 as select * from t2;
-lock tables t1 write, v1 write;
+lock tables t1 write, v1 read;
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
flush table t2;
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test
index efd0fe5..ee73c2e 100644
--- a/mysql-test/t/lock_multi.test
+++ b/mysql-test/t/lock_multi.test
@@ -771,17 +771,21 @@ DROP VIEW IF EXISTS v1;
--echo #
--echo # Test 1: LOCK TABLES v1 WRITE, t1 READ;
--echo #
---echo # Thanks to the fact that we no longer allow DDL on tables
---echo # which are locked for write implicitly, the exact scenario
---echo # in which assert was failing is no longer repeatable.
CREATE TABLE t1 ( f1 integer );
CREATE VIEW v1 AS SELECT f1 FROM t1 ;
+--echo # Connection 2
+connect (con2,localhost,root);
LOCK TABLES v1 WRITE, t1 READ;
---error ER_TABLE_NOT_LOCKED_FOR_WRITE
FLUSH TABLE t1;
-UNLOCK TABLES;
+disconnect con2;
+--source include/wait_until_disconnected.inc
+
+--echo # Connection 1
+connection default;
+LOCK TABLES t1 WRITE;
+FLUSH TABLE t1; # Assertion happened here
# Cleanup
DROP TABLE t1;
diff --git a/mysql-test/t/lock_sync.test b/mysql-test/t/lock_sync.test
index ef79cc2..0e0aa8f 100644
--- a/mysql-test/t/lock_sync.test
+++ b/mysql-test/t/lock_sync.test
@@ -909,9 +909,6 @@ set @old_general_log = @@global.general_log;
set @@global.general_log= OFF;
create table t1 (i int) engine=InnoDB;
---echo # We have to use view in order to make LOCK TABLES avoid
---echo # acquiring SNRW metadata lock on table.
-create view v1 as select * from t1;
insert into t1 values (1);
--echo # Prepare user lock which will be used for resuming execution of
--echo # the first statement after it acquires TL_WRITE_ALLOW_WRITE lock.
@@ -942,14 +939,14 @@ connection con_bug45143_3;
--echo # acquiring lock for the the first instance of 't1'.
set debug_sync= 'now WAIT_FOR parked';
--echo # Send LOCK TABLE statement which will try to get TL_WRITE lock on 't1':
---send lock table v1 write;
+--send lock table t1 write concurrent;
--echo # Switch to connection 'default'.
connection default;
--echo # Wait until this LOCK TABLES statement starts waiting for table lock.
let $wait_condition= select count(*)= 1 from information_schema.processlist
where state= 'Waiting for table level lock' and
- info='lock table v1 write';
+ info='lock table t1 write concurrent';
--source include/wait_condition.inc
--echo # Allow SELECT ... FOR UPDATE to resume.
--echo # Since it already has TL_WRITE_ALLOW_WRITE lock on the first instance
@@ -993,7 +990,6 @@ disconnect con_bug45143_2;
disconnect con_bug45143_3;
set debug_sync= 'RESET';
set @@global.general_log= @old_general_log;
-drop view v1;
drop table t1;
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 6bd6b6a..d7a2a6b 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -1534,8 +1534,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
for (tbl= view_main_select_tables; tbl; tbl= tbl->next_local)
{
tbl->lock_type= table->lock_type;
- tbl->mdl_request.set_type((tbl->lock_type >= TL_WRITE_ALLOW_WRITE) ?
- MDL_SHARED_WRITE : MDL_SHARED_READ);
+ tbl->mdl_request.set_type(table->mdl_request.type);
}
/*
If the view is mergeable, we might want to
revision-id: f01c4a10d74397220d7b7ffb724e6f52d3ab42a5 (mariadb-10.1.35-41-gf01c4a10d74)
parent(s): 908ac40bdb9ab09cb718786f33bed417161a1748
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-08 08:12:55 +0300
message:
Add one more wait for truncate in MW-44.
---
mysql-test/suite/galera/r/MW-44.result | 8 ++++++++
mysql-test/suite/galera/t/MW-44.test | 7 +++++++
2 files changed, 15 insertions(+)
diff --git a/mysql-test/suite/galera/r/MW-44.result b/mysql-test/suite/galera/r/MW-44.result
index 7e3d2f4b7ec..e5cc05057ce 100644
--- a/mysql-test/suite/galera/r/MW-44.result
+++ b/mysql-test/suite/galera/r/MW-44.result
@@ -1,5 +1,13 @@
TRUNCATE TABLE mysql.general_log;
TRUNCATE TABLE mysql.general_log;
+SELECT Argument FROM mysql.general_log;
+Argument
+SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument LIKE 'TRUNCATE%'
+SELECT Argument FROM mysql.general_log
+SELECT Argument FROM mysql.general_log;
+Argument
+SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument LIKE 'TRUNCATE%'
+SELECT Argument FROM mysql.general_log
SET GLOBAL general_log='ON';
SET SESSION wsrep_osu_method=TOI;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test
index 6defa432879..5bc5fa9dab8 100644
--- a/mysql-test/suite/galera/t/MW-44.test
+++ b/mysql-test/suite/galera/t/MW-44.test
@@ -10,8 +10,15 @@ TRUNCATE TABLE mysql.general_log;
--connection node_2
TRUNCATE TABLE mysql.general_log;
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument LIKE 'TRUNCATE%';
+--source include/wait_condition.inc
+SELECT Argument FROM mysql.general_log;
--connection node_1
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument LIKE 'TRUNCATE%';
+--source include/wait_condition.inc
+SELECT Argument FROM mysql.general_log;
+
SET GLOBAL general_log='ON';
SET SESSION wsrep_osu_method=TOI;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
revision-id: 93ff64ebd7a7b2a534acc3ee8bf14cbfd8658d0f (mariadb-galera-10.0.36-2-g93ff64ebd7a)
parent(s): 2482306861ca107adfb6c8823cd9f6e413912ebe
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-08 08:07:25 +0300
message:
Remove incorrect install command.
---
support-files/CMakeLists.txt | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/support-files/CMakeLists.txt b/support-files/CMakeLists.txt
index 36340a2fbe7..c6ce2c04eb3 100644
--- a/support-files/CMakeLists.txt
+++ b/support-files/CMakeLists.txt
@@ -102,12 +102,8 @@ IF(UNIX)
DESTINATION ${inst_location} COMPONENT SupportFiles
PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ
GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
- CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/wsrep.cnf.sh
+ CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/wsrep.cnf.sh
${CMAKE_CURRENT_BINARY_DIR}/wsrep.cnf @ONLY)
- INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/wsrep.cnf
- DESTINATION ${inst_location} COMPONENT SupportFiles
- PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ
- GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
IF (INSTALL_SYSCONFDIR)
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/mysql-log-rotate DESTINATION ${INSTALL_SYSCONFDIR}/logrotate.d
revision-id: 2056c3cdcb8a16e017ae56c5367ab67091a29efd (mariadb-10.2.16-110-g2056c3c)
parent(s): b245023fe0bc6fa0bd6e2dfa9352b30b71d0d27d
author: Igor Babaev
committer: Igor Babaev
timestamp: 2018-09-07 20:10:04 -0700
message:
MDEV-17024 Crash on large query
This problem manifested itself when a join query used two or more
materialized CTEs such that each of them employed the same recursive CTE.
The bug caused a crash. The crash happened because the cleanup()
function was performed prematurely for the recursive CTE. This cleanup
was induced by the cleanup of the first CTE referencing the recursive
CTE. It destroyed the structures needed for reading from the temporary
table containing the rows of the recursive CTE, and an attempt to read
these rows for the second CTE referencing the recursive CTE triggered a
crash.
The cleanup for a recursive CTE R should be performed after the cleanup
of the last materialized CTE that uses R.
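
The crashing pattern in its minimal form (this is essentially the query
from the test case added below; the commented result is what the fixed
server returns):

  CREATE TABLE t1 (id INT);
  INSERT INTO t1 VALUES (1), (2), (3);
  WITH RECURSIVE
  rcte(a) AS
    (SELECT 1 UNION SELECT CAST(a+1 AS UNSIGNED) FROM rcte WHERE a < 10),
  cte1 AS
    (SELECT COUNT(*) AS c1 FROM rcte, t1 WHERE a BETWEEN 3 AND 5 AND id = a-3),
  cte2 AS
    (SELECT COUNT(*) AS c2 FROM rcte, t1 WHERE a BETWEEN 7 AND 8 AND id = a-7)
  SELECT * FROM cte1, cte2;   -- crashed before the fix; now returns c1=2, c2=1
  DROP TABLE t1;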
---
mysql-test/r/cte_recursive.result | 93 +++++++++++++++++++++++++++++++++++++++
mysql-test/t/cte_recursive.test | 67 ++++++++++++++++++++++++++++
sql/sql_base.cc | 9 ++++
sql/sql_class.h | 8 +++-
sql/sql_cte.h | 11 ++++-
sql/sql_derived.cc | 6 ++-
sql/sql_union.cc | 33 +++++++++++++-
7 files changed, 222 insertions(+), 5 deletions(-)
diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result
index c892c76..55733be 100644
--- a/mysql-test/r/cte_recursive.result
+++ b/mysql-test/r/cte_recursive.result
@@ -3300,3 +3300,96 @@ SELECT func();
func()
1
DROP FUNCTION func;
+#
+# MDEV-17024: two materialized CTEs using the same recursive CTE
+#
+create table t1 (id int);
+insert into t1 values (1), (2), (3);
+with recursive
+rcte(a) as
+(select 1 union select cast(a+1 as unsigned) from rcte where a < 10),
+cte1 as
+(select count(*) as c1 from rcte,t1 where a between 3 and 5 and id=a-3),
+cte2 as
+(select count(*) as c2 from rcte,t1 where a between 7 and 8 and id=a-7)
+select * from cte1, cte2;
+c1 c2
+2 1
+explain extended with recursive
+rcte(a) as
+(select 1 union select cast(a+1 as unsigned) from rcte where a < 10),
+cte1 as
+(select count(*) as c1 from rcte,t1 where a between 3 and 5 and id=a-3),
+cte2 as
+(select count(*) as c2 from rcte,t1 where a between 7 and 8 and id=a-7)
+select * from cte1, cte2;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY <derived4> ALL NULL NULL NULL NULL 6 100.00
+1 PRIMARY <derived5> ALL NULL NULL NULL NULL 6 100.00 Using join buffer (flat, BNL join)
+4 DERIVED <derived2> ALL NULL NULL NULL NULL 2 100.00 Using where
+4 DERIVED t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
+5 DERIVED <derived2> ALL NULL NULL NULL NULL 2 100.00 Using where
+5 DERIVED t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+3 RECURSIVE UNION <derived2> ALL NULL NULL NULL NULL 2 100.00 Using where
+NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 with recursive rcte as (select 1 AS `a` union select cast(`rcte`.`a` + 1 as unsigned) AS `cast(a+1 as unsigned)` from `rcte` where `rcte`.`a` < 10), cte1 as (select count(0) AS `c1` from `rcte` join `test`.`t1` where `rcte`.`a` between 3 and 5 and `test`.`t1`.`id` = `rcte`.`a` - 3), cte2 as (select count(0) AS `c2` from `rcte` join `test`.`t1` where `rcte`.`a` between 7 and 8 and `test`.`t1`.`id` = `rcte`.`a` - 7)select `cte1`.`c1` AS `c1`,`cte2`.`c2` AS `c2` from `cte1` join `cte2`
+prepare stmt from "with recursive
+rcte(a) as
+(select 1 union select cast(a+1 as unsigned) from rcte where a < 10),
+cte1 as
+(select count(*) as c1 from rcte,t1 where a between 3 and 5 and id=a-3),
+cte2 as
+(select count(*) as c2 from rcte,t1 where a between 7 and 8 and id=a-7)
+select * from cte1, cte2";
+execute stmt;
+c1 c2
+2 1
+execute stmt;
+c1 c2
+2 1
+create table t2 (c1 int, c2 int);
+create procedure p() insert into t2 with recursive
+rcte(a) as
+(select 1 union select cast(a+1 as unsigned) from rcte where a < 10),
+cte1 as
+(select count(*) as c1 from rcte,t1 where a between 3 and 5 and id=a-3),
+cte2 as
+(select count(*) as c2 from rcte,t1 where a between 7 and 8 and id=a-7)
+select * from cte1, cte2;
+call p();
+select * from t2;
+c1 c2
+2 1
+with recursive
+rcte(a) as
+(select 1 union select cast(a+1 as unsigned) from rcte where a < 10),
+cte1 as
+(select count(*) as c1 from rcte,t1 where a between 3 and 5 and id=a-3),
+cte2 as
+(select count(*) as c2 from rcte,t1 where a between 7 and 8 and id=a-7)
+select * from cte1;
+c1
+2
+with recursive
+rcte(a) as
+(select 1 union select cast(a+1 as unsigned) from rcte where a < 10),
+cte1 as
+(select count(*) as c1 from t1),
+cte2 as
+(select count(*) as c2 from t2)
+select * from cte1,cte2;
+c1 c2
+3 1
+with recursive
+rcte(a) as
+(select 1 union select cast(a+1 as unsigned) from rcte where a < 10),
+cte1 as
+(select count(*) as c1 from rcte,t1 where a between 3 and 5 and id=a-3),
+cte2 as
+(select count(*) as c2 from rcte,t1 where a between 7 and 8 and id=a-7)
+select * from cte1, cte2 where cte1.c1 = 3;
+c1 c2
+drop procedure p;
+drop table t1,t2;
diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test
index 4eee9ef..e3a9349 100644
--- a/mysql-test/t/cte_recursive.test
+++ b/mysql-test/t/cte_recursive.test
@@ -2323,3 +2323,70 @@ RETURN
SELECT func();
DROP FUNCTION func;
+
+--echo #
+--echo # MDEV-17024: two materialized CTEs using the same recursive CTE
+--echo #
+
+create table t1 (id int);
+insert into t1 values (1), (2), (3);
+
+let $q=
+with recursive
+rcte(a) as
+(select 1 union select cast(a+1 as unsigned) from rcte where a < 10),
+cte1 as
+(select count(*) as c1 from rcte,t1 where a between 3 and 5 and id=a-3),
+cte2 as
+(select count(*) as c2 from rcte,t1 where a between 7 and 8 and id=a-7)
+select * from cte1, cte2;
+
+eval $q;
+eval explain extended $q;
+eval prepare stmt from "$q";
+execute stmt;
+execute stmt;
+
+create table t2 (c1 int, c2 int);
+eval create procedure p() insert into t2 $q;
+call p();
+select * from t2;
+
+let $q1=
+with recursive
+rcte(a) as
+(select 1 union select cast(a+1 as unsigned) from rcte where a < 10),
+cte1 as
+(select count(*) as c1 from rcte,t1 where a between 3 and 5 and id=a-3),
+cte2 as
+(select count(*) as c2 from rcte,t1 where a between 7 and 8 and id=a-7)
+select * from cte1;
+
+eval $q1;
+
+let $q2=
+with recursive
+rcte(a) as
+(select 1 union select cast(a+1 as unsigned) from rcte where a < 10),
+cte1 as
+(select count(*) as c1 from t1),
+cte2 as
+(select count(*) as c2 from t2)
+select * from cte1,cte2;
+
+eval $q2;
+
+let $q3=
+with recursive
+rcte(a) as
+(select 1 union select cast(a+1 as unsigned) from rcte where a < 10),
+cte1 as
+(select count(*) as c1 from rcte,t1 where a between 3 and 5 and id=a-3),
+cte2 as
+(select count(*) as c2 from rcte,t1 where a between 7 and 8 and id=a-7)
+select * from cte1, cte2 where cte1.c1 = 3;
+
+eval $q3;
+
+drop procedure p;
+drop table t1,t2;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 36bf39e..cae5b4a 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -3295,6 +3295,15 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
*/
if (tables->with)
{
+ if (tables->is_recursive_with_table() &&
+ !tables->is_with_table_recursive_reference())
+ {
+ tables->with->rec_outer_references++;
+ With_element *with_elem= tables->with;
+ while ((with_elem= with_elem->get_next_mutually_recursive()) !=
+ tables->with)
+ with_elem->rec_outer_references++;
+ }
if (tables->set_as_with_table(thd, tables->with))
DBUG_RETURN(1);
else
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 9a10083..674ae02 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -5106,10 +5106,16 @@ class select_union_recursive :public select_union
TABLE *first_rec_table_to_update;
/* The temporary tables used for recursive table references */
List<TABLE> rec_tables;
+ /*
+ The count of how many times cleanup() was called with cleaned==false
+ for the unit specifying the recursive CTE for which this object was created
+ or for the unit specifying a CTE that mutually recursive with this CTE.
+ */
+ uint cleanup_count;
select_union_recursive(THD *thd_arg):
select_union(thd_arg),
- incr_table(0), first_rec_table_to_update(0) {};
+ incr_table(0), first_rec_table_to_update(0), cleanup_count(0) {};
int send_data(List<Item> &items);
bool create_result_table(THD *thd, List<Item> *column_types,
diff --git a/sql/sql_cte.h b/sql/sql_cte.h
index 70526e8..6351b65 100644
--- a/sql/sql_cte.h
+++ b/sql/sql_cte.h
@@ -98,7 +98,14 @@ class With_element : public Sql_alloc
for the definition of this element
*/
bool is_recursive;
-
+ /*
+ For a simple recursive CTE: the number of references to the CTE from
+ outside of the CTE specification.
+ For a CTE mutually recursive with other CTEs : the total number of
+ references to all these CTEs outside of their specification.
+ Each of these mutually recursive CTEs has the same value in this field.
+ */
+ uint rec_outer_references;
/*
Any non-recursive select in the specification of a recursive
with element is a called anchor. In the case mutually recursive
@@ -140,7 +147,7 @@ class With_element : public Sql_alloc
top_level_dep_map(0), sq_rec_ref(NULL),
next_mutually_recursive(NULL), references(0),
query_name(name), column_list(list), spec(unit),
- is_recursive(false), with_anchor(false),
+ is_recursive(false), rec_outer_references(0), with_anchor(false),
level(0), rec_result(NULL)
{ unit->with_element= this; }
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 0147271..44f8d74 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -1083,6 +1083,7 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived)
DBUG_ASSERT(derived->table && derived->table->is_created());
select_union *derived_result= derived->derived_result;
SELECT_LEX *save_current_select= lex->current_select;
+ bool derived_recursive_is_filled= false;
if (derived_is_recursive)
{
@@ -1095,6 +1096,7 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived)
{
/* In this case all iteration are performed */
res= derived->fill_recursive(thd);
+ derived_recursive_is_filled= true;
}
}
else if (unit->is_union())
@@ -1150,7 +1152,9 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived)
}
}
- if (res || (!lex->describe && !derived_is_recursive))
+ if (res || (!lex->describe &&
+ (!derived_is_recursive ||
+ derived_recursive_is_filled)))
unit->cleanup();
lex->current_select= save_current_select;
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index b409790..419ccf4 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -1337,6 +1337,37 @@ bool st_select_lex_unit::cleanup()
{
DBUG_RETURN(FALSE);
}
+ /*
+ When processing a PS/SP or an EXPLAIN command cleanup of a unit can
+ be performed immediately when the unit is reached in the cleanup
+ traversal initiated by the cleanup of the main unit.
+ */
+ if (!thd->stmt_arena->is_stmt_prepare() && !thd->lex->describe &&
+ with_element && with_element->is_recursive && union_result)
+ {
+ select_union_recursive *result= with_element->rec_result;
+ if (++result->cleanup_count == with_element->rec_outer_references)
+ {
+ /*
+ Perform cleanup for with_element and for all with elements
+ mutually recursive with it.
+ */
+ cleaned= 1;
+ with_element->get_next_mutually_recursive()->spec->cleanup();
+ }
+ else
+ {
+ /*
+ Just increment by 1 cleanup_count for with_element and
+ for all with elements mutually recursive with it.
+ */
+ With_element *with_elem= with_element;
+ while ((with_elem= with_elem->get_next_mutually_recursive()) !=
+ with_element)
+ with_elem->rec_result->cleanup_count++;
+ DBUG_RETURN(FALSE);
+ }
+ }
cleaned= 1;
for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select())
@@ -1367,7 +1398,7 @@ bool st_select_lex_unit::cleanup()
if (with_element && with_element->is_recursive)
{
- if (union_result )
+ if (union_result)
{
((select_union_recursive *) union_result)->cleanup();
delete union_result;

revision-id: 9b453c1ebf47a20ea1436de3284f8810d0e64c4c (mariadb-10.1.34-8-g9b453c1ebf4)
parent(s): c09a8b5b36edb494e2bcc93074c06e26cd9f2b92
author: Andrei Elkin
committer: Andrei Elkin
timestamp: 2018-09-07 20:36:16 +0300
message:
MDEV-10963 Fragmented BINLOG query
The problem was originally stated in
http://bugs.mysql.com/bug.php?id=82212
The size of a base64-encoded Rows_log_event is about 4/3 of its
vanilla byte representation. When a binlogged event is about 1GB in
size, mysqlbinlog generates a BINLOG query that can't be sent out due
to its size.
This is fixed by fragmenting the BINLOG argument C-string into
(approximate) halves when the base64-encoded event is over 1GB in size.
In such a case mysqlbinlog puts out
SET @binlog_fragment_0='base64-encoded-fragment_0';
SET @binlog_fragment_1='base64-encoded-fragment_1';
BINLOG 2, 'binlog_fragment';
to represent a big BINLOG 'base64-encoded-"total"'.
Two more statements are composed to promptly release memory:
SET @binlog_fragment_0=NULL;
SET @binlog_fragment_1=NULL;
Two fragments are enough, though the client and server may still need
to tweak their @@max_allowed_packet to accommodate the fragment size
(which they would have to do anyway with a greater number of fragments,
should that be desired).
On the lower level the following changes are made:
Log_event::print_base64()
still calls the encoder and stores the encoded data into a cache, but
now *without* doing any formatting. The latter is left for the time
when the cache is copied to an output file (e.g. the mysqlbinlog output).
The no-formatting behavior is also reflected by the change in the meaning
of the last argument, which now specifies whether to cache the encoded data.
my_b_copy_to_file()
is turned into my_b_copy_to_file_frag(), which accepts format-specifier
arguments to build a syntactically correct BINLOG query in both the
fragmented (n_frag > 1) and non-fragmented (n_frag == 1) cases.
Rows_log_event::print_helper()
decides whether to fragment, prepares the respective format specifiers
and invokes the cache-to-file copying function, which is now
copy_cache_frag_to_file_and_reinit().
copy_cache_frag_to_file_and_reinit()
replaces the original copy_event_cache_to_file_and_reinit() in order to
pass the extra arguments on to my_b_copy_to_file()'s successor.
my_b_copy_to_file_frag()
replaces the former pure copier. With its 'n_frag' argument set to 1
and the rest of the args NULL it works as the original function did.
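
As the updated binlog_base64_flag.test below also demonstrates, the
one-fragment form is equivalent to the classic single-string BINLOG
statement (a sketch; payloads elided, 'binlog_fragment' is just the
user-variable name prefix):

  -- classic form
  BINLOG '<base64-encoded event>';
  -- equivalent fragmented form with a single fragment
  SET @binlog_fragment_0='<base64-encoded event>';
  BINLOG 1, 'binlog_fragment';
  SET @binlog_fragment_0=NULL;   -- release the memory afterwards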
---
client/mysqlbinlog.cc | 33 ++++-
include/my_sys.h | 10 +-
.../suite/binlog/r/binlog_base64_flag.result | 15 +++
.../binlog/r/binlog_mysqlbinlog_row_frag.result | 27 ++++
mysql-test/suite/binlog/t/binlog_base64_flag.test | 21 ++++
.../binlog/t/binlog_mysqlbinlog_row_frag.test | 45 +++++++
mysys/mf_iocache2.c | 135 +++++++++++++++++---
sql/log_event.cc | 136 ++++++++++++++++++---
sql/log_event.h | 28 ++++-
sql/log_event_old.cc | 70 ++++++++++-
sql/sql_binlog.cc | 105 ++++++++++++++--
sql/sql_lex.cc | 3 +-
sql/sql_lex.h | 1 +
sql/sql_yacc.yy | 13 +-
unittest/sql/mf_iocache-t.cc | 52 +++++++-
15 files changed, 633 insertions(+), 61 deletions(-)
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index 9753125dd67..f0689631cfb 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -56,7 +56,13 @@ Rpl_filter *binlog_filter= 0;
#define BIN_LOG_HEADER_SIZE 4
#define PROBE_HEADER_LEN (EVENT_LEN_OFFSET+4)
-
+/*
+ 2 fragments can always represent near 1GB row-based base64-encoded event as
+ two strings each of size less than max(max_allowed_packet).
+ Bigger number of fragments does not safe from potential need to tune (increase)
+ @@max_allowed_packet before to process the fragments. So 2 is safe and enough.
+*/
+#define BINLOG_ROWS_EVENT_ENCODED_FRAGMENTS 2
#define CLIENT_CAPABILITIES (CLIENT_LONG_PASSWORD | CLIENT_LONG_FLAG | CLIENT_LOCAL_FILES)
@@ -71,6 +77,9 @@ ulong bytes_sent = 0L, bytes_received = 0L;
ulong mysqld_net_retry_count = 10L;
ulong open_files_limit;
ulong opt_binlog_rows_event_max_size;
+#ifndef DBUG_OFF
+ulong opt_binlog_rows_event_max_encoded_size;
+#endif
uint test_flags = 0;
static uint opt_protocol= 0;
static FILE *result_file;
@@ -813,7 +822,14 @@ write_event_header_and_base64(Log_event *ev, FILE *result_file,
/* Write header and base64 output to cache */
ev->print_header(head, print_event_info, FALSE);
- ev->print_base64(body, print_event_info, FALSE);
+
+ /* the assert states the only current use case for the function */
+ DBUG_ASSERT(print_event_info->base64_output_mode ==
+ BASE64_OUTPUT_ALWAYS);
+
+ ev->print_base64(body, print_event_info,
+ print_event_info->base64_output_mode !=
+ BASE64_OUTPUT_DECODE_ROWS);
/* Read data from cache and write to result file */
if (copy_event_cache_to_file_and_reinit(head, result_file) ||
@@ -852,7 +868,9 @@ static bool print_base64(PRINT_EVENT_INFO *print_event_info, Log_event *ev)
return 1;
}
ev->print(result_file, print_event_info);
- return print_event_info->head_cache.error == -1;
+ return
+ print_event_info->head_cache.error == -1 ||
+ print_event_info->body_cache.error == -1;
}
@@ -1472,6 +1490,15 @@ that may lead to an endless loop.",
"This value must be a multiple of 256.",
&opt_binlog_rows_event_max_size, &opt_binlog_rows_event_max_size, 0,
GET_ULONG, REQUIRED_ARG, UINT_MAX, 256, ULONG_MAX, 0, 256, 0},
+#ifndef DBUG_OFF
+ {"binlog-row-event-max-encoded-size", 0,
+ "The maximum size of base64-encoded rows-event in one BINLOG pseudo-query "
+ "instance. When the computed actual size exceeds the limit "
+ "the BINLOG's argument string is fragmented in two.",
+ &opt_binlog_rows_event_max_encoded_size,
+ &opt_binlog_rows_event_max_encoded_size, 0,
+ GET_ULONG, REQUIRED_ARG, UINT_MAX/4, 256, ULONG_MAX, 0, 256, 0},
+#endif
{"verify-binlog-checksum", 'c', "Verify checksum binlog events.",
(uchar**) &opt_verify_binlog_checksum, (uchar**) &opt_verify_binlog_checksum,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
diff --git a/include/my_sys.h b/include/my_sys.h
index 110a2ee9af3..1d0088e6698 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -602,7 +602,15 @@ static inline size_t my_b_bytes_in_cache(const IO_CACHE *info)
return *info->current_end - *info->current_pos;
}
-int my_b_copy_to_file(IO_CACHE *cache, FILE *file);
+int
+my_b_copy_to_file_frag(IO_CACHE *cache, FILE *file,
+ uint n_frag,
+ const char* before_frag,
+ const char* after_frag,
+ const char* after_last,
+ const char* after_last_per_frag,
+ char* buf);
+
my_off_t my_b_append_tell(IO_CACHE* info);
my_off_t my_b_safe_tell(IO_CACHE* info); /* picks the correct tell() */
int my_b_pread(IO_CACHE *info, uchar *Buffer, size_t Count, my_off_t pos);
diff --git a/mysql-test/suite/binlog/r/binlog_base64_flag.result b/mysql-test/suite/binlog/r/binlog_base64_flag.result
index d13e13c97b0..12b369680c6 100644
--- a/mysql-test/suite/binlog/r/binlog_base64_flag.result
+++ b/mysql-test/suite/binlog/r/binlog_base64_flag.result
@@ -28,6 +28,21 @@ a
1
1
3
+DELETE FROM t1 WHERE a=3;
+BINLOG '
+ODdYRw8BAAAAZgAAAGoAAAABAAQANS4xLjIzLXJjLWRlYnVnLWxvZwAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAA4N1hHEzgNAAgAEgAEBAQEEgAAUwAEGggAAAAICAgC
+';
+SET @binlog_format_0='
+TFtYRxMBAAAAKQAAAH8BAAAAABAAAAAAAAAABHRlc3QAAnQxAAEDAAE=
+TFtYRxcBAAAAIgAAAKEBAAAQABAAAAAAAAEAAf/+AwAAAA==
+';
+BINLOG 1, 'binlog_format';
+select * from t1;
+a
+1
+1
+3
==== Test --base64-output=never on a binlog with row events ====
/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/;
/*!40019 SET @@session.max_insert_delayed_threads=0*/;
diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_frag.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_frag.result
new file mode 100644
index 00000000000..d4037ff7983
--- /dev/null
+++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_frag.result
@@ -0,0 +1,27 @@
+CREATE TABLE t (a TEXT);
+RESET MASTER;
+INSERT INTO t SET a=repeat('a', 1024);
+SELECT a from t into @a;
+FLUSH LOGS;
+DELETE FROM t;
+SELECT a LIKE @a as 'true' FROM t;
+true
+1
+SELECT @binlog_fragment_0, @binlog_fragment_1 as 'NULL';
+@binlog_fragment_0 NULL
+NULL NULL
+BINLOG number-of-fragments must be greater than 0
+BINLOG 0, 'binlog_fragment';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ''binlog_fragment'' at line 1
+BINLOG -1, 'binlog_fragment';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '-1, 'binlog_fragment'' at line 1
+SET @binlog_fragment_0='012345';
+SET @binlog_fragment_2='012345';
+BINLOG 2, 'binlog_fragment';
+ERROR HY000: Decoding of base64 string failed: BINLOG fragment user variable 'binlog_fragment_1' is unexpectedly empty
+SET @binlog_fragment_0='012345';
+SET @binlog_fragment_1='012345';
+BINLOG 2, 'binlog_fragment';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use
+# Cleanup
+DROP TABLE t;
diff --git a/mysql-test/suite/binlog/t/binlog_base64_flag.test b/mysql-test/suite/binlog/t/binlog_base64_flag.test
index f8333315088..a1da28a8892 100644
--- a/mysql-test/suite/binlog/t/binlog_base64_flag.test
+++ b/mysql-test/suite/binlog/t/binlog_base64_flag.test
@@ -67,6 +67,27 @@ TFtYRxcBAAAAIgAAAKEBAAAQABAAAAAAAAEAAf/+AwAAAA==
# The above line should succeed and 3 should be in the table
select * from t1;
+# The same as above with one-fragment BINLOG to prove
+# BINLOG 'base64-encoded-data' is equivalent to the pair of
+# SET @uservar='base64-encoded-data';
+# BINLOG 1, @uservar;
+DELETE FROM t1 WHERE a=3;
+# This is a binlog statement containing a Format_description_log_event
+# from the same version as the Table_map and Write_rows_log_event.
+BINLOG '
+ODdYRw8BAAAAZgAAAGoAAAABAAQANS4xLjIzLXJjLWRlYnVnLWxvZwAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAA4N1hHEzgNAAgAEgAEBAQEEgAAUwAEGggAAAAICAgC
+';
+
+# This is a Table_map_log_event+Write_rows_log_event corresponding to:
+# INSERT INTO TABLE test.t1 VALUES (3)
+SET @binlog_format_0='
+TFtYRxMBAAAAKQAAAH8BAAAAABAAAAAAAAAABHRlc3QAAnQxAAEDAAE=
+TFtYRxcBAAAAIgAAAKEBAAAQABAAAAAAAAEAAf/+AwAAAA==
+';
+BINLOG 1, 'binlog_format';
+# The above line should succeed and 3 should be in the table
+select * from t1;
# Test that mysqlbinlog stops with an error message when the
# --base64-output=never flag is used on a binlog with base64 events.
diff --git a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_row_frag.test b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_row_frag.test
new file mode 100644
index 00000000000..81f4ddad1c1
--- /dev/null
+++ b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_row_frag.test
@@ -0,0 +1,45 @@
+--source include/have_debug.inc
+--source include/have_log_bin.inc
+--source include/have_binlog_format_row.inc
+
+--let $MYSQLD_DATADIR= `select @@datadir`
+--let $max_size=1024
+
+CREATE TABLE t (a TEXT);
+# events of interest are guaranteed to stay in 000001 log
+RESET MASTER;
+--eval INSERT INTO t SET a=repeat('a', $max_size)
+SELECT a from t into @a;
+FLUSH LOGS;
+DELETE FROM t;
+
+--exec $MYSQL_BINLOG --binlog-row-event-max-encoded-size=256 $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql
+
+--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/mysqlbinlog.sql
+
+SELECT a LIKE @a as 'true' FROM t;
+SELECT @binlog_fragment_0, @binlog_fragment_1 as 'NULL';
+
+# improper syntax error
+--echo BINLOG number-of-fragments must be greater than 0
+--error ER_PARSE_ERROR
+BINLOG 0, 'binlog_fragment';
+--error ER_PARSE_ERROR
+BINLOG -1, 'binlog_fragment';
+
+# lost fragment error check
+SET @binlog_fragment_0='012345';
+SET @binlog_fragment_2='012345';
+--error ER_BASE64_DECODE_ERROR
+BINLOG 2, 'binlog_fragment';
+
+# corrupted fragments error check (to the expected error code notice,
+# the same error code occurs in a similar unfragmented case)
+SET @binlog_fragment_0='012345';
+SET @binlog_fragment_1='012345';
+--error ER_SYNTAX_ERROR
+BINLOG 2, 'binlog_fragment';
+
+--echo # Cleanup
+DROP TABLE t;
+
diff --git a/mysys/mf_iocache2.c b/mysys/mf_iocache2.c
index 2499094037d..ffc5458ed15 100644
--- a/mysys/mf_iocache2.c
+++ b/mysys/mf_iocache2.c
@@ -22,13 +22,23 @@
#include <stdarg.h>
#include <m_ctype.h>
-/*
+/**
Copy contents of an IO_CACHE to a file.
SYNOPSIS
- my_b_copy_to_file()
- cache IO_CACHE to copy from
- file File to copy to
+ my_b_copy_to_file_frag
+
+ cache IO_CACHE to copy from
+ file File to copy to
+ n_frag # of fragments
+
+ Other arguments represent format strings to enable wrapping
+ of the fragments and total, including
+
+ before_frag before a fragment
+ after_frag after a fragment
+ after_last_frag after all the fragments
+ after_last_per_frag as the last items per each fragment
DESCRIPTION
Copy the contents of the cache to the file. The cache will be
@@ -38,33 +48,120 @@
If a failure to write fully occurs, the cache is only copied
partially.
- TODO
- Make this function solid by handling partial reads from the cache
- in a correct manner: it should be atomic.
+ The copying is made in so many steps as the number of fragments as
+ specified by the parameter 'n_frag'. Each step is wrapped with
+ writing to the file 'before_frag' and 'after_frag' formated
+ strings, unless the parameters are NULL. In the end, optionally,
+ first 'after_last_frag' string is appended to 'file' followed by
+ 'after_last_per_frag' per each fragment.
+ final item.
RETURN VALUE
0 All OK
1 An error occurred
+
+ TODO
+ Make this function solid by handling partial reads from the cache
+ in a correct manner: it should be atomic.
*/
int
-my_b_copy_to_file(IO_CACHE *cache, FILE *file)
+my_b_copy_to_file_frag(IO_CACHE *cache, FILE *file,
+ uint n_frag,
+ const char* before_frag,
+ const char* after_frag,
+ const char* after_last,
+ const char* after_last_per_frag,
+ char* buf)
{
- size_t bytes_in_cache;
- DBUG_ENTER("my_b_copy_to_file");
+ size_t bytes_in_cache; // block, may have short size in the last one
+ size_t written_off_last_block; // consumed part of the block by last fragment
+ size_t total_size= my_b_tell(cache);
+ size_t frag_size= total_size / n_frag + 1;
+ size_t total_written= 0;
+ size_t frag_written; // bytes collected in the current fragment
+ uint i;
+
+ DBUG_ENTER("my_b_copy_to_file_frag");
+
+ DBUG_ASSERT(cache->type == WRITE_CACHE);
/* Reinit the cache to read from the beginning of the cache */
if (reinit_io_cache(cache, READ_CACHE, 0L, FALSE, FALSE))
DBUG_RETURN(1);
- bytes_in_cache= my_b_bytes_in_cache(cache);
- do
+
+ for (i= 0, written_off_last_block= 0, bytes_in_cache= my_b_bytes_in_cache(cache);
+ i < n_frag;
+ i++, total_written += frag_written)
{
- if (my_fwrite(file, cache->read_pos, bytes_in_cache,
- MYF(MY_WME | MY_NABP)) == (size_t) -1)
- DBUG_RETURN(1);
- } while ((bytes_in_cache= my_b_fill(cache)));
- if(cache->error == -1)
- DBUG_RETURN(1);
- DBUG_RETURN(0);
+ frag_written= 0;
+ if (before_frag)
+ {
+ sprintf(buf, before_frag, i);
+ my_fwrite(file, (uchar*) buf, strlen(buf), MYF(MY_WME | MY_NABP));
+ }
+ do
+ {
+ /*
+ Either the current block is the last (L) in making the
+ current fragment and possibly has some extra not to fit (LG) into
+ the fragment, or (I) the current (whole then) block is
+ intermediate.
+ */
+ size_t block_to_write= (frag_written + bytes_in_cache >= frag_size) ?
+ frag_size - frag_written : bytes_in_cache;
+
+ DBUG_ASSERT(n_frag != 1 ||
+ (block_to_write == bytes_in_cache &&
+ written_off_last_block == 0));
+
+ if (my_fwrite(file, cache->read_pos + written_off_last_block,
+ block_to_write,
+ MYF(MY_WME | MY_NABP)) == (size_t) -1)
+ /* no cache->error is set here */
+ DBUG_RETURN(1);
+
+ frag_written += block_to_write;
+ if (frag_written == frag_size) // (L)
+ {
+ DBUG_ASSERT(block_to_write <= bytes_in_cache);
+ written_off_last_block= block_to_write;
+ bytes_in_cache -= written_off_last_block; // (LG) when bytes>0
+ /*
+ Nothing should be left in cache at the end of the
+ last fragment composition.
+ */
+ DBUG_ASSERT(i != n_frag - 1 || bytes_in_cache == 0);
+
+ break;
+ }
+ else
+ {
+ written_off_last_block= 0; // (I)
+ }
+ } while ((bytes_in_cache= my_b_fill(cache)));
+
+ if (after_frag)
+ {
+ sprintf(buf, after_frag, NULL);
+ my_fwrite(file, (uchar*) buf, strlen(buf), MYF(MY_WME | MY_NABP));
+ }
+ }
+
+ DBUG_ASSERT(total_written == total_size); // output == input
+
+ if (after_last)
+ {
+ sprintf(buf, after_last, n_frag);
+ my_fwrite(file, (uchar*) buf, strlen(buf), MYF(MY_WME | MY_NABP));
+ }
+
+ for (i= 0; after_last_per_frag && i < n_frag ; i++)
+ {
+ sprintf(buf, after_last_per_frag, i);
+ my_fwrite(file, (uchar*) buf, strlen(buf), MYF(MY_WME | MY_NABP));
+ }
+
+ DBUG_RETURN(cache->error == -1);
}
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 3ac7ac5a20f..319f34e214d 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -2762,9 +2762,16 @@ void free_table_map_log_event(Table_map_log_event *event)
delete event;
}
+/*
+ Encode the event, optionally per 'do_print_encoded_base64' store the result
+ into the argument cache; optionally per 'verbose' print into the cache
+ a verbose represenation of the event.
+ Note, no extra wrapping is done to the encoded data, like procuding a BINLOG
+ query. It's left for a routine that extracts from the cache.
+*/
void Log_event::print_base64(IO_CACHE* file,
PRINT_EVENT_INFO* print_event_info,
- bool more)
+ bool do_print_encoded_base64)
{
const uchar *ptr= (const uchar *)temp_buf;
uint32 size= uint4korr(ptr + EVENT_LEN_OFFSET);
@@ -2783,17 +2790,9 @@ void Log_event::print_base64(IO_CACHE* file,
DBUG_ASSERT(0);
}
- if (print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS)
- {
- if (my_b_tell(file) == 0)
- my_b_write_string(file, "\nBINLOG '\n");
-
+ if (do_print_encoded_base64)
my_b_printf(file, "%s\n", tmp_str);
- if (!more)
- my_b_printf(file, "'%s\n", print_event_info->delimiter);
- }
-
if (print_event_info->verbose)
{
Rows_log_event *ev= NULL;
@@ -4833,9 +4832,17 @@ void Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
print_event_info->base64_output_mode != BASE64_OUTPUT_NEVER &&
!print_event_info->short_form)
{
- if (print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS)
+ /* BINLOG is matched with the delimiter below on the same level */
+ bool do_print_encoded_base64=
+ print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS;
+ if (do_print_encoded_base64)
my_b_printf(&cache, "BINLOG '\n");
- print_base64(&cache, print_event_info, FALSE);
+
+ print_base64(&cache, print_event_info, do_print_encoded_base64);
+
+ if (do_print_encoded_base64)
+ my_b_printf(&cache, "'%s\n", print_event_info->delimiter);
+
print_event_info->printed_fd_event= TRUE;
}
DBUG_VOID_RETURN;
@@ -10479,12 +10486,48 @@ void Rows_log_event::pack_info(Protocol *protocol)
#endif
#ifdef MYSQL_CLIENT
+/*
+ The function invokes base64 encoder to run on the current
+ event string and store the result into two caches.
+ When the event ends the current statement the caches are is copied into
+ the argument file.
+ Copying is also concerned how to wrap the event, specifically to produce
+ a valid SQL syntax.
+ When the encoded data size is within max(MAX_ALLOWED_PACKET)
+ a regular BINLOG query is composed. Otherwise it is build as fragmented
+
+ BINLOG number_of_fragments,'user_var_name_prefix'
+
+ where fragments are represented by a sequence of "indexed" user
+ variables. E.g when the above variable's name prefix is
+ 'binlog_fragment' and the number of fragments is 2 the fragmented
+ version is as the following:
+
+ SET @binlog_fragment_0='...';
+ SET @binlog_fragment_1='...';
+ BINLOG 2, 'binlog_fragment';
+
+ Two more statements are composed as well
+
+ SET @binlog_fragment_0=NULL;
+ SET @binlog_fragment_1=NULL;
+
+ to promptly release memory.
+
+ NOTE.
+ If any changes made don't forget to duplicate them to
+ Old_rows_log_event as long as it's supported.
+*/
void Rows_log_event::print_helper(FILE *file,
PRINT_EVENT_INFO *print_event_info,
char const *const name)
{
IO_CACHE *const head= &print_event_info->head_cache;
IO_CACHE *const body= &print_event_info->body_cache;
+ bool do_print_encoded_base64=
+ print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS &&
+ !print_event_info->short_form;
+
if (!print_event_info->short_form)
{
bool const last_stmt_event= get_flags(STMT_END_F);
@@ -10492,13 +10535,72 @@ void Rows_log_event::print_helper(FILE *file,
my_b_printf(head, "\t%s: table id %lu%s\n",
name, m_table_id,
last_stmt_event ? " flags: STMT_END_F" : "");
- print_base64(body, print_event_info, !last_stmt_event);
+ print_base64(body, print_event_info, do_print_encoded_base64);
}
if (get_flags(STMT_END_F))
{
- copy_event_cache_to_file_and_reinit(head, file);
- copy_event_cache_to_file_and_reinit(body, file);
+ uint n_frag= 1;
+ const char* before_frag= NULL;
+ char* after_frag= NULL;
+ char* after_last= NULL;
+ char* after_last_per_frag= NULL;
+ const char fmt_last_frag[]= "\nBINLOG %%d, 'binlog_fragment'%s\n";
+ const char fmt_last_per_frag[]= "\nSET @binlog_fragment_%%d = NULL%s\n";
+ const char fmt_before_frag[]= "\nSET @binlog_fragment_%d ='\n";
+ /*
+ Buffer to pass to copy_cache_frag_to_file_and_reinit to
+ compute formatted strings according to specifiers.
+ The sizes may depend on an actual fragment number size in terms of decimal
+ signs so its maximum is estimated (not precisely yet safely) below.
+ */
+ char buf[(sizeof(fmt_last_frag) + sizeof(fmt_last_per_frag))
+ + ((sizeof(n_frag) * 8)/3 + 1) // decimal index
+ + sizeof(print_event_info->delimiter + 3)]; // delim, \n and 0.
+ if (copy_event_cache_to_file_and_reinit(head, file))
+ {
+ head->error= -1;
+ return;
+ }
+
+ if (do_print_encoded_base64)
+ {
+ after_frag= (char*) my_malloc(sizeof(buf), MYF(MY_WME));
+ sprintf(after_frag, "'%s\n", print_event_info->delimiter);
+ if (my_b_tell(body) >
+#ifndef DBUG_OFF
+ opt_binlog_rows_event_max_encoded_size
+#else
+ MAX_MAX_ALLOWED_PACKET
+#endif
+ )
+ n_frag= BINLOG_ROWS_EVENT_ENCODED_FRAGMENTS;
+ if (n_frag > 1)
+ {
+ before_frag= fmt_before_frag;
+ after_last= (char*) my_malloc(sizeof(buf), MYF(MY_WME));
+ sprintf(after_last, fmt_last_frag, (char*) print_event_info->delimiter);
+ after_last_per_frag= (char*) my_malloc(sizeof(buf), MYF(MY_WME));
+ sprintf(after_last_per_frag, fmt_last_per_frag,
+ (char*) print_event_info->delimiter);
+ }
+ else
+ {
+ before_frag= "\nBINLOG '\n";
+ }
+ }
+ if (copy_cache_frag_to_file_and_reinit(body, file, n_frag,
+ before_frag, after_frag,
+ after_last, after_last_per_frag, buf))
+ {
+ body->error= -1;
+ goto err;
+ }
+
+err:
+ my_free(after_frag);
+ my_free(after_last);
+ my_free(after_last_per_frag);
}
}
#endif
@@ -11357,7 +11459,9 @@ void Table_map_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
m_dbnam, m_tblnam, m_table_id,
((m_flags & TM_BIT_HAS_TRIGGERS_F) ?
" (has triggers)" : ""));
- print_base64(&print_event_info->body_cache, print_event_info, TRUE);
+ print_base64(&print_event_info->body_cache, print_event_info,
+ print_event_info->base64_output_mode !=
+ BASE64_OUTPUT_DECODE_ROWS);
copy_event_cache_to_file_and_reinit(&print_event_info->head_cache, file);
}
}
diff --git a/sql/log_event.h b/sql/log_event.h
index 90900f63533..ac675bcb5da 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -1157,7 +1157,7 @@ class Log_event
void print_header(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info,
bool is_more);
void print_base64(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info,
- bool is_more);
+ bool do_print_encoded_base64);
#endif
/*
read_log_event() functions read an event from a binlog or relay
@@ -4895,11 +4895,33 @@ class Ignorable_log_event : public Log_event {
static inline bool copy_event_cache_to_file_and_reinit(IO_CACHE *cache,
FILE *file)
{
- return
- my_b_copy_to_file(cache, file) ||
+ return
+ my_b_copy_to_file_frag(cache, file, 1, NULL, NULL, NULL, NULL, NULL) ||
reinit_io_cache(cache, WRITE_CACHE, 0, FALSE, TRUE);
}
+
+/**
+ Copying of 'cache' content to 'file' in steps of the number of
+ fragments as specified by 'n_frag'. Other arguments enables wrapping
+ of the fragments and total. See more in my_b_copy_to_file_frag() header comments.
+*/
+inline bool copy_cache_frag_to_file_and_reinit(IO_CACHE *cache,
+ FILE *file,
+ uint n_frag,
+ const char* before_frag,
+ const char* after_frag,
+ const char* after_last,
+ const char* after_last_per_frag,
+ char* buf)
+{
+ return
+ my_b_copy_to_file_frag(cache, file, n_frag, before_frag, after_frag,
+ after_last, after_last_per_frag, buf) ||
+ reinit_io_cache(cache, WRITE_CACHE, 0, FALSE, TRUE);
+}
+
+
#ifdef MYSQL_SERVER
/*****************************************************************************
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index d2b4470bbf9..eb119cdc8bc 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -1850,12 +1850,17 @@ void Old_rows_log_event::pack_info(Protocol *protocol)
#ifdef MYSQL_CLIENT
+/* Method duplicates Rows_log_event's one */
void Old_rows_log_event::print_helper(FILE *file,
PRINT_EVENT_INFO *print_event_info,
char const *const name)
{
IO_CACHE *const head= &print_event_info->head_cache;
IO_CACHE *const body= &print_event_info->body_cache;
+ bool do_print_encoded_base64=
+ print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS &&
+ !print_event_info->short_form;
+
if (!print_event_info->short_form)
{
bool const last_stmt_event= get_flags(STMT_END_F);
@@ -1863,13 +1868,72 @@ void Old_rows_log_event::print_helper(FILE *file,
my_b_printf(head, "\t%s: table id %lu%s\n",
name, m_table_id,
last_stmt_event ? " flags: STMT_END_F" : "");
- print_base64(body, print_event_info, !last_stmt_event);
+ print_base64(body, print_event_info, do_print_encoded_base64);
}
if (get_flags(STMT_END_F))
{
- copy_event_cache_to_file_and_reinit(head, file);
- copy_event_cache_to_file_and_reinit(body, file);
+ uint n_frag= 1;
+ const char* before_frag= NULL;
+ char* after_frag= NULL;
+ char* after_last= NULL;
+ char* after_last_per_frag= NULL;
+ const char fmt_last_frag[]= "\nBINLOG %%d, 'binlog_fragment'%s\n";
+ const char fmt_last_per_frag[]= "\nSET @binlog_fragment_%%d = NULL%s\n";
+ const char fmt_before_frag[]= "\nSET @binlog_fragment_%d ='\n";
+ /*
+ Buffer to pass to copy_cache_frag_to_file_and_reinit to
+ compute formatted strings according to specifiers.
+ The sizes may depend on an actual fragment number size in terms of decimal
+ signs so its maximum is estimated (not precisely yet safely) below.
+ */
+ char buf[(sizeof(fmt_last_frag) + sizeof(fmt_last_per_frag))
+ + ((sizeof(n_frag) * 8)/3 + 1) // decimal index
+ + sizeof(print_event_info->delimiter + 3)]; // delim, \n and 0.
+ if (copy_event_cache_to_file_and_reinit(head, file))
+ {
+ head->error= -1;
+ return;
+ }
+
+ if (do_print_encoded_base64)
+ {
+ after_frag= (char*) my_malloc(sizeof(buf), MYF(MY_WME));
+ sprintf(after_frag, "'%s\n", print_event_info->delimiter);
+ if (my_b_tell(body) >
+#ifndef DBUG_OFF
+ opt_binlog_rows_event_max_encoded_size
+#else
+ MAX_MAX_ALLOWED_PACKET
+#endif
+ )
+ n_frag= BINLOG_ROWS_EVENT_ENCODED_FRAGMENTS;
+ if (n_frag > 1)
+ {
+ before_frag= fmt_before_frag;
+ after_last= (char*) my_malloc(sizeof(buf), MYF(MY_WME));
+ sprintf(after_last, fmt_last_frag, (char*) print_event_info->delimiter);
+ after_last_per_frag= (char*) my_malloc(sizeof(buf), MYF(MY_WME));
+ sprintf(after_last_per_frag, fmt_last_per_frag,
+ (char*) print_event_info->delimiter);
+ }
+ else
+ {
+ before_frag= "\nBINLOG '\n";
+ }
+ }
+ if (copy_cache_frag_to_file_and_reinit(body, file, n_frag,
+ before_frag, after_frag,
+ after_last, after_last_per_frag, buf))
+ {
+ body->error= -1;
+ goto err;
+ }
+
+err:
+ my_free(after_frag);
+ my_free(after_last);
+ my_free(after_last_per_frag);
}
}
#endif
diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc
index 91cf038907e..37789c5fe8f 100644
--- a/sql/sql_binlog.cc
+++ b/sql/sql_binlog.cc
@@ -53,14 +53,6 @@ void mysql_client_binlog_statement(THD* thd)
if (check_global_access(thd, SUPER_ACL))
DBUG_VOID_RETURN;
- size_t coded_len= thd->lex->comment.length;
- if (!coded_len)
- {
- my_error(ER_SYNTAX_ERROR, MYF(0));
- DBUG_VOID_RETURN;
- }
- size_t decoded_len= base64_needed_decoded_length(coded_len);
-
/*
option_bits will be changed when applying the event. But we don't expect
it be changed permanently after BINLOG statement, so backup it first.
@@ -81,7 +73,9 @@ void mysql_client_binlog_statement(THD* thd)
int err;
Relay_log_info *rli;
rpl_group_info *rgi;
-
+ char *buf= NULL;
+ size_t coded_len= 0, decoded_len= 0;
+ const char* name_buf;
rli= thd->rli_fake;
if (!rli)
{
@@ -102,15 +96,12 @@ void mysql_client_binlog_statement(THD* thd)
rgi->thd= thd;
const char *error= 0;
- char *buf= (char *) my_malloc(decoded_len, MYF(MY_WME));
Log_event *ev = 0;
/*
Out of memory check
*/
- if (!(rli &&
- rli->relay_log.description_event_for_exec &&
- buf))
+ if (!(rli && rli->relay_log.description_event_for_exec))
{
my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1); /* needed 1 bytes */
goto end;
@@ -119,6 +110,88 @@ void mysql_client_binlog_statement(THD* thd)
rli->sql_driver_thd= thd;
rli->no_storage= TRUE;
+ if (thd->lex->fragmented_binlog_event > 0)
+ {
+ /*
+ Copy fragments into the standard placeholder thd->lex->comment.str
+ and compute the size of the (still) encoded total.
+ The size can exceed max(max_allowed_packet) which is not a
+ problem as no String instance is created off this char array.
+ */
+ const char *name_fmt= "%s_%d";
+ name_buf= (char *) my_malloc(thd->lex->ident.length /* %s */ + 1 /* _ */ +
+ (sizeof(thd->lex->
+ fragmented_binlog_event) *
+ (8/3 + 1)) /* %d */ + 1 /* 0 */, MYF(MY_WME));
+ if (!name_buf)
+ {
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1);
+ goto end;
+ }
+
+ // In the first loop the total length is computed only.
+ thd->lex->comment.length= 0;
+ thd->lex->comment.str= NULL;
+ for (uint i= 0; i < thd->lex->fragmented_binlog_event; i++)
+ {
+ user_var_entry *entry;
+
+ sprintf((char*) name_buf, name_fmt, thd->lex->ident.str, i);
+ entry=
+ (user_var_entry*) my_hash_search(&thd->user_vars,
+ (uchar*) name_buf,
+ strlen(name_buf));
+ if (!entry || entry->type != STRING_RESULT)
+ {
+ my_printf_error(ER_BASE64_DECODE_ERROR,
+ "%s: BINLOG fragment user "
+ "variable '%s' is unexpectedly empty", MYF(0),
+ ER_THD(thd, ER_BASE64_DECODE_ERROR), name_buf);
+ goto end;
+ }
+ thd->lex->comment.length += entry->length;
+ }
+
+ thd->lex->comment.str= (char *) my_malloc(thd->lex->comment.length,
+ MYF(MY_WME));
+ if (!thd->lex->comment.str)
+ {
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1);
+ goto end;
+ }
+
+ // In the 2nd the user var values are merged into allocated buf
+ size_t gathered_length= 0;
+ for (uint i= 0; i < thd->lex->fragmented_binlog_event; i++)
+ {
+ user_var_entry *entry;
+
+ sprintf((char*) name_buf, name_fmt, thd->lex->ident.str, i);
+ entry=
+ (user_var_entry*) my_hash_search(&thd->user_vars,
+ (uchar*) name_buf,
+ strlen(name_buf));
+ memcpy(thd->lex->comment.str + gathered_length,
+ entry->value, entry->length);
+
+ gathered_length += entry->length;
+ }
+ DBUG_ASSERT(gathered_length == thd->lex->comment.length);
+ }
+
+ if (!(coded_len= thd->lex->comment.length))
+ {
+ my_error(ER_SYNTAX_ERROR, MYF(0));
+ DBUG_VOID_RETURN;
+ }
+
+ decoded_len= base64_needed_decoded_length(coded_len);
+ if (!(buf= (char *) my_malloc(decoded_len, MYF(MY_WME))))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1);
+ goto end;
+ }
+
for (char const *strptr= thd->lex->comment.str ;
strptr < thd->lex->comment.str + thd->lex->comment.length ; )
{
@@ -272,6 +345,12 @@ void mysql_client_binlog_statement(THD* thd)
my_ok(thd);
end:
+ if (thd->lex->fragmented_binlog_event > 0)
+ {
+ my_free(thd->lex->comment.str);
+ thd->lex->fragmented_binlog_event= 0;
+ my_free((char*) name_buf);
+ }
thd->variables.option_bits= thd_options;
rgi->slave_close_thread_tables(thd);
my_free(buf);
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 085ad1a4b3b..551183667bf 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -2874,7 +2874,8 @@ LEX::LEX()
: explain(NULL),
result(0), arena_for_set_stmt(0), mem_root_for_set_stmt(0),
option_type(OPT_DEFAULT), context_analysis_only(0), sphead(0),
- is_lex_started(0), limit_rows_examined_cnt(ULONGLONG_MAX)
+ is_lex_started(0), limit_rows_examined_cnt(ULONGLONG_MAX),
+ fragmented_binlog_event(0)
{
init_dynamic_array2(&plugins, sizeof(plugin_ref), plugins_static_buffer,
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 3b47b1d25c9..1aff4f9f2de 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -2747,6 +2747,7 @@ struct LEX: public Query_tables_list
*/
Item *limit_rows_examined;
ulonglong limit_rows_examined_cnt;
+ uint fragmented_binlog_event;
/**
Holds a set of domain_ids for deletion at FLUSH..DELETE_DOMAIN_ID
*/
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index fcfc63439cb..3f9593e1b02 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -7951,7 +7951,18 @@ binlog_base64_event:
Lex->sql_command = SQLCOM_BINLOG_BASE64_EVENT;
Lex->comment= $2;
}
- ;
+ |
+ BINLOG_SYM NUM_literal ',' TEXT_STRING_sys
+ {
+ Lex->sql_command = SQLCOM_BINLOG_BASE64_EVENT;
+ if ((Lex->fragmented_binlog_event= $2->val_int()) <= 0)
+ {
+ my_parse_error(thd, ER_SYNTAX_ERROR);
+ MYSQL_YYABORT;
+ }
+ Lex->ident= $4;
+ }
+ ;
check_view_or_table:
table_or_tables table_list opt_mi_check_type
diff --git a/unittest/sql/mf_iocache-t.cc b/unittest/sql/mf_iocache-t.cc
index 1b04f8eb0d3..89c5de8b0df 100644
--- a/unittest/sql/mf_iocache-t.cc
+++ b/unittest/sql/mf_iocache-t.cc
@@ -253,10 +253,58 @@ void mdev10259()
}
+void mdev10963()
+{
+ int res;
+ int n_frag_max= 16;
+ int n_checks= 8;
+ uchar buf[1024 * 512];
+ FILE *file;
+ myf my_flags= MYF(MY_WME);
+ const char *file_name="cache.log";
+
+ memset(buf, FILL, sizeof(buf));
+ diag("MDEV-10963 Fragmented BINLOG query");
+
+ init_io_cache_encryption();
+
+ /* copying source */
+ res= open_cached_file(&info, 0, 0, CACHE_SIZE, 0);
+ ok(res == 0, "open_cached_file" INFO_TAIL);
+ res= my_b_write(&info, buf, sizeof(buf));
+ ulong saved_pos= my_b_tell(&info);
+ ok(res == 0 && saved_pos == sizeof(buf), "cache is filled");
+
+ /* destination */
+ file= my_fopen(file_name, O_WRONLY | O_TRUNC | O_CREAT, my_flags);
+ ok(my_fileno(file) > 0, "opened file fd = %d", my_fileno(file));
+
+ /*
+ Verify copying with random fragment numbers which cover cases
+ when the fragment size is less than the cache read buffer size.
+ */
+ for (; n_checks; n_checks--)
+ {
+ int c_frag= rand() % n_frag_max + 1;
+
+ res= my_b_copy_to_file_frag(&info, file, c_frag,
+ NULL, NULL, NULL, NULL, NULL);
+ ok(res == 0, "write to file" INFO_TAIL);
+ ok(my_ftell(file, my_flags) == sizeof(buf), "file written in %d fragments", c_frag);
+ res= reinit_io_cache(&info, WRITE_CACHE, saved_pos, 0, 0);
+ ok(res == 0 && my_b_tell(&info) == sizeof(buf), "write cache is filled back");
+
+ rewind(file);
+ my_chsize(my_fileno(file), 0, 0, my_flags);
+ }
+ close_cached_file(&info);
+ my_fclose(file, my_flags);
+}
+
int main(int argc __attribute__((unused)),char *argv[])
{
MY_INIT(argv[0]);
- plan(46);
+ plan(73);
/* temp files with and without encryption */
encrypt_tmp_files= 1;
@@ -272,6 +320,8 @@ int main(int argc __attribute__((unused)),char *argv[])
mdev10259();
encrypt_tmp_files= 0;
+ mdev10963();
+
my_end(0);
return exit_status();
}
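
For orientation, here is a sketch of what the fragmented output is expected to look like and how it is replayed. It is assembled from the format strings above rather than from an actual mysqlbinlog run; the base64 payloads and the '/*!*/;' delimiter are placeholders, and it assumes BINLOG_ROWS_EVENT_ENCODED_FRAGMENTS is 2:

  SET @binlog_fragment_0 ='
  <base64-encoded piece 0 of the row event>
  '/*!*/;
  SET @binlog_fragment_1 ='
  <base64-encoded piece 1 of the row event>
  '/*!*/;
  BINLOG 2, 'binlog_fragment'/*!*/;
  SET @binlog_fragment_0 = NULL/*!*/;
  SET @binlog_fragment_1 = NULL/*!*/;

The BINLOG <count>, '<name>' form is the one added to sql_yacc.yy: mysql_client_binlog_statement() concatenates the still-encoded values of the user variables <name>_0 .. <name>_<count-1>, base64-decodes the total and applies it; the trailing SET ... = NULL statements in the printed script just release the fragment values.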

[Commits] 563ee38: MDEV-15890 Strange error message if you try to FLUSH TABLES <view> after LOCK TABLES <view>.
by holyfoot@askmonty.org 07 Sep '18
revision-id: 563ee3804f1ec5a66f35126933a340e916ad888c (mariadb-10.1.35-40-g563ee38)
parent(s): edb3a32c6cc06407efc96a30a6c7948fb9628ace
committer: Alexey Botchkov
timestamp: 2018-09-07 21:36:40 +0400
message:
MDEV-15890 Strange error message if you try to FLUSH TABLES <view> after LOCK TABLES <view>.
LOCK <view> WRITE shouldn't block FLUSH TABLES <view>.
So we propagate the view's mdl_request type to its underlying tables.
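
A minimal sketch of the user-visible change, mirroring the flush.test and lock_multi.test updates below (the table and view are the ones defined there):

  CREATE TABLE t1 (f1 INTEGER);
  CREATE VIEW v1 AS SELECT f1 FROM t1;
  LOCK TABLES v1 WRITE;
  FLUSH TABLES v1;   -- used to fail with ER_TABLE_NOT_LOCKED_FOR_WRITE, now succeeds
  UNLOCK TABLES;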
---
mysql-test/r/flush.result | 1 -
mysql-test/r/lock_multi.result | 9 ++++-----
mysql-test/r/lock_sync.result | 6 +-----
.../suite/sys_vars/r/delayed_insert_limit_func.result | 4 ++--
.../suite/sys_vars/r/sql_low_priority_updates_func.result | 4 ++--
mysql-test/suite/sys_vars/t/delayed_insert_limit_func.test | 4 ++--
.../suite/sys_vars/t/sql_low_priority_updates_func.test | 4 ++--
mysql-test/t/flush.test | 1 -
mysql-test/t/lock_multi.test | 14 +++++++++-----
mysql-test/t/lock_sync.test | 8 ++------
sql/sql_view.cc | 3 +--
11 files changed, 25 insertions(+), 33 deletions(-)
diff --git a/mysql-test/r/flush.result b/mysql-test/r/flush.result
index 2d7b81b..4e4aec4 100644
--- a/mysql-test/r/flush.result
+++ b/mysql-test/r/flush.result
@@ -508,7 +508,6 @@ ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
UNLOCK TABLES;
LOCK TABLES v1 WRITE;
FLUSH TABLES v1;
-ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
UNLOCK TABLES;
LOCK TABLES v1 READ;
FLUSH TABLES t1;
diff --git a/mysql-test/r/lock_multi.result b/mysql-test/r/lock_multi.result
index 12960a4..4df415b 100644
--- a/mysql-test/r/lock_multi.result
+++ b/mysql-test/r/lock_multi.result
@@ -276,15 +276,14 @@ DROP VIEW IF EXISTS v1;
#
# Test 1: LOCK TABLES v1 WRITE, t1 READ;
#
-# Thanks to the fact that we no longer allow DDL on tables
-# which are locked for write implicitly, the exact scenario
-# in which assert was failing is no longer repeatable.
CREATE TABLE t1 ( f1 integer );
CREATE VIEW v1 AS SELECT f1 FROM t1 ;
+# Connection 2
LOCK TABLES v1 WRITE, t1 READ;
FLUSH TABLE t1;
-ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
-UNLOCK TABLES;
+# Connection 1
+LOCK TABLES t1 WRITE;
+FLUSH TABLE t1;
DROP TABLE t1;
DROP VIEW v1;
#
diff --git a/mysql-test/r/lock_sync.result b/mysql-test/r/lock_sync.result
index 219cc08..8143f3f 100644
--- a/mysql-test/r/lock_sync.result
+++ b/mysql-test/r/lock_sync.result
@@ -648,9 +648,6 @@ set debug_sync= 'RESET';
set @old_general_log = @@global.general_log;
set @@global.general_log= OFF;
create table t1 (i int) engine=InnoDB;
-# We have to use view in order to make LOCK TABLES avoid
-# acquiring SNRW metadata lock on table.
-create view v1 as select * from t1;
insert into t1 values (1);
# Prepare user lock which will be used for resuming execution of
# the first statement after it acquires TL_WRITE_ALLOW_WRITE lock.
@@ -673,7 +670,7 @@ select count(*) > 0 from t1 as a, t1 as b for update;;
# acquiring lock for the the first instance of 't1'.
set debug_sync= 'now WAIT_FOR parked';
# Send LOCK TABLE statement which will try to get TL_WRITE lock on 't1':
-lock table v1 write;;
+lock table t1 write concurrent;;
# Switch to connection 'default'.
# Wait until this LOCK TABLES statement starts waiting for table lock.
# Allow SELECT ... FOR UPDATE to resume.
@@ -703,7 +700,6 @@ unlock tables;
# Do clean-up.
set debug_sync= 'RESET';
set @@global.general_log= @old_general_log;
-drop view v1;
drop table t1;
#
# Bug#50821 Deadlock between LOCK TABLES and ALTER TABLE
diff --git a/mysql-test/suite/sys_vars/r/delayed_insert_limit_func.result b/mysql-test/suite/sys_vars/r/delayed_insert_limit_func.result
index eeb7a28..6076d02 100644
--- a/mysql-test/suite/sys_vars/r/delayed_insert_limit_func.result
+++ b/mysql-test/suite/sys_vars/r/delayed_insert_limit_func.result
@@ -13,7 +13,7 @@ INSERT INTO t1 VALUES('3','1','1');
INSERT INTO t1 VALUES('4','1','1');
INSERT INTO t1 VALUES('5','1','1');
INSERT INTO t1 VALUES('6','1','1');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
** Connection con1 **
INSERT DELAYED INTO t1 VALUES('7','1','1');
INSERT DELAYED INTO t1 VALUES('8','1','1');
@@ -82,7 +82,7 @@ INSERT INTO t1 VALUES('3');
INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
** Connection con1 **
Asynchronous execute
INSERT DELAYED INTO t1 VALUES('7');
diff --git a/mysql-test/suite/sys_vars/r/sql_low_priority_updates_func.result b/mysql-test/suite/sys_vars/r/sql_low_priority_updates_func.result
index fe76c2c..e9758e2 100644
--- a/mysql-test/suite/sys_vars/r/sql_low_priority_updates_func.result
+++ b/mysql-test/suite/sys_vars/r/sql_low_priority_updates_func.result
@@ -20,7 +20,7 @@ INSERT INTO t1 VALUES('3');
INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 WRITE CONCURRENT;
** Connection con1 **
** Asynchronous Execution **
UPDATE t1 SET a = CONCAT(a,"-updated");|
@@ -56,7 +56,7 @@ INSERT INTO t1 VALUES('3');
INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
** Connection con1 **
** Asynchronous Execution **
UPDATE t1 SET a = CONCAT(a,"-updated");|
diff --git a/mysql-test/suite/sys_vars/t/delayed_insert_limit_func.test b/mysql-test/suite/sys_vars/t/delayed_insert_limit_func.test
index 427f273..8ad4978 100644
--- a/mysql-test/suite/sys_vars/t/delayed_insert_limit_func.test
+++ b/mysql-test/suite/sys_vars/t/delayed_insert_limit_func.test
@@ -61,7 +61,7 @@ INSERT INTO t1 VALUES('4','1','1');
INSERT INTO t1 VALUES('5','1','1');
INSERT INTO t1 VALUES('6','1','1');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
--echo ** Connection con1 **
connection con1;
@@ -173,7 +173,7 @@ INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
--echo ** Connection con1 **
connection con1;
diff --git a/mysql-test/suite/sys_vars/t/sql_low_priority_updates_func.test b/mysql-test/suite/sys_vars/t/sql_low_priority_updates_func.test
index ba13558..5d7d6cb 100644
--- a/mysql-test/suite/sys_vars/t/sql_low_priority_updates_func.test
+++ b/mysql-test/suite/sys_vars/t/sql_low_priority_updates_func.test
@@ -70,7 +70,7 @@ INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 WRITE CONCURRENT;
--echo ** Connection con1 **
connection con1;
@@ -144,7 +144,7 @@ INSERT INTO t1 VALUES('4');
INSERT INTO t1 VALUES('5');
INSERT INTO t1 VALUES('6');
-LOCK TABLE v1 WRITE;
+LOCK TABLE v1 READ;
--echo ** Connection con1 **
connection con1;
diff --git a/mysql-test/t/flush.test b/mysql-test/t/flush.test
index 7736574..11a1ac2 100644
--- a/mysql-test/t/flush.test
+++ b/mysql-test/t/flush.test
@@ -724,7 +724,6 @@ FLUSH TABLES v1;
UNLOCK TABLES;
LOCK TABLES v1 WRITE;
---error ER_TABLE_NOT_LOCKED_FOR_WRITE
FLUSH TABLES v1;
UNLOCK TABLES;
diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test
index efd0fe5..ee73c2e 100644
--- a/mysql-test/t/lock_multi.test
+++ b/mysql-test/t/lock_multi.test
@@ -771,17 +771,21 @@ DROP VIEW IF EXISTS v1;
--echo #
--echo # Test 1: LOCK TABLES v1 WRITE, t1 READ;
--echo #
---echo # Thanks to the fact that we no longer allow DDL on tables
---echo # which are locked for write implicitly, the exact scenario
---echo # in which assert was failing is no longer repeatable.
CREATE TABLE t1 ( f1 integer );
CREATE VIEW v1 AS SELECT f1 FROM t1 ;
+--echo # Connection 2
+connect (con2,localhost,root);
LOCK TABLES v1 WRITE, t1 READ;
---error ER_TABLE_NOT_LOCKED_FOR_WRITE
FLUSH TABLE t1;
-UNLOCK TABLES;
+disconnect con2;
+--source include/wait_until_disconnected.inc
+
+--echo # Connection 1
+connection default;
+LOCK TABLES t1 WRITE;
+FLUSH TABLE t1; # Assertion happened here
# Cleanup
DROP TABLE t1;
diff --git a/mysql-test/t/lock_sync.test b/mysql-test/t/lock_sync.test
index ef79cc2..0e0aa8f 100644
--- a/mysql-test/t/lock_sync.test
+++ b/mysql-test/t/lock_sync.test
@@ -909,9 +909,6 @@ set @old_general_log = @@global.general_log;
set @@global.general_log= OFF;
create table t1 (i int) engine=InnoDB;
---echo # We have to use view in order to make LOCK TABLES avoid
---echo # acquiring SNRW metadata lock on table.
-create view v1 as select * from t1;
insert into t1 values (1);
--echo # Prepare user lock which will be used for resuming execution of
--echo # the first statement after it acquires TL_WRITE_ALLOW_WRITE lock.
@@ -942,14 +939,14 @@ connection con_bug45143_3;
--echo # acquiring lock for the the first instance of 't1'.
set debug_sync= 'now WAIT_FOR parked';
--echo # Send LOCK TABLE statement which will try to get TL_WRITE lock on 't1':
---send lock table v1 write;
+--send lock table t1 write concurrent;
--echo # Switch to connection 'default'.
connection default;
--echo # Wait until this LOCK TABLES statement starts waiting for table lock.
let $wait_condition= select count(*)= 1 from information_schema.processlist
where state= 'Waiting for table level lock' and
- info='lock table v1 write';
+ info='lock table t1 write concurrent';
--source include/wait_condition.inc
--echo # Allow SELECT ... FOR UPDATE to resume.
--echo # Since it already has TL_WRITE_ALLOW_WRITE lock on the first instance
@@ -993,7 +990,6 @@ disconnect con_bug45143_2;
disconnect con_bug45143_3;
set debug_sync= 'RESET';
set @@global.general_log= @old_general_log;
-drop view v1;
drop table t1;
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 6bd6b6a..d7a2a6b 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -1534,8 +1534,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
for (tbl= view_main_select_tables; tbl; tbl= tbl->next_local)
{
tbl->lock_type= table->lock_type;
- tbl->mdl_request.set_type((tbl->lock_type >= TL_WRITE_ALLOW_WRITE) ?
- MDL_SHARED_WRITE : MDL_SHARED_READ);
+ tbl->mdl_request.set_type(table->mdl_request.type);
}
/*
If the view is mergeable, we might want to

[Commits] 285969e1c68: Fix result file for wsrep.variables, for some reason had too new
by jan 07 Sep '18
revision-id: 285969e1c6848f2c5f382d182bb11496de486482 (mariadb-10.1.35-38-g285969e1c68)
parent(s): b0026e33af8fc3b25a42099c096a84591fd550e2
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-07 11:27:15 +0300
message:
Fix result file for wsrep.variables; for some reason it had been recorded
with a too new Galera library.
---
mysql-test/suite/wsrep/r/variables.result | 60 +------------------------------
mysql-test/suite/wsrep/t/variables.test | 6 ++--
2 files changed, 3 insertions(+), 63 deletions(-)
diff --git a/mysql-test/suite/wsrep/r/variables.result b/mysql-test/suite/wsrep/r/variables.result
index 2e31e761f5f..1dafa6b5ed5 100644
--- a/mysql-test/suite/wsrep/r/variables.result
+++ b/mysql-test/suite/wsrep/r/variables.result
@@ -19,6 +19,7 @@ SET GLOBAL wsrep_provider=none;
# variables when using "_"
#
CALL mtr.add_suppression("WSREP: Could not open saved state file for reading.*");
+# wsrep
SHOW GLOBAL STATUS LIKE 'wsrep%';
Variable_name Value
wsrep_apply_oooe #
@@ -59,65 +60,6 @@ wsrep_local_send_queue_min #
wsrep_local_state #
wsrep_local_state_comment #
wsrep_local_state_uuid #
-wsrep_open_connections #
-wsrep_open_transactions #
-wsrep_protocol_version #
-wsrep_provider_name #
-wsrep_provider_vendor #
-wsrep_provider_version #
-wsrep_ready #
-wsrep_received #
-wsrep_received_bytes #
-wsrep_repl_data_bytes #
-wsrep_repl_keys #
-wsrep_repl_keys_bytes #
-wsrep_repl_other_bytes #
-wsrep_replicated #
-wsrep_replicated_bytes #
-wsrep_thread_count #
-
-SHOW GLOBAL STATUS LIKE 'wsrep_%';
-Variable_name Value
-wsrep_apply_oooe #
-wsrep_apply_oool #
-wsrep_apply_window #
-wsrep_causal_reads #
-wsrep_cert_deps_distance #
-wsrep_cert_index_size #
-wsrep_cert_interval #
-wsrep_cluster_conf_id #
-wsrep_cluster_size #
-wsrep_cluster_state_uuid #
-wsrep_cluster_status #
-wsrep_commit_oooe #
-wsrep_commit_oool #
-wsrep_commit_window #
-wsrep_connected #
-wsrep_flow_control_paused #
-wsrep_flow_control_paused_ns #
-wsrep_flow_control_recv #
-wsrep_flow_control_sent #
-wsrep_incoming_addresses #
-wsrep_last_committed #
-wsrep_local_bf_aborts #
-wsrep_local_cached_downto #
-wsrep_local_cert_failures #
-wsrep_local_commits #
-wsrep_local_index #
-wsrep_local_recv_queue #
-wsrep_local_recv_queue_avg #
-wsrep_local_recv_queue_max #
-wsrep_local_recv_queue_min #
-wsrep_local_replays #
-wsrep_local_send_queue #
-wsrep_local_send_queue_avg #
-wsrep_local_send_queue_max #
-wsrep_local_send_queue_min #
-wsrep_local_state #
-wsrep_local_state_comment #
-wsrep_local_state_uuid #
-wsrep_open_connections #
-wsrep_open_transactions #
wsrep_protocol_version #
wsrep_provider_name #
wsrep_provider_vendor #
diff --git a/mysql-test/suite/wsrep/t/variables.test b/mysql-test/suite/wsrep/t/variables.test
index f3cd66e5cf9..15104b9b654 100644
--- a/mysql-test/suite/wsrep/t/variables.test
+++ b/mysql-test/suite/wsrep/t/variables.test
@@ -35,13 +35,11 @@ source include/check_galera_version.inc;
--enable_result_log
--enable_query_log
+--echo # wsrep
+--sorted_result
--replace_column 2 #
SHOW GLOBAL STATUS LIKE 'wsrep%';
---echo
---replace_column 2 #
-SHOW GLOBAL STATUS LIKE 'wsrep_%';
-
--replace_column 2 #
SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';
revision-id: c64ff168f2fdb3c3675908f9fe6565653cce867d (mariadb-10.1.35-37-gc64ff168f2f)
parent(s): 6ca6f25d4e96a479eb144a8da1066a27d0abce40
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-07 08:04:01 +0300
message:
Run #2.
---
mysql-test/suite/galera/r/MW-44.result | 7 ++++---
mysql-test/suite/galera/t/MW-44.test | 5 ++++-
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/mysql-test/suite/galera/r/MW-44.result b/mysql-test/suite/galera/r/MW-44.result
index 394c749b253..7e3d2f4b7ec 100644
--- a/mysql-test/suite/galera/r/MW-44.result
+++ b/mysql-test/suite/galera/r/MW-44.result
@@ -6,9 +6,10 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
SET SESSION wsrep_osu_method=RSU;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
SET SESSION wsrep_osu_method=TOI;
-SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
-COUNT(*) = 2
-1
+SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+argument
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB
+ALTER TABLE t1 ADD COLUMN f2 INTEGER
SET GLOBAL general_log='ON';
SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%';
COUNT(*) = 0
diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test
index cb5db1b208a..6defa432879 100644
--- a/mysql-test/suite/galera/t/MW-44.test
+++ b/mysql-test/suite/galera/t/MW-44.test
@@ -19,7 +19,10 @@ SET SESSION wsrep_osu_method=RSU;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
SET SESSION wsrep_osu_method=TOI;
-SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+--let $wait_condition = SELECT COUNT(argument) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+--source include/wait_condition.inc
+
+SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
--connection node_2
SET GLOBAL general_log='ON';

[Commits] 96572b7: MDEV-16992 Assertion `table_ref->table || table_ref->view' failed in
by IgorBabaev 07 Sep '18
revision-id: 96572b7aa1698ddd1ff64672aac920e667b35790 (mariadb-10.3.6-117-g96572b7)
parent(s): f6694b62447454028dd087802cd3b326ed721dd7
author: Igor Babaev
committer: Igor Babaev
timestamp: 2018-09-06 20:19:37 -0700
message:
MDEV-16992 Assertion `table_ref->table || table_ref->view' failed in
Field_iterator_table_ref::set_field_iterator
Several functions that processed different prepare statements missed
the DT_INIT flag in the last parameter of their open_normal_and_derived_tables()
calls. This made context analysis of derived tables dependent on the order in
which the derived tables were processed by mysql_handle_derived(). That
order was induced by the order of SELECTs in all_select_list.
In 10.4 the order of SELECTs in all_select_list became different, and the lack
of the DT_INIT flag in some open_normal_and_derived_tables() calls became
critical, as some derived tables were not identified as such.
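
One of the new ps.test cases below shows the affected pattern, a prepared CREATE TABLE whose CTE selects from a materialized derived table; before the fix the EXECUTE could hit the assertion above because the derived table (SELECT 1) AS t was not identified as such during context analysis:

  PREPARE stmt FROM
  "CREATE TABLE t1 AS
   WITH cte(a) AS (SELECT * FROM (SELECT 1) AS t) SELECT * FROM cte";
  EXECUTE stmt;
  SELECT * FROM t1;  -- returns a single row: 1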
---
mysql-test/main/ps.result | 43 +++++++++++++++++++++++++++++++++++++++++++
mysql-test/main/ps.test | 46 ++++++++++++++++++++++++++++++++++++++++++++++
sql/sql_prepare.cc | 13 +++++++------
3 files changed, 96 insertions(+), 6 deletions(-)
diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result
index 540315b..d619b21 100644
--- a/mysql-test/main/ps.result
+++ b/mysql-test/main/ps.result
@@ -5299,5 +5299,48 @@ DROP PROCEDURE p2;
DROP PROCEDURE p1;
DROP ROLE testrole;
#
+# MDEV-16992: prepare of CREATE TABLE, CREATE VIEW, DO, SET, CALL
+# statements with CTE containing materialized derived
+# (the bug is reproducible on 10.4)
+#
+prepare stmt from
+"CREATE TABLE t1 AS
+ WITH cte(a) AS (SELECT * FROM (SELECT 1) AS t) SELECT * FROM cte;";
+execute stmt;
+select * from t1;
+a
+1
+prepare stmt from
+"CREATE VIEW v1 AS
+ WITH cte(a) AS (SELECT * FROM (SELECT 1) AS t) SELECT * FROM cte;";
+execute stmt;
+select * from v1;
+a
+1
+prepare stmt from
+"DO (SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS tt);";
+execute stmt;
+prepare stmt from
+"SET @a = (SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS t);";
+execute stmt;
+create procedure p (i int) insert into t1 values(i);
+prepare stmt from
+"CALL p
+ ((SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS tt));";
+execute stmt;
+select * from t1;
+a
+1
+1
+drop procedure p;
+drop view v1;
+drop table t1;
+#
# End of 10.2 tests
#
diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test
index ff93c46..86ae11c 100644
--- a/mysql-test/main/ps.test
+++ b/mysql-test/main/ps.test
@@ -4777,5 +4777,51 @@ DROP PROCEDURE p1;
DROP ROLE testrole;
--echo #
+--echo # MDEV-16992: prepare of CREATE TABLE, CREATE VIEW, DO, SET, CALL
+--echo # statements with CTE containing materialized derived
+--echo # (the bug is reproducible on 10.4)
+--echo #
+
+--enable_result_log
+
+prepare stmt from
+"CREATE TABLE t1 AS
+ WITH cte(a) AS (SELECT * FROM (SELECT 1) AS t) SELECT * FROM cte;";
+execute stmt;
+select * from t1;
+
+prepare stmt from
+"CREATE VIEW v1 AS
+ WITH cte(a) AS (SELECT * FROM (SELECT 1) AS t) SELECT * FROM cte;";
+execute stmt;
+select * from v1;
+
+prepare stmt from
+"DO (SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS tt);";
+execute stmt;
+
+prepare stmt from
+"SET @a = (SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS t);";
+execute stmt;
+
+create procedure p (i int) insert into t1 values(i);
+
+prepare stmt from
+"CALL p
+ ((SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS tt));";
+execute stmt;
+select * from t1;
+
+drop procedure p;
+drop view v1;
+drop table t1;
+
+--echo #
--echo # End of 10.2 tests
--echo #
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index c615356..b0b0c81 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1582,7 +1582,7 @@ static bool mysql_test_do_fields(Prepared_statement *stmt,
DBUG_RETURN(TRUE);
if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE | DT_CREATE))
DBUG_RETURN(TRUE);
DBUG_RETURN(setup_fields(thd, Ref_ptr_array(),
*values, COLUMNS_READ, 0, NULL, 0));
@@ -1614,7 +1614,7 @@ static bool mysql_test_set_fields(Prepared_statement *stmt,
if ((tables &&
check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)) ||
open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE | DT_CREATE))
goto error;
while ((var= it++))
@@ -1651,7 +1651,8 @@ static bool mysql_test_call_fields(Prepared_statement *stmt,
if ((tables &&
check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)) ||
- open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL, DT_PREPARE))
+ open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
+ DT_INIT | DT_PREPARE))
goto err;
while ((item= it++))
@@ -1777,7 +1778,7 @@ static bool mysql_test_create_table(Prepared_statement *stmt)
if (open_normal_and_derived_tables(stmt->thd, lex->query_tables,
MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE | DT_CREATE))
DBUG_RETURN(TRUE);
select_lex->context.resolve_in_select_list= TRUE;
@@ -1798,7 +1799,7 @@ static bool mysql_test_create_table(Prepared_statement *stmt)
*/
if (open_normal_and_derived_tables(stmt->thd, lex->query_tables,
MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE))
+ DT_INIT | DT_PREPARE))
DBUG_RETURN(TRUE);
}
@@ -2025,7 +2026,7 @@ static bool mysql_test_create_view(Prepared_statement *stmt)
lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW;
if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE))
+ DT_INIT | DT_PREPARE))
goto err;
res= select_like_stmt_test(stmt, 0, 0);