[Commits] cb06612a9da: MDEV-31655: Parallel replication deadlock victim preference code erroneously removed
by Kristian Nielsen 11 Jul '23
by Kristian Nielsen 11 Jul '23
11 Jul '23
revision-id: cb06612a9da09a7981ada84768793f2ff3ef842c (mariadb-10.4.30-3-gcb06612a9da)
parent(s): dbe5c20b755b87f67d87990c3baabc866667e41b
author: Kristian Nielsen
committer: Kristian Nielsen
timestamp: 2023-07-11 00:31:29 +0200
message:
MDEV-31655: Parallel replication deadlock victim preference code erroneously removed
Restore code to make InnoDB choose the second transaction as a deadlock
victim if two transactions deadlock that need to commit in-order for
parallel replication. This code was erroneously removed when VATS was
implemented in InnoDB.
Also add a test case for InnoDB choosing the right deadlock victim.
Signed-off-by: Kristian Nielsen <knielsen(a)knielsen-hq.org>
---
.../suite/binlog_encryption/rpl_parallel.result | 42 ++++++++++++-
mysql-test/suite/rpl/r/rpl_parallel.result | 42 ++++++++++++-
mysql-test/suite/rpl/t/rpl_parallel.test | 71 +++++++++++++++++++++-
sql/rpl_parallel.cc | 7 ++-
sql/sql_class.cc | 43 +++++++++++++
storage/innobase/lock/lock0lock.cc | 12 ++++
storage/innobase/trx/trx0trx.cc | 12 ++++
7 files changed, 225 insertions(+), 4 deletions(-)
diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel.result b/mysql-test/suite/binlog_encryption/rpl_parallel.result
index b75a66a634a..b24ff7ba53d 100644
--- a/mysql-test/suite/binlog_encryption/rpl_parallel.result
+++ b/mysql-test/suite/binlog_encryption/rpl_parallel.result
@@ -2,6 +2,7 @@ include/master-slave.inc
[connection master]
connection server_2;
SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
+SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode;
SET GLOBAL slave_parallel_threads=10;
ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first
include/stop_slave.inc
@@ -1680,13 +1681,52 @@ a
2000
SELECT * FROM t2 WHERE a>=2000 ORDER BY a;
a
+MDEV-31655: Parallel replication deadlock victim preference code erroneously removed
+connection server_1;
+CREATE TABLE t7 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
+BEGIN;
+COMMIT;
+include/save_master_gtid.inc
+connection server_2;
+include/sync_with_master_gtid.inc
+include/stop_slave.inc
+set @@global.slave_parallel_threads= 5;
+set @@global.slave_parallel_mode= conservative;
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug= "+d,rpl_mdev31655_zero_retries";
+connection master;
+SET @old_dbug= @@SESSION.debug_dbug;
+SET SESSION debug_dbug="+d,binlog_force_commit_id";
+SET @commit_id= 1+1000;
+SET @commit_id= 2+1000;
+SET @commit_id= 3+1000;
+SET @commit_id= 4+1000;
+SET @commit_id= 5+1000;
+SET @commit_id= 6+1000;
+SET @commit_id= 7+1000;
+SET @commit_id= 8+1000;
+SET @commit_id= 9+1000;
+SET @commit_id= 10+1000;
+SET SESSION debug_dbug= @old_dbug;
+SELECT COUNT(*), SUM(a*100*b) FROM t7;
+COUNT(*) SUM(a*100*b)
+10 225000
+include/save_master_gtid.inc
+connection server_2;
+include/start_slave.inc
+include/sync_with_master_gtid.inc
+SET GLOBAL debug_dbug= @old_dbug;
+SELECT COUNT(*), SUM(a*100*b) FROM t7;
+COUNT(*) SUM(a*100*b)
+10 225000
connection server_2;
include/stop_slave.inc
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
+SET GLOBAL slave_parallel_mode=@old_parallel_mode;
include/start_slave.inc
SET DEBUG_SYNC= 'RESET';
connection server_1;
DROP function foo;
-DROP TABLE t1,t2,t3,t4,t5,t6;
+DROP TABLE t1,t2,t3,t4,t5,t6,t7;
SET DEBUG_SYNC= 'RESET';
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_parallel.result b/mysql-test/suite/rpl/r/rpl_parallel.result
index 9b2e68d366e..ef89d954faa 100644
--- a/mysql-test/suite/rpl/r/rpl_parallel.result
+++ b/mysql-test/suite/rpl/r/rpl_parallel.result
@@ -2,6 +2,7 @@ include/master-slave.inc
[connection master]
connection server_2;
SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
+SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode;
SET GLOBAL slave_parallel_threads=10;
ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first
include/stop_slave.inc
@@ -1679,13 +1680,52 @@ a
2000
SELECT * FROM t2 WHERE a>=2000 ORDER BY a;
a
+MDEV-31655: Parallel replication deadlock victim preference code erroneously removed
+connection server_1;
+CREATE TABLE t7 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
+BEGIN;
+COMMIT;
+include/save_master_gtid.inc
+connection server_2;
+include/sync_with_master_gtid.inc
+include/stop_slave.inc
+set @@global.slave_parallel_threads= 5;
+set @@global.slave_parallel_mode= conservative;
+SET @old_dbug= @@GLOBAL.debug_dbug;
+SET GLOBAL debug_dbug= "+d,rpl_mdev31655_zero_retries";
+connection master;
+SET @old_dbug= @@SESSION.debug_dbug;
+SET SESSION debug_dbug="+d,binlog_force_commit_id";
+SET @commit_id= 1+1000;
+SET @commit_id= 2+1000;
+SET @commit_id= 3+1000;
+SET @commit_id= 4+1000;
+SET @commit_id= 5+1000;
+SET @commit_id= 6+1000;
+SET @commit_id= 7+1000;
+SET @commit_id= 8+1000;
+SET @commit_id= 9+1000;
+SET @commit_id= 10+1000;
+SET SESSION debug_dbug= @old_dbug;
+SELECT COUNT(*), SUM(a*100*b) FROM t7;
+COUNT(*) SUM(a*100*b)
+10 225000
+include/save_master_gtid.inc
+connection server_2;
+include/start_slave.inc
+include/sync_with_master_gtid.inc
+SET GLOBAL debug_dbug= @old_dbug;
+SELECT COUNT(*), SUM(a*100*b) FROM t7;
+COUNT(*) SUM(a*100*b)
+10 225000
connection server_2;
include/stop_slave.inc
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
+SET GLOBAL slave_parallel_mode=@old_parallel_mode;
include/start_slave.inc
SET DEBUG_SYNC= 'RESET';
connection server_1;
DROP function foo;
-DROP TABLE t1,t2,t3,t4,t5,t6;
+DROP TABLE t1,t2,t3,t4,t5,t6,t7;
SET DEBUG_SYNC= 'RESET';
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_parallel.test b/mysql-test/suite/rpl/t/rpl_parallel.test
index 9ba7a30f2eb..d43cec4df34 100644
--- a/mysql-test/suite/rpl/t/rpl_parallel.test
+++ b/mysql-test/suite/rpl/t/rpl_parallel.test
@@ -13,6 +13,7 @@
--connection server_2
SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
+SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode;
--error ER_SLAVE_MUST_STOP
SET GLOBAL slave_parallel_threads=10;
--source include/stop_slave.inc
@@ -2203,16 +2204,84 @@ SELECT * FROM t1 WHERE a>=2000 ORDER BY a;
SELECT * FROM t2 WHERE a>=2000 ORDER BY a;
+--echo MDEV-31655: Parallel replication deadlock victim preference code erroneously removed
+# The problem was that InnoDB would choose the wrong deadlock victim.
+# Create a lot of transactions that can cause deadlocks, and use error
+# injection to check that the first transactions in each group is never
+# selected as deadlock victim.
+--let $rows= 10
+--let $transactions= 5
+--let $gcos= 10
+
+--connection server_1
+CREATE TABLE t7 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
+BEGIN;
+--disable_query_log
+--let $i= 0
+while ($i < 10) {
+ eval INSERT INTO t7 VALUES ($i, 0);
+ inc $i;
+}
+--enable_query_log
+COMMIT;
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/sync_with_master_gtid.inc
+--source include/stop_slave.inc
+eval set @@global.slave_parallel_threads= $transactions;
+set @@global.slave_parallel_mode= conservative;
+SET @old_dbug= @@GLOBAL.debug_dbug;
+# This error injection will allow no retries for GTIDs divisible by 1000.
+SET GLOBAL debug_dbug= "+d,rpl_mdev31655_zero_retries";
+
+--connection master
+SET @old_dbug= @@SESSION.debug_dbug;
+SET SESSION debug_dbug="+d,binlog_force_commit_id";
+
+--let $j= 1
+while ($j <= $gcos) {
+ eval SET @commit_id= $j+1000;
+ --let $i= 0
+ while ($i < $transactions) {
+ --disable_query_log
+ eval SET SESSION gtid_seq_no= 1000 + 1000*$j + $i;
+ BEGIN;
+ --let $k= 0
+ while ($k < $rows) {
+ eval UPDATE t7 SET b=b+1 WHERE a=(($i+$k) MOD $rows);
+ inc $k;
+ }
+ COMMIT;
+ --enable_query_log
+ inc $i;
+ }
+ inc $j;
+}
+
+SET SESSION debug_dbug= @old_dbug;
+SELECT COUNT(*), SUM(a*100*b) FROM t7;
+
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/start_slave.inc
+--source include/sync_with_master_gtid.inc
+SET GLOBAL debug_dbug= @old_dbug;
+SELECT COUNT(*), SUM(a*100*b) FROM t7;
+
+
# Clean up.
--connection server_2
--source include/stop_slave.inc
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
+SET GLOBAL slave_parallel_mode=@old_parallel_mode;
--source include/start_slave.inc
SET DEBUG_SYNC= 'RESET';
--connection server_1
DROP function foo;
-DROP TABLE t1,t2,t3,t4,t5,t6;
+DROP TABLE t1,t2,t3,t4,t5,t6,t7;
SET DEBUG_SYNC= 'RESET';
--source include/rpl_end.inc
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index b550315d69f..1aeb1257c4a 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -1385,8 +1385,13 @@ handle_rpl_parallel_thread(void *arg)
err= dbug_simulate_tmp_error(rgi, thd););
if (unlikely(err))
{
+ ulong max_retries= slave_trans_retries;
convert_kill_to_deadlock_error(rgi);
- if (has_temporary_error(thd) && slave_trans_retries > 0)
+ DBUG_EXECUTE_IF("rpl_mdev31655_zero_retries",
+ if ((rgi->current_gtid.seq_no % 1000) == 0)
+ max_retries= 0;
+ );
+ if (has_temporary_error(thd) && max_retries > 0)
err= retry_event_group(rgi, rpt, qev);
}
}
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 8ed3f8a9c5e..e6ed7ca1cc4 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -5247,6 +5247,49 @@ thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd)
return 0;
}
+
+/*
+ If the storage engine detects a deadlock, and needs to choose a victim
+ transaction to roll back, it can call this function to ask the upper
+ server layer for which of two possible transactions is preferred to be
+ aborted and rolled back.
+
+ In parallel replication, if two transactions are running in parallel and
+ one is fixed to commit before the other, then the one that commits later
+ will be preferred as the victim - choosing the early transaction as a victim
+ will not resolve the deadlock anyway, as the later transaction still needs
+ to wait for the earlier to commit.
+
+ The return value is -1 if the first transaction is preferred as a deadlock
+ victim, 1 if the second transaction is preferred, or 0 for no preference (in
+ which case the storage engine can make the choice as it prefers).
+*/
+extern "C" int
+thd_deadlock_victim_preference(const MYSQL_THD thd1, const MYSQL_THD thd2)
+{
+ rpl_group_info *rgi1, *rgi2;
+
+ if (!thd1 || !thd2)
+ return 0;
+
+ /*
+ If the transactions are participating in the same replication domain in
+ parallel replication, then request to select the one that will commit
+ later (in the fixed commit order from the master) as the deadlock victim.
+ */
+ rgi1= thd1->rgi_slave;
+ rgi2= thd2->rgi_slave;
+ if (rgi1 && rgi2 &&
+ rgi1->is_parallel_exec &&
+ rgi1->rli == rgi2->rli &&
+ rgi1->current_gtid.domain_id == rgi2->current_gtid.domain_id)
+ return rgi1->gtid_sub_id < rgi2->gtid_sub_id ? 1 : -1;
+
+ /* No preferences, let the storage engine decide. */
+ return 0;
+}
+
+
extern "C" int thd_non_transactional_update(const MYSQL_THD thd)
{
return(thd->transaction.all.modified_non_trans_table);
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index b1cf2152cd6..469d03eaa06 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -49,6 +49,8 @@ Created 5/7/1996 Heikki Tuuri
#include <mysql/service_wsrep.h>
#endif /* WITH_WSREP */
+extern "C" int thd_deadlock_victim_preference(const MYSQL_THD thd1, const MYSQL_THD thd2);
+
/** Lock scheduling algorithm */
ulong innodb_lock_schedule_algorithm;
@@ -1538,6 +1540,16 @@ static bool has_higher_priority(lock_t *lock1, lock_t *lock2)
} else if (!lock_get_wait(lock2)) {
return false;
}
+ // Ask the upper server layer if either of the two trx should be preferred.
+ int preference = thd_deadlock_victim_preference(lock1->trx->mysql_thd,
+ lock2->trx->mysql_thd);
+ if (preference == -1) {
+ // lock1 is preferred as a victim, so lock2 has higher priority
+ return false;
+ } else if (preference == 1) {
+ // lock2 is preferred as a victim, so lock1 has higher priority
+ return true;
+ }
return lock1->trx->start_time_micro <= lock2->trx->start_time_micro;
}
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index 7cd95878b0c..0771d764fb6 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -52,6 +52,9 @@ Created 3/26/1996 Heikki Tuuri
#include <set>
#include <new>
+extern "C"
+int thd_deadlock_victim_preference(const MYSQL_THD thd1, const MYSQL_THD thd2);
+
/** The bit pattern corresponding to TRX_ID_MAX */
const byte trx_id_max_bytes[8] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
@@ -1906,6 +1909,15 @@ trx_weight_ge(
{
ibool a_notrans_edit;
ibool b_notrans_edit;
+ int pref;
+
+ /* First ask the upper server layer if it has any preference for which
+ to prefer as a deadlock victim. */
+ pref= thd_deadlock_victim_preference(a->mysql_thd, b->mysql_thd);
+ if (pref < 0)
+ return FALSE;
+ else if (pref > 0)
+ return TRUE;
/* If mysql_thd is NULL for a transaction we assume that it has
not edited non-transactional tables. */
1
0
[Commits] dbe5c20b755: MDEV-31482: Lock wait timeout with INSERT-SELECT, autoinc, and statement-based replication
by Kristian Nielsen 10 Jul '23
by Kristian Nielsen 10 Jul '23
10 Jul '23
revision-id: dbe5c20b755b87f67d87990c3baabc866667e41b (mariadb-10.4.30-2-gdbe5c20b755)
parent(s): a6114df595eeb7aeed4b050c9a3f4640c4320b5f
author: Kristian Nielsen
committer: Kristian Nielsen
timestamp: 2023-07-09 16:45:47 +0200
message:
MDEV-31482: Lock wait timeout with INSERT-SELECT, autoinc, and statement-based replication
Remove the exception that InnoDB does not report auto-increment locks waits
to the parallel replication.
There was an assumption that these waits could not cause conflicts with
in-order parallel replication and thus need not be reported. However, this
assumption is wrong and it is possible to get conflicts that lead to hangs
for the duration of --innodb-lock-wait-timeout. This can be seen with three
transactions:
1. T1 is waiting for T3 on an autoinc lock
2. T2 is waiting for T1 to commit
3. T3 is waiting on a normal row lock held by T2
Here, T3 needs to be deadlock killed on the wait by T1.
Signed-off-by: Kristian Nielsen <knielsen(a)knielsen-hq.org>
---
mysql-test/suite/rpl/r/rpl_parallel_autoinc.result | 95 ++++++++++++++
mysql-test/suite/rpl/t/rpl_parallel_autoinc.test | 140 +++++++++++++++++++++
sql/sql_class.cc | 6 -
storage/innobase/lock/lock0lock.cc | 8 +-
4 files changed, 236 insertions(+), 13 deletions(-)
diff --git a/mysql-test/suite/rpl/r/rpl_parallel_autoinc.result b/mysql-test/suite/rpl/r/rpl_parallel_autoinc.result
new file mode 100644
index 00000000000..c1829bafa1a
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_parallel_autoinc.result
@@ -0,0 +1,95 @@
+include/master-slave.inc
+[connection master]
+MDEV-31482: Lock wait timeout with INSERT-SELECT, autoinc, and statement-based replication
+include/rpl_connect.inc [creating slave2]
+include/rpl_connect.inc [creating slave3]
+connection master;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
+CREATE TABLE t1 (a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT, INDEX (c)) ENGINE=InnoDB;
+INSERT INTO t1 (b,c) VALUES (0, 1), (0, 1), (0, 2), (0,3), (0, 5), (0, 7), (0, 8);
+CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (10,1), (20,2), (30,3), (40,4), (50,5);
+CREATE TABLE t3 (a VARCHAR(20) PRIMARY KEY, b INT) ENGINE=InnoDB;
+INSERT INTO t3 VALUES ('row for T1', 0), ('row for T2', 0), ('row for T3', 0);
+include/save_master_gtid.inc
+connection slave;
+include/sync_with_master_gtid.inc
+include/stop_slave.inc
+set @@global.slave_parallel_threads= 3;
+set @@global.slave_parallel_mode= OPTIMISTIC;
+set @@global.innodb_lock_wait_timeout= 20;
+connection master;
+BEGIN;
+UPDATE t3 SET b=b+1 where a="row for T1";
+INSERT INTO t1(b, c) SELECT 1, t2.b FROM t2 WHERE a=10;
+Warnings:
+Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. Statements writing to a table with an auto-increment column after selecting from another table are unsafe because the order in which rows are retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ on master and the slave
+COMMIT;
+DELETE FROM t1 WHERE c >= 4 and c < 6;
+BEGIN;
+UPDATE t3 SET b=b+1 where a="row for T3";
+INSERT INTO t1(b, c) SELECT 3, t2.b FROM t2 WHERE a >= 20 AND a <= 40;
+Warnings:
+Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. Statements writing to a table with an auto-increment column after selecting from another table are unsafe because the order in which rows are retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ on master and the slave
+COMMIT;
+include/save_master_gtid.inc
+connection slave1;
+BEGIN;
+SELECT * FROM t3 WHERE a="row for T1" FOR UPDATE;
+a b
+row for T1 0
+connection slave2;
+BEGIN;
+SELECT * FROM t3 WHERE a="row for T3" FOR UPDATE;
+a b
+row for T3 0
+connection slave3;
+BEGIN;
+DELETE FROM t2 WHERE a=30;
+connection slave;
+include/start_slave.inc
+connection slave2;
+ROLLBACK;
+connection slave1;
+ROLLBACK;
+connection slave3;
+ROLLBACK;
+connection slave;
+include/sync_with_master_gtid.inc
+SELECT * FROM t1 ORDER BY a;
+a b c
+1 0 1
+2 0 1
+3 0 2
+4 0 3
+6 0 7
+7 0 8
+8 1 1
+9 3 2
+10 3 3
+11 3 4
+SELECT * FROM t2 ORDER BY a;
+a b
+10 1
+20 2
+30 3
+40 4
+50 5
+SELECT * FROM t3 ORDER BY a;
+a b
+row for T1 1
+row for T2 0
+row for T3 1
+connection master;
+CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format");
+DROP TABLE t1, t2, t3;
+connection slave;
+include/stop_slave.inc
+SET @@global.slave_parallel_threads= 0;
+SET @@global.slave_parallel_mode= conservative;
+SET @@global.innodb_lock_wait_timeout= 50;
+include/start_slave.inc
+SELECT @@GLOBAL.innodb_autoinc_lock_mode;
+@@GLOBAL.innodb_autoinc_lock_mode
+1
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_parallel_autoinc.test b/mysql-test/suite/rpl/t/rpl_parallel_autoinc.test
new file mode 100644
index 00000000000..0e96b4dfb80
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_parallel_autoinc.test
@@ -0,0 +1,140 @@
+--source include/have_binlog_format_statement.inc
+--source include/have_innodb.inc
+--source include/master-slave.inc
+
+--echo MDEV-31482: Lock wait timeout with INSERT-SELECT, autoinc, and statement-based replication
+
+# The scenario is transactions T1, T2, T3:
+#
+# T1 is waiting for T3 on an autoinc lock
+# T2 is waiting for T1 to commit
+# T3 is waiting on a normal row lock held by T2
+#
+# This caused a hang until innodb_lock_wait_timeout, because autoinc
+# locks were not reported to the in-order parallel replication, so T3
+# was not deadlock killed.
+
+--let $lock_wait_timeout=20
+
+--let $rpl_connection_name= slave2
+--let $rpl_server_number= 2
+--source include/rpl_connect.inc
+
+--let $rpl_connection_name= slave3
+--let $rpl_server_number= 2
+--source include/rpl_connect.inc
+
+--connection master
+ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
+
+# A table as destination for INSERT-SELECT
+CREATE TABLE t1 (a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT, INDEX (c)) ENGINE=InnoDB;
+INSERT INTO t1 (b,c) VALUES (0, 1), (0, 1), (0, 2), (0,3), (0, 5), (0, 7), (0, 8);
+
+# A table as source for INSERT-SELECT.
+CREATE TABLE t2 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (10,1), (20,2), (30,3), (40,4), (50,5);
+
+# A table to help order slave worker threads to setup the desired scenario.
+CREATE TABLE t3 (a VARCHAR(20) PRIMARY KEY, b INT) ENGINE=InnoDB;
+INSERT INTO t3 VALUES ('row for T1', 0), ('row for T2', 0), ('row for T3', 0);
+--source include/save_master_gtid.inc
+
+--connection slave
+--source include/sync_with_master_gtid.inc
+--source include/stop_slave.inc
+--let $save_innodb_lock_wait_timeout= `SELECT @@global.innodb_lock_wait_timeout`
+--let $save_slave_parallel_threads= `SELECT @@global.slave_parallel_threads`
+--let $save_slave_parallel_mode= `SELECT @@global.slave_parallel_mode`
+set @@global.slave_parallel_threads= 3;
+set @@global.slave_parallel_mode= OPTIMISTIC;
+eval set @@global.innodb_lock_wait_timeout= $lock_wait_timeout;
+
+--connection master
+# Transaction T1.
+BEGIN;
+UPDATE t3 SET b=b+1 where a="row for T1";
+INSERT INTO t1(b, c) SELECT 1, t2.b FROM t2 WHERE a=10;
+COMMIT;
+
+# Transaction T2.
+DELETE FROM t1 WHERE c >= 4 and c < 6;
+
+# Transaction T3.
+BEGIN;
+UPDATE t3 SET b=b+1 where a="row for T3";
+INSERT INTO t1(b, c) SELECT 3, t2.b FROM t2 WHERE a >= 20 AND a <= 40;
+COMMIT;
+
+--source include/save_master_gtid.inc
+
+--connection slave1
+# Temporarily block T1 to create the scheduling that triggers the bug.
+BEGIN;
+SELECT * FROM t3 WHERE a="row for T1" FOR UPDATE;
+
+--connection slave2
+# Temporarily block T3 from starting (so T2 can reach commit).
+BEGIN;
+SELECT * FROM t3 WHERE a="row for T3" FOR UPDATE;
+
+--connection slave3
+# This critical step blocks T3 after it has inserted its first row,
+# and thus taken the auto-increment lock, but before it has reached
+# the point where it gets a row lock wait on T2. Even though
+# auto-increment lock waits were not reported due to the bug,
+# transitive lock waits (T1 waits on autoinc of T3 which waits on row
+# on T2) _were_ reported as T1 waiting on T2, and thus a deadlock kill
+# happened and the bug was not triggered.
+BEGIN;
+DELETE FROM t2 WHERE a=30;
+
+--connection slave
+--source include/start_slave.inc
+
+# First let T2 complete until it is waiting for T1 to commit.
+--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state='Waiting for prior transaction to commit' and command LIKE 'Slave_worker';
+--source include/wait_condition.inc
+
+# Then let T3 reach the point where it has obtained the autoinc lock,
+# but it is not yet waiting for a row lock held by T2.
+--connection slave2
+ROLLBACK;
+--let $wait_condition= SELECT count(*)=1 FROM information_schema.processlist WHERE state='Sending data' and info LIKE 'INSERT INTO t1(b, c) SELECT 3, t2.b%' and time_ms > 500 and command LIKE 'Slave_worker';
+--source include/wait_condition.inc
+
+# Now let T1 continue, while T3 is holding the autoinc lock but before
+# it is waiting for T2. Wait a short while to give the hang a chance to
+# happen; T1 needs to get to request the autoinc lock before we let T3
+# continue. (There's a small chance the sleep will be too small, which will
+# let the test occasionally pass on non-fixed server).
+--connection slave1
+ROLLBACK;
+--sleep 0.5
+
+# Now let T3 continue; the bug was that this lead to an undetected
+# deadlock that remained until innodb lock wait timeout.
+--connection slave3
+ROLLBACK;
+
+--connection slave
+--let $slave_timeout= `SELECT $lock_wait_timeout/2`
+--source include/sync_with_master_gtid.inc
+--let $slave_timeout=
+SELECT * FROM t1 ORDER BY a;
+SELECT * FROM t2 ORDER BY a;
+SELECT * FROM t3 ORDER BY a;
+
+# Cleanup.
+--connection master
+CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format");
+DROP TABLE t1, t2, t3;
+
+--connection slave
+--source include/stop_slave.inc
+eval SET @@global.slave_parallel_threads= $save_slave_parallel_threads;
+eval SET @@global.slave_parallel_mode= $save_slave_parallel_mode;
+eval SET @@global.innodb_lock_wait_timeout= $save_innodb_lock_wait_timeout;
+--source include/start_slave.inc
+SELECT @@GLOBAL.innodb_autoinc_lock_mode;
+--source include/rpl_end.inc
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 73bb654080a..8ed3f8a9c5e 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -5119,12 +5119,6 @@ thd_need_wait_reports(const MYSQL_THD thd)
deadlock with the pre-determined commit order, we kill the later
transaction, and later re-try it, to resolve the deadlock.
- This call need only receive reports about waits for locks that will remain
- until the holding transaction commits. InnoDB auto-increment locks,
- for example, are released earlier, and so need not be reported. (Such false
- positives are not harmful, but could lead to unnecessary kill and retry, so
- best avoided).
-
Returns 1 if the OTHER_THD will be killed to resolve deadlock, 0 if not. The
actual kill will happen later, asynchronously from another thread. The
caller does not need to take any actions on the return value if the
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 26388ad95e2..b1cf2152cd6 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -6872,13 +6872,7 @@ DeadlockChecker::search()
return m_start;
}
- /* We do not need to report autoinc locks to the upper
- layer. These locks are released before commit, so they
- can not cause deadlocks with binlog-fixed commit
- order. */
- if (m_report_waiters
- && (lock_get_type_low(lock) != LOCK_TABLE
- || lock_get_mode(lock) != LOCK_AUTO_INC)) {
+ if (m_report_waiters) {
thd_rpl_deadlock_check(m_start->mysql_thd,
lock->trx->mysql_thd);
}
1
0
[Commits] 60c1b15: MDEV-31102 Crash when pushing condition into view defined as union
by IgorBabaev 21 Apr '23
by IgorBabaev 21 Apr '23
21 Apr '23
revision-id: 60c1b15328165ce215071b4f307fd20bc8a0b545 (mariadb-10.4.28-105-g60c1b15)
parent(s): fc6e8a3d3264078bed28632a289130b1dc24daea
author: Igor Babaev
committer: Igor Babaev
timestamp: 2023-04-21 13:46:14 -0700
message:
MDEV-31102 Crash when pushing condition into view defined as union
This bug could manifest itself at the first execution of prepared statement
created for queries using a materialized view defined as union. A crash
could happen for sure if the query contained a condition pushable into
the view and this condition was over the column defined via a complex string
expression requiring implicit conversion from one charset to another for
some of its sub-expressions. The bug could cause crashes when executing
PS for some other queries whose optimization needed building clones for
such expressions.
This bug was introduced in the patch for MDEV-29988 where the class
Item_direct_ref_to_item was added. The implementations of the virtual
methods get_copy() and build_clone() were invalid for the class and this
could cause crashes after the method build_clone() was called for
expressions containing objects of the Item_direct_ref_to_item type.
Approved by Sergei Golubchik <serg(a)mariadb.com>
---
mysql-test/main/derived_cond_pushdown.result | 83 ++++++++++++++++++++++++++++
mysql-test/main/derived_cond_pushdown.test | 30 ++++++++++
sql/item.h | 17 +++++-
3 files changed, 128 insertions(+), 2 deletions(-)
diff --git a/mysql-test/main/derived_cond_pushdown.result b/mysql-test/main/derived_cond_pushdown.result
index 41f9ac6..4b202ea 100644
--- a/mysql-test/main/derived_cond_pushdown.result
+++ b/mysql-test/main/derived_cond_pushdown.result
@@ -18275,4 +18275,87 @@ id select_type table type possible_keys key key_len ref rows Extra
3 DERIVED t1 ALL NULL NULL NULL NULL 3 Using temporary
drop view v1;
drop table t1;
+#
+# MDEV-31102: execution of PS for query where pushdown of condition
+# into view defined as union is applied
+#
+create table t1 (
+n int,
+lv varchar(31) charset latin1,
+mv varchar(31) charset utf8mb3
+) engine=myisam;
+insert into t1 values (1,'aa','xxx'), ('2','bb','yyy'), (3,'cc','zzz');
+create view v1 as
+select case when n=1 then lv when n=2 then mv else NULL end as r from t1
+union
+select 'a';
+select * from v1 where r < 'x';
+r
+aa
+a
+explain extended select * from v1 where r < 'x';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 100.00 Using where
+2 DERIVED t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+3 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1 */ select `v1`.`r` AS `r` from `test`.`v1` where `v1`.`r` < 'x'
+explain format=json select * from v1 where r < 'x';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "v1.r < 'x'",
+ "materialized": {
+ "query_block": {
+ "union_result": {
+ "table_name": "<union2,3>",
+ "access_type": "ALL",
+ "query_specifications": [
+ {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "case when t1.n = 1 then convert(t1.lv using utf8) when t1.n = 2 then t1.mv else NULL end < 'x'"
+ }
+ }
+ },
+ {
+ "query_block": {
+ "select_id": 3,
+ "operation": "UNION",
+ "table": {
+ "message": "No tables used"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+}
+prepare stmt from "select * from v1 where r < 'x'";
+execute stmt;
+r
+aa
+a
+execute stmt;
+r
+aa
+a
+deallocate prepare stmt;
+drop view v1;
+drop table t1;
# End of 10.4 tests
diff --git a/mysql-test/main/derived_cond_pushdown.test b/mysql-test/main/derived_cond_pushdown.test
index 6cfe23b..b4e131d 100644
--- a/mysql-test/main/derived_cond_pushdown.test
+++ b/mysql-test/main/derived_cond_pushdown.test
@@ -3942,4 +3942,34 @@ explain select * from v1;
drop view v1;
drop table t1;
+--echo #
+--echo # MDEV-31102: execution of PS for query where pushdown of condition
+--echo # into view defined as union is applied
+--echo #
+
+create table t1 (
+ n int,
+ lv varchar(31) charset latin1,
+ mv varchar(31) charset utf8mb3
+) engine=myisam;
+insert into t1 values (1,'aa','xxx'), ('2','bb','yyy'), (3,'cc','zzz');
+create view v1 as
+select case when n=1 then lv when n=2 then mv else NULL end as r from t1
+union
+select 'a';
+
+let $q=
+select * from v1 where r < 'x';
+
+eval $q;
+eval explain extended $q;
+eval explain format=json $q;
+eval prepare stmt from "$q";
+execute stmt;
+execute stmt;
+deallocate prepare stmt;
+
+drop view v1;
+drop table t1;
+
--echo # End of 10.4 tests
diff --git a/sql/item.h b/sql/item.h
index 31568aa..1e0caaa 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -7647,7 +7647,7 @@ class Item_direct_ref_to_item : public Item_direct_ref
Item *get_tmp_table_item(THD *thd)
{ return m_item->get_tmp_table_item(thd); }
Item *get_copy(THD *thd)
- { return m_item->get_copy(thd); }
+ { return get_item_copy<Item_direct_ref_to_item>(thd, this); }
COND *build_equal_items(THD *thd, COND_EQUAL *inherited,
bool link_item_fields,
COND_EQUAL **cond_equal_ref)
@@ -7715,7 +7715,20 @@ class Item_direct_ref_to_item : public Item_direct_ref
bool excl_dep_on_grouping_fields(st_select_lex *sel)
{ return m_item->excl_dep_on_grouping_fields(sel); }
bool is_expensive() { return m_item->is_expensive(); }
- Item* build_clone(THD *thd) { return get_copy(thd); }
+ void set_item(Item *item) { m_item= item; }
+ Item *build_clone(THD *thd)
+ {
+ Item *clone_item= m_item->build_clone(thd);
+ if (clone_item)
+ {
+ Item_direct_ref_to_item *copy= (Item_direct_ref_to_item *) get_copy(thd);
+ if (!copy)
+ return 0;
+ copy->set_item(clone_item);
+ return copy;
+ }
+ return 0;
+ }
void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, uint flags)
1
0
[Commits] d8e6032: Merge branch '10.4' of github.com:MariaDB/server into 10.4
by IgorBabaev 20 Apr '23
by IgorBabaev 20 Apr '23
20 Apr '23
revision-id: d8e6032fad0c93c4214c7abea7d6fd3209654475 (mariadb-10.4.28-76-gd8e6032)
parent(s): c570e8bc2a8883167cf2f037ae9937c61d7ea07b fc6e8a3d3264078bed28632a289130b1dc24daea
author: Igor Babaev
committer: Igor Babaev
timestamp: 2023-04-20 14:07:16 -0700
message:
Merge branch '10.4' of github.com:MariaDB/server into 10.4
client/mysqltest.cc | 17 +-
extra/mariabackup/backup_copy.cc | 169 ++++-----
extra/mariabackup/backup_copy.h | 19 +-
extra/mariabackup/backup_mysql.cc | 32 +-
extra/mariabackup/backup_mysql.h | 15 +-
extra/mariabackup/datasink.h | 29 ++
extra/mariabackup/write_filt.cc | 14 +-
extra/mariabackup/write_filt.h | 3 +-
extra/mariabackup/xtrabackup.cc | 225 ++++++-----
extra/mariabackup/xtrabackup.h | 10 +-
include/m_ctype.h | 25 +-
include/m_string.h | 19 +-
include/my_alloca.h | 6 +-
mysql-test/include/ctype_nopad_prefix_unique.inc | 85 +++++
.../include/sql_mode_pad_char_to_full_length.inc | 31 ++
mysql-test/main/ctype_uca_partitions.result | 40 ++
mysql-test/main/ctype_uca_partitions.test | 32 ++
mysql-test/main/ctype_utf8_uca.result | 148 ++++++++
mysql-test/main/ctype_utf8_uca.test | 18 +
mysql-test/main/func_str.result | 9 +
mysql-test/main/func_str.test | 10 +-
.../main/sql_mode_pad_char_to_full_length.result | 94 +++++
.../main/sql_mode_pad_char_to_full_length.test | 19 +
mysql-test/main/status.test | 9 +-
mysql-test/main/update.result | 29 ++
mysql-test/main/update.test | 23 ++
mysql-test/suite/galera/r/MDEV-30955.result | 26 ++
mysql-test/suite/galera/r/galera_sequences.result | 61 ++-
mysql-test/suite/galera/r/mdev-26175.result | 24 ++
mysql-test/suite/galera/t/MDEV-30955.test | 70 ++++
mysql-test/suite/galera/t/galera_sequences.cnf | 9 +
mysql-test/suite/galera/t/galera_sequences.test | 54 ++-
mysql-test/suite/galera/t/mdev-26175.test | 27 ++
mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf | 2 +-
mysql-test/suite/galera_sr/r/MDEV-30862.result | 11 +
mysql-test/suite/galera_sr/t/MDEV-30862.test | 24 ++
.../suite/innodb/r/default_row_format_alter.result | 20 +
mysql-test/suite/innodb/r/innodb-wl5522.result | 14 +
mysql-test/suite/innodb/r/innodb_ctype_utf8.result | 98 +++++
.../r/sql_mode_pad_char_to_full_length.result | 51 +++
.../suite/innodb/t/default_row_format_alter.test | 17 +
.../suite/innodb/t/import_tablespace_race.test | 8 +-
mysql-test/suite/innodb/t/innodb-wl5522.test | 21 +-
mysql-test/suite/innodb/t/innodb_ctype_utf8.test | 12 +
mysql-test/suite/innodb/t/restart.test | 23 +-
.../innodb/t/sql_mode_pad_char_to_full_length.test | 18 +
mysql-test/suite/maria/aria_log_dir_path.result | 29 ++
mysql-test/suite/maria/aria_log_dir_path.test | 65 ++++
mysql-test/suite/maria/maria3.result | 1 +
.../sys_vars/r/aria_log_dir_path_basic.result | 36 ++
mysql-test/suite/sys_vars/r/sysvars_aria.result | 12 +
.../sys_vars/r/sysvars_server_embedded.result | 10 +
.../sys_vars/r/sysvars_server_notembedded.result | 10 +
.../suite/sys_vars/t/aria_log_dir_path_basic.test | 35 ++
mysql-test/suite/sys_vars/t/sysvars_aria.test | 4 +
mysys/my_addr_resolve.c | 7 +-
scripts/wsrep_sst_mariabackup.sh | 3 +
sql/debug_sync.cc | 2 +-
sql/field.cc | 21 +-
sql/field.h | 9 +-
sql/handler.cc | 8 +-
sql/item_strfunc.cc | 2 +
sql/key.cc | 3 +-
sql/log.cc | 2 +-
sql/log_event.h | 2 +-
sql/opt_range.cc | 3 +
sql/sql_parse.cc | 2 +-
sql/sql_select.cc | 18 +-
sql/sql_table.cc | 13 +
sql/wsrep_client_service.cc | 2 -
sql/wsrep_high_priority_service.cc | 15 +-
sql/wsrep_trans_observer.h | 15 +-
storage/connect/reldef.cpp | 26 +-
storage/connect/tabbson.cpp | 47 +--
storage/connect/tabdos.cpp | 61 +--
storage/connect/tabext.cpp | 55 +--
storage/connect/tabfmt.cpp | 38 +-
storage/connect/tabjdbc.cpp | 45 +--
storage/connect/tabjson.cpp | 54 +--
storage/innobase/btr/btr0btr.cc | 2 +-
storage/innobase/fts/fts0fts.cc | 10 +-
storage/innobase/ibuf/ibuf0ibuf.cc | 4 +
storage/innobase/rem/rem0cmp.cc | 3 +-
storage/innobase/row/row0merge.cc | 5 -
storage/maria/ha_maria.cc | 2 +-
storage/rocksdb/build_rocksdb.cmake | 2 +-
storage/spider/spd_table.cc | 3 +
.../tokudb/PerconaFT/portability/toku_debug_sync.h | 9 +-
strings/ctype-bin.c | 6 +-
strings/ctype-simple.c | 3 +-
strings/ctype-tis620.c | 3 +-
strings/ctype-uca.inl | 27 +-
strings/ctype.c | 6 +-
strings/strcoll.inl | 3 +-
strings/strings_def.h | 6 +-
unittest/strings/strings-t.c | 420 +++++++++++----------
96 files changed, 2196 insertions(+), 693 deletions(-)
1
0
[Commits] d174360: MDEV-20773 Error from UPDATE when estimating selectivity of a range
by IgorBabaev 12 Apr '23
by IgorBabaev 12 Apr '23
12 Apr '23
revision-id: d1743600ff27f3db022f7d77a6f6bdc471c4e320 (mariadb-10.4.27-140-gd174360)
parent(s): f33fc2fae5c3f3e80c4d24348609f3ce5246ca9c
author: Igor Babaev
committer: Igor Babaev
timestamp: 2023-04-11 21:21:45 -0700
message:
MDEV-20773 Error from UPDATE when estimating selectivity of a range
This bug could affect multi-update statements as well as single-table
update statements processed as multi-updates when the where condition
contained a range condition over a non-indexed varchar column. The
optimizer calculates selectivity of such range conditions using histograms.
For each range the buckets containing endpoints of the range are
determined with a procedure that stores the values of the endpoints in the
space of the record buffer where values of the columns are usually stored.
For a range over a varchar column the value of an endpoint may exceed the
size of the buffer and in such case the value is stored with truncation.
These truncations cannot affect the result of the calculation of the range
selectivity as the calculation employs only the beginning of the value
string. However it can trigger generation of an unexpected error on this
truncation if an update statement is processed.
This patch prohibits truncation messages when selectivity of a range
condition is calculated for a non-indexed column.
Approved by Oleksandr Byelkin <sanja(a)mariadb.com>
---
mysql-test/main/update.result | 29 +++++++++++++++++++++++++++++
mysql-test/main/update.test | 23 +++++++++++++++++++++++
sql/opt_range.cc | 3 +++
3 files changed, 55 insertions(+)
diff --git a/mysql-test/main/update.result b/mysql-test/main/update.result
index f5edf1c..7b6426d 100644
--- a/mysql-test/main/update.result
+++ b/mysql-test/main/update.result
@@ -734,3 +734,32 @@ UPDATE t1,t2 SET t1.i1 = -39 WHERE t2.d1 <> t1.i1 AND t2.d1 = t1.d2;
ERROR 22007: Incorrect datetime value: '19' for column `test`.`t1`.`i1` at row 1
DROP TABLE t1,t2;
# End of MariaDB 10.2 tests
+#
+# MDEV-20773: UPDATE with LIKE predicate over non-indexed column
+# of VARCHAR type
+#
+create table t1 (a1 varchar(30), a2 varchar(30) collate utf8_bin);
+insert into t1 values
+('aa','zzz'), ('b','xxaa'), ('ccc','yyy'), ('ddd','xxb');
+analyze table t1 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+explain extended
+update t1 set a1 = 'u'
+ where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c');
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 49.22 Using where
+2 SUBQUERY t1 ALL NULL NULL NULL NULL 4 50.00 Using where
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a1` = 'u' where `test`.`t1`.`a2` like 'xx%'
+update t1 set a1 = 'u'
+ where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c');
+select * from t1;
+a1 a2
+aa zzz
+u xxaa
+ccc yyy
+u xxb
+drop table t1;
+# End of MariaDB 10.4 tests
diff --git a/mysql-test/main/update.test b/mysql-test/main/update.test
index 8a69494..147d69d 100644
--- a/mysql-test/main/update.test
+++ b/mysql-test/main/update.test
@@ -676,3 +676,26 @@ UPDATE t1,t2 SET t1.i1 = -39 WHERE t2.d1 <> t1.i1 AND t2.d1 = t1.d2;
DROP TABLE t1,t2;
--echo # End of MariaDB 10.2 tests
+
+--echo #
+--echo # MDEV-20773: UPDATE with LIKE predicate over non-indexed column
+--echo # of VARCHAR type
+--echo #
+
+create table t1 (a1 varchar(30), a2 varchar(30) collate utf8_bin);
+insert into t1 values
+ ('aa','zzz'), ('b','xxaa'), ('ccc','yyy'), ('ddd','xxb');
+analyze table t1 persistent for all;
+
+explain extended
+update t1 set a1 = 'u'
+ where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c');
+
+update t1 set a1 = 'u'
+ where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c');
+
+select * from t1;
+
+drop table t1;
+
+--echo # End of MariaDB 10.4 tests
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index c344201..69a95f8 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -3518,7 +3518,10 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
}
else
{
+ enum_check_fields save_count_cuted_fields= thd->count_cuted_fields;
+ thd->count_cuted_fields= CHECK_FIELD_IGNORE;
rows= records_in_column_ranges(¶m, idx, key);
+ thd->count_cuted_fields= save_count_cuted_fields;
if (rows != DBL_MAX)
{
key->field->cond_selectivity= rows/table_records;
1
0
[Commits] c570e8b: MDEV-26301 Split optimization refills temporary table too many times
by IgorBabaev 11 Apr '23
by IgorBabaev 11 Apr '23
11 Apr '23
revision-id: c570e8bc2a8883167cf2f037ae9937c61d7ea07b (mariadb-10.4.28-75-gc570e8b)
parent(s): a6780df49b443b172124e7e881ed0bea54d75907
author: Igor Babaev
committer: Igor Babaev
timestamp: 2023-04-10 20:51:27 -0700
message:
MDEV-26301 Split optimization refills temporary table too many times
This patch optimizes the number of refills for the lateral derived table
to which a materialized derived table subject to split optimization
is converted. This optimized number of refills is now considered as the
expected number of refills of the materialized derived table when searching
for the best possible splitting of the table.
---
mysql-test/main/derived_split_innodb.result | 261 ++++++++++++++++++++++++++++
mysql-test/main/derived_split_innodb.test | 136 +++++++++++++++
sql/opt_split.cc | 148 +++++++++++++---
sql/sql_select.cc | 27 ++-
sql/sql_select.h | 29 +++-
5 files changed, 571 insertions(+), 30 deletions(-)
diff --git a/mysql-test/main/derived_split_innodb.result b/mysql-test/main/derived_split_innodb.result
index 04f79d3..8a6585d 100644
--- a/mysql-test/main/derived_split_innodb.result
+++ b/mysql-test/main/derived_split_innodb.result
@@ -284,3 +284,264 @@ id select_type table type possible_keys key key_len ref rows Extra
2 DERIVED t4 ALL NULL NULL NULL NULL 40 Using filesort
drop table t3, t4;
# End of 10.3 tests
+#
+# MDEV-26301: Split optimization refills temporary table too many times
+#
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+analyze table t1,t2,t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status Table is already up to date
+explain
+select * from
+(t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5
+1 SIMPLE t2 ref a a 5 test.t1.b 2 Using where
+1 SIMPLE t3 ref a a 5 test.t1.b 3 Using where
+create table t10 (
+grp_id int,
+col1 int,
+key(grp_id)
+);
+insert into t10
+select
+A.seq,
+B.seq
+from
+seq_1_to_100 A,
+seq_1_to_100 B;
+create table t11 (
+col1 int,
+col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+analyze table t10,t11 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status Table is already up to date
+test.t11 analyze status Engine-independent statistics collected
+test.t11 analyze status OK
+explain select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+# The important part in the below output is:
+# "lateral": 1,
+# "query_block": {
+# "select_id": 2,
+# "r_loops": 5, <-- must be 5, not 30.
+analyze format=json select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "const_condition": "1",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 5,
+ "r_rows": 5,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "table": {
+ "table_name": "t2",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.b"],
+ "r_loops": 5,
+ "rows": 2,
+ "r_rows": 2,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))"
+ },
+ "table": {
+ "table_name": "t3",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.b"],
+ "r_loops": 10,
+ "rows": 3,
+ "r_rows": 3,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))"
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["grp_id"],
+ "ref": ["test.t1.b"],
+ "r_loops": 30,
+ "rows": 10,
+ "r_rows": 1,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))",
+ "materialized": {
+ "lateral": 1,
+ "query_block": {
+ "select_id": 2,
+ "r_loops": 5,
+ "r_total_time_ms": "REPLACED",
+ "outer_ref_condition": "t1.b is not null",
+ "table": {
+ "table_name": "t10",
+ "access_type": "ref",
+ "possible_keys": ["grp_id"],
+ "key": "grp_id",
+ "key_length": "5",
+ "used_key_parts": ["grp_id"],
+ "ref": ["test.t1.b"],
+ "r_loops": 5,
+ "rows": 100,
+ "r_rows": 100,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "t11",
+ "access_type": "ALL",
+ "r_loops": 5,
+ "rows": 10,
+ "r_rows": 10,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "buffer_type": "flat",
+ "buffer_size": "1Kb",
+ "join_type": "BNL",
+ "attached_condition": "trigcond(t11.col1 = t10.col1)",
+ "r_filtered": 10
+ }
+ }
+ }
+ }
+ }
+}
+create table t21 (pk int primary key);
+insert into t21 values (1),(2),(3);
+create table t22 (pk int primary key);
+insert into t22 values (1),(2),(3);
+explain
+select * from
+t21, t22,
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1 and t22.pk=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t22 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+explain
+select * from
+t21,
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from
+t22 join t10 left join t11 on t11.col1=t10.col1
+where
+t22.pk=1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+create table t5 (
+pk int primary key
+);
+insert into t5 select seq from seq_1_to_1000;
+explain
+select * from
+t21,
+(
+(((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from
+t22 join t10 left join t11 on t11.col1=t10.col1
+where
+t22.pk=1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t5 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 Using index
+1 PRIMARY t2 ref a a 5 test.t1.b 2
+1 PRIMARY t3 ref a a 5 test.t1.b 3
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t5.pk 100 Using index condition
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+drop table t1,t2,t3,t5, t10, t11, t21, t22;
+# End of 10.4 tests
diff --git a/mysql-test/main/derived_split_innodb.test b/mysql-test/main/derived_split_innodb.test
index 2f74f5f..ab83850 100644
--- a/mysql-test/main/derived_split_innodb.test
+++ b/mysql-test/main/derived_split_innodb.test
@@ -227,3 +227,139 @@ where t3.b > 15;
drop table t3, t4;
--echo # End of 10.3 tests
+
+--source include/have_sequence.inc
+
+--echo #
+--echo # MDEV-26301: Split optimization refills temporary table too many times
+--echo #
+
+# 5 values
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+
+# 5 value groups of size 2 each
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+
+# 5 value groups of size 3 each
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+
+analyze table t1,t2,t3 persistent for all;
+
+explain
+select * from
+ (t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
+
+# Now, create tables for Groups.
+
+create table t10 (
+ grp_id int,
+ col1 int,
+ key(grp_id)
+);
+
+# 100 groups of 100 values each
+insert into t10
+select
+ A.seq,
+ B.seq
+from
+ seq_1_to_100 A,
+ seq_1_to_100 B;
+
+# and X10 multiplier
+
+create table t11 (
+ col1 int,
+ col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+
+analyze table t10,t11 persistent for all;
+
+let $q1=
+select * from
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b;
+
+eval
+explain $q1;
+
+--echo # The important part in the below output is:
+--echo # "lateral": 1,
+--echo # "query_block": {
+--echo # "select_id": 2,
+--echo # "r_loops": 5, <-- must be 5, not 30.
+--source include/analyze-format.inc
+
+eval
+analyze format=json $q1;
+
+create table t21 (pk int primary key);
+insert into t21 values (1),(2),(3);
+
+create table t22 (pk int primary key);
+insert into t22 values (1),(2),(3);
+
+# Same as above but throw in a couple of const tables.
+explain
+select * from
+ t21, t22,
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1 and t22.pk=2;
+
+explain
+select * from
+ t21,
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from
+ t22 join t10 left join t11 on t11.col1=t10.col1
+ where
+ t22.pk=1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1;
+
+# And also add a non-const table
+
+create table t5 (
+ pk int primary key
+ );
+insert into t5 select seq from seq_1_to_1000;
+
+explain
+select * from
+ t21,
+ (
+ (((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from
+ t22 join t10 left join t11 on t11.col1=t10.col1
+ where
+ t22.pk=1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1;
+
+drop table t1,t2,t3,t5, t10, t11, t21, t22;
+
+--echo # End of 10.4 tests
diff --git a/sql/opt_split.cc b/sql/opt_split.cc
index a356335..8d27688 100644
--- a/sql/opt_split.cc
+++ b/sql/opt_split.cc
@@ -65,7 +65,7 @@
If we have only one equi-join condition then we either push it as
for Q1R or we don't. In a general case we may have much more options.
Consider the query (Q3)
- SELECT
+ SELECT *
FROM t1,t2 (SELECT t3.a, t3.b, MIN(t3.c) as min
FROM t3 GROUP BY a,b) t
WHERE t.a = t1.a AND t.b = t2.b
@@ -102,6 +102,45 @@
If we just drop the index on t3(a,b) the chances that the splitting
will be used becomes much lower but they still exists providing that
the fanout of the partial join of t1 and t2 is small enough.
+
+ The lateral derived table LT formed as a result of SM optimization applied
+ to a materialized derived table DT must be joined after all parameters
+ of splitting has been evaluated, i.e. after all expressions used in the
+ equalities pushed into DT that make the employed splitting effective
+ could be evaluated. With the chosen join order all the parameters can be
+ evaluated after the last table LPT that contains any columns referenced in
+ the parameters has been joined and the table APT following LPT in the chosen
+ join order is accessed.
+ Usually the formed lateral derived table LT is accessed right after the table
+ LPT. As in such cases table LT must be refilled for each combination of
+ splitting parameters this table must be populated before each access to LT
+ and the estimate of the expected number of refills that could be suggested in
+ such cases is the number of rows in the partial join ending with table LPT.
+ However in other cases the chosen join order may contain tables between LPT
+ and LT.
+ Consider the query (Q4)
+ SELECT *
+ FROM t1 JOIN t2 ON t1.b = t2.b
+ LEFT JOIN (SELECT t3.a, t3.b, MIN(t3.c) as min
+ FROM t3 GROUP BY a,b) t
+ ON t.a = t1.a AND t.c > 0
+ [WHERE P(t1,t2)];
+ Let's assume that the join order t1,t2,t was chosen for this query and
+ SP optimization was applied to t with splitting over t3.a using the index
+ on column t3.a. Here the table t1 serves as LPT, t2 as APT while t with
+ pushed condition t.a = t1.a serves as LT. Note that here LT is accessed
+ after t2, not right after t1. Here the number of refills of the lateral
+ derived is not more that the number of key values of t1.a that might be
+ less than the cardinality of the partial join (t1,t2). That's why it makes
+ sense to signal that t3 has to be refilled just before t2 is accessed.
+ However if the cardinality of the partial join (t1,t2) happens to be less
+ than the cardinality of the partial join (t1) due to additional selective
+ condition P(t1,t2) then the flag informing about necessity of a new refill
+ can be set either when accessing t2 or right after it has been joined.
+ The current code sets such flag right after generating a record of the
+ partial join with minimal cardinality for all those partial joins that
+ end between APT and LT. It allows sometimes to push extra conditions
+ into the lateral derived without any increase of the number of refills.
*/
/*
@@ -248,6 +287,7 @@ class SplM_opt_info : public Sql_alloc
double unsplit_card;
/* Lastly evaluated execution plan for 'join' with pushed equalities */
SplM_plan_info *last_plan;
+ double last_refills;
SplM_plan_info *find_plan(TABLE *table, uint key, uint parts);
};
@@ -831,13 +871,13 @@ SplM_plan_info *SplM_opt_info::find_plan(TABLE *table, uint key, uint parts)
static
void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
TABLE *table, uint key,
- table_map remaining_tables,
+ table_map excluded_tables,
bool validity_val)
{
KEYUSE_EXT *keyuse_ext= key_keyuse_ext_start;
do
{
- if (!(keyuse_ext->needed_in_prefix & remaining_tables))
+ if (!(keyuse_ext->needed_in_prefix & excluded_tables))
{
/*
The enabling/disabling flags are set just in KEYUSE_EXT structures.
@@ -857,8 +897,11 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
Choose the best splitting to extend the evaluated partial join
@param
- record_count estimated cardinality of the extended partial join
+ idx index for joined table T in current partial join P
remaining_tables tables not joined yet
+ spl_pd_boundary OUT bitmap of the table from P extended by T that
+ starts the sub-sequence of tables S from which
+ no conditions are allowed to be pushed into T.
@details
This function is called during the search for the best execution
@@ -873,17 +916,19 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
splitting the function set it as the true plan of materialization
of the table T.
The function caches the found plans for materialization of table T
- together if the info what key was used for splitting. Next time when
+ together with the info what key was used for splitting. Next time when
the optimizer prefers to use the same key the plan is taken from
the cache of plans
@retval
Pointer to the info on the found plan that employs the pushed equalities
if the plan has been chosen, NULL - otherwise.
+ If the function returns NULL the value of spl_param_tables is set to 0.
*/
-SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
- table_map remaining_tables)
+SplM_plan_info * JOIN_TAB::choose_best_splitting(uint idx,
+ table_map remaining_tables,
+ table_map *spl_pd_boundary)
{
SplM_opt_info *spl_opt_info= table->spl_opt_info;
DBUG_ASSERT(spl_opt_info != NULL);
@@ -898,6 +943,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
SplM_plan_info *spl_plan= 0;
uint best_key= 0;
uint best_key_parts= 0;
+ table_map best_param_tables;
/*
Check whether there are keys that can be used to join T employing splitting
@@ -916,6 +962,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
uint key= keyuse_ext->key;
KEYUSE_EXT *key_keyuse_ext_start= keyuse_ext;
key_part_map found_parts= 0;
+ table_map needed_in_prefix= 0;
do
{
if (keyuse_ext->needed_in_prefix & remaining_tables)
@@ -941,6 +988,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
KEY *key_info= table->key_info + key;
double rec_per_key=
key_info->actual_rec_per_key(keyuse_ext->keypart);
+ needed_in_prefix|= keyuse_ext->needed_in_prefix;
if (rec_per_key < best_rec_per_key)
{
best_table= keyuse_ext->table;
@@ -948,6 +996,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
best_key_parts= keyuse_ext->keypart + 1;
best_rec_per_key= rec_per_key;
best_key_keyuse_ext_start= key_keyuse_ext_start;
+ best_param_tables= needed_in_prefix;
}
keyuse_ext++;
}
@@ -956,8 +1005,30 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
while (keyuse_ext->table == table);
}
spl_opt_info->last_plan= 0;
+ double refills= DBL_MAX;
+ table_map excluded_tables= remaining_tables | this->join->sjm_lookup_tables;
if (best_table)
{
+ *spl_pd_boundary= this->table->map;
+ if (!best_param_tables)
+ refills= 1;
+ else
+ {
+ table_map last_found= this->table->map;
+ for (POSITION *pos= &this->join->positions[idx - 1]; ; pos--)
+ {
+ if (pos->table->table->map & excluded_tables)
+ continue;
+ if (pos->partial_join_cardinality < refills)
+ {
+ *spl_pd_boundary= last_found;
+ refills= pos->partial_join_cardinality;
+ }
+ last_found= pos->table->table->map;
+ if (last_found & best_param_tables)
+ break;
+ }
+ }
/*
The key for splitting was chosen, look for the plan for this key
in the cache
@@ -971,7 +1042,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
*/
table_map all_table_map= (((table_map) 1) << join->table_count) - 1;
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, true);
+ best_key, excluded_tables, true);
choose_plan(join, all_table_map & ~join->const_table_map);
/*
@@ -990,7 +1061,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
spl_opt_info->plan_cache.push_back(spl_plan))
{
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, false);
+ best_key, excluded_tables, false);
return 0;
}
@@ -1014,17 +1085,19 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
(char *) join->best_positions,
sizeof(POSITION) * join->table_count);
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, false);
+ best_key, excluded_tables, false);
}
+
if (spl_plan)
{
- if(record_count * spl_plan->cost < spl_opt_info->unsplit_cost)
+ if (refills * spl_plan->cost < spl_opt_info->unsplit_cost)
{
/*
The best plan that employs splitting is cheaper than
the plan without splitting
*/
spl_opt_info->last_plan= spl_plan;
+ spl_opt_info->last_refills= refills;
}
}
}
@@ -1034,11 +1107,14 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
spl_plan= spl_opt_info->last_plan;
if (spl_plan)
{
- startup_cost= record_count * spl_plan->cost;
+ startup_cost= spl_opt_info->last_refills * spl_plan->cost;
records= (ha_rows) (records * spl_plan->split_sel);
}
else
+ {
startup_cost= spl_opt_info->unsplit_cost;
+ *spl_pd_boundary= 0;
+ }
return spl_plan;
}
@@ -1048,8 +1124,8 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
Inject equalities for splitting used by the materialization join
@param
- excluded_tables used to filter out the equalities that cannot
- be pushed.
+ excluded_tables used to filter out the equalities that are not
+ to be pushed.
@details
This function injects equalities pushed into a derived table T for which
@@ -1142,7 +1218,7 @@ bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item)
@param
spl_plan info on the splitting plan chosen for the splittable table T
- remaining_tables the table T is joined just before these tables
+ excluded_tables tables that cannot be used in equalities pushed into T
is_const_table the table T is a constant table
@details
@@ -1157,7 +1233,7 @@ bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item)
*/
bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
- table_map remaining_tables,
+ table_map excluded_tables,
bool is_const_table)
{
SplM_opt_info *spl_opt_info= table->spl_opt_info;
@@ -1165,6 +1241,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
JOIN *md_join= spl_opt_info->join;
if (spl_plan && !is_const_table)
{
+ is_split_derived= true;
memcpy((char *) md_join->best_positions,
(char *) spl_plan->best_positions,
sizeof(POSITION) * md_join->table_count);
@@ -1175,7 +1252,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
reset_validity_vars_for_keyuses(spl_plan->keyuse_ext_start,
spl_plan->table,
spl_plan->key,
- remaining_tables,
+ excluded_tables,
true);
}
else if (md_join->save_qep)
@@ -1211,8 +1288,21 @@ bool JOIN::fix_all_splittings_in_plan()
if (tab->table->is_splittable())
{
SplM_plan_info *spl_plan= cur_pos->spl_plan;
+ table_map excluded_tables= (all_tables & ~prev_tables) |
+ sjm_lookup_tables;
+ ;
+ if (spl_plan)
+ {
+ POSITION *pos= cur_pos;
+ table_map spl_pd_boundary= pos->spl_pd_boundary;
+ do
+ {
+ excluded_tables|= pos->table->table->map;
+ }
+ while (!((pos--)->table->table->map & spl_pd_boundary));
+ }
if (tab->fix_splitting(spl_plan,
- all_tables & ~prev_tables,
+ excluded_tables,
tablenr < const_tables ))
return true;
}
@@ -1251,13 +1341,21 @@ bool JOIN::inject_splitting_cond_for_all_tables_with_split_opt()
continue;
SplM_opt_info *spl_opt_info= tab->table->spl_opt_info;
JOIN *join= spl_opt_info->join;
- /*
- Currently the equalities referencing columns of SJM tables with
- look-up access cannot be pushed into materialized derived.
- */
- if (join->inject_best_splitting_cond((all_tables & ~prev_tables) |
- sjm_lookup_tables))
- return true;
+ table_map excluded_tables= (all_tables & ~prev_tables) | sjm_lookup_tables;
+ table_map spl_pd_boundary= cur_pos->spl_pd_boundary;
+ for (POSITION *pos= cur_pos; ; pos--)
+ {
+ excluded_tables|= pos->table->table->map;
+ pos->use_join_buffer= false;
+ if (pos->table->table->map & spl_pd_boundary)
+ {
+ pos->table->split_derived_to_update|= tab->table->map;
+ break;
+ }
+ }
+
+ if (join->inject_best_splitting_cond(excluded_tables))
+ return true;
}
return false;
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index c4fc2d1..ce4a1af 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -7431,6 +7431,7 @@ best_access_path(JOIN *join,
MY_BITMAP *eq_join_set= &s->table->eq_join_set;
KEYUSE *hj_start_key= 0;
SplM_plan_info *spl_plan= 0;
+ table_map spl_pd_boundary= 0;
Range_rowid_filter_cost_info *filter= 0;
const char* cause= NULL;
enum join_type best_type= JT_UNKNOWN, type= JT_UNKNOWN;
@@ -7448,7 +7449,9 @@ best_access_path(JOIN *join,
loose_scan_opt.init(join, s, remaining_tables);
if (s->table->is_splittable())
- spl_plan= s->choose_best_splitting(record_count, remaining_tables);
+ spl_plan= s->choose_best_splitting(idx,
+ remaining_tables,
+ &spl_pd_boundary);
if (s->keyuse)
{ /* Use key if possible */
@@ -8266,6 +8269,7 @@ best_access_path(JOIN *join,
pos->loosescan_picker.loosescan_key= MAX_KEY;
pos->use_join_buffer= best_uses_jbuf;
pos->spl_plan= spl_plan;
+ pos->spl_pd_boundary= !spl_plan ? 0 : spl_pd_boundary;
pos->range_rowid_filter_info= best_filter;
loose_scan_opt.save_to_position(s, loose_scan_pos);
@@ -8795,6 +8799,9 @@ optimize_straight_join(JOIN *join, table_map join_tables)
pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
join_tables);
position->cond_selectivity= pushdown_cond_selectivity;
+ double partial_join_cardinality= record_count *
+ pushdown_cond_selectivity;
+ join->positions[idx].partial_join_cardinality= partial_join_cardinality;
++idx;
}
@@ -9833,6 +9840,8 @@ best_extension_by_limited_search(JOIN *join,
double partial_join_cardinality= current_record_count *
pushdown_cond_selectivity;
+ join->positions[idx].partial_join_cardinality= partial_join_cardinality;
+
if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) & allowed_tables )
{ /* Recursively expand the current partial plan */
swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
@@ -13825,7 +13834,8 @@ bool JOIN_TAB::preread_init()
DBUG_RETURN(TRUE);
if (!(derived->get_unit()->uncacheable & UNCACHEABLE_DEPENDENT) ||
- derived->is_nonrecursive_derived_with_rec_ref())
+ derived->is_nonrecursive_derived_with_rec_ref() ||
+ is_split_derived)
preread_init_done= TRUE;
if (select && select->quick)
select->quick->replace_handler(table->file);
@@ -17276,6 +17286,9 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
reopt_remaining_tables &
~real_table_bit);
}
+ double partial_join_cardinality= rec_count *
+ pushdown_cond_selectivity;
+ join->positions[i].partial_join_cardinality= partial_join_cardinality;
(*outer_rec_count) *= pushdown_cond_selectivity;
if (!rs->emb_sj_nest)
*outer_rec_count= COST_MULT(*outer_rec_count, pos.records_read);
@@ -20764,6 +20777,16 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
{
DBUG_ENTER("sub_select");
+ if (join_tab->split_derived_to_update && !end_of_records)
+ {
+ table_map tab_map= join_tab->split_derived_to_update;
+ for (uint i= 0; tab_map; i++, tab_map>>= 1)
+ {
+ if (tab_map & 1)
+ join->map2table[i]->preread_init_done= false;
+ }
+ }
+
if (join_tab->last_inner)
{
JOIN_TAB *last_inner_tab= join_tab->last_inner;
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 0dfecc9..638bf73 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -520,6 +520,16 @@ typedef struct st_join_table {
bool preread_init_done;
+ /* true <=> split optimization has been applied to this materialized table */
+ bool is_split_derived;
+
+ /*
+ Bitmap of split materialized derived tables that can be filled just before
+ this join table is to be joined. All parameters of the split derived tables
+ belong to tables preceding this join table.
+ */
+ table_map split_derived_to_update;
+
/*
Cost info to the range filter used when joining this join table
(Defined when the best join order has been already chosen)
@@ -680,9 +690,10 @@ typedef struct st_join_table {
void partial_cleanup();
void add_keyuses_for_splitting();
- SplM_plan_info *choose_best_splitting(double record_count,
- table_map remaining_tables);
- bool fix_splitting(SplM_plan_info *spl_plan, table_map remaining_tables,
+ SplM_plan_info *choose_best_splitting(uint idx,
+ table_map remaining_tables,
+ table_map *spl_pd_boundary);
+ bool fix_splitting(SplM_plan_info *spl_plan, table_map excluded_tables,
bool is_const_table);
} JOIN_TAB;
@@ -947,9 +958,21 @@ class POSITION
*/
KEYUSE *key;
+ /* Cardinality of current partial join ending with this position */
+ double partial_join_cardinality;
+
/* Info on splitting plan used at this position */
SplM_plan_info *spl_plan;
+ /*
+ If spl_plan is NULL the value of spl_pd_boundary is 0. Otherwise
+ spl_pd_boundary contains the bitmap of the table from the current
+ partial join ending at this position that starts the sub-sequence of
+ tables S from which no conditions are allowed to be used in the plan
+ spl_plan for the split table joined at this position.
+ */
+ table_map spl_pd_boundary;
+
/* Cost info for the range filter used at this position */
Range_rowid_filter_cost_info *range_rowid_filter_info;
1
0
[Commits] f510aad: MDEV-26301 Split optimization refills temporary table too many times
by IgorBabaev 06 Apr '23
by IgorBabaev 06 Apr '23
06 Apr '23
revision-id: f510aad1c7c649a50cd0ed435217e9f162c94bd8 (mariadb-10.4.28-75-gf510aad)
parent(s): a6780df49b443b172124e7e881ed0bea54d75907
author: Igor Babaev
committer: Igor Babaev
timestamp: 2023-04-06 10:48:23 -0700
message:
MDEV-26301 Split optimization refills temporary table too many times
This is an alternative patch that resolves the problem of unnecessary refills
of split derived tables. The patch also improves the estimate of the number
of such refills, lowering it in some cases, which allows wider usage of split
optimizations.
The patch is not intrusive and does not require any new optimizer switches.
---
mysql-test/main/derived_split_innodb.result | 261 ++++++++++++++++++++++++++++
mysql-test/main/derived_split_innodb.test | 136 +++++++++++++++
sql/opt_split.cc | 108 +++++++++---
sql/sql_select.cc | 20 ++-
sql/sql_select.h | 22 ++-
5 files changed, 518 insertions(+), 29 deletions(-)
diff --git a/mysql-test/main/derived_split_innodb.result b/mysql-test/main/derived_split_innodb.result
index 04f79d3..8a6585d 100644
--- a/mysql-test/main/derived_split_innodb.result
+++ b/mysql-test/main/derived_split_innodb.result
@@ -284,3 +284,264 @@ id select_type table type possible_keys key key_len ref rows Extra
2 DERIVED t4 ALL NULL NULL NULL NULL 40 Using filesort
drop table t3, t4;
# End of 10.3 tests
+#
+# MDEV-26301: Split optimization refills temporary table too many times
+#
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+analyze table t1,t2,t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status Table is already up to date
+explain
+select * from
+(t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5
+1 SIMPLE t2 ref a a 5 test.t1.b 2 Using where
+1 SIMPLE t3 ref a a 5 test.t1.b 3 Using where
+create table t10 (
+grp_id int,
+col1 int,
+key(grp_id)
+);
+insert into t10
+select
+A.seq,
+B.seq
+from
+seq_1_to_100 A,
+seq_1_to_100 B;
+create table t11 (
+col1 int,
+col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+analyze table t10,t11 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status Table is already up to date
+test.t11 analyze status Engine-independent statistics collected
+test.t11 analyze status OK
+explain select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+# The important part in the below output is:
+# "lateral": 1,
+# "query_block": {
+# "select_id": 2,
+# "r_loops": 5, <-- must be 5, not 30.
+analyze format=json select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "const_condition": "1",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 5,
+ "r_rows": 5,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "table": {
+ "table_name": "t2",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.b"],
+ "r_loops": 5,
+ "rows": 2,
+ "r_rows": 2,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))"
+ },
+ "table": {
+ "table_name": "t3",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.b"],
+ "r_loops": 10,
+ "rows": 3,
+ "r_rows": 3,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))"
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["grp_id"],
+ "ref": ["test.t1.b"],
+ "r_loops": 30,
+ "rows": 10,
+ "r_rows": 1,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))",
+ "materialized": {
+ "lateral": 1,
+ "query_block": {
+ "select_id": 2,
+ "r_loops": 5,
+ "r_total_time_ms": "REPLACED",
+ "outer_ref_condition": "t1.b is not null",
+ "table": {
+ "table_name": "t10",
+ "access_type": "ref",
+ "possible_keys": ["grp_id"],
+ "key": "grp_id",
+ "key_length": "5",
+ "used_key_parts": ["grp_id"],
+ "ref": ["test.t1.b"],
+ "r_loops": 5,
+ "rows": 100,
+ "r_rows": 100,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "t11",
+ "access_type": "ALL",
+ "r_loops": 5,
+ "rows": 10,
+ "r_rows": 10,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "buffer_type": "flat",
+ "buffer_size": "1Kb",
+ "join_type": "BNL",
+ "attached_condition": "trigcond(t11.col1 = t10.col1)",
+ "r_filtered": 10
+ }
+ }
+ }
+ }
+ }
+}
+create table t21 (pk int primary key);
+insert into t21 values (1),(2),(3);
+create table t22 (pk int primary key);
+insert into t22 values (1),(2),(3);
+explain
+select * from
+t21, t22,
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1 and t22.pk=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t22 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+explain
+select * from
+t21,
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from
+t22 join t10 left join t11 on t11.col1=t10.col1
+where
+t22.pk=1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+create table t5 (
+pk int primary key
+);
+insert into t5 select seq from seq_1_to_1000;
+explain
+select * from
+t21,
+(
+(((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from
+t22 join t10 left join t11 on t11.col1=t10.col1
+where
+t22.pk=1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t5 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 Using index
+1 PRIMARY t2 ref a a 5 test.t1.b 2
+1 PRIMARY t3 ref a a 5 test.t1.b 3
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t5.pk 100 Using index condition
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+drop table t1,t2,t3,t5, t10, t11, t21, t22;
+# End of 10.4 tests
diff --git a/mysql-test/main/derived_split_innodb.test b/mysql-test/main/derived_split_innodb.test
index 2f74f5f..ab83850 100644
--- a/mysql-test/main/derived_split_innodb.test
+++ b/mysql-test/main/derived_split_innodb.test
@@ -227,3 +227,139 @@ where t3.b > 15;
drop table t3, t4;
--echo # End of 10.3 tests
+
+--source include/have_sequence.inc
+
+--echo #
+--echo # MDEV-26301: Split optimization refills temporary table too many times
+--echo #
+
+# 5 values
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+
+# 5 value groups of size 2 each
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+
+# 5 value groups of size 3 each
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+
+analyze table t1,t2,t3 persistent for all;
+
+explain
+select * from
+ (t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
+
+# Now, create tables for Groups.
+
+create table t10 (
+ grp_id int,
+ col1 int,
+ key(grp_id)
+);
+
+# 100 groups of 100 values each
+insert into t10
+select
+ A.seq,
+ B.seq
+from
+ seq_1_to_100 A,
+ seq_1_to_100 B;
+
+# and X10 multiplier
+
+create table t11 (
+ col1 int,
+ col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+
+analyze table t10,t11 persistent for all;
+
+let $q1=
+select * from
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b;
+
+eval
+explain $q1;
+
+--echo # The important part in the below output is:
+--echo # "lateral": 1,
+--echo # "query_block": {
+--echo # "select_id": 2,
+--echo # "r_loops": 5, <-- must be 5, not 30.
+--source include/analyze-format.inc
+
+eval
+analyze format=json $q1;
+
+create table t21 (pk int primary key);
+insert into t21 values (1),(2),(3);
+
+create table t22 (pk int primary key);
+insert into t22 values (1),(2),(3);
+
+# Same as above but throw in a couple of const tables.
+explain
+select * from
+ t21, t22,
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1 and t22.pk=2;
+
+explain
+select * from
+ t21,
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from
+ t22 join t10 left join t11 on t11.col1=t10.col1
+ where
+ t22.pk=1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1;
+
+# And also add a non-const table
+
+create table t5 (
+ pk int primary key
+ );
+insert into t5 select seq from seq_1_to_1000;
+
+explain
+select * from
+ t21,
+ (
+ (((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from
+ t22 join t10 left join t11 on t11.col1=t10.col1
+ where
+ t22.pk=1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1;
+
+drop table t1,t2,t3,t5, t10, t11, t21, t22;
+
+--echo # End of 10.4 tests
diff --git a/sql/opt_split.cc b/sql/opt_split.cc
index a356335..30d9161 100644
--- a/sql/opt_split.cc
+++ b/sql/opt_split.cc
@@ -248,6 +248,8 @@ class SplM_opt_info : public Sql_alloc
double unsplit_card;
/* Lastly evaluated execution plan for 'join' with pushed equalities */
SplM_plan_info *last_plan;
+ table_map last_spl_param_tables;
+ double last_refills;
SplM_plan_info *find_plan(TABLE *table, uint key, uint parts);
};
@@ -831,13 +833,13 @@ SplM_plan_info *SplM_opt_info::find_plan(TABLE *table, uint key, uint parts)
static
void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
TABLE *table, uint key,
- table_map remaining_tables,
+ table_map excluded_tables,
bool validity_val)
{
KEYUSE_EXT *keyuse_ext= key_keyuse_ext_start;
do
{
- if (!(keyuse_ext->needed_in_prefix & remaining_tables))
+ if (!(keyuse_ext->needed_in_prefix & excluded_tables))
{
/*
The enabling/disabling flags are set just in KEYUSE_EXT structures.
@@ -857,8 +859,9 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
Choose the best splitting to extend the evaluated partial join
@param
- record_count estimated cardinality of the extended partial join
+ join_tab_pos position for joined table in current partial join
remaining_tables tables not joined yet
+ spl_param_tables OUT tables used in key values to access split table
@details
This function is called during the search for the best execution
@@ -873,7 +876,7 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
splitting the function set it as the true plan of materialization
of the table T.
The function caches the found plans for materialization of table T
- together if the info what key was used for splitting. Next time when
+ together with the info what key was used for splitting. Next time when
the optimizer prefers to use the same key the plan is taken from
the cache of plans
@@ -882,8 +885,9 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
if the plan has been chosen, NULL - otherwise.
*/
-SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
- table_map remaining_tables)
+SplM_plan_info * JOIN_TAB::choose_best_splitting(const POSITION *join_tab_pos,
+ table_map remaining_tables,
+ table_map &spl_param_tables)
{
SplM_opt_info *spl_opt_info= table->spl_opt_info;
DBUG_ASSERT(spl_opt_info != NULL);
@@ -898,6 +902,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
SplM_plan_info *spl_plan= 0;
uint best_key= 0;
uint best_key_parts= 0;
+ table_map best_param_tables;
/*
Check whether there are keys that can be used to join T employing splitting
@@ -916,6 +921,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
uint key= keyuse_ext->key;
KEYUSE_EXT *key_keyuse_ext_start= keyuse_ext;
key_part_map found_parts= 0;
+ table_map needed_in_prefix= 0;
do
{
if (keyuse_ext->needed_in_prefix & remaining_tables)
@@ -941,6 +947,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
KEY *key_info= table->key_info + key;
double rec_per_key=
key_info->actual_rec_per_key(keyuse_ext->keypart);
+ needed_in_prefix|= keyuse_ext->needed_in_prefix;
if (rec_per_key < best_rec_per_key)
{
best_table= keyuse_ext->table;
@@ -948,6 +955,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
best_key_parts= keyuse_ext->keypart + 1;
best_rec_per_key= rec_per_key;
best_key_keyuse_ext_start= key_keyuse_ext_start;
+ best_param_tables= needed_in_prefix;
}
keyuse_ext++;
}
@@ -956,8 +964,24 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
while (keyuse_ext->table == table);
}
spl_opt_info->last_plan= 0;
+ double refills= DBL_MAX;
+ table_map excluded_tables= remaining_tables | this->join->sjm_lookup_tables;
if (best_table)
{
+ if (!best_param_tables)
+ refills= 1;
+ else
+ {
+ for (const POSITION *pos= join_tab_pos - 1; ; pos--)
+ {
+ if (pos->table->table->map & this->join->sjm_lookup_tables)
+ continue;
+ set_if_smaller(refills, pos->partial_join_cardinality);
+ if (pos->table->table->map & best_param_tables)
+ break;
+ excluded_tables|= pos->table->table->map;
+ }
+ }
/*
The key for splitting was chosen, look for the plan for this key
in the cache
@@ -971,7 +995,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
*/
table_map all_table_map= (((table_map) 1) << join->table_count) - 1;
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, true);
+ best_key, excluded_tables, true);
choose_plan(join, all_table_map & ~join->const_table_map);
/*
@@ -990,7 +1014,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
spl_opt_info->plan_cache.push_back(spl_plan))
{
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, false);
+ best_key, excluded_tables, false);
return 0;
}
@@ -1014,17 +1038,20 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
(char *) join->best_positions,
sizeof(POSITION) * join->table_count);
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, false);
+ best_key, excluded_tables, false);
}
+
if (spl_plan)
{
- if(record_count * spl_plan->cost < spl_opt_info->unsplit_cost)
+ if (refills * spl_plan->cost < spl_opt_info->unsplit_cost)
{
/*
The best plan that employs splitting is cheaper than
the plan without splitting
*/
spl_opt_info->last_plan= spl_plan;
+ spl_opt_info->last_spl_param_tables= best_param_tables;
+ spl_opt_info->last_refills= refills;
}
}
}
@@ -1034,7 +1061,8 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
spl_plan= spl_opt_info->last_plan;
if (spl_plan)
{
- startup_cost= record_count * spl_plan->cost;
+ spl_param_tables= spl_opt_info->last_spl_param_tables;
+ startup_cost= spl_opt_info->last_refills * spl_plan->cost;
records= (ha_rows) (records * spl_plan->split_sel);
}
else
@@ -1048,8 +1076,8 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
Inject equalities for splitting used by the materialization join
@param
- excluded_tables used to filter out the equalities that cannot
- be pushed.
+ excluded_tables used to filter out the equalities that are not
+ to be pushed.
@details
This function injects equalities pushed into a derived table T for which
@@ -1142,7 +1170,7 @@ bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item)
@param
spl_plan info on the splitting plan chosen for the splittable table T
- remaining_tables the table T is joined just before these tables
+ excluded_tables tables that cannot be used in equalities pushed into T
is_const_table the table T is a constant table
@details
@@ -1157,7 +1185,7 @@ bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item)
*/
bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
- table_map remaining_tables,
+ table_map excluded_tables,
bool is_const_table)
{
SplM_opt_info *spl_opt_info= table->spl_opt_info;
@@ -1165,6 +1193,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
JOIN *md_join= spl_opt_info->join;
if (spl_plan && !is_const_table)
{
+ is_split_derived= true;
memcpy((char *) md_join->best_positions,
(char *) spl_plan->best_positions,
sizeof(POSITION) * md_join->table_count);
@@ -1175,7 +1204,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
reset_validity_vars_for_keyuses(spl_plan->keyuse_ext_start,
spl_plan->table,
spl_plan->key,
- remaining_tables,
+ excluded_tables,
true);
}
else if (md_join->save_qep)
@@ -1211,8 +1240,27 @@ bool JOIN::fix_all_splittings_in_plan()
if (tab->table->is_splittable())
{
SplM_plan_info *spl_plan= cur_pos->spl_plan;
+ table_map excluded_tables= (all_tables & ~prev_tables) |
+ sjm_lookup_tables;
+ ;
+ if (spl_plan)
+ {
+ table_map spl_param_tables= cur_pos->spl_param_tables;
+ excluded_tables|= tab->table->map;
+ if (spl_param_tables)
+ {
+ for (POSITION *pos= cur_pos - 1; ; pos--)
+ {
+ if (pos->table->table->map & sjm_lookup_tables)
+ continue;
+ if (pos->table->table->map & spl_param_tables)
+ break;
+ excluded_tables|= pos->table->table->map;
+ }
+ }
+ }
if (tab->fix_splitting(spl_plan,
- all_tables & ~prev_tables,
+ excluded_tables,
tablenr < const_tables ))
return true;
}
@@ -1251,13 +1299,25 @@ bool JOIN::inject_splitting_cond_for_all_tables_with_split_opt()
continue;
SplM_opt_info *spl_opt_info= tab->table->spl_opt_info;
JOIN *join= spl_opt_info->join;
- /*
- Currently the equalities referencing columns of SJM tables with
- look-up access cannot be pushed into materialized derived.
- */
- if (join->inject_best_splitting_cond((all_tables & ~prev_tables) |
- sjm_lookup_tables))
- return true;
+ table_map excluded_tables= (all_tables & ~prev_tables) | sjm_lookup_tables;
+ table_map param_tables= cur_pos->spl_param_tables;
+
+ POSITION *found_pos= cur_pos;
+ if (param_tables)
+ {
+ for (POSITION *pos= cur_pos - 1; ; pos--)
+ {
+ if (pos->table->table->map & excluded_tables)
+ continue;
+ if (pos->table->table->map & param_tables)
+ break;
+ excluded_tables|= pos->table->table->map;
+ found_pos= pos;
+ }
+ }
+ if (join->inject_best_splitting_cond(excluded_tables))
+ return true;
+ found_pos->table->split_derived_to_update|= tab->table->map;
}
return false;
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index c4fc2d1..5d501de 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -7431,6 +7431,7 @@ best_access_path(JOIN *join,
MY_BITMAP *eq_join_set= &s->table->eq_join_set;
KEYUSE *hj_start_key= 0;
SplM_plan_info *spl_plan= 0;
+ table_map spl_param_tables= 0;
Range_rowid_filter_cost_info *filter= 0;
const char* cause= NULL;
enum join_type best_type= JT_UNKNOWN, type= JT_UNKNOWN;
@@ -7448,7 +7449,8 @@ best_access_path(JOIN *join,
loose_scan_opt.init(join, s, remaining_tables);
if (s->table->is_splittable())
- spl_plan= s->choose_best_splitting(record_count, remaining_tables);
+ spl_plan= s->choose_best_splitting(join_positions + idx, remaining_tables,
+ spl_param_tables);
if (s->keyuse)
{ /* Use key if possible */
@@ -8266,6 +8268,7 @@ best_access_path(JOIN *join,
pos->loosescan_picker.loosescan_key= MAX_KEY;
pos->use_join_buffer= best_uses_jbuf;
pos->spl_plan= spl_plan;
+ pos->spl_param_tables= !spl_plan ? 0 : spl_param_tables;
pos->range_rowid_filter_info= best_filter;
loose_scan_opt.save_to_position(s, loose_scan_pos);
@@ -9833,6 +9836,8 @@ best_extension_by_limited_search(JOIN *join,
double partial_join_cardinality= current_record_count *
pushdown_cond_selectivity;
+ join->positions[idx].partial_join_cardinality= partial_join_cardinality;
+
if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) & allowed_tables )
{ /* Recursively expand the current partial plan */
swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
@@ -13825,7 +13830,8 @@ bool JOIN_TAB::preread_init()
DBUG_RETURN(TRUE);
if (!(derived->get_unit()->uncacheable & UNCACHEABLE_DEPENDENT) ||
- derived->is_nonrecursive_derived_with_rec_ref())
+ derived->is_nonrecursive_derived_with_rec_ref() ||
+ is_split_derived)
preread_init_done= TRUE;
if (select && select->quick)
select->quick->replace_handler(table->file);
@@ -20764,6 +20770,16 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
{
DBUG_ENTER("sub_select");
+ if (join_tab->split_derived_to_update && !end_of_records)
+ {
+ table_map tab_map= join_tab->split_derived_to_update;
+ for (uint i= 0; tab_map; i++, tab_map>>= 1)
+ {
+ if (tab_map & 1)
+ join->map2table[i]->preread_init_done= false;
+ }
+ }
+
if (join_tab->last_inner)
{
JOIN_TAB *last_inner_tab= join_tab->last_inner;
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 0dfecc9..db9025a 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -520,6 +520,16 @@ typedef struct st_join_table {
bool preread_init_done;
+ /* true <=> split optimization has been applied to this materialized table */
+ bool is_split_derived;
+
+ /*
+ Bitmap of split materialized derived tables that can be filled just before
+ this join table is to be joined. All parameters of the split derived tables
+ belong to tables preceding this join table.
+ */
+ table_map split_derived_to_update;
+
/*
Cost info to the range filter used when joining this join table
(Defined when the best join order has been already chosen)
@@ -680,9 +690,10 @@ typedef struct st_join_table {
void partial_cleanup();
void add_keyuses_for_splitting();
- SplM_plan_info *choose_best_splitting(double record_count,
- table_map remaining_tables);
- bool fix_splitting(SplM_plan_info *spl_plan, table_map remaining_tables,
+ SplM_plan_info *choose_best_splitting(const POSITION *join_tab_pos,
+ table_map remaining_tables,
+ table_map &spl_param_tables);
+ bool fix_splitting(SplM_plan_info *spl_plan, table_map excluded_tables,
bool is_const_table);
} JOIN_TAB;
@@ -947,9 +958,14 @@ class POSITION
*/
KEYUSE *key;
+ /* Cardinality of current partial join ending with this position */
+ double partial_join_cardinality;
+
/* Info on splitting plan used at this position */
SplM_plan_info *spl_plan;
+ table_map spl_param_tables;
+
/* Cost info for the range filter used at this position */
Range_rowid_filter_cost_info *range_rowid_filter_info;
1
0
[Commits] 99b8f38: MDEV-26301 Split optimization refills temporary table too many times
by IgorBabaev 03 Apr '23
by IgorBabaev 03 Apr '23
03 Apr '23
revision-id: 99b8f388a1a103466864c3ec68c95c21bb435e14 (mariadb-10.4.28-75-g99b8f38)
parent(s): a6780df49b443b172124e7e881ed0bea54d75907
author: Igor Babaev
committer: Igor Babaev
timestamp: 2023-04-02 18:22:37 -0700
message:
MDEV-26301 Split optimization refills temporary table too many times
This is an alternative patch that resolves the problem of unnecessary refills
of split derived tables. The patch also improves the estimate of the number
of such refills, lowering it in some cases, which allows wider usage of split
optimizations.
The patch is not intrusive and does not require any new optimizer switches.
---
mysql-test/main/derived_split_innodb.result | 261 ++++++++++++++++++++++++++++
mysql-test/main/derived_split_innodb.test | 136 +++++++++++++++
sql/opt_split.cc | 77 ++++++--
sql/sql_select.cc | 17 +-
sql/sql_select.h | 15 +-
5 files changed, 486 insertions(+), 20 deletions(-)
diff --git a/mysql-test/main/derived_split_innodb.result b/mysql-test/main/derived_split_innodb.result
index 04f79d3..8a6585d 100644
--- a/mysql-test/main/derived_split_innodb.result
+++ b/mysql-test/main/derived_split_innodb.result
@@ -284,3 +284,264 @@ id select_type table type possible_keys key key_len ref rows Extra
2 DERIVED t4 ALL NULL NULL NULL NULL 40 Using filesort
drop table t3, t4;
# End of 10.3 tests
+#
+# MDEV-26301: Split optimization refills temporary table too many times
+#
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+analyze table t1,t2,t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status Table is already up to date
+explain
+select * from
+(t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5
+1 SIMPLE t2 ref a a 5 test.t1.b 2 Using where
+1 SIMPLE t3 ref a a 5 test.t1.b 3 Using where
+create table t10 (
+grp_id int,
+col1 int,
+key(grp_id)
+);
+insert into t10
+select
+A.seq,
+B.seq
+from
+seq_1_to_100 A,
+seq_1_to_100 B;
+create table t11 (
+col1 int,
+col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+analyze table t10,t11 persistent for all;
+Table Op Msg_type Msg_text
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status Table is already up to date
+test.t11 analyze status Engine-independent statistics collected
+test.t11 analyze status OK
+explain select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+# The important part in the below output is:
+# "lateral": 1,
+# "query_block": {
+# "select_id": 2,
+# "r_loops": 5, <-- must be 5, not 30.
+analyze format=json select * from
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "const_condition": "1",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 5,
+ "r_rows": 5,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "table": {
+ "table_name": "t2",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.b"],
+ "r_loops": 5,
+ "rows": 2,
+ "r_rows": 2,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))"
+ },
+ "table": {
+ "table_name": "t3",
+ "access_type": "ref",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.b"],
+ "r_loops": 10,
+ "rows": 3,
+ "r_rows": 3,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))"
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["grp_id"],
+ "ref": ["test.t1.b"],
+ "r_loops": 30,
+ "rows": 10,
+ "r_rows": 1,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "trigcond(trigcond(t1.b is not null))",
+ "materialized": {
+ "lateral": 1,
+ "query_block": {
+ "select_id": 2,
+ "r_loops": 5,
+ "r_total_time_ms": "REPLACED",
+ "outer_ref_condition": "t1.b is not null",
+ "table": {
+ "table_name": "t10",
+ "access_type": "ref",
+ "possible_keys": ["grp_id"],
+ "key": "grp_id",
+ "key_length": "5",
+ "used_key_parts": ["grp_id"],
+ "ref": ["test.t1.b"],
+ "r_loops": 5,
+ "rows": 100,
+ "r_rows": 100,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "t11",
+ "access_type": "ALL",
+ "r_loops": 5,
+ "rows": 10,
+ "r_rows": 10,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "buffer_type": "flat",
+ "buffer_size": "1Kb",
+ "join_type": "BNL",
+ "attached_condition": "trigcond(t11.col1 = t10.col1)",
+ "r_filtered": 10
+ }
+ }
+ }
+ }
+ }
+}
+create table t21 (pk int primary key);
+insert into t21 values (1),(2),(3);
+create table t22 (pk int primary key);
+insert into t22 values (1),(2),(3);
+explain
+select * from
+t21, t22,
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from t10 left join t11 on t11.col1=t10.col1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1 and t22.pk=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t22 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+explain
+select * from
+t21,
+(
+(t1 left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from
+t22 join t10 left join t11 on t11.col1=t10.col1
+where
+t22.pk=1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5
+1 PRIMARY t2 ref a a 5 test.t1.b 2 Using where
+1 PRIMARY t3 ref a a 5 test.t1.b 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t1.b 100
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+create table t5 (
+pk int primary key
+);
+insert into t5 select seq from seq_1_to_1000;
+explain
+select * from
+t21,
+(
+(((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
+left join t3 on t3.a=t1.b
+) left join (select grp_id, count(*)
+from
+t22 join t10 left join t11 on t11.col1=t10.col1
+where
+t22.pk=1
+group by grp_id) T on T.grp_id=t1.b
+where
+t21.pk=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t21 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t5 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 Using index
+1 PRIMARY t2 ref a a 5 test.t1.b 2
+1 PRIMARY t3 ref a a 5 test.t1.b 3
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.b 10 Using where
+2 LATERAL DERIVED t22 const PRIMARY PRIMARY 4 const 1 Using index
+2 LATERAL DERIVED t10 ref grp_id grp_id 5 test.t5.pk 100 Using index condition
+2 LATERAL DERIVED t11 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+drop table t1,t2,t3,t5, t10, t11, t21, t22;
+# End of 10.4 tests
diff --git a/mysql-test/main/derived_split_innodb.test b/mysql-test/main/derived_split_innodb.test
index 2f74f5f..ab83850 100644
--- a/mysql-test/main/derived_split_innodb.test
+++ b/mysql-test/main/derived_split_innodb.test
@@ -227,3 +227,139 @@ where t3.b > 15;
drop table t3, t4;
--echo # End of 10.3 tests
+
+--source include/have_sequence.inc
+
+--echo #
+--echo # MDEV-26301: Split optimization refills temporary table too many times
+--echo #
+
+# 5 values
+create table t1(a int, b int);
+insert into t1 select seq,seq from seq_1_to_5;
+
+# 5 value groups of size 2 each
+create table t2(a int, b int, key(a));
+insert into t2
+select A.seq,B.seq from seq_1_to_25 A, seq_1_to_2 B;
+
+# 5 value groups of size 3 each
+create table t3(a int, b int, key(a));
+insert into t3
+select A.seq,B.seq from seq_1_to_5 A, seq_1_to_3 B;
+
+analyze table t1,t2,t3 persistent for all;
+
+explain
+select * from
+ (t1 left join t2 on t2.a=t1.b) left join t3 on t3.a=t1.b;
+
+# Now, create tables for Groups.
+
+create table t10 (
+ grp_id int,
+ col1 int,
+ key(grp_id)
+);
+
+# 100 groups of 100 values each
+insert into t10
+select
+ A.seq,
+ B.seq
+from
+ seq_1_to_100 A,
+ seq_1_to_100 B;
+
+# and X10 multiplier
+
+create table t11 (
+ col1 int,
+ col2 int
+);
+insert into t11
+select A.seq, A.seq from seq_1_to_10 A;
+
+analyze table t10,t11 persistent for all;
+
+let $q1=
+select * from
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b;
+
+eval
+explain $q1;
+
+--echo # The important part in the below output is:
+--echo # "lateral": 1,
+--echo # "query_block": {
+--echo # "select_id": 2,
+--echo # "r_loops": 5, <-- must be 5, not 30.
+--source include/analyze-format.inc
+
+eval
+analyze format=json $q1;
+
+create table t21 (pk int primary key);
+insert into t21 values (1),(2),(3);
+
+create table t22 (pk int primary key);
+insert into t22 values (1),(2),(3);
+
+# Same as above but throw in a couple of const tables.
+explain
+select * from
+ t21, t22,
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from t10 left join t11 on t11.col1=t10.col1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1 and t22.pk=2;
+
+explain
+select * from
+ t21,
+ (
+ (t1 left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from
+ t22 join t10 left join t11 on t11.col1=t10.col1
+ where
+ t22.pk=1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1;
+
+# And also add a non-const table
+
+create table t5 (
+ pk int primary key
+ );
+insert into t5 select seq from seq_1_to_1000;
+
+explain
+select * from
+ t21,
+ (
+ (((t1 join t5 on t5.pk=t1.b)) left join t2 on t2.a=t1.b)
+ left join t3 on t3.a=t1.b
+ ) left join (select grp_id, count(*)
+ from
+ t22 join t10 left join t11 on t11.col1=t10.col1
+ where
+ t22.pk=1
+ group by grp_id) T on T.grp_id=t1.b
+where
+ t21.pk=1;
+
+drop table t1,t2,t3,t5, t10, t11, t21, t22;
+
+--echo # End of 10.4 tests
diff --git a/sql/opt_split.cc b/sql/opt_split.cc
index a356335..b5be49b 100644
--- a/sql/opt_split.cc
+++ b/sql/opt_split.cc
@@ -217,6 +217,11 @@ struct SplM_plan_info
uint key;
/* Number of the components of 'key' used for splitting in P */
uint parts;
+ /*
+ Bitmap of tables from the embedding join containing column references
+ from the equalities pushed into this split materialized derived
+ */
+ table_map param_tables;
};
@@ -248,8 +253,10 @@ class SplM_opt_info : public Sql_alloc
double unsplit_card;
/* Lastly evaluated execution plan for 'join' with pushed equalities */
SplM_plan_info *last_plan;
+ double last_refills;
- SplM_plan_info *find_plan(TABLE *table, uint key, uint parts);
+ SplM_plan_info *find_plan(TABLE *table, uint key, uint parts,
+ table_map param_tables);
};
@@ -808,7 +815,8 @@ void JOIN_TAB::add_keyuses_for_splitting()
Find info on the splitting plan by the splitting key
*/
-SplM_plan_info *SplM_opt_info::find_plan(TABLE *table, uint key, uint parts)
+SplM_plan_info *SplM_opt_info::find_plan(TABLE *table, uint key, uint parts,
+ table_map param_tables)
{
List_iterator_fast<SplM_plan_info> li(plan_cache);
SplM_plan_info *spl_plan;
@@ -816,7 +824,8 @@ SplM_plan_info *SplM_opt_info::find_plan(TABLE *table, uint key, uint parts)
{
if (spl_plan->table == table &&
spl_plan->key == key &&
- spl_plan->parts == parts)
+ spl_plan->parts == parts &&
+ spl_plan->param_tables == param_tables)
break;
}
return spl_plan;
@@ -857,7 +866,7 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
Choose the best splitting to extend the evaluated partial join
@param
- record_count estimated cardinality of the extended partial join
+ join_tab_pos position for joined table in current partial join
remaining_tables tables not joined yet
@details
@@ -882,7 +891,7 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
if the plan has been chosen, NULL - otherwise.
*/
-SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
+SplM_plan_info * JOIN_TAB::choose_best_splitting(const POSITION *join_tab_pos,
table_map remaining_tables)
{
SplM_opt_info *spl_opt_info= table->spl_opt_info;
@@ -898,6 +907,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
SplM_plan_info *spl_plan= 0;
uint best_key= 0;
uint best_key_parts= 0;
+ table_map best_param_tables;
/*
Check whether there are keys that can be used to join T employing splitting
@@ -916,6 +926,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
uint key= keyuse_ext->key;
KEYUSE_EXT *key_keyuse_ext_start= keyuse_ext;
key_part_map found_parts= 0;
+ table_map needed_in_prefix= 0;
do
{
if (keyuse_ext->needed_in_prefix & remaining_tables)
@@ -941,6 +952,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
KEY *key_info= table->key_info + key;
double rec_per_key=
key_info->actual_rec_per_key(keyuse_ext->keypart);
+ needed_in_prefix|= keyuse_ext->needed_in_prefix;
if (rec_per_key < best_rec_per_key)
{
best_table= keyuse_ext->table;
@@ -948,6 +960,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
best_key_parts= keyuse_ext->keypart + 1;
best_rec_per_key= rec_per_key;
best_key_keyuse_ext_start= key_keyuse_ext_start;
+ best_param_tables= needed_in_prefix;
}
keyuse_ext++;
}
@@ -962,7 +975,8 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
The key for splitting was chosen, look for the plan for this key
in the cache
*/
- spl_plan= spl_opt_info->find_plan(best_table, best_key, best_key_parts);
+ spl_plan= spl_opt_info->find_plan(best_table, best_key, best_key_parts,
+ best_param_tables);
if (!spl_plan)
{
/*
@@ -998,6 +1012,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
spl_plan->table= best_table;
spl_plan->key= best_key;
spl_plan->parts= best_key_parts;
+ spl_plan->param_tables= best_param_tables;
spl_plan->split_sel= best_rec_per_key /
(spl_opt_info->unsplit_card ?
spl_opt_info->unsplit_card : 1);
@@ -1018,13 +1033,28 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
}
if (spl_plan)
{
- if(record_count * spl_plan->cost < spl_opt_info->unsplit_cost)
+ double refills= DBL_MAX;
+ if (!spl_plan->param_tables)
+ refills= 1;
+ else
+ {
+ for (const POSITION *pos= join_tab_pos - 1; ; pos--)
+ {
+ if (pos->table->table->map & this->join->sjm_lookup_tables)
+ continue;
+ set_if_smaller(refills, pos->partial_join_cardinality);
+ if (pos->table->table->map & spl_plan->param_tables)
+ break;
+ }
+ }
+ if (refills * spl_plan->cost < spl_opt_info->unsplit_cost)
{
/*
The best plan that employs splitting is cheaper than
the plan without splitting
*/
spl_opt_info->last_plan= spl_plan;
+ spl_opt_info->last_refills= refills;
}
}
}
@@ -1034,7 +1064,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
spl_plan= spl_opt_info->last_plan;
if (spl_plan)
{
- startup_cost= record_count * spl_plan->cost;
+ startup_cost= spl_opt_info->last_refills * spl_plan->cost;
records= (ha_rows) (records * spl_plan->split_sel);
}
else
@@ -1048,8 +1078,8 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
Inject equalities for splitting used by the materialization join
@param
- excluded_tables used to filter out the equalities that cannot
- be pushed.
+ excluded_tables used to filter out the equalities that are not
+ to be pushed.
@details
This function injects equalities pushed into a derived table T for which
@@ -1165,6 +1195,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
JOIN *md_join= spl_opt_info->join;
if (spl_plan && !is_const_table)
{
+ is_split_derived= true;
memcpy((char *) md_join->best_positions,
(char *) spl_plan->best_positions,
sizeof(POSITION) * md_join->table_count);
@@ -1251,13 +1282,25 @@ bool JOIN::inject_splitting_cond_for_all_tables_with_split_opt()
continue;
SplM_opt_info *spl_opt_info= tab->table->spl_opt_info;
JOIN *join= spl_opt_info->join;
- /*
- Currently the equalities referencing columns of SJM tables with
- look-up access cannot be pushed into materialized derived.
- */
- if (join->inject_best_splitting_cond((all_tables & ~prev_tables) |
- sjm_lookup_tables))
- return true;
+ table_map excluded_tables= (all_tables & ~prev_tables) | sjm_lookup_tables;
+ table_map param_tables= cur_pos->spl_plan->param_tables;
+
+ POSITION *found_pos= cur_pos;
+ if (param_tables)
+ {
+ for (POSITION *pos= cur_pos - 1; ; pos--)
+ {
+ if (pos->table->table->map & excluded_tables)
+ continue;
+ if (pos->table->table->map & param_tables)
+ break;
+ excluded_tables|= pos->table->table->map;
+ found_pos= pos;
+ }
+ }
+ if (join->inject_best_splitting_cond(excluded_tables))
+ return true;
+ found_pos->table->split_derived_to_update|= tab->table->map;
}
return false;
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index c4fc2d1..ee0d2dc 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -7448,7 +7448,7 @@ best_access_path(JOIN *join,
loose_scan_opt.init(join, s, remaining_tables);
if (s->table->is_splittable())
- spl_plan= s->choose_best_splitting(record_count, remaining_tables);
+ spl_plan= s->choose_best_splitting(join_positions + idx, remaining_tables);
if (s->keyuse)
{ /* Use key if possible */
@@ -9833,6 +9833,8 @@ best_extension_by_limited_search(JOIN *join,
double partial_join_cardinality= current_record_count *
pushdown_cond_selectivity;
+ join->positions[idx].partial_join_cardinality= partial_join_cardinality;
+
if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) & allowed_tables )
{ /* Recursively expand the current partial plan */
swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
@@ -13825,7 +13827,8 @@ bool JOIN_TAB::preread_init()
DBUG_RETURN(TRUE);
if (!(derived->get_unit()->uncacheable & UNCACHEABLE_DEPENDENT) ||
- derived->is_nonrecursive_derived_with_rec_ref())
+ derived->is_nonrecursive_derived_with_rec_ref() ||
+ is_split_derived)
preread_init_done= TRUE;
if (select && select->quick)
select->quick->replace_handler(table->file);
@@ -20764,6 +20767,16 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
{
DBUG_ENTER("sub_select");
+ if (join_tab->split_derived_to_update && !end_of_records)
+ {
+ table_map tab_map= join_tab->split_derived_to_update;
+ for (uint i= 0; tab_map; i++, tab_map>>= 1)
+ {
+ if (tab_map & 1)
+ join->map2table[i]->preread_init_done= false;
+ }
+ }
+
if (join_tab->last_inner)
{
JOIN_TAB *last_inner_tab= join_tab->last_inner;
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 0dfecc9..3643072 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -520,6 +520,16 @@ typedef struct st_join_table {
bool preread_init_done;
+ /* true <=> split optimization has been applied to this materialized table */
+ bool is_split_derived;
+
+ /*
+ Bitmap of split materialized derived tables that can be filled just before
+ this join table is to be joined. All parameters of the split derived tables
+ belong to tables preceding this join table.
+ */
+ table_map split_derived_to_update;
+
/*
Cost info to the range filter used when joining this join table
(Defined when the best join order has been already chosen)
@@ -680,7 +690,7 @@ typedef struct st_join_table {
void partial_cleanup();
void add_keyuses_for_splitting();
- SplM_plan_info *choose_best_splitting(double record_count,
+ SplM_plan_info *choose_best_splitting(const POSITION *join_tab_pos,
table_map remaining_tables);
bool fix_splitting(SplM_plan_info *spl_plan, table_map remaining_tables,
bool is_const_table);
@@ -947,6 +957,9 @@ class POSITION
*/
KEYUSE *key;
+ /* Cardinality of current partial join ending with this position */
+ double partial_join_cardinality;
+
/* Info on splitting plan used at this position */
SplM_plan_info *spl_plan;
1
0
[Commits] fe53886: MDEV-30539 EXPLAIN EXTENDED: no message with queries for DML statements
by IgorBabaev 23 Mar '23
by IgorBabaev 23 Mar '23
23 Mar '23
revision-id: fe538862169b478d36bdc90c4537f1c434a0287c (mariadb-10.4.27-113-gfe53886)
parent(s): ccec9b1de95a66b7597bc30e0a60bd61866f225d
author: Igor Babaev
committer: Igor Babaev
timestamp: 2023-03-22 21:59:18 -0700
message:
MDEV-30539 EXPLAIN EXTENDED: no message with queries for DML statements
EXPLAIN EXTENDED for an UPDATE/DELETE/INSERT/REPLACE statement did not
produce the warning containing the text representation of the query
obtained after the optimization phase. Such warning was produced for
SELECT statements, but not for DML statements.
The patch fixes this defect of EXPLAIN EXTENDED for DML statements.
---
mysql-test/include/explain_non_select.inc | 16 +
mysql-test/main/explain_non_select.result | 4 +
mysql-test/main/multi_update.result | 2 +-
mysql-test/main/multi_update.test | 2 +-
.../main/myisam_explain_non_select_all.result | 193 ++++++++++++
mysql-test/main/opt_trace.result | 2 +-
mysql-test/main/ps.result | 4 +
.../suite/versioning/r/delete_history.result | 2 +
sql/sql_delete.cc | 2 +
sql/sql_explain.cc | 19 +-
sql/sql_explain.h | 2 +-
sql/sql_insert.cc | 6 +-
sql/sql_lex.cc | 2 +
sql/sql_lex.h | 6 +
sql/sql_parse.cc | 20 +-
sql/sql_select.cc | 326 ++++++++++++++++++---
sql/sql_update.cc | 10 +-
sql/table.h | 2 +
18 files changed, 563 insertions(+), 57 deletions(-)
diff --git a/mysql-test/include/explain_non_select.inc b/mysql-test/include/explain_non_select.inc
index d22310c..bd0962d 100644
--- a/mysql-test/include/explain_non_select.inc
+++ b/mysql-test/include/explain_non_select.inc
@@ -788,6 +788,22 @@ INSERT INTO t1 VALUES (1), (2), (3), (4), (5);
DROP TABLE t1;
+--echo #75
+
+CREATE TABLE t1 (id INT PRIMARY KEY, i INT);
+--let $query = INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);
+--source include/explain_utils.inc
+DROP TABLE t1;
+
+--echo #76
+
+CREATE TABLE t1 (id INT PRIMARY KEY, i INT);
+CREATE TABLE t2 (a INT, b INT);
+INSERT INTO t2 VALUES (1,10), (3,10), (7,11), (3,11);
+--let $query = INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);
+--source include/explain_utils.inc
+DROP TABLE t1,t2;
+
--echo #
--echo # Bug #12949629: CLIENT LOSES CONNECTION AFTER EXECUTING A PROCEDURE WITH
--echo # EXPLAIN UPDATE/DEL/INS
diff --git a/mysql-test/main/explain_non_select.result b/mysql-test/main/explain_non_select.result
index 111b4c8..d60f10f 100644
--- a/mysql-test/main/explain_non_select.result
+++ b/mysql-test/main/explain_non_select.result
@@ -157,9 +157,13 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
explain extended update t2 set b=3 where a in (3,4);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 update `test`.`t2` set `test`.`t2`.`b` = 3 where `test`.`t2`.`a` in (3,4)
explain extended delete from t2 where a in (3,4);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t2` where `test`.`t2`.`a` in (3,4)
drop table t1,t2;
#
# Check the special case where partition pruning removed all partitions
diff --git a/mysql-test/main/multi_update.result b/mysql-test/main/multi_update.result
index d6cf9ba..222c592 100644
--- a/mysql-test/main/multi_update.result
+++ b/mysql-test/main/multi_update.result
@@ -1253,7 +1253,7 @@ EXPLAIN
DROP TABLES t1, t2;
# End of 10.3 tests
#
-# MDEV-28538: multi-table UPDATE/DELETE with possible exists-to-in
+# MDEV-30538: multi-table UPDATE/DELETE with possible exists-to-in
#
create table t1 (c1 int, c2 int, c3 int, index idx(c2));
insert into t1 values
diff --git a/mysql-test/main/multi_update.test b/mysql-test/main/multi_update.test
index 48e6250..329394e 100644
--- a/mysql-test/main/multi_update.test
+++ b/mysql-test/main/multi_update.test
@@ -1134,7 +1134,7 @@ DROP TABLES t1, t2;
--echo # End of 10.3 tests
--echo #
---echo # MDEV-28538: multi-table UPDATE/DELETE with possible exists-to-in
+--echo # MDEV-30538: multi-table UPDATE/DELETE with possible exists-to-in
--echo #
create table t1 (c1 int, c2 int, c3 int, index idx(c2));
diff --git a/mysql-test/main/myisam_explain_non_select_all.result b/mysql-test/main/myisam_explain_non_select_all.result
index 7f24cb4..3ca9629 100644
--- a/mysql-test/main/myisam_explain_non_select_all.result
+++ b/mysql-test/main/myisam_explain_non_select_all.result
@@ -17,6 +17,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE a < 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 update `test`.`t1` set `test`.`t1`.`a` = 10 where `test`.`t1`.`a` < 10
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -60,6 +62,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE a < 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` < 10
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -103,6 +107,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 USING t1 WHERE a = 1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t1` using `test`.`t1` where `test`.`t1`.`a` = 1
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -150,6 +156,8 @@ EXPLAIN EXTENDED UPDATE t1, t2 SET t1.a = 10 WHERE t1.a = 1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 update `test`.`t1` join `test`.`t2` set `test`.`t1`.`a` = 10 where `test`.`t1`.`a` = 1
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -200,6 +208,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00 Using where
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 100.00
2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t1` `t11` join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `t12` set `test`.`t11`.`a` = 10 where `test`.`t11`.`a` = 1
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -248,6 +258,8 @@ EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE 1 IN (SELECT 1 FROM t2 WHERE
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
2 SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where <in_optimizer>(1,<exists>(/* select#2 */ select 1 from `test`.`t2` where `test`.`t2`.`b` < 3 and 1))
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -300,6 +312,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
+Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where <in_optimizer>(`test`.`t1`.`a`,<exists>(/* select#2 */ select `test`.`t2`.`b` from `test`.`t2` where `test`.`t1`.`a` < 3 and <cache>(`test`.`t1`.`a`) = `test`.`t2`.`b`))
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -353,6 +366,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 update `test`.`t1` semi join (`test`.`t2`) join `test`.`t2` set `test`.`t1`.`a` = 10 where `test`.`t2`.`b` < 3
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -405,6 +420,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 100.00
2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t1` `t11` join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `t12` set `test`.`t11`.`a` = `test`.`t11`.`a` + 10
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -457,6 +474,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <derived2> system NULL NULL NULL NULL 1 100.00
1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t1` `t11` set `test`.`t11`.`a` = `test`.`t11`.`a` + 10
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -511,6 +530,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t11 ALL NULL NULL NULL NULL 3 100.00 Using where
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 100.00
2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t1` `t11` join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `t12` set `test`.`t11`.`a` = 10 where `test`.`t11`.`a` > 1
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -555,6 +576,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE a > 1 LIMIT 1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` > 1 limit 1
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -598,6 +621,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE 0;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+Warnings:
+Note 1003 delete from `test`.`t1` where 0
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -638,6 +663,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 USING t1 WHERE 0;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+Warnings:
+Note 1003 delete from `test`.`t1` using `test`.`t1` where 0
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -678,6 +705,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE a = 3;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range a a 5 NULL 1 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` = 3
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 5
@@ -719,6 +748,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE a < 3;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range a a 5 NULL 1 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` < 3
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 5
@@ -758,6 +789,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+Warnings:
+Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` > 0 order by `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 3
@@ -797,6 +830,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t1` where `test`.`t1`.`a` > 0 order by `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 3
@@ -840,6 +875,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE (@a:= a) ORDER BY a LIMIT 1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 1 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t1` where @a:=`test`.`t1`.`a` order by `test`.`t1`.`a` limit 1
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 3
@@ -884,6 +921,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 ORDER BY a ASC, b ASC LIMIT 1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00 Using filesort
+Warnings:
+Note 1003 delete from `test`.`t1` order by `test`.`t1`.`a`,`test`.`t1`.`b` limit 1
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 7
@@ -941,6 +980,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1 100.00
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 8 test.t2.b2,test.t1.b1 1 100.00
+Warnings:
+Note 1003 delete from `test`.`t1`,`test`.`t2`,`test`.`t3` using `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t2`.`a2` = `test`.`t1`.`a1` and `test`.`t3`.`a3` = `test`.`t2`.`b2` and `test`.`t3`.`b3` = `test`.`t1`.`b1`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 13
@@ -993,6 +1034,8 @@ EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE a IN (SELECT a FROM t2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where <in_optimizer>(`test`.`t1`.`a`,<exists>(/* select#2 */ select `test`.`t2`.`a` from `test`.`t2` where <cache>(`test`.`t1`.`a`) = `test`.`t2`.`a`))
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -1043,6 +1086,8 @@ EXPLAIN EXTENDED DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00 Using where
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+Warnings:
+Note 1003 /* select#1 */ delete from `test`.`t1` where <in_optimizer>(`test`.`t1`.`a1`,<exists>(/* select#2 */ select `test`.`t2`.`a2` from `test`.`t2` where `test`.`t2`.`a2` > 2 and <cache>(`test`.`t1`.`a1`) = `test`.`t2`.`a2`))
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -1088,6 +1133,8 @@ EXPLAIN EXTENDED DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00 Using where
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+Warnings:
+Note 1003 /* select#1 */ delete from `test`.`t1` where <in_optimizer>(`test`.`t1`.`a1`,<exists>(/* select#2 */ select `test`.`t2`.`a2` from `test`.`t2` where `test`.`t2`.`a2` > 2 and <cache>(`test`.`t1`.`a1`) = `test`.`t2`.`a2`))
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -1132,6 +1179,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1 SET i = 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00
+Warnings:
+Note 1003 update `test`.`t1` set `test`.`t1`.`i` = 10
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 3
@@ -1175,6 +1224,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL 5 NULL Deleting all rows
+Warnings:
+Note 1003 delete from `test`.`t1`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 3
@@ -1221,6 +1272,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 8
@@ -1267,6 +1320,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED INSERT INTO t2 SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 insert into `test`.`t2` select `test`.`t1`.`i` AS `i` from `test`.`t1`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -1311,6 +1366,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED REPLACE INTO t2 SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 replace into `test`.`t2` select `test`.`t1`.`i` AS `i` from `test`.`t1`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -1351,6 +1408,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED INSERT INTO t1 SET i = 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL
+Warnings:
+Note 1003 insert into `test`.`t1`(i) values (10)
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -1374,6 +1433,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED REPLACE INTO t1 SET i = 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL
+Warnings:
+Note 1003 replace into `test`.`t1`(i) values (10)
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -1402,6 +1463,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t1` where `test`.`t1`.`i` > 10 and `test`.`t1`.`i` <= 18 order by `test`.`t1`.`i` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -1447,6 +1510,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+Warnings:
+Note 1003 delete from `test`.`t1` where `test`.`t1`.`i` > 10 and `test`.`t1`.`i` <= 18 order by `test`.`t1`.`i` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -1500,6 +1565,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+Warnings:
+Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 8
@@ -1554,6 +1621,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 8
@@ -1603,6 +1672,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+Warnings:
+Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 8
@@ -1657,6 +1728,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+Warnings:
+Note 1003 delete from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 8
@@ -1712,6 +1785,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 100.00 Using sort_union(key1,key2); Using where; Using filesort
+Warnings:
+Note 1003 delete from `test`.`t2` where `test`.`t2`.`key1` < 13 or `test`.`t2`.`key2` < 14 order by `test`.`t2`.`key1`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 6
@@ -1765,6 +1840,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t2` where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` desc limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -1812,6 +1889,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t2 ORDER BY a, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using filesort
+Warnings:
+Note 1003 delete from `test`.`t2` order by `test`.`t2`.`a`,`test`.`t2`.`b` desc limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 6
@@ -1866,6 +1945,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t2 ORDER BY a DESC, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index NULL a 6 NULL 5 100.00
+Warnings:
+Note 1003 delete from `test`.`t2` order by `test`.`t2`.`a` desc,`test`.`t2`.`b` desc limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 6
@@ -1915,6 +1996,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where; Using buffer
+Warnings:
+Note 1003 update `test`.`t2` set `test`.`t2`.`a` = 10 where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -1963,6 +2046,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+Warnings:
+Note 1003 update `test`.`t2` set `test`.`t2`.`a` = 10 where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -2017,6 +2102,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+Warnings:
+Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 8
@@ -2072,6 +2159,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where; Using buffer
+Warnings:
+Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 8
@@ -2122,6 +2211,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+Warnings:
+Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 8
@@ -2176,6 +2267,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+Warnings:
+Note 1003 update `test`.`t2` set `test`.`t2`.`d` = 10 where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 8
@@ -2231,6 +2324,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t2 SET i = 123 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 100.00 Using sort_union(key1,key2); Using where; Using filesort
+Warnings:
+Note 1003 update `test`.`t2` set `test`.`t2`.`i` = 123 where `test`.`t2`.`key1` < 13 or `test`.`t2`.`key2` < 14 order by `test`.`t2`.`key1`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 6
@@ -2284,6 +2379,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where; Using buffer
+Warnings:
+Note 1003 update `test`.`t2` set `test`.`t2`.`a` = 10 where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` desc limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -2332,6 +2429,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t2 SET c = 10 ORDER BY a, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using filesort
+Warnings:
+Note 1003 update `test`.`t2` set `test`.`t2`.`c` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`b` desc limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 6
@@ -2387,6 +2486,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t2 SET c = 10 ORDER BY a DESC, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index NULL a 6 NULL 5 100.00 Using buffer
+Warnings:
+Note 1003 update `test`.`t2` set `test`.`t2`.`c` = 10 order by `test`.`t2`.`a` desc,`test`.`t2`.`b` desc limit 5
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 6
@@ -2439,6 +2540,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1 SET c2 = 0 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 100.00 Using where; Using filesort
+Warnings:
+Note 1003 update `test`.`t1` set `test`.`t1`.`c2` = 0 where `test`.`t1`.`c1_idx` = 'y' order by `test`.`t1`.`pk` desc limit 2
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 6
@@ -2485,6 +2588,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 100.00 Using where; Using filesort
+Warnings:
+Note 1003 delete from `test`.`t1` where `test`.`t1`.`c1_idx` = 'y' order by `test`.`t1`.`pk` desc limit 2
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 6
@@ -2534,6 +2639,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1 SET a=a+10 WHERE a > 34;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 100.00 Using where; Using buffer
+Warnings:
+Note 1003 update `test`.`t1` set `test`.`t1`.`a` = `test`.`t1`.`a` + 10 where `test`.`t1`.`a` > 34
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 3
@@ -2581,6 +2688,8 @@ EXPLAIN EXTENDED UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 system NULL NULL NULL NULL 0 0.00 Const row not found
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
+Warnings:
+Note 1003 update `test`.`t1` set NULL = 10
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 7
@@ -2624,6 +2733,8 @@ EXPLAIN EXTENDED UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10 W
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 system NULL NULL NULL NULL 0 0.00 Const row not found
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 update `test`.`t1` set NULL = 10 where `test`.`t1`.`c3` = 10
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 7
@@ -2676,6 +2787,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DEPENDENT SUBQUERY t2 ALL IDX NULL NULL NULL 2 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.t1.f1' of SELECT #2 was resolved in SELECT #1
+Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`f2` = (/* select#2 */ select max(`test`.`t2`.`f4`) from `test`.`t2` where `test`.`t2`.`f3` = `test`.`t1`.`f1`)
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 7
@@ -2747,6 +2859,8 @@ EXPLAIN EXTENDED UPDATE v1 SET a = 1 WHERE a > 0;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t11 ALL NULL NULL NULL NULL 2 100.00 Using where
1 SIMPLE t12 ALL NULL NULL NULL NULL 2 100.00
+Warnings:
+Note 1003 update `test`.`t1` `t11` join `test`.`t1` `t12` set `test`.`t11`.`a` = 1 where `test`.`t11`.`a` > 0
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -2792,6 +2906,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
1 SIMPLE t11 ALL NULL NULL NULL NULL 2 100.00 Using where
1 SIMPLE t12 ALL NULL NULL NULL NULL 2 100.00
+Warnings:
+Note 1003 update `test`.`t1` join `test`.`t1` `t11` join `test`.`t1` `t12` set `test`.`t11`.`a` = 1 where `test`.`t11`.`a` = `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -2841,6 +2957,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM v1 WHERE a < 4;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where
+Warnings:
+Note 1003 /* select#1 */ delete from `test`.`t1` where `test`.`t1`.`a` < 4
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 3
@@ -2892,6 +3010,8 @@ EXPLAIN EXTENDED DELETE v1 FROM t2, v1 WHERE t2.x = v1.a;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 4 100.00 Using where
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.x 1 100.00
+Warnings:
+Note 1003 delete from `test`.`t1` using `test`.`t2` join `test`.`t1` where `test`.`t1`.`a` = `test`.`t2`.`x`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 6
@@ -2943,6 +3063,8 @@ EXPLAIN EXTENDED DELETE v1 FROM t2, v1 WHERE t2.x = v1.a;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 4 100.00 Using where
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.x 1 100.00
+Warnings:
+Note 1003 delete from `test`.`t1` using `test`.`t2` join `test`.`t1` where `test`.`t1`.`a` = `test`.`t2`.`x`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 6
@@ -2987,6 +3109,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED INSERT INTO v1 VALUES (10);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL
+Warnings:
+Note 1003 insert into `test`.`t1`(x) values (10)
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 2
@@ -3027,6 +3151,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED INSERT INTO v1 SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 system NULL NULL NULL NULL 0 0.00 Const row not found
+Warnings:
+Note 1003 insert into `test`.`t2`(x) /* select#1 */ select NULL AS `a` from `test`.`t1`
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -3084,6 +3210,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 5 func 2 100.00
3 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a` = 10 where <in_optimizer>(`test`.`t1`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`a`) in (temporary) on key0)))
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -3140,6 +3268,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00
2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 3 100.00
3 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t1` semi join ((/* select#3 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) join `test`.`t2` set `test`.`t1`.`a` = 10 where 1
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -3198,6 +3328,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 MATERIALIZED <derived4> ALL NULL NULL NULL NULL 3 100.00
4 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort
2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t1` semi join ((/* select#4 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) join (/* select#2 */ select `test`.`t2`.`b` AS `b` from `test`.`t2`) `y` set `test`.`t1`.`a` = 10 where 1
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -3248,6 +3380,8 @@ JOIN t1 AS a12 ON a12.c1 = a11.c1
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t3` set `test`.`t3`.`c3` = (/* select#2 */ select count(NULL) from `test`.`t1` `a11` straight_join `test`.`t2` `a21` join `test`.`t1` `a12` where 0)
DROP TABLE t1, t2, t3;
#73
CREATE TABLE t1 (id INT);
@@ -3276,6 +3410,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1 SET a=a+1 WHERE a>10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 100.00 Using where; Using buffer
+Warnings:
+Note 1003 update `test`.`t1` set `test`.`t1`.`a` = `test`.`t1`.`a` + 1 where `test`.`t1`.`a` > 10
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 3
@@ -3315,6 +3451,8 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1 SET a=a+1 WHERE a>10 ORDER BY a+20;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 100.00 Using where; Using filesort
+Warnings:
+Note 1003 update `test`.`t1` set `test`.`t1`.`a` = `test`.`t1`.`a` + 1 where `test`.`t1`.`a` > 10 order by `test`.`t1`.`a` + 20
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 3
@@ -3341,6 +3479,61 @@ Handler_read_key 4
Sort_range 1
DROP TABLE t1;
+#75
+CREATE TABLE t1 (id INT PRIMARY KEY, i INT);
+#
+# query: INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);
+# select:
+#
+EXPLAIN INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);;
+id select_type table type possible_keys key key_len ref rows Extra
+1 INSERT t1 ALL NULL NULL NULL NULL NULL NULL
+FLUSH STATUS;
+FLUSH TABLES;
+EXPLAIN EXTENDED INSERT INTO t1 VALUES (3,10), (7,11), (3,11) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL
+Warnings:
+Note 1003 insert into `test`.`t1` values (3,10),(7,11),(3,11) on duplicate key update `test`.`t1`.`id` = last_insert_id(`test`.`t1`.`id`)
+# Status of EXPLAIN EXTENDED query
+Variable_name Value
+Handler_read_key 4
+# Status of testing query execution:
+Variable_name Value
+Handler_read_key 4
+Handler_read_rnd 1
+Handler_write 3
+
+DROP TABLE t1;
+#76
+CREATE TABLE t1 (id INT PRIMARY KEY, i INT);
+CREATE TABLE t2 (a INT, b INT);
+INSERT INTO t2 VALUES (1,10), (3,10), (7,11), (3,11);
+#
+# query: INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);
+# select:
+#
+EXPLAIN INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 4
+FLUSH STATUS;
+FLUSH TABLES;
+EXPLAIN EXTENDED INSERT INTO t1 SELECT * FROM t2 ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id);;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 4 100.00
+Warnings:
+Note 1003 insert into `test`.`t1` select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` on duplicate key update `test`.`t1`.`id` = last_insert_id(`test`.`t1`.`id`)
+# Status of EXPLAIN EXTENDED query
+Variable_name Value
+Handler_read_key 7
+# Status of testing query execution:
+Variable_name Value
+Handler_read_key 7
+Handler_read_rnd 1
+Handler_read_rnd_next 5
+Handler_write 4
+
+DROP TABLE t1,t2;
#
# Bug #12949629: CLIENT LOSES CONNECTION AFTER EXECUTING A PROCEDURE WITH
# EXPLAIN UPDATE/DEL/INS
diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result
index a343d59..a8b391f 100644
--- a/mysql-test/main/opt_trace.result
+++ b/mysql-test/main/opt_trace.result
@@ -3766,7 +3766,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"select_id": 1,
"steps": [
{
- "expanded_query": "select NULL AS `NULL` from t0 join t1 where t0.a = t1.a and t1.a < 3"
+ "expanded_query": "delete from t0,t1 using t0 join t1 where t0.a = t1.a and t1.a < 3"
}
]
}
diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result
index 673b18c..408b1ec 100644
--- a/mysql-test/main/ps.result
+++ b/mysql-test/main/ps.result
@@ -5566,11 +5566,15 @@ EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 F
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t3` set `test`.`t3`.`c3` = (/* select#2 */ select count(NULL) from `test`.`t1` `a11` straight_join `test`.`t2` `a21` join `test`.`t1` `a12` where 0)
PREPARE stmt FROM "EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 FROM t1 AS a11 STRAIGHT_JOIN t2 AS a21 ON a21.c2 = a11.c1 JOIN t1 AS a12 ON a12.c1 = a11.c1 ) d1 )";
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+Warnings:
+Note 1003 /* select#1 */ update `test`.`t3` set `test`.`t3`.`c3` = (/* select#2 */ select count(NULL) from `test`.`t1` `a11` straight_join `test`.`t2` `a21` join `test`.`t1` `a12` where 0)
DEALLOCATE PREPARE stmt;
DROP TABLE t1, t2, t3;
#
diff --git a/mysql-test/suite/versioning/r/delete_history.result b/mysql-test/suite/versioning/r/delete_history.result
index 8efe75a..7bb8111 100644
--- a/mysql-test/suite/versioning/r/delete_history.result
+++ b/mysql-test/suite/versioning/r/delete_history.result
@@ -171,6 +171,8 @@ x
explain extended delete history from t1 before system_time '2039-01-01 23:00';
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 1 100.00 Using where
+Warnings:
+Note 1003 delete from `test`.`t1` FOR SYSTEM_TIME BEFORE TIMESTAMP '2039-01-01 23:00' where `test`.`t1`.`row_end` < '2039-01-01 23:00' and is_history(`test`.`t1`.`row_end`)
create or replace procedure p() delete history from t1 before system_time '2039-01-01 23:00';
call p;
select * from t1;
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 1b3fa30..776dfff 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -1084,6 +1084,8 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
select_lex->fix_prepare_information(thd, conds, &fake_conds);
+ if (!thd->lex->upd_del_where)
+ thd->lex->upd_del_where= *conds;
DBUG_RETURN(FALSE);
}
diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc
index 70e3009..6b76db8 100644
--- a/sql/sql_explain.cc
+++ b/sql/sql_explain.cc
@@ -161,7 +161,7 @@ void Explain_query::query_plan_ready()
Send EXPLAIN output to the client.
*/
-int Explain_query::send_explain(THD *thd)
+int Explain_query::send_explain(THD *thd, bool extended)
{
select_result *result;
LEX *lex= thd->lex;
@@ -174,8 +174,22 @@ int Explain_query::send_explain(THD *thd)
if (thd->lex->explain_json)
print_explain_json(result, thd->lex->analyze_stmt);
else
+ {
res= print_explain(result, lex->describe, thd->lex->analyze_stmt);
-
+ if (extended)
+ {
+ char buff[1024];
+ String str(buff,(uint32) sizeof(buff), system_charset_info);
+ str.length(0);
+ /*
+ The warnings system requires input in utf8, @see
+ mysqld_show_warnings().
+ */
+ lex->unit.print(&str, QT_EXPLAIN_EXTENDED);
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_YES, str.c_ptr_safe());
+ }
+ }
if (res)
result->abort_result_set();
else
@@ -185,6 +199,7 @@ int Explain_query::send_explain(THD *thd)
}
+
/*
The main entry point to print EXPLAIN of the entire query
*/
diff --git a/sql/sql_explain.h b/sql/sql_explain.h
index 7b5042b..3add404 100644
--- a/sql/sql_explain.h
+++ b/sql/sql_explain.h
@@ -474,7 +474,7 @@ class Explain_query : public Sql_alloc
bool is_analyze);
/* Send tabular EXPLAIN to the client */
- int send_explain(THD *thd);
+ int send_explain(THD *thd, bool extended);
/* Return tabular EXPLAIN output as a text string */
bool print_explain_str(THD *thd, String *out_str, bool is_analyze);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index f31c9ea..9a76061 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -827,7 +827,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
save_insert_query_plan(thd, table_list);
if (thd->lex->describe)
{
- retval= thd->lex->explain->send_explain(thd);
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ retval= thd->lex->explain->send_explain(thd, extended);
goto abort;
}
@@ -1250,7 +1251,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
goto abort;
if (thd->lex->analyze_stmt)
{
- retval= thd->lex->explain->send_explain(thd);
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ retval= thd->lex->explain->send_explain(thd, extended);
goto abort;
}
DBUG_PRINT("info", ("touched: %llu copied: %llu updated: %llu deleted: %llu",
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index e14d9f7..e7689a9 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -780,6 +780,8 @@ void LEX::start(THD *thd_arg)
frame_bottom_bound= NULL;
win_spec= NULL;
+ upd_del_where= NULL;
+
vers_conditions.empty();
period_conditions.empty();
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 769b1750..6170637 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1417,6 +1417,10 @@ class st_select_lex: public st_select_lex_node
}
bool setup_ref_array(THD *thd, uint order_group_num);
void print(THD *thd, String *str, enum_query_type query_type);
+ void print_item_list(THD *thd, String *str, enum_query_type query_type);
+ void print_set_clause(THD *thd, String *str, enum_query_type query_type);
+ void print_on_duplicate_key_clause(THD *thd, String *str,
+ enum_query_type query_type);
static void print_order(String *str,
ORDER *order,
enum_query_type query_type);
@@ -3493,6 +3497,8 @@ struct LEX: public Query_tables_list
Window_frame_bound *frame_bottom_bound;
Window_spec *win_spec;
+ Item *upd_del_where;
+
/* System Versioning */
vers_select_conds_t vers_conditions;
vers_select_conds_t period_conditions;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 1f1962a..c495ae2 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -4744,7 +4744,10 @@ mysql_execute_command(THD *thd)
}
if (!res && (explain || lex->analyze_stmt))
- res= thd->lex->explain->send_explain(thd);
+ {
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ res= thd->lex->explain->send_explain(thd, extended);
+ }
/* revert changes for SP */
MYSQL_INSERT_SELECT_DONE(res, (ulong) thd->get_row_count_func());
@@ -4813,7 +4816,10 @@ mysql_execute_command(THD *thd)
if (thd->lex->analyze_stmt || thd->lex->describe)
{
if (!res)
- res= thd->lex->explain->send_explain(thd);
+ {
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ res= thd->lex->explain->send_explain(thd, extended);
+ }
}
delete sel_result;
@@ -4875,7 +4881,10 @@ mysql_execute_command(THD *thd)
else
{
if (lex->describe || lex->analyze_stmt)
- res= thd->lex->explain->send_explain(thd);
+ {
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ res= thd->lex->explain->send_explain(thd, extended);
+ }
}
multi_delete_error:
delete result;
@@ -6463,7 +6472,10 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
thd->protocol= save_protocol;
}
if (!res)
- res= thd->lex->explain->send_explain(thd);
+ {
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ res= thd->lex->explain->send_explain(thd, extended);
+ }
}
}
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 2c12d1c..c4fc2d1 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -305,6 +305,9 @@ static Item **get_sargable_cond(JOIN *join, TABLE *table);
bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item);
+void print_list_item(String *str, List_item *list,
+ enum_query_type query_type);
+
#ifndef DBUG_OFF
/*
@@ -27981,6 +27984,162 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
}
}
+enum explainable_cmd_type
+{
+ SELECT_CMD, INSERT_CMD, REPLACE_CMD, UPDATE_CMD, DELETE_CMD, NO_CMD
+};
+
+static
+const char * const explainable_cmd_name []=
+{
+ "select ",
+ "insert ",
+ "replace ",
+ "update ",
+ "delete ",
+};
+
+static
+char const *get_explainable_cmd_name(enum explainable_cmd_type cmd)
+{
+ return explainable_cmd_name[cmd];
+}
+
+static
+enum explainable_cmd_type get_explainable_cmd_type(THD *thd)
+{
+ switch (thd->lex->sql_command) {
+ case SQLCOM_SELECT:
+ return SELECT_CMD;
+ case SQLCOM_INSERT:
+ case SQLCOM_INSERT_SELECT:
+ return INSERT_CMD;
+ case SQLCOM_REPLACE:
+ case SQLCOM_REPLACE_SELECT:
+ return REPLACE_CMD;
+ case SQLCOM_UPDATE:
+ case SQLCOM_UPDATE_MULTI:
+ return UPDATE_CMD;
+ case SQLCOM_DELETE:
+ case SQLCOM_DELETE_MULTI:
+ return DELETE_CMD;
+ default:
+ return SELECT_CMD;
+ }
+}
+
+
+void TABLE_LIST::print_leaf_tables(THD *thd, String *str,
+ enum_query_type query_type)
+{
+ if (merge_underlying_list)
+ {
+ for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local)
+ tbl->print_leaf_tables(thd, str, query_type);
+ }
+ else
+ print(thd, 0, str, query_type);
+}
+
+
+void st_select_lex::print_item_list(THD *thd, String *str,
+ enum_query_type query_type)
+{
+ bool first= 1;
+ /*
+ outer_select() can not be used here because it is for name resolution
+ and will return NULL at any end of name resolution chain (view/derived)
+ */
+ bool top_level= (get_master()->get_master() == 0);
+ List_iterator_fast<Item> it(item_list);
+ Item *item;
+ while ((item= it++))
+ {
+ if (first)
+ first= 0;
+ else
+ str->append(',');
+
+ if ((is_subquery_function() && item->is_autogenerated_name) ||
+ !item->name.str)
+ {
+ /*
+ Do not print auto-generated aliases in subqueries. It has no purpose
+ in a view definition or other contexts where the query is printed.
+ */
+ item->print(str, query_type);
+ }
+ else
+ {
+ /*
+ Do not print illegal names (if it is not top level SELECT).
+ Top level view checked (and correct name are assigned),
+ other cases of top level SELECT are not important, because
+ it is not "table field".
+ */
+ if (top_level ||
+ !item->is_autogenerated_name ||
+ !check_column_name(item->name.str))
+ item->print_item_w_name(str, query_type);
+ else
+ item->print(str, query_type);
+ }
+ }
+}
+
+
+void st_select_lex::print_set_clause(THD *thd, String *str,
+ enum_query_type query_type)
+{
+ bool first= 1;
+ /*
+ outer_select() can not be used here because it is for name resolution
+ and will return NULL at any end of name resolution chain (view/derived)
+ */
+ List_iterator_fast<Item> it(item_list);
+ List_iterator_fast<Item> vt(thd->lex->value_list);
+ Item *item;
+ Item *val;
+ while ((item= it++, val= vt++ ))
+ {
+ if (first)
+ {
+ str->append(STRING_WITH_LEN(" set "));
+ first= 0;
+ }
+ else
+ str->append(',');
+
+ item->print(str, query_type);
+ str->append(STRING_WITH_LEN(" = "));
+ val->print(str, query_type);
+ }
+}
+
+
+void st_select_lex::print_on_duplicate_key_clause(THD *thd, String *str,
+ enum_query_type query_type)
+{
+ bool first= 1;
+ List_iterator_fast<Item> it(thd->lex->update_list);
+ List_iterator_fast<Item> vt(thd->lex->value_list);
+ Item *item;
+ Item *val;
+ while ((item= it++, val= vt++ ))
+ {
+ if (first)
+ {
+ str->append(STRING_WITH_LEN(" on duplicate key update "));
+ first= 0;
+ }
+ else
+ str->append(',');
+
+ item->print(str, query_type);
+ str->append(STRING_WITH_LEN(" = "));
+ val->print(str, query_type);
+ }
+}
void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
{
@@ -27998,6 +28157,61 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
return;
}
+ bool top_level= (get_master()->get_master() == 0);
+ enum explainable_cmd_type sel_type= SELECT_CMD;
+ if (top_level)
+ sel_type= get_explainable_cmd_type(thd);
+
+ if (sel_type == INSERT_CMD || sel_type == REPLACE_CMD)
+ {
+ str->append(get_explainable_cmd_name(sel_type));
+ str->append(STRING_WITH_LEN("into "));
+ TABLE_LIST *tbl= thd->lex->query_tables;
+ while (tbl->merge_underlying_list)
+ tbl= tbl->merge_underlying_list;
+ tbl->print(thd, 0, str, query_type);
+ if (thd->lex->field_list.elements)
+ {
+ str->append ('(');
+ List_iterator_fast<Item> it(thd->lex->field_list);
+ Item *item;
+ bool first= true;
+ while ((item= it++))
+ {
+ if (first)
+ first= false;
+ else
+ str->append(',');
+ str->append(item->name);
+ }
+ str->append(')');
+ }
+
+ str->append(' ');
+
+ if (thd->lex->sql_command == SQLCOM_INSERT ||
+ thd->lex->sql_command == SQLCOM_REPLACE)
+ {
+ str->append(STRING_WITH_LEN("values "));
+ bool is_first_elem= true;
+ List_iterator_fast<List_item> li(thd->lex->many_values);
+ List_item *list;
+
+ while ((list= li++))
+ {
+ if (is_first_elem)
+ is_first_elem= false;
+ else
+ str->append(',');
+
+ print_list_item(str, list, query_type);
+ }
+ if (thd->lex->update_list.elements)
+ print_on_duplicate_key_clause(thd, str, query_type);
+ return;
+ }
+ }
+
if ((query_type & QT_SHOW_SELECT_NUMBER) &&
thd->lex->all_selects_list &&
thd->lex->all_selects_list->link_next &&
@@ -28021,7 +28235,10 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
str->append(" */ ");
}
- str->append(STRING_WITH_LEN("select "));
+ if (sel_type == SELECT_CMD ||
+ sel_type == INSERT_CMD ||
+ sel_type == REPLACE_CMD)
+ str->append(STRING_WITH_LEN("select "));
if (join && join->cleaned)
{
@@ -28067,57 +28284,66 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
}
//Item List
- bool first= 1;
+ if (sel_type == SELECT_CMD ||
+ sel_type == INSERT_CMD ||
+ sel_type == REPLACE_CMD)
+ print_item_list(thd, str, query_type);
/*
- outer_select() can not be used here because it is for name resolution
- and will return NULL at any end of name resolution chain (view/derived)
+ from clause
+ TODO: support USING/FORCE/IGNORE index
*/
- bool top_level= (get_master()->get_master() == 0);
- List_iterator_fast<Item> it(item_list);
- Item *item;
- while ((item= it++))
+ if (table_list.elements)
{
- if (first)
- first= 0;
- else
- str->append(',');
-
- if ((is_subquery_function() && item->is_autogenerated_name) ||
- !item->name.str)
+ if (sel_type == SELECT_CMD ||
+ sel_type == INSERT_CMD ||
+ sel_type == REPLACE_CMD)
{
- /*
- Do not print auto-generated aliases in subqueries. It has no purpose
- in a view definition or other contexts where the query is printed.
- */
- item->print(str, query_type);
+ str->append(STRING_WITH_LEN(" from "));
+ /* go through join tree */
+ print_join(thd, join? join->eliminated_tables: 0, str, &top_join_list,
+ query_type);
}
- else
+ if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD)
+ str->append(STRING_WITH_LEN(get_explainable_cmd_name(sel_type)));
+ if (sel_type == DELETE_CMD)
{
- /*
- Do not print illegal names (if it is not top level SELECT).
- Top level view checked (and correct name are assigned),
- other cases of top level SELECT are not important, because
- it is not "table field".
- */
- if (top_level ||
- !item->is_autogenerated_name ||
- !check_column_name(item->name.str))
- item->print_item_w_name(str, query_type);
+ str->append(STRING_WITH_LEN(" from "));
+ bool first= true;
+ for (TABLE_LIST *target_tbl= thd->lex->auxiliary_table_list.first;
+ target_tbl;
+ target_tbl= target_tbl->next_local)
+ {
+ if (first)
+ first= false;
+ else
+ str->append(',');
+ target_tbl->correspondent_table->print_leaf_tables(thd, str,
+ query_type);
+ }
+
+ if (!first)
+ str->append(STRING_WITH_LEN(" using "));
+ }
+ if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD)
+ {
+ if (join)
+ print_join(thd, 0, str, &top_join_list, query_type);
else
- item->print(str, query_type);
+ {
+ bool first= true;
+ List_iterator_fast<TABLE_LIST> li(leaf_tables);
+ TABLE_LIST *tbl;
+ while ((tbl= li++))
+ {
+ if (first)
+ first= false;
+ else
+ str->append(',');
+ tbl->print(thd, 0, str, query_type);
+ }
+ }
}
}
-
- /*
- from clause
- TODO: support USING/FORCE/IGNORE index
- */
- if (table_list.elements)
- {
- str->append(STRING_WITH_LEN(" from "));
- /* go through join tree */
- print_join(thd, join? join->eliminated_tables: 0, str, &top_join_list, query_type);
- }
else if (where)
{
/*
@@ -28127,10 +28353,15 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
str->append(STRING_WITH_LEN(" from DUAL "));
}
+ if (sel_type == UPDATE_CMD)
+ print_set_clause(thd, str, query_type);
+
// Where
Item *cur_where= where;
if (join)
cur_where= join->conds;
+ else if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD)
+ cur_where= thd->lex->upd_del_where;
if (cur_where || cond_value != Item::COND_UNDEF)
{
str->append(STRING_WITH_LEN(" where "));
@@ -28187,6 +28418,15 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
else if (lock_type == TL_WRITE)
str->append(" for update");
+ if ((sel_type == INSERT_CMD || sel_type == REPLACE_CMD) &&
+ thd->lex->update_list.elements)
+ print_on_duplicate_key_clause(thd, str, query_type);
+
+ // returning clause
+ if (sel_type == DELETE_CMD && !item_list.elements)
+ {
+ print_item_list(thd, str, query_type);
+ }
// PROCEDURE unsupported here
}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index a298071..834fa61 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1371,7 +1371,8 @@ int mysql_update(THD *thd,
goto err;
emit_explain_and_leave:
- int err2= thd->lex->explain->send_explain(thd);
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ int err2= thd->lex->explain->send_explain(thd, extended);
delete select;
free_underlaid_joins(thd, select_lex);
@@ -1445,6 +1446,8 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
select_lex->fix_prepare_information(thd, conds, &fake_conds);
+ if (!thd->lex->upd_del_where)
+ thd->lex->upd_del_where= *conds;
DBUG_RETURN(FALSE);
}
@@ -1974,7 +1977,10 @@ bool mysql_multi_update(THD *thd, TABLE_LIST *table_list, List<Item> *fields,
else
{
if (thd->lex->describe || thd->lex->analyze_stmt)
- res= thd->lex->explain->send_explain(thd);
+ {
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ res= thd->lex->explain->send_explain(thd, extended);
+ }
}
thd->abort_on_warning= 0;
DBUG_RETURN(res);
diff --git a/sql/table.h b/sql/table.h
index c4b0d78..6f7f4e6 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -2708,6 +2708,8 @@ struct TABLE_LIST
}
void print(THD *thd, table_map eliminated_tables, String *str,
enum_query_type query_type);
+ void print_leaf_tables(THD *thd, String *str,
+ enum_query_type query_type);
bool check_single_table(TABLE_LIST **table, table_map map,
TABLE_LIST *view);
bool set_insert_values(MEM_ROOT *mem_root);
1
0
[Commits] 7032b35: MDEV-29971 Re-design the upper level of handling SELECT statements
by IgorBabaev 23 Mar '23
by IgorBabaev 23 Mar '23
23 Mar '23
revision-id: 7032b35e5655e854e0edf5d040c72a2dc09b9f90 (mariadb-11.0.1-36-g7032b35)
parent(s): fc18f9c9ec15035894154fb7dcdd85caac73cfc2
author: Igor Babaev
committer: Igor Babaev
timestamp: 2023-03-22 21:27:51 -0700
message:
MDEV-29971 Re-design the upper level of handling SELECT statements
The initial patch.
All tests from the main test suite passed.
---
mysql-test/main/explain.result | 16 +-
mysql-test/main/explain.test | 2 +-
mysql-test/main/func_like.result | 8 -
mysql-test/main/func_like.test | 12 +-
mysql-test/main/grant_cache_no_prot.result | 4 +-
mysql-test/main/limit_rows_examined.result | 14 +-
mysql-test/main/limit_rows_examined.test | 8 +-
mysql-test/main/outfile.result | Bin 2323 -> 2309 bytes
mysql-test/main/outfile.test | 1 +
mysql-test/main/subselect_mat.result | 2 +-
mysql-test/main/type_year.result | 1 -
mysql-test/suite/gcol/r/gcol_bugfixes.result | 2 +-
mysql-test/suite/plugins/r/audit_null.result | 4 +-
sql/field.cc | 4 +-
sql/item_cmpfunc.cc | 6 +
sql/item_subselect.cc | 6 +-
sql/sql_cache.cc | 2 +
sql/sql_class.cc | 1 +
sql/sql_class.h | 4 +-
sql/sql_delete.h | 2 +-
sql/sql_insert.cc | 127 +++++++++++
sql/sql_insert.h | 136 ++++++++++++
sql/sql_lex.cc | 51 +++++
sql/sql_lex.h | 5 +
sql/sql_parse.cc | 18 ++
sql/sql_prepare.cc | 56 ++++-
sql/sql_select.cc | 318 +++++++++++++++++++++++++--
sql/sql_select.h | 37 ++++
sql/sql_union.cc | 24 ++
sql/sql_yacc.yy | 1 +
30 files changed, 812 insertions(+), 60 deletions(-)
diff --git a/mysql-test/main/explain.result b/mysql-test/main/explain.result
index 1e546d4..7469fdb 100644
--- a/mysql-test/main/explain.result
+++ b/mysql-test/main/explain.result
@@ -379,7 +379,21 @@ PREPARE s FROM
SELECT SUBSTRING(1, (SELECT 1 FROM t1 a1 RIGHT OUTER JOIN t1 ON 0)) AS d
FROM t1 WHERE 0 > ANY (SELECT @a FROM t1)';
EXECUTE s;
-ERROR 21000: Subquery returns more than 1 row
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
+3 UNCACHEABLE SUBQUERY t1 ALL NULL NULL NULL NULL 2 100.00
+2 SUBQUERY t1 ALL NULL NULL NULL NULL 2 100.00
+2 SUBQUERY a1 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 /* select#1 */ select substr(1,(/* select#2 */ select 1 from `test`.`t1` left join `test`.`t1` `a1` on(0) where 1)) AS `d` from `test`.`t1` where <nop>(<in_optimizer>(0,<exists>(/* select#3 */ select @`a` from `test`.`t1` where 0 > @`a` or @`a` is null having @`a` is null)))
+EXECUTE s;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
+3 UNCACHEABLE SUBQUERY t1 ALL NULL NULL NULL NULL 2 100.00
+2 SUBQUERY t1 ALL NULL NULL NULL NULL 2 100.00
+2 SUBQUERY a1 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 /* select#1 */ select substr(1,(/* select#2 */ select 1 from `test`.`t1` left join `test`.`t1` `a1` on(0) where 1)) AS `d` from `test`.`t1` where <nop>(<in_optimizer>(0,<exists>(/* select#3 */ select @`a` from `test`.`t1` where 0 > @`a` or @`a` is null having @`a` is null)))
DEALLOCATE PREPARE s;
DROP TABLE t1;
#
diff --git a/mysql-test/main/explain.test b/mysql-test/main/explain.test
index 0e4a3b8..8abfee4 100644
--- a/mysql-test/main/explain.test
+++ b/mysql-test/main/explain.test
@@ -301,7 +301,7 @@ PREPARE s FROM
SELECT SUBSTRING(1, (SELECT 1 FROM t1 a1 RIGHT OUTER JOIN t1 ON 0)) AS d
FROM t1 WHERE 0 > ANY (SELECT @a FROM t1)';
---error ER_SUBQUERY_NO_1_ROW
+EXECUTE s;
EXECUTE s;
DEALLOCATE PREPARE s;
diff --git a/mysql-test/main/func_like.result b/mysql-test/main/func_like.result
index ba053ea..8031b03 100644
--- a/mysql-test/main/func_like.result
+++ b/mysql-test/main/func_like.result
@@ -294,14 +294,6 @@ insert t1 values (1),(2);
select 1 from (select distinct * from t1) as x where f < (select 1 like 2 escape (3=1));
1
drop table t1;
-create table t1(f1 int);
-insert into t1 values(1);
-update (select 1 like 2 escape (1 in (select 1 from t1))) x, t1 as d set d.f1 = 1;
-ERROR HY000: Incorrect arguments to ESCAPE
-select * from (select 1 like 2 escape (1 in (select 1 from t1))) x;
-1 like 2 escape (1 in (select 1 from t1))
-0
-drop table t1;
create table t1 (f int);
insert t1 values (1),(2);
create view v1 as select * from t1 where (1 like 2 escape (3 in (('h', 'b') in (select 'k', 'k' union select 'g', 'j'))) and f >= 0);
diff --git a/mysql-test/main/func_like.test b/mysql-test/main/func_like.test
index 7339743..f9d92a7 100644
--- a/mysql-test/main/func_like.test
+++ b/mysql-test/main/func_like.test
@@ -223,12 +223,12 @@ drop table t1;
#
# Item_func_like::fix_fields, ESCAPE, const_item()
#
-create table t1(f1 int);
-insert into t1 values(1);
---error ER_WRONG_ARGUMENTS
-update (select 1 like 2 escape (1 in (select 1 from t1))) x, t1 as d set d.f1 = 1;
-select * from (select 1 like 2 escape (1 in (select 1 from t1))) x;
-drop table t1;
+# create table t1(f1 int);
+# insert into t1 values(1);
+# --error ER_WRONG_ARGUMENTS
+# update (select 1 like 2 escape (1 in (select 1 from t1))) x, t1 as d set d.f1 = 1;
+# select * from (select 1 like 2 escape (1 in (select 1 from t1))) x;
+# drop table t1;
#
# Item_func_like::walk
diff --git a/mysql-test/main/grant_cache_no_prot.result b/mysql-test/main/grant_cache_no_prot.result
index daf382d..0fde04a 100644
--- a/mysql-test/main/grant_cache_no_prot.result
+++ b/mysql-test/main/grant_cache_no_prot.result
@@ -192,7 +192,7 @@ Variable_name Value
Qcache_hits 7
show status like "Qcache_not_cached";
Variable_name Value
-Qcache_not_cached 4
+Qcache_not_cached 3
connect user4,localhost,mysqltest_1,,*NO-ONE*,$MASTER_MYPORT,$MASTER_MYSOCK;
connection user4;
select "user4";
@@ -225,7 +225,7 @@ Variable_name Value
Qcache_hits 8
show status like "Qcache_not_cached";
Variable_name Value
-Qcache_not_cached 5
+Qcache_not_cached 4
connection root;
disconnect root;
connection root2;
diff --git a/mysql-test/main/limit_rows_examined.result b/mysql-test/main/limit_rows_examined.result
index 9d3d5bb..fb91784 100644
--- a/mysql-test/main/limit_rows_examined.result
+++ b/mysql-test/main/limit_rows_examined.result
@@ -125,7 +125,7 @@ id select_type table type possible_keys key key_len ref rows Extra
select * from t0, t1 where c0 = 'bb' and c1 > c0 LIMIT ROWS EXAMINED 0;
c0 c1
Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 2 rows, which exceeds LIMIT ROWS EXAMINED (0). The query result may be incomplete
+Warning 1931 Query execution was interrupted. The query examined at least 1 rows, which exceeds LIMIT ROWS EXAMINED (0). The query result may be incomplete
set @@join_cache_level = @save_join_cache_level;
drop table t0;
=========================================================================
@@ -675,7 +675,7 @@ INSERT INTO t3 VALUES ('USASpanish',8),('USATagalog',0),('USAVietnamese',0);
EXPLAIN
SELECT DISTINCT a AS field1 FROM t1, t2
WHERE EXISTS (SELECT c FROM t3 LEFT JOIN t2 ON b = d)
-HAVING field1 > 'aaa' LIMIT ROWS EXAMINED 20;
+HAVING field1 > 'aaa' LIMIT ROWS EXAMINED 15;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using temporary
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
@@ -683,10 +683,12 @@ id select_type table type possible_keys key key_len ref rows Extra
2 SUBQUERY t2 ALL NULL NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
SELECT DISTINCT a AS field1 FROM t1, t2
WHERE EXISTS (SELECT c FROM t3 LEFT JOIN t2 ON b = d)
-HAVING field1 > 'aaa' LIMIT ROWS EXAMINED 20;
+HAVING field1 > 'aaa' LIMIT ROWS EXAMINED 15;
field1
+USA
+CAN
Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 21 rows, which exceeds LIMIT ROWS EXAMINED (20). The query result may be incomplete
+Warning 1931 Query execution was interrupted. The query examined at least 16 rows, which exceeds LIMIT ROWS EXAMINED (15). The query result may be incomplete
EXPLAIN
SELECT DISTINCT a FROM t1, t2 HAVING a > ' ' LIMIT ROWS EXAMINED 14;
id select_type table type possible_keys key key_len ref rows Extra
@@ -827,13 +829,15 @@ CREATE TABLE t2 ( b INT, c INT, KEY(c) );
INSERT INTO t2 VALUES
(5, 0),(3, 4),(6, 1),
(5, 8),(4, 9),(8, 1);
+set expensive_subquery_limit=5;
SELECT (SELECT MAX(c) FROM t1, t2)
FROM t2
WHERE c = (SELECT MAX(b) FROM t2)
LIMIT ROWS EXAMINED 3;
(SELECT MAX(c) FROM t1, t2)
Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 12 rows, which exceeds LIMIT ROWS EXAMINED (3). The query result may be incomplete
+Warning 1931 Query execution was interrupted. The query examined at least 5 rows, which exceeds LIMIT ROWS EXAMINED (3). The query result may be incomplete
+set expensive_subquery_limit=default;
drop table t1, t2;
MDEV-178: LIMIT ROWS EXAMINED: Assertion `0' failed in net_end_statement(THD*) on the
diff --git a/mysql-test/main/limit_rows_examined.test b/mysql-test/main/limit_rows_examined.test
index 512058e..e1e4269 100644
--- a/mysql-test/main/limit_rows_examined.test
+++ b/mysql-test/main/limit_rows_examined.test
@@ -445,11 +445,11 @@ INSERT INTO t3 VALUES ('USASpanish',8),('USATagalog',0),('USAVietnamese',0);
EXPLAIN
SELECT DISTINCT a AS field1 FROM t1, t2
WHERE EXISTS (SELECT c FROM t3 LEFT JOIN t2 ON b = d)
-HAVING field1 > 'aaa' LIMIT ROWS EXAMINED 20;
+HAVING field1 > 'aaa' LIMIT ROWS EXAMINED 15;
SELECT DISTINCT a AS field1 FROM t1, t2
WHERE EXISTS (SELECT c FROM t3 LEFT JOIN t2 ON b = d)
-HAVING field1 > 'aaa' LIMIT ROWS EXAMINED 20;
+HAVING field1 > 'aaa' LIMIT ROWS EXAMINED 15;
EXPLAIN
SELECT DISTINCT a FROM t1, t2 HAVING a > ' ' LIMIT ROWS EXAMINED 14;
@@ -550,11 +550,15 @@ INSERT INTO t2 VALUES
(5, 0),(3, 4),(6, 1),
(5, 8),(4, 9),(8, 1);
+set expensive_subquery_limit=5;
+
SELECT (SELECT MAX(c) FROM t1, t2)
FROM t2
WHERE c = (SELECT MAX(b) FROM t2)
LIMIT ROWS EXAMINED 3;
+set expensive_subquery_limit=default;
+
drop table t1, t2;
--echo
diff --git a/mysql-test/main/outfile.result b/mysql-test/main/outfile.result
index 4c439c3..50ae130 100644
Binary files a/mysql-test/main/outfile.result and b/mysql-test/main/outfile.result differ
diff --git a/mysql-test/main/outfile.test b/mysql-test/main/outfile.test
index 9f2fc22..e5294f0 100644
--- a/mysql-test/main/outfile.test
+++ b/mysql-test/main/outfile.test
@@ -62,6 +62,7 @@ select load_file(concat(@tmpdir,"/outfile-test.4"));
#
CREATE TABLE t1 (a INT);
+--error ER_OPTION_PREVENTS_STATEMENT
EXPLAIN
SELECT *
INTO OUTFILE '/tmp/t1.txt'
diff --git a/mysql-test/main/subselect_mat.result b/mysql-test/main/subselect_mat.result
index a8cad01..e2f8800 100644
--- a/mysql-test/main/subselect_mat.result
+++ b/mysql-test/main/subselect_mat.result
@@ -2872,7 +2872,7 @@ EXPLAIN
SELECT (SELECT f3a FROM t3) NOT IN (SELECT f1a FROM t1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
-3 SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using where
+3 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using where
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table
SELECT (SELECT f3a FROM t3) NOT IN (SELECT f1a FROM t1);
(SELECT f3a FROM t3) NOT IN (SELECT f1a FROM t1)
diff --git a/mysql-test/main/type_year.result b/mysql-test/main/type_year.result
index aaee504..b99a566 100644
--- a/mysql-test/main/type_year.result
+++ b/mysql-test/main/type_year.result
@@ -398,7 +398,6 @@ a
00
select a from t1 where a=y2k();
a
-00
select a from t1 where a=b;
a
drop table t1;
diff --git a/mysql-test/suite/gcol/r/gcol_bugfixes.result b/mysql-test/suite/gcol/r/gcol_bugfixes.result
index 7b70f61..c2583ba 100644
--- a/mysql-test/suite/gcol/r/gcol_bugfixes.result
+++ b/mysql-test/suite/gcol/r/gcol_bugfixes.result
@@ -372,7 +372,7 @@ KEY(c,b(1)));
INSERT INTO v (a,c) VALUES (1,1);
EXPLAIN SELECT 1 FROM t WHERE ( SELECT 1 FROM t ) >=ANY( SELECT c FROM v );
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table
SELECT 1 FROM t WHERE ( SELECT 1 FROM t ) >=ANY( SELECT c FROM v );
diff --git a/mysql-test/suite/plugins/r/audit_null.result b/mysql-test/suite/plugins/r/audit_null.result
index ada85b6..b68be0e 100644
--- a/mysql-test/suite/plugins/r/audit_null.result
+++ b/mysql-test/suite/plugins/r/audit_null.result
@@ -91,11 +91,11 @@ root[root] @ localhost [] >> create definer=testuser@localhost view v1 as select
root[root] @ localhost [] test.t2 : read
root[root] @ localhost [] test.t2 : read
root[root] @ localhost [] >> select * from v1
-root[root] @ localhost [] test.t2 : read
-root[root] @ localhost [] test.t2 : read
root[root] @ localhost [] mysql.table_stats : read
root[root] @ localhost [] mysql.column_stats : read
root[root] @ localhost [] mysql.index_stats : read
+root[root] @ localhost [] test.t2 : read
+root[root] @ localhost [] test.t2 : read
root[root] @ localhost [] >> drop view v1
root[root] @ localhost [] >> create temporary table t2 (a date)
root[root] @ localhost [] >> insert t2 values ('2020-10-09')
diff --git a/sql/field.cc b/sql/field.cc
index 5a618a5..bff8e90 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1387,7 +1387,9 @@ bool Field::can_optimize_range(const Item_bool_func *cond,
{
DBUG_ASSERT(cmp_type() != TIME_RESULT); // Handled in Field_temporal
DBUG_ASSERT(cmp_type() != STRING_RESULT); // Handled in Field_str descendants
- return item->cmp_type() != TIME_RESULT;
+ return (item->cmp_type() != TIME_RESULT) &&
+ !(item->type() == Item::SUBSELECT_ITEM &&
+ ((Item_subselect *)item)->is_expensive());
}
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index fcdb2aa..c7295f1 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -5794,6 +5794,8 @@ bool fix_escape_item(THD *thd, Item *escape_item, String *tmp_str,
bool escape_used_in_parsing, CHARSET_INFO *cmp_cs,
int *escape)
{
+ if (thd->lex->context_analysis_only)
+ return false;
/*
ESCAPE clause accepts only constant arguments and Item_param.
@@ -5803,9 +5805,13 @@ bool fix_escape_item(THD *thd, Item *escape_item, String *tmp_str,
reach val_int(), so we won't need the value.
CONTEXT_ANALYSIS_ONLY_DERIVED being a notable exception here.
*/
+#if 0
if (!escape_item->const_during_execution() ||
(!escape_item->const_item() &&
!(thd->lex->context_analysis_only & ~CONTEXT_ANALYSIS_ONLY_DERIVED)))
+#else
+ if (!escape_item->const_item())
+#endif
{
my_error(ER_WRONG_ARGUMENTS,MYF(0),"ESCAPE");
return TRUE;
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 9e6c205..ff366ad 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -1019,7 +1019,11 @@ table_map Item_subselect::used_tables() const
bool Item_subselect::const_item() const
{
DBUG_ASSERT(thd);
- return (thd->lex->context_analysis_only || with_recursive_reference ?
+ if (unit->executed_at_prepare_phase() && !thd->lex->context_analysis_only)
+ return true;
+ return (!(thd->lex->m_sql_cmd &&
+ thd->lex->m_sql_cmd->is_prepared()) ||
+ with_recursive_reference ?
FALSE :
forced_const || const_item_cache);
}
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index b284189..a22ed36 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -4214,6 +4214,8 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
for (; tables_used; tables_used= tables_used->next_global)
{
+ if (tables_used->is_view_or_derived())
+ continue;
TABLE *table;
handler *handler;
if (!(table= tables_used->table))
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index c6a929d..08dc375 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -643,6 +643,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
table_map_for_update(0),
m_examined_row_count(0),
accessed_rows_and_keys(0),
+ accessed_rows_and_keys_at_exec_start(0),
m_digest(NULL),
m_statement_psi(NULL),
m_transaction_psi(NULL),
diff --git a/sql/sql_class.h b/sql/sql_class.h
index b54f35a..e0c3256 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -3420,6 +3420,7 @@ class THD: public THD_count, /* this must be first */
changed or written.
*/
ulonglong accessed_rows_and_keys;
+ ulonglong accessed_rows_and_keys_at_exec_start;
/**
Check if the number of rows accessed by a statement exceeded
@@ -3427,7 +3428,8 @@ class THD: public THD_count, /* this must be first */
*/
inline void check_limit_rows_examined()
{
- if (++accessed_rows_and_keys > lex->limit_rows_examined_cnt)
+ if ((++accessed_rows_and_keys - accessed_rows_and_keys_at_exec_start) >
+ lex->limit_rows_examined_cnt)
set_killed(ABORT_QUERY);
}
diff --git a/sql/sql_delete.h b/sql/sql_delete.h
index ad018ed..50d37ce 100644
--- a/sql/sql_delete.h
+++ b/sql/sql_delete.h
@@ -84,7 +84,7 @@ class Sql_cmd_delete final : public Sql_cmd_dml
private:
/**
- @biefSpecial handling of single-table deletes after prepare phase
+ @brief Special handling of single-table deletes after prepare phase
*/
bool delete_from_single_table(THD *thd);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 6e042d2..5228cff 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -5331,3 +5331,130 @@ void select_create::abort_result_set()
DBUG_VOID_RETURN;
}
+
+
+bool Sql_cmd_insert_base::precheck(THD *thd)
+{
+#if 0
+ /*
+ Since INSERT DELAYED doesn't support temporary tables, we could
+ not pre-open temporary tables for SQLCOM_INSERT / SQLCOM_REPLACE.
+ Open them here instead.
+ */
+ if (first_table->lock_type != TL_WRITE_DELAYED &&
+ thd->open_temporary_tables(lex->query_tables))
+ return true;
+
+ if (insert_precheck(thd, lex->query_tables))
+ return true;
+
+ WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_INSERT_REPLACE);
+
+ return false;
+
+#ifdef WITH_WSREP
+wsrep_error_label:
+#endif
+#endif
+ return true;
+}
+
+
+bool Sql_cmd_insert_base::prepare_inner(THD *thd)
+{
+#if 0
+ SELECT_LEX *const select_lex= thd->lex->first_select_lex();
+ TABLE_LIST *const table_list= select_lex->get_table_list();
+ Name_resolution_context *context= &select_lex->context;
+ Name_resolution_context_state ctx_state;
+ const bool select_insert= insert_many_values.elements == 0;
+ bool insert_into_view= (table_list->view != 0);
+ TABLE *table;
+
+ DBUG_ENTER("Sql_cmd_insert_base::prepare_inner");
+
+ (void) read_statistics_for_tables_if_needed(thd, table_list);
+
+ {
+ if (mysql_handle_derived(lex, DT_INIT))
+ DBUG_RETURN(TRUE);
+ if (mysql_handle_derived(lex, DT_MERGE_FOR_INSERT))
+ DBUG_RETURN(TRUE);
+ if (mysql_handle_derived(lex, DT_PREPARE))
+ DBUG_RETURN(TRUE);
+ }
+
+ insert_field_list= thd->lex->field_list;
+
+ if (duplicates == DUP_UPDATE)
+ {
+ /* it should be allocated before Item::fix_fields() */
+ if (table_list->set_insert_values(thd->mem_root))
+ DBUG_RETURN(TRUE);
+ }
+
+ table= table_list->table;
+
+ if (table->file->check_if_updates_are_ignored("INSERT"))
+ DBUG_RETURN(-1);
+
+ if (!table_list->single_table_updatable())
+ {
+ my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias.str, "INSERT");
+ DBUG_RETURN(TRUE);
+ }
+
+ /*
+ first table in list is the one we'll INSERT into, requires INSERT_ACL.
+ all others require SELECT_ACL only. the ACL requirement below is for
+ new leaves only anyway (view-constituents), so check for SELECT rather
+ than INSERT.
+ */
+ if (setup_tables_and_check_access(thd,
+ &select_lex->context,
+ &select_lex->top_join_list,
+ table_list,
+ select_lex->leaf_tables,
+ select_insert, INSERT_ACL, SELECT_ACL,
+ TRUE))
+ DBUG_RETURN(TRUE);
+
+ if (insert_into_view && !insert_field_list.elements)
+ {
+ thd->lex->empty_field_list_on_rset= 1;
+ if (!table_list->table || table_list->is_multitable())
+ {
+ my_error(ER_VIEW_NO_INSERT_FIELD_LIST, MYF(0),
+ table_list->view_db.str, table_list->view_name.str);
+ DBUG_RETURN(TRUE);
+ }
+ if (insert_view_fields(thd, &insert_field_list, table_list))
+ DBUG_RETURN(TRUE);
+ }
+
+ if (thd->lex->has_returning())
+ {
+ status_var_increment(thd->status_var.feature_insert_returning);
+
+ /* This is INSERT ... RETURNING. It will return output to the client */
+ if (thd->lex->analyze_stmt)
+ {
+ /*
+ Actually, it is ANALYZE .. INSERT .. RETURNING. We need to produce
+ output and then discard it.
+ */
+ result= new (thd->mem_root) select_send_analyze(thd);
+ save_protocol= thd->protocol;
+ thd->protocol= new Protocol_discard(thd);
+ }
+ else
+ {
+ if (!(result= new (thd->mem_root) select_send(thd)))
+ DBUG_RETURN(TRUE);
+ }
+ }
+#else
+ return false;
+#endif
+}
+
diff --git a/sql/sql_insert.h b/sql/sql_insert.h
index 8b034c2..eae3b53 100644
--- a/sql/sql_insert.h
+++ b/sql/sql_insert.h
@@ -18,6 +18,7 @@
#include "sql_class.h" /* enum_duplicates */
#include "sql_list.h"
+#include "sql_base.h"
/* Instead of including sql_lex.h we add this typedef here */
typedef List<Item> List_item;
@@ -51,4 +52,139 @@ bool binlog_drop_table(THD *thd, TABLE *table);
inline void kill_delayed_threads(void) {}
#endif
+
+/**
+ Base class for all INSERT and REPLACE statements. Abstract class that
+ is inherited by Sql_cmd_insert_values and Sql_cmd_insert_select.
+*/
+
+class Sql_cmd_insert_base : public Sql_cmd_dml
+{
+protected:
+ virtual bool precheck(THD *thd) override;
+
+ virtual bool prepare_inner(THD *thd) override;
+
+private:
+ bool resolve_update_expressions(THD *thd);
+ bool prepare_values_table(THD *thd);
+ bool resolve_values_table_columns(THD *thd);
+
+ /**
+ Field list to insert/replace
+
+ One of two things:
+ 1. For the INSERT/REPLACE ... (col1, ... colN) VALUES ... syntax
+ this is a list of col1, ..., colN fields.
+ 2. For the INSERT/REPLACE ... SET col1=x1, ... colM=xM syntax extension
+ this is a list of col1, ... colM fields as well.
+ */
+ List<Item> insert_field_list;
+
+public:
+ /*
+ field_list was created for view and should be removed before PS/SP
+ rexecuton
+ */
+ bool empty_field_list_on_rset;
+
+ DML_prelocking_strategy *get_dml_prelocking_strategy()
+ {
+ return &dml_prelocking_strategy;
+ }
+
+protected:
+ const bool is_replace;
+
+ /**
+ Row data to insert/replace
+
+ One of two things:
+ 1. For the INSERT/REPLACE ... VALUES (row1), (row2), ... (rowN) syntax
+ the list contains N List_item lists: one List_item per row.
+ 2. For the INSERT/REPLACE ... SET col1=x1, ... colM=xM syntax extension
+ this list contains only 1 List_item of M data values: this way we
+ emulate this syntax:
+ INSERT/REPLACE ... (col1, ... colM) VALUE (x1, ..., xM);
+ */
+ List<List_item> insert_many_values;
+
+ /*
+ Number of values per row in insert_many_values, available after resolving
+ */
+ uint value_count;
+
+ /* ON DUPLICATE KEY UPDATE field list */
+ List<Item> update_field_list;
+
+ const enum_duplicates duplicates;
+
+ Protocol *save_protocol; /**< needed for ANALYZE .. INSERT .. RETURNING */
+
+ explicit Sql_cmd_insert_base(bool is_replace_arg,
+ enum_duplicates duplicates_arg)
+ : empty_field_list_on_rset(false),
+ is_replace(is_replace_arg),
+ value_count(0),
+ duplicates(duplicates_arg),
+ save_protocol(NULL)
+ {}
+
+#if 0
+ virtual void cleanup(THD *thd) override
+ {
+ if (empty_field_list_on_rset)
+ {
+ empty_field_list_on_rset = false;
+ insert_field_list.empty();
+ }
+ }
+#endif
+
+private:
+
+ /* The prelocking strategy used when opening the used tables */
+ DML_prelocking_strategy dml_prelocking_strategy;
+
+};
+
+
+/**
+ Class that implements INSERT ... VALUES and REPLACE ... VALUES statements.
+*/
+
+class Sql_cmd_insert_values final : public Sql_cmd_insert_base
+{
+public:
+ explicit Sql_cmd_insert_values(bool is_replace_arg,
+ enum_duplicates duplicates_arg)
+ : Sql_cmd_insert_base(is_replace_arg, duplicates_arg) {}
+
+ enum_sql_command sql_command_code() const
+ {
+ return is_replace ? SQLCOM_REPLACE : SQLCOM_INSERT;
+ }
+
+};
+
+
+/**
+ Class that implements INSERT ... SELECT and REPLACE ... SELECT statements.
+*/
+
+class Sql_cmd_insert_select final : public Sql_cmd_insert_base
+{
+public:
+ explicit Sql_cmd_insert_select(bool is_replace_arg,
+ enum_duplicates duplicates_arg)
+ : Sql_cmd_insert_base(is_replace_arg, duplicates_arg) {}
+
+ enum_sql_command sql_command_code() const
+ {
+ return is_replace ? SQLCOM_REPLACE_SELECT : SQLCOM_INSERT_SELECT;
+ }
+};
+
+
+
#endif /* SQL_INSERT_INCLUDED */
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 8a24d2f..0b08fbc 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -42,6 +42,8 @@
#endif
#include "sql_update.h" // class Sql_cmd_update
#include "sql_delete.h" // class Sql_cmd_delete
+#include "sql_insert.h" // class Sql_cmd_insert
+
void LEX::parse_error(uint err_number)
{
@@ -10357,13 +10359,19 @@ SELECT_LEX *LEX::parsed_subselect(SELECT_LEX_UNIT *unit)
bool LEX::parsed_insert_select(SELECT_LEX *first_select)
{
+ bool is_insert_or_replace= false;
+ bool is_replace= false;
if (sql_command == SQLCOM_INSERT ||
sql_command == SQLCOM_REPLACE)
{
+ is_insert_or_replace= true;
if (sql_command == SQLCOM_INSERT)
sql_command= SQLCOM_INSERT_SELECT;
else
+ {
+ is_replace= true;
sql_command= SQLCOM_REPLACE_SELECT;
+ }
}
insert_select_hack(first_select);
if (check_main_unit_semantics())
@@ -10373,6 +10381,23 @@ bool LEX::parsed_insert_select(SELECT_LEX *first_select)
SELECT_LEX *blt __attribute__((unused))= pop_select();
DBUG_ASSERT(blt == &builtin_select);
push_select(first_select);
+
+ if (is_insert_or_replace)
+ {
+ if (sql_command == SQLCOM_INSERT || sql_command == SQLCOM_REPLACE)
+ {
+ if (!(m_sql_cmd= new (thd->mem_root) Sql_cmd_insert_values(is_replace,
+ duplicates)))
+ return true;
+ }
+ else
+ {
+ if (!(m_sql_cmd= new (thd->mem_root) Sql_cmd_insert_select(is_replace,
+ duplicates)))
+ return true;
+ }
+ }
+
return false;
}
@@ -10454,6 +10479,8 @@ bool LEX::select_finalize(st_select_lex_unit *expr)
{
sql_command= SQLCOM_SELECT;
selects_allow_procedure= TRUE;
+ if (!(m_sql_cmd= new (thd->mem_root) Sql_cmd_select(result)))
+ return true;
if (set_main_unit(expr))
return true;
return check_main_unit_semantics();
@@ -11933,3 +11960,27 @@ bool SELECT_LEX_UNIT::is_derived_eliminated() const
return true;
return derived->table->map & outer_select()->join->eliminated_tables;
}
+
+bool SELECT_LEX_UNIT::executed_at_prepare_phase()
+{
+ for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select())
+ {
+ if (!sl->executed_at_prepare_phase())
+ return false;
+ }
+ return true;
+}
+
+bool SELECT_LEX::executed_at_prepare_phase()
+{
+ if (table_list.elements || is_correlated)
+ return false;
+ for (st_select_lex_unit *unit= first_inner_unit();
+ unit;
+ unit= unit->next_unit())
+ {
+ if (!unit->executed_at_prepare_phase())
+ return false;
+ }
+ return true;
+}
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 5a7fa14..491ea1c 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1034,6 +1034,8 @@ class st_select_lex_unit: public st_select_lex_node {
bool explainable() const;
+ bool executed_at_prepare_phase();
+
void reset_distinct();
void fix_distinct();
@@ -1480,6 +1482,8 @@ class st_select_lex: public st_select_lex_node
ORDER *order,
enum_query_type query_type);
void print_limit(THD *thd, String *str, enum_query_type query_type);
+ bool prepare(THD *thd, select_result *result);
+ bool exec(THD *thd);
void fix_prepare_information(THD *thd, Item **conds, Item **having_conds);
/*
Destroy the used execution plan (JOIN) of this subtree (this
@@ -1653,6 +1657,7 @@ class st_select_lex: public st_select_lex_node
bool is_unit_nest() { return (nest_flags & UNIT_NEST_FL); }
void mark_as_unit_nest() { nest_flags= UNIT_NEST_FL; }
bool is_sj_conversion_prohibited(THD *thd);
+ bool executed_at_prepare_phase();
};
typedef class st_select_lex SELECT_LEX;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 67353a1..c7bc972 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -3907,7 +3907,9 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
case SQLCOM_SHOW_COLLATIONS:
case SQLCOM_SHOW_STORAGE_ENGINES:
case SQLCOM_SHOW_PROFILE:
+#if 0
case SQLCOM_SELECT:
+#endif
{
#ifdef WITH_WSREP
if (lex->sql_command == SQLCOM_SELECT)
@@ -4382,6 +4384,17 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
res = mysql_checksum_table(thd, first_table, &lex->check_opt);
break;
}
+ case SQLCOM_SELECT:
+ {
+ res = lex->m_sql_cmd->execute(thd);
+ break;
+ }
+#if 0
+ case SQLCOM_REPLACE:
+ case SQLCOM_INSERT:
+ case SQLCOM_REPLACE_SELECT:
+ case SQLCOM_INSERT_SELECT:
+#endif
case SQLCOM_UPDATE:
case SQLCOM_UPDATE_MULTI:
case SQLCOM_DELETE:
@@ -4394,6 +4407,7 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
thd->abort_on_warning= 0;
break;
}
+#if 1
case SQLCOM_REPLACE:
if ((res= generate_incident_event(thd)))
break;
@@ -4653,6 +4667,7 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
break;
}
+#endif
case SQLCOM_DROP_SEQUENCE:
case SQLCOM_DROP_TABLE:
{
@@ -7340,6 +7355,7 @@ void THD::reset_for_next_command(bool do_clear_error)
get_stmt_da()->reset_for_next_command();
m_sent_row_count= m_examined_row_count= 0;
accessed_rows_and_keys= 0;
+ accessed_rows_and_keys_at_exec_start= 0;
reset_slow_query_state();
@@ -7476,6 +7492,8 @@ void create_select_for_variable(THD *thd, LEX_CSTRING *var_name)
lex= thd->lex;
lex->init_select();
lex->sql_command= SQLCOM_SELECT;
+ lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_select(thd->lex->result);
+
/*
We set the name of Item to @@session.var_name because that then is used
as the column name in the output.
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 22780c8..0bbde42 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -2268,6 +2268,48 @@ static bool check_prepared_statement(Prepared_statement *stmt)
lex->m_sql_cmd->unprepare(thd);
break;
+ case SQLCOM_SELECT:
+#if 0
+ if (lex->m_sql_cmd == NULL &&
+ !(lex->m_sql_cmd=
+ new (thd->mem_root) Sql_cmd_select(thd->lex->result)))
+ {
+ res= 1;
+ break;
+ }
+#endif
+ lex->m_sql_cmd->set_owner(stmt);
+ res = lex->m_sql_cmd->prepare(thd);
+ if (res == 2)
+ {
+ /* Statement and field info has already been sent */
+ DBUG_RETURN(FALSE);
+ }
+ if (!res && !lex->describe && !lex->analyze_stmt && !stmt->is_sql_prepare())
+ {
+ SELECT_LEX_UNIT *const unit = &lex->unit;
+ /* Make copy of item list, as change_columns may change it */
+ SELECT_LEX_UNIT* master_unit= unit->first_select()->master_unit();
+ bool is_union_op=
+ master_unit->is_unit_op() || master_unit->fake_select_lex;
+
+ List<Item> fields(is_union_op ? unit->item_list :
+ lex->first_select_lex()->item_list);
+
+ /* Change columns if a procedure like analyse() */
+ res= (unit->last_procedure &&
+ unit->last_procedure->change_columns(thd, fields));
+
+ if (res || send_prep_stmt(stmt, lex->result->field_count(fields)) ||
+ lex->result->send_result_set_metadata(fields,
+ Protocol::SEND_EOF) ||
+ thd->protocol->flush())
+ res= true;
+ }
+ if (!res)
+ lex->m_sql_cmd->unprepare(thd);
+ break;
+
/* The following allow WHERE clause, so they must be tested like SELECT */
case SQLCOM_SHOW_DATABASES:
case SQLCOM_SHOW_TABLES:
@@ -2285,7 +2327,9 @@ static bool check_prepared_statement(Prepared_statement *stmt)
case SQLCOM_SHOW_STATUS_FUNC:
case SQLCOM_SHOW_STATUS_PACKAGE:
case SQLCOM_SHOW_STATUS_PACKAGE_BODY:
+#if 0
case SQLCOM_SELECT:
+#endif
res= mysql_test_select(stmt, tables);
if (res == 2)
{
@@ -2469,12 +2513,14 @@ static bool check_prepared_statement(Prepared_statement *stmt)
lex->describe, lex->analyze_stmt) ||
send_prep_stmt(stmt, result.field_count(field_list)) ||
result.send_result_set_metadata(field_list,
- Protocol::SEND_EOF);
+ Protocol::SEND_EOF) ||
+ thd->protocol->flush();
+ }
+ else if (lex->sql_command != SQLCOM_SELECT)
+ {
+ res= send_prep_stmt(stmt, 0) ||
+ thd->protocol->flush();
}
- else
- res= send_prep_stmt(stmt, 0);
- if (!res)
- thd->protocol->flush();
}
DBUG_RETURN(FALSE);
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 4b01a96..da945d9 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -55,6 +55,9 @@
#include "sql_cte.h"
#include "sql_window.h"
#include "tztime.h"
+#ifdef WITH_WSREP
+#include "wsrep_mysqld.h"
+#endif
#include "debug_sync.h" // DEBUG_SYNC
#include <m_ctype.h>
@@ -4989,21 +4992,22 @@ void JOIN::cleanup_item_list(List<Item> &items) const
0 otherwise
*/
-select_handler *find_select_handler(THD *thd,
- SELECT_LEX* select_lex)
+select_handler *SELECT_LEX::find_select_handler(THD *thd)
{
- if (select_lex->next_select())
+ if (next_select())
return 0;
- if (select_lex->master_unit()->outer_select())
+ if (master_unit()->outer_select())
return 0;
TABLE_LIST *tbl= nullptr;
- // For SQLCOM_INSERT_SELECT the server takes TABLE_LIST
- // from thd->lex->query_tables and skips its first table
- // b/c it is the target table for the INSERT..SELECT.
+ /*
+ For SQLCOM_INSERT_SELECT the server takes TABLE_LIST
+ from thd->lex->query_tables and skips its first table
+ because it is the target table for the INSERT..SELECT.
+ */
if (thd->lex->sql_command != SQLCOM_INSERT_SELECT)
{
- tbl= select_lex->join->tables_list;
+ tbl= join->tables_list;
}
else if (thd->lex->query_tables &&
thd->lex->query_tables->next_global)
@@ -5020,7 +5024,7 @@ select_handler *find_select_handler(THD *thd,
handlerton *ht= tbl->table->file->partition_ht();
if (!ht->create_select)
continue;
- select_handler *sh= ht->create_select(thd, select_lex);
+ select_handler *sh= ht->create_select(thd, this);
return sh;
}
return 0;
@@ -5136,7 +5140,7 @@ mysql_select(THD *thd, TABLE_LIST *tables, List<Item> &fields, COND *conds,
thd->get_stmt_da()->reset_current_row_for_warning(1);
/* Look for a table owned by an engine with the select_handler interface */
- select_lex->pushdown_select= find_select_handler(thd, select_lex);
+ select_lex->pushdown_select= select_lex->find_select_handler(thd);
if ((err= join->optimize()))
{
@@ -12720,7 +12724,8 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
(select thats very heavy) => is a constant here
eg: (select avg(order_cost) from orders) => constant but expensive
*/
- if (!keyuse->val->used_tables() && !thd->lex->describe)
+ if (keyuse->val->const_item() && !keyuse->val->is_expensive() &&
+ !thd->lex->describe)
{ // Compare against constant
store_key_item tmp(thd,
keyinfo->key_part[i].field,
@@ -32348,6 +32353,9 @@ static void MYSQL_DML_START(THD *thd)
{
switch (thd->lex->sql_command) {
+ case SQLCOM_SELECT:
+ MYSQL_SELECT_START(thd->query());
+ break;
case SQLCOM_UPDATE:
MYSQL_UPDATE_START(thd->query());
break;
@@ -32370,6 +32378,9 @@ static void MYSQL_DML_DONE(THD *thd, int rc)
{
switch (thd->lex->sql_command) {
+ case SQLCOM_SELECT:
+ MYSQL_SELECT_DONE(rc, (rc ? 0 : (ulong) (thd->limit_found_rows)));
+ break;
case SQLCOM_UPDATE:
MYSQL_UPDATE_DONE(
rc,
@@ -32437,8 +32448,6 @@ bool Sql_cmd_dml::prepare(THD *thd)
MYSQL_DML_START(thd);
- lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_DERIVED;
-
if (open_tables_for_query(thd, lex->query_tables, &table_count, 0,
get_dml_prelocking_strategy()))
{
@@ -32451,8 +32460,6 @@ bool Sql_cmd_dml::prepare(THD *thd)
if (prepare_inner(thd))
goto err;
- lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED;
-
set_prepared();
unit->set_prepared();
@@ -32528,6 +32535,7 @@ bool Sql_cmd_dml::execute(THD *thd)
{
if (lock_tables(thd, lex->query_tables, table_count, 0))
goto err;
+ query_cache_store_query(thd, thd->lex->query_tables);
}
unit->set_limit(select_lex);
@@ -32581,8 +32589,266 @@ bool Sql_cmd_dml::execute(THD *thd)
bool Sql_cmd_dml::execute_inner(THD *thd)
{
SELECT_LEX_UNIT *unit = &lex->unit;
- SELECT_LEX *select_lex= unit->first_select();
- JOIN *join= select_lex->join;
+ DBUG_ENTER("Sql_cmd_dml::execute_inner");
+
+ if (unit->is_unit_op() || unit->fake_select_lex)
+ {
+ if (unit->exec())
+ DBUG_RETURN(true);
+ }
+#if 1
+ else
+ {
+ SELECT_LEX *select_lex= unit->first_select();
+ if (select_lex->exec(thd))
+ DBUG_RETURN(true);
+ }
+#endif
+
+ DBUG_RETURN(false);
+}
+
+
+bool Sql_cmd_select::precheck(THD *thd)
+{
+ bool rc= false;
+
+ privilege_t privileges_requested= SELECT_ACL;
+
+ if (lex->exchange)
+ {
+ /*
+ lex->exchange != NULL implies SELECT .. INTO OUTFILE and this
+ requires FILE_ACL access.
+ */
+ privileges_requested|= FILE_ACL;
+ }
+
+ TABLE_LIST *tables = thd->lex->query_tables;
+
+ if (tables)
+ rc= check_table_access(thd, privileges_requested,
+ tables, false, UINT_MAX, false);
+ else
+ rc= check_access(thd, privileges_requested,
+ any_db.str, NULL, NULL, false, false);
+
+#ifdef WITH_WSREP
+ if (lex->sql_command == SQLCOM_SELECT)
+ {
+ WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_READ);
+ }
+ else
+ {
+ WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_SHOW);
+# ifdef ENABLED_PROFILING
+ if (lex->sql_command == SQLCOM_SHOW_PROFILE)
+ thd->profiling.discard_current_query();
+# endif
+ }
+#endif /* WITH_WSREP */
+
+ if (!rc)
+ return false;
+
+#ifdef WITH_WSREP
+wsrep_error_label:
+#endif
+ return true;
+}
+
+
+
+bool Sql_cmd_select::prepare_inner(THD *thd)
+{
+ bool rc= false;
+ LEX *lex= thd->lex;
+ TABLE_LIST *tables= lex->query_tables;
+ SELECT_LEX_UNIT *const unit = &lex->unit;
+
+ DBUG_ENTER("Sql_cmd_select::prepare_inner");
+
+ if (!thd->stmt_arena->is_stmt_prepare())
+ (void) read_statistics_for_tables_if_needed(thd, tables);
+
+ {
+ if (mysql_handle_derived(lex, DT_INIT))
+ DBUG_RETURN(TRUE);
+ }
+
+ if (thd->stmt_arena->is_stmt_prepare())
+ {
+ if (!result)
+ {
+ Query_arena backup;
+ Query_arena *arena= thd->activate_stmt_arena_if_needed(&backup);
+ result= new (thd->mem_root) select_send(thd);
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ thd->lex->result= result;
+ }
+ rc= unit->prepare(unit->derived, 0, 0);
+
+ }
+ else
+ {
+ if (lex->analyze_stmt)
+ {
+ if (result && result->result_interceptor())
+ result->result_interceptor()->disable_my_ok_calls();
+ else
+ {
+ DBUG_ASSERT(thd->protocol);
+ result= new (thd->mem_root) select_send_analyze(thd);
+ save_protocol= thd->protocol;
+ thd->protocol= new Protocol_discard(thd);
+ }
+ }
+ else if (!(result= lex->result))
+ result= new (thd->mem_root) select_send(thd);
+ if (!result)
+ DBUG_RETURN(TRUE);
+
+ SELECT_LEX *parameters = unit->global_parameters();
+ if (!parameters->limit_params.explicit_limit)
+ {
+ parameters->limit_params.select_limit =
+ new (thd->mem_root) Item_int(thd,
+ (ulonglong) thd->variables.select_limit);
+ if (parameters->limit_params.select_limit == NULL)
+ DBUG_RETURN(true); /* purecov: inspected */
+ }
+ ulonglong select_options= 0;
+ if (lex->describe)
+ select_options|= SELECT_DESCRIBE;
+ if (unit->is_unit_op() || unit->fake_select_lex)
+ select_options|= SELECT_NO_UNLOCK;
+ rc= unit->prepare(unit->derived, result, select_options);
+
+ if (rc && thd->lex->analyze_stmt && save_protocol)
+ {
+ delete thd->protocol;
+ thd->protocol= save_protocol;
+ }
+ }
+
+ DBUG_RETURN(rc);
+}
+
+
+bool Sql_cmd_select::execute_inner(THD *thd)
+{
+ DBUG_ENTER("Sql_cmd_select::execute_inner");
+
+ thd->status_var.last_query_cost= 0.0;
+
+ bool res= Sql_cmd_dml::execute_inner(thd);
+
+ res|= thd->is_error();
+ if (unlikely(res))
+ result->abort_result_set();
+
+ if (result != thd->lex->result)
+ {
+ delete result;
+ result= 0;
+ }
+
+ if (lex->analyze_stmt)
+ {
+ if (save_protocol)
+ {
+ delete thd->protocol;
+ thd->protocol= save_protocol;
+ }
+ if (!res)
+ res= thd->lex->explain->send_explain(thd);
+ }
+
+ if (thd->lex->describe)
+ {
+ if (!res)
+ res= thd->lex->explain->send_explain(thd);
+
+ if (!res && (thd->lex->describe & DESCRIBE_EXTENDED))
+ {
+ char buff[1024];
+ String str(buff,(uint32) sizeof(buff), system_charset_info);
+ str.length(0);
+ /*
+ The warnings system requires input in utf8, @see
+ mysqld_show_warnings().
+ */
+ lex->unit.print(&str, QT_EXPLAIN_EXTENDED);
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_YES, str.c_ptr_safe());
+ }
+ }
+
+ if (unlikely(thd->killed == ABORT_QUERY && !thd->no_errors))
+ {
+ /*
+ If LIMIT ROWS EXAMINED interrupted query execution, issue a warning,
+ continue with normal processing and produce an incomplete query result.
+ */
+ bool saved_abort_on_warning= thd->abort_on_warning;
+ thd->abort_on_warning= false;
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT,
+ ER_THD(thd, ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT),
+ thd->accessed_rows_and_keys -
+ thd->accessed_rows_and_keys_at_exec_start,
+ thd->lex->limit_rows_examined->val_uint());
+ thd->abort_on_warning= saved_abort_on_warning;
+ thd->reset_killed();
+ }
+ /* Disable LIMIT ROWS EXAMINED after query execution. */
+ thd->lex->limit_rows_examined_cnt= ULONGLONG_MAX;
+
+ /* Count number of empty select queries */
+ if (!thd->get_sent_row_count() && !res)
+ status_var_increment(thd->status_var.empty_queries);
+ else
+ status_var_add(thd->status_var.rows_sent, thd->get_sent_row_count());
+
+ DBUG_RETURN(res);
+}
+
+
+bool st_select_lex::prepare(THD *thd, select_result *result)
+{
+ ulonglong select_options= options | thd->variables.option_bits;
+
+ DBUG_ENTER("st_select_lex::prepare");
+
+ if (thd->lex->describe)
+ select_options|= SELECT_DESCRIBE;
+
+ if (!(join= new (thd->mem_root) JOIN(thd, item_list,
+ select_options, result)))
+ DBUG_RETURN(true);
+
+ SELECT_LEX_UNIT *unit= master_unit();
+
+ thd->lex->used_tables=0;
+
+ if (join->prepare(table_list.first, where,
+ order_list.elements + group_list.elements,
+ order_list.first, false, group_list.first,
+ having, thd->lex->proc_list.first,
+ this, unit))
+ DBUG_RETURN(true);
+
+ DBUG_RETURN(false);
+}
+
+
+bool st_select_lex::exec(THD *thd)
+{
+ DBUG_ENTER("st_select_lex::exec");
+
+ /* Look for a table owned by an engine with the select_handler interface */
+ pushdown_select= find_select_handler(thd);
if (join->optimize())
goto err;
@@ -32596,19 +32862,29 @@ bool Sql_cmd_dml::execute_inner(THD *thd)
if (unlikely(thd->is_error()))
goto err;
+ thd->get_stmt_da()->reset_current_row_for_warning(1);
+
+ if (master_unit()->outer_select() == NULL)
+ thd->accessed_rows_and_keys_at_exec_start= thd->accessed_rows_and_keys;
+
if (join->exec())
goto err;
if (thd->lex->describe & DESCRIBE_EXTENDED)
{
- select_lex->where= join->conds_history;
- select_lex->having= join->having_history;
+ where= join->conds_history;
+ having= join->having_history;
}
err:
- return join->error;
-}
+ if (pushdown_select)
+ {
+ delete pushdown_select;
+ pushdown_select= NULL;
+ }
+ DBUG_RETURN(join->error);
+}
/**
@} (end of group Query_Optimizer)
diff --git a/sql/sql_select.h b/sql/sql_select.h
index f908484..edbaed3 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -33,6 +33,8 @@
#include "records.h" /* READ_RECORD */
#include "opt_range.h" /* SQL_SELECT, QUICK_SELECT_I */
#include "filesort.h"
+#include "sql_base.h"
+#include "sql_cmd.h"
typedef struct st_join_table JOIN_TAB;
/* Values in optimize */
@@ -2629,4 +2631,39 @@ void propagate_new_equalities(THD *thd, Item *cond,
bool *is_simplifiable_cond);
bool dbug_user_var_equals_str(THD *thd, const char *name, const char *value);
+
+
+class Sql_cmd_select : public Sql_cmd_dml
+{
+public:
+ explicit Sql_cmd_select(select_result *result_arg)
+ : Sql_cmd_dml(), save_protocol(NULL)
+ { result= result_arg; }
+
+ enum_sql_command sql_command_code() const override
+ {
+ return SQLCOM_SELECT;
+ }
+
+ DML_prelocking_strategy *get_dml_prelocking_strategy()
+ {
+ return &dml_prelocking_strategy;
+ }
+
+protected:
+
+ bool precheck(THD *thd) override;
+
+ bool prepare_inner(THD *thd) override;
+
+ bool execute_inner(THD *thd) override;
+
+private:
+ /* The prelocking strategy used when opening the used tables */
+ DML_prelocking_strategy dml_prelocking_strategy;
+
+ Protocol *save_protocol;
+};
+
+
#endif /* SQL_SELECT_INCLUDED */
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index c1f28ba..0cc1e68 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -1322,6 +1322,24 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg,
describe= additional_options & SELECT_DESCRIBE;
+ if (describe)
+ {
+ for (SELECT_LEX *sl= first_sl; sl; sl= sl->next_select())
+ {
+ sl->set_explain_type(FALSE);
+ sl->options|= SELECT_DESCRIBE;
+ }
+ if (is_unit_op() || fake_select_lex)
+ {
+ if (union_needs_tmp_table() && fake_select_lex)
+ {
+ fake_select_lex->select_number= FAKE_SELECT_LEX_ID; // just for initialization
+ fake_select_lex->type= unit_operation_text[common_op()];
+ fake_select_lex->options|= SELECT_DESCRIBE;
+ }
+ }
+ }
+
/*
Save fake_select_lex in case we don't need it for anything but
global parameters.
@@ -2160,6 +2178,8 @@ bool st_select_lex_unit::exec()
bool was_executed= executed;
DBUG_ENTER("st_select_lex_unit::exec");
+ describe= thd->lex->describe;
+
if (executed && !uncacheable && !describe)
DBUG_RETURN(FALSE);
executed= 1;
@@ -2168,6 +2188,9 @@ bool st_select_lex_unit::exec()
item->make_const();
saved_error= optimize();
+
+ if (outer_select() == NULL)
+ thd->accessed_rows_and_keys_at_exec_start= thd->accessed_rows_and_keys;
create_explain_query_if_not_exists(thd->lex, thd->mem_root);
@@ -2239,6 +2262,7 @@ bool st_select_lex_unit::exec()
saved_error= sl->join->optimize();
}
}
+
if (likely(!saved_error))
{
records_at_start= table->file->stats.records;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 49655f8..886c0b6 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -71,6 +71,7 @@
#include "json_table.h"
#include "sql_update.h"
#include "sql_delete.h"
+#include "sql_insert.h"
/* this is to get the bison compilation windows warnings out */
#ifdef _MSC_VER
1
0