
[Commits] 70e12615732: Make rocksdb.range_locking test to be run in forward and backward-ordered CFs
by Sergei Petrunia 29 Apr '19
revision-id: 70e12615732d658939972b24da162de8d57086f0 (fb-prod201801-226-g70e12615732)
parent(s): cd66b2e5f14a40f581b639f64ed74ad3b0f9f37d
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2019-04-29 13:17:06 +0300
message:
Make rocksdb.range_locking test to be run in forward and backward-ordered CFs
(one test that makes assumptions about physical record ordering
is only enabled in forward ordered cfs)
---
mysql-test/suite/rocksdb/r/range_locking.result | 44 ++-
.../suite/rocksdb/r/range_locking_rev_cf.result | 341 +++++++++++++++++
mysql-test/suite/rocksdb/t/range_locking.inc | 414 +++++++++++++++++++++
mysql-test/suite/rocksdb/t/range_locking.test | 386 +------------------
.../suite/rocksdb/t/range_locking_rev_cf.test | 12 +
rocksdb | 2 +-
6 files changed, 803 insertions(+), 396 deletions(-)
diff --git a/mysql-test/suite/rocksdb/r/range_locking.result b/mysql-test/suite/rocksdb/r/range_locking.result
index 439e9c6fb00..0518260f53a 100644
--- a/mysql-test/suite/rocksdb/r/range_locking.result
+++ b/mysql-test/suite/rocksdb/r/range_locking.result
@@ -2,8 +2,9 @@ show variables like 'rocksdb_use_range_locking';
Variable_name Value
rocksdb_use_range_locking ON
create table t1 (
-pk int primary key,
-a int
+pk int,
+a int,
+primary key (pk) comment 'default'
) engine=rocksdb;
insert into t1 values
(10,10),(20,20),(30,30);
@@ -54,9 +55,10 @@ connection con1;
rollback;
## Test that locks are not released when a statement in the transaction
create table t2 (
-pk int primary key,
+pk int,
a int,
-unique key(a)
+primary key (pk) comment 'default',
+unique key(a) comment 'default'
) engine=rocksdb;
insert into t2 values (1,1),(2,2);
begin;
@@ -79,8 +81,9 @@ drop table t1;
# Test INFORMATION_SCHEMA.lock_info in range-locking mode
#
create table t1 (
-pk int primary key,
-a int
+pk int,
+a int,
+primary key (pk) comment 'default'
) engine=rocksdb;
insert into t1 values
(10,10),(20,20),(30,30);
@@ -117,7 +120,7 @@ create table t1 (
kp1 int not null,
kp2 int not null,
a int,
-primary key(kp1, kp2)
+primary key(kp1, kp2) comment 'default'
) engine=rocksdb;
insert into t1 select 1, a, 1234 from t0;
insert into t1 select 2, a, 1234 from t0;
@@ -157,7 +160,7 @@ create table t1 (
kp1 int not null,
kp2 int not null,
a int,
-key(kp1, kp2)
+key(kp1, kp2) comment 'default'
) engine=rocksdb;
insert into t1 select 1, a, 1234 from t0;
insert into t1 values (2, 3, 1234);
@@ -229,7 +232,11 @@ drop table t1;
#
# Same test as above, but check the range scan
#
-create table t1 (pk int primary key, a int) engine=rocksdb;
+create table t1 (
+pk int,
+a int,
+primary key (pk) comment 'default'
+) engine=rocksdb;
insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
connect con1,localhost,root,,;
# TRX1: Start, Allocate a snapshot
@@ -267,7 +274,11 @@ drop table t1;
#
# Same as above, but test SELECT FOR UPDATE.
#
-create table t1 (pk int primary key, a int) engine=rocksdb;
+create table t1 (
+pk int,
+a int,
+primary key (pk) comment 'default'
+) engine=rocksdb;
insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
connect con1,localhost,root,,;
# TRX1: Start, Allocate a snapshot
@@ -305,7 +316,12 @@ drop table t1;
# Another no-snapshot-checking test, this time for single-statement
# transaction
#
-create table t1 (pk int primary key, a int, name varchar(16)) engine=rocksdb;
+create table t1 (
+pk int,
+a int,
+name varchar(16),
+primary key(pk) comment 'default'
+) engine=rocksdb;
insert into t1 values (1,1, 'row1'), (2,2,'row2');
connect con1,localhost,root,,;
connection con1;
@@ -339,7 +355,11 @@ drop table t1;
#
# Check that I_S.processlist.state is set correctly now.
#
-create table t1(pk int primary key, a int) engine=rocksdb;
+create table t1(
+pk int,
+a int,
+primary key(pk) comment 'default'
+) engine=rocksdb;
insert into t1 values (1,1),(2,2),(3,3);
begin;
select * from t1 where pk=2 for update;
diff --git a/mysql-test/suite/rocksdb/r/range_locking_rev_cf.result b/mysql-test/suite/rocksdb/r/range_locking_rev_cf.result
new file mode 100644
index 00000000000..444c51162cd
--- /dev/null
+++ b/mysql-test/suite/rocksdb/r/range_locking_rev_cf.result
@@ -0,0 +1,341 @@
+show variables like 'rocksdb_use_range_locking';
+Variable_name Value
+rocksdb_use_range_locking ON
+create table t1 (
+pk int,
+a int,
+primary key (pk) comment 'rev:cf1'
+) engine=rocksdb;
+insert into t1 values
+(10,10),(20,20),(30,30);
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+### Test: check that range lock inhibits a point lock
+connection con1;
+begin;
+select * from t1 where pk between 5 and 25 for update;
+pk a
+10 10
+20 20
+connection con2;
+insert into t1 values (15,15);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+connection con1;
+rollback;
+## Test: check that range lock inhibits another range lock
+## (It should not, but currently it does)
+connection con1;
+begin;
+select * from t1 where pk between 5 and 25 for update;
+pk a
+10 10
+20 20
+connection con2;
+begin;
+select * from t1 where pk between 15 and 35 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+rollback;
+connection con1;
+rollback;
+## Test: check that regular read does not get a range lock
+connection con1;
+begin;
+select * from t1 where pk between 5 and 25;
+pk a
+10 10
+20 20
+connection con2;
+begin;
+select * from t1 where pk between 15 and 35 for update;
+pk a
+20 20
+30 30
+rollback;
+connection con1;
+rollback;
+## Test that locks are not released when a statement in the transaction
+create table t2 (
+pk int,
+a int,
+primary key (pk) comment 'rev:cf1',
+unique key(a) comment ''
+) engine=rocksdb;
+insert into t2 values (1,1),(2,2);
+begin;
+insert into t2 values (3,3);
+insert into t2 values (10,2);
+ERROR 23000: Duplicate entry '2' for key 'a'
+connection con2;
+begin;
+select * from t2 where pk=3 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t2.PRIMARY
+rollback;
+connection con1;
+rollback;
+drop table t2;
+connection default;
+disconnect con1;
+disconnect con2;
+drop table t1;
+#
+# Test INFORMATION_SCHEMA.lock_info in range-locking mode
+#
+create table t1 (
+pk int,
+a int,
+primary key (pk) comment 'rev:cf1'
+) engine=rocksdb;
+insert into t1 values
+(10,10),(20,20),(30,30);
+begin;
+select * from t1 where pk=10 for update;
+pk a
+10 10
+select * from information_schema.rocksdb_locks;
+COLUMN_FAMILY_ID TRANSACTION_ID KEY MODE
+0 15 00000001078000000a X
+delete from t1 where pk between 25 and 40;
+select * from information_schema.rocksdb_locks;
+COLUMN_FAMILY_ID TRANSACTION_ID KEY MODE
+0 15 000000010780000019 - 010000010780000028 X
+0 15 00000001078000000a X
+rollback;
+begin;
+# The following will show a range lock on 2-9 and also a point lock on 10.
+# This is how things currently work.
+select * from t1 where pk between 2 and 9 for update;
+pk a
+select * from information_schema.rocksdb_locks;
+COLUMN_FAMILY_ID TRANSACTION_ID KEY MODE
+0 16 00000001078000000a X
+0 16 000000010780000002 - 010000010780000009 X
+rollback;
+drop table t1;
+#
+# MDEV-18104: MyRocks-Gap-Lock: range locking bounds are incorrect for multi-part keys
+#
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+kp1 int not null,
+kp2 int not null,
+a int,
+primary key(kp1, kp2) comment 'rev:cf1'
+) engine=rocksdb;
+insert into t1 select 1, a, 1234 from t0;
+insert into t1 select 2, a, 1234 from t0;
+insert into t1 select 3, a, 1234 from t0;
+connect con1,localhost,root,,;
+connection con1;
+begin;
+select * from t1 where kp1=2 for update;
+kp1 kp2 a
+2 0 1234
+2 1 1234
+2 2 1234
+2 3 1234
+2 4 1234
+2 5 1234
+2 6 1234
+2 7 1234
+2 8 1234
+2 9 1234
+connection default;
+# The lock on kp1=2 should inhibit the following INSERT:
+insert into t1 values ( 2,5,9999);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.PRIMARY
+rollback;
+connection con1;
+rollback;
+connection default;
+disconnect con1;
+drop table t0,t1;
+#
+# Test that locks on ranges on non-unique secondary keys inhibit
+# modifications of the contents of these ranges
+#
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+kp1 int not null,
+kp2 int not null,
+a int,
+key(kp1, kp2) comment 'rev:cf1'
+) engine=rocksdb;
+insert into t1 select 1, a, 1234 from t0;
+insert into t1 values (2, 3, 1234);
+insert into t1 values (2, 5, 1234);
+insert into t1 values (2, 7, 1234);
+insert into t1 select 3, a, 1234 from t0;
+connect con1,localhost,root,,;
+connection con1;
+begin;
+explain
+select * from t1 where kp1=2 for update;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref kp1 kp1 4 const # NULL
+select * from t1 where kp1=2 for update;
+kp1 kp2 a
+2 3 1234
+2 5 1234
+2 7 1234
+connection default;
+begin;
+insert into t1 values (2, 9, 9999);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.kp1
+delete from t1 where kp1=2 and kp2=5;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.kp1
+update t1 set kp1=333 where kp1=2 and kp2=3;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.kp1
+update t1 set kp1=2 where kp1=1 and kp2=8;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.kp1
+rollback;
+connection con1;
+rollback;
+disconnect con1;
+connection default;
+drop table t0,t1;
+#
+# Transaction isolation test
+#
+create table t1 (pk int primary key, a int) engine=rocksdb;
+insert into t1 values (1,1),(2,2),(3,3);
+connect con1,localhost,root,,;
+# TRX1: Start, Allocate a snapshot
+connection con1;
+begin;
+select * from t1;
+pk a
+1 1
+2 2
+3 3
+# TRX2: Make a change that TRX1 will not see
+connection default;
+update t1 set a=2222 where pk=2;
+# TRX1: Now, make a change that would overwrite TRX2'x change and commit
+connection con1;
+update t1 set a=a+1 where pk=2;
+commit;
+# Examine the result:
+# pk=2, a=2223 means UPDATE in TRX1 used "read committed" (InnoDB-like isolation)
+# pk=2, a=3 means UPDATE in TRX1 silently overwrote TRX2
+# (and with key tracking, one would get an error on the second UPDATE)
+connection default;
+select * from t1;
+pk a
+1 1
+2 2223
+3 3
+disconnect con1;
+connection default;
+drop table t1;
+#
+# Same test as above, but check the range scan
+#
+create table t1 (
+pk int,
+a int,
+primary key (pk) comment 'rev:cf1'
+) engine=rocksdb;
+insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
+connect con1,localhost,root,,;
+# TRX1: Start, Allocate a snapshot
+connection con1;
+begin;
+select * from t1;
+pk a
+6 6
+5 5
+4 4
+3 3
+2 2
+1 1
+# TRX2: Make a change that TRX1 will not see
+connection default;
+update t1 set a=2222 where pk between 3 and 5;
+# TRX1: Now, make a change that would overwrite TRX2'x change and commit
+connection con1;
+update t1 set a=a+1 where pk between 3 and 5;
+commit;
+# Examine the result:
+# pk={3,4,5} a=2223 means UPDATE in TRX1 used "read committed" (InnoDB-like isolation)
+connection default;
+select * from t1;
+pk a
+6 6
+5 2223
+4 2223
+3 2223
+2 2
+1 1
+disconnect con1;
+connection default;
+drop table t1;
+#
+# Same as above, but test SELECT FOR UPDATE.
+#
+create table t1 (
+pk int,
+a int,
+primary key (pk) comment 'rev:cf1'
+) engine=rocksdb;
+insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
+connect con1,localhost,root,,;
+# TRX1: Start, Allocate a snapshot
+connection con1;
+begin;
+select * from t1;
+pk a
+6 6
+5 5
+4 4
+3 3
+2 2
+1 1
+# TRX2: Make a change that TRX1 will not see
+connection default;
+update t1 set a=222 where pk=2;
+update t1 set a=333 where pk=3;
+# TRX1: Check what select [FOR UPDATE] sees
+connection con1;
+select * from t1 where pk in (2,3);
+pk a
+2 2
+3 3
+select * from t1 where pk=2 for update;
+pk a
+2 222
+select * from t1 where pk=2;
+pk a
+2 2
+commit;
+disconnect con1;
+connection default;
+drop table t1;
+#
+# Check that I_S.processlist.state is set correctly now.
+#
+create table t1(
+pk int,
+a int,
+primary key(pk) comment 'rev:cf1'
+) engine=rocksdb;
+insert into t1 values (1,1),(2,2),(3,3);
+begin;
+select * from t1 where pk=2 for update;
+pk a
+2 2
+connect con1,localhost,root,,;
+begin;
+set rocksdb_lock_wait_timeout=300;
+select * from t1 where pk=2 for update;
+connection default;
+# Now, will wait until we see con1 have state="Waiting for row lock"
+rollback;
+connection con1;
+pk a
+2 2
+rollback;
+disconnect con1;
+connection default;
+drop table t1;
diff --git a/mysql-test/suite/rocksdb/t/range_locking.inc b/mysql-test/suite/rocksdb/t/range_locking.inc
new file mode 100644
index 00000000000..9c826811695
--- /dev/null
+++ b/mysql-test/suite/rocksdb/t/range_locking.inc
@@ -0,0 +1,414 @@
+#
+# Range locking tests.
+#
+
+--source include/have_rocksdb.inc
+--source suite/rocksdb/include/have_range_locking.inc
+
+--enable_connect_log
+
+
+show variables like 'rocksdb_use_range_locking';
+
+# Test 1: see that ranges are indeed lockied.
+
+eval create table t1 (
+ pk int,
+ a int,
+ primary key (pk) comment '$pk_cf'
+) engine=rocksdb;
+
+insert into t1 values
+(10,10),(20,20),(30,30);
+
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+
+--echo ### Test: check that range lock inhibits a point lock
+connection con1;
+begin;
+select * from t1 where pk between 5 and 25 for update;
+
+connection con2;
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values (15,15);
+
+connection con1;
+rollback;
+
+--echo ## Test: check that range lock inhibits another range lock
+--echo ## (It should not, but currently it does)
+connection con1;
+begin;
+select * from t1 where pk between 5 and 25 for update;
+
+connection con2;
+begin;
+--error ER_LOCK_WAIT_TIMEOUT
+select * from t1 where pk between 15 and 35 for update;
+rollback;
+
+connection con1;
+rollback;
+
+--echo ## Test: check that regular read does not get a range lock
+connection con1;
+begin;
+select * from t1 where pk between 5 and 25;
+
+connection con2;
+begin;
+# This must not block
+select * from t1 where pk between 15 and 35 for update;
+rollback;
+
+connection con1;
+rollback;
+
+--echo ## Test that locks are not released when a statement in the transaction
+eval
+create table t2 (
+ pk int,
+ a int,
+ primary key (pk) comment '$pk_cf',
+ unique key(a) comment '$sk_cf'
+) engine=rocksdb;
+
+insert into t2 values (1,1),(2,2);
+
+begin;
+insert into t2 values (3,3);
+--error ER_DUP_ENTRY
+insert into t2 values (10,2);
+
+connection con2;
+begin;
+# This must time out:
+--error ER_LOCK_WAIT_TIMEOUT
+select * from t2 where pk=3 for update;
+
+rollback;
+connection con1;
+rollback;
+drop table t2;
+
+# Cleanup
+connection default;
+disconnect con1;
+disconnect con2;
+drop table t1;
+
+--echo #
+--echo # Test INFORMATION_SCHEMA.lock_info in range-locking mode
+--echo #
+eval
+create table t1 (
+ pk int,
+ a int,
+ primary key (pk) comment '$pk_cf'
+) engine=rocksdb;
+
+insert into t1 values
+(10,10),(20,20),(30,30);
+
+begin;
+select * from t1 where pk=10 for update;
+
+select * from information_schema.rocksdb_locks;
+
+delete from t1 where pk between 25 and 40;
+select * from information_schema.rocksdb_locks;
+
+rollback;
+begin;
+--echo # The following will show a range lock on 2-9 and also a point lock on 10.
+--echo # This is how things currently work.
+select * from t1 where pk between 2 and 9 for update;
+select * from information_schema.rocksdb_locks;
+rollback;
+
+drop table t1;
+
+--echo #
+--echo # MDEV-18104: MyRocks-Gap-Lock: range locking bounds are incorrect for multi-part keys
+--echo #
+
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+eval
+create table t1 (
+ kp1 int not null,
+ kp2 int not null,
+ a int,
+ primary key(kp1, kp2) comment '$pk_cf'
+) engine=rocksdb;
+
+insert into t1 select 1, a, 1234 from t0;
+insert into t1 select 2, a, 1234 from t0;
+insert into t1 select 3, a, 1234 from t0;
+
+connect (con1,localhost,root,,);
+connection con1;
+
+begin;
+select * from t1 where kp1=2 for update;
+
+connection default;
+--echo # The lock on kp1=2 should inhibit the following INSERT:
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values ( 2,5,9999);
+rollback;
+
+connection con1;
+rollback;
+connection default;
+disconnect con1;
+drop table t0,t1;
+
+--echo #
+--echo # Test that locks on ranges on non-unique secondary keys inhibit
+--echo # modifications of the contents of these ranges
+--echo #
+
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+eval
+create table t1 (
+ kp1 int not null,
+ kp2 int not null,
+ a int,
+ key(kp1, kp2) comment '$pk_cf'
+) engine=rocksdb;
+
+insert into t1 select 1, a, 1234 from t0;
+insert into t1 values (2, 3, 1234);
+insert into t1 values (2, 5, 1234);
+insert into t1 values (2, 7, 1234);
+insert into t1 select 3, a, 1234 from t0;
+
+connect (con1,localhost,root,,);
+connection con1;
+begin;
+--replace_column 9 #
+explain
+select * from t1 where kp1=2 for update;
+select * from t1 where kp1=2 for update;
+
+connection default;
+begin;
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values (2, 9, 9999);
+
+--error ER_LOCK_WAIT_TIMEOUT
+delete from t1 where kp1=2 and kp2=5;
+
+# Update that "moves a row away" from the locked range
+--error ER_LOCK_WAIT_TIMEOUT
+update t1 set kp1=333 where kp1=2 and kp2=3;
+
+# Update that "moves a row into" the locked range
+--error ER_LOCK_WAIT_TIMEOUT
+update t1 set kp1=2 where kp1=1 and kp2=8;
+
+rollback;
+
+connection con1;
+rollback;
+disconnect con1;
+connection default;
+drop table t0,t1;
+
+--echo #
+--echo # Transaction isolation test
+--echo #
+
+create table t1 (pk int primary key, a int) engine=rocksdb;
+insert into t1 values (1,1),(2,2),(3,3);
+
+connect (con1,localhost,root,,);
+
+--echo # TRX1: Start, Allocate a snapshot
+connection con1;
+begin;
+select * from t1;
+
+--echo # TRX2: Make a change that TRX1 will not see
+connection default;
+update t1 set a=2222 where pk=2;
+
+--echo # TRX1: Now, make a change that would overwrite TRX2'x change and commit
+connection con1;
+update t1 set a=a+1 where pk=2;
+commit;
+
+--echo # Examine the result:
+--echo # pk=2, a=2223 means UPDATE in TRX1 used "read committed" (InnoDB-like isolation)
+--echo # pk=2, a=3 means UPDATE in TRX1 silently overwrote TRX2
+--echo # (and with key tracking, one would get an error on the second UPDATE)
+connection default;
+select * from t1;
+
+disconnect con1;
+connection default;
+drop table t1;
+
+--echo #
+--echo # Same test as above, but check the range scan
+--echo #
+
+eval
+create table t1 (
+ pk int,
+ a int,
+ primary key (pk) comment '$pk_cf'
+) engine=rocksdb;
+insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
+
+connect (con1,localhost,root,,);
+
+--echo # TRX1: Start, Allocate a snapshot
+connection con1;
+begin;
+select * from t1;
+
+--echo # TRX2: Make a change that TRX1 will not see
+connection default;
+update t1 set a=2222 where pk between 3 and 5;
+
+--echo # TRX1: Now, make a change that would overwrite TRX2'x change and commit
+connection con1;
+update t1 set a=a+1 where pk between 3 and 5;
+commit;
+
+--echo # Examine the result:
+--echo # pk={3,4,5} a=2223 means UPDATE in TRX1 used "read committed" (InnoDB-like isolation)
+connection default;
+select * from t1;
+
+disconnect con1;
+connection default;
+drop table t1;
+
+--echo #
+--echo # Same as above, but test SELECT FOR UPDATE.
+--echo #
+eval
+create table t1 (
+ pk int,
+ a int,
+ primary key (pk) comment '$pk_cf'
+) engine=rocksdb;
+insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
+
+connect (con1,localhost,root,,);
+
+--echo # TRX1: Start, Allocate a snapshot
+connection con1;
+begin;
+select * from t1;
+
+--echo # TRX2: Make a change that TRX1 will not see
+connection default;
+update t1 set a=222 where pk=2;
+update t1 set a=333 where pk=3;
+
+--echo # TRX1: Check what select [FOR UPDATE] sees
+connection con1;
+select * from t1 where pk in (2,3);
+select * from t1 where pk=2 for update;
+select * from t1 where pk=2;
+
+commit;
+
+disconnect con1;
+connection default;
+drop table t1;
+
+if (!$PK_USES_REVERSE_CF) {
+--echo #
+--echo # Another no-snapshot-checking test, this time for single-statement
+--echo # transaction
+--echo #
+eval
+create table t1 (
+ pk int,
+ a int,
+ name varchar(16),
+ primary key(pk) comment '$pk_cf'
+) engine=rocksdb;
+insert into t1 values (1,1, 'row1'), (2,2,'row2');
+
+connect (con1,localhost,root,,);
+connection con1;
+select get_lock('row1', 100);
+
+connection default;
+
+--echo # The following will read the first row (1,1,'row1'), and stop.
+
+send update t1 set a=a+100 where get_lock(name, 1000)=1;
+
+# Wait till the default connection has stopped:
+connection con1;
+
+let $wait_condition=
+ SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = "User lock"
+ AND INFO = "update t1 set a=a+100 where get_lock(name, 1000)=1";
+--source include/wait_condition.inc
+
+# Update the second row
+update t1 set a=5 where pk=2;
+
+select release_lock('row1');
+
+connection default;
+reap;
+
+--echo # Look at the row with pk=2:
+--echo # 2, 105, row2 - means the UPDATE was reading current data (Correct)
+--echo # 2, 102, row - means the UPDATE read the snapshot (incorrect)
+select * from t1;
+
+--echo # Try releasing both locks (in 5.6, we will be holding only the second one)
+select release_lock(name) from t1;
+
+disconnect con1;
+connection default;
+drop table t1;
+}
+
+--echo #
+--echo # Check that I_S.processlist.state is set correctly now.
+--echo #
+eval
+create table t1(
+ pk int,
+ a int,
+ primary key(pk) comment '$pk_cf'
+) engine=rocksdb;
+insert into t1 values (1,1),(2,2),(3,3);
+
+begin;
+select * from t1 where pk=2 for update;
+
+--connect (con1,localhost,root,,)
+begin;
+set rocksdb_lock_wait_timeout=300;
+send select * from t1 where pk=2 for update;
+
+connection default;
+--echo # Now, will wait until we see con1 have state="Waiting for row lock"
+let $wait_condition=
+ SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = "Waiting for row lock"
+ AND INFO = "select * from t1 where pk=2 for update";
+--source include/wait_condition.inc
+
+rollback;
+connection con1;
+--reap
+rollback;
+
+disconnect con1;
+connection default;
+drop table t1;
+
diff --git a/mysql-test/suite/rocksdb/t/range_locking.test b/mysql-test/suite/rocksdb/t/range_locking.test
index df926a2920b..694b13215af 100644
--- a/mysql-test/suite/rocksdb/t/range_locking.test
+++ b/mysql-test/suite/rocksdb/t/range_locking.test
@@ -1,386 +1,6 @@
-#
-# Range locking tests.
-#
---source include/have_rocksdb.inc
---source suite/rocksdb/include/have_range_locking.inc
---enable_connect_log
-
-
-show variables like 'rocksdb_use_range_locking';
-
-# Test 1: see that ranges are indeed lockied.
-
-create table t1 (
- pk int primary key,
- a int
-) engine=rocksdb;
-
-insert into t1 values
-(10,10),(20,20),(30,30);
-
-connect (con1,localhost,root,,);
-connect (con2,localhost,root,,);
-
---echo ### Test: check that range lock inhibits a point lock
-connection con1;
-begin;
-select * from t1 where pk between 5 and 25 for update;
-
-connection con2;
---error ER_LOCK_WAIT_TIMEOUT
-insert into t1 values (15,15);
-
-connection con1;
-rollback;
-
---echo ## Test: check that range lock inhibits another range lock
---echo ## (It should not, but currently it does)
-connection con1;
-begin;
-select * from t1 where pk between 5 and 25 for update;
-
-connection con2;
-begin;
---error ER_LOCK_WAIT_TIMEOUT
-select * from t1 where pk between 15 and 35 for update;
-rollback;
-
-connection con1;
-rollback;
-
---echo ## Test: check that regular read does not get a range lock
-connection con1;
-begin;
-select * from t1 where pk between 5 and 25;
-
-connection con2;
-begin;
-# This must not block
-select * from t1 where pk between 15 and 35 for update;
-rollback;
-
-connection con1;
-rollback;
-
---echo ## Test that locks are not released when a statement in the transaction
-create table t2 (
- pk int primary key,
- a int,
- unique key(a)
-) engine=rocksdb;
-
-insert into t2 values (1,1),(2,2);
-
-begin;
-insert into t2 values (3,3);
---error ER_DUP_ENTRY
-insert into t2 values (10,2);
-
-connection con2;
-begin;
-# This must time out:
---error ER_LOCK_WAIT_TIMEOUT
-select * from t2 where pk=3 for update;
-
-rollback;
-connection con1;
-rollback;
-drop table t2;
-
-# Cleanup
-connection default;
-disconnect con1;
-disconnect con2;
-drop table t1;
-
---echo #
---echo # Test INFORMATION_SCHEMA.lock_info in range-locking mode
---echo #
-create table t1 (
- pk int primary key,
- a int
-) engine=rocksdb;
-
-insert into t1 values
-(10,10),(20,20),(30,30);
-
-begin;
-select * from t1 where pk=10 for update;
-
-select * from information_schema.rocksdb_locks;
-
-delete from t1 where pk between 25 and 40;
-select * from information_schema.rocksdb_locks;
-
-rollback;
-begin;
---echo # The following will show a range lock on 2-9 and also a point lock on 10.
---echo # This is how things currently work.
-select * from t1 where pk between 2 and 9 for update;
-select * from information_schema.rocksdb_locks;
-rollback;
-
-drop table t1;
-
---echo #
---echo # MDEV-18104: MyRocks-Gap-Lock: range locking bounds are incorrect for multi-part keys
---echo #
-
-create table t0(a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (
- kp1 int not null,
- kp2 int not null,
- a int,
- primary key(kp1, kp2)
-) engine=rocksdb;
-
-insert into t1 select 1, a, 1234 from t0;
-insert into t1 select 2, a, 1234 from t0;
-insert into t1 select 3, a, 1234 from t0;
-
-connect (con1,localhost,root,,);
-connection con1;
-
-begin;
-select * from t1 where kp1=2 for update;
-
-connection default;
---echo # The lock on kp1=2 should inhibit the following INSERT:
---error ER_LOCK_WAIT_TIMEOUT
-insert into t1 values ( 2,5,9999);
-rollback;
-
-connection con1;
-rollback;
-connection default;
-disconnect con1;
-drop table t0,t1;
-
---echo #
---echo # Test that locks on ranges on non-unique secondary keys inhibit
---echo # modifications of the contents of these ranges
---echo #
-
-create table t0(a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (
- kp1 int not null,
- kp2 int not null,
- a int,
- key(kp1, kp2)
-) engine=rocksdb;
-
-insert into t1 select 1, a, 1234 from t0;
-insert into t1 values (2, 3, 1234);
-insert into t1 values (2, 5, 1234);
-insert into t1 values (2, 7, 1234);
-insert into t1 select 3, a, 1234 from t0;
-
-connect (con1,localhost,root,,);
-connection con1;
-begin;
---replace_column 9 #
-explain
-select * from t1 where kp1=2 for update;
-select * from t1 where kp1=2 for update;
-
-connection default;
-begin;
---error ER_LOCK_WAIT_TIMEOUT
-insert into t1 values (2, 9, 9999);
-
---error ER_LOCK_WAIT_TIMEOUT
-delete from t1 where kp1=2 and kp2=5;
-
-# Update that "moves a row away" from the locked range
---error ER_LOCK_WAIT_TIMEOUT
-update t1 set kp1=333 where kp1=2 and kp2=3;
-
-# Update that "moves a row into" the locked range
---error ER_LOCK_WAIT_TIMEOUT
-update t1 set kp1=2 where kp1=1 and kp2=8;
-
-rollback;
-
-connection con1;
-rollback;
-disconnect con1;
-connection default;
-drop table t0,t1;
-
---echo #
---echo # Transaction isolation test
---echo #
-
-create table t1 (pk int primary key, a int) engine=rocksdb;
-insert into t1 values (1,1),(2,2),(3,3);
-
-connect (con1,localhost,root,,);
-
---echo # TRX1: Start, Allocate a snapshot
-connection con1;
-begin;
-select * from t1;
-
---echo # TRX2: Make a change that TRX1 will not see
-connection default;
-update t1 set a=2222 where pk=2;
-
---echo # TRX1: Now, make a change that would overwrite TRX2'x change and commit
-connection con1;
-update t1 set a=a+1 where pk=2;
-commit;
-
---echo # Examine the result:
---echo # pk=2, a=2223 means UPDATE in TRX1 used "read committed" (InnoDB-like isolation)
---echo # pk=2, a=3 means UPDATE in TRX1 silently overwrote TRX2
---echo # (and with key tracking, one would get an error on the second UPDATE)
-connection default;
-select * from t1;
-
-disconnect con1;
-connection default;
-drop table t1;
-
---echo #
---echo # Same test as above, but check the range scan
---echo #
-
-create table t1 (pk int primary key, a int) engine=rocksdb;
-insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
-
-connect (con1,localhost,root,,);
-
---echo # TRX1: Start, Allocate a snapshot
-connection con1;
-begin;
-select * from t1;
-
---echo # TRX2: Make a change that TRX1 will not see
-connection default;
-update t1 set a=2222 where pk between 3 and 5;
-
---echo # TRX1: Now, make a change that would overwrite TRX2'x change and commit
-connection con1;
-update t1 set a=a+1 where pk between 3 and 5;
-commit;
-
---echo # Examine the result:
---echo # pk={3,4,5} a=2223 means UPDATE in TRX1 used "read committed" (InnoDB-like isolation)
-connection default;
-select * from t1;
-
-disconnect con1;
-connection default;
-drop table t1;
-
---echo #
---echo # Same as above, but test SELECT FOR UPDATE.
---echo #
-create table t1 (pk int primary key, a int) engine=rocksdb;
-insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
-
-connect (con1,localhost,root,,);
-
---echo # TRX1: Start, Allocate a snapshot
-connection con1;
-begin;
-select * from t1;
-
---echo # TRX2: Make a change that TRX1 will not see
-connection default;
-update t1 set a=222 where pk=2;
-update t1 set a=333 where pk=3;
-
---echo # TRX1: Check what select [FOR UPDATE] sees
-connection con1;
-select * from t1 where pk in (2,3);
-select * from t1 where pk=2 for update;
-select * from t1 where pk=2;
-
-commit;
-
-disconnect con1;
-connection default;
-drop table t1;
-
---echo #
---echo # Another no-snapshot-checking test, this time for single-statement
---echo # transaction
---echo #
-
-create table t1 (pk int primary key, a int, name varchar(16)) engine=rocksdb;
-insert into t1 values (1,1, 'row1'), (2,2,'row2');
-
-connect (con1,localhost,root,,);
-connection con1;
-select get_lock('row1', 100);
-
-connection default;
-
---echo # The following will read the first row (1,1,'row1'), and stop.
-
-send update t1 set a=a+100 where get_lock(name, 1000)=1;
-
-# Wait till the default connection has stopped:
-connection con1;
-
-let $wait_condition=
- SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = "User lock"
- AND INFO = "update t1 set a=a+100 where get_lock(name, 1000)=1";
---source include/wait_condition.inc
-
-# Update the second row
-update t1 set a=5 where pk=2;
-
-select release_lock('row1');
-
-connection default;
-reap;
-
---echo # Look at the row with pk=2:
---echo # 2, 105, row2 - means the UPDATE was reading current data (Correct)
---echo # 2, 102, row - means the UPDATE read the snapshot (incorrect)
-select * from t1;
-
---echo # Try releasing both locks (in 5.6, we will be holding only the second one)
-select release_lock(name) from t1;
-
-disconnect con1;
-connection default;
-drop table t1;
-
-
---echo #
---echo # Check that I_S.processlist.state is set correctly now.
---echo #
-create table t1(pk int primary key, a int) engine=rocksdb;
-insert into t1 values (1,1),(2,2),(3,3);
-
-begin;
-select * from t1 where pk=2 for update;
-
---connect (con1,localhost,root,,)
-begin;
-set rocksdb_lock_wait_timeout=300;
-send select * from t1 where pk=2 for update;
-
-connection default;
---echo # Now, will wait until we see con1 have state="Waiting for row lock"
-let $wait_condition=
- SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = "Waiting for row lock"
- AND INFO = "select * from t1 where pk=2 for update";
---source include/wait_condition.inc
-
-rollback;
-connection con1;
---reap
-rollback;
-
-disconnect con1;
-connection default;
-drop table t1;
+--let pk_cf=default
+--let sk_cf=default
+--source range_locking.inc
diff --git a/mysql-test/suite/rocksdb/t/range_locking_rev_cf.test b/mysql-test/suite/rocksdb/t/range_locking_rev_cf.test
new file mode 100644
index 00000000000..8b993764235
--- /dev/null
+++ b/mysql-test/suite/rocksdb/t/range_locking_rev_cf.test
@@ -0,0 +1,12 @@
+#
+# Range locking tests.
+#
+
+--source include/have_rocksdb.inc
+--source suite/rocksdb/include/have_range_locking.inc
+
+--let pk_cf=rev:cf1
+--let PK_USES_REVERSE_CF=1
+
+--source range_locking.inc
+
diff --git a/rocksdb b/rocksdb
index 248acd10346..9af13f0e966 160000
--- a/rocksdb
+++ b/rocksdb
@@ -1 +1 @@
-Subproject commit 248acd10346428b078aed780183ffe7f9f3c6896
+Subproject commit 9af13f0e966171ef8688484b2da7fa576a1b4574

[Commits] 9af13f0e9: Temporary: range locking should still assume forward ordering
by Sergei Petrunia 29 Apr '19
revision-id: 9af13f0e966171ef8688484b2da7fa576a1b4574 (v5.8-1037-g9af13f0e9)
parent(s): 248acd10346428b078aed780183ffe7f9f3c6896
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2019-04-29 13:13:38 +0300
message:
Temporary: range locking should still assume forward ordering
---
utilities/transactions/transaction_lock_mgr.cc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc
index 9b6fd9381..c98803acb 100644
--- a/utilities/transactions/transaction_lock_mgr.cc
+++ b/utilities/transactions/transaction_lock_mgr.cc
@@ -1204,7 +1204,7 @@ void RangeLockMgr::AddColumnFamily(const ColumnFamilyHandle *cfh) {
if (!strcmp(cfh->GetComparator()->Name(), "RocksDB_SE_v3.10"))
ltree_cmp = &fw_cmp_;
else if (!strcmp(cfh->GetComparator()->Name(),"rev:RocksDB_SE_v3.10"))
- ltree_cmp = &bw_cmp_;
+ ltree_cmp = &fw_cmp_; // temporary: use the same ordering
else {
assert(false);
ltree_cmp= nullptr;

[Commits] a529188: MDEV-17456 Malicious SUPER user can possibly change audit log configuration without leaving traces.
by holyfoot@askmonty.org 28 Apr '19
revision-id: a529188e05da8060c95eeb4c8caef05adbd6cc6a (mariadb-10.2.23-88-ga529188)
parent(s): cd26cdcd974725031e30393ff165fb0dfb365c4d
committer: Alexey Botchkov
timestamp: 2019-04-29 01:25:17 +0400
message:
MDEV-17456 Malicious SUPER user can possibly change audit log configuration without leaving traces.
The 'SET server_audit_logging' statements should be logged no matter what.
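
The idea can be illustrated with a small standalone sketch (simplified types and names, not the plugin's actual structures): a per-connection log_always flag is raised around the forced logging call so that the event filter lets the statement through, and is lowered again afterwards so later statements obey the configured filter.

// Minimal sketch of the log_always idea; connection_info/log_statement are
// simplified stand-ins for the plugin's structures, not its real API.
#include <cstdio>
#include <string>

struct connection_info {
  std::string query;
  bool log_always = false;   // raised only around a forced log call
};

enum { EVENT_QUERY = 1 << 0 };
static unsigned events = 0;  // as if server_audit_events excludes QUERY

static void log_statement(const connection_info &cn) {
  // A QUERY event is normally skipped when the filter does not select it;
  // log_always overrides that decision for this single call.
  if (!(events & EVENT_QUERY) && !cn.log_always)
    return;
  std::printf("AUDIT: %s\n", cn.query.c_str());
}

int main() {
  connection_info cn;
  cn.query = "set global server_audit_logging= off";
  log_statement(cn);      // filtered out: nothing is written
  cn.log_always = true;   // force the SET statement into the log
  log_statement(cn);      // written despite the filter
  cn.log_always = false;  // restore, so later statements obey the filter
  return 0;
}

In the patch itself this corresponds to raising cn->log_always around the log_statement_ex() call in log_current_query(), as shown in the diff below.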
---
mysql-test/suite/plugins/r/server_audit.result | 3 +++
mysql-test/suite/plugins/t/server_audit.test | 3 +++
plugin/server_audit/server_audit.c | 7 ++++---
3 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/mysql-test/suite/plugins/r/server_audit.result b/mysql-test/suite/plugins/r/server_audit.result
index 0709444..b8d2986 100644
--- a/mysql-test/suite/plugins/r/server_audit.result
+++ b/mysql-test/suite/plugins/r/server_audit.result
@@ -212,6 +212,8 @@ select 2;
2
2
drop table t1;
+set global server_audit_logging= off;
+set global server_audit_logging= on;
set global server_audit_events='';
set global server_audit_query_log_limit= 15;
select (1), (2), (3), (4);
@@ -378,6 +380,7 @@ TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'SET PASSWORD FOR u1=<secret>',ID
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'CREATE USER u3 IDENTIFIED BY *****',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop user u1, u2, u3',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'insert into t1 values (1), (2)',0
+TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_logging= off',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_events=\'\'',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global serv',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'select (1), (2)',0
diff --git a/mysql-test/suite/plugins/t/server_audit.test b/mysql-test/suite/plugins/t/server_audit.test
index fa23dc5..f19c8f5 100644
--- a/mysql-test/suite/plugins/t/server_audit.test
+++ b/mysql-test/suite/plugins/t/server_audit.test
@@ -136,6 +136,9 @@ select * from t1;
select 2;
drop table t1;
+set global server_audit_logging= off;
+set global server_audit_logging= on;
+
set global server_audit_events='';
set global server_audit_query_log_limit= 15;
diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c
index 2f9cd99..f03564e 100644
--- a/plugin/server_audit/server_audit.c
+++ b/plugin/server_audit/server_audit.c
@@ -15,7 +15,7 @@
#define PLUGIN_VERSION 0x104
-#define PLUGIN_STR_VERSION "1.4.4"
+#define PLUGIN_STR_VERSION "1.4.5"
#define _my_thread_var loc_thread_var
@@ -1623,7 +1623,7 @@ static int log_statement_ex(const struct connection_info *cn,
}
if (query && !(events & EVENT_QUERY_ALL) &&
- (events & EVENT_QUERY))
+ (events & EVENT_QUERY && !cn->log_always))
{
const char *orig_query= query;
@@ -2556,9 +2556,10 @@ static void log_current_query(MYSQL_THD thd)
if (!ci_needs_setup(cn) && cn->query_length &&
FILTER(EVENT_QUERY) && do_log_user(cn->user))
{
+ cn->log_always= 1;
log_statement_ex(cn, cn->query_time, thd_get_thread_id(thd),
cn->query, cn->query_length, 0, "QUERY");
- cn->log_always= 1;
+ cn->log_always= 0;
}
}

[Commits] cd66b2e5f14: Update to latest rocksdb (Use a different locktree for each column family)
by Sergei Petrunia 28 Apr '19
revision-id: cd66b2e5f14a40f581b639f64ed74ad3b0f9f37d (fb-prod201801-225-gcd66b2e5f14)
parent(s): 9921dbd4542c4fe1e5d28c611f48816e2568fffd
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2019-04-28 23:41:13 +0300
message:
Update to latest rocksdb (Use a different locktree for each column family)
---
rocksdb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/rocksdb b/rocksdb
index 97b782b47ae..248acd10346 160000
--- a/rocksdb
+++ b/rocksdb
@@ -1 +1 @@
-Subproject commit 97b782b47ae55675e2b0132d6332824343fe141e
+Subproject commit 248acd10346428b078aed780183ffe7f9f3c6896

[Commits] 248acd103: Use a different locktree for each column family
by Sergei Petrunia 28 Apr '19
revision-id: 248acd10346428b078aed780183ffe7f9f3c6896 (v5.8-1036-g248acd103)
parent(s): 97b782b47ae55675e2b0132d6332824343fe141e
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2019-04-28 23:39:10 +0300
message:
Use a different locktree for each column family
This is a preparation for range locking to properly support
reverse-ordered column families (and not just assume an identical
ordering across the whole DB).
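
A minimal standalone sketch of that layout (simplified types; LockTree stands in for toku::locktree, and the real comparator selection keys off the column family comparator's name): each column family gets its own lock structure created with a comparator matching that CF's key ordering, and the locking path looks the structure up by column family id.

// Sketch only: per-column-family lock structures with per-CF key ordering.
#include <cassert>
#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

using Cmp = std::function<int(const std::string&, const std::string&)>;

static int fw_cmp(const std::string& a, const std::string& b) {
  return a.compare(b);   // forward (bytewise) ordering
}
static int bw_cmp(const std::string& a, const std::string& b) {
  return b.compare(a);   // reverse ordering
}

struct LockTree {        // stand-in for toku::locktree
  Cmp cmp;
  explicit LockTree(Cmp c) : cmp(std::move(c)) {}
};

class RangeLockMgrSketch {
 public:
  void AddColumnFamily(uint32_t cf_id, bool reverse_ordered) {
    std::lock_guard<std::mutex> l(map_mutex_);
    assert(ltree_map_.count(cf_id) == 0);
    ltree_map_.emplace(cf_id, std::make_shared<LockTree>(
        reverse_ordered ? Cmp(bw_cmp) : Cmp(fw_cmp)));
  }
  // The real code additionally keeps a thread-local cache of this map so the
  // hot locking path can avoid the mutex (see get_locktree_by_cfid below).
  std::shared_ptr<LockTree> GetLockTree(uint32_t cf_id) {
    std::lock_guard<std::mutex> l(map_mutex_);
    auto it = ltree_map_.find(cf_id);
    return it == ltree_map_.end() ? nullptr : it->second;
  }
 private:
  std::mutex map_mutex_;
  std::unordered_map<uint32_t, std::shared_ptr<LockTree>> ltree_map_;
};

int main() {
  RangeLockMgrSketch mgr;
  mgr.AddColumnFamily(0, /*reverse_ordered=*/false);  // e.g. the 'default' CF
  mgr.AddColumnFamily(1, /*reverse_ordered=*/true);   // e.g. a 'rev:...' CF
  auto lt = mgr.GetLockTree(1);
  std::cout << lt->cmp("a", "b") << "\n";             // positive under reverse order
  return 0;
}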
---
.../transactions/pessimistic_transaction_db.cc | 6 +-
utilities/transactions/transaction_lock_mgr.cc | 184 +++++++++++++++++----
utilities/transactions/transaction_lock_mgr.h | 33 ++--
3 files changed, 178 insertions(+), 45 deletions(-)
diff --git a/utilities/transactions/pessimistic_transaction_db.cc b/utilities/transactions/pessimistic_transaction_db.cc
index e50fb1dad..e3ce435af 100644
--- a/utilities/transactions/pessimistic_transaction_db.cc
+++ b/utilities/transactions/pessimistic_transaction_db.cc
@@ -356,7 +356,7 @@ Status TransactionDB::WrapStackableDB(
// allocate a LockMap for it.
void PessimisticTransactionDB::AddColumnFamily(
const ColumnFamilyHandle* handle) {
- lock_mgr_->AddColumnFamily(handle->GetID());
+ lock_mgr_->AddColumnFamily(handle);
}
Status PessimisticTransactionDB::CreateColumnFamily(
@@ -370,7 +370,7 @@ Status PessimisticTransactionDB::CreateColumnFamily(
s = db_->CreateColumnFamily(options, column_family_name, handle);
if (s.ok()) {
- lock_mgr_->AddColumnFamily((*handle)->GetID());
+ lock_mgr_->AddColumnFamily(*handle);
UpdateCFComparatorMap(*handle);
}
@@ -385,7 +385,7 @@ Status PessimisticTransactionDB::DropColumnFamily(
Status s = db_->DropColumnFamily(column_family);
if (s.ok()) {
- lock_mgr_->RemoveColumnFamily(column_family->GetID());
+ lock_mgr_->RemoveColumnFamily(column_family);
}
return s;
diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc
index edec58261..9b6fd9381 100644
--- a/utilities/transactions/transaction_lock_mgr.cc
+++ b/utilities/transactions/transaction_lock_mgr.cc
@@ -189,7 +189,8 @@ size_t LockMap::GetStripe(const std::string& key) const {
return stripe;
}
-void TransactionLockMgr::AddColumnFamily(uint32_t column_family_id) {
+void TransactionLockMgr::AddColumnFamily(const ColumnFamilyHandle *cfh) {
+ uint32_t column_family_id= cfh->GetID();
InstrumentedMutexLock l(&lock_map_mutex_);
if (lock_maps_.find(column_family_id) == lock_maps_.end()) {
@@ -202,7 +203,8 @@ void TransactionLockMgr::AddColumnFamily(uint32_t column_family_id) {
}
}
-void TransactionLockMgr::RemoveColumnFamily(uint32_t column_family_id) {
+void TransactionLockMgr::RemoveColumnFamily(const ColumnFamilyHandle *cfh) {
+ uint32_t column_family_id= cfh->GetID();
// Remove lock_map for this column family. Since the lock map is stored
// as a shared ptr, concurrent transactions can still keep using it
// until they release their references to it.
@@ -780,23 +782,31 @@ RangeLockMgrHandle* NewRangeLockManager(
class RangeLockList: public PessimisticTransaction::LockStorage {
public:
virtual ~RangeLockList() {
- buffer_.destroy();
+ for(auto it : buffers_) {
+ it.second->destroy();
+ }
}
RangeLockList() : releasing_locks_(false) {
- buffer_.create();
}
- void append(const DBT *left_key, const DBT *right_key) {
+ void append(uint32_t cf_id, const DBT *left_key, const DBT *right_key) {
MutexLock l(&mutex_);
// there's only one thread that calls this function.
// the same thread will do lock release.
assert(!releasing_locks_);
- buffer_.append(left_key, right_key);
+ auto it= buffers_.find(cf_id);
+ if (it == buffers_.end()) {
+ // create a new one
+ //it->second.create();
+ it= buffers_.emplace(cf_id, std::shared_ptr<toku::range_buffer>(new toku::range_buffer())).first;
+ it->second->create();
+ }
+ else
+ it->second->append(left_key, right_key);
}
- // Ranges that the transaction is holding locks on
- toku::range_buffer buffer_;
+ std::unordered_map<uint32_t, std::shared_ptr<toku::range_buffer>> buffers_;
// Synchronization. See RangeLockMgr::UnLockAll for details
port::Mutex mutex_;
@@ -836,6 +846,8 @@ Status RangeLockMgr::TryRangeLock(PessimisticTransaction* txn,
// locktree::kill_waiter call. Do we need this anymore?
TransactionID wait_txn_id = txn->GetID();
+ auto lt= get_locktree_by_cfid(column_family_id);
+
request.set(lt, (TXNID)txn, &start_key_dbt, &end_key_dbt,
toku::lock_request::WRITE, false /* not a big txn */,
(void*)wait_txn_id);
@@ -911,7 +923,7 @@ Status RangeLockMgr::TryRangeLock(PessimisticTransaction* txn,
txn->owned_locks= std::unique_ptr<RangeLockList>(new RangeLockList);
}
RangeLockList* range_list= (RangeLockList*)txn->owned_locks.get();
- range_list->append(&start_key_dbt, &end_key_dbt);
+ range_list->append(column_family_id, &start_key_dbt, &end_key_dbt);
return Status::OK();
}
@@ -948,6 +960,7 @@ range_lock_mgr_release_lock_int(toku::locktree *lt,
void RangeLockMgr::UnLock(PessimisticTransaction* txn,
uint32_t column_family_id,
const std::string& key, Env*) {
+ auto lt= get_locktree_by_cfid(column_family_id);
range_lock_mgr_release_lock_int(lt, txn, column_family_id, key);
toku::lock_request::retry_all_lock_requests(lt, nullptr /* lock_wait_needed_callback */);
}
@@ -959,14 +972,14 @@ void RangeLockMgr::UnLock(const PessimisticTransaction* txn,
for (auto& key_map_iter : *key_map) {
uint32_t column_family_id = key_map_iter.first;
auto& keys = key_map_iter.second;
+ auto lt= get_locktree_by_cfid(column_family_id);
for (auto& key_iter : keys) {
const std::string& key = key_iter.first;
range_lock_mgr_release_lock_int(lt, txn, column_family_id, key);
}
+ toku::lock_request::retry_all_lock_requests(lt, nullptr /* lock_wait_needed_callback */);
}
-
- toku::lock_request::retry_all_lock_requests(lt, nullptr /* lock_wait_needed_callback */);
}
void RangeLockMgr::UnLockAll(const PessimisticTransaction* txn, Env*) {
@@ -1012,13 +1025,18 @@ void RangeLockMgr::UnLockAll(const PessimisticTransaction* txn, Env*) {
// not holding any locks, the lock tree might be in the STO-mode with
// another transaction, and our attempt to release an empty set of locks
// will cause an assertion failure.
- if (range_list->buffer_.get_num_ranges())
- lt->release_locks((TXNID)txn, &range_list->buffer_, true);
- range_list->buffer_.destroy();
- range_list->buffer_.create();
- range_list->releasing_locks_= false;
+ for (auto it: range_list->buffers_) {
+ if (it.second->get_num_ranges()) {
+ toku::locktree *lt = get_locktree_by_cfid(it.first);
+ lt->release_locks((TXNID)txn, it.second.get(), true);
- toku::lock_request::retry_all_lock_requests(lt, nullptr /* lock_wait_needed_callback */);
+ it.second->destroy();
+ it.second->create();
+
+ toku::lock_request::retry_all_lock_requests(lt, nullptr);
+ }
+ }
+ range_list->releasing_locks_= false;
}
}
@@ -1082,13 +1100,21 @@ int RangeLockMgr::compare_dbt_endpoints(__toku_db*, void *arg,
return res;
}
+int RangeLockMgr::compare_dbt_endpoints_rev(__toku_db* db, void *arg,
+ const DBT *a_key,
+ const DBT *b_key) {
+ return -compare_dbt_endpoints(db, arg, a_key, b_key);
+}
+
RangeLockMgr::RangeLockMgr(std::shared_ptr<TransactionDBMutexFactory> mutex_factory) :
- mutex_factory_(mutex_factory) {
- ltm.create(on_create, on_destroy, on_escalate, NULL, mutex_factory_);
- cmp_.create(compare_dbt_endpoints, (void*)this, NULL);
- DICTIONARY_ID dict_id = { .dictid = 1 };
- lt= ltm.get_lt(dict_id, cmp_, /* on_create_extra*/nullptr);
+ mutex_factory_(mutex_factory),
+ ltree_lookup_cache_(new ThreadLocalPtr(nullptr)) {
+
+ ltm_.create(on_create, on_destroy, on_escalate, NULL, mutex_factory_);
+
+ fw_cmp_.create(compare_dbt_endpoints, (void*)this, NULL);
+ bw_cmp_.create(compare_dbt_endpoints, (void*)this, NULL);
}
@@ -1102,7 +1128,7 @@ RangeLockMgr::RangeLockMgr(std::shared_ptr<TransactionDBMutexFactory> mutex_fact
@param void* Callback context
*/
-void RangeLockMgr::on_escalate(TXNID txnid, const locktree*,
+void RangeLockMgr::on_escalate(TXNID txnid, const locktree* lt,
const range_buffer &buffer, void *) {
auto txn= (PessimisticTransaction*)txnid;
@@ -1119,12 +1145,17 @@ void RangeLockMgr::on_escalate(TXNID txnid, const locktree*,
}
// TODO: are we tracking this mem: lt->get_manager()->note_mem_released(trx_locks->ranges.buffer->total_memory_size());
- trx_locks->buffer_.destroy();
- trx_locks->buffer_.create();
+
+ uint32_t cf_id = (uint32_t)lt->get_dict_id().dictid;
+
+ auto it= trx_locks->buffers_.find(cf_id);
+ it->second->destroy();
+ it->second->create();
+
toku::range_buffer::iterator iter(&buffer);
toku::range_buffer::iterator::record rec;
while (iter.current(&rec)) {
- trx_locks->buffer_.append(rec.get_left_key(), rec.get_right_key());
+ it->second->append(rec.get_left_key(), rec.get_right_key());
iter.next();
}
// TODO: same as above: lt->get_manager()->note_mem_used(ranges.buffer->total_memory_size());
@@ -1132,16 +1163,18 @@ void RangeLockMgr::on_escalate(TXNID txnid, const locktree*,
RangeLockMgr::~RangeLockMgr() {
- if (lt) {
- ltm.release_lt(lt);
+ //TODO: at this point, synchronization is not needed, right?
+ for (auto it: ltree_map_) {
+ ltm_.release_lt(it.second);
}
- ltm.destroy();
- cmp_.destroy();
+ ltm_.destroy();
+ fw_cmp_.destroy();
+ bw_cmp_.destroy();
}
uint64_t RangeLockMgr::get_escalation_count() {
LTM_STATUS_S ltm_status_test;
- ltm.get_status(<m_status_test);
+ ltm_.get_status(<m_status_test);
// Searching status variable by its string name is how Toku's unit tests
// do it (why didn't they make LTM_ESCALATION_COUNT constant visible?)
@@ -1160,6 +1193,87 @@ uint64_t RangeLockMgr::get_escalation_count() {
return key_status->value.num;
}
+void RangeLockMgr::AddColumnFamily(const ColumnFamilyHandle *cfh) {
+ uint32_t column_family_id= cfh->GetID();
+
+ InstrumentedMutexLock l(<ree_map_mutex_);
+ if (ltree_map_.find(column_family_id) == ltree_map_.end()) {
+ DICTIONARY_ID dict_id = { .dictid = column_family_id };
+ toku::comparator *ltree_cmp;
+ // "RocksDB_SE_v3.10" // BytewiseComparator() ,ReverseBytewiseComparator()
+ if (!strcmp(cfh->GetComparator()->Name(), "RocksDB_SE_v3.10"))
+ ltree_cmp = &fw_cmp_;
+ else if (!strcmp(cfh->GetComparator()->Name(),"rev:RocksDB_SE_v3.10"))
+ ltree_cmp = &bw_cmp_;
+ else {
+ assert(false);
+ ltree_cmp= nullptr;
+ }
+ toku::locktree *ltree= ltm_.get_lt(dict_id, *ltree_cmp,
+ /* on_create_extra*/nullptr);
+ ltree_map_.emplace(column_family_id, ltree);
+ } else {
+ // column_family already exists in lock map
+ assert(false);
+ }
+}
+
+void RangeLockMgr::RemoveColumnFamily(const ColumnFamilyHandle *cfh) {
+ uint32_t column_family_id= cfh->GetID();
+ // Remove lock_map for this column family. Since the lock map is stored
+ // as a shared ptr, concurrent transactions can still keep using it
+ // until they release their references to it.
+ {
+ InstrumentedMutexLock l(<ree_map_mutex_);
+
+ auto lock_maps_iter = ltree_map_.find(column_family_id);
+ assert(lock_maps_iter != ltree_map_.end());
+
+ ltm_.release_lt(lock_maps_iter->second);
+
+ ltree_map_.erase(lock_maps_iter);
+ } // lock_map_mutex_
+
+ //TODO: why do we delete first and clear the caches second? Shouldn't this be
+ // done in the reverse order? (if we do it in the reverse order, how will we
+ // prevent somebody from re-populating the cache?)
+
+ // Clear all thread-local caches. We collect a vector of caches but we dont
+ // really need them.
+ autovector<void*> local_caches;
+ ltree_lookup_cache_->Scrape(&local_caches, nullptr);
+}
+
+toku::locktree *RangeLockMgr::get_locktree_by_cfid(uint32_t column_family_id) {
+
+ // First check thread-local cache
+ if (ltree_lookup_cache_->Get() == nullptr) {
+ ltree_lookup_cache_->Reset(new LockTreeMap());
+ }
+
+ auto ltree_map_cache = static_cast<LockTreeMap*>(ltree_lookup_cache_->Get());
+
+ auto it = ltree_map_cache->find(column_family_id);
+ if (it != ltree_map_cache->end()) {
+ // Found lock map for this column family.
+ return it->second;
+ }
+
+ // Not found in local cache, grab mutex and check shared LockMaps
+ InstrumentedMutexLock l(<ree_map_mutex_);
+
+ it = ltree_map_.find(column_family_id);
+ if (it == ltree_map_.end()) {
+ return nullptr;
+ } else {
+ // Found lock map. Store in thread-local cache and return.
+ //std::shared_ptr<LockMap>& lock_map = lock_map_iter->second;
+ ltree_map_cache->insert({column_family_id, it->second});
+ return it->second;
+ }
+
+ return nullptr;
+}
struct LOCK_PRINT_CONTEXT {
BaseLockMgr::LockStatusData *data;
@@ -1193,7 +1307,13 @@ void push_into_lock_status_data(void* param, const DBT *left,
BaseLockMgr::LockStatusData RangeLockMgr::GetLockStatusData() {
LockStatusData data;
LOCK_PRINT_CONTEXT ctx = {&data, GetColumnFamilyID(my_txn_db_->DefaultColumnFamily()) };
- lt->dump_locks((void*)&ctx, push_into_lock_status_data);
+
+ {
+ InstrumentedMutexLock l(<ree_map_mutex_);
+ for (auto it : ltree_map_) {
+ it.second->dump_locks((void*)&ctx, push_into_lock_status_data);
+ }
+ }
return data;
}
diff --git a/utilities/transactions/transaction_lock_mgr.h b/utilities/transactions/transaction_lock_mgr.h
index 02c68964e..e41a4961a 100644
--- a/utilities/transactions/transaction_lock_mgr.h
+++ b/utilities/transactions/transaction_lock_mgr.h
@@ -60,8 +60,8 @@ class PessimisticTransactionDB;
//
class BaseLockMgr {
public:
- virtual void AddColumnFamily(uint32_t column_family_id) = 0;
- virtual void RemoveColumnFamily(uint32_t column_family_id) = 0;
+ virtual void AddColumnFamily(const ColumnFamilyHandle *cfh) = 0;
+ virtual void RemoveColumnFamily(const ColumnFamilyHandle *cfh) = 0;
virtual
Status TryLock(PessimisticTransaction* txn, uint32_t column_family_id,
@@ -97,11 +97,11 @@ class TransactionLockMgr : public BaseLockMgr {
// Creates a new LockMap for this column family. Caller should guarantee
// that this column family does not already exist.
- void AddColumnFamily(uint32_t column_family_id);
+ void AddColumnFamily(const ColumnFamilyHandle *cfh);
// Deletes the LockMap for this column family. Caller should guarantee that
// this column family is no longer in use.
- void RemoveColumnFamily(uint32_t column_family_id);
+ void RemoveColumnFamily(const ColumnFamilyHandle *cfh);
// Attempt to lock key. If OK status is returned, the caller is responsible
// for calling UnLock() on this key.
@@ -199,8 +199,8 @@ class RangeLockMgr :
public BaseLockMgr,
public RangeLockMgrHandle {
public:
- void AddColumnFamily(uint32_t) override { /* do nothing */ }
- void RemoveColumnFamily(uint32_t) override { /* do nothing */ }
+ void AddColumnFamily(const ColumnFamilyHandle *cfh) override;
+ void RemoveColumnFamily(const ColumnFamilyHandle *cfh) override;
Status TryLock(PessimisticTransaction* txn, uint32_t column_family_id,
const std::string& key, Env* env, bool exclusive) override ;
@@ -240,7 +240,7 @@ class RangeLockMgr :
int set_max_lock_memory(size_t max_lock_memory) override
{
- return ltm.set_max_lock_memory(max_lock_memory);
+ return ltm_.set_max_lock_memory(max_lock_memory);
}
uint64_t get_escalation_count() override;
@@ -248,15 +248,28 @@ class RangeLockMgr :
LockStatusData GetLockStatusData() override;
private:
- toku::locktree_manager ltm;
- toku::locktree *lt; // only one tree for now
+ toku::locktree_manager ltm_;
- toku::comparator cmp_;
+ toku::comparator fw_cmp_;
+ toku::comparator bw_cmp_;
TransactionDB* my_txn_db_;
std::shared_ptr<TransactionDBMutexFactory> mutex_factory_;
+ // Map from cf_id to locktree*. Can only be accessed while holding the
+ // ltree_map_mutex_.
+ using LockTreeMap = std::unordered_map<uint32_t, locktree*>;
+ LockTreeMap ltree_map_;
+
+ InstrumentedMutex ltree_map_mutex_;
+
+ // Per-thread cache of ltree_map_.
+ std::unique_ptr<ThreadLocalPtr> ltree_lookup_cache_;
+
+ toku::locktree *get_locktree_by_cfid(uint32_t cf_id);
+
static int compare_dbt_endpoints(__toku_db*, void *arg, const DBT *a_key, const DBT *b_key);
+ static int compare_dbt_endpoints_rev(__toku_db*, void *arg, const DBT *a_key, const DBT *b_key);
// Callbacks
static int on_create(locktree*, void*) { return 0; /* no error */ }

[Commits] cd26cdc: MDEV-19141 server_audit_excl_users accepts only values with less than 1024 chars.
by holyfoot@askmonty.org 28 Apr '19
revision-id: cd26cdcd974725031e30393ff165fb0dfb365c4d (mariadb-10.2.23-87-gcd26cdc)
parent(s): 00377147e3029b982cbc29d3f4477362c6e6fdb4
committer: Alexey Botchkov
timestamp: 2019-04-29 00:11:48 +0400
message:
MDEV-19141 server_audit_excl_users accepts only values with less than 1024 chars.
Since this limit is imposed by SHOW_VAR_FUNC_BUFF_SIZE, we just report an error.
---
mysql-test/suite/plugins/r/server_audit.result | 14 ++++++
mysql-test/suite/plugins/t/server_audit.test | 8 ++++
plugin/server_audit/server_audit.c | 62 +++++++++++++++++++++++---
3 files changed, 78 insertions(+), 6 deletions(-)
diff --git a/mysql-test/suite/plugins/r/server_audit.result b/mysql-test/suite/plugins/r/server_audit.result
index 0139276..0709444 100644
--- a/mysql-test/suite/plugins/r/server_audit.result
+++ b/mysql-test/suite/plugins/r/server_audit.result
@@ -21,6 +21,16 @@ set global server_audit_incl_users=null;
set global server_audit_file_path='server_audit.log';
set global server_audit_output_type=file;
set global server_audit_logging=on;
+set global server_audit_incl_users= repeat("'root',", 10000);
+ERROR 42000: Variable 'server_audit_incl_users' can't be set to the value of ''root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','roo'
+show variables like 'server_audit_incl_users';
+Variable_name Value
+server_audit_incl_users
+set global server_audit_excl_users= repeat("'root',", 10000);
+ERROR 42000: Variable 'server_audit_excl_users' can't be set to the value of ''root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','root','roo'
+show variables like 'server_audit_excl_users';
+Variable_name Value
+server_audit_excl_users
connect con1,localhost,root,,mysql;
connection default;
disconnect con1;
@@ -251,6 +261,10 @@ uninstall plugin server_audit;
Warnings:
Warning 1620 Plugin is busy and will be uninstalled on shutdown
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_logging=on',0
+TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_incl_users= repeat("\'root\',", 10000)',ID
+TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'show variables like \'server_audit_incl_users\'',0
+TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_excl_users= repeat("\'root\',", 10000)',ID
+TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'show variables like \'server_audit_excl_users\'',0
TIME,HOSTNAME,root,localhost,ID,0,CONNECT,mysql,,0
TIME,HOSTNAME,root,localhost,ID,0,DISCONNECT,mysql,,0
TIME,HOSTNAME,no_such_user,localhost,ID,0,FAILED_CONNECT,,,ID
diff --git a/mysql-test/suite/plugins/t/server_audit.test b/mysql-test/suite/plugins/t/server_audit.test
index 4af1ed8..fa23dc5 100644
--- a/mysql-test/suite/plugins/t/server_audit.test
+++ b/mysql-test/suite/plugins/t/server_audit.test
@@ -13,6 +13,14 @@ set global server_audit_incl_users=null;
set global server_audit_file_path='server_audit.log';
set global server_audit_output_type=file;
set global server_audit_logging=on;
+
+--error ER_WRONG_VALUE_FOR_VAR
+set global server_audit_incl_users= repeat("'root',", 10000);
+show variables like 'server_audit_incl_users';
+--error ER_WRONG_VALUE_FOR_VAR
+set global server_audit_excl_users= repeat("'root',", 10000);
+show variables like 'server_audit_excl_users';
+
--sleep 2
connect (con1,localhost,root,,mysql);
connection default;
diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c
index 0995327..2f9cd99 100644
--- a/plugin/server_audit/server_audit.c
+++ b/plugin/server_audit/server_audit.c
@@ -335,6 +335,10 @@ static void update_file_rotations(MYSQL_THD thd, struct st_mysql_sys_var *var,
void *var_ptr, const void *save);
static void update_incl_users(MYSQL_THD thd, struct st_mysql_sys_var *var,
void *var_ptr, const void *save);
+static int check_incl_users(MYSQL_THD thd, struct st_mysql_sys_var *var, void *save,
+ struct st_mysql_value *value);
+static int check_excl_users(MYSQL_THD thd, struct st_mysql_sys_var *var, void *save,
+ struct st_mysql_value *value);
static void update_excl_users(MYSQL_THD thd, struct st_mysql_sys_var *var,
void *var_ptr, const void *save);
static void update_output_type(MYSQL_THD thd, struct st_mysql_sys_var *var,
@@ -354,10 +358,10 @@ static void rotate_log(MYSQL_THD thd, struct st_mysql_sys_var *var,
static MYSQL_SYSVAR_STR(incl_users, incl_users, PLUGIN_VAR_RQCMDARG,
"Comma separated list of users to monitor.",
- NULL, update_incl_users, NULL);
+ check_incl_users, update_incl_users, NULL);
static MYSQL_SYSVAR_STR(excl_users, excl_users, PLUGIN_VAR_RQCMDARG,
"Comma separated list of users to exclude from auditing.",
- NULL, update_excl_users, NULL);
+ check_excl_users, update_excl_users, NULL);
/* bits in the event filter. */
#define EVENT_CONNECT 1
#define EVENT_QUERY_ALL 2
@@ -2643,16 +2647,56 @@ static void update_file_rotate_size(MYSQL_THD thd __attribute__((unused)),
}
+static int check_users(void *save, struct st_mysql_value *value,
+ size_t s, const char *name)
+{
+ const char *users;
+ int len= 0;
+
+ users= value->val_str(value, NULL, &len);
+ if ((size_t) len > s)
+ {
+ error_header();
+ fprintf(stderr,
+ "server_audit_%s_users value can't be longer than %zu characters.\n",
+ name, s);
+ return 1;
+ }
+ *((const char**)save)= users;
+ return 0;
+}
+
+static int check_incl_users(MYSQL_THD thd __attribute__((unused)),
+ struct st_mysql_sys_var *var __attribute__((unused)),
+ void *save, struct st_mysql_value *value)
+{
+ return check_users(save, value, sizeof(incl_user_buffer), "incl");
+}
+
+static int check_excl_users(MYSQL_THD thd __attribute__((unused)),
+ struct st_mysql_sys_var *var __attribute__((unused)),
+ void *save, struct st_mysql_value *value)
+{
+ return check_users(save, value, sizeof(excl_user_buffer), "excl");
+}
+
+
static void update_incl_users(MYSQL_THD thd,
struct st_mysql_sys_var *var __attribute__((unused)),
void *var_ptr __attribute__((unused)), const void *save)
{
char *new_users= (*(char **) save) ? *(char **) save : empty_str;
+ size_t new_len= strlen(new_users) + 1;
if (!maria_55_started || !debug_server_started)
flogger_mutex_lock(&lock_operations);
mark_always_logged(thd);
- strncpy(incl_user_buffer, new_users, sizeof(incl_user_buffer)-1);
- incl_user_buffer[sizeof(incl_user_buffer)-1]= 0;
+
+ if (new_len > sizeof(incl_user_buffer))
+ new_len= sizeof(incl_user_buffer);
+
+ memcpy(incl_user_buffer, new_users, new_len - 1);
+ incl_user_buffer[new_len - 1]= 0;
+
incl_users= incl_user_buffer;
user_coll_fill(&incl_user_coll, incl_users, &excl_user_coll, 1);
error_header();
@@ -2667,11 +2711,17 @@ static void update_excl_users(MYSQL_THD thd __attribute__((unused)),
void *var_ptr __attribute__((unused)), const void *save)
{
char *new_users= (*(char **) save) ? *(char **) save : empty_str;
+ size_t new_len= strlen(new_users) + 1;
if (!maria_55_started || !debug_server_started)
flogger_mutex_lock(&lock_operations);
mark_always_logged(thd);
- strncpy(excl_user_buffer, new_users, sizeof(excl_user_buffer)-1);
- excl_user_buffer[sizeof(excl_user_buffer)-1]= 0;
+
+ if (new_len > sizeof(excl_user_buffer))
+ new_len= sizeof(excl_user_buffer);
+
+ memcpy(excl_user_buffer, new_users, new_len - 1);
+ excl_user_buffer[new_len - 1]= 0;
+
excl_users= excl_user_buffer;
user_coll_fill(&excl_user_coll, excl_users, &incl_user_coll, 0);
error_header();

[Commits] d3534d5: MDEV-19351 statistics_for_command_is_needed: Conditional jump or move
by IgorBabaev 27 Apr '19
revision-id: d3534d5b45a419260d0292d72a6a21f2a2ba2582 (mariadb-10.1.38-144-gd3534d5)
parent(s): d88dfd873232dafd480c2d0382b4ff3add9c936d
author: Igor Babaev
committer: Igor Babaev
timestamp: 2019-04-27 21:31:04 -0700
message:
MDEV-19351 statistics_for_command_is_needed: Conditional jump or move
depends on uninitialised value
Initialize THD::force_read_stats, which was introduced by the patch for MDEV-17605.
Leaving this field uninitialized in the THD constructor could trigger reading
statistical data that is not needed.
---
sql/sql_class.cc | 1 +
1 file changed, 1 insertion(+)
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 639c7c1..6fa8c10 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -983,6 +983,7 @@ THD::THD(bool is_wsrep_applier)
memset(&invoker_host, 0, sizeof(invoker_host));
prepare_derived_at_open= FALSE;
create_tmp_table_for_derived= FALSE;
+ force_read_stats= FALSE;
save_prep_leaf_list= FALSE;
/* Restore THR_THD */
set_current_thd(old_THR_THD);

[Commits] 88a80e92dc4: MDEV-9959: A serious MariaDB server performance bug
by Varun 27 Apr '19
revision-id: 88a80e92dc444ce30718f3e08d3ab66fb02bcea4 (mariadb-10.3.10-284-g88a80e92dc4)
parent(s): 3032cd8e91f1e1ead8b6f941e75cd29e473e7eaa
author: Varun Gupta
committer: Varun Gupta
timestamp: 2019-04-24 13:31:24 +0530
message:
MDEV-9959: A serious MariaDB server performance bug
Step #2: If the GROUP BY fields of a derived table form a prefix of the key parts used for ref access to it
(matched directly or through multiple equalities), then we are again guaranteed
that ref access to the derived table will produce at most one row per key.
---
mysql-test/main/cte_nonrecursive.result | 6 +-
mysql-test/main/derived.result | 43 +++++++++
mysql-test/main/derived.test | 26 +++++
mysql-test/main/derived_cond_pushdown.result | 106 ++++++++++-----------
mysql-test/main/derived_opt.result | 2 +-
mysql-test/main/derived_split_innodb.result | 4 +-
mysql-test/main/derived_view.result | 34 +++----
mysql-test/main/join_cache.result | 6 +-
mysql-test/main/selectivity.result | 4 +-
mysql-test/main/selectivity_innodb.result | 4 +-
mysql-test/main/subselect_extra.result | 2 +-
mysql-test/main/subselect_extra_no_semijoin.result | 2 +-
sql/sql_lex.cc | 62 ++++++++++++
sql/sql_lex.h | 1 +
sql/table.cc | 3 +
15 files changed, 220 insertions(+), 85 deletions(-)
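For illustration only (throwaway tables, mirroring the new derived.test cases below): when the GROUP BY column is the column used for ref access, the optimizer can now assume at most one matching row per key.

  CREATE TABLE t1 (a INT, b INT);
  CREATE TABLE t2 (a INT, b INT, c INT);
  INSERT INTO t1 VALUES (1,1),(2,2),(3,3);
  INSERT INTO t2 VALUES (7,2,2),(8,1,1),(1,2,5);
  -- GROUP BY a is a prefix of the ref key on q.a, so the plan is expected
  -- to show rows=1 for the <derived> table:
  EXPLAIN SELECT * FROM t1, (SELECT a, b, c FROM t2 GROUP BY a) q WHERE t1.a = q.a;
  DROP TABLE t1, t2;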
diff --git a/mysql-test/main/cte_nonrecursive.result b/mysql-test/main/cte_nonrecursive.result
index d80d34ecc7f..b04b0335289 100644
--- a/mysql-test/main/cte_nonrecursive.result
+++ b/mysql-test/main/cte_nonrecursive.result
@@ -85,14 +85,14 @@ with t as (select a, count(*) from t1 where b >= 'c' group by a)
select * from t2,t where t2.c=t.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 1
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
explain
select * from t2, (select a, count(*) from t1 where b >= 'c' group by a) as t
where t2.c=t.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 1
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
# specivication of t contains having
with t as (select a, count(*) from t1 where b >= 'c'
@@ -597,7 +597,7 @@ explain
select * from v2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived3> ref key0 key0 5 test.t2.c 2
+1 PRIMARY <derived3> ref key0 key0 5 test.t2.c 1
3 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
# with clause in the specification of a view that whose definition
# table alias for a with table
diff --git a/mysql-test/main/derived.result b/mysql-test/main/derived.result
index 857246d68b4..e19b80678be 100644
--- a/mysql-test/main/derived.result
+++ b/mysql-test/main/derived.result
@@ -1249,3 +1249,46 @@ a a
4 4
6 6
drop table t1,t2,t3;
+create table t1(a int, b int);
+insert into t1 values (1,1),(2,2),(3,3);
+create table t2(a int, b int,c int);
+insert into t2(a,b,c) values (7,2,2),(8,1,1),(1,2,5);
+create table t3(a int, b int);
+insert into t3(a,b) values (7,1),(8,2), (1,3);
+# expects rows=1 for derived table as group by fields are a prefix of the keyparts involved in ref access
+explain select * from t1 , ((select a,b,c from t2 group by a))q where t1.a=q.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1
+2 DERIVED t2 ALL NULL NULL NULL NULL 3 Using temporary; Using filesort
+select * from t1 , ((select a,b,c from t2 group by a))q where t1.a=q.a;
+a b a b c
+1 1 1 2 5
+# expects rows=1 for derived table as group by fields are a prefix of the keyparts involved in ref access, this case
+# involves use of multiple equalities to check the prefix condition for rows=1
+explain select * from t1 , ((select a,b from t2 where a=c group by c))q where t1.a=q.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1
+2 DERIVED t2 ALL NULL NULL NULL NULL 3 Using where; Using temporary; Using filesort
+select * from t1 , ((select a,b from t2 where a=c group by c))q where t1.a=q.a;
+a b a b
+# rows should not be 1 for the derived table as group by is not a prefix of the keyparts for ref access
+explain select * from t1 , ((select a,b from t2 where b=c group by c))q where t1.a=q.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2
+2 DERIVED t2 ALL NULL NULL NULL NULL 3 Using where; Using temporary; Using filesort
+select * from t1 , ((select a,b from t2 where b=c group by c))q where t1.a=q.a;
+a b a b
+# rows should not be 1 for the derived table as group by is not a prefix of the keyparts for ref access
+explain select * from t1 , ((select a,b,c from t2 group by a))q where t1.a=q.b and t1.b=q.c;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY <derived2> ref key0 key0 10 test.t1.a,test.t1.b 2
+2 DERIVED t2 ALL NULL NULL NULL NULL 3 Using temporary; Using filesort
+select * from t1 , ((select a,b,c from t2 group by a))q where t1.a=q.b and t1.b=q.c;
+a b a b c
+1 1 8 1 1
+2 2 7 2 2
+drop table t1,t2,t3;
diff --git a/mysql-test/main/derived.test b/mysql-test/main/derived.test
index 990f955450a..494500a1c8a 100644
--- a/mysql-test/main/derived.test
+++ b/mysql-test/main/derived.test
@@ -1062,3 +1062,29 @@ analyze select * from t1 , ( (select t2.a from t2 order by c) union all (select
select * from t1 , ( (select t2.a from t2 order by c) union all (select t2.a from t2 order by c) except(select t3.a from t3 order by b))q where t1.a=q.a;
drop table t1,t2,t3;
+
+create table t1(a int, b int);
+insert into t1 values (1,1),(2,2),(3,3);
+create table t2(a int, b int,c int);
+insert into t2(a,b,c) values (7,2,2),(8,1,1),(1,2,5);
+create table t3(a int, b int);
+insert into t3(a,b) values (7,1),(8,2), (1,3);
+
+--echo # expects rows=1 for derived table as group by fields are a prefix of the keyparts involved in ref access
+explain select * from t1 , ((select a,b,c from t2 group by a))q where t1.a=q.a;
+select * from t1 , ((select a,b,c from t2 group by a))q where t1.a=q.a;
+
+--echo # expects rows=1 for derived table as group by fields are a prefix of the keyparts involved in ref access, this case
+--echo # involves use of multiple equalities to check the prefix condition for rows=1
+explain select * from t1 , ((select a,b from t2 where a=c group by c))q where t1.a=q.a;
+select * from t1 , ((select a,b from t2 where a=c group by c))q where t1.a=q.a;
+
+--echo # rows should not be 1 for the derived table as group by is not a prefix of the keyparts for ref access
+explain select * from t1 , ((select a,b from t2 where b=c group by c))q where t1.a=q.a;
+select * from t1 , ((select a,b from t2 where b=c group by c))q where t1.a=q.a;
+
+--echo # rows should not be 1 for the derived table as group by is not a prefix of the keyparts for ref access
+explain select * from t1 , ((select a,b,c from t2 group by a))q where t1.a=q.b and t1.b=q.c;
+select * from t1 , ((select a,b,c from t2 group by a))q where t1.a=q.b and t1.b=q.c;
+drop table t1,t2,t3;
+
diff --git a/mysql-test/main/derived_cond_pushdown.result b/mysql-test/main/derived_cond_pushdown.result
index 8086c4480f6..2f46e7eeeed 100644
--- a/mysql-test/main/derived_cond_pushdown.result
+++ b/mysql-test/main/derived_cond_pushdown.result
@@ -1551,7 +1551,7 @@ a b max_c avg_c a b c d
explain select * from v1,t2 where (v1.a=v1.b) and (v1.a=t2.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 10 test.t2.a,test.t2.a 2
+1 PRIMARY <derived2> ref key0 key0 10 test.t2.a,test.t2.a 1
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
explain format=json select * from v1,t2 where (v1.a=v1.b) and (v1.a=t2.a);
EXPLAIN
@@ -1573,7 +1573,7 @@ EXPLAIN
"key_length": "10",
"used_key_parts": ["a", "b"],
"ref": ["test.t2.a", "test.t2.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -1839,7 +1839,7 @@ explain select * from v_decimal as v,t2_decimal as t where
(v.a=v.b) and (v.b=t.b) and ((t.b>1) or (v.a=1));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 6 test.t.b,test.t.b 2
+1 PRIMARY <derived2> ref key0 key0 6 test.t.b,test.t.b 1
2 DERIVED t1_decimal ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort
explain format=json select * from v_decimal as v,t2_decimal as t where
(v.a=v.b) and (v.b=t.b) and ((t.b>1) or (v.a=1));
@@ -1862,7 +1862,7 @@ EXPLAIN
"key_length": "6",
"used_key_parts": ["a", "b"],
"ref": ["test.t.b", "test.t.b"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -2157,7 +2157,7 @@ explain select * from v_double as v,t2_double as t where
(v.b=v.c) and (v.c=t.c) and ((t.c>10) or (v.a=1));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 18 test.t.c,test.t.c 2 Using where
+1 PRIMARY <derived2> ref key0 key0 18 test.t.c,test.t.c 1 Using where
2 DERIVED t1_double ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort
explain format=json select * from v_double as v,t2_double as t where
(v.b=v.c) and (v.c=t.c) and ((t.c>10) or (v.a=1));
@@ -2180,7 +2180,7 @@ EXPLAIN
"key_length": "18",
"used_key_parts": ["b", "c"],
"ref": ["test.t.c", "test.t.c"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"attached_condition": "t.c > 10 or v.a = 1",
"materialized": {
@@ -2380,7 +2380,7 @@ where t1.a>5 group by a,b having max_c < 707) v1,
t2 where (v1.a=t2.a) and (v1.max_c>300) and (v1.a=v1.b);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 10 test.t2.a,test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key0 key0 10 test.t2.a,test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
explain format=json select * from
(select a, b, max(c) as max_c, avg(c) as avg_c from t1
@@ -2405,7 +2405,7 @@ EXPLAIN
"key_length": "10",
"used_key_parts": ["a", "b"],
"ref": ["test.t2.a", "test.t2.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"attached_condition": "v1.max_c > 300",
"materialized": {
@@ -2506,7 +2506,7 @@ a b max_c avg_c a b c d
explain select * from v1,t2 where (v1.a=t2.a) and (v1.b=t2.b);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 10 test.t2.a,test.t2.b 2
+1 PRIMARY <derived2> ref key0 key0 10 test.t2.a,test.t2.b 1
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort
explain format=json select * from v1,t2 where (v1.a=t2.a) and (v1.b=t2.b);
EXPLAIN
@@ -2528,7 +2528,7 @@ EXPLAIN
"key_length": "10",
"used_key_parts": ["a", "b"],
"ref": ["test.t2.a", "test.t2.b"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -3204,7 +3204,7 @@ explain select * from v1,v2,t2 where
(v1.a=t2.a) and (v1.a=v1.b) and (v1.a=v2.a) and (v2.max_c<300);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key1 key1 10 test.t2.a,test.t2.a 2
+1 PRIMARY <derived2> ref key1 key1 10 test.t2.a,test.t2.a 1
1 PRIMARY <derived3> ref key0 key0 5 test.t2.a 2 Using where
3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
@@ -3229,7 +3229,7 @@ EXPLAIN
"key_length": "10",
"used_key_parts": ["a", "b"],
"ref": ["test.t2.a", "test.t2.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -7207,7 +7207,7 @@ SELECT d FROM v4 WHERE s > a
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t1)
-3 DEPENDENT SUBQUERY <derived5> index_subquery key0 key0 5 func 2 Using where
+3 DEPENDENT SUBQUERY <derived5> index_subquery key0 key0 5 func 1 Using where
5 DERIVED t4 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort
explain format=json SELECT * FROM t1 WHERE a IN (
SELECT b FROM v2 WHERE b < a OR b IN (
@@ -7245,7 +7245,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"ref": ["func"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -8514,7 +8514,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -8564,7 +8564,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -8848,9 +8848,21 @@ EXPLAIN
"query_block": {
"select_id": 1,
"table": {
- "table_name": "<derived2>",
+ "table_name": "t1",
"access_type": "ALL",
- "rows": 3,
+ "rows": 4,
+ "filtered": 100,
+ "attached_condition": "t1.id2 is not null"
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["id2"],
+ "ref": ["test.t1.id2"],
+ "rows": 1,
"filtered": 100,
"attached_condition": "vc.ct > 0",
"materialized": {
@@ -8870,18 +8882,6 @@ EXPLAIN
}
}
}
- },
- "block-nl-join": {
- "table": {
- "table_name": "t1",
- "access_type": "ALL",
- "rows": 4,
- "filtered": 100
- },
- "buffer_type": "flat",
- "buffer_size": "256Kb",
- "join_type": "BNL",
- "attached_condition": "t1.id2 = vc.id2"
}
}
}
@@ -14986,7 +14986,7 @@ on t1.a=t.a
where t1.b < 3;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 range idx_b idx_b 5 NULL 4 100.00 Using index condition; Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 100.00
2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 2 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`s` AS `s`,`t`.`m` AS `m` from `test`.`t1` join (/* select#2 */ select `test`.`t2`.`a` AS `a`,sum(`test`.`t2`.`b`) AS `s`,min(`test`.`t2`.`c`) AS `m` from `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` group by `test`.`t2`.`a`) `t` where `t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`b` < 3
@@ -15019,7 +15019,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -15096,7 +15096,7 @@ on t1.a=t.a
where t1.b <= 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL idx_b NULL NULL NULL 12 75.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 9 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 100.00
2 DERIVED t2 ALL idx_a NULL NULL NULL 90 100.00 Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`s` AS `s`,`t`.`m` AS `m` from `test`.`t1` join (/* select#2 */ select `test`.`t2`.`a` AS `a`,sum(`test`.`t2`.`b`) AS `s`,min(`test`.`t2`.`b`) AS `m` from `test`.`t2` group by `test`.`t2`.`a`) `t` where `t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`b` <= 5
@@ -15125,7 +15125,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
- "rows": 9,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -15200,7 +15200,7 @@ from t1 left join
on t1.a=t.a;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 100.00 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 100.00 Using where
2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 2 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t1` left join (/* select#2 */ select `test`.`t2`.`a` AS `a`,max(`test`.`t2`.`b`) AS `max`,min(`test`.`t2`.`b`) AS `min` from `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` group by `test`.`t2`.`a`) `t` on(`t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`a` is not null) where 1
@@ -15227,7 +15227,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"attached_condition": "trigcond(trigcond(t1.a is not null))",
"materialized": {
@@ -15289,7 +15289,7 @@ on t3.a=t.a and t3.c=t.c
where t3.b > 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 range idx_b idx_b 5 NULL 3 100.00 Using index condition; Using where
-1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 2 100.00
+1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 1 100.00
2 LATERAL DERIVED t4 ref idx idx 133 test.t3.a,test.t3.c 1 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`c` AS `c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t3` join (/* select#2 */ select `test`.`t4`.`a` AS `a`,`test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` where `test`.`t4`.`a` = `test`.`t3`.`a` and `test`.`t4`.`c` = `test`.`t3`.`c` group by `test`.`t4`.`a`,`test`.`t4`.`c`) `t` where `t`.`a` = `test`.`t3`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t3`.`b` > 15
@@ -15322,7 +15322,7 @@ EXPLAIN
"key_length": "133",
"used_key_parts": ["a", "c"],
"ref": ["test.t3.a", "test.t3.c"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -15367,7 +15367,7 @@ on t3.a=t.a and t3.c=t.c
where t3.b <= 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL idx_b NULL NULL NULL 12 75.00 Using where
-1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 4 100.00
+1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 1 100.00
2 DERIVED t4 ALL idx NULL NULL NULL 40 100.00 Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`c` AS `c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t3` join (/* select#2 */ select `test`.`t4`.`a` AS `a`,`test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` group by `test`.`t4`.`a`,`test`.`t4`.`c`) `t` where `t`.`a` = `test`.`t3`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t3`.`b` <= 15
@@ -15396,7 +15396,7 @@ EXPLAIN
"key_length": "133",
"used_key_parts": ["a", "c"],
"ref": ["test.t3.a", "test.t3.c"],
- "rows": 4,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -15441,7 +15441,7 @@ on t3.a=t.a and t3.c=t.c
where t3.b > 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 range idx_b idx_b 5 NULL 3 100.00 Using index condition; Using where
-1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 2 100.00
+1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 1 100.00
2 LATERAL DERIVED t4 ref idx idx 133 test.t3.a,test.t3.c 1 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`c` AS `c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t3` join (/* select#2 */ select `test`.`t4`.`a` AS `a`,`test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` where `test`.`t4`.`a` = `test`.`t3`.`a` and `test`.`t4`.`c` = `test`.`t3`.`c` group by `test`.`t4`.`c`,`test`.`t4`.`a`) `t` where `t`.`a` = `test`.`t3`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t3`.`b` > 15
@@ -15474,7 +15474,7 @@ EXPLAIN
"key_length": "133",
"used_key_parts": ["a", "c"],
"ref": ["test.t3.a", "test.t3.c"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -15519,7 +15519,7 @@ on t3.a=t.a and t3.c=t.c
where t3.b <= 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL idx_b NULL NULL NULL 12 75.00 Using where
-1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 4 100.00
+1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 1 100.00
2 DERIVED t4 ALL idx NULL NULL NULL 40 100.00 Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`c` AS `c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t3` join (/* select#2 */ select `test`.`t4`.`a` AS `a`,`test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` group by `test`.`t4`.`c`,`test`.`t4`.`a`) `t` where `t`.`a` = `test`.`t3`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t3`.`b` <= 15
@@ -15548,7 +15548,7 @@ EXPLAIN
"key_length": "133",
"used_key_parts": ["a", "c"],
"ref": ["test.t3.a", "test.t3.c"],
- "rows": 4,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -15603,7 +15603,7 @@ where t2.b between 80 and 85 and t2.c in ('y','z') and t2.a=t3.a and t3.c=t.c;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 range idx idx 133 NULL 2 100.00 Using index condition; Using where
1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 2 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 2 100.00
+1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 1 100.00
2 LATERAL DERIVED t4 ref idx_c idx_c 128 test.t3.c 3 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`t`.`c` AS `t_c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t2` join `test`.`t3` join (/* select#2 */ select `test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` where `test`.`t4`.`c` = `test`.`t3`.`c` group by `test`.`t4`.`c`) `t` where `test`.`t3`.`a` = `test`.`t2`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t2`.`b` between 80 and 85 and `test`.`t2`.`c` in ('y','z')
@@ -15646,7 +15646,7 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t3.c"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -15750,7 +15750,7 @@ where t2.b < 40 and t2.a=t3.a and t3.c=t.c;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 90 100.00 Using where
1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 2 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 10 100.00
+1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 1 100.00
2 DERIVED t4 ALL idx_c NULL NULL NULL 160 100.00 Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`t`.`c` AS `t_c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t2` join `test`.`t3` join (/* select#2 */ select `test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` group by `test`.`t4`.`c`) `t` where `test`.`t3`.`a` = `test`.`t2`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t2`.`b` < 40
@@ -15788,7 +15788,7 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t3.c"],
- "rows": 10,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -16344,7 +16344,7 @@ a c
explain extended SELECT * FROM t4 WHERE c IN ( SELECT c FROM v1 ) and a < 2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t4 range a a 5 NULL 1 100.00 Using index condition; Using where
-1 PRIMARY <derived3> ref key0 key0 128 test.t4.c 2 100.00 FirstMatch(t4)
+1 PRIMARY <derived3> ref key0 key0 128 test.t4.c 1 100.00 FirstMatch(t4)
3 LATERAL DERIVED t3 ref c c 128 test.t4.c 2 100.00
3 LATERAL DERIVED <subquery4> eq_ref distinct_key distinct_key 4 func 1 100.00
4 MATERIALIZED t1 ALL NULL NULL NULL NULL 3 100.00
@@ -16376,7 +16376,7 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t4.c"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"first_match": "t4",
"materialized": {
@@ -16498,7 +16498,7 @@ pk1 f pk2 cnt
EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN v2 ON pk1 = pk2 WHERE f <> 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 2 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 4 test.t1.pk1 2 100.00
+1 PRIMARY <derived2> ref key0 key0 4 test.t1.pk1 1 100.00
2 LATERAL DERIVED t2 eq_ref PRIMARY PRIMARY 4 test.t1.pk1 1 100.00 Using index
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`f` AS `f`,`v2`.`pk2` AS `pk2`,`v2`.`cnt` AS `cnt` from `test`.`t1` join `test`.`v2` where `v2`.`pk2` = `test`.`t1`.`pk1` and `test`.`t1`.`f` <> 5
@@ -16523,7 +16523,7 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["pk2"],
"ref": ["test.t1.pk1"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -16737,7 +16737,7 @@ left join
on u.id=auditlastlogin.userid;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY u ALL NULL NULL NULL NULL 2
-1 PRIMARY <derived2> ref key0 key0 5 test.u.id 2
+1 PRIMARY <derived2> ref key0 key0 5 test.u.id 1
2 DERIVED au ALL NULL NULL NULL NULL 4 Using temporary; Using filesort
select * from t1 as u
left join
diff --git a/mysql-test/main/derived_opt.result b/mysql-test/main/derived_opt.result
index 48ac7e62653..40e034e5c61 100644
--- a/mysql-test/main/derived_opt.result
+++ b/mysql-test/main/derived_opt.result
@@ -535,7 +535,7 @@ ON t2.id=t.id
WHERE t2.id < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 3 Using index condition
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.id 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.id 1
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using temporary; Using filesort
set join_cache_level=default;
set optimizer_switch= @save_optimizer_switch;
diff --git a/mysql-test/main/derived_split_innodb.result b/mysql-test/main/derived_split_innodb.result
index b9ed016429b..6625bd7df0d 100644
--- a/mysql-test/main/derived_split_innodb.result
+++ b/mysql-test/main/derived_split_innodb.result
@@ -16,7 +16,7 @@ EXPLAIN SELECT t1.n1 FROM t1, (SELECT n1, n2 FROM t1 WHERE c1 = 'a' GROUP BY n1)
WHERE t.n1 = t1.n1 AND t.n2 = t1.n2 AND c1 = 'a' GROUP BY n1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index c1,n1_c1_n2 n1_c1_n2 9 NULL 2 Using where; Using index
-1 PRIMARY <derived2> ref key0 key0 8 test.t1.n1,test.t1.n2 2
+1 PRIMARY <derived2> ref key0 key0 8 test.t1.n1,test.t1.n2 1
2 LATERAL DERIVED t1 ref c1,n1_c1_n2 n1_c1_n2 4 test.t1.n1 1 Using where; Using index
SELECT t1.n1 FROM t1, (SELECT n1, n2 FROM t1 WHERE c1 = 'a' GROUP BY n1) as t
WHERE t.n1 = t1.n1 AND t.n2 = t1.n2 AND c1 = 'a' GROUP BY n1;
@@ -95,7 +95,7 @@ ON t2.id=t.id
WHERE t2.id < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.id 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.id 1
2 LATERAL DERIVED t1 eq_ref PRIMARY PRIMARY 4 test.t2.id 1
set join_cache_level=default;
DROP TABLE t1,t2;
diff --git a/mysql-test/main/derived_view.result b/mysql-test/main/derived_view.result
index 30831e75341..242f8514bf4 100644
--- a/mysql-test/main/derived_view.result
+++ b/mysql-test/main/derived_view.result
@@ -213,7 +213,7 @@ explain extended
select * from t1 join (select * from t2 group by f2) tt on f1=f2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 2 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 1 100.00
2 DERIVED t2 ALL NULL NULL NULL NULL 11 100.00 Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`f1` AS `f1`,`test`.`t1`.`f11` AS `f11`,`tt`.`f2` AS `f2`,`tt`.`f22` AS `f22` from `test`.`t1` join (/* select#2 */ select `test`.`t2`.`f2` AS `f2`,`test`.`t2`.`f22` AS `f22` from `test`.`t2` group by `test`.`t2`.`f2`) `tt` where `tt`.`f2` = `test`.`t1`.`f1`
@@ -227,7 +227,7 @@ flush status;
explain select * from t1 join (select * from t2 group by f2) tt on f1=f2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 11 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 1
2 DERIVED t2 ALL NULL NULL NULL NULL 11 Using temporary; Using filesort
show status like 'Handler_read%';
Variable_name Value
@@ -287,7 +287,7 @@ explain showing created indexes
explain extended select * from t1 join v2 on f1=f2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 2 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 1 100.00
2 DERIVED t2 ALL NULL NULL NULL NULL 11 100.00 Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`f1` AS `f1`,`test`.`t1`.`f11` AS `f11`,`v2`.`f2` AS `f2`,`v2`.`f22` AS `f22` from `test`.`t1` join `test`.`v2` where `v2`.`f2` = `test`.`t1`.`f1`
@@ -338,7 +338,7 @@ flush status;
explain select * from t1 join v2 on f1=f2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 11 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 1
2 DERIVED t2 ALL NULL NULL NULL NULL 11 Using temporary; Using filesort
show status like 'Handler_read%';
Variable_name Value
@@ -371,7 +371,7 @@ Handler_read_rnd_next 36
explain extended select * from v1 join v4 on f1=f2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.f2 2 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.f2 1 100.00
2 DERIVED t1 ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `v1`.`f1` AS `f1`,`v1`.`f11` AS `f11`,`test`.`t2`.`f2` AS `f2`,`test`.`t2`.`f22` AS `f22` from `test`.`v1` join `test`.`t2` where `v1`.`f1` = `test`.`t2`.`f2` and `test`.`t2`.`f2` in (2,3)
@@ -395,7 +395,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["f1"],
"ref": ["test.t2.f2"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -529,7 +529,7 @@ join
on x.f1 = z.f1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <derived3> ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived5> ref key0 key0 5 tt.f1 2 100.00
+1 PRIMARY <derived5> ref key0 key0 5 tt.f1 1 100.00
5 DERIVED t1 ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
3 DERIVED t1 ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
Warnings:
@@ -575,7 +575,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["f1"],
"ref": ["tt.f1"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -652,7 +652,7 @@ join
on x.f1 = z.f1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived4> ref key0 key0 5 x.f1 2 100.00
+1 PRIMARY <derived4> ref key0 key0 5 x.f1 1 100.00
4 DERIVED <derived5> ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
5 DERIVED t1 ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
2 DERIVED <derived3> ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
@@ -719,7 +719,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["f1"],
"ref": ["x.f1"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -863,7 +863,7 @@ join of above two
explain extended select * from v6 join v7 on f2=f1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived5> ref key0 key0 5 test.t2.f2 2 100.00
+1 PRIMARY <derived5> ref key0 key0 5 test.t2.f2 1 100.00
5 DERIVED t1 ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`f2` AS `f2`,`test`.`t2`.`f22` AS `f22`,`v1`.`f1` AS `f1`,`v1`.`f11` AS `f11` from `test`.`t2` join `test`.`v1` where `v1`.`f1` = `test`.`t2`.`f2` and `test`.`t2`.`f2` < 7 and `test`.`t2`.`f2` in (2,3)
@@ -887,7 +887,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["f1"],
"ref": ["test.t2.f2"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"materialized": {
"query_block": {
@@ -917,7 +917,7 @@ test two keys
explain select * from t1 join (select * from t2 group by f2) tt on t1.f1=tt.f2 join t1 xx on tt.f22=xx.f1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 11 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 1
1 PRIMARY xx ALL NULL NULL NULL NULL 11 Using where; Using join buffer (flat, BNL join)
2 DERIVED t2 ALL NULL NULL NULL NULL 11 Using temporary; Using filesort
select * from t1 join (select * from t2 group by f2) tt on t1.f1=tt.f2 join t1 xx on tt.f22=xx.f1;
@@ -1106,7 +1106,7 @@ SELECT * FROM t1, t2, v1 WHERE t2.a=t1.a AND t2.a=v1.a AND t2.a=v1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
1 PRIMARY t2 ref a a 4 const 1 Using index
-1 PRIMARY <derived2> ref key0 key0 8 const,const 1
+1 PRIMARY <derived2> ref key1 key1 8 func,func 1
2 DERIVED t3 ALL NULL NULL NULL NULL 12 Using temporary; Using filesort
SELECT * FROM t1, t2, v1 WHERE t2.a=t1.a AND t2.a=v1.a AND t2.a=v1.b;
a a a b
@@ -1139,7 +1139,7 @@ SELECT * FROM t3
WHERE t3.a IN (SELECT v1.a FROM v1, t2 WHERE t2.a = v1.b);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 4 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> ref key1 key1 5 func 2 100.00
+2 DEPENDENT SUBQUERY <derived3> ref key1 key1 5 func 1 100.00
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
3 DERIVED t1 ALL NULL NULL NULL NULL 3 100.00 Using temporary; Using filesort
Warnings:
@@ -1505,7 +1505,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 2 FirstMatch(t1)
+1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 1 FirstMatch(t1)
3 DERIVED t2 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
SELECT * FROM v2;
a b
@@ -1932,7 +1932,7 @@ EXPLAIN
SELECT v1.a FROM v1,v2 WHERE v2.b = v1.b ORDER BY 1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 Using where; Using filesort
-1 PRIMARY <derived3> ref key0 key0 4 v1.b 2
+1 PRIMARY <derived3> ref key0 key0 4 v1.b 1
3 DERIVED t2 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort
2 DERIVED t1 ALL NULL NULL NULL NULL 3 Using temporary; Using filesort
DROP VIEW v1,v2;
diff --git a/mysql-test/main/join_cache.result b/mysql-test/main/join_cache.result
index fde6e0fec6b..23396d22876 100644
--- a/mysql-test/main/join_cache.result
+++ b/mysql-test/main/join_cache.result
@@ -5197,7 +5197,7 @@ SELECT * FROM (SELECT DISTINCT * FROM t1) t
WHERE t.a IN (SELECT t2.a FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where; Start temporary
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 End temporary
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 1 End temporary
2 DERIVED t1 ALL NULL NULL NULL NULL 3 Using temporary
SELECT * FROM (SELECT DISTINCT * FROM t1) t
WHERE t.a IN (SELECT t2.a FROM t2);
@@ -5208,8 +5208,8 @@ EXPLAIN
SELECT * FROM (SELECT DISTINCT * FROM t1) t
WHERE t.a IN (SELECT t2.a FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3
-1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where; Start temporary
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 1 End temporary
2 DERIVED t1 ALL NULL NULL NULL NULL 3 Using temporary
SELECT * FROM (SELECT DISTINCT * FROM t1) t
WHERE t.a IN (SELECT t2.a FROM t2);
diff --git a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result
index 00907235ecc..69f4be54414 100644
--- a/mysql-test/main/selectivity.result
+++ b/mysql-test/main/selectivity.result
@@ -140,7 +140,7 @@ and total_revenue = (select max(total_revenue) from revenue0)
order by s_suppkey;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY supplier ALL PRIMARY NULL NULL NULL 10 100.00 Using filesort
-1 PRIMARY <derived3> ref key0 key0 5 dbt3_s001.supplier.s_suppkey 10 100.00 Using where
+1 PRIMARY <derived3> ref key0 key0 5 dbt3_s001.supplier.s_suppkey 1 100.00 Using where
3 DERIVED lineitem range i_l_shipdate,i_l_suppkey i_l_shipdate 4 NULL 268 100.00 Using where; Using temporary; Using filesort
2 SUBQUERY <derived4> ALL NULL NULL NULL NULL 268 100.00
4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 268 100.00 Using where; Using temporary; Using filesort
@@ -161,7 +161,7 @@ and total_revenue = (select max(total_revenue) from revenue0)
order by s_suppkey;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY supplier ALL PRIMARY NULL NULL NULL 10 100.00 Using filesort
-1 PRIMARY <derived3> ref key0 key0 5 dbt3_s001.supplier.s_suppkey 10 100.00 Using where
+1 PRIMARY <derived3> ref key0 key0 5 dbt3_s001.supplier.s_suppkey 1 100.00 Using where
3 DERIVED lineitem range i_l_shipdate,i_l_suppkey i_l_shipdate 4 NULL 268 100.00 Using where; Using temporary; Using filesort
2 SUBQUERY <derived4> ALL NULL NULL NULL NULL 268 100.00
4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 268 100.00 Using where; Using temporary; Using filesort
diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result
index 93917065722..be5348a2a15 100644
--- a/mysql-test/main/selectivity_innodb.result
+++ b/mysql-test/main/selectivity_innodb.result
@@ -143,7 +143,7 @@ and total_revenue = (select max(total_revenue) from revenue0)
order by s_suppkey;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY supplier index PRIMARY PRIMARY 4 NULL 10 100.00
-1 PRIMARY <derived3> ref key0 key0 5 dbt3_s001.supplier.s_suppkey 10 100.00 Using where
+1 PRIMARY <derived3> ref key0 key0 5 dbt3_s001.supplier.s_suppkey 1 100.00 Using where
3 DERIVED lineitem range i_l_shipdate,i_l_suppkey i_l_shipdate 4 NULL 229 100.00 Using where; Using temporary; Using filesort
2 SUBQUERY <derived4> ALL NULL NULL NULL NULL 229 100.00
4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 229 100.00 Using where; Using temporary; Using filesort
@@ -164,7 +164,7 @@ and total_revenue = (select max(total_revenue) from revenue0)
order by s_suppkey;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY supplier index PRIMARY PRIMARY 4 NULL 10 100.00
-1 PRIMARY <derived3> ref key0 key0 5 dbt3_s001.supplier.s_suppkey 10 100.00 Using where
+1 PRIMARY <derived3> ref key0 key0 5 dbt3_s001.supplier.s_suppkey 1 100.00 Using where
3 DERIVED lineitem range i_l_shipdate,i_l_suppkey i_l_shipdate 4 NULL 229 100.00 Using where; Using temporary; Using filesort
2 SUBQUERY <derived4> ALL NULL NULL NULL NULL 228 100.00
4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 229 100.00 Using where; Using temporary; Using filesort
diff --git a/mysql-test/main/subselect_extra.result b/mysql-test/main/subselect_extra.result
index dbcf00268c2..6bcea113759 100644
--- a/mysql-test/main/subselect_extra.result
+++ b/mysql-test/main/subselect_extra.result
@@ -389,7 +389,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 2 FirstMatch(t1)
+1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 1 FirstMatch(t1)
3 DERIVED t2 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
SELECT * FROM v2;
a b
diff --git a/mysql-test/main/subselect_extra_no_semijoin.result b/mysql-test/main/subselect_extra_no_semijoin.result
index 49a1431eb9b..7e257cb9291 100644
--- a/mysql-test/main/subselect_extra_no_semijoin.result
+++ b/mysql-test/main/subselect_extra_no_semijoin.result
@@ -391,7 +391,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 10 func,func 2 Using where
+2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 10 func,func 1 Using where
3 DERIVED t2 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
SELECT * FROM v2;
a b
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index c52005e7683..aa645ac00ca 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -7617,6 +7617,68 @@ Item *st_select_lex::build_cond_for_grouping_fields(THD *thd, Item *cond,
}
+/**
+ Check whether the GROUP BY fields form a prefix of the key parts used for ref
+ access to this derived table; if so, set rec_per_key=1 for the trailing key parts.
+*/
+
+void st_select_lex::is_group_by_prefix(KEY *keyinfo)
+{
+ uint key_parts= keyinfo->usable_key_parts;
+ KEY_PART_INFO *key_part_info= keyinfo->key_part;
+ bool found= FALSE;
+
+ if (key_parts < group_list.elements)
+ return;
+
+ uint matched_fields=0, i, j;
+ Item *item;
+
+ for (i= 0; i < key_parts; key_part_info++, i++)
+ {
+ uint fld_idx= key_part_info->fieldnr - 1;
+ item= join->fields_list.elem(fld_idx);
+ for (ORDER *order= group_list.first; order; order= order->next)
+ {
+ Item *ord_item= order->item[0]->real_item();
+ Item_equal *item_equal= ord_item->get_item_equal();
+
+ if (item_equal)
+ {
+ Item_equal_fields_iterator it(*item_equal);
+ Item *equal_item;
+ while ((equal_item= it++))
+ {
+ if (equal_item->eq(item, 0))
+ {
+ matched_fields++;
+ found= TRUE;
+ break;
+ }
+ }
+ }
+ else
+ {
+ if (item->eq(ord_item, 0))
+ {
+ matched_fields++;
+ found= TRUE;
+ }
+ }
+ if (found)
+ break;
+ }
+
+ if (matched_fields == group_list.elements)
+ {
+ for (j=matched_fields - 1; j < key_parts; j++)
+ keyinfo->rec_per_key[j]= 1;
+ return;
+ }
+ found= FALSE;
+ }
+}
+
int set_statement_var_if_exists(THD *thd, const char *var_name,
size_t var_name_length, ulonglong value)
{
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 4eaec7d062b..6f5d289f3be 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1387,6 +1387,7 @@ class st_select_lex: public st_select_lex_node
bool cond_pushdown_is_allowed() const
{ return !olap && !explicit_limit && !tvc; }
+ void is_group_by_prefix(KEY *keyinfo);
private:
bool m_non_agg_field_used;
diff --git a/sql/table.cc b/sql/table.cc
index c4494c9ae4b..a4d050748ba 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -7287,6 +7287,9 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
derived->check_distinct_in_union())
keyinfo->rec_per_key[key_parts - 1]= 1;
}
+
+ if (!first->is_part_of_union() && first->group_list.elements)
+ first->is_group_by_prefix(keyinfo);
}
set_if_bigger(s->max_key_length, keyinfo->key_length);

[Commits] b9dd8f8: MDEV-19324 Wrong results from query, using brackets with ORDER BY ..LIMIT
by IgorBabaev 26 Apr '19
revision-id: b9dd8f8fbd7eb63970e372fb6c3d34595022ade0 (mariadb-10.4.4-37-gb9dd8f8)
parent(s): baadbe96019b205164167928d80e836ebbb6bcfe
author: Igor Babaev
committer: Igor Babaev
timestamp: 2019-04-26 17:55:12 -0700
message:
MDEV-19324 Wrong results from query, using brackets with ORDER BY ..LIMIT
If a select query was of the form (SELECT ... ORDER BY ...) LIMIT ...
then in most cases it returned an incorrect result. This happened because
SELECT ... ORDER BY ... was wrapped into a select with materialized
derived table:
SELECT ... ORDER BY ... =>
SELECT * FROM (SELECT ... ORDER BY ...) dt.
Yet for any materialized derived table ORDER BY without LIMIT is ignored.
This patch resolves the problem by the conversion
(SELECT ... ORDER BY ...) LIMIT ... =>
SELECT ... ORDER BY ... LIMIT ...
at the parser stage.
Similarly
((SELECT ... UNION ...) ORDER BY ...) LIMIT ...
is converted to
(SELECT ... UNION ...) ORDER BY ... LIMIT ...
This conversion optimizes execution of the query because the result of
(SELECT ... UNION ...) ORDER BY ... is not materialized into a temporary
table anymore.
---
mysql-test/main/brackets.result | 150 ++++++++++++++++++++++++++++++++++++++++
mysql-test/main/brackets.test | 33 +++++++++
sql/sql_lex.cc | 13 +++-
3 files changed, 194 insertions(+), 2 deletions(-)
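For illustration only (throwaway table, mirroring the new brackets.test below): the bracketed ORDER BY is no longer lost when LIMIT is applied outside the brackets.

  CREATE TABLE t1 (a INT);
  INSERT INTO t1 VALUES (10),(20),(30);
  -- rewritten at parse time to: SELECT a FROM t1 ORDER BY a DESC LIMIT 1
  (SELECT a FROM t1 ORDER BY a DESC) LIMIT 1;
  -- expected result: 30
  DROP TABLE t1;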
diff --git a/mysql-test/main/brackets.result b/mysql-test/main/brackets.result
index e789cde..3cf3468 100644
--- a/mysql-test/main/brackets.result
+++ b/mysql-test/main/brackets.result
@@ -243,4 +243,154 @@ a
a
1
DROP TABLE t1,t2;
+#
+# MDEV-19324: ((SELECT ...) ORDER BY col ) LIMIT n
+#
+create table t1 (a int);
+insert into t1 values (10),(20),(30);
+select a from t1 order by a desc limit 1;
+a
+30
+explain extended select a from t1 order by a desc limit 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using filesort
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` order by `test`.`t1`.`a` desc limit 1
+explain format=json select a from t1 order by a desc limit 1;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "read_sorted_file": {
+ "filesort": {
+ "sort_key": "t1.a desc",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100
+ }
+ }
+ }
+ }
+}
+(select a from t1 order by a desc) limit 1;
+a
+30
+explain extended (select a from t1 order by a desc) limit 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using filesort
+Warnings:
+Note 1003 (select `test`.`t1`.`a` AS `a` from `test`.`t1` order by `test`.`t1`.`a` desc limit 1)
+explain format=json (select a from t1 order by a desc) limit 1;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "read_sorted_file": {
+ "filesort": {
+ "sort_key": "t1.a desc",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100
+ }
+ }
+ }
+ }
+}
+(select a from t1 where a=20 union select a from t1) order by a desc limit 1;
+a
+30
+explain extended (select a from t1 where a=20 union select a from t1) order by a desc limit 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+2 UNION t1 ALL NULL NULL NULL NULL 3 100.00
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL NULL Using filesort
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 20 union /* select#2 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` order by `a` desc limit 1
+explain format=json (select a from t1 where a=20 union select a from t1) order by a desc limit 1;
+EXPLAIN
+{
+ "query_block": {
+ "union_result": {
+ "table_name": "<union1,2>",
+ "access_type": "ALL",
+ "query_specifications": [
+ {
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t1.a = 20"
+ }
+ }
+ },
+ {
+ "query_block": {
+ "select_id": 2,
+ "operation": "UNION",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100
+ }
+ }
+ }
+ ]
+ }
+ }
+}
+((select a from t1 where a=20 union select a from t1) order by a desc) limit 1;
+a
+30
+explain extended ((select a from t1 where a=20 union select a from t1) order by a desc) limit 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+2 UNION t1 ALL NULL NULL NULL NULL 3 100.00
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL NULL Using filesort
+Warnings:
+Note 1003 (/* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 20) union /* select#2 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` order by `a` desc limit 1
+explain format=json ((select a from t1 where a=20 union select a from t1) order by a desc) limit 1;
+EXPLAIN
+{
+ "query_block": {
+ "union_result": {
+ "table_name": "<union1,2>",
+ "access_type": "ALL",
+ "query_specifications": [
+ {
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t1.a = 20"
+ }
+ }
+ },
+ {
+ "query_block": {
+ "select_id": 2,
+ "operation": "UNION",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100
+ }
+ }
+ }
+ ]
+ }
+ }
+}
+drop table t1;
# End of 10.4 tests
diff --git a/mysql-test/main/brackets.test b/mysql-test/main/brackets.test
index 0eaa3bf..54f7d27 100644
--- a/mysql-test/main/brackets.test
+++ b/mysql-test/main/brackets.test
@@ -106,5 +106,38 @@ INSERT INTO t2 VALUES (4),(5),(6),(7);
DROP TABLE t1,t2;
+--echo #
+--echo # MDEV-19324: ((SELECT ...) ORDER BY col ) LIMIT n
+--echo #
+
+create table t1 (a int);
+insert into t1 values (10),(20),(30);
+
+let $q1=
+select a from t1 order by a desc limit 1;
+eval $q1;
+eval explain extended $q1;
+eval explain format=json $q1;
+
+let $q2=
+(select a from t1 order by a desc) limit 1;
+eval $q2;
+eval explain extended $q2;
+eval explain format=json $q2;
+
+let $q1=
+(select a from t1 where a=20 union select a from t1) order by a desc limit 1;
+eval $q1;
+eval explain extended $q1;
+eval explain format=json $q1;
+
+let $q2=
+((select a from t1 where a=20 union select a from t1) order by a desc) limit 1;
+eval $q2;
+eval explain extended $q2;
+eval explain format=json $q2;
+
+drop table t1;
+
--echo # End of 10.4 tests
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 8ea0bc5..ee483ac 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -9111,7 +9111,13 @@ SELECT_LEX *LEX::parsed_select(SELECT_LEX *sel, Lex_order_limit_lock * l)
l->set_to(unit->fake_select_lex);
else
{
- sel= wrap_unit_into_derived(unit);
+ if (!l->order_list && !unit->fake_select_lex->explicit_limit)
+ {
+ sel= unit->fake_select_lex;
+ l->order_list= &sel->order_list;
+ }
+ else
+ sel= wrap_unit_into_derived(unit);
if (!sel)
return NULL;
l->set_to(sel);
@@ -9126,7 +9132,10 @@ SELECT_LEX *LEX::parsed_select(SELECT_LEX *sel, Lex_order_limit_lock * l)
SELECT_LEX_UNIT *unit= create_unit(sel);
if (!unit)
return NULL;
- sel= wrap_unit_into_derived(unit);
+ if (!l->order_list && !sel->explicit_limit)
+ l->order_list= &sel->order_list;
+ else
+ sel= wrap_unit_into_derived(unit);
if (!sel)
return NULL;
l->set_to(sel);

[Commits] 3032cd8e91f: step #1: if a derived table has SELECT DISTINCT, provide index statistics for it so that the join optimizer in the upper select knows that ref access to the table will produce one row
by Varun 26 Apr '19
revision-id: 3032cd8e91f1e1ead8b6f941e75cd29e473e7eaa (mariadb-10.3.10-283-g3032cd8e91f)
parent(s): f4019f5b3544a18f3ddf32df2c5214c3f8dabdce
author: Varun Gupta
committer: Varun Gupta
timestamp: 2019-04-22 18:19:25 +0530
message:
step #1: if a derived table has SELECT DISTINCT, provide index statistics for it so that the join optimizer in the upper select knows that ref access to the table will produce one row.
---
---
mysql-test/main/cte_nonrecursive.result | 8 ++--
mysql-test/main/derived.result | 54 ++++++++++++++++++++++
mysql-test/main/derived.test | 30 ++++++++++++
mysql-test/main/derived_view.result | 2 +-
mysql-test/main/subselect_extra.result | 2 +-
mysql-test/main/subselect_extra_no_semijoin.result | 2 +-
sql/sql_lex.h | 1 +
sql/sql_union.cc | 40 ++++++++++++++++
sql/table.cc | 20 ++++++++
9 files changed, 152 insertions(+), 7 deletions(-)
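As an illustrative aside (not part of the patch; the tables and data below are made up for this sketch): a derived table populated with SELECT DISTINCT cannot contain two identical rows, so a ref lookup that constrains all of its select-list columns can match at most one row, and the temporary key built over the derived table can advertise that to the join optimizer.

-- Hypothetical example, assuming tables named customers/orders that do not
-- appear in the patch. The derived table dt is distinct over customer_id,
-- so with this change EXPLAIN is expected to report rows=1 for the ref
-- access to <derived2> instead of the previous estimate of 2.
create table customers (customer_id int);
create table orders (customer_id int, amount int);
insert into customers values (1),(2),(3);
insert into orders values (1,10),(1,20),(2,30);
explain
select *
from customers c,
     (select distinct customer_id from orders) dt
where c.customer_id = dt.customer_id;
drop table customers, orders;
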
diff --git a/mysql-test/main/cte_nonrecursive.result b/mysql-test/main/cte_nonrecursive.result
index b846ec2d8ac..d80d34ecc7f 100644
--- a/mysql-test/main/cte_nonrecursive.result
+++ b/mysql-test/main/cte_nonrecursive.result
@@ -244,7 +244,7 @@ with t as (select distinct a from t1 where b >= 'c')
select * from t as r1, t as r2 where r1.a=r2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 8 Using where
-1 PRIMARY <derived3> ref key0 key0 5 r1.a 2
+1 PRIMARY <derived3> ref key0 key0 5 r1.a 1
3 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary
explain
@@ -253,7 +253,7 @@ select * from (select distinct a from t1 where b >= 'c') as r1,
where r1.a=r2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 8 Using where
-1 PRIMARY <derived3> ref key0 key0 5 r1.a 2
+1 PRIMARY <derived3> ref key0 key0 5 r1.a 1
3 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary
# two references to t specified by a query
@@ -369,7 +369,7 @@ select c as a from t2 where c < 4)
select * from t2,t where t2.c=t.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 1
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where
3 UNION t2 ALL NULL NULL NULL NULL 4 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
@@ -381,7 +381,7 @@ select c as a from t2 where c < 4) as t
where t2.c=t.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 1
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where
3 UNION t2 ALL NULL NULL NULL NULL 4 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
diff --git a/mysql-test/main/derived.result b/mysql-test/main/derived.result
index f0d0289c1ce..857246d68b4 100644
--- a/mysql-test/main/derived.result
+++ b/mysql-test/main/derived.result
@@ -1195,3 +1195,57 @@ drop table t1,t2,t3;
#
# End of 10.2 tests
#
+#
+# MDEV-9959: A serious MariaDB server performance bug
+#
+create table t1(a int);
+insert into t1 values (1),(2),(3),(4),(5),(6);
+create table t2(a int, b int,c int);
+insert into t2(a,b,c) values (1,1,2),(2,2,3),(3,1,4),(4,2,2),(5,1,1),(6,2,5);
+create table t3(a int, b int);
+insert into t3(a,b) values (1,1),(2,2),(2,1),(1,2),(5,1),(9,2);
+table "<derived2>" should have type=ref and rows=1
+one select in derived table
+with distinct
+analyze select * from t1 , ((select distinct t2.a from t2 order by c))q where t1.a=q.a;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 1.00 100.00 100.00
+2 DERIVED t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00 Using temporary; Using filesort
+analyze select * from t1 , ((select distinct t2.a, t2.b from t2 order by c))q where t1.a=q.a;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 1.00 100.00 100.00
+2 DERIVED t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00 Using temporary; Using filesort
+# multiple selects in derived table
+# NO UNION ALL
+analyze select * from t1 , ( (select t2.a from t2 order by c) union (select t2.a from t2 order by c))q where t1.a=q.a;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 1.00 100.00 100.00
+2 DERIVED t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
+3 UNION t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
+NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL 6.00 NULL NULL
+select * from t1 , ( (select t2.a from t2 order by c) union (select t2.a from t2 order by c))q where t1.a=q.a;
+a a
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+# UNION ALL and EXCEPT
+analyze select * from t1 , ( (select t2.a from t2 order by c) union all (select t2.a from t2 order by c) except(select t3.a from t3 order by b))q where t1.a=q.a;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 0.50 100.00 100.00
+2 DERIVED t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
+3 UNION t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
+4 EXCEPT t3 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
+NULL UNIT RESULT <unit2,3,4> ALL NULL NULL NULL NULL NULL 3.00 NULL NULL
+select * from t1 , ( (select t2.a from t2 order by c) union all (select t2.a from t2 order by c) except(select t3.a from t3 order by b))q where t1.a=q.a;
+a a
+3 3
+4 4
+6 6
+drop table t1,t2,t3;
diff --git a/mysql-test/main/derived.test b/mysql-test/main/derived.test
index 6c51f23c51e..990f955450a 100644
--- a/mysql-test/main/derived.test
+++ b/mysql-test/main/derived.test
@@ -1032,3 +1032,33 @@ drop table t1,t2,t3;
--echo #
--echo # End of 10.2 tests
--echo #
+
+--echo #
+--echo # MDEV-9959: A serious MariaDB server performance bug
+--echo #
+
+create table t1(a int);
+insert into t1 values (1),(2),(3),(4),(5),(6);
+create table t2(a int, b int,c int);
+insert into t2(a,b,c) values (1,1,2),(2,2,3),(3,1,4),(4,2,2),(5,1,1),(6,2,5);
+create table t3(a int, b int);
+insert into t3(a,b) values (1,1),(2,2),(2,1),(1,2),(5,1),(9,2);
+
+--echo table "<derived2>" should have type=ref and rows=1
+--echo one select in derived table
+
+--echo with distinct
+analyze select * from t1 , ((select distinct t2.a from t2 order by c))q where t1.a=q.a;
+analyze select * from t1 , ((select distinct t2.a, t2.b from t2 order by c))q where t1.a=q.a;
+
+--echo # multiple selects in derived table
+--echo # NO UNION ALL
+analyze select * from t1 , ( (select t2.a from t2 order by c) union (select t2.a from t2 order by c))q where t1.a=q.a;
+select * from t1 , ( (select t2.a from t2 order by c) union (select t2.a from t2 order by c))q where t1.a=q.a;
+
+--echo # UNION ALL and EXCEPT
+analyze select * from t1 , ( (select t2.a from t2 order by c) union all (select t2.a from t2 order by c) except(select t3.a from t3 order by b))q where t1.a=q.a;
+
+select * from t1 , ( (select t2.a from t2 order by c) union all (select t2.a from t2 order by c) except(select t3.a from t3 order by b))q where t1.a=q.a;
+
+drop table t1,t2,t3;
diff --git a/mysql-test/main/derived_view.result b/mysql-test/main/derived_view.result
index 86dd73f5733..30831e75341 100644
--- a/mysql-test/main/derived_view.result
+++ b/mysql-test/main/derived_view.result
@@ -1525,7 +1525,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 2 FirstMatch(t1)
+1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 1 FirstMatch(t1)
3 DERIVED t2 ALL NULL NULL NULL NULL 6
4 UNION t3 ALL NULL NULL NULL NULL 4
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
diff --git a/mysql-test/main/subselect_extra.result b/mysql-test/main/subselect_extra.result
index a3a0f1f9a15..dbcf00268c2 100644
--- a/mysql-test/main/subselect_extra.result
+++ b/mysql-test/main/subselect_extra.result
@@ -409,7 +409,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 2 FirstMatch(t1)
+1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 1 FirstMatch(t1)
3 DERIVED t2 ALL NULL NULL NULL NULL 6
4 UNION t3 ALL NULL NULL NULL NULL 4
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
diff --git a/mysql-test/main/subselect_extra_no_semijoin.result b/mysql-test/main/subselect_extra_no_semijoin.result
index ec9ddb0452e..49a1431eb9b 100644
--- a/mysql-test/main/subselect_extra_no_semijoin.result
+++ b/mysql-test/main/subselect_extra_no_semijoin.result
@@ -411,7 +411,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 10 func,func 2 Using where
+2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 10 func,func 1 Using where
3 DERIVED t2 ALL NULL NULL NULL NULL 6
4 UNION t3 ALL NULL NULL NULL NULL 4
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 72ca4ac0b43..4eaec7d062b 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -896,6 +896,7 @@ class st_select_lex_unit: public st_select_lex_node {
bool union_needs_tmp_table();
void set_unique_exclude();
+ bool check_distinct_in_union();
friend struct LEX;
friend int subselect_union_engine::exec();
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 87fbbebe4ba..3fb5552c77a 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -2049,3 +2049,43 @@ void st_select_lex_unit::set_unique_exclude()
}
}
}
+
+/**
+ @brief
+ Check if the derived table is guaranteed to have distinct rows because of
+ UNION operations used to populate it.
+
+ @detail
+ UNION operation removes duplicate rows from its output. That is, a query like
+
+ select * from t1 UNION select * from t2
+
+ will not produce duplicate rows in its output, even if table t1 (and/or t2)
+ contains duplicate rows. EXCEPT and INTERSECT operations also have this
+ property.
+
+ On the other hand, the UNION ALL operation doesn't remove duplicates. (The SQL
+ standard also defines EXCEPT ALL and INTERSECT ALL, but we don't support
+ them).
+
+ st_select_lex_unit computes its value left to right. That is, if there is
+ a st_select_lex_unit object describing
+
+ (select #1) OP1 (select #2) OP2 (select #3)
+
+ then ((select #1) OP1 (select #2)) is computed first, and OP2 is computed
+ second.
+
+ How can one tell if st_select_lex_unit is guaranteed to have distinct
+ output rows? This depends on whether the last operation was duplicate-
+ removing or not:
+ - UNION ALL is not duplicate-removing
+ - all other operations are duplicate-removing
+*/
+
+bool st_select_lex_unit::check_distinct_in_union()
+{
+ if (union_distinct && !union_distinct->next_select())
+ return true;
+ return false;
+}
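
A short illustration of the left-to-right rule described in the comment above, reusing the tables from the derived.test case in this patch (the inline comments are editorial, not part of the patch): distinctness of the whole unit is decided by the last set operation.

-- Last operation is UNION ALL, so duplicates may survive and
-- check_distinct_in_union() is expected to return false for this unit:
(select t2.a from t2) union all (select t2.a from t2);
-- The unit is evaluated left to right; the last operation here is EXCEPT,
-- which removes duplicates, so the final rows are distinct and
-- check_distinct_in_union() is expected to return true:
(select t2.a from t2) union all (select t2.a from t2) except (select t3.a from t3);
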
diff --git a/sql/table.cc b/sql/table.cc
index 80995abc1f9..c4494c9ae4b 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -7269,6 +7269,26 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
key_part_info++;
}
+ /*
+ For the case when there is a derived table that would give distinct rows,
+ the index statistics are passed to the join optimizer to tell that a ref
+ access to all the fields of the derived table will produce only one row.
+ */
+
+ st_select_lex_unit* derived= pos_in_table_list ?
+ pos_in_table_list->derived: NULL;
+ if (derived)
+ {
+ st_select_lex* first= derived->first_select();
+ uint select_list_items= first->get_item_list()->elements;
+ if (key_parts == select_list_items)
+ {
+ if ((!first->is_part_of_union() && (first->options & SELECT_DISTINCT)) ||
+ derived->check_distinct_in_union())
+ keyinfo->rec_per_key[key_parts - 1]= 1;
+ }
+ }
+
set_if_bigger(s->max_key_length, keyinfo->key_length);
s->keys++;
return FALSE;
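
To connect the hunk above with the test results earlier in this mail: the hint is only applied when the generated key covers the whole select list of the derived table (key_parts == select_list_items), which is why the two ANALYZE statements from derived.test behave differently.

-- DISTINCT over a single column, and the ref key covers that column:
-- key_parts == select_list_items, rec_per_key is set to 1, and ANALYZE
-- shows rows=1 for <derived2>:
analyze select * from t1, ((select distinct t2.a from t2 order by c)) q where t1.a = q.a;
-- DISTINCT over (a,b), but the ref key is built on q.a only:
-- key_parts != select_list_items, so no uniqueness guarantee is assumed and
-- the estimate stays at rows=2:
analyze select * from t1, ((select distinct t2.a, t2.b from t2 order by c)) q where t1.a = q.a;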