[Commits] 1c6c0e651fc: MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly
by Varun 05 Apr '19
revision-id: 1c6c0e651fc3d6c576f2c49c8642b8b54f94f415 (mariadb-10.4.3-162-g1c6c0e651fc)
parent(s): 02d9b048a2ab549a3227a81e15ff2f8c45562a65
author: Varun Gupta
committer: Varun Gupta
timestamp: 2019-04-05 14:39:01 +0530
message:
MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly
Changed the function append_range_all_keyparts to use sel_arg_range_seq_init / sel_arg_range_seq_next to produce ranges.
Also adjusted the print format for the ranges; they are now printed as:
(keypart1_min, keypart2_min, ...) OP (keypart1_name, keypart2_name, ...) OP (keypart1_max, keypart2_max, ...)
Also added more optimizer trace tests for range and index merge access.
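As an illustration (not part of the patch itself), a minimal sketch of how the new format shows up in the trace for a two-part key; the table and index names below are made up for the example, the expected range string follows the output in the tests added by this patch:

create table t_example (a int, b int, key k_ab(a,b));
set optimizer_trace='enabled=on';
explain select * from t_example where a=2 and b=4;
-- with this patch the range for k_ab is traced as one tuple per bound,
-- e.g. "(2,4) <= (a,b) <= (2,4)" instead of "2 <= a <= 2 AND 4 <= b <= 4"
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
drop table t_example;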
---
mysql-test/main/opt_trace.result | 276 ++++++++++-
mysql-test/main/opt_trace.test | 57 +++
mysql-test/main/opt_trace_index_merge.result | 509 ++++++++++++++++++++-
mysql-test/main/opt_trace_index_merge.test | 112 +++++
.../main/opt_trace_index_merge_innodb.result | 6 +-
sql/opt_range.cc | 283 ++++++------
sql/opt_range.h | 7 +-
7 files changed, 1080 insertions(+), 170 deletions(-)
diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result
index 12d4c713886..ed5881e08fd 100644
--- a/mysql-test/main/opt_trace.result
+++ b/mysql-test/main/opt_trace.result
@@ -1248,7 +1248,7 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
{
"index": "a",
"covering": true,
- "ranges": ["2 <= b <= 2 AND 3 <= c <= 3"],
+ "ranges": ["(2,3) <= (b,c) <= (2,3)"],
"rows": 8,
"cost": 2.2
}
@@ -1264,7 +1264,7 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
"rows": 8,
"cost": 2.2,
"key_parts_used_for_access": ["a", "b", "c"],
- "ranges": ["2 <= b <= 2 AND 3 <= c <= 3"],
+ "ranges": ["(2,3) <= (b,c) <= (2,3)"],
"chosen": false,
"cause": "cost"
},
@@ -1446,7 +1446,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
{
"index": "id",
"covering": true,
- "ranges": ["0x24a20f <= a"],
+ "ranges": ["(0x24a20f) <= (a)"],
"rows": 9,
"cost": 2.35
}
@@ -1462,7 +1462,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
"rows": 9,
"cost": 2.35,
"key_parts_used_for_access": ["id"],
- "ranges": ["0x24a20f <= a"],
+ "ranges": ["(0x24a20f) <= (a)"],
"chosen": false,
"cause": "cost"
},
@@ -1624,7 +1624,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
{
"index": "id",
"covering": true,
- "ranges": ["0x24a20f <= a <= 0x24a20f"],
+ "ranges": ["(0x24a20f) <= (a) <= (0x24a20f)"],
"rows": 9,
"cost": 2.35
}
@@ -1640,7 +1640,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
"rows": 9,
"cost": 2.35,
"key_parts_used_for_access": ["id", "a"],
- "ranges": ["0x24a20f <= a <= 0x24a20f"],
+ "ranges": ["(0x24a20f) <= (a) <= (0x24a20f)"],
"chosen": false,
"cause": "cost"
},
@@ -1856,7 +1856,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"range_scan_alternatives": [
{
"index": "a_c",
- "ranges": ["1 <= a <= 1"],
+ "ranges": ["(1) <= (a) <= (1)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@@ -1866,7 +1866,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
},
{
"index": "a_b",
- "ranges": ["1 <= a <= 1 AND 2 <= b <= 2"],
+ "ranges": ["(1,2) <= (a,b) <= (1,2)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@@ -1885,7 +1885,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"type": "range_scan",
"index": "a_b",
"rows": 21,
- "ranges": ["1 <= a <= 1 AND 2 <= b <= 2"]
+ "ranges": ["(1,2) <= (a,b) <= (1,2)"]
},
"rows_for_plan": 21,
"cost_for_plan": 27.445,
@@ -2025,7 +2025,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"range_scan_alternatives": [
{
"index": "a_c",
- "ranges": ["1 <= a <= 1"],
+ "ranges": ["(1) <= (a) <= (1)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@@ -2044,7 +2044,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"type": "range_scan",
"index": "a_c",
"rows": 180,
- "ranges": ["1 <= a <= 1"]
+ "ranges": ["(1) <= (a) <= (1)"]
},
"rows_for_plan": 180,
"cost_for_plan": 231.72,
@@ -2895,7 +2895,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"range_scan_alternatives": [
{
"index": "pk",
- "ranges": ["2 <= pk <= 2"],
+ "ranges": ["(2) <= (pk) <= (2)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@@ -2906,7 +2906,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
},
{
"index": "pk_a",
- "ranges": ["2 <= pk <= 2 AND 5 <= a <= 5"],
+ "ranges": ["(2,5) <= (pk,a) <= (2,5)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@@ -2917,7 +2917,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
},
{
"index": "pk_a_b",
- "ranges": ["2 <= pk <= 2 AND 5 <= a <= 5 AND 1 <= b <= 1"],
+ "ranges": ["(2,5,1) <= (pk,a,b) <= (2,5,1)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
@@ -2964,7 +2964,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"type": "range_scan",
"index": "pk_a_b",
"rows": 1,
- "ranges": ["2 <= pk <= 2 AND 5 <= a <= 5 AND 1 <= b <= 1"]
+ "ranges": ["(2,5,1) <= (pk,a,b) <= (2,5,1)"]
},
"rows_for_plan": 1,
"cost_for_plan": 1.1793,
@@ -3338,7 +3338,7 @@ explain delete from t0 where t0.a<3 {
"range_scan_alternatives": [
{
"index": "a",
- "ranges": ["NULL < a < 3"],
+ "ranges": ["(NULL) < (a) < (3)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@@ -3354,7 +3354,7 @@ explain delete from t0 where t0.a<3 {
"type": "range_scan",
"index": "a",
"rows": 3,
- "ranges": ["NULL < a < 3"]
+ "ranges": ["(NULL) < (a) < (3)"]
},
"rows_for_plan": 3,
"cost_for_plan": 5.007,
@@ -3481,7 +3481,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"range_scan_alternatives": [
{
"index": "a",
- "ranges": ["NULL < a < 3"],
+ "ranges": ["(NULL) < (a) < (3)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": true,
@@ -3500,7 +3500,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"type": "range_scan",
"index": "a",
"rows": 3,
- "ranges": ["NULL < a < 3"]
+ "ranges": ["(NULL) < (a) < (3)"]
},
"rows_for_plan": 3,
"cost_for_plan": 1.407,
@@ -3546,7 +3546,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"range_scan_alternatives": [
{
"index": "a",
- "ranges": ["NULL < a < 3"],
+ "ranges": ["(NULL) < (a) < (3)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": true,
@@ -3565,7 +3565,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"type": "range_scan",
"index": "a",
"rows": 3,
- "ranges": ["NULL < a < 3"]
+ "ranges": ["(NULL) < (a) < (3)"]
},
"rows_for_plan": 3,
"cost_for_plan": 1.407,
@@ -6034,4 +6034,238 @@ COUNT(*)
1
DROP VIEW v1;
DROP TABLE t1;
+#
+# MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly.
+#
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table one_k (a int);
+insert into one_k select A.a + B.a*10 + C.a*100 from t0 A, t0 B, t0 C;
+create table t1 ( a int, b int, key a_b(a,b));
+insert into t1 select a,a from one_k;
+set optimizer_trace='enabled=on';
+explain select * from t1 force index (a_b) where a=2 and b=4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref a_b a_b 10 const,const 1 Using index
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "a_b",
+ "ranges":
+ [
+ "(2,4) <= (a,b) <= (2,4)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 1,
+ "cost": 1.1783,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+explain select * from t1 where a >= 900 and b between 10 and 20;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a_b a_b 10 NULL 107 Using where; Using index
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "a_b",
+ "ranges":
+ [
+ "(900,10) <= (a,b)"
+ ],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 107,
+ "cost": 10.955,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+drop table t0,t1;
+create table t1 (start_date date, end_date date, filler char(100), key(start_date, end_date)) ;
+insert into t1 select date_add(now(), interval a day), date_add(now(), interval (a+7) day), 'data' from one_k;
+explain select * from t1 force index(start_date) where start_date >= '2019-02-10' and end_date <'2019-04-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range start_date start_date 8 NULL 1000 Using index condition
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "start_date",
+ "ranges":
+ [
+ "(0x4ac60f,NULL) < (start_date,end_date)"
+ ],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 1000,
+ "cost": 1282.2,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+drop table t1,one_k;
+create table ten(a int);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+a int not null,
+b int not null,
+c int not null,
+d int not null,
+key a_b_c(a,b,c)
+);
+insert into t1 select a,a, a,a from ten;
+explain select * from t1 force index(a_b_c) where a between 1 and 4 and b < 50;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a_b_c a_b_c 8 NULL 4 Using index condition
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "a_b_c",
+ "ranges":
+ [
+ "(1) <= (a) < (4,50)"
+ ],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 4,
+ "cost": 6.2648,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+drop table ten,t1;
+# Ported test from MYSQL for ranges involving Binary column
+CREATE TABLE t1(i INT PRIMARY KEY, b BINARY(16), INDEX i_b(b));
+INSERT INTO t1 VALUES (1, x'D95B94336A9946A39CF5B58CFE772D8C');
+INSERT INTO t1 VALUES (2, NULL);
+EXPLAIN SELECT * FROM t1 WHERE b IN (0xD95B94336A9946A39CF5B58CFE772D8C);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref i_b i_b 17 const 1 Using index condition
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "i_b",
+ "ranges":
+ [
+ "(0xd95b94336a9946a39cf5b58cfe772d8c) <= (b) <= (0xd95b94336a9946a39cf5b58cfe772d8c)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 1,
+ "cost": 2.3797,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+EXPLAIN SELECT * FROM t1 WHERE b IS NULL;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref i_b i_b 17 const 1 Using index condition
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "i_b",
+ "ranges":
+ [
+ "(NULL) <= (b) <= (NULL)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 1,
+ "cost": 2.3797,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+drop table t1;
set optimizer_trace='enabled=off';
diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test
index 4ec7c338acd..981a53ac1ad 100644
--- a/mysql-test/main/opt_trace.test
+++ b/mysql-test/main/opt_trace.test
@@ -387,4 +387,61 @@ SELECT COUNT(*) FROM v1 WHERE MATCH (f) AGAINST ('fooba');
DROP VIEW v1;
DROP TABLE t1;
+--echo #
+--echo # MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly.
+--echo #
+
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table one_k (a int);
+insert into one_k select A.a + B.a*10 + C.a*100 from t0 A, t0 B, t0 C;
+create table t1 ( a int, b int, key a_b(a,b));
+insert into t1 select a,a from one_k;
+set optimizer_trace='enabled=on';
+
+explain select * from t1 force index (a_b) where a=2 and b=4;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+explain select * from t1 where a >= 900 and b between 10 and 20;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+drop table t0,t1;
+
+create table t1 (start_date date, end_date date, filler char(100), key(start_date, end_date)) ;
+--disable_warnings
+insert into t1 select date_add(now(), interval a day), date_add(now(), interval (a+7) day), 'data' from one_k;
+--enable_warnings
+explain select * from t1 force index(start_date) where start_date >= '2019-02-10' and end_date <'2019-04-01';
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+drop table t1,one_k;
+
+create table ten(a int);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+ a int not null,
+ b int not null,
+ c int not null,
+ d int not null,
+ key a_b_c(a,b,c)
+);
+
+insert into t1 select a,a, a,a from ten;
+explain select * from t1 force index(a_b_c) where a between 1 and 4 and b < 50;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+drop table ten,t1;
+
+--echo # Ported test from MYSQL for ranges involving Binary column
+
+CREATE TABLE t1(i INT PRIMARY KEY, b BINARY(16), INDEX i_b(b));
+INSERT INTO t1 VALUES (1, x'D95B94336A9946A39CF5B58CFE772D8C');
+INSERT INTO t1 VALUES (2, NULL);
+
+EXPLAIN SELECT * FROM t1 WHERE b IN (0xD95B94336A9946A39CF5B58CFE772D8C);
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+EXPLAIN SELECT * FROM t1 WHERE b IS NULL;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+drop table t1;
+
set optimizer_trace='enabled=off';
diff --git a/mysql-test/main/opt_trace_index_merge.result b/mysql-test/main/opt_trace_index_merge.result
index 50daef815d6..b5e68d04615 100644
--- a/mysql-test/main/opt_trace_index_merge.result
+++ b/mysql-test/main/opt_trace_index_merge.result
@@ -110,7 +110,7 @@ explain select * from t1 where a=1 or b=1 {
"range_scan_alternatives": [
{
"index": "a",
- "ranges": ["1 <= a <= 1"],
+ "ranges": ["(1) <= (a) <= (1)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
@@ -126,7 +126,7 @@ explain select * from t1 where a=1 or b=1 {
"range_scan_alternatives": [
{
"index": "b",
- "ranges": ["1 <= b <= 1"],
+ "ranges": ["(1) <= (b) <= (1)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
@@ -147,7 +147,7 @@ explain select * from t1 where a=1 or b=1 {
"type": "range_scan",
"index": "a",
"rows": 1,
- "ranges": ["1 <= a <= 1"],
+ "ranges": ["(1) <= (a) <= (1)"],
"analyzing_roworder_intersect": {
"cause": "too few roworder scans"
}
@@ -156,7 +156,7 @@ explain select * from t1 where a=1 or b=1 {
"type": "range_scan",
"index": "b",
"rows": 1,
- "ranges": ["1 <= b <= 1"],
+ "ranges": ["(1) <= (b) <= (1)"],
"analyzing_roworder_intersect": {
"cause": "too few roworder scans"
}
@@ -176,13 +176,13 @@ explain select * from t1 where a=1 or b=1 {
"type": "range_scan",
"index": "a",
"rows": 1,
- "ranges": ["1 <= a <= 1"]
+ "ranges": ["(1) <= (a) <= (1)"]
},
{
"type": "range_scan",
"index": "b",
"rows": 1,
- "ranges": ["1 <= b <= 1"]
+ "ranges": ["(1) <= (b) <= (1)"]
}
]
},
@@ -243,3 +243,500 @@ explain select * from t1 where a=1 or b=1 {
drop table t0,t1;
set optimizer_trace="enabled=off";
set @@optimizer_switch= @tmp_opt_switch;
+# More tests added index_merge access
+create table t1
+(
+/* Field names reflect value(rowid) distribution, st=STairs, swt= SaWTooth */
+st_a int not null default 0,
+swt1a int not null default 0,
+swt2a int not null default 0,
+st_b int not null default 0,
+swt1b int not null default 0,
+swt2b int not null default 0,
+/* fields/keys for row retrieval tests */
+key1 int,
+key2 int,
+key3 int,
+key4 int,
+/* make rows much bigger then keys */
+filler1 char (200),
+filler2 char (200),
+filler3 char (200),
+filler4 char (200),
+filler5 char (200),
+filler6 char (200),
+/* order of keys is important */
+key sta_swt12a(st_a,swt1a,swt2a),
+key sta_swt1a(st_a,swt1a),
+key sta_swt2a(st_a,swt2a),
+key sta_swt21a(st_a,swt2a,swt1a),
+key st_a(st_a),
+key stb_swt1a_2b(st_b,swt1b,swt2a),
+key stb_swt1b(st_b,swt1b),
+key st_b(st_b),
+key(key1),
+key(key2),
+key(key3),
+key(key4)
+) ;
+create table t0 as select * from t1;
+# Printing of many insert into t0 values (....) disabled.
+alter table t1 disable keys;
+# Printing of many insert into t1 select .... from t0 disabled.
+# Printing of many insert into t1 (...) values (....) disabled.
+alter table t1 enable keys;
+insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2');
+insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3');
+set optimizer_trace='enabled=on';
+# 3-way ROR-intersection
+explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index_merge key1,key2,key3 key1,key2,key3 5,5,5 NULL 2 Using intersect(key1,key2,key3); Using where; Using index
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "key1",
+ "ranges":
+ [
+ "(100) <= (key1) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 2243,
+ "cost": 2862.1,
+ "chosen": true
+ },
+
+ {
+ "index": "key2",
+ "ranges":
+ [
+ "(100) <= (key2) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 2243,
+ "cost": 2862.1,
+ "chosen": false,
+ "cause": "cost"
+ },
+
+ {
+ "index": "key3",
+ "ranges":
+ [
+ "(100) <= (key3) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 2243,
+ "cost": 2862.1,
+ "chosen": false,
+ "cause": "cost"
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "intersecting_indexes":
+ [
+
+ {
+ "index": "key1",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 58.252,
+ "disk_sweep_cost": 1923.1,
+ "cumulative_total_cost": 1981.4,
+ "usable": true,
+ "matching_rows_now": 2243,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ },
+
+ {
+ "index": "key2",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 116.5,
+ "disk_sweep_cost": 84.518,
+ "cumulative_total_cost": 201.02,
+ "usable": true,
+ "matching_rows_now": 77.636,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ },
+
+ {
+ "index": "key3",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 174.76,
+ "disk_sweep_cost": 0,
+ "cumulative_total_cost": 174.76,
+ "usable": true,
+ "matching_rows_now": 2.6872,
+ "intersect_covering_with_this_index": true,
+ "chosen": true
+ }
+ ],
+ "clustered_pk":
+ {
+ "clustered_pk_added_to_intersect": false,
+ "cause": "no clustered pk index"
+ },
+ "rows": 2,
+ "cost": 174.76,
+ "covering": true,
+ "chosen": true
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
+[
+
+ {
+ "range_access_plan":
+ {
+ "type": "index_roworder_intersect",
+ "rows": 2,
+ "cost": 174.76,
+ "covering": true,
+ "clustered_pk_scan": false,
+ "intersect_of":
+ [
+
+ {
+ "type": "range_scan",
+ "index": "key1",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key1) <= (100)"
+ ]
+ },
+
+ {
+ "type": "range_scan",
+ "index": "key2",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key2) <= (100)"
+ ]
+ },
+
+ {
+ "type": "range_scan",
+ "index": "key3",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key3) <= (100)"
+ ]
+ }
+ ]
+ },
+ "rows_for_plan": 2,
+ "cost_for_plan": 174.76,
+ "chosen": true
+ }
+]
+# ROR-union(ROR-intersection, ROR-range)
+explain select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+
+ {
+ "indexes_to_merge":
+ [
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "key1",
+ "ranges":
+ [
+ "(100) <= (key1) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 2243,
+ "cost": 170.53,
+ "chosen": true
+ },
+
+ {
+ "index": "key2",
+ "ranges":
+ [
+ "(100) <= (key2) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 2243,
+ "cost": 170.53,
+ "chosen": false,
+ "cause": "cost"
+ }
+ ],
+ "index_to_merge": "key1",
+ "cumulated_cost": 170.53
+ },
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "key3",
+ "ranges":
+ [
+ "(100) <= (key3) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 2243,
+ "cost": 170.53,
+ "chosen": true
+ },
+
+ {
+ "index": "key4",
+ "ranges":
+ [
+ "(100) <= (key4) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 2243,
+ "cost": 170.53,
+ "chosen": false,
+ "cause": "cost"
+ }
+ ],
+ "index_to_merge": "key3",
+ "cumulated_cost": 341.05
+ }
+ ],
+ "cost_of_reading_ranges": 341.05,
+ "use_roworder_union": true,
+ "cause": "always cheaper than non roworder retrieval",
+ "analyzing_roworder_scans":
+ [
+
+ {
+ "type": "range_scan",
+ "index": "key1",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key1) <= (100)"
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "intersecting_indexes":
+ [
+
+ {
+ "index": "key1",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 58.252,
+ "disk_sweep_cost": 1923.1,
+ "cumulative_total_cost": 1981.4,
+ "usable": true,
+ "matching_rows_now": 2243,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ },
+
+ {
+ "index": "key2",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 116.5,
+ "disk_sweep_cost": 84.518,
+ "cumulative_total_cost": 201.02,
+ "usable": true,
+ "matching_rows_now": 77.636,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ }
+ ],
+ "clustered_pk":
+ {
+ "clustered_pk_added_to_intersect": false,
+ "cause": "no clustered pk index"
+ },
+ "rows": 77,
+ "cost": 201.02,
+ "covering": false,
+ "chosen": true
+ }
+ },
+
+ {
+ "type": "range_scan",
+ "index": "key3",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key3) <= (100)"
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "intersecting_indexes":
+ [
+
+ {
+ "index": "key3",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 58.252,
+ "disk_sweep_cost": 1923.1,
+ "cumulative_total_cost": 1981.4,
+ "usable": true,
+ "matching_rows_now": 2243,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ },
+
+ {
+ "index": "key4",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 116.5,
+ "disk_sweep_cost": 84.518,
+ "cumulative_total_cost": 201.02,
+ "usable": true,
+ "matching_rows_now": 77.636,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ }
+ ],
+ "clustered_pk":
+ {
+ "clustered_pk_added_to_intersect": false,
+ "cause": "no clustered pk index"
+ },
+ "rows": 77,
+ "cost": 201.02,
+ "covering": false,
+ "chosen": true
+ }
+ }
+ ],
+ "index_roworder_union_cost": 386.73,
+ "members": 2,
+ "chosen": true
+ }
+ ]
+ }
+]
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
+[
+
+ {
+ "range_access_plan":
+ {
+ "type": "index_roworder_union",
+ "union_of":
+ [
+
+ {
+ "type": "index_roworder_intersect",
+ "rows": 77,
+ "cost": 201.02,
+ "covering": false,
+ "clustered_pk_scan": false,
+ "intersect_of":
+ [
+
+ {
+ "type": "range_scan",
+ "index": "key1",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key1) <= (100)"
+ ]
+ },
+
+ {
+ "type": "range_scan",
+ "index": "key2",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key2) <= (100)"
+ ]
+ }
+ ]
+ },
+
+ {
+ "type": "index_roworder_intersect",
+ "rows": 77,
+ "cost": 201.02,
+ "covering": false,
+ "clustered_pk_scan": false,
+ "intersect_of":
+ [
+
+ {
+ "type": "range_scan",
+ "index": "key3",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key3) <= (100)"
+ ]
+ },
+
+ {
+ "type": "range_scan",
+ "index": "key4",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key4) <= (100)"
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "rows_for_plan": 154,
+ "cost_for_plan": 386.73,
+ "chosen": true
+ }
+]
+drop table t0,t1;
+set optimizer_trace="enabled=off";
diff --git a/mysql-test/main/opt_trace_index_merge.test b/mysql-test/main/opt_trace_index_merge.test
index d5efaf81db5..73240b6a9e2 100644
--- a/mysql-test/main/opt_trace_index_merge.test
+++ b/mysql-test/main/opt_trace_index_merge.test
@@ -19,3 +19,115 @@ select * from information_schema.OPTIMIZER_TRACE;
drop table t0,t1;
set optimizer_trace="enabled=off";
set @@optimizer_switch= @tmp_opt_switch;
+
+--echo # More tests added index_merge access
+
+--enable_warnings
+create table t1
+(
+ /* Field names reflect value(rowid) distribution, st=STairs, swt= SaWTooth */
+ st_a int not null default 0,
+ swt1a int not null default 0,
+ swt2a int not null default 0,
+
+ st_b int not null default 0,
+ swt1b int not null default 0,
+ swt2b int not null default 0,
+
+ /* fields/keys for row retrieval tests */
+ key1 int,
+ key2 int,
+ key3 int,
+ key4 int,
+
+ /* make rows much bigger then keys */
+ filler1 char (200),
+ filler2 char (200),
+ filler3 char (200),
+ filler4 char (200),
+ filler5 char (200),
+ filler6 char (200),
+
+ /* order of keys is important */
+ key sta_swt12a(st_a,swt1a,swt2a),
+ key sta_swt1a(st_a,swt1a),
+ key sta_swt2a(st_a,swt2a),
+ key sta_swt21a(st_a,swt2a,swt1a),
+ key st_a(st_a),
+ key stb_swt1a_2b(st_b,swt1b,swt2a),
+ key stb_swt1b(st_b,swt1b),
+ key st_b(st_b),
+
+ key(key1),
+ key(key2),
+ key(key3),
+ key(key4)
+) ;
+# Fill table
+create table t0 as select * from t1;
+--disable_query_log
+--echo # Printing of many insert into t0 values (....) disabled.
+let $cnt=1000;
+while ($cnt)
+{
+ eval insert into t0 values (1, 2, 3, 1, 2, 3, 0, 0, 0, 0, 'data1', 'data2', 'data3', 'data4', 'data5', 'data6');
+ dec $cnt;
+}
+--enable_query_log
+
+alter table t1 disable keys;
+--disable_query_log
+--echo # Printing of many insert into t1 select .... from t0 disabled.
+let $1=4;
+while ($1)
+{
+ let $2=4;
+ while ($2)
+ {
+ let $3=4;
+ while ($3)
+ {
+ eval insert into t1 select $1, $2, $3, $1 ,$2, $3, key1, key2, key3, key4, filler1, filler2, filler3, filler4, filler5, filler6 from t0;
+ dec $3;
+ }
+ dec $2;
+ }
+ dec $1;
+}
+
+--echo # Printing of many insert into t1 (...) values (....) disabled.
+# Row retrieval tests
+# -1 is used for values 'out of any range we are using'
+# insert enough rows for index intersection to be used for (key1,key2)
+insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 100, 100,'key1-key2-key3-key4');
+let $cnt=400;
+while ($cnt)
+{
+ eval insert into t1 (key1, key2, key3, key4, filler1) values (100, -1, 100, -1,'key1-key3');
+ dec $cnt;
+}
+let $cnt=400;
+while ($cnt)
+{
+ eval insert into t1 (key1, key2, key3, key4, filler1) values (-1, 100, -1, 100,'key2-key4');
+ dec $cnt;
+}
+--enable_query_log
+alter table t1 enable keys;
+
+insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2');
+insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3');
+set optimizer_trace='enabled=on';
+
+--echo # 3-way ROR-intersection
+explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+--echo # ROR-union(ROR-intersection, ROR-range)
+explain select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+drop table t0,t1;
+set optimizer_trace="enabled=off";
diff --git a/mysql-test/main/opt_trace_index_merge_innodb.result b/mysql-test/main/opt_trace_index_merge_innodb.result
index 94e9d4f58cc..6a245cc83da 100644
--- a/mysql-test/main/opt_trace_index_merge_innodb.result
+++ b/mysql-test/main/opt_trace_index_merge_innodb.result
@@ -116,7 +116,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"range_scan_alternatives": [
{
"index": "PRIMARY",
- "ranges": ["pk1 < 0", "0 < pk1"],
+ "ranges": ["(pk1) < (0)", "(0) < (pk1)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@@ -127,7 +127,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
},
{
"index": "key1",
- "ranges": ["1 <= key1 <= 1"],
+ "ranges": ["(1) <= (key1) <= (1)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@@ -164,7 +164,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"type": "range_scan",
"index": "key1",
"rows": 1,
- "ranges": ["1 <= key1 <= 1"]
+ "ranges": ["(1) <= (key1) <= (1)"]
},
"rows_for_plan": 1,
"cost_for_plan": 2.3751,
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 5ab3d70214d..fa4e30a1e94 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -431,16 +431,18 @@ static int and_range_trees(RANGE_OPT_PARAM *param,
static bool remove_nonrange_trees(RANGE_OPT_PARAM *param, SEL_TREE *tree);
static void print_key_value(String *out, const KEY_PART_INFO *key_part,
- const uchar *key);
+ const uchar* key, uint length);
+static void print_keyparts_name(String *out, const KEY_PART_INFO *key_part,
+ uint n_keypart, key_part_map keypart_map);
static void append_range_all_keyparts(Json_writer_array *range_trace,
- String *range_string,
- String *range_so_far, const SEL_ARG *keypart,
+ PARAM param, uint idx,
+ SEL_ARG *keypart,
const KEY_PART_INFO *key_parts);
static
-void append_range(String *out, const KEY_PART_INFO *key_parts,
- const uchar *min_key, const uchar *max_key, const uint flag);
+void append_range(String *out, const KEY_PART_INFO *key_part,
+ KEY_MULTI_RANGE *range, uint n_key_parts);
/*
@@ -2273,10 +2275,7 @@ void TRP_RANGE::trace_basic_info(const PARAM *param,
// TRP_RANGE should not be created if there are no range intervals
DBUG_ASSERT(key);
- String range_info;
- range_info.length(0);
- range_info.set_charset(system_charset_info);
- append_range_all_keyparts(&trace_range, NULL, &range_info, key, key_part);
+ append_range_all_keyparts(&trace_range, *param, key_idx, key, key_part);
}
@@ -2489,10 +2488,8 @@ void TRP_GROUP_MIN_MAX::trace_basic_info(const PARAM *param,
// can have group quick without ranges
if (index_tree)
{
- String range_info;
- range_info.set_charset(system_charset_info);
- append_range_all_keyparts(&trace_range, NULL, &range_info, index_tree,
- key_part);
+ append_range_all_keyparts(&trace_range, *param, param_idx,
+ index_tree, key_part);
}
}
@@ -6398,20 +6395,9 @@ void TRP_ROR_INTERSECT::trace_basic_info(const PARAM *param,
trace_isect_idx.add("rows", (*cur_scan)->records);
Json_writer_array trace_range(thd, "ranges");
- for (const SEL_ARG *current= (*cur_scan)->sel_arg->first(); current;
- current= current->next)
- {
- String range_info;
- range_info.set_charset(system_charset_info);
- for (const SEL_ARG *part= current; part;
- part= part->next_key_part ? part->next_key_part : nullptr)
- {
- const KEY_PART_INFO *cur_key_part= key_part + part->part;
- append_range(&range_info, cur_key_part, part->min_value,
- part->max_value, part->min_flag | part->max_flag);
- }
- trace_range.add(range_info.ptr(), range_info.length());
- }
+
+ append_range_all_keyparts(&trace_range, *param, (*cur_scan)->idx,
+ (*cur_scan)->sel_arg->first(), key_part);
}
}
@@ -7389,9 +7375,6 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
const KEY &cur_key= param->table->key_info[keynr];
const KEY_PART_INFO *key_part= cur_key.key_part;
- String range_info;
- range_info.set_charset(system_charset_info);
-
index_scan->idx= idx;
index_scan->keynr= keynr;
index_scan->key_info= ¶m->table->key_info[keynr];
@@ -7402,8 +7385,8 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
*tree->index_scans_end++= index_scan;
if (unlikely(thd->trace_started()))
- append_range_all_keyparts(&trace_range, NULL, &range_info, key,
- key_part);
+ append_range_all_keyparts(&trace_range, (*param), idx,
+ key, key_part);
trace_range.end();
trace_idx.add("rowid_ordered", param->is_ror_scan)
@@ -13554,11 +13537,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
Json_writer_array trace_range(thd, "ranges");
const KEY_PART_INFO *key_part= cur_index_info->key_part;
-
- String range_info;
- range_info.set_charset(system_charset_info);
- append_range_all_keyparts(&trace_range, NULL, &range_info,
- cur_index_tree, key_part);
+ append_range_all_keyparts(&trace_range, *param, cur_param_idx,
+ cur_index_tree, key_part);
}
}
cost_group_min_max(table, cur_index_info, cur_used_key_parts,
@@ -15730,12 +15710,17 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
}
#endif /* !DBUG_OFF */
+
static
void append_range(String *out, const KEY_PART_INFO *key_part,
- const uchar *min_key, const uchar *max_key, const uint flag)
+ KEY_MULTI_RANGE *range, uint n_key_parts)
{
- if (out->length() > 0)
- out->append(STRING_WITH_LEN(" AND "));
+ uint flag= range->range_flag;
+ String key_name;
+ key_name.set_charset(system_charset_info);
+ key_part_map keypart_map= range->start_key.length ?
+ range->start_key.keypart_map :
+ range->end_key.keypart_map;
if (flag & GEOM_FLAG)
{
@@ -15744,22 +15729,24 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
range types, so printing "col < some_geom" doesn't make sense.
Just print the column name, not operator.
*/
- out->append(key_part->field->field_name);
+ print_keyparts_name(out, key_part, n_key_parts, keypart_map);
out->append(STRING_WITH_LEN(" "));
- print_key_value(out, key_part, min_key);
+ print_key_value(out, key_part, range->start_key.key,
+ range->start_key.length);
return;
}
if (!(flag & NO_MIN_RANGE))
{
- print_key_value(out, key_part, min_key);
+ print_key_value(out, key_part, range->start_key.key,
+ range->start_key.length);
if (flag & NEAR_MIN)
out->append(STRING_WITH_LEN(" < "));
else
out->append(STRING_WITH_LEN(" <= "));
}
- out->append(key_part->field->field_name);
+ print_keyparts_name(out, key_part, n_key_parts, keypart_map);
if (!(flag & NO_MAX_RANGE))
{
@@ -15767,7 +15754,8 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
out->append(STRING_WITH_LEN(" < "));
else
out->append(STRING_WITH_LEN(" <= "));
- print_key_value(out, key_part, max_key);
+ print_key_value(out, key_part, range->end_key.key,
+ range->end_key.length);
}
}
@@ -15775,60 +15763,39 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
Add ranges to the trace
For ex:
- query: select * from t1 where a=2 ;
- and we have an index on a , so we create a range
- 2 <= a <= 2
+ lets say we have an index a_b(a,b)
+ query: select * from t1 where a=2 and b=4 ;
+ so we create a range:
+ (2,4) <= (a,b) <= (2,4)
this is added to the trace
*/
static void append_range_all_keyparts(Json_writer_array *range_trace,
- String *range_string,
- String *range_so_far, const SEL_ARG *keypart,
+ PARAM param, uint idx,
+ SEL_ARG *keypart,
const KEY_PART_INFO *key_parts)
{
-
- DBUG_ASSERT(keypart);
- DBUG_ASSERT(keypart && keypart != &null_element);
-
- // Navigate to first interval in red-black tree
+ SEL_ARG_RANGE_SEQ seq;
+ KEY_MULTI_RANGE range;
+ range_seq_t seq_it;
+ uint flags= 0;
+ RANGE_SEQ_IF seq_if = {NULL, sel_arg_range_seq_init,
+ sel_arg_range_seq_next, 0, 0};
+ KEY *keyinfo= param.table->key_info + param.real_keynr[idx];
+ uint n_key_parts= param.table->actual_n_key_parts(keyinfo);
+ seq.keyno= idx;
+ seq.real_keyno= param.real_keynr[idx];
+ seq.param= ¶m;
+ seq.start= keypart;
const KEY_PART_INFO *cur_key_part= key_parts + keypart->part;
- const SEL_ARG *keypart_range= keypart->first();
- const size_t save_range_so_far_length= range_so_far->length();
-
+ seq_it= seq_if.init((void *) &seq, 0, flags);
- while (keypart_range)
+ while (!seq_if.next(seq_it, &range))
{
- // Append the current range predicate to the range String
- switch (keypart->type)
- {
- case SEL_ARG::Type::KEY_RANGE:
- append_range(range_so_far, cur_key_part, keypart_range->min_value,
- keypart_range->max_value,
- keypart_range->min_flag | keypart_range->max_flag);
- break;
- case SEL_ARG::Type::MAYBE_KEY:
- range_so_far->append("MAYBE_KEY");
- break;
- case SEL_ARG::Type::IMPOSSIBLE:
- range_so_far->append("IMPOSSIBLE");
- break;
- default:
- DBUG_ASSERT(false);
- break;
- }
-
- if (keypart_range->next_key_part &&
- keypart_range->next_key_part->part ==
- keypart_range->part + 1 &&
- keypart_range->is_singlepoint())
- {
- append_range_all_keyparts(range_trace, range_string, range_so_far,
- keypart_range->next_key_part, key_parts);
- }
- else
- range_trace->add(range_so_far->c_ptr_safe(), range_so_far->length());
- keypart_range= keypart_range->next;
- range_so_far->length(save_range_so_far_length);
+ String range_info;
+ range_info.set_charset(system_charset_info);
+ append_range(&range_info, cur_key_part, &range, n_key_parts);
+ range_trace->add(range_info.c_ptr_safe(), range_info.length());
}
}
@@ -15838,70 +15805,110 @@ static void append_range_all_keyparts(Json_writer_array *range_trace,
@param[out] out String the key is appended to
@param[in] key_part Index components description
@param[in] key Key tuple
+ @param[in] used_length length of the key tuple
*/
+
static void print_key_value(String *out, const KEY_PART_INFO *key_part,
- const uchar *key)
+ const uchar* key, uint used_length)
{
+ out->append(STRING_WITH_LEN("("));
Field *field= key_part->field;
+ StringBuffer<128> tmp(system_charset_info);
+ TABLE *table= field->table;
+ uint store_length;
+ my_bitmap_map *old_sets[2];
+ dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+ const uchar *key_end= key+used_length;
- if (field->flags & BLOB_FLAG)
+ for (; key < key_end; key+=store_length, key_part++)
{
- // Byte 0 of a nullable key is the null-byte. If set, key is NULL.
- if (field->real_maybe_null() && *key)
- out->append(STRING_WITH_LEN("NULL"));
- else
- (field->type() == MYSQL_TYPE_GEOMETRY)
- ? out->append(STRING_WITH_LEN("unprintable_geometry_value"))
- : out->append(STRING_WITH_LEN("unprintable_blob_value"));
- return;
- }
+ field= key_part->field;
+ store_length= key_part->store_length;
+ if (field->flags & BLOB_FLAG)
+ {
+ // Byte 0 of a nullable key is the null-byte. If set, key is NULL.
+ if (field->real_maybe_null() && *key)
+ out->append(STRING_WITH_LEN("NULL"));
+ else
+ (field->type() == MYSQL_TYPE_GEOMETRY)
+ ? out->append(STRING_WITH_LEN("unprintable_geometry_value"))
+ : out->append(STRING_WITH_LEN("unprintable_blob_value"));
+ goto next;
+ }
- uint store_length= key_part->store_length;
+ if (field->real_maybe_null())
+ {
+ /*
+ Byte 0 of key is the null-byte. If set, key is NULL.
+ Otherwise, print the key value starting immediately after the
+ null-byte
+ */
+ if (*key)
+ {
+ out->append(STRING_WITH_LEN("NULL"));
+ goto next;
+ }
+ key++; // Skip null byte
+ store_length--;
+ }
- if (field->real_maybe_null())
- {
/*
- Byte 0 of key is the null-byte. If set, key is NULL.
- Otherwise, print the key value starting immediately after the
- null-byte
+ Binary data cannot be converted to UTF8 which is what the
+ optimizer trace expects. If the column is binary, the hex
+ representation is printed to the trace instead.
*/
- if (*key)
+ if (field->flags & BINARY_FLAG)
{
- out->append(STRING_WITH_LEN("NULL"));
- return;
+ out->append("0x");
+ for (uint i = 0; i < store_length; i++)
+ {
+ out->append(_dig_vec_lower[*(key + i) >> 4]);
+ out->append(_dig_vec_lower[*(key + i) & 0x0F]);
+ }
+ goto next;
}
- key++; // Skip null byte
- store_length--;
+
+ field->set_key_image(key, key_part->length);
+ if (field->type() == MYSQL_TYPE_BIT)
+ (void)field->val_int_as_str(&tmp, 1); // may change tmp's charset
+ else
+ field->val_str(&tmp); // may change tmp's charset
+ out->append(tmp.ptr(), tmp.length(), tmp.charset());
+
+ next:
+ if (key + store_length < key_end)
+ out->append(STRING_WITH_LEN(","));
}
+ dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ out->append(STRING_WITH_LEN(")"));
+}
- /*
- Binary data cannot be converted to UTF8 which is what the
- optimizer trace expects. If the column is binary, the hex
- representation is printed to the trace instead.
- */
- if (field->flags & BINARY_FLAG)
+/**
+ Print key parts involed in a range
+ @param[out] out String the key is appended to
+ @param[in] key_part Index components description
+ @param[in] n_keypart Number of keyparts in index
+ @param[in] keypart_map map for keyparts involved in the range
+*/
+
+void print_keyparts_name(String *out, const KEY_PART_INFO *key_part,
+ uint n_keypart, key_part_map keypart_map)
+{
+ uint i;
+ out->append(STRING_WITH_LEN("("));
+ bool first_keypart= TRUE;
+ for (i=0; i < n_keypart; key_part++, i++)
{
- out->append("0x");
- for (uint i = 0; i < store_length; i++)
+ if (keypart_map & (1 << i))
{
- out->append(_dig_vec_lower[*(key + i) >> 4]);
- out->append(_dig_vec_lower[*(key + i) & 0x0F]);
+ if (first_keypart)
+ first_keypart= FALSE;
+ else
+ out->append(STRING_WITH_LEN(","));
+ out->append(key_part->field->field_name);
}
- return;
+ else
+ break;
}
-
- StringBuffer<128> tmp(system_charset_info);
- TABLE *table= field->table;
- my_bitmap_map *old_sets[2];
-
- dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
-
- field->set_key_image(key, key_part->length);
- if (field->type() == MYSQL_TYPE_BIT)
- (void)field->val_int_as_str(&tmp, 1); // may change tmp's charset
- else
- field->val_str(&tmp); // may change tmp's charset
- out->append(tmp.ptr(), tmp.length(), tmp.charset());
-
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ out->append(STRING_WITH_LEN(")"));
}
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 2dab90b9f69..5c2a2e39137 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -458,7 +458,9 @@ class SEL_ARG :public Sql_alloc
SEL_ARG *key_tree= first();
uint res= key_tree->store_min(key[key_tree->part].store_length,
range_key, *range_key_flag);
- *range_key_flag|= key_tree->min_flag;
+ // add flags only if a key_part is written to the buffer
+ if (res)
+ *range_key_flag|= key_tree->min_flag;
if (key_tree->next_key_part &&
key_tree->next_key_part->type == SEL_ARG::KEY_RANGE &&
key_tree->part != last_part &&
@@ -480,7 +482,8 @@ class SEL_ARG :public Sql_alloc
SEL_ARG *key_tree= last();
uint res=key_tree->store_max(key[key_tree->part].store_length,
range_key, *range_key_flag);
- (*range_key_flag)|= key_tree->max_flag;
+ if (res)
+ (*range_key_flag)|= key_tree->max_flag;
if (key_tree->next_key_part &&
key_tree->next_key_part->type == SEL_ARG::KEY_RANGE &&
key_tree->part != last_part &&
[Commits] d2013e7: MDEV-18982 Partition pruning with column list causes syntax error in 10.4
by IgorBabaev 04 Apr '19
revision-id: d2013e73288c953a6cbcdddf9688584c0353535d (mariadb-10.4.3-159-gd2013e7)
parent(s): ae15f91f227015b3e1ad3f566db9396232cf0a3f
author: Igor Babaev
committer: Igor Babaev
timestamp: 2019-04-04 16:36:26 -0700
message:
MDEV-18982 Partition pruning with column list causes syntax error in 10.4
A syntax error was reported for any INSERT statement with explicit
partition selection if it used a column list.
Fixed by saving the parsing place before parsing the clause for explicit
partition selection and restoring it when the clause has been parsed.
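For reference, a minimal reproducer, mirroring the test case added below; before this fix the INSERT failed with a syntax error because the column list follows the PARTITION clause:

create table t1 (a int) partition by hash(a);
insert into t1 partition (p0) (a) values (1);  -- previously: syntax error
select * from t1;                              -- returns 1
drop table t1;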
---
mysql-test/main/partition_explicit_prune.result | 9 +++++++++
mysql-test/main/partition_explicit_prune.test | 9 +++++++++
sql/sql_lex.cc | 1 +
sql/sql_lex.h | 1 +
sql/sql_yacc.yy | 8 +++++++-
sql/sql_yacc_ora.yy | 8 +++++++-
6 files changed, 34 insertions(+), 2 deletions(-)
diff --git a/mysql-test/main/partition_explicit_prune.result b/mysql-test/main/partition_explicit_prune.result
index 650c8d2..1199bc2 100644
--- a/mysql-test/main/partition_explicit_prune.result
+++ b/mysql-test/main/partition_explicit_prune.result
@@ -1897,3 +1897,12 @@ SELECT * FROM t1 PARTITION (p0);
i
UNLOCK TABLES;
DROP TABLE t1, t2;
+#
+# MDEV-18982: INSERT using explicit patition pruning with column list
+#
+create table t1 (a int) partition by hash(a);
+insert into t1 partition (p0) (a) values (1);
+select * from t1;
+a
+1
+drop table t1;
diff --git a/mysql-test/main/partition_explicit_prune.test b/mysql-test/main/partition_explicit_prune.test
index b8b6e48..a516527 100644
--- a/mysql-test/main/partition_explicit_prune.test
+++ b/mysql-test/main/partition_explicit_prune.test
@@ -877,3 +877,12 @@ UNLOCK TABLES;
# Cleanup
DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-18982: INSERT using explicit patition pruning with column list
+--echo #
+
+create table t1 (a int) partition by hash(a);
+insert into t1 partition (p0) (a) values (1);
+select * from t1;
+drop table t1;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 4dc6b94..3f28ab9 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -2381,6 +2381,7 @@ void st_select_lex::init_query()
first_natural_join_processing= 1;
first_cond_optimization= 1;
parsing_place= NO_MATTER;
+ save_parsing_place= NO_MATTER;
exclude_from_table_unique_test= no_wrap_view_item= FALSE;
nest_level= 0;
link_next= 0;
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 8d5c248..3d31453 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1165,6 +1165,7 @@ class st_select_lex: public st_select_lex_node
*/
uint hidden_bit_fields;
enum_parsing_place parsing_place; /* where we are parsing expression */
+ enum_parsing_place save_parsing_place;
enum_parsing_place context_analysis_place; /* where we are in prepare */
bool with_sum_func; /* sum function indicator */
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 5869f51..11634f4 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -12052,6 +12052,8 @@ use_partition:
PARTITION_SYM '(' using_list ')' have_partitioning
{
$$= $3;
+ Select->parsing_place= Select->save_parsing_place;
+ Select->save_parsing_place= NO_MATTER;
}
;
@@ -13347,13 +13349,17 @@ insert2:
;
insert_table:
+ {
+ Select->save_parsing_place= Select->parsing_place;
+ }
table_name_with_opt_use_partition
{
LEX *lex=Lex;
//lex->field_list.empty();
lex->many_values.empty();
lex->insert_list=0;
- };
+ }
+ ;
insert_field_spec:
insert_values {}
diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy
index 39095bc..f5a4e55 100644
--- a/sql/sql_yacc_ora.yy
+++ b/sql/sql_yacc_ora.yy
@@ -12174,6 +12174,8 @@ use_partition:
PARTITION_SYM '(' using_list ')' have_partitioning
{
$$= $3;
+ Select->parsing_place= Select->save_parsing_place;
+ Select->save_parsing_place= NO_MATTER;
}
;
@@ -13485,13 +13487,17 @@ insert2:
;
insert_table:
+ {
+ Select->save_parsing_place= Select->parsing_place;
+ }
table_name_with_opt_use_partition
{
LEX *lex=Lex;
//lex->field_list.empty();
lex->many_values.empty();
lex->insert_list=0;
- };
+ }
+ ;
insert_field_spec:
insert_values {}
[Commits] a1929d001c7: MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly
by Varun 04 Apr '19
revision-id: a1929d001c7b25e26d178834a196020bade819b8 (mariadb-10.3.6-318-ga1929d001c7)
parent(s): 0dc442ac61ac7ff1a1d6bd7d6090c0f2251fb558
author: Varun Gupta
committer: Varun Gupta
timestamp: 2019-04-03 13:10:07 +0530
message:
MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly
Changed the function append_range_all_keyparts to use sel_arg_range_seq_init / sel_arg_range_seq_next to produce ranges.
Also adjusted the print format for the ranges; they are now printed as:
(keypart1_min, keypart2_min, ...) OP (keypart1_name, keypart2_name, ...) OP (keypart1_max, keypart2_max, ...)
Also added more optimizer trace tests for range and index merge access.
---
mysql-test/main/opt_trace.result | 231 +++++++++-
mysql-test/main/opt_trace.test | 42 ++
mysql-test/main/opt_trace_index_merge.result | 509 ++++++++++++++++++++-
mysql-test/main/opt_trace_index_merge.test | 112 +++++
.../main/opt_trace_index_merge_innodb.result | 6 +-
sql/opt_range.cc | 285 ++++++------
6 files changed, 1016 insertions(+), 169 deletions(-)
diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result
index 12d4c713886..576282a4364 100644
--- a/mysql-test/main/opt_trace.result
+++ b/mysql-test/main/opt_trace.result
@@ -1248,7 +1248,7 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
{
"index": "a",
"covering": true,
- "ranges": ["2 <= b <= 2 AND 3 <= c <= 3"],
+ "ranges": ["(2,3) <= (b,c) <= (2,3)"],
"rows": 8,
"cost": 2.2
}
@@ -1264,7 +1264,7 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
"rows": 8,
"cost": 2.2,
"key_parts_used_for_access": ["a", "b", "c"],
- "ranges": ["2 <= b <= 2 AND 3 <= c <= 3"],
+ "ranges": ["(2,3) <= (b,c) <= (2,3)"],
"chosen": false,
"cause": "cost"
},
@@ -1446,7 +1446,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
{
"index": "id",
"covering": true,
- "ranges": ["0x24a20f <= a"],
+ "ranges": ["(0x24a20f) <= (a)"],
"rows": 9,
"cost": 2.35
}
@@ -1462,7 +1462,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
"rows": 9,
"cost": 2.35,
"key_parts_used_for_access": ["id"],
- "ranges": ["0x24a20f <= a"],
+ "ranges": ["(0x24a20f) <= (a)"],
"chosen": false,
"cause": "cost"
},
@@ -1624,7 +1624,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
{
"index": "id",
"covering": true,
- "ranges": ["0x24a20f <= a <= 0x24a20f"],
+ "ranges": ["(0x24a20f) <= (a) <= (0x24a20f)"],
"rows": 9,
"cost": 2.35
}
@@ -1640,7 +1640,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
"rows": 9,
"cost": 2.35,
"key_parts_used_for_access": ["id", "a"],
- "ranges": ["0x24a20f <= a <= 0x24a20f"],
+ "ranges": ["(0x24a20f) <= (a) <= (0x24a20f)"],
"chosen": false,
"cause": "cost"
},
@@ -1856,7 +1856,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"range_scan_alternatives": [
{
"index": "a_c",
- "ranges": ["1 <= a <= 1"],
+ "ranges": ["(1) <= (a) <= (1)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@@ -1866,7 +1866,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
},
{
"index": "a_b",
- "ranges": ["1 <= a <= 1 AND 2 <= b <= 2"],
+ "ranges": ["(1,2) <= (a,b) <= (1,2)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@@ -1885,7 +1885,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"type": "range_scan",
"index": "a_b",
"rows": 21,
- "ranges": ["1 <= a <= 1 AND 2 <= b <= 2"]
+ "ranges": ["(1,2) <= (a,b) <= (1,2)"]
},
"rows_for_plan": 21,
"cost_for_plan": 27.445,
@@ -2025,7 +2025,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"range_scan_alternatives": [
{
"index": "a_c",
- "ranges": ["1 <= a <= 1"],
+ "ranges": ["(1) <= (a) <= (1)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@@ -2044,7 +2044,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"type": "range_scan",
"index": "a_c",
"rows": 180,
- "ranges": ["1 <= a <= 1"]
+ "ranges": ["(1) <= (a) <= (1)"]
},
"rows_for_plan": 180,
"cost_for_plan": 231.72,
@@ -2895,7 +2895,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"range_scan_alternatives": [
{
"index": "pk",
- "ranges": ["2 <= pk <= 2"],
+ "ranges": ["(2) <= (pk) <= (2)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@@ -2906,7 +2906,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
},
{
"index": "pk_a",
- "ranges": ["2 <= pk <= 2 AND 5 <= a <= 5"],
+ "ranges": ["(2,5) <= (pk,a) <= (2,5)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@@ -2917,7 +2917,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
},
{
"index": "pk_a_b",
- "ranges": ["2 <= pk <= 2 AND 5 <= a <= 5 AND 1 <= b <= 1"],
+ "ranges": ["(2,5,1) <= (pk,a,b) <= (2,5,1)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
@@ -2964,7 +2964,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"type": "range_scan",
"index": "pk_a_b",
"rows": 1,
- "ranges": ["2 <= pk <= 2 AND 5 <= a <= 5 AND 1 <= b <= 1"]
+ "ranges": ["(2,5,1) <= (pk,a,b) <= (2,5,1)"]
},
"rows_for_plan": 1,
"cost_for_plan": 1.1793,
@@ -3338,7 +3338,7 @@ explain delete from t0 where t0.a<3 {
"range_scan_alternatives": [
{
"index": "a",
- "ranges": ["NULL < a < 3"],
+ "ranges": ["(NULL) < (a) < (3)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
@@ -3354,7 +3354,7 @@ explain delete from t0 where t0.a<3 {
"type": "range_scan",
"index": "a",
"rows": 3,
- "ranges": ["NULL < a < 3"]
+ "ranges": ["(NULL) < (a) < (3)"]
},
"rows_for_plan": 3,
"cost_for_plan": 5.007,
@@ -3481,7 +3481,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"range_scan_alternatives": [
{
"index": "a",
- "ranges": ["NULL < a < 3"],
+ "ranges": ["(NULL) < (a) < (3)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": true,
@@ -3500,7 +3500,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"type": "range_scan",
"index": "a",
"rows": 3,
- "ranges": ["NULL < a < 3"]
+ "ranges": ["(NULL) < (a) < (3)"]
},
"rows_for_plan": 3,
"cost_for_plan": 1.407,
@@ -3546,7 +3546,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"range_scan_alternatives": [
{
"index": "a",
- "ranges": ["NULL < a < 3"],
+ "ranges": ["(NULL) < (a) < (3)"],
"rowid_ordered": false,
"using_mrr": false,
"index_only": true,
@@ -3565,7 +3565,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"type": "range_scan",
"index": "a",
"rows": 3,
- "ranges": ["NULL < a < 3"]
+ "ranges": ["(NULL) < (a) < (3)"]
},
"rows_for_plan": 3,
"cost_for_plan": 1.407,
@@ -6034,4 +6034,193 @@ COUNT(*)
1
DROP VIEW v1;
DROP TABLE t1;
+#
+# MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly.
+#
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table one_k (a int);
+insert into one_k select A.a + B.a*10 + C.a*100 from t0 A, t0 B, t0 C;
+create table t1 ( a int, b int, key a_b(a,b));
+insert into t1 select a,a from one_k;
+set optimizer_trace='enabled=on';
+explain select * from t1 force index (a_b) where a=2 and b=4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref a_b a_b 10 const,const 1 Using index
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "a_b",
+ "ranges":
+ [
+ "(2,4) <= (a,b) <= (2,4)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 1,
+ "cost": 1.1783,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+explain select * from t1 where a >= 900 and b between 10 and 20;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a_b a_b 10 NULL 107 Using where; Using index
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "a_b",
+ "ranges":
+ [
+ "(900,10) <= (a,b)"
+ ],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 107,
+ "cost": 10.955,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+drop table t0,t1;
+create table t1 (start_date date, end_date date, filler char(100), key(start_date, end_date)) ;
+insert into t1 select date_add(now(), interval a day), date_add(now(), interval (a+7) day), 'data' from one_k;
+explain select * from t1 force index(start_date) where start_date >= '2019-02-10' and end_date <'2019-04-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range start_date start_date 8 NULL 1000 Using index condition
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "start_date",
+ "ranges":
+ [
+ "(0x4ac60f,NULL) < (start_date,end_date)"
+ ],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 1000,
+ "cost": 1282.2,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+drop table t1,one_k;
+# Ported test from MYSQL for ranges involving Binary column
+CREATE TABLE t1(i INT PRIMARY KEY, b BINARY(16), INDEX i_b(b));
+INSERT INTO t1 VALUES (1, x'D95B94336A9946A39CF5B58CFE772D8C');
+INSERT INTO t1 VALUES (2, NULL);
+EXPLAIN SELECT * FROM t1 WHERE b IN (0xD95B94336A9946A39CF5B58CFE772D8C);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref i_b i_b 17 const 1 Using index condition
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "i_b",
+ "ranges":
+ [
+ "(0xd95b94336a9946a39cf5b58cfe772d8c) <= (b) <= (0xd95b94336a9946a39cf5b58cfe772d8c)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 1,
+ "cost": 2.3797,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+EXPLAIN SELECT * FROM t1 WHERE b IS NULL;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref i_b i_b 17 const 1 Using index condition
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "i_b",
+ "ranges":
+ [
+ "(NULL) <= (b) <= (NULL)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 1,
+ "cost": 2.3797,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+drop table t1;
set optimizer_trace='enabled=off';
diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test
index 4ec7c338acd..484d3e53e56 100644
--- a/mysql-test/main/opt_trace.test
+++ b/mysql-test/main/opt_trace.test
@@ -387,4 +387,46 @@ SELECT COUNT(*) FROM v1 WHERE MATCH (f) AGAINST ('fooba');
DROP VIEW v1;
DROP TABLE t1;
+--echo #
+--echo # MDEV-18741: Optimizer trace: multi-part key ranges are printed incorrectly.
+--echo #
+
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table one_k (a int);
+insert into one_k select A.a + B.a*10 + C.a*100 from t0 A, t0 B, t0 C;
+create table t1 ( a int, b int, key a_b(a,b));
+insert into t1 select a,a from one_k;
+set optimizer_trace='enabled=on';
+
+explain select * from t1 force index (a_b) where a=2 and b=4;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+explain select * from t1 where a >= 900 and b between 10 and 20;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+drop table t0,t1;
+
+create table t1 (start_date date, end_date date, filler char(100), key(start_date, end_date)) ;
+--disable_warnings
+insert into t1 select date_add(now(), interval a day), date_add(now(), interval (a+7) day), 'data' from one_k;
+--enable_warnings
+explain select * from t1 force index(start_date) where start_date >= '2019-02-10' and end_date <'2019-04-01';
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+drop table t1,one_k;
+
+--echo # Ported test from MYSQL for ranges involving Binary column
+
+CREATE TABLE t1(i INT PRIMARY KEY, b BINARY(16), INDEX i_b(b));
+INSERT INTO t1 VALUES (1, x'D95B94336A9946A39CF5B58CFE772D8C');
+INSERT INTO t1 VALUES (2, NULL);
+
+EXPLAIN SELECT * FROM t1 WHERE b IN (0xD95B94336A9946A39CF5B58CFE772D8C);
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+EXPLAIN SELECT * FROM t1 WHERE b IS NULL;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+drop table t1;
+
set optimizer_trace='enabled=off';
diff --git a/mysql-test/main/opt_trace_index_merge.result b/mysql-test/main/opt_trace_index_merge.result
index 50daef815d6..b5e68d04615 100644
--- a/mysql-test/main/opt_trace_index_merge.result
+++ b/mysql-test/main/opt_trace_index_merge.result
@@ -110,7 +110,7 @@ explain select * from t1 where a=1 or b=1 {
"range_scan_alternatives": [
{
"index": "a",
- "ranges": ["1 <= a <= 1"],
+ "ranges": ["(1) <= (a) <= (1)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
@@ -126,7 +126,7 @@ explain select * from t1 where a=1 or b=1 {
"range_scan_alternatives": [
{
"index": "b",
- "ranges": ["1 <= b <= 1"],
+ "ranges": ["(1) <= (b) <= (1)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": true,
@@ -147,7 +147,7 @@ explain select * from t1 where a=1 or b=1 {
"type": "range_scan",
"index": "a",
"rows": 1,
- "ranges": ["1 <= a <= 1"],
+ "ranges": ["(1) <= (a) <= (1)"],
"analyzing_roworder_intersect": {
"cause": "too few roworder scans"
}
@@ -156,7 +156,7 @@ explain select * from t1 where a=1 or b=1 {
"type": "range_scan",
"index": "b",
"rows": 1,
- "ranges": ["1 <= b <= 1"],
+ "ranges": ["(1) <= (b) <= (1)"],
"analyzing_roworder_intersect": {
"cause": "too few roworder scans"
}
@@ -176,13 +176,13 @@ explain select * from t1 where a=1 or b=1 {
"type": "range_scan",
"index": "a",
"rows": 1,
- "ranges": ["1 <= a <= 1"]
+ "ranges": ["(1) <= (a) <= (1)"]
},
{
"type": "range_scan",
"index": "b",
"rows": 1,
- "ranges": ["1 <= b <= 1"]
+ "ranges": ["(1) <= (b) <= (1)"]
}
]
},
@@ -243,3 +243,500 @@ explain select * from t1 where a=1 or b=1 {
drop table t0,t1;
set optimizer_trace="enabled=off";
set @@optimizer_switch= @tmp_opt_switch;
+# More tests added for index_merge access
+create table t1
+(
+/* Field names reflect value(rowid) distribution, st=STairs, swt= SaWTooth */
+st_a int not null default 0,
+swt1a int not null default 0,
+swt2a int not null default 0,
+st_b int not null default 0,
+swt1b int not null default 0,
+swt2b int not null default 0,
+/* fields/keys for row retrieval tests */
+key1 int,
+key2 int,
+key3 int,
+key4 int,
+/* make rows much bigger then keys */
+filler1 char (200),
+filler2 char (200),
+filler3 char (200),
+filler4 char (200),
+filler5 char (200),
+filler6 char (200),
+/* order of keys is important */
+key sta_swt12a(st_a,swt1a,swt2a),
+key sta_swt1a(st_a,swt1a),
+key sta_swt2a(st_a,swt2a),
+key sta_swt21a(st_a,swt2a,swt1a),
+key st_a(st_a),
+key stb_swt1a_2b(st_b,swt1b,swt2a),
+key stb_swt1b(st_b,swt1b),
+key st_b(st_b),
+key(key1),
+key(key2),
+key(key3),
+key(key4)
+) ;
+create table t0 as select * from t1;
+# Printing of many insert into t0 values (....) disabled.
+alter table t1 disable keys;
+# Printing of many insert into t1 select .... from t0 disabled.
+# Printing of many insert into t1 (...) values (....) disabled.
+alter table t1 enable keys;
+insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2');
+insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3');
+set optimizer_trace='enabled=on';
+# 3-way ROR-intersection
+explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index_merge key1,key2,key3 key1,key2,key3 5,5,5 NULL 2 Using intersect(key1,key2,key3); Using where; Using index
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "key1",
+ "ranges":
+ [
+ "(100) <= (key1) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 2243,
+ "cost": 2862.1,
+ "chosen": true
+ },
+
+ {
+ "index": "key2",
+ "ranges":
+ [
+ "(100) <= (key2) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 2243,
+ "cost": 2862.1,
+ "chosen": false,
+ "cause": "cost"
+ },
+
+ {
+ "index": "key3",
+ "ranges":
+ [
+ "(100) <= (key3) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 2243,
+ "cost": 2862.1,
+ "chosen": false,
+ "cause": "cost"
+ }
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "intersecting_indexes":
+ [
+
+ {
+ "index": "key1",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 58.252,
+ "disk_sweep_cost": 1923.1,
+ "cumulative_total_cost": 1981.4,
+ "usable": true,
+ "matching_rows_now": 2243,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ },
+
+ {
+ "index": "key2",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 116.5,
+ "disk_sweep_cost": 84.518,
+ "cumulative_total_cost": 201.02,
+ "usable": true,
+ "matching_rows_now": 77.636,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ },
+
+ {
+ "index": "key3",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 174.76,
+ "disk_sweep_cost": 0,
+ "cumulative_total_cost": 174.76,
+ "usable": true,
+ "matching_rows_now": 2.6872,
+ "intersect_covering_with_this_index": true,
+ "chosen": true
+ }
+ ],
+ "clustered_pk":
+ {
+ "clustered_pk_added_to_intersect": false,
+ "cause": "no clustered pk index"
+ },
+ "rows": 2,
+ "cost": 174.76,
+ "covering": true,
+ "chosen": true
+ },
+ "analyzing_index_merge_union":
+ [
+ ]
+ }
+]
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
+[
+
+ {
+ "range_access_plan":
+ {
+ "type": "index_roworder_intersect",
+ "rows": 2,
+ "cost": 174.76,
+ "covering": true,
+ "clustered_pk_scan": false,
+ "intersect_of":
+ [
+
+ {
+ "type": "range_scan",
+ "index": "key1",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key1) <= (100)"
+ ]
+ },
+
+ {
+ "type": "range_scan",
+ "index": "key2",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key2) <= (100)"
+ ]
+ },
+
+ {
+ "type": "range_scan",
+ "index": "key3",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key3) <= (100)"
+ ]
+ }
+ ]
+ },
+ "rows_for_plan": 2,
+ "cost_for_plan": 174.76,
+ "chosen": true
+ }
+]
+# ROR-union(ROR-intersection, ROR-range)
+explain select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
+[
+
+ {
+ "range_scan_alternatives":
+ [
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union":
+ [
+
+ {
+ "indexes_to_merge":
+ [
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "key1",
+ "ranges":
+ [
+ "(100) <= (key1) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 2243,
+ "cost": 170.53,
+ "chosen": true
+ },
+
+ {
+ "index": "key2",
+ "ranges":
+ [
+ "(100) <= (key2) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 2243,
+ "cost": 170.53,
+ "chosen": false,
+ "cause": "cost"
+ }
+ ],
+ "index_to_merge": "key1",
+ "cumulated_cost": 170.53
+ },
+
+ {
+ "range_scan_alternatives":
+ [
+
+ {
+ "index": "key3",
+ "ranges":
+ [
+ "(100) <= (key3) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 2243,
+ "cost": 170.53,
+ "chosen": true
+ },
+
+ {
+ "index": "key4",
+ "ranges":
+ [
+ "(100) <= (key4) <= (100)"
+ ],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 2243,
+ "cost": 170.53,
+ "chosen": false,
+ "cause": "cost"
+ }
+ ],
+ "index_to_merge": "key3",
+ "cumulated_cost": 341.05
+ }
+ ],
+ "cost_of_reading_ranges": 341.05,
+ "use_roworder_union": true,
+ "cause": "always cheaper than non roworder retrieval",
+ "analyzing_roworder_scans":
+ [
+
+ {
+ "type": "range_scan",
+ "index": "key1",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key1) <= (100)"
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "intersecting_indexes":
+ [
+
+ {
+ "index": "key1",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 58.252,
+ "disk_sweep_cost": 1923.1,
+ "cumulative_total_cost": 1981.4,
+ "usable": true,
+ "matching_rows_now": 2243,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ },
+
+ {
+ "index": "key2",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 116.5,
+ "disk_sweep_cost": 84.518,
+ "cumulative_total_cost": 201.02,
+ "usable": true,
+ "matching_rows_now": 77.636,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ }
+ ],
+ "clustered_pk":
+ {
+ "clustered_pk_added_to_intersect": false,
+ "cause": "no clustered pk index"
+ },
+ "rows": 77,
+ "cost": 201.02,
+ "covering": false,
+ "chosen": true
+ }
+ },
+
+ {
+ "type": "range_scan",
+ "index": "key3",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key3) <= (100)"
+ ],
+ "analyzing_roworder_intersect":
+ {
+ "intersecting_indexes":
+ [
+
+ {
+ "index": "key3",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 58.252,
+ "disk_sweep_cost": 1923.1,
+ "cumulative_total_cost": 1981.4,
+ "usable": true,
+ "matching_rows_now": 2243,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ },
+
+ {
+ "index": "key4",
+ "index_scan_cost": 58.252,
+ "cumulated_index_scan_cost": 116.5,
+ "disk_sweep_cost": 84.518,
+ "cumulative_total_cost": 201.02,
+ "usable": true,
+ "matching_rows_now": 77.636,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ }
+ ],
+ "clustered_pk":
+ {
+ "clustered_pk_added_to_intersect": false,
+ "cause": "no clustered pk index"
+ },
+ "rows": 77,
+ "cost": 201.02,
+ "covering": false,
+ "chosen": true
+ }
+ }
+ ],
+ "index_roworder_union_cost": 386.73,
+ "members": 2,
+ "chosen": true
+ }
+ ]
+ }
+]
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
+[
+
+ {
+ "range_access_plan":
+ {
+ "type": "index_roworder_union",
+ "union_of":
+ [
+
+ {
+ "type": "index_roworder_intersect",
+ "rows": 77,
+ "cost": 201.02,
+ "covering": false,
+ "clustered_pk_scan": false,
+ "intersect_of":
+ [
+
+ {
+ "type": "range_scan",
+ "index": "key1",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key1) <= (100)"
+ ]
+ },
+
+ {
+ "type": "range_scan",
+ "index": "key2",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key2) <= (100)"
+ ]
+ }
+ ]
+ },
+
+ {
+ "type": "index_roworder_intersect",
+ "rows": 77,
+ "cost": 201.02,
+ "covering": false,
+ "clustered_pk_scan": false,
+ "intersect_of":
+ [
+
+ {
+ "type": "range_scan",
+ "index": "key3",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key3) <= (100)"
+ ]
+ },
+
+ {
+ "type": "range_scan",
+ "index": "key4",
+ "rows": 2243,
+ "ranges":
+ [
+ "(100) <= (key4) <= (100)"
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "rows_for_plan": 154,
+ "cost_for_plan": 386.73,
+ "chosen": true
+ }
+]
+drop table t0,t1;
+set optimizer_trace="enabled=off";
diff --git a/mysql-test/main/opt_trace_index_merge.test b/mysql-test/main/opt_trace_index_merge.test
index d5efaf81db5..73240b6a9e2 100644
--- a/mysql-test/main/opt_trace_index_merge.test
+++ b/mysql-test/main/opt_trace_index_merge.test
@@ -19,3 +19,115 @@ select * from information_schema.OPTIMIZER_TRACE;
drop table t0,t1;
set optimizer_trace="enabled=off";
set @@optimizer_switch= @tmp_opt_switch;
+
+--echo # More tests added for index_merge access
+
+--enable_warnings
+create table t1
+(
+ /* Field names reflect value(rowid) distribution, st=STairs, swt= SaWTooth */
+ st_a int not null default 0,
+ swt1a int not null default 0,
+ swt2a int not null default 0,
+
+ st_b int not null default 0,
+ swt1b int not null default 0,
+ swt2b int not null default 0,
+
+ /* fields/keys for row retrieval tests */
+ key1 int,
+ key2 int,
+ key3 int,
+ key4 int,
+
+ /* make rows much bigger then keys */
+ filler1 char (200),
+ filler2 char (200),
+ filler3 char (200),
+ filler4 char (200),
+ filler5 char (200),
+ filler6 char (200),
+
+ /* order of keys is important */
+ key sta_swt12a(st_a,swt1a,swt2a),
+ key sta_swt1a(st_a,swt1a),
+ key sta_swt2a(st_a,swt2a),
+ key sta_swt21a(st_a,swt2a,swt1a),
+ key st_a(st_a),
+ key stb_swt1a_2b(st_b,swt1b,swt2a),
+ key stb_swt1b(st_b,swt1b),
+ key st_b(st_b),
+
+ key(key1),
+ key(key2),
+ key(key3),
+ key(key4)
+) ;
+# Fill table
+create table t0 as select * from t1;
+--disable_query_log
+--echo # Printing of many insert into t0 values (....) disabled.
+let $cnt=1000;
+while ($cnt)
+{
+ eval insert into t0 values (1, 2, 3, 1, 2, 3, 0, 0, 0, 0, 'data1', 'data2', 'data3', 'data4', 'data5', 'data6');
+ dec $cnt;
+}
+--enable_query_log
+
+alter table t1 disable keys;
+--disable_query_log
+--echo # Printing of many insert into t1 select .... from t0 disabled.
+let $1=4;
+while ($1)
+{
+ let $2=4;
+ while ($2)
+ {
+ let $3=4;
+ while ($3)
+ {
+ eval insert into t1 select $1, $2, $3, $1 ,$2, $3, key1, key2, key3, key4, filler1, filler2, filler3, filler4, filler5, filler6 from t0;
+ dec $3;
+ }
+ dec $2;
+ }
+ dec $1;
+}
+
+--echo # Printing of many insert into t1 (...) values (....) disabled.
+# Row retrieval tests
+# -1 is used for values 'out of any range we are using'
+# insert enough rows for index intersection to be used for (key1,key2)
+insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 100, 100,'key1-key2-key3-key4');
+let $cnt=400;
+while ($cnt)
+{
+ eval insert into t1 (key1, key2, key3, key4, filler1) values (100, -1, 100, -1,'key1-key3');
+ dec $cnt;
+}
+let $cnt=400;
+while ($cnt)
+{
+ eval insert into t1 (key1, key2, key3, key4, filler1) values (-1, 100, -1, 100,'key2-key4');
+ dec $cnt;
+}
+--enable_query_log
+alter table t1 enable keys;
+
+insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2');
+insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3');
+set optimizer_trace='enabled=on';
+
+--echo # 3-way ROR-intersection
+explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+--echo # ROR-union(ROR-intersection, ROR-range)
+explain select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+drop table t0,t1;
+set optimizer_trace="enabled=off";
diff --git a/mysql-test/main/opt_trace_index_merge_innodb.result b/mysql-test/main/opt_trace_index_merge_innodb.result
index 94e9d4f58cc..6a245cc83da 100644
--- a/mysql-test/main/opt_trace_index_merge_innodb.result
+++ b/mysql-test/main/opt_trace_index_merge_innodb.result
@@ -116,7 +116,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"range_scan_alternatives": [
{
"index": "PRIMARY",
- "ranges": ["pk1 < 0", "0 < pk1"],
+ "ranges": ["(pk1) < (0)", "(0) < (pk1)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@@ -127,7 +127,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
},
{
"index": "key1",
- "ranges": ["1 <= key1 <= 1"],
+ "ranges": ["(1) <= (key1) <= (1)"],
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
@@ -164,7 +164,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"type": "range_scan",
"index": "key1",
"rows": 1,
- "ranges": ["1 <= key1 <= 1"]
+ "ranges": ["(1) <= (key1) <= (1)"]
},
"rows_for_plan": 1,
"cost_for_plan": 2.3751,
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 5ab3d70214d..f53efc55158 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -431,16 +431,18 @@ static int and_range_trees(RANGE_OPT_PARAM *param,
static bool remove_nonrange_trees(RANGE_OPT_PARAM *param, SEL_TREE *tree);
static void print_key_value(String *out, const KEY_PART_INFO *key_part,
- const uchar *key);
+ const uchar* key, uint length);
+static void print_keyparts_name(String *out, const KEY_PART_INFO *key_part,
+ uint n_keypart, key_part_map keypart_map);
static void append_range_all_keyparts(Json_writer_array *range_trace,
- String *range_string,
- String *range_so_far, const SEL_ARG *keypart,
+ PARAM param, uint idx,
+ SEL_ARG *keypart,
const KEY_PART_INFO *key_parts);
static
-void append_range(String *out, const KEY_PART_INFO *key_parts,
- const uchar *min_key, const uchar *max_key, const uint flag);
+void append_range(String *out, const KEY_PART_INFO *key_part,
+ KEY_MULTI_RANGE *range, uint n_key_parts);
/*
@@ -2273,10 +2275,7 @@ void TRP_RANGE::trace_basic_info(const PARAM *param,
// TRP_RANGE should not be created if there are no range intervals
DBUG_ASSERT(key);
- String range_info;
- range_info.length(0);
- range_info.set_charset(system_charset_info);
- append_range_all_keyparts(&trace_range, NULL, &range_info, key, key_part);
+ append_range_all_keyparts(&trace_range, *param, key_idx, key, key_part);
}
@@ -2489,10 +2488,8 @@ void TRP_GROUP_MIN_MAX::trace_basic_info(const PARAM *param,
// can have group quick without ranges
if (index_tree)
{
- String range_info;
- range_info.set_charset(system_charset_info);
- append_range_all_keyparts(&trace_range, NULL, &range_info, index_tree,
- key_part);
+ append_range_all_keyparts(&trace_range, *param, param_idx,
+ index_tree, key_part);
}
}
@@ -6398,20 +6395,9 @@ void TRP_ROR_INTERSECT::trace_basic_info(const PARAM *param,
trace_isect_idx.add("rows", (*cur_scan)->records);
Json_writer_array trace_range(thd, "ranges");
- for (const SEL_ARG *current= (*cur_scan)->sel_arg->first(); current;
- current= current->next)
- {
- String range_info;
- range_info.set_charset(system_charset_info);
- for (const SEL_ARG *part= current; part;
- part= part->next_key_part ? part->next_key_part : nullptr)
- {
- const KEY_PART_INFO *cur_key_part= key_part + part->part;
- append_range(&range_info, cur_key_part, part->min_value,
- part->max_value, part->min_flag | part->max_flag);
- }
- trace_range.add(range_info.ptr(), range_info.length());
- }
+
+ append_range_all_keyparts(&trace_range, *param, (*cur_scan)->idx,
+ (*cur_scan)->sel_arg->first(), key_part);
}
}
@@ -7389,9 +7375,6 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
const KEY &cur_key= param->table->key_info[keynr];
const KEY_PART_INFO *key_part= cur_key.key_part;
- String range_info;
- range_info.set_charset(system_charset_info);
-
index_scan->idx= idx;
index_scan->keynr= keynr;
index_scan->key_info= ¶m->table->key_info[keynr];
@@ -7402,8 +7385,8 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
*tree->index_scans_end++= index_scan;
if (unlikely(thd->trace_started()))
- append_range_all_keyparts(&trace_range, NULL, &range_info, key,
- key_part);
+ append_range_all_keyparts(&trace_range, (*param), idx,
+ key, key_part);
trace_range.end();
trace_idx.add("rowid_ordered", param->is_ror_scan)
@@ -13554,11 +13537,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
Json_writer_array trace_range(thd, "ranges");
const KEY_PART_INFO *key_part= cur_index_info->key_part;
-
- String range_info;
- range_info.set_charset(system_charset_info);
- append_range_all_keyparts(&trace_range, NULL, &range_info,
- cur_index_tree, key_part);
+ append_range_all_keyparts(&trace_range, *param, cur_param_idx,
+ cur_index_tree, key_part);
}
}
cost_group_min_max(table, cur_index_info, cur_used_key_parts,
@@ -15730,12 +15710,17 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
}
#endif /* !DBUG_OFF */
+
static
void append_range(String *out, const KEY_PART_INFO *key_part,
- const uchar *min_key, const uchar *max_key, const uint flag)
+ KEY_MULTI_RANGE *range, uint n_key_parts)
{
- if (out->length() > 0)
- out->append(STRING_WITH_LEN(" AND "));
+ uint flag= range->range_flag;
+ String key_name;
+ key_name.set_charset(system_charset_info);
+ key_part_map keypart_map= range->start_key.length ?
+ range->start_key.keypart_map :
+ range->end_key.keypart_map;
if (flag & GEOM_FLAG)
{
@@ -15744,22 +15729,24 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
range types, so printing "col < some_geom" doesn't make sense.
Just print the column name, not operator.
*/
- out->append(key_part->field->field_name);
+ print_keyparts_name(out, key_part, n_key_parts, keypart_map);
out->append(STRING_WITH_LEN(" "));
- print_key_value(out, key_part, min_key);
+ print_key_value(out, key_part, range->start_key.key,
+ range->start_key.length);
return;
}
if (!(flag & NO_MIN_RANGE))
{
- print_key_value(out, key_part, min_key);
+ print_key_value(out, key_part, range->start_key.key,
+ range->start_key.length);
if (flag & NEAR_MIN)
out->append(STRING_WITH_LEN(" < "));
else
out->append(STRING_WITH_LEN(" <= "));
}
- out->append(key_part->field->field_name);
+ print_keyparts_name(out, key_part, n_key_parts, keypart_map);
if (!(flag & NO_MAX_RANGE))
{
@@ -15767,7 +15754,8 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
out->append(STRING_WITH_LEN(" < "));
else
out->append(STRING_WITH_LEN(" <= "));
- print_key_value(out, key_part, max_key);
+ print_key_value(out, key_part, range->end_key.key,
+ range->end_key.length);
}
}
@@ -15775,60 +15763,39 @@ void append_range(String *out, const KEY_PART_INFO *key_part,
Add ranges to the trace
For ex:
- query: select * from t1 where a=2 ;
- and we have an index on a , so we create a range
- 2 <= a <= 2
+ lets say we have an index a_b(a,b)
+ query: select * from t1 where a=2 and b=4 ;
+ so we create a range:
+ (2,4) <= (a,b) <= (2,4)
this is added to the trace
*/
static void append_range_all_keyparts(Json_writer_array *range_trace,
- String *range_string,
- String *range_so_far, const SEL_ARG *keypart,
+ PARAM param, uint idx,
+ SEL_ARG *keypart,
const KEY_PART_INFO *key_parts)
{
-
- DBUG_ASSERT(keypart);
- DBUG_ASSERT(keypart && keypart != &null_element);
-
- // Navigate to first interval in red-black tree
+ SEL_ARG_RANGE_SEQ seq;
+ KEY_MULTI_RANGE range;
+ range_seq_t seq_it;
+ uint flags= 0;
+ RANGE_SEQ_IF seq_if = {NULL, sel_arg_range_seq_init,
+ sel_arg_range_seq_next, 0, 0};
+ KEY *keyinfo= param.table->key_info + param.real_keynr[idx];
+ uint n_key_parts= param.table->actual_n_key_parts(keyinfo);
+ seq.keyno= idx;
+ seq.real_keyno= param.real_keynr[idx];
+ seq.param= ¶m;
+ seq.start= keypart;
const KEY_PART_INFO *cur_key_part= key_parts + keypart->part;
- const SEL_ARG *keypart_range= keypart->first();
- const size_t save_range_so_far_length= range_so_far->length();
-
+ seq_it= seq_if.init((void *) &seq, 0, flags);
- while (keypart_range)
+ while (!seq_if.next(seq_it, &range))
{
- // Append the current range predicate to the range String
- switch (keypart->type)
- {
- case SEL_ARG::Type::KEY_RANGE:
- append_range(range_so_far, cur_key_part, keypart_range->min_value,
- keypart_range->max_value,
- keypart_range->min_flag | keypart_range->max_flag);
- break;
- case SEL_ARG::Type::MAYBE_KEY:
- range_so_far->append("MAYBE_KEY");
- break;
- case SEL_ARG::Type::IMPOSSIBLE:
- range_so_far->append("IMPOSSIBLE");
- break;
- default:
- DBUG_ASSERT(false);
- break;
- }
-
- if (keypart_range->next_key_part &&
- keypart_range->next_key_part->part ==
- keypart_range->part + 1 &&
- keypart_range->is_singlepoint())
- {
- append_range_all_keyparts(range_trace, range_string, range_so_far,
- keypart_range->next_key_part, key_parts);
- }
- else
- range_trace->add(range_so_far->c_ptr_safe(), range_so_far->length());
- keypart_range= keypart_range->next;
- range_so_far->length(save_range_so_far_length);
+ String range_info;
+ range_info.set_charset(system_charset_info);
+ append_range(&range_info, cur_key_part, &range, n_key_parts);
+ range_trace->add(range_info.c_ptr_safe(), range_info.length());
}
}
@@ -15838,70 +15805,110 @@ static void append_range_all_keyparts(Json_writer_array *range_trace,
@param[out] out String the key is appended to
@param[in] key_part Index components description
@param[in] key Key tuple
+ @param[in] used_length length of the key tuple
*/
+
static void print_key_value(String *out, const KEY_PART_INFO *key_part,
- const uchar *key)
+ const uchar* key, uint used_length)
{
+ out->append(STRING_WITH_LEN("("));
Field *field= key_part->field;
+ StringBuffer<128> tmp(system_charset_info);
+ TABLE *table= field->table;
+ uint store_length;
+ my_bitmap_map *old_sets[2];
+ dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+ const uchar *key_end= key+used_length;
- if (field->flags & BLOB_FLAG)
+ for (; key < key_end; key+=store_length, key_part++)
{
- // Byte 0 of a nullable key is the null-byte. If set, key is NULL.
- if (field->real_maybe_null() && *key)
- out->append(STRING_WITH_LEN("NULL"));
- else
- (field->type() == MYSQL_TYPE_GEOMETRY)
- ? out->append(STRING_WITH_LEN("unprintable_geometry_value"))
- : out->append(STRING_WITH_LEN("unprintable_blob_value"));
- return;
- }
+ field= key_part->field;
+ store_length= key_part->store_length;
+ if (field->flags & BLOB_FLAG)
+ {
+ // Byte 0 of a nullable key is the null-byte. If set, key is NULL.
+ if (field->real_maybe_null() && *key)
+ out->append(STRING_WITH_LEN("NULL"));
+ else
+ (field->type() == MYSQL_TYPE_GEOMETRY)
+ ? out->append(STRING_WITH_LEN("unprintable_geometry_value"))
+ : out->append(STRING_WITH_LEN("unprintable_blob_value"));
+ goto next;
+ }
- uint store_length= key_part->store_length;
+ if (field->real_maybe_null())
+ {
+ /*
+ Byte 0 of key is the null-byte. If set, key is NULL.
+ Otherwise, print the key value starting immediately after the
+ null-byte
+ */
+ if (*key)
+ {
+ out->append(STRING_WITH_LEN("NULL"));
+ goto next;
+ }
+ key++; // Skip null byte
+ store_length--;
+ }
- if (field->real_maybe_null())
- {
/*
- Byte 0 of key is the null-byte. If set, key is NULL.
- Otherwise, print the key value starting immediately after the
- null-byte
+ Binary data cannot be converted to UTF8 which is what the
+ optimizer trace expects. If the column is binary, the hex
+ representation is printed to the trace instead.
*/
- if (*key)
+ if (field->flags & BINARY_FLAG)
{
- out->append(STRING_WITH_LEN("NULL"));
- return;
+ out->append("0x");
+ for (uint i = 0; i < store_length; i++)
+ {
+ out->append(_dig_vec_lower[*(key + i) >> 4]);
+ out->append(_dig_vec_lower[*(key + i) & 0x0F]);
+ }
+ goto next;
}
- key++; // Skip null byte
- store_length--;
+
+ field->set_key_image(key, key_part->length);
+ if (field->type() == MYSQL_TYPE_BIT)
+ (void)field->val_int_as_str(&tmp, 1); // may change tmp's charset
+ else
+ field->val_str(&tmp); // may change tmp's charset
+ out->append(tmp.ptr(), tmp.length(), tmp.charset());
+
+ next:
+ if (key + store_length < key_end)
+ out->append(STRING_WITH_LEN(","));
}
+ dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ out->append(STRING_WITH_LEN(")"));
+}
- /*
- Binary data cannot be converted to UTF8 which is what the
- optimizer trace expects. If the column is binary, the hex
- representation is printed to the trace instead.
- */
- if (field->flags & BINARY_FLAG)
+/**
+ Print key parts involved in a range
+ @param[out] out String the key is appended to
+ @param[in] key_part Index components description
+ @param[in] n_keypart Number of keyparts in index
+ @param[in] keypart_map map for keyparts involved in the range
+*/
+
+void print_keyparts_name(String *out, const KEY_PART_INFO *key_part,
+ uint n_keypart, key_part_map keypart_map)
+{
+ uint i;
+ out->append(STRING_WITH_LEN("("));
+ bool first_keypart= TRUE;
+ for (i=0; i < n_keypart; key_part++, i++)
{
- out->append("0x");
- for (uint i = 0; i < store_length; i++)
+ if (keypart_map & (1 << i))
{
- out->append(_dig_vec_lower[*(key + i) >> 4]);
- out->append(_dig_vec_lower[*(key + i) & 0x0F]);
+ if (first_keypart)
+ first_keypart= FALSE;
+ else
+ out->append(STRING_WITH_LEN(","));
+ out->append(key_part->field->field_name);
}
- return;
+ else
+ break;
}
-
- StringBuffer<128> tmp(system_charset_info);
- TABLE *table= field->table;
- my_bitmap_map *old_sets[2];
-
- dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
-
- field->set_key_image(key, key_part->length);
- if (field->type() == MYSQL_TYPE_BIT)
- (void)field->val_int_as_str(&tmp, 1); // may change tmp's charset
- else
- field->val_str(&tmp); // may change tmp's charset
- out->append(tmp.ptr(), tmp.length(), tmp.charset());
-
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
-}
+ out->append(STRING_WITH_LEN(")"));
+}
\ No newline at end of file
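For a quick interactive look at the new tuple-style range format, a minimal sketch (the table, index and data below are illustrative only and not part of the patch):

create table t_demo (a int, b int, key k_ab(a,b));
insert into t_demo values (1,1),(2,4),(2,5),(3,7);
set optimizer_trace='enabled=on';
explain select * from t_demo force index (k_ab) where a=2 and b=4;
# the "ranges" member is now a list of keypart tuples such as
# "(2,4) <= (a,b) <= (2,4)" instead of "2 <= a <= 2 AND 4 <= b <= 4"
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
set optimizer_trace='enabled=off';
drop table t_demo;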
revision-id: dfa24e558ea9fb1ace29e97383577fe4e18d0939 (mariadb-10.4.3-82-gdfa24e5)
parent(s): 22e70afbd6fadcd2151e146d9b825939de2de4b8
committer: Alexey Botchkov
timestamp: 2019-04-04 13:28:43 +0400
message:
MDEV-7974 XA transactions.
XID_STATE::registered_for_binlog is replaced with the rw_trans flag
in ha_prepare().
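In SQL terms the intended effect is roughly the following (an illustrative sketch, not taken from the patch; t1 stands for any InnoDB table):

xa start 'ro';
select count(*) from t1;
xa end 'ro';
# with rw_trans derived from the engines' read/write registration, a
# read-only XA transaction like this one is expected to skip
# tc_log->log_xa_prepare() instead of relying on registered_for_binlog
xa prepare 'ro';
xa rollback 'ro';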
---
sql/handler.cc | 13 ++++++++++---
sql/log.cc | 9 ++++-----
sql/sql_class.h | 1 -
3 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/sql/handler.cc b/sql/handler.cc
index 45d21ea..f21420e 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -113,6 +113,10 @@ TYPELIB tx_isolation_typelib= {array_elements(tx_isolation_names)-1,"",
static TYPELIB known_extensions= {0,"known_exts", NULL, NULL};
uint known_extensions_id= 0;
+static
+uint
+ha_check_and_coalesce_trx_read_only(THD *thd, Ha_trx_info *ha_list,
+ bool all);
static int commit_one_phase_2(THD *thd, bool all, THD_TRANS *trans,
bool is_real_trans);
@@ -1247,7 +1251,6 @@ int ha_prepare(THD *thd)
Ha_trx_info *ha_info= trans->ha_list;
DBUG_ENTER("ha_prepare");
- thd->transaction.xid_state.registered_for_binlog= false;
if (ha_info)
{
for (; ha_info; ha_info= ha_info->next())
@@ -1272,8 +1275,12 @@ int ha_prepare(THD *thd)
}
}
- if (thd->transaction.xid_state.registered_for_binlog &&
- unlikely(tc_log->log_xa_prepare(thd, all)))
+ uint rw_ha_count=
+ ha_check_and_coalesce_trx_read_only(thd,trans->ha_list, all);
+ bool rw_trans=
+ (rw_ha_count > (thd->is_current_stmt_binlog_disabled()?0U:1U));
+
+ if (rw_trans && tc_log->log_xa_prepare(thd, all))
{
ha_rollback_trans(thd, all);
error=1;
diff --git a/sql/log.cc b/sql/log.cc
index 1273ccd..3819f1f 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1958,12 +1958,11 @@ binlog_truncate_trx_cache(THD *thd, binlog_cache_mngr *cache_mngr, bool all)
static int binlog_prepare(handlerton *hton, THD *thd, bool all)
{
/*
- Mark the XA for binlogging.
- Transactions with no binlog handler registered like readonly ones,
- should not go to the binlog.
- Real work is done in MYSQL_BIN_LOG::log_xa_prepare()
+ do nothing.
+ just pretend we can do 2pc, so that MySQL won't
+ switch to 1pc.
+ real work is done in MYSQL_BIN_LOG::log_xa_prepare()
*/
- thd->transaction.xid_state.registered_for_binlog= true;
return 0;
}
diff --git a/sql/sql_class.h b/sql/sql_class.h
index bc87a5a..2e81cc2 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1296,7 +1296,6 @@ typedef struct st_xid_state {
That can cause inconsistencies (shoud be fixed?).
*/
bool is_binlogged;
- bool registered_for_binlog;
/**
Check that XA transaction has an uncommitted work. Report an error
revision-id: 473549db66eba115e6a82708fd312bc58e0aadfd (mariadb-10.4.3-81-g473549d)
parent(s): d943ed2c47839a4acfe501f5ee8129261dee3a70
committer: Alexey Botchkov
timestamp: 2019-04-04 12:50:28 +0400
message:
MDEV-7974 XA transactions.
XID_STATE::registered_for_binlog is replaced with the rw_trans flag
in ha_prepare().
---
sql/handler.cc | 13 ++++++++++---
sql/log.cc | 9 ++++-----
sql/sql_class.h | 1 -
3 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/sql/handler.cc b/sql/handler.cc
index 45d21ea..f21420e 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -113,6 +113,10 @@ TYPELIB tx_isolation_typelib= {array_elements(tx_isolation_names)-1,"",
static TYPELIB known_extensions= {0,"known_exts", NULL, NULL};
uint known_extensions_id= 0;
+static
+uint
+ha_check_and_coalesce_trx_read_only(THD *thd, Ha_trx_info *ha_list,
+ bool all);
static int commit_one_phase_2(THD *thd, bool all, THD_TRANS *trans,
bool is_real_trans);
@@ -1247,7 +1251,6 @@ int ha_prepare(THD *thd)
Ha_trx_info *ha_info= trans->ha_list;
DBUG_ENTER("ha_prepare");
- thd->transaction.xid_state.registered_for_binlog= false;
if (ha_info)
{
for (; ha_info; ha_info= ha_info->next())
@@ -1272,8 +1275,12 @@ int ha_prepare(THD *thd)
}
}
- if (thd->transaction.xid_state.registered_for_binlog &&
- unlikely(tc_log->log_xa_prepare(thd, all)))
+ uint rw_ha_count=
+ ha_check_and_coalesce_trx_read_only(thd,trans->ha_list, all);
+ bool rw_trans=
+ (rw_ha_count > (thd->is_current_stmt_binlog_disabled()?0U:1U));
+
+ if (rw_trans && tc_log->log_xa_prepare(thd, all))
{
ha_rollback_trans(thd, all);
error=1;
diff --git a/sql/log.cc b/sql/log.cc
index 1273ccd..3819f1f 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1958,12 +1958,11 @@ binlog_truncate_trx_cache(THD *thd, binlog_cache_mngr *cache_mngr, bool all)
static int binlog_prepare(handlerton *hton, THD *thd, bool all)
{
/*
- Mark the XA for binlogging.
- Transactions with no binlog handler registered like readonly ones,
- should not go to the binlog.
- Real work is done in MYSQL_BIN_LOG::log_xa_prepare()
+ do nothing.
+ just pretend we can do 2pc, so that MySQL won't
+ switch to 1pc.
+ real work is done in MYSQL_BIN_LOG::log_xa_prepare()
*/
- thd->transaction.xid_state.registered_for_binlog= true;
return 0;
}
diff --git a/sql/sql_class.h b/sql/sql_class.h
index bc87a5a..2e81cc2 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1296,7 +1296,6 @@ typedef struct st_xid_state {
That can cause inconsistencies (shoud be fixed?).
*/
bool is_binlogged;
- bool registered_for_binlog;
/**
Check that XA transaction has an uncommitted work. Report an error
revision-id: d943ed2c47839a4acfe501f5ee8129261dee3a70 (mariadb-10.4.3-80-gd943ed2)
parent(s): 20b444ad535c058ce90d8195fbd2261cb6609dc4
committer: Andrei Elkin
timestamp: 2019-04-03 20:39:25 +0300
message:
MDEV-7974 review/contribution:
The gtid record of XA-prepare should not be merged into the replicated
trx. This patch redirects it into the default mysql.gtid_slave_pos table. A test is added.
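The slave-side effect is roughly the following (mysqltest-style sketch mirroring the added rpl test; table names are illustrative):

connection master;
xa start 'p';
insert into t1 values (5, 6);
xa end 'p';
xa prepare 'p';
connection slave;
# the gtid of the XA PREPARE should appear in the default
# mysql.gtid_slave_pos table rather than being merged into the
# still-prepared transaction
select max(seq_no) from mysql.gtid_slave_pos;
xa recover;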
---
mysql-test/suite/rpl/r/rpl_xa.result | 37 ++++--------
.../suite/rpl/r/rpl_xa_gtid_pos_auto_engine.result | 65 ++++++++++++++++++++
mysql-test/suite/rpl/t/rpl_xa.inc | 69 ++++++++++++++++++++++
mysql-test/suite/rpl/t/rpl_xa.test | 43 +-------------
.../suite/rpl/t/rpl_xa_gtid_pos_auto_engine.test | 25 ++++++++
5 files changed, 171 insertions(+), 68 deletions(-)
diff --git a/mysql-test/suite/rpl/r/rpl_xa.result b/mysql-test/suite/rpl/r/rpl_xa.result
index d406877..5d6dddb 100644
--- a/mysql-test/suite/rpl/r/rpl_xa.result
+++ b/mysql-test/suite/rpl/r/rpl_xa.result
@@ -1,31 +1,22 @@
include/master-slave.inc
[connection master]
+connection master;
create table t1 (a int, b int) engine=InnoDB;
xa start 't';
insert into t1 values(1, 2);
xa end 't';
xa prepare 't';
xa commit 't';
-select * from t1;
-a b
-1 2
connection slave;
-select * from t1;
-a b
-1 2
+include/diff_tables.inc [master:t1, slave:t1]
connection master;
xa start 't';
insert into t1 values(3, 4);
xa end 't';
xa prepare 't';
xa rollback 't';
-select * from t1;
-a b
-1 2
connection slave;
-select * from t1;
-a b
-1 2
+include/diff_tables.inc [master:t1, slave:t1]
connection master;
SET pseudo_slave_mode=1;
create table t2 (a int) engine=InnoDB;
@@ -37,26 +28,20 @@ xa start 's';
insert into t2 values (0);
xa end 's';
xa prepare 's';
+include/sync_with_master_gtid.inc
+xa recover;
+formatID gtrid_length bqual_length data
+1 1 0 t
+1 1 0 s
+connection master;
xa commit 't';
xa commit 's';
SET pseudo_slave_mode=0;
Warnings:
Warning 1231 Slave applier execution mode not active, statement ineffective.
-select * from t1;
-a b
-1 2
-5 6
-select * from t2;
-a
-0
connection slave;
-select * from t1;
-a b
-1 2
-5 6
-select * from t2;
-a
-0
+include/diff_tables.inc [master:t1, slave:t1]
+include/diff_tables.inc [master:t2, slave:t2]
connection master;
drop table t1, t2;
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_xa_gtid_pos_auto_engine.result b/mysql-test/suite/rpl/r/rpl_xa_gtid_pos_auto_engine.result
new file mode 100644
index 0000000..e8b4b05
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_xa_gtid_pos_auto_engine.result
@@ -0,0 +1,65 @@
+include/master-slave.inc
+[connection master]
+connection slave;
+call mtr.add_suppression("The automatically created table.*name may not be entirely in lowercase");
+include/stop_slave.inc
+CHANGE MASTER TO master_use_gtid=slave_pos;
+SET @@global.gtid_pos_auto_engines="innodb";
+include/start_slave.inc
+connection master;
+create table t1 (a int, b int) engine=InnoDB;
+insert into t1 values(0, 0);
+xa start 't';
+insert into t1 values(1, 2);
+xa end 't';
+xa prepare 't';
+xa commit 't';
+connection slave;
+include/diff_tables.inc [master:t1, slave:t1]
+connection master;
+xa start 't';
+insert into t1 values(3, 4);
+xa end 't';
+xa prepare 't';
+xa rollback 't';
+connection slave;
+include/diff_tables.inc [master:t1, slave:t1]
+connection master;
+SET pseudo_slave_mode=1;
+create table t2 (a int) engine=InnoDB;
+xa start 't';
+insert into t1 values (5, 6);
+xa end 't';
+xa prepare 't';
+xa start 's';
+insert into t2 values (0);
+xa end 's';
+xa prepare 's';
+connection slave;
+include/sync_with_master_gtid.inc
+SELECT @@global.gtid_slave_pos = CONCAT(domain_id,"-",server_id,"-",seq_no) FROM mysql.gtid_slave_pos WHERE seq_no = (SELECT DISTINCT max(seq_no) FROM mysql.gtid_slave_pos);
+@@global.gtid_slave_pos = CONCAT(domain_id,"-",server_id,"-",seq_no)
+1
+xa recover;
+formatID gtrid_length bqual_length data
+1 1 0 t
+1 1 0 s
+connection master;
+xa commit 't';
+xa commit 's';
+SET pseudo_slave_mode=0;
+Warnings:
+Warning 1231 Slave applier execution mode not active, statement ineffective.
+connection slave;
+include/diff_tables.inc [master:t1, slave:t1]
+include/diff_tables.inc [master:t2, slave:t2]
+connection master;
+drop table t1, t2;
+connection slave;
+include/stop_slave.inc
+SET @@global.gtid_pos_auto_engines="";
+SET @@session.sql_log_bin=0;
+DROP TABLE mysql.gtid_slave_pos_InnoDB;
+SET @@session.sql_log_bin=1;
+include/start_slave.inc
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_xa.inc b/mysql-test/suite/rpl/t/rpl_xa.inc
new file mode 100644
index 0000000..69d9f90
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_xa.inc
@@ -0,0 +1,69 @@
+#
+# This "body" file checks general properties of XA transaction replication
+# as of MDEV-7974.
+# Parameters:
+# --let rpl_xa_check= SELECT ...
+#
+connection master;
+create table t1 (a int, b int) engine=InnoDB;
+insert into t1 values(0, 0);
+xa start 't';
+insert into t1 values(1, 2);
+xa end 't';
+xa prepare 't';
+xa commit 't';
+
+sync_slave_with_master;
+let $diff_tables= master:t1, slave:t1;
+source include/diff_tables.inc;
+
+connection master;
+
+xa start 't';
+insert into t1 values(3, 4);
+xa end 't';
+xa prepare 't';
+xa rollback 't';
+
+sync_slave_with_master;
+let $diff_tables= master:t1, slave:t1;
+source include/diff_tables.inc;
+
+connection master;
+SET pseudo_slave_mode=1;
+create table t2 (a int) engine=InnoDB;
+xa start 't';
+insert into t1 values (5, 6);
+xa end 't';
+xa prepare 't';
+xa start 's';
+insert into t2 values (0);
+xa end 's';
+xa prepare 's';
+
+connection slave;
+source include/sync_with_master_gtid.inc;
+if ($rpl_xa_check)
+{
+ --eval $rpl_xa_check
+ if ($rpl_xa_verbose)
+ {
+ --eval SELECT $rpl_xa_check_lhs
+ --eval SELECT $rpl_xa_check_rhs
+ }
+}
+xa recover;
+
+connection master;
+xa commit 't';
+xa commit 's';
+SET pseudo_slave_mode=0;
+
+sync_slave_with_master;
+let $diff_tables= master:t1, slave:t1;
+source include/diff_tables.inc;
+let $diff_tables= master:t2, slave:t2;
+source include/diff_tables.inc;
+
+connection master;
+drop table t1, t2;
diff --git a/mysql-test/suite/rpl/t/rpl_xa.test b/mysql-test/suite/rpl/t/rpl_xa.test
index fda84b3..05a1abe 100644
--- a/mysql-test/suite/rpl/t/rpl_xa.test
+++ b/mysql-test/suite/rpl/t/rpl_xa.test
@@ -1,46 +1,5 @@
source include/have_innodb.inc;
source include/master-slave.inc;
-create table t1 (a int, b int) engine=InnoDB;
-xa start 't';
-insert into t1 values(1, 2);
-xa end 't';
-xa prepare 't';
-xa commit 't';
-select * from t1;
-sync_slave_with_master;
-select * from t1;
-connection master;
-
-xa start 't';
-insert into t1 values(3, 4);
-xa end 't';
-xa prepare 't';
-xa rollback 't';
-select * from t1;
-sync_slave_with_master;
-select * from t1;
-
-connection master;
-SET pseudo_slave_mode=1;
-create table t2 (a int) engine=InnoDB;
-xa start 't';
-insert into t1 values (5, 6);
-xa end 't';
-xa prepare 't';
-xa start 's';
-insert into t2 values (0);
-xa end 's';
-xa prepare 's';
-xa commit 't';
-xa commit 's';
-SET pseudo_slave_mode=0;
-select * from t1;
-select * from t2;
-sync_slave_with_master;
-select * from t1;
-select * from t2;
-
-connection master;
-drop table t1, t2;
+source rpl_xa.inc;
source include/rpl_end.inc;
diff --git a/mysql-test/suite/rpl/t/rpl_xa_gtid_pos_auto_engine.test b/mysql-test/suite/rpl/t/rpl_xa_gtid_pos_auto_engine.test
new file mode 100644
index 0000000..803e29d
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_xa_gtid_pos_auto_engine.test
@@ -0,0 +1,25 @@
+--source include/have_innodb.inc
+--source include/master-slave.inc
+
+--connection slave
+call mtr.add_suppression("The automatically created table.*name may not be entirely in lowercase");
+
+--source include/stop_slave.inc
+CHANGE MASTER TO master_use_gtid=slave_pos;
+
+SET @@global.gtid_pos_auto_engines="innodb";
+--source include/start_slave.inc
+--let $rpl_xa_check_lhs= @@global.gtid_slave_pos
+--let $rpl_xa_check_rhs= CONCAT(domain_id,"-",server_id,"-",seq_no) FROM mysql.gtid_slave_pos WHERE seq_no = (SELECT DISTINCT max(seq_no) FROM mysql.gtid_slave_pos)
+--let $rpl_xa_check=SELECT $rpl_xa_check_lhs = $rpl_xa_check_rhs
+--source rpl_xa.inc
+
+--connection slave
+--source include/stop_slave.inc
+SET @@global.gtid_pos_auto_engines="";
+SET @@session.sql_log_bin=0;
+DROP TABLE mysql.gtid_slave_pos_InnoDB;
+SET @@session.sql_log_bin=1;
+--source include/start_slave.inc
+
+--source include/rpl_end.inc
[Commits] 962782f: MDEV-19112 WITH clause does not work with information_schema as default database
by IgorBabaev 04 Apr '19
by IgorBabaev 04 Apr '19
04 Apr '19
revision-id: 962782f767879eb64d088aa483007dc25b4fe74b (mariadb-10.2.23-42-g962782f)
parent(s): b718ec055d41e45cbbca0bb0c9fdf86310ce9e02
author: Igor Babaev
committer: Igor Babaev
timestamp: 2019-04-03 22:52:29 -0700
message:
MDEV-19112 WITH clause does not work with information_schema as default database
With INFORMATION_SCHEMA set as the default database, the check that a table
referred to in the processed query is defined in INFORMATION_SCHEMA must
be postponed until all CTE names can be identified.
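In other words (an illustrative sketch; the added test exercises the same statements):

use information_schema;
# before this change the parse-time check could reject `t` with
# ER_UNKNOWN_TABLE because no INFORMATION_SCHEMA table of that name exists
with t as (select 1 as t) select * from t;
# and a CTE sharing its name with an I_S table now resolves to the CTE
with columns as (select 1 as t) select * from columns;
use test;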
---
mysql-test/r/cte_nonrecursive.result | 14 ++++++++++++++
mysql-test/t/cte_nonrecursive.test | 12 ++++++++++++
sql/sql_base.cc | 24 ++++++++++++++++++++++++
sql/sql_cte.cc | 1 +
sql/sql_parse.cc | 15 +--------------
5 files changed, 52 insertions(+), 14 deletions(-)
diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result
index fc65458..8ad3818 100644
--- a/mysql-test/r/cte_nonrecursive.result
+++ b/mysql-test/r/cte_nonrecursive.result
@@ -1659,3 +1659,17 @@ a
2
drop view v1;
drop table t1,t2;
+#
+# MDEV-19112: CTE usage when information_schema is set as default db
+#
+with t as (select 1 as t ) select * from t;
+t
+1
+use information_schema;
+with t as (select 1 as t) select * from t;
+t
+1
+with columns as (select 1 as t) select * from columns;
+t
+1
+use test;
diff --git a/mysql-test/t/cte_nonrecursive.test b/mysql-test/t/cte_nonrecursive.test
index 920c27a..c0c5c22 100644
--- a/mysql-test/t/cte_nonrecursive.test
+++ b/mysql-test/t/cte_nonrecursive.test
@@ -1168,3 +1168,15 @@ select * from v1;
drop view v1;
drop table t1,t2;
+
+--echo #
+--echo # MDEV-19112: CTE usage when information_schema is set as default db
+--echo #
+
+with t as (select 1 as t ) select * from t;
+
+use information_schema;
+with t as (select 1 as t) select * from t;
+with columns as (select 1 as t) select * from columns;
+
+use test;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index c282db4..e0a907a 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -3344,6 +3344,30 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
goto end;
}
}
+
+ if (!tables->derived &&
+ is_infoschema_db(tables->db, tables->db_length))
+ {
+ /*
+ Check whether the information schema contains a table
+ whose name is tables->schema_table_name
+ */
+ ST_SCHEMA_TABLE *schema_table;
+ schema_table= find_schema_table(thd, tables->schema_table_name);
+ if (!schema_table ||
+ (schema_table->hidden &&
+ ((sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0 ||
+ /*
+ this check is used for show columns|keys from I_S hidden table
+ */
+ lex->sql_command == SQLCOM_SHOW_FIELDS ||
+ lex->sql_command == SQLCOM_SHOW_KEYS)))
+ {
+ my_error(ER_UNKNOWN_TABLE, MYF(0),
+ tables->schema_table_name, INFORMATION_SCHEMA_NAME.str);
+ DBUG_RETURN(1);
+ }
+ }
/*
If this TABLE_LIST object is a placeholder for an information_schema
table, create a temporary table to represent the information_schema
diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc
index bcba4d0..d922a7a 100644
--- a/sql/sql_cte.cc
+++ b/sql/sql_cte.cc
@@ -1094,6 +1094,7 @@ bool TABLE_LIST::set_as_with_table(THD *thd, With_element *with_elem)
table= 0;
}
with= with_elem;
+ schema_table= NULL;
if (!with_elem->is_referenced() || with_elem->is_recursive)
{
derived= with_elem->spec;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index a80bf00..28cf549 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -8222,7 +8222,6 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ptr->derived= table->sel;
if (!ptr->derived && is_infoschema_db(ptr->db, ptr->db_length))
{
- ST_SCHEMA_TABLE *schema_table;
if (ptr->updating &&
/* Special cases which are processed by commands itself */
lex->sql_command != SQLCOM_CHECK &&
@@ -8234,20 +8233,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
INFORMATION_SCHEMA_NAME.str);
DBUG_RETURN(0);
}
+ ST_SCHEMA_TABLE *schema_table;
schema_table= find_schema_table(thd, ptr->table_name);
- if (!schema_table ||
- (schema_table->hidden &&
- ((sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0 ||
- /*
- this check is used for show columns|keys from I_S hidden table
- */
- lex->sql_command == SQLCOM_SHOW_FIELDS ||
- lex->sql_command == SQLCOM_SHOW_KEYS)))
- {
- my_error(ER_UNKNOWN_TABLE, MYF(0),
- ptr->table_name, INFORMATION_SCHEMA_NAME.str);
- DBUG_RETURN(0);
- }
ptr->schema_table_name= ptr->table_name;
ptr->schema_table= schema_table;
}
[Commits] 7597ddb: rename mysql-config.sh to mysql-vars.sh to avoid naming confusion
by Sergei Petrunia 03 Apr '19
by Sergei Petrunia 03 Apr '19
03 Apr '19
revision-id: 7597ddb1713e3fbae44574ab925399c88f0f8dea ()
parent(s): 8f1e3889dc57153f6594fef29a110a2be87b2e2e
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2019-04-03 14:01:07 +0300
message:
rename mysql-config.sh to mysql-vars.sh to avoid naming confusion
---
mariadb-tpcds-tooling2/02-load-dataset.sh | 6 +++---
mariadb-tpcds-tooling2/20-run-queries.sh | 6 +++---
setup-server/setup-mariadb-current.sh | 2 +-
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/mariadb-tpcds-tooling2/02-load-dataset.sh b/mariadb-tpcds-tooling2/02-load-dataset.sh
index 8065ed0..79db23f 100755
--- a/mariadb-tpcds-tooling2/02-load-dataset.sh
+++ b/mariadb-tpcds-tooling2/02-load-dataset.sh
@@ -2,12 +2,12 @@
set -e
-if [ ! -f mysql-config.sh ] ; then
- echo "Cannot find mysql-config.sh - did you setup the db?"
+if [ ! -f mysql-vars.sh ] ; then
+ echo "Cannot find mysql-vars.sh - did you setup the db?"
exit 1;
fi
-source mysql-config.sh
+source mysql-vars.sh
echo "Loading data using $MYSQL $MYSQL_ARGS";
# This should fail if the database already exists, right?
diff --git a/mariadb-tpcds-tooling2/20-run-queries.sh b/mariadb-tpcds-tooling2/20-run-queries.sh
index 5cea382..60866bb 100755
--- a/mariadb-tpcds-tooling2/20-run-queries.sh
+++ b/mariadb-tpcds-tooling2/20-run-queries.sh
@@ -1,11 +1,11 @@
#!/bin/bash
-if [ ! -f mysql-config.sh ] ; then
- echo "Cannot find mysql-config.sh - did you setup the db?"
+if [ ! -f mysql-vars.sh ] ; then
+ echo "Cannot find mysql-vars.sh - did you setup the db?"
exit 1;
fi
-source mysql-config.sh
+source mysql-vars.sh
echo "Running data using $MYSQL $MYSQL_ARGS";
ls queries-for-mysql/*.sql | while read a ; do
diff --git a/setup-server/setup-mariadb-current.sh b/setup-server/setup-mariadb-current.sh
index cc2635d..2e5103a 100755
--- a/setup-server/setup-mariadb-current.sh
+++ b/setup-server/setup-mariadb-current.sh
@@ -72,7 +72,7 @@ innodb_buffer_pool_size=8G
EOF
-cat > mysql-config.sh <<EOF
+cat > mysql-vars.sh <<EOF
MYSQL="./mariadb-$BRANCH/client/mysql"
MYSQL_SOCKET="--socket=$SOCKETNAME"
MYSQL_USER="-uroot"
03 Apr '19
revision-id: 8f1e3889dc57153f6594fef29a110a2be87b2e2e ()
parent(s): 93ecbe81bd3a189c6c3ed819c8f6987fa632e4a8
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2019-04-03 13:38:17 +0300
message:
setup-os-ubuntu should install libgnutls28-dev
---
setup-server/setup-os-ubuntu.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/setup-server/setup-os-ubuntu.sh b/setup-server/setup-os-ubuntu.sh
old mode 100644
new mode 100755
index aec787d..5206107
--- a/setup-server/setup-os-ubuntu.sh
+++ b/setup-server/setup-os-ubuntu.sh
@@ -8,7 +8,7 @@ if [ -e /etc/debian_version ] ; then
sudo apt-get -y install libzstd0
sudo apt-get -y install libssl-dev
- sudo apt-get install libgnutls28-dev
+ sudo apt-get -y install libgnutls28-dev
# percona server:
sudo apt-get -y install libcurl4-gnutls-dev
03 Apr '19
revision-id: 86392ed0be358fff0a2da5575fcde7b37a20497d (mariadb-10.4.3-142-g86392ed0be3)
parent(s): e10f9e6c810bf0ced6d79f486d6625d2ed04932e
author: Jan Lindström
committer: Jan Lindström
timestamp: 2019-04-03 09:23:29 +0300
message:
MDEV-19156: Galera test failure on galera_sr_cc_master
Test cleanup and fix.
---
.../suite/galera_sr/r/galera_sr_cc_master.result | 43 +++++++++++-----------
.../suite/galera_sr/t/galera_sr_cc_master.test | 16 ++++----
2 files changed, 31 insertions(+), 28 deletions(-)
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_cc_master.result b/mysql-test/suite/galera_sr/r/galera_sr_cc_master.result
index 79bc29200f9..9e223414fe4 100644
--- a/mysql-test/suite/galera_sr/r/galera_sr_cc_master.result
+++ b/mysql-test/suite/galera_sr/r/galera_sr_cc_master.result
@@ -1,5 +1,6 @@
connection node_2;
connection node_1;
+CALL mtr.add_suppression("WSREP: discarding established.*");
connection node_1;
connection node_2;
connection node_2;
@@ -12,13 +13,13 @@ INSERT INTO t1 VALUES (2);
INSERT INTO t1 VALUES (3);
INSERT INTO t1 VALUES (4);
INSERT INTO t1 VALUES (5);
-SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
-COUNT(*) > 0
-1
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+5
connection node_1;
-SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
-COUNT(*) > 0
-1
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+5
connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2a;
SET SESSION wsrep_sync_wait=0;
@@ -28,18 +29,18 @@ connection node_2;
INSERT INTO t1 VALUES (6);
ERROR HY000: Lost connection to MySQL server during query
connection node_1;
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
-COUNT(*) = 0
-1
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+0
connection node_2a;
connection node_1;
connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2b;
SELECT * FROM mysql.wsrep_streaming_log;
node_uuid trx_id seqno flags frag
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
-COUNT(*) = 0
-1
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+0
SET AUTOCOMMIT=OFF;
START TRANSACTION;
INSERT INTO t1 VALUES (1);
@@ -48,16 +49,16 @@ INSERT INTO t1 VALUES (3);
INSERT INTO t1 VALUES (4);
INSERT INTO t1 VALUES (5);
COMMIT;
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
-COUNT(*) = 0
-1
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+0
connection node_1;
-SELECT COUNT(*) = 5 FROM t1;
-COUNT(*) = 5
-1
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
-COUNT(*) = 0
-1
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+5
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+0
DROP TABLE t1;
connection node_2b;
CALL mtr.add_suppression("WSREP: Failed to replicate rollback fragment for");
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_cc_master.test b/mysql-test/suite/galera_sr/t/galera_sr_cc_master.test
index 8ae8c204f60..da547c59626 100644
--- a/mysql-test/suite/galera_sr/t/galera_sr_cc_master.test
+++ b/mysql-test/suite/galera_sr/t/galera_sr_cc_master.test
@@ -7,6 +7,8 @@
# leave the cluster.
#
+CALL mtr.add_suppression("WSREP: discarding established.*");
+
# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
@@ -26,10 +28,10 @@ INSERT INTO t1 VALUES (3);
INSERT INTO t1 VALUES (4);
INSERT INTO t1 VALUES (5);
-SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
--connection node_1
-SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
#
# Trigger CC . The transaction is aborted and we expect the SR tables to be cleaned up
@@ -50,7 +52,7 @@ SET SESSION wsrep_sync_wait = DEFAULT;
INSERT INTO t1 VALUES (6);
--connection node_1
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
# Restore cluster
@@ -68,7 +70,7 @@ SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
--connection node_2b
--source include/galera_wait_ready.inc
SELECT * FROM mysql.wsrep_streaming_log;
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
# Repeat transaction to confirm no locks are left from previous transaction
@@ -81,11 +83,11 @@ INSERT INTO t1 VALUES (4);
INSERT INTO t1 VALUES (5);
COMMIT;
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
--connection node_1
-SELECT COUNT(*) = 5 FROM t1;
-SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
DROP TABLE t1;
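Note on workflow: once the .test file prints exact counts instead of boolean checks, the recorded .result file has to be regenerated to match. A sketch of the usual mysql-test steps, assuming a built source tree:

    cd mysql-test
    # Re-record the expected output after editing the .test file
    ./mysql-test-run.pl --record galera_sr.galera_sr_cc_master
    # Run it again (without --record) to confirm the new .result matches
    ./mysql-test-run.pl galera_sr.galera_sr_cc_master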