[Commits] db76d67: MDEV-21446 Assertion `!prebuilt->index->is_primary()' failed in
by IgorBabaev 20 Jan '20
by IgorBabaev 20 Jan '20
20 Jan '20
revision-id: db76d677345441246e1521204d9b418da36b6591 (mariadb-10.4.11-36-gdb76d67)
parent(s): 4de32015be82d0f484a7b49a427853ea7b6da5fd
author: Igor Babaev
committer: Igor Babaev
timestamp: 2020-01-19 20:03:25 -0800
message:
MDEV-21446 Assertion `!prebuilt->index->is_primary()' failed in
row_search_idx_cond_check with rowid_filter upon concurrent access to table
This bug has nothing to do with the concurrent access to table. Rather it
concerns queries for which the optimizer decides to employ a rowid filter
when accessing an InnoDB table by a secondary index, but later when
calling test_if_skip_sort_order() changes its mind to access the table by
the primary key.
Currently usage of rowid filters is not supported in InnoDB if the table
is accessed by the primary key. So in this case usage of a rowid filter
to access the table must be prohibited.
---
mysql-test/main/rowid_filter_innodb.result | 53 ++++++++++++++++++++++++++++++
mysql-test/main/rowid_filter_innodb.test | 49 +++++++++++++++++++++++++++
sql/sql_select.cc | 13 ++++++++
3 files changed, 115 insertions(+)
diff --git a/mysql-test/main/rowid_filter_innodb.result b/mysql-test/main/rowid_filter_innodb.result
index 36a59b8..c59b95b 100644
--- a/mysql-test/main/rowid_filter_innodb.result
+++ b/mysql-test/main/rowid_filter_innodb.result
@@ -2469,3 +2469,56 @@ Warnings:
Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`domain` AS `domain`,`test`.`t1`.`registrant_name` AS `registrant_name`,`test`.`t1`.`registrant_organization` AS `registrant_organization`,`test`.`t1`.`registrant_street1` AS `registrant_street1`,`test`.`t1`.`registrant_street2` AS `registrant_street2`,`test`.`t1`.`registrant_street3` AS `registrant_street3`,`test`.`t1`.`registrant_street4` AS `registrant_street4`,`test`.`t1`.`registrant_street5` AS `registrant_street5`,`test`.`t1`.`registrant_city` AS `registrant_city`,`test`.`t1`.`registrant_postal_code` AS `registrant_postal_code`,`test`.`t1`.`registrant_country` AS `registrant_country`,`test`.`t1`.`registrant_email` AS `registrant_email`,`test`.`t1`.`registrant_telephone` AS `registrant_telephone`,`test`.`t1`.`administrative_name` AS `administrative_name`,`test`.`t1`.`administrative_organization` AS `administrative_organization`,`test`.`t1`.`administrative_street1` AS `administrative_street1`,`test`.`t1`.`administrati
ve_stree
t2` AS `administrative_street2`,`test`.`t1`.`administrative_street3` AS `administrative_street3`,`test`.`t1`.`administrative_street4` AS `administrative_street4`,`test`.`t1`.`administrative_street5` AS `administrative_street5`,`test`.`t1`.`administrative_city` AS `administrative_city`,`test`.`t1`.`administrative_postal_code` AS `administrative_postal_code`,`test`.`t1`.`administrative_country` AS `administrative_country`,`test`.`t1`.`administrative_email` AS `administrative_email`,`test`.`t1`.`administrative_telephone` AS `administrative_telephone`,`test`.`t1`.`technical_name` AS `technical_name`,`test`.`t1`.`technical_organization` AS `technical_organization`,`test`.`t1`.`technical_street1` AS `technical_street1`,`test`.`t1`.`technical_street2` AS `technical_street2`,`test`.`t1`.`technical_street3` AS `technical_street3`,`test`.`t1`.`technical_street4` AS `technical_street4`,`test`.`t1`.`technical_street5` AS `technical_street5`,`test`.`t1`.`technical_city` AS `technical_cit
y`,`test
`.`t1`.`technical_postal_code` AS `technical_postal_code`,`test`.`t1`.`technical_country` AS `technical_country`,`test`.`t1`.`technical_email` AS `technical_email`,`test`.`t1`.`technical_telephone` AS `technical_telephone`,`test`.`t1`.`json` AS `json`,`test`.`t1`.`timestamp` AS `timestamp` from `test`.`t1` where `test`.`t1`.`domain` = 'www.mailhost.i-dev.fr' and `test`.`t1`.`timestamp` >= <cache>(current_timestamp() + interval -1 month) order by `test`.`t1`.`timestamp` desc
SET optimizer_switch=@save_optimizer_switch;
DROP TABLE t1;
+#
+# MDEV-21446: index to access the table is changed for primary key
+#
+SET @stats.save= @@innodb_stats_persistent;
+SET global innodb_stats_persistent=on;
+CREATE TABLE t1 (
+pk int auto_increment,
+a int,
+b int,
+primary key (pk),
+key (a),
+key (b)
+) ENGINE=InnoDB;
+INSERT INTO t1 (a,b) VALUES
+(0,0), (0,9), (0,NULL), (1,2), (4,0), (2,9), (1,0), (NULL,0), (5,NULL), (5,1),
+(0,7), (NULL,5), (NULL,0), (2,1), (2,5), (6,NULL), (0,NULL), (NULL,8), (8,5),
+(2,NULL), (2,3), (NULL,8), (NULL,6), (1,1), (5,1), (NULL,5), (4,4), (2,4),
+(2,5), (1,9), (NULL,0), (3,7), (0,4), (2,8), (1,2), (1,4), (2,1),
+(NULL,7), (6,6), (3,0), (4,5), (5,2), (8,2), (NULL,NULL), (8,NULL),
+(0,1),(0,7);
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+EXPLAIN EXTENDED
+SELECT * FROM t1
+WHERE (a BETWEEN 9 AND 10 OR a IS NULL) AND (b BETWEEN 9 AND 10 OR b = 9);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range|filter a,b b|a 5|5 NULL 192 (21%) 21.31 Using index condition; Using where; Using rowid filter
+Warnings:
+Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` between 9 and 10 or `test`.`t1`.`a` is null) and (`test`.`t1`.`b` between 9 and 10 or `test`.`t1`.`b` = 9)
+EXPLAIN EXTENDED
+SELECT * FROM t1
+WHERE (a BETWEEN 9 AND 10 OR a IS NULL) AND (b BETWEEN 9 AND 10 OR b = 9)
+ORDER BY pk LIMIT 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 index a,b PRIMARY 4 NULL 75 54.55 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` between 9 and 10 or `test`.`t1`.`a` is null) and (`test`.`t1`.`b` between 9 and 10 or `test`.`t1`.`b` = 9) order by `test`.`t1`.`pk` limit 1
+ANALYZE
+SELECT * FROM t1
+WHERE (a BETWEEN 9 AND 10 OR a IS NULL) AND (b BETWEEN 9 AND 10 OR b = 9)
+ORDER BY pk LIMIT 1;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t1 index a,b PRIMARY 4 NULL 3008 3008.00 1.36 0.00 Using where
+DROP TABLE t1;
+SET global innodb_stats_persistent= @stats.save;
diff --git a/mysql-test/main/rowid_filter_innodb.test b/mysql-test/main/rowid_filter_innodb.test
index 1a5c8fe..30e0ede 100644
--- a/mysql-test/main/rowid_filter_innodb.test
+++ b/mysql-test/main/rowid_filter_innodb.test
@@ -332,3 +332,52 @@ eval EXPLAIN EXTENDED $q;
SET optimizer_switch=@save_optimizer_switch;
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-21446: index to access the table is changed for primary key
+--echo #
+
+SET @stats.save= @@innodb_stats_persistent;
+SET global innodb_stats_persistent=on;
+
+CREATE TABLE t1 (
+ pk int auto_increment,
+ a int,
+ b int,
+ primary key (pk),
+ key (a),
+ key (b)
+) ENGINE=InnoDB;
+
+INSERT INTO t1 (a,b) VALUES
+(0,0), (0,9), (0,NULL), (1,2), (4,0), (2,9), (1,0), (NULL,0), (5,NULL), (5,1),
+(0,7), (NULL,5), (NULL,0), (2,1), (2,5), (6,NULL), (0,NULL), (NULL,8), (8,5),
+(2,NULL), (2,3), (NULL,8), (NULL,6), (1,1), (5,1), (NULL,5), (4,4), (2,4),
+(2,5), (1,9), (NULL,0), (3,7), (0,4), (2,8), (1,2), (1,4), (2,1),
+(NULL,7), (6,6), (3,0), (4,5), (5,2), (8,2), (NULL,NULL), (8,NULL),
+(0,1),(0,7);
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+INSERT INTO t1(a,b) SELECT a, b FROM t1;
+
+ANALYZE TABLE t1;
+
+EXPLAIN EXTENDED
+SELECT * FROM t1
+ WHERE (a BETWEEN 9 AND 10 OR a IS NULL) AND (b BETWEEN 9 AND 10 OR b = 9);
+
+EXPLAIN EXTENDED
+SELECT * FROM t1
+ WHERE (a BETWEEN 9 AND 10 OR a IS NULL) AND (b BETWEEN 9 AND 10 OR b = 9)
+ORDER BY pk LIMIT 1;
+
+ANALYZE
+SELECT * FROM t1
+ WHERE (a BETWEEN 9 AND 10 OR a IS NULL) AND (b BETWEEN 9 AND 10 OR b = 9)
+ORDER BY pk LIMIT 1;
+
+DROP TABLE t1;
+SET global innodb_stats_persistent= @stats.save;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index d9d9c22..0e0c5c9 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -23241,6 +23241,19 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
join_read_first:join_read_last;
tab->type=JT_NEXT; // Read with index_first(), index_next()
+ /*
+ Currently usage of rowid filters is not supported in InnoDB
+ if the table is accessed by the primary key
+ */
+ if (tab->rowid_filter &&
+ tab->index == table->s->primary_key &&
+ table->file->primary_key_is_clustered())
+ {
+ tab->range_rowid_filter_info= 0;
+ delete tab->rowid_filter;
+ tab->rowid_filter= 0;
+ }
+
if (tab->pre_idx_push_select_cond)
{
tab->set_cond(tab->pre_idx_push_select_cond);
1
0
[Commits] e093ddb70dc: MDEV-21383: Possible range plan is not used under certain conditions
by psergey 19 Jan '20
by psergey 19 Jan '20
19 Jan '20
revision-id: e093ddb70dc93a5c0934656b201b881de185df8c (mariadb-10.3.21-26-ge093ddb70dc)
parent(s): 9be5c19c3458963ccf82450b198567976275d599
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2020-01-20 00:06:51 +0300
message:
MDEV-21383: Possible range plan is not used under certain conditions
[Variant 2 of the fix: collect the attached conditions]
Problem:
make_join_select() has a section of code which starts with
"We plan to scan all rows. Check again if we should use an index."
the code in that section will [unnecessarily] re-run the range
optimizer using this condition:
condition_attached_to_current_table AND current_table's_ON_expr
Note that the original invocation of range optimizer in
make_join_statistics was done using the whole select's WHERE condition.
Taking the whole select's WHERE condition and using multiple-equalities
allowed the range optimizer to infer more range restrictions.
The fix:
- Do range optimization using a condition that is an AND of this table's
condition and all of the previous tables' conditions.
- Also, fix the range optimizer to prefer SEL_ARGs with type=KEY_RANGE
over SEL_ARGs with type=MAYBE_KEY, regardless of the key part.
Computing
key_and(
SEL_ARG(type=MAYBE_KEY key_part=1),
SEL_ARG(type=KEY_RANGE, key_part=2)
)
will now produce the SEL_ARG with type=KEY_RANGE.
---
mysql-test/main/join.result | 56 +++++++++++++++++++++++++++++++++++
mysql-test/main/join.test | 65 ++++++++++++++++++++++++++++++++++++++++
sql/opt_range.cc | 2 ++
sql/sql_select.cc | 72 ++++++++++++++++++++++++++++++++++++++++++++-
4 files changed, 194 insertions(+), 1 deletion(-)
diff --git a/mysql-test/main/join.result b/mysql-test/main/join.result
index fb4f35ed555..fe6d18f7807 100644
--- a/mysql-test/main/join.result
+++ b/mysql-test/main/join.result
@@ -3339,3 +3339,59 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using where
1 SIMPLE t1 ref a a 5 test.t0.a 1
drop table t0,t1;
+#
+# MDEV-21383: Possible range plan is not used under certain conditions
+#
+drop table if exists t10, t1000, t03;
+create table t10(a int);
+insert into t10 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1000(a int);
+insert into t1000 select A.a + B.a* 10 + C.a * 100 from t10 A, t10 B, t10 C;
+create table t03(a int);
+insert into t03 values (0),(1),(2);
+create table t1 (
+stationid int
+);
+insert into t1 select a from t10;
+CREATE TABLE t2 (
+stationId int,
+startTime int,
+filler char(100),
+key1 int,
+key2 int,
+key(key1),
+key(key2),
+PRIMARY KEY (`stationId`,`startTime`)
+);
+insert into t2 select
+A.a,
+B.a,
+repeat('filler=data-', 4),
+B.a,
+1
+from
+t03 A,
+t1000 B;
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status OK
+create table t3(a int, filler char(100), key(a));
+insert into t3 select A.a+1000*B.a, 'filler-data' from t1000 A, t10 B;
+# This should produce a join order of t1,t2,t3
+# t2 should have type=range, key=PRIMARY key_len=8 (not type=ALL or key_len<8)
+explain
+SELECT *
+FROM
+t1,t2,t3
+WHERE
+t2.startTime <= 100 and
+t2.stationId = t1.stationId and
+(t1.stationid = 1 or t1.stationid = 2 or t1.stationid = 3) and
+key1 >0 and
+t2.key2=t3.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using where
+1 SIMPLE t2 range PRIMARY,key1,key2 PRIMARY 8 NULL 219 Using index condition; Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t3 ref a a 5 test.t2.key2 1
+drop table t1,t2,t3;
+drop table t1000,t10,t03;
diff --git a/mysql-test/main/join.test b/mysql-test/main/join.test
index c5d62e213d8..c72ff0e1a8c 100644
--- a/mysql-test/main/join.test
+++ b/mysql-test/main/join.test
@@ -1748,3 +1748,68 @@ show keys from t1;
explain select * from t0,t1 where t0.a=t1.a;
drop table t0,t1;
+
+--echo #
+--echo # MDEV-21383: Possible range plan is not used under certain conditions
+--echo #
+
+--disable_warnings
+drop table if exists t10, t1000, t03;
+--enable_warnings
+
+create table t10(a int);
+insert into t10 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1000(a int);
+insert into t1000 select A.a + B.a* 10 + C.a * 100 from t10 A, t10 B, t10 C;
+
+create table t03(a int);
+insert into t03 values (0),(1),(2);
+
+
+create table t1 (
+ stationid int
+);
+insert into t1 select a from t10;
+
+CREATE TABLE t2 (
+ stationId int,
+ startTime int,
+ filler char(100),
+ key1 int,
+ key2 int,
+ key(key1),
+ key(key2),
+ PRIMARY KEY (`stationId`,`startTime`)
+);
+
+insert into t2 select
+ A.a,
+ B.a,
+ repeat('filler=data-', 4),
+ B.a,
+ 1
+from
+ t03 A,
+ t1000 B;
+analyze table t2;
+
+create table t3(a int, filler char(100), key(a));
+insert into t3 select A.a+1000*B.a, 'filler-data' from t1000 A, t10 B;
+
+--echo # This should produce a join order of t1,t2,t3
+--echo # t2 should have type=range, key=PRIMARY key_len=8 (not type=ALL or key_len<8)
+explain
+SELECT *
+FROM
+ t1,t2,t3
+WHERE
+ t2.startTime <= 100 and
+ t2.stationId = t1.stationId and
+ (t1.stationid = 1 or t1.stationid = 2 or t1.stationid = 3) and
+ key1 >0 and
+ t2.key2=t3.a;
+
+drop table t1,t2,t3;
+drop table t1000,t10,t03;
+
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 32e6a767f15..bd5f25d67c2 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -8999,6 +8999,8 @@ and_all_keys(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2,
}
if (key1->type == SEL_ARG::MAYBE_KEY)
{
+ if (key2->type == SEL_ARG::KEY_RANGE)
+ return key2;
key1->right= key1->left= &null_element;
key1->next= key1->prev= 0;
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 87a45a1baed..ae26458d451 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -10543,6 +10543,74 @@ make_outerjoin_info(JOIN *join)
}
+/*
+ @brief
+ Build a temporary join prefix condition for JOIN_TABs up to the last tab
+
+ @param ret OUT the condition is returned here
+
+ @return
+ false OK
+ true Out of memory
+
+ @detail
+ Walk through the join prefix (from the first table to the last_tab) and
+ build a condition:
+
+ join_tab_1_cond AND join_tab_2_cond AND ... AND last_tab_conds
+
+ The condition is only intended to be used by the range optimizer, so:
+ - it is not normalized (can have Item_cond_and inside another
+ Item_cond_and)
+ - it does not include join->exec_const_cond and other similar conditions.
+*/
+
+bool build_tmp_join_prefix_cond(JOIN *join, JOIN_TAB *last_tab, Item **ret)
+{
+ THD *const thd= join->thd;
+ Item_cond_and *all_conds= NULL;
+
+ Item *res= NULL;
+
+ // Pick the ON-expression. Use the same logic as in get_sargable_cond():
+ if (last_tab->on_expr_ref)
+ res= *last_tab->on_expr_ref;
+ else if (last_tab->table->pos_in_table_list &&
+ last_tab->table->pos_in_table_list->embedding &&
+ !last_tab->table->pos_in_table_list->embedding->sj_on_expr)
+ {
+ res= last_tab->table->pos_in_table_list->embedding->on_expr;
+ }
+
+ for (JOIN_TAB *tab= first_depth_first_tab(join);
+ tab;
+ tab= next_depth_first_tab(join, tab))
+ {
+ if (tab->select_cond)
+ {
+ if (!res)
+ res= tab->select_cond;
+ else
+ {
+ if (!all_conds)
+ {
+ if (!(all_conds= new (thd->mem_root)Item_cond_and(thd, res,
+ tab->select_cond)))
+ return true;
+ res= all_conds;
+ }
+ else
+ all_conds->add(tab->select_cond, thd->mem_root);
+ }
+ }
+ if (tab == last_tab)
+ break;
+ }
+ *ret= all_conds? all_conds: res;
+ return false;
+}
+
+
static bool
make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
{
@@ -10890,7 +10958,9 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
{
/* Join with outer join condition */
COND *orig_cond=sel->cond;
- sel->cond= and_conds(thd, sel->cond, *tab->on_expr_ref);
+
+ if (build_tmp_join_prefix_cond(join, tab, &sel->cond))
+ return true;
/*
We can't call sel->cond->fix_fields,
1
0
revision-id: 9be5c19c3458963ccf82450b198567976275d599 (mariadb-10.3.21-25-g9be5c19c345)
parent(s): 6373ec3ec74515574f8a08535ed0d090b13d9122
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2020-01-19 21:16:15 +0300
message:
Fix another trivial merge error
---
sql/sp_head.cc | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 3a113de9dd5..ba130881d68 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -493,8 +493,8 @@ void sp_head::destroy(sp_head *sp)
{
/* Make a copy of main_mem_root as free_root will free the sp */
MEM_ROOT own_root= sp->main_mem_root;
- DBUG_PRINT("info", ("mem_root 0x%lx moved to 0x%lx",
- (ulong) &sp->mem_root, (ulong) &own_root));
+ DBUG_PRINT("info", ("mem_root %p moved to %p",
+ &sp->mem_root, &own_root));
delete sp;
1
0
[Commits] 78069a2: MDEV-21356 ERROR 1032 Can't find record when running simple, single-table query
by IgorBabaev 18 Jan '20
by IgorBabaev 18 Jan '20
18 Jan '20
revision-id: 78069a2106486ebe4d0d1e5991cd9ba5c989ec7c (mariadb-10.4.11-30-g78069a2)
parent(s): 1c97cd339e9513b152727f386573c8c048db0281
author: Igor Babaev
committer: Igor Babaev
timestamp: 2020-01-18 13:26:03 -0800
message:
MDEV-21356 ERROR 1032 Can't find record when running simple, single-table query
This bug could happen when both optimizer switches 'mrr' and 'mrr_sort_keys'
are enabled and the optimizer decided to use a rowid filter when
accessing an InnoDB table by a secondary key. With the above setting
any access by a secondary is converted to the rndpos access. In InnoDB the
rndpos access uses the primary key.
Currently usage of a rowid filter within InnoDB engine is not supported
if the table is accessed by the primary key.
Do not use pushed rowid filter if the table is accessed actually by the
primary key. Use the rowid filter outside the engine code instead.
---
mysql-test/main/rowid_filter_innodb.result | 114 ++++++++++++++++++++++++++++
mysql-test/main/rowid_filter_innodb.test | 117 +++++++++++++++++++++++++++++
sql/multi_range_read.cc | 24 ++++++
3 files changed, 255 insertions(+)
diff --git a/mysql-test/main/rowid_filter_innodb.result b/mysql-test/main/rowid_filter_innodb.result
index 2f57ee0..36a59b8 100644
--- a/mysql-test/main/rowid_filter_innodb.result
+++ b/mysql-test/main/rowid_filter_innodb.result
@@ -2355,3 +2355,117 @@ count(0)
0
drop table t1;
set global innodb_stats_persistent= @stats.save;
+#
+# MDEV-21356: usage of range filter with range access employing
+# optimizer_switch='mrr=on,mrr_sort_keys=on';
+#
+CREATE TABLE t1 (
+id int(11) unsigned NOT NULL AUTO_INCREMENT,
+domain varchar(255) NOT NULL,
+registrant_name varchar(255) DEFAULT NULL,
+registrant_organization varchar(255) DEFAULT NULL,
+registrant_street1 varchar(255) DEFAULT NULL,
+registrant_street2 varchar(255) DEFAULT NULL,
+registrant_street3 varchar(255) DEFAULT NULL,
+registrant_street4 varchar(255) DEFAULT NULL,
+registrant_street5 varchar(255) DEFAULT NULL,
+registrant_city varchar(255) DEFAULT NULL,
+registrant_postal_code varchar(255) DEFAULT NULL,
+registrant_country varchar(255) DEFAULT NULL,
+registrant_email varchar(255) DEFAULT NULL,
+registrant_telephone varchar(255) DEFAULT NULL,
+administrative_name varchar(255) DEFAULT NULL,
+administrative_organization varchar(255) DEFAULT NULL,
+administrative_street1 varchar(255) DEFAULT NULL,
+administrative_street2 varchar(255) DEFAULT NULL,
+administrative_street3 varchar(255) DEFAULT NULL,
+administrative_street4 varchar(255) DEFAULT NULL,
+administrative_street5 varchar(255) DEFAULT NULL,
+administrative_city varchar(255) DEFAULT NULL,
+administrative_postal_code varchar(255) DEFAULT NULL,
+administrative_country varchar(255) DEFAULT NULL,
+administrative_email varchar(255) DEFAULT NULL,
+administrative_telephone varchar(255) DEFAULT NULL,
+technical_name varchar(255) DEFAULT NULL,
+technical_organization varchar(255) DEFAULT NULL,
+technical_street1 varchar(255) DEFAULT NULL,
+technical_street2 varchar(255) DEFAULT NULL,
+technical_street3 varchar(255) DEFAULT NULL,
+technical_street4 varchar(255) DEFAULT NULL,
+technical_street5 varchar(255) DEFAULT NULL,
+technical_city varchar(255) DEFAULT NULL,
+technical_postal_code varchar(255) DEFAULT NULL,
+technical_country varchar(255) DEFAULT NULL,
+technical_email varchar(255) DEFAULT NULL,
+technical_telephone varchar(255) DEFAULT NULL,
+json longblob NOT NULL,
+timestamp timestamp NOT NULL DEFAULT current_timestamp(),
+PRIMARY KEY (id),
+KEY ixEventWhoisDomainDomain (domain),
+KEY ixEventWhoisDomainTimestamp (timestamp)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+INSERT INTO t1 (
+id, domain, registrant_name, registrant_organization, registrant_street1,
+registrant_street2, registrant_street3, registrant_street4, registrant_street5,
+registrant_city, registrant_postal_code, registrant_country, registrant_email,
+registrant_telephone, administrative_name, administrative_organization,
+administrative_street1, administrative_street2, administrative_street3,
+administrative_street4, administrative_street5, administrative_city,
+administrative_postal_code, administrative_country, administrative_email,
+administrative_telephone, technical_name, technical_organization,
+technical_street1, technical_street2, technical_street3, technical_street4,
+technical_street5, technical_city, technical_postal_code, technical_country,
+technical_email, technical_telephone, json, timestamp) VALUES
+(60380, 'www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null,
+null, null, null, null, null, null, null, null, null, null, null, null, null,
+null, null, null, null, null, null, null, null, null, null, null, null, null,
+null, null, '', '2016-12-22 09:18:28'),
+(60383, 'www.bestwestern.fr', null, null, null, null, null, null, null, null,
+null, null, null, null, null, null, null, null, null, null, null, null, null,
+null, null, null, null, null, null, null, null, null, null, null, null, null,
+null, null, '', '2016-12-22 09:27:06'),
+(80392, 'www.dfinitions.fr', null, null, null, null, null, null, null, null,
+null, null, null, null, null, null, null, null, null, null, null, null, null,
+null, null, null, null, null, null, null, null, null, null, null, null, null,
+null, null, '', '2017-01-30 08:02:01'),
+(80407, 'www.firma.o2.pl', null, null, null, null, null, null, null, null,
+null, null, null, null, null, null, null, null, null, null, null, null, null,
+null, null, null, null, 'AZ.pl Sp. z o.o.', 'Al. Papieza Jana Pawla II 19/2',
+null, null, null, null, '70-453 Szczecin', null, 'POLAND', null,
+'48914243780', '', '2017-01-30 08:24:51'),
+(80551, 'www.mailhost.i-dev.fr', null, null, null, null, null, null, null,
+null, null, null, null, null, null, null, null, null, null, null, null,
+null, null, null, null, null, null, null, null, null, null, null, null,
+null, null, null, null, null, '', '2017-01-30 10:00:56'),
+(80560, 'www.blackmer-mouvex.com', 'MARIE-PIERRE PRODEAU', 'MOUVEX',
+'2 RUE DES CAILLOTES', null, null, null, null, 'AUXERRE', '89000', 'FRANCE',
+'PRODEAU(a)MOUVEX.COM', null, 'MARIE-PIERRE PRODEAU', 'MOUVEX',
+'2 RUE DES CAILLOTES', null, null, null, null, 'AUXERRE', '89000', 'FRANCE',
+'PRODEAU(a)MOUVEX.COM', '33 386498630', 'LAURENT SOUCHELEAU', 'MOUVEX',
+'2 RUE DES CAILLOTES', null, null, null, null, 'AUXERRE', '89000', 'FRANCE',
+'SOUCHELEAU(a)MOUVEX.COM', '33 386498643', '', '2017-01-30 10:04:38'),
+(80566, 'www.inup.com', 'MAXIMILIAN V. KETELHODT', null,
+'SUELZBURGSTRASSE 158A', null, null, null, null, 'KOELN', '50937', 'GERMANY',
+'ICANN(a)EXPIRES-2009.WEBCARE24.COM', '492214307580', 'MAXIMILIAN V. KETELHODT',
+null, 'SUELZBURGSTRASSE 158A', null, null, null, null, 'KOELN', '50937',
+'GERMANY', 'ICANN(a)EXPIRES-2009.WEBCARE24.COM', '492214307580',
+'MAXIMILIAN V. KETELHODT', null, 'SUELZBURGSTRASSE 158A', null, null, null,
+null, 'KOELN', '50937', 'GERMANY', 'ICANN(a)EXPIRES-2009.WEBCARE24.COM',
+'492214307580', '', '2017-01-30 10:08:29');
+SET @save_optimizer_switch=@@optimizer_switch;
+SET optimizer_switch='mrr=on,mrr_sort_keys=on';
+SELECT * FROM t1
+WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND
+timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH)
+ORDER BY timestamp DESC;
+id domain registrant_name registrant_organization registrant_street1 registrant_street2 registrant_street3 registrant_street4 registrant_street5 registrant_city registrant_postal_code registrant_country registrant_email registrant_telephone administrative_name administrative_organization administrative_street1 administrative_street2 administrative_street3 administrative_street4 administrative_street5 administrative_city administrative_postal_code administrative_country administrative_email administrative_telephone technical_name technical_organization technical_street1 technical_street2 technical_street3 technical_street4 technical_street5 technical_city technical_postal_code technical_country technical_email technical_telephone json timestamp
+EXPLAIN EXTENDED SELECT * FROM t1
+WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND
+timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH)
+ORDER BY timestamp DESC;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ref|filter ixEventWhoisDomainDomain,ixEventWhoisDomainTimestamp ixEventWhoisDomainDomain|ixEventWhoisDomainTimestamp 767|4 const 2 (14%) 14.29 Using index condition; Using where; Using filesort; Using rowid filter
+Warnings:
+Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`domain` AS `domain`,`test`.`t1`.`registrant_name` AS `registrant_name`,`test`.`t1`.`registrant_organization` AS `registrant_organization`,`test`.`t1`.`registrant_street1` AS `registrant_street1`,`test`.`t1`.`registrant_street2` AS `registrant_street2`,`test`.`t1`.`registrant_street3` AS `registrant_street3`,`test`.`t1`.`registrant_street4` AS `registrant_street4`,`test`.`t1`.`registrant_street5` AS `registrant_street5`,`test`.`t1`.`registrant_city` AS `registrant_city`,`test`.`t1`.`registrant_postal_code` AS `registrant_postal_code`,`test`.`t1`.`registrant_country` AS `registrant_country`,`test`.`t1`.`registrant_email` AS `registrant_email`,`test`.`t1`.`registrant_telephone` AS `registrant_telephone`,`test`.`t1`.`administrative_name` AS `administrative_name`,`test`.`t1`.`administrative_organization` AS `administrative_organization`,`test`.`t1`.`administrative_street1` AS `administrative_street1`,`test`.`t1`.`administrati
ve_stree
t2` AS `administrative_street2`,`test`.`t1`.`administrative_street3` AS `administrative_street3`,`test`.`t1`.`administrative_street4` AS `administrative_street4`,`test`.`t1`.`administrative_street5` AS `administrative_street5`,`test`.`t1`.`administrative_city` AS `administrative_city`,`test`.`t1`.`administrative_postal_code` AS `administrative_postal_code`,`test`.`t1`.`administrative_country` AS `administrative_country`,`test`.`t1`.`administrative_email` AS `administrative_email`,`test`.`t1`.`administrative_telephone` AS `administrative_telephone`,`test`.`t1`.`technical_name` AS `technical_name`,`test`.`t1`.`technical_organization` AS `technical_organization`,`test`.`t1`.`technical_street1` AS `technical_street1`,`test`.`t1`.`technical_street2` AS `technical_street2`,`test`.`t1`.`technical_street3` AS `technical_street3`,`test`.`t1`.`technical_street4` AS `technical_street4`,`test`.`t1`.`technical_street5` AS `technical_street5`,`test`.`t1`.`technical_city` AS `technical_cit
y`,`test
`.`t1`.`technical_postal_code` AS `technical_postal_code`,`test`.`t1`.`technical_country` AS `technical_country`,`test`.`t1`.`technical_email` AS `technical_email`,`test`.`t1`.`technical_telephone` AS `technical_telephone`,`test`.`t1`.`json` AS `json`,`test`.`t1`.`timestamp` AS `timestamp` from `test`.`t1` where `test`.`t1`.`domain` = 'www.mailhost.i-dev.fr' and `test`.`t1`.`timestamp` >= <cache>(current_timestamp() + interval -1 month) order by `test`.`t1`.`timestamp` desc
+SET optimizer_switch=@save_optimizer_switch;
+DROP TABLE t1;
diff --git a/mysql-test/main/rowid_filter_innodb.test b/mysql-test/main/rowid_filter_innodb.test
index f1b7b0d..1a5c8fe 100644
--- a/mysql-test/main/rowid_filter_innodb.test
+++ b/mysql-test/main/rowid_filter_innodb.test
@@ -215,3 +215,120 @@ eval $q;
drop table t1;
set global innodb_stats_persistent= @stats.save;
+
+--echo #
+--echo # MDEV-21356: usage of range filter with range access employing
+--echo # optimizer_switch='mrr=on,mrr_sort_keys=on';
+--echo #
+
+CREATE TABLE t1 (
+ id int(11) unsigned NOT NULL AUTO_INCREMENT,
+ domain varchar(255) NOT NULL,
+ registrant_name varchar(255) DEFAULT NULL,
+ registrant_organization varchar(255) DEFAULT NULL,
+ registrant_street1 varchar(255) DEFAULT NULL,
+ registrant_street2 varchar(255) DEFAULT NULL,
+ registrant_street3 varchar(255) DEFAULT NULL,
+ registrant_street4 varchar(255) DEFAULT NULL,
+ registrant_street5 varchar(255) DEFAULT NULL,
+ registrant_city varchar(255) DEFAULT NULL,
+ registrant_postal_code varchar(255) DEFAULT NULL,
+ registrant_country varchar(255) DEFAULT NULL,
+ registrant_email varchar(255) DEFAULT NULL,
+ registrant_telephone varchar(255) DEFAULT NULL,
+ administrative_name varchar(255) DEFAULT NULL,
+ administrative_organization varchar(255) DEFAULT NULL,
+ administrative_street1 varchar(255) DEFAULT NULL,
+ administrative_street2 varchar(255) DEFAULT NULL,
+ administrative_street3 varchar(255) DEFAULT NULL,
+ administrative_street4 varchar(255) DEFAULT NULL,
+ administrative_street5 varchar(255) DEFAULT NULL,
+ administrative_city varchar(255) DEFAULT NULL,
+ administrative_postal_code varchar(255) DEFAULT NULL,
+ administrative_country varchar(255) DEFAULT NULL,
+ administrative_email varchar(255) DEFAULT NULL,
+ administrative_telephone varchar(255) DEFAULT NULL,
+ technical_name varchar(255) DEFAULT NULL,
+ technical_organization varchar(255) DEFAULT NULL,
+ technical_street1 varchar(255) DEFAULT NULL,
+ technical_street2 varchar(255) DEFAULT NULL,
+ technical_street3 varchar(255) DEFAULT NULL,
+ technical_street4 varchar(255) DEFAULT NULL,
+ technical_street5 varchar(255) DEFAULT NULL,
+ technical_city varchar(255) DEFAULT NULL,
+ technical_postal_code varchar(255) DEFAULT NULL,
+ technical_country varchar(255) DEFAULT NULL,
+ technical_email varchar(255) DEFAULT NULL,
+ technical_telephone varchar(255) DEFAULT NULL,
+ json longblob NOT NULL,
+ timestamp timestamp NOT NULL DEFAULT current_timestamp(),
+ PRIMARY KEY (id),
+ KEY ixEventWhoisDomainDomain (domain),
+ KEY ixEventWhoisDomainTimestamp (timestamp)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+INSERT INTO t1 (
+id, domain, registrant_name, registrant_organization, registrant_street1,
+registrant_street2, registrant_street3, registrant_street4, registrant_street5,
+registrant_city, registrant_postal_code, registrant_country, registrant_email,
+registrant_telephone, administrative_name, administrative_organization,
+administrative_street1, administrative_street2, administrative_street3,
+administrative_street4, administrative_street5, administrative_city,
+administrative_postal_code, administrative_country, administrative_email,
+administrative_telephone, technical_name, technical_organization,
+technical_street1, technical_street2, technical_street3, technical_street4,
+technical_street5, technical_city, technical_postal_code, technical_country,
+technical_email, technical_telephone, json, timestamp) VALUES
+(60380, 'www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, '', '2016-12-22 09:18:28'),
+(60383, 'www.bestwestern.fr', null, null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, '', '2016-12-22 09:27:06'),
+(80392, 'www.dfinitions.fr', null, null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, '', '2017-01-30 08:02:01'),
+(80407, 'www.firma.o2.pl', null, null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, null, null, 'AZ.pl Sp. z o.o.', 'Al. Papieza Jana Pawla II 19/2',
+ null, null, null, null, '70-453 Szczecin', null, 'POLAND', null,
+ '48914243780', '', '2017-01-30 08:24:51'),
+(80551, 'www.mailhost.i-dev.fr', null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, null, null, null, '', '2017-01-30 10:00:56'),
+(80560, 'www.blackmer-mouvex.com', 'MARIE-PIERRE PRODEAU', 'MOUVEX',
+ '2 RUE DES CAILLOTES', null, null, null, null, 'AUXERRE', '89000', 'FRANCE',
+ 'PRODEAU(a)MOUVEX.COM', null, 'MARIE-PIERRE PRODEAU', 'MOUVEX',
+ '2 RUE DES CAILLOTES', null, null, null, null, 'AUXERRE', '89000', 'FRANCE',
+ 'PRODEAU(a)MOUVEX.COM', '33 386498630', 'LAURENT SOUCHELEAU', 'MOUVEX',
+ '2 RUE DES CAILLOTES', null, null, null, null, 'AUXERRE', '89000', 'FRANCE',
+ 'SOUCHELEAU(a)MOUVEX.COM', '33 386498643', '', '2017-01-30 10:04:38'),
+(80566, 'www.inup.com', 'MAXIMILIAN V. KETELHODT', null,
+ 'SUELZBURGSTRASSE 158A', null, null, null, null, 'KOELN', '50937', 'GERMANY',
+ 'ICANN(a)EXPIRES-2009.WEBCARE24.COM', '492214307580', 'MAXIMILIAN V. KETELHODT',
+ null, 'SUELZBURGSTRASSE 158A', null, null, null, null, 'KOELN', '50937',
+ 'GERMANY', 'ICANN(a)EXPIRES-2009.WEBCARE24.COM', '492214307580',
+ 'MAXIMILIAN V. KETELHODT', null, 'SUELZBURGSTRASSE 158A', null, null, null,
+ null, 'KOELN', '50937', 'GERMANY', 'ICANN(a)EXPIRES-2009.WEBCARE24.COM',
+ '492214307580', '', '2017-01-30 10:08:29');
+
+SET @save_optimizer_switch=@@optimizer_switch;
+
+SET optimizer_switch='mrr=on,mrr_sort_keys=on';
+
+let $q=
+SELECT * FROM t1
+ WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND
+ timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH)
+ORDER BY timestamp DESC;
+
+eval $q;
+eval EXPLAIN EXTENDED $q;
+
+SET optimizer_switch=@save_optimizer_switch;
+
+DROP TABLE t1;
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index 4fc386a..7e4c2ed 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -19,6 +19,7 @@
#include "sql_select.h"
#include "key.h"
#include "sql_statistics.h"
+#include "rowid_filter.h"
static ulonglong key_block_no(TABLE *table, uint keyno, ha_rows keyentry_pos)
{
@@ -709,6 +710,20 @@ int Mrr_ordered_rndpos_reader::init(handler *h_arg,
is_mrr_assoc= !MY_TEST(mode & HA_MRR_NO_ASSOCIATION);
index_reader_exhausted= FALSE;
index_reader_needs_refill= TRUE;
+
+  /*
+    Currently usage of a rowid filter within the InnoDB engine is not
+    supported if the table is accessed by the primary key.
+    When the optimizer switches 'mrr' and 'mrr_sort_keys' are both enabled,
+    any access by a secondary index is converted to rndpos access. In
+    InnoDB, rndpos access always uses the primary key.
+    Do not use a pushed rowid filter if the table is actually accessed by
+    the primary key. Use the rowid filter outside the engine code instead
+    (see Mrr_ordered_rndpos_reader::refill_from_index_reader).
+  */
+ if (file->pushed_rowid_filter && file->primary_key_is_clustered())
+ file->cancel_pushed_rowid_filter();
+
return 0;
}
@@ -801,6 +816,15 @@ int Mrr_ordered_rndpos_reader::refill_from_index_reader()
index_reader->position();
+  /*
+    If the built rowid filter cannot be used at the engine level, use it here.
+  */
+ Rowid_filter *rowid_filter=
+ file->get_table()->reginfo.join_tab->rowid_filter;
+ if (rowid_filter && !file->pushed_rowid_filter &&
+ !rowid_filter->check((char *)index_rowid))
+ continue;
+
/* Put rowid, or {rowid, range_id} pair into the buffer */
rowid_buffer->write_ptr1= index_rowid;
rowid_buffer->write_ptr2= (uchar*)&range_info;
1
0
[Commits] 83dccda35b4: MDEV-21263: Allow packed values of non-sorted fields in the sort buffer
by Varun 17 Jan '20
by Varun 17 Jan '20
17 Jan '20
revision-id: 83dccda35b435abee9dbc7d10e9a4638dc482bea (mariadb-10.5.0-119-g83dccda35b4)
parent(s): 6f65931f889cf920bf52c2debeadc9f559ff687b
author: Varun Gupta
committer: Varun Gupta
timestamp: 2020-01-17 23:42:10 +0530
message:
MDEV-21263: Allow packed values of non-sorted fields in the sort buffer
This task deals with packing the non-sorted fields (or addon fields).
This would lead to efficient usage of the memory allocated for the sort buffer.
The changes brought by this feature are
1) Sort buffers would have records of variable length
2) Each record in the sort buffer would be stored like
<sort_key1><sort_key2>....<addon_length><null_bytes><field1><field2>....
addon_length is the extra bytes that are required to store the variable
length of addon field across different records.
3) Changes in rr_unpack_from_buffer and rr_from_tempfile to take into account
the variable length of records.
Ported WL#1509 Pack values of non-sorted fields in the sort buffer from
MySQL by Tor Didriksen
---
mysql-test/main/order_by_pack_big.result | 329 ++++++++++
mysql-test/main/order_by_pack_big.test | 134 ++++
sql/bounded_queue.h | 4 +-
sql/field.h | 2 +
sql/filesort.cc | 700 +++++++++++++--------
sql/filesort.h | 83 ++-
sql/filesort_utils.cc | 64 +-
sql/filesort_utils.h | 214 ++++++-
sql/records.cc | 141 ++++-
sql/records.h | 14 +-
sql/sql_array.h | 4 +
sql/sql_select.cc | 4 +-
sql/sql_sort.h | 291 ++++++++-
sql/uniques.cc | 80 ++-
sql/uniques.h | 2 +-
.../mysql-test/connect/r/mysql_index.result | 4 +-
.../connect/mysql-test/connect/t/mysql_index.test | 2 +-
17 files changed, 1638 insertions(+), 434 deletions(-)
diff --git a/mysql-test/main/order_by_pack_big.result b/mysql-test/main/order_by_pack_big.result
new file mode 100644
index 00000000000..6eb699443f4
--- /dev/null
+++ b/mysql-test/main/order_by_pack_big.result
@@ -0,0 +1,329 @@
+set @save_rand_seed1= @@RAND_SEED1;
+set @save_rand_seed2= @@RAND_SEED2;
+set @@RAND_SEED1=810763568, @@RAND_SEED2=600681772;
+create table t1(a int);
+insert into t1 select seq from seq_1_to_10000 order by rand();
+#
+# parameters:
+# mean mean for the column to be considered
+# max_val max_value for the column to be considered
+#
+# This function generate a sample of a normal distribution
+# This function return a point
+# of the normal distribution with a given mean.
+# The range being [mean-max_val, mean+max_val]
+#
+CREATE FUNCTION
+generate_normal_distribution_sample(mean DOUBLE, max_val DOUBLE)RETURNS DOUBLE
+BEGIN
+DECLARE z DOUBLE DEFAULT 0;
+SET z= (rand() + rand() + rand() + rand() + rand() + rand())/6;
+SET z= 2*max_val*z;
+SET z= z + mean - max_val;
+return z;
+END|
+#
+# parameters:
+# len length of the random string to be generated
+#
+# This function generates a random string for the length passed
+# as an argument with characters in the range of [A,Z]
+#
+CREATE FUNCTION generate_random_string(len INT) RETURNS varchar(128)
+BEGIN
+DECLARE str VARCHAR(256) DEFAULT '';
+DECLARE x INT DEFAULT 0;
+WHILE (len > 0) DO
+SET x =round(rand()*25);
+SET str= CONCAT(str, CHAR(65 + x));
+SET len= len-1;
+END WHILE;
+RETURN str;
+END|
+#
+# parameters:
+# mean mean for the column to be considered
+# min_val min_value for the column to be considered
+# max_val max_value for the column to be considered
+#
+# This function generate a normal distribution sample in the range of
+# [min_val, max_val]
+#
+CREATE FUNCTION
+clipped_normal_distribution(mean DOUBLE, min_val DOUBLE, max_val DOUBLE)
+RETURNS INT
+BEGIN
+DECLARE r DOUBLE DEFAULT 0;
+WHILE 1=1 DO
+set r= generate_normal_distribution_sample(mean, max_val);
+IF (r >= min_val AND r <= max_val) THEN
+RETURN round(r);
+end if;
+END WHILE;
+RETURN 0;
+END|
+create table t2 (id INT NOT NULL, a INT, b int);
+insert into t2
+select a, clipped_normal_distribution(12, 0, 64),
+clipped_normal_distribution(32, 0, 128)
+from t1;
+CREATE TABLE t3(
+id INT NOT NULL,
+names VARCHAR(64),
+address VARCHAR(128),
+PRIMARY KEY (id)
+);
+#
+# table t3 stores random strings calculated from the length stored in
+# table t2
+#
+insert into t3
+select id, generate_random_string(a), generate_random_string(b) from t2;
+#
+# All records fit in memory
+#
+set sort_buffer_size=262144*10;
+flush status;
+select id DIV 100 as x,
+MD5(group_concat(substring(names,1,3), substring(address,1,3)
+order by substring(names,1,3), substring(address,1,3)))
+FROM t3
+GROUP BY x;
+x MD5(group_concat(substring(names,1,3), substring(address,1,3)
+order by substring(names,1,3), substring(address,1,3)))
+0 d4ad11687da9fdd17045a0f93d724df5
+1 d5a87f65a05cd605948dbe108f9a5b05
+2 0632ddb92533d6c55f958f21341675cc
+3 c36113673deeac8eb0a7607cc8f96bba
+4 b8d88a43d7f63334d829d68d4203d9f3
+5 3c17b7f2acb944dd96c60b8ca90751db
+6 7c1d824dc55a223f3e7655a2903b94b9
+7 eb1a1f9c360b800b0f55adc05054a563
+8 844280d69206c7a1f10d58817613b102
+9 3d8d5e1928e504380a3a3c2799e9c428
+10 cb3a76d2a4dbc4c1fc78ddfa59b974d4
+11 2eaffef3148600bb13cb3d904f99a4c5
+12 5fb93adc41ba464e8185c3d3f1df7d17
+13 fa70b96b2b0b4c8b6588a4460f14b7d4
+14 4aa245a50e522abcb1a1b735fc003c8a
+15 e3684310f5850e6ed643e2e56ed7fea6
+16 c4eee44d8cc0a0f66e41f301cc91ce2b
+17 2e23b56325eccd97e575118d6a24f755
+18 d253b26d84402fe5be80d6aad44f70ed
+19 161b4177e67e2af589c9cc1449c2d667
+20 9f9ea00e6eb5b609704776e6550bfbc3
+21 3da448760a412f89f5d53276a31586ea
+22 3168591c6f2f51d16bb19eae5363f834
+23 c1b260d9fba034d13b5808b3e20eeb03
+24 0e53cf577c5d0d477fde859b4fa7a81b
+25 df02956ca733ad50d0385862096da83a
+26 79ebdef6078a10df877c59c29180a278
+27 06727b4483379f3eddb0b42dc428bd5f
+28 aa9ea96e4058a6dd56f2d2d2fb782d7e
+29 bb25c33f89ff4bfd50863060d43d61c9
+30 0f432712859ff779e08830f8da9aece0
+31 97822a645481b78ef4fba4da36ac62f4
+32 2c205b01efed57522076a8300b1cd8f8
+33 c6ad8fbfa88e4bca0277e2423d1df9e1
+34 111f8e425524edf9936a96555350c94e
+35 14a0be622f01486752771e4a5002ddfc
+36 894c34e564219b1d192ff2eb837372b1
+37 4d1fe9791758d29482d81afe5d4faa72
+38 2181ed8abc8cb71af0aa21d5271abe9d
+39 2255d9da97436886acb13cda9ab75714
+40 4c8eac659b89ebcc503cd2af956175be
+41 9d90ad817099140de50d1358976c6c4f
+42 0a5684b11e55f3bc1b8c8583ec334bca
+43 7706747e156e33ad2949d8335389c95a
+44 c8c1304b513cb1ef65002c460f6258d6
+45 054c61e32b03357ccbf455fd5d53bf05
+46 98f007bb5ca04d2d90387d242c098a40
+47 aac2a3a94db0c1c23b0762057aa7041c
+48 6d8be947ba1b8c41ed7023925db50eef
+49 44ba42761bf5fa862115ef46e985755f
+50 814c11118e69816184d6748da48c4f08
+51 98d0a7d7c8e9dd0d5e643d546890cba2
+52 892268ec715956f24082e3d02e887f29
+53 15062940c842549f2b94824bfea2d811
+54 93012d6d6fec0f6ee661cfa9cdb13dea
+55 27e5f9ffcfed6e1a55d7300c36cda21b
+56 30ae9ae8ba4a9e6268f1dfca0fca9470
+57 bed276744ab21fabac3a8af6988e3e01
+58 2785a4916860cb1a14f1443ff31bebbe
+59 c67deeeb485e47e5405c471747f23d77
+60 5fb9d556e3fbd031c1f509b892671d17
+61 0a26c94e855713855e1f7c2510ab8396
+62 470eb86ceebc80373ac21f649fd9c05e
+63 84ebbe82cbf0e9e45d8a4c532decf95c
+64 e65cdfed3ade2889922d5b30f96c1669
+65 4d69962a09905671898b53031cc6793e
+66 c077c25a07c30df40fe229ecce7dbfec
+67 03978533bb69c23e317fcbe528c3a396
+68 ea3cfd1e51bc1cd961f8e9a3094a8da4
+69 3b7168d6cd74da4ad40ee7f2031654e1
+70 bd63f97872ef083b411297143fd3e999
+71 1005210dc686397faf39263c47c86ffa
+72 bc2211caf85df0b0d5adf033f85d8b38
+73 1854dba348bab0955ea3ed6af29bae94
+74 133931ecc7a8209637034b491170f4b3
+75 a9fbf0426fa75fcf448db1150681a57a
+76 1b816675f211d3d543fb414e7ab2e3d1
+77 be7dfa74b889c4b75e15c926f542341f
+78 4a1a8f0b838682ac340f734ab87ff667
+79 dfe24b819f1d5d00ab9a64ecc60c45d0
+80 01f32b0e5f76560ea69e181162e842ca
+81 0d4929761adae4a3f9feedf0c9f5f5ef
+82 45b937d3deb3385ab51b5489eb35f4ab
+83 3aeb64118a074b2c050aa024f3992d16
+84 621d28bc210df55c14f2c227c76c1e02
+85 b5715829003467e387deef10c8a630aa
+86 59a4663b0492bc7a7d8d44bce2866982
+87 2051d9533f47361c2aaa1abda2860bad
+88 7699896a47f2c929cb45607d4e2aec82
+89 23554b3781bce1847cf98fd9bef5b063
+90 5326c6874b119ebca36817856b710018
+91 d684e3ed75dbee8d4df38b74f71b18ef
+92 7a9764916045b31df8f8bf593d3f49bf
+93 1afecbb3a50bd864990d504ca8bfc2e0
+94 db7148159865f51f9fcbb47054181210
+95 24fdcfcad88e9f5a607bb10452112583
+96 7c6798c46f646d3ac3be19ef6979e075
+97 d9dc5c1191649ad67e5330c073cb2692
+98 16aef566f921ff51d970f5c351a6ed07
+99 4a8f0e73d0235dee4730ecd37e8ee889
+100 7d66d01ed76d71318364fa2f9ca1c940
+show status like '%sort%';
+Variable_name Value
+Sort_merge_passes 0
+Sort_priority_queue_sorts 0
+Sort_range 0
+Sort_rows 10000
+Sort_scan 1
+set sort_buffer_size=default;
+#
+# Test for merge_many_buff
+#
+set sort_buffer_size=32768;
+flush status;
+select id DIV 100 as x,
+MD5(group_concat(substring(names,1,3), substring(address,1,3)
+order by substring(names,1,3), substring(address,1,3)))
+FROM t3
+GROUP BY x;
+x MD5(group_concat(substring(names,1,3), substring(address,1,3)
+order by substring(names,1,3), substring(address,1,3)))
+0 d4ad11687da9fdd17045a0f93d724df5
+1 d5a87f65a05cd605948dbe108f9a5b05
+2 0632ddb92533d6c55f958f21341675cc
+3 c36113673deeac8eb0a7607cc8f96bba
+4 b8d88a43d7f63334d829d68d4203d9f3
+5 3c17b7f2acb944dd96c60b8ca90751db
+6 7c1d824dc55a223f3e7655a2903b94b9
+7 eb1a1f9c360b800b0f55adc05054a563
+8 844280d69206c7a1f10d58817613b102
+9 3d8d5e1928e504380a3a3c2799e9c428
+10 cb3a76d2a4dbc4c1fc78ddfa59b974d4
+11 2eaffef3148600bb13cb3d904f99a4c5
+12 5fb93adc41ba464e8185c3d3f1df7d17
+13 fa70b96b2b0b4c8b6588a4460f14b7d4
+14 4aa245a50e522abcb1a1b735fc003c8a
+15 e3684310f5850e6ed643e2e56ed7fea6
+16 c4eee44d8cc0a0f66e41f301cc91ce2b
+17 2e23b56325eccd97e575118d6a24f755
+18 d253b26d84402fe5be80d6aad44f70ed
+19 161b4177e67e2af589c9cc1449c2d667
+20 9f9ea00e6eb5b609704776e6550bfbc3
+21 3da448760a412f89f5d53276a31586ea
+22 3168591c6f2f51d16bb19eae5363f834
+23 c1b260d9fba034d13b5808b3e20eeb03
+24 0e53cf577c5d0d477fde859b4fa7a81b
+25 df02956ca733ad50d0385862096da83a
+26 79ebdef6078a10df877c59c29180a278
+27 06727b4483379f3eddb0b42dc428bd5f
+28 aa9ea96e4058a6dd56f2d2d2fb782d7e
+29 bb25c33f89ff4bfd50863060d43d61c9
+30 0f432712859ff779e08830f8da9aece0
+31 97822a645481b78ef4fba4da36ac62f4
+32 2c205b01efed57522076a8300b1cd8f8
+33 c6ad8fbfa88e4bca0277e2423d1df9e1
+34 111f8e425524edf9936a96555350c94e
+35 14a0be622f01486752771e4a5002ddfc
+36 894c34e564219b1d192ff2eb837372b1
+37 4d1fe9791758d29482d81afe5d4faa72
+38 2181ed8abc8cb71af0aa21d5271abe9d
+39 2255d9da97436886acb13cda9ab75714
+40 4c8eac659b89ebcc503cd2af956175be
+41 9d90ad817099140de50d1358976c6c4f
+42 0a5684b11e55f3bc1b8c8583ec334bca
+43 7706747e156e33ad2949d8335389c95a
+44 c8c1304b513cb1ef65002c460f6258d6
+45 054c61e32b03357ccbf455fd5d53bf05
+46 98f007bb5ca04d2d90387d242c098a40
+47 aac2a3a94db0c1c23b0762057aa7041c
+48 6d8be947ba1b8c41ed7023925db50eef
+49 44ba42761bf5fa862115ef46e985755f
+50 814c11118e69816184d6748da48c4f08
+51 98d0a7d7c8e9dd0d5e643d546890cba2
+52 892268ec715956f24082e3d02e887f29
+53 15062940c842549f2b94824bfea2d811
+54 93012d6d6fec0f6ee661cfa9cdb13dea
+55 27e5f9ffcfed6e1a55d7300c36cda21b
+56 30ae9ae8ba4a9e6268f1dfca0fca9470
+57 bed276744ab21fabac3a8af6988e3e01
+58 2785a4916860cb1a14f1443ff31bebbe
+59 c67deeeb485e47e5405c471747f23d77
+60 5fb9d556e3fbd031c1f509b892671d17
+61 0a26c94e855713855e1f7c2510ab8396
+62 470eb86ceebc80373ac21f649fd9c05e
+63 84ebbe82cbf0e9e45d8a4c532decf95c
+64 e65cdfed3ade2889922d5b30f96c1669
+65 4d69962a09905671898b53031cc6793e
+66 c077c25a07c30df40fe229ecce7dbfec
+67 03978533bb69c23e317fcbe528c3a396
+68 ea3cfd1e51bc1cd961f8e9a3094a8da4
+69 3b7168d6cd74da4ad40ee7f2031654e1
+70 bd63f97872ef083b411297143fd3e999
+71 1005210dc686397faf39263c47c86ffa
+72 bc2211caf85df0b0d5adf033f85d8b38
+73 1854dba348bab0955ea3ed6af29bae94
+74 133931ecc7a8209637034b491170f4b3
+75 a9fbf0426fa75fcf448db1150681a57a
+76 1b816675f211d3d543fb414e7ab2e3d1
+77 be7dfa74b889c4b75e15c926f542341f
+78 4a1a8f0b838682ac340f734ab87ff667
+79 dfe24b819f1d5d00ab9a64ecc60c45d0
+80 01f32b0e5f76560ea69e181162e842ca
+81 0d4929761adae4a3f9feedf0c9f5f5ef
+82 45b937d3deb3385ab51b5489eb35f4ab
+83 3aeb64118a074b2c050aa024f3992d16
+84 621d28bc210df55c14f2c227c76c1e02
+85 b5715829003467e387deef10c8a630aa
+86 59a4663b0492bc7a7d8d44bce2866982
+87 2051d9533f47361c2aaa1abda2860bad
+88 7699896a47f2c929cb45607d4e2aec82
+89 23554b3781bce1847cf98fd9bef5b063
+90 5326c6874b119ebca36817856b710018
+91 d684e3ed75dbee8d4df38b74f71b18ef
+92 7a9764916045b31df8f8bf593d3f49bf
+93 1afecbb3a50bd864990d504ca8bfc2e0
+94 db7148159865f51f9fcbb47054181210
+95 24fdcfcad88e9f5a607bb10452112583
+96 7c6798c46f646d3ac3be19ef6979e075
+97 d9dc5c1191649ad67e5330c073cb2692
+98 16aef566f921ff51d970f5c351a6ed07
+99 4a8f0e73d0235dee4730ecd37e8ee889
+100 7d66d01ed76d71318364fa2f9ca1c940
+show status like '%sort%';
+Variable_name Value
+Sort_merge_passes 5
+Sort_priority_queue_sorts 0
+Sort_range 0
+Sort_rows 10000
+Sort_scan 1
+set sort_buffer_size=default;
+set @@RAND_SEED1= @save_rand_seed1;
+set @@RAND_SEED2= @save_rand_seed2;
+drop function generate_normal_distribution_sample;
+drop function generate_random_string;
+drop function clipped_normal_distribution;
+drop table t1, t2, t3;
diff --git a/mysql-test/main/order_by_pack_big.test b/mysql-test/main/order_by_pack_big.test
new file mode 100644
index 00000000000..64675ea839d
--- /dev/null
+++ b/mysql-test/main/order_by_pack_big.test
@@ -0,0 +1,134 @@
+--source include/big_test.inc
+--source include/have_sequence.inc
+--source include/have_64bit.inc
+
+set @save_rand_seed1= @@RAND_SEED1;
+set @save_rand_seed2= @@RAND_SEED2;
+set @@RAND_SEED1=810763568, @@RAND_SEED2=600681772;
+
+create table t1(a int);
+insert into t1 select seq from seq_1_to_10000 order by rand();
+delimiter |;
+
+--echo #
+--echo # parameters:
+--echo # mean mean for the column to be considered
+--echo # max_val max_value for the column to be considered
+--echo #
+--echo # This function generate a sample of a normal distribution
+--echo # This function return a point
+--echo # of the normal distribution with a given mean.
+--echo # The range being [mean-max_val, mean+max_val]
+--echo #
+
+CREATE FUNCTION
+generate_normal_distribution_sample(mean DOUBLE, max_val DOUBLE)RETURNS DOUBLE
+BEGIN
+ DECLARE z DOUBLE DEFAULT 0;
+ SET z= (rand() + rand() + rand() + rand() + rand() + rand())/6;
+ SET z= 2*max_val*z;
+ SET z= z + mean - max_val;
+ return z;
+END|
+
+--echo #
+--echo # parameters:
+--echo # len length of the random string to be generated
+--echo #
+--echo # This function generates a random string for the length passed
+--echo # as an argument with characters in the range of [A,Z]
+--echo #
+
+CREATE FUNCTION generate_random_string(len INT) RETURNS varchar(128)
+BEGIN
+ DECLARE str VARCHAR(256) DEFAULT '';
+ DECLARE x INT DEFAULT 0;
+ WHILE (len > 0) DO
+ SET x =round(rand()*25);
+ SET str= CONCAT(str, CHAR(65 + x));
+ SET len= len-1;
+ END WHILE;
+RETURN str;
+END|
+
+--echo #
+--echo # parameters:
+--echo # mean mean for the column to be considered
+--echo # min_val min_value for the column to be considered
+--echo # max_val max_value for the column to be considered
+--echo #
+--echo # This function generate a normal distribution sample in the range of
+--echo # [min_val, max_val]
+--echo #
+
+CREATE FUNCTION
+clipped_normal_distribution(mean DOUBLE, min_val DOUBLE, max_val DOUBLE)
+RETURNS INT
+BEGIN
+ DECLARE r DOUBLE DEFAULT 0;
+ WHILE 1=1 DO
+ set r= generate_normal_distribution_sample(mean, max_val);
+ IF (r >= min_val AND r <= max_val) THEN
+ RETURN round(r);
+ end if;
+ END WHILE;
+ RETURN 0;
+END|
+
+delimiter ;|
+
+create table t2 (id INT NOT NULL, a INT, b int);
+insert into t2
+select a, clipped_normal_distribution(12, 0, 64),
+ clipped_normal_distribution(32, 0, 128)
+from t1;
+
+CREATE TABLE t3(
+ id INT NOT NULL,
+ names VARCHAR(64),
+ address VARCHAR(128),
+ PRIMARY KEY (id)
+);
+
+--echo #
+--echo # table t3 stores random strings calculated from the length stored in
+--echo # table t2
+--echo #
+
+insert into t3
+select id, generate_random_string(a), generate_random_string(b) from t2;
+
+
+let $query= select id DIV 100 as x,
+ MD5(group_concat(substring(names,1,3), substring(address,1,3)
+ order by substring(names,1,3), substring(address,1,3)))
+ FROM t3
+ GROUP BY x;
+
+--echo #
+--echo # All records fit in memory
+--echo #
+
+set sort_buffer_size=262144*10;
+flush status;
+eval $query;
+show status like '%sort%';
+set sort_buffer_size=default;
+
+--echo #
+--echo # Test for merge_many_buff
+--echo #
+
+set sort_buffer_size=32768;
+flush status;
+eval $query;
+show status like '%sort%';
+set sort_buffer_size=default;
+
+set @@RAND_SEED1= @save_rand_seed1;
+set @@RAND_SEED2= @save_rand_seed2;
+
+drop function generate_normal_distribution_sample;
+drop function generate_random_string;
+drop function clipped_normal_distribution;
+drop table t1, t2, t3;
diff --git a/sql/bounded_queue.h b/sql/bounded_queue.h
index fd733caa019..cd710d835aa 100644
--- a/sql/bounded_queue.h
+++ b/sql/bounded_queue.h
@@ -57,7 +57,7 @@ class Bounded_queue
@param to Where to put the key.
@param from The input data.
*/
- typedef void (*keymaker_function)(Sort_param *param,
+ typedef uint (*keymaker_function)(Sort_param *param,
Key_type *to,
Element_type *from);
@@ -181,7 +181,7 @@ void Bounded_queue<Element_type, Key_type>::push(Element_type *element)
{
// Replace top element with new key, and re-order the queue.
Key_type **pq_top= reinterpret_cast<Key_type **>(queue_top(&m_queue));
- (*m_keymaker)(m_sort_param, *pq_top, element);
+ (void)(*m_keymaker)(m_sort_param, *pq_top, element);
queue_replace_top(&m_queue);
} else {
// Insert new key into the queue.
diff --git a/sql/field.h b/sql/field.h
index 58f49f78cc7..df0f97776fb 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1527,6 +1527,7 @@ class Field: public Value_source
{ return length;}
virtual uint max_packed_col_length(uint max_length)
{ return max_length;}
+ virtual bool is_packable() { return false; }
uint offset(const uchar *record) const
{
@@ -2139,6 +2140,7 @@ class Field_longstr :public Field_str
bool can_optimize_range(const Item_bool_func *cond,
const Item *item,
bool is_eq_func) const;
+ bool is_packable() { return true; }
};
/* base class for float and double and decimal (old one) */
diff --git a/sql/filesort.cc b/sql/filesort.cc
index df6e1eb9104..d93ae3b595a 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -48,17 +48,17 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
ha_rows *found_rows);
static bool write_keys(Sort_param *param, SORT_INFO *fs_info,
uint count, IO_CACHE *buffer_file, IO_CACHE *tempfile);
-static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos);
+static uint make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos);
static void register_used_fields(Sort_param *param);
static bool save_index(Sort_param *param, uint count,
SORT_INFO *table_sort);
static uint suffix_length(ulong string_length);
static uint sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
- bool *multi_byte_charset);
-static SORT_ADDON_FIELD *get_addon_fields(TABLE *table, uint sortlength,
- LEX_STRING *addon_buf);
-static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
- uchar *buff, uchar *buff_end);
+ bool *multi_byte_charset);
+static Addon_fields *get_addon_fields(TABLE *table, uint sortlength,
+ uint *addon_length,
+ uint *m_packable_length);
+
static bool check_if_pq_applicable(Sort_param *param, SORT_INFO *info,
TABLE *table,
ha_rows records, size_t memory_available);
@@ -66,7 +66,7 @@ static bool check_if_pq_applicable(Sort_param *param, SORT_INFO *info,
void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
ha_rows maxrows, bool sort_positions)
{
- DBUG_ASSERT(addon_field == 0 && addon_buf.length == 0);
+ DBUG_ASSERT(addon_fields == NULL);
sort_length= sortlen;
ref_length= table->file->ref_length;
@@ -77,12 +77,13 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
Get the descriptors of all fields whose values are appended
to sorted fields and get its total length in addon_buf.length
*/
- addon_field= get_addon_fields(table, sort_length, &addon_buf);
+ addon_fields= get_addon_fields(table, sort_length, &addon_length,
+ &m_packable_length);
}
- if (addon_field)
+ if (using_addon_fields())
{
- DBUG_ASSERT(addon_buf.length < UINT_MAX32);
- res_length= (uint)addon_buf.length;
+ DBUG_ASSERT(addon_length < UINT_MAX32);
+ res_length= addon_length;
}
else
{
@@ -93,11 +94,43 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
*/
sort_length+= ref_length;
}
- rec_length= sort_length + (uint)addon_buf.length;
+ rec_length= sort_length + addon_length;
max_rows= maxrows;
}
+void Sort_param::try_to_pack_addons(ulong max_length_for_sort_data)
+{
+ if (!using_addon_fields() || // no addons, or
+ using_packed_addons()) // already packed
+ return;
+
+ if (!Addon_fields::can_pack_addon_fields(res_length))
+ return;
+
+ const uint sz= Addon_fields::size_of_length_field;;
+ if (rec_length + sz > max_length_for_sort_data)
+ return;
+
+ // Heuristic: skip packing if potential savings are less than 10 bytes.
+ if (m_packable_length < (10 + sz))
+ return;
+
+ SORT_ADDON_FIELD *addonf= addon_fields->begin();
+ for (;addonf != addon_fields->end(); ++addonf)
+ {
+ addonf->offset+= sz;
+ addonf->null_offset+= sz;
+ }
+
+ addon_fields->set_using_packed_addons(true);
+ m_using_packed_addons= true;
+
+ addon_length+= sz;
+ res_length+= sz;
+ rec_length+= sz;
+}
+
/**
Sort a table.
Creates a set of pointers that can be used to read the rows
@@ -134,7 +167,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
DBUG_ASSERT(thd->variables.sortbuff_size <= SIZE_T_MAX);
size_t memory_available= (size_t)thd->variables.sortbuff_size;
uint maxbuffer;
- BUFFPEK *buffpek;
+ Merge_chunk *buffpek;
ha_rows num_rows= HA_POS_ERROR;
IO_CACHE tempfile, buffpek_pointers, *outfile;
Sort_param param;
@@ -164,13 +197,16 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
if (subselect && subselect->filesort_buffer.is_allocated())
{
- /* Reuse cache from last call */
+ // Reuse cache from last call
sort->filesort_buffer= subselect->filesort_buffer;
sort->buffpek= subselect->sortbuffer;
subselect->filesort_buffer.reset();
subselect->sortbuffer.str=0;
}
+ DBUG_ASSERT(sort->sorted_result_in_fsbuf == FALSE ||
+ sort->record_pointers == NULL);
+
outfile= &sort->io_cache;
my_b_clear(&tempfile);
@@ -183,9 +219,8 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
&multi_byte_charset),
table, max_rows, filesort->sort_positions);
- sort->addon_buf= param.addon_buf;
- sort->addon_field= param.addon_field;
- sort->unpack= unpack_addon_fields;
+ sort->addon_fields= param.addon_fields;
+
if (multi_byte_charset &&
!(param.tmp_buffer= (char*) my_malloc(param.sort_length,
MYF(MY_WME | MY_THREAD_SPECIFIC))))
@@ -208,7 +243,15 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
thd->query_plan_flags|= QPLAN_FILESORT_PRIORITY_QUEUE;
status_var_increment(thd->status_var.filesort_pq_sorts_);
tracker->incr_pq_used();
+ param.using_pq= true;
const size_t compare_length= param.sort_length;
+ /*
+ For PQ queries (with limit) we know exactly how many pointers/records
+ we have in the buffer, so to simplify things, we initialize
+ all pointers here. (We cannot pack fields anyways, so there is no
+ point in doing lazy initialization).
+ */
+ sort->init_record_pointers();
if (pq.init(param.max_rows,
true, // max_at_top
NULL, // compare_function
@@ -223,21 +266,23 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
DBUG_ASSERT(thd->is_error());
goto err;
}
- // For PQ queries (with limit) we initialize all pointers.
- sort->init_record_pointers();
}
else
{
DBUG_PRINT("info", ("filesort PQ is not applicable"));
+ param.try_to_pack_addons(thd->variables.max_length_for_sort_data);
+ param.using_pq= false;
+
size_t min_sort_memory= MY_MAX(MIN_SORT_MEMORY,
param.sort_length*MERGEBUFF2);
- set_if_bigger(min_sort_memory, sizeof(BUFFPEK*)*MERGEBUFF2);
+ set_if_bigger(min_sort_memory, sizeof(Merge_chunk*)*MERGEBUFF2);
while (memory_available >= min_sort_memory)
{
ulonglong keys= memory_available / (param.rec_length + sizeof(char*));
param.max_keys_per_buffer= (uint) MY_MIN(num_rows, keys);
- if (sort->alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length))
+ sort->alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length);
+ if (sort->sort_buffer_size() > 0)
break;
size_t old_memory_available= memory_available;
memory_available= memory_available/4*3;
@@ -258,7 +303,9 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
goto err;
param.sort_form= table;
- param.end=(param.local_sortorder=filesort->sortorder)+s_length;
+ param.local_sortorder=
+ Bounds_checked_array<SORT_FIELD>(filesort->sortorder, s_length);
+
num_rows= find_all_keys(thd, ¶m, select,
sort,
&buffpek_pointers,
@@ -287,12 +334,20 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
my_free(sort->buffpek.str);
sort->buffpek.str= 0;
}
+
+ if (param.using_addon_fields())
+ {
+ DBUG_ASSERT(sort->addon_fields);
+ if (!sort->addon_fields->allocate_addon_buf(param.addon_length))
+ goto err;
+ }
+
if (!(sort->buffpek.str=
(char *) read_buffpek_from_file(&buffpek_pointers, maxbuffer,
(uchar*) sort->buffpek.str)))
goto err;
sort->buffpek.length= maxbuffer;
- buffpek= (BUFFPEK *) sort->buffpek.str;
+ buffpek= (Merge_chunk *) sort->buffpek.str;
close_cached_file(&buffpek_pointers);
/* Open cached file if it isn't open */
if (! my_b_inited(outfile) &&
@@ -306,25 +361,25 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
Use also the space previously used by string pointers in sort_buffer
for temporary key storage.
*/
- param.max_keys_per_buffer=((param.max_keys_per_buffer *
- (param.rec_length + sizeof(char*))) /
- param.rec_length - 1);
+
+ param.max_keys_per_buffer= static_cast<uint>(sort->sort_buffer_size()) /
+ param.rec_length;
set_if_bigger(param.max_keys_per_buffer, 1);
maxbuffer--; // Offset from 0
- if (merge_many_buff(¶m,
- (uchar*) sort->get_sort_keys(),
+
+ if (merge_many_buff(¶m, sort->get_raw_buf(),
buffpek,&maxbuffer,
- &tempfile))
+ &tempfile))
goto err;
if (flush_io_cache(&tempfile) ||
reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
goto err;
if (merge_index(¶m,
- (uchar*) sort->get_sort_keys(),
+ sort->get_raw_buf(),
buffpek,
maxbuffer,
&tempfile,
- outfile))
+ outfile))
goto err;
}
@@ -339,7 +394,8 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
my_free(param.tmp_buffer);
if (!subselect || !subselect->is_uncacheable())
{
- sort->free_sort_buffer();
+ if (!param.using_addon_fields())
+ sort->free_sort_buffer();
my_free(sort->buffpek.str);
}
else
@@ -347,7 +403,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
/* Remember sort buffers for next subquery call */
subselect->filesort_buffer= sort->filesort_buffer;
subselect->sortbuffer= sort->buffpek;
- sort->filesort_buffer.reset(); // Don't free this
+ sort->filesort_buffer.reset(); // Don't free this
}
sort->buffpek.str= 0;
@@ -361,7 +417,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
my_off_t save_pos=outfile->pos_in_file;
/* For following reads */
if (reinit_io_cache(outfile,READ_CACHE,0L,0,0))
- error=1;
+ error=1;
outfile->end_of_file=save_pos;
}
}
@@ -490,10 +546,10 @@ uint Filesort::make_sortorder(THD *thd, JOIN *join, table_map first_table_bit)
static uchar *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count,
uchar *buf)
{
- size_t length= sizeof(BUFFPEK)*count;
+ size_t length= sizeof(Merge_chunk)*count;
uchar *tmp= buf;
DBUG_ENTER("read_buffpek_from_file");
- if (count > UINT_MAX/sizeof(BUFFPEK))
+ if (count > UINT_MAX/sizeof(Merge_chunk))
return 0; /* sizeof(BUFFPEK)*count will overflow */
if (!tmp)
tmp= (uchar *)my_malloc(length, MYF(MY_WME | MY_THREAD_SPECIFIC));
@@ -702,7 +758,8 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
handler *file;
MY_BITMAP *save_read_set, *save_write_set;
Item *sort_cond;
- ha_rows retval;
+ ha_rows num_records= 0;
+ const bool packed_addon_fields= param->using_packed_addons();
DBUG_ENTER("find_all_keys");
DBUG_PRINT("info",("using: %s",
(select ? select->quick ? "ranges" : "where":
@@ -810,23 +867,27 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
if (write_record)
{
- ++(*found_rows);
if (pq)
- {
pq->push(ref_pos);
- idx= pq->num_elements();
- }
else
{
- if (idx == param->max_keys_per_buffer)
+ if (fs_info->isfull())
{
if (write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
goto err;
- idx= 0;
- indexpos++;
+ idx= 0;
+ indexpos++;
}
- make_sortkey(param, fs_info->get_record_buffer(idx++), ref_pos);
+ if (idx == 0)
+ fs_info->init_next_record_pointer();
+ uchar *start_of_rec= fs_info->get_next_record_pointer();
+
+ const uint rec_sz= make_sortkey(param, start_of_rec, ref_pos);
+ if (packed_addon_fields && rec_sz != param->rec_length)
+ fs_info->adjust_next_record_pointer(rec_sz);
+ idx++;
}
+ num_records++;
}
/* It does not make sense to read more keys in case of a fatal error */
@@ -862,11 +923,14 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
if (indexpos && idx &&
write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
- retval= (my_b_inited(tempfile) ?
- (ha_rows) (my_b_tell(tempfile)/param->rec_length) :
- idx);
- DBUG_PRINT("info", ("find_all_keys return %llu", (ulonglong) retval));
- DBUG_RETURN(retval);
+
+ (*found_rows)= num_records;
+ if (pq)
+ num_records= pq->num_elements();
+
+
+ DBUG_PRINT("info", ("find_all_keys return %llu", (ulonglong) num_records));
+ DBUG_RETURN(num_records);
err:
sort_form->column_bitmaps_set(save_read_set, save_write_set);
@@ -901,36 +965,48 @@ write_keys(Sort_param *param, SORT_INFO *fs_info, uint count,
IO_CACHE *buffpek_pointers, IO_CACHE *tempfile)
{
size_t rec_length;
- uchar **end;
- BUFFPEK buffpek;
+ Merge_chunk buffpek;
DBUG_ENTER("write_keys");
rec_length= param->rec_length;
- uchar **sort_keys= fs_info->get_sort_keys();
fs_info->sort_buffer(param, count);
if (!my_b_inited(tempfile) &&
open_cached_file(tempfile, mysql_tmpdir, TEMP_PREFIX, DISK_BUFFER_SIZE,
MYF(MY_WME)))
- goto err; /* purecov: inspected */
+ DBUG_RETURN(1); /* purecov: inspected */
/* check we won't have more buffpeks than we can possibly keep in memory */
- if (my_b_tell(buffpek_pointers) + sizeof(BUFFPEK) > (ulonglong)UINT_MAX)
- goto err;
+ if (my_b_tell(buffpek_pointers) + sizeof(Merge_chunk) > (ulonglong)UINT_MAX)
+ DBUG_RETURN(1);
+
bzero(&buffpek, sizeof(buffpek));
- buffpek.file_pos= my_b_tell(tempfile);
+ buffpek.set_file_position(my_b_tell(tempfile));
if ((ha_rows) count > param->max_rows)
count=(uint) param->max_rows; /* purecov: inspected */
- buffpek.count=(ha_rows) count;
- for (end=sort_keys+count ; sort_keys != end ; sort_keys++)
- if (my_b_write(tempfile, (uchar*) *sort_keys, (uint) rec_length))
- goto err;
+ buffpek.set_rowcount(static_cast<ha_rows>(count));
+
+ const bool packed_addon_fields= param->using_packed_addons();
+ for (uint ix= 0; ix < count; ++ix)
+ {
+ uchar *record= fs_info->get_sorted_record(ix);
+ if (packed_addon_fields)
+ {
+ rec_length= param->sort_length +
+ Addon_fields::read_addon_length(record + param->sort_length);
+ }
+ else
+ rec_length= param->rec_length;
+
+ if (my_b_write(tempfile, record, rec_length))
+ DBUG_RETURN(1); /* purecov: inspected */
+ }
+
if (my_b_write(buffpek_pointers, (uchar*) &buffpek, sizeof(buffpek)))
- goto err;
+ DBUG_RETURN(1);
+
DBUG_RETURN(0);
-err:
- DBUG_RETURN(1);
} /* write_keys */
@@ -1168,14 +1244,15 @@ Type_handler_real_result::make_sort_key(uchar *to, Item *item,
/** Make a sort-key from record. */
-static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos)
+static uint make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos)
{
Field *field;
SORT_FIELD *sort_field;
uint length;
+ uchar *orig_to= to;
- for (sort_field=param->local_sortorder ;
- sort_field != param->end ;
+ for (sort_field=param->local_sortorder.begin() ;
+ sort_field != param->local_sortorder.end() ;
sort_field++)
{
bool maybe_null=0;
@@ -1202,15 +1279,15 @@ static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos)
length=sort_field->length;
while (length--)
{
- *to = (uchar) (~ *to);
- to++;
+ *to = (uchar) (~ *to);
+ to++;
}
}
else
to+= sort_field->length;
}
- if (param->addon_field)
+ if (param->using_addon_fields())
{
/*
Save field values appended to sorted fields.
@@ -1218,41 +1295,44 @@ static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos)
In this implementation we use fixed layout for field values -
the same for all records.
*/
- SORT_ADDON_FIELD *addonf= param->addon_field;
+ SORT_ADDON_FIELD *addonf= param->addon_fields->begin();
uchar *nulls= to;
+ uchar *p_len= to;
DBUG_ASSERT(addonf != 0);
+ const bool packed_addon_fields= param->addon_fields->using_packed_addons();
+ uint32 res_len= addonf->offset;
memset(nulls, 0, addonf->offset);
to+= addonf->offset;
- for ( ; (field= addonf->field) ; addonf++)
+ for ( ; addonf != param->addon_fields->end() ; addonf++)
{
+ Field *field= addonf->field;
if (addonf->null_bit && field->is_null())
{
nulls[addonf->null_offset]|= addonf->null_bit;
-#ifdef HAVE_valgrind
- bzero(to, addonf->length);
-#endif
+ if (!packed_addon_fields)
+ to+= addonf->length;
}
else
{
-#ifdef HAVE_valgrind
uchar *end= field->pack(to, field->ptr);
- uint length= (uint) ((to + addonf->length) - end);
- DBUG_ASSERT((int) length >= 0);
- if (length)
- bzero(end, length);
-#else
- (void) field->pack(to, field->ptr);
-#endif
+ int sz= static_cast<int>(end - to);
+ res_len += sz;
+ if (packed_addon_fields)
+ to+= sz;
+ else
+ to+= addonf->length;
}
- to+= addonf->length;
}
+ if (packed_addon_fields)
+ Addon_fields::store_addon_length(p_len, res_len);
}
else
{
/* Save filepos last */
memcpy((uchar*) to, ref_pos, (size_t) param->ref_length);
+ to+= param->ref_length;
}
- return;
+ return static_cast<uint>(to - orig_to);
}
@@ -1265,8 +1345,8 @@ static void register_used_fields(Sort_param *param)
SORT_FIELD *sort_field;
TABLE *table=param->sort_form;
- for (sort_field= param->local_sortorder ;
- sort_field != param->end ;
+ for (sort_field= param->local_sortorder.begin() ;
+ sort_field != param->local_sortorder.end() ;
sort_field++)
{
Field *field;
@@ -1281,12 +1361,14 @@ static void register_used_fields(Sort_param *param)
}
}
- if (param->addon_field)
+ if (param->using_addon_fields())
{
- SORT_ADDON_FIELD *addonf= param->addon_field;
- Field *field;
- for ( ; (field= addonf->field) ; addonf++)
+ SORT_ADDON_FIELD *addonf= param->addon_fields->begin();
+ for ( ; (addonf != param->addon_fields->end()) ; addonf++)
+ {
+ Field *field= addonf->field;
field->register_field_in_read_map();
+ }
}
else
{
@@ -1305,16 +1387,24 @@ static bool save_index(Sort_param *param, uint count,
DBUG_ASSERT(table_sort->record_pointers == 0);
table_sort->sort_buffer(param, count);
+
+ if (param->using_addon_fields())
+ {
+ table_sort->sorted_result_in_fsbuf= TRUE;
+ table_sort->set_sort_length(param->sort_length);
+ DBUG_RETURN(0);
+ }
+
res_length= param->res_length;
offset= param->rec_length-res_length;
if (!(to= table_sort->record_pointers=
(uchar*) my_malloc(res_length*count,
MYF(MY_WME | MY_THREAD_SPECIFIC))))
DBUG_RETURN(1); /* purecov: inspected */
- uchar **sort_keys= table_sort->get_sort_keys();
- for (uchar **end= sort_keys+count ; sort_keys != end ; sort_keys++)
+ for (uint ix= 0; ix < count; ++ix)
{
- memcpy(to, *sort_keys+offset, res_length);
+ uchar *record= table_sort->get_sorted_record(ix);
+ memcpy(to, record + offset, res_length);
to+= res_length;
}
DBUG_RETURN(0);
@@ -1385,8 +1475,9 @@ static bool check_if_pq_applicable(Sort_param *param,
// The whole source set fits into memory.
if (param->max_rows < num_rows/PQ_slowness )
{
- DBUG_RETURN(filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->rec_length) != NULL);
+ filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->rec_length);
+ DBUG_RETURN(filesort_info->sort_buffer_size() != 0);
}
else
{
@@ -1398,12 +1489,13 @@ static bool check_if_pq_applicable(Sort_param *param,
// Do we have space for LIMIT rows in memory?
if (param->max_keys_per_buffer < num_available_keys)
{
- DBUG_RETURN(filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->rec_length) != NULL);
+ filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->rec_length);
+ DBUG_RETURN(filesort_info->sort_buffer_size() != 0);
}
// Try to strip off addon fields.
- if (param->addon_field)
+ if (param->addon_fields)
{
const size_t row_length=
param->sort_length + param->ref_length + sizeof(char*);
@@ -1435,14 +1527,15 @@ static bool check_if_pq_applicable(Sort_param *param,
if (sort_merge_cost < pq_cost)
DBUG_RETURN(false);
- if (filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->sort_length +
- param->ref_length))
+ filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->sort_length + param->ref_length);
+
+ if (filesort_info->sort_buffer_size() > 0)
{
/* Make attached data to be references instead of fields. */
- my_free(filesort_info->addon_field);
- filesort_info->addon_field= NULL;
- param->addon_field= NULL;
+ my_free(filesort_info->addon_fields);
+ filesort_info->addon_fields= NULL;
+ param->addon_fields= NULL;
param->res_length= param->ref_length;
param->sort_length+= param->ref_length;
@@ -1458,12 +1551,12 @@ static bool check_if_pq_applicable(Sort_param *param,
/** Merge buffers to make < MERGEBUFF2 buffers. */
-int merge_many_buff(Sort_param *param, uchar *sort_buffer,
- BUFFPEK *buffpek, uint *maxbuffer, IO_CACHE *t_file)
+int merge_many_buff(Sort_param *param, Sort_buffer sort_buffer,
+ Merge_chunk *buffpek, uint *maxbuffer, IO_CACHE *t_file)
{
uint i;
IO_CACHE t_file2,*from_file,*to_file,*temp;
- BUFFPEK *lastbuff;
+ Merge_chunk *lastbuff;
DBUG_ENTER("merge_many_buff");
if (*maxbuffer < MERGEBUFF2)
@@ -1483,11 +1576,11 @@ int merge_many_buff(Sort_param *param, uchar *sort_buffer,
lastbuff=buffpek;
for (i=0 ; i <= *maxbuffer-MERGEBUFF*3/2 ; i+=MERGEBUFF)
{
- if (merge_buffers(param,from_file,to_file,sort_buffer,lastbuff++,
+ if (merge_buffers(param,from_file,to_file,sort_buffer, lastbuff++,
buffpek+i,buffpek+i+MERGEBUFF-1,0))
goto cleanup;
}
- if (merge_buffers(param,from_file,to_file,sort_buffer,lastbuff++,
+ if (merge_buffers(param,from_file,to_file,sort_buffer, lastbuff++,
buffpek+i,buffpek+ *maxbuffer,0))
break; /* purecov: inspected */
if (flush_io_cache(to_file))
@@ -1513,24 +1606,68 @@ int merge_many_buff(Sort_param *param, uchar *sort_buffer,
(ulong)-1 if something goes wrong
*/
-ulong read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
- uint rec_length)
+ulong read_to_buffer(IO_CACHE *fromfile, Merge_chunk *buffpek,
+ Sort_param *param)
{
- ulong count;
- ulong length= 0;
+ ha_rows count;
+ uint rec_length= param->rec_length;
- if ((count= (ulong) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
+ if ((count= MY_MIN(buffpek->max_keys(),buffpek->rowcount())))
{
- length= rec_length*count;
- if (unlikely(my_b_pread(fromfile, (uchar*) buffpek->base, length,
- buffpek->file_pos)))
+ size_t bytes_to_read;
+ if (param->using_packed_addons())
+ {
+ count= buffpek->rowcount();
+ bytes_to_read= MY_MIN(buffpek->buffer_size(),
+ static_cast<size_t>(fromfile->end_of_file -
+ buffpek->file_position()));
+ }
+ else
+ bytes_to_read= rec_length * static_cast<size_t>(count);
+
+ if (unlikely(my_b_pread(fromfile, buffpek->buffer_start(),
+ bytes_to_read, buffpek->file_position())))
return ((ulong) -1);
- buffpek->key=buffpek->base;
- buffpek->file_pos+= length; /* New filepos */
- buffpek->count-= count;
- buffpek->mem_count= count;
+
+ size_t num_bytes_read;
+ if (param->using_packed_addons())
+ {
+ /*
+ The last record read is most likely not complete here.
+ We need to loop through all the records, reading the length fields,
+ and then "chop off" the final incomplete record.
+ */
+ uchar *record= buffpek->buffer_start();
+ uint ix= 0;
+ for (; ix < count; ++ix)
+ {
+ if (record + param->sort_length + Addon_fields::size_of_length_field >
+ buffpek->buffer_end())
+ break; // Incomplete record.
+ uchar *plen= record + param->sort_length;
+ uint res_length= Addon_fields::read_addon_length(plen);
+ if (plen + res_length > buffpek->buffer_end())
+ break; // Incomplete record.
+ DBUG_ASSERT(res_length > 0);
+ record+= param->sort_length;
+ record+= res_length;
+ }
+ DBUG_ASSERT(ix > 0);
+ count= ix;
+ num_bytes_read= record - buffpek->buffer_start();
+ DBUG_PRINT("info", ("read %llu bytes of complete records",
+ static_cast<ulonglong>(bytes_to_read)));
+ }
+ else
+ num_bytes_read= bytes_to_read;
+
+ buffpek->init_current_key();
+ buffpek->advance_file_position(num_bytes_read); /* New filepos */
+ buffpek->decrement_rowcount(count);
+ buffpek->set_mem_count(count);
+ return (ulong) num_bytes_read;
}
- return (length);
+ return 0;
} /* read_to_buffer */
@@ -1545,25 +1682,15 @@ ulong read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
@param[in] key_length key length
*/
-void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length)
+void reuse_freed_buff(QUEUE *queue, Merge_chunk *reuse, uint key_length)
{
- uchar *reuse_end= reuse->base + reuse->max_keys * key_length;
for (uint i= queue_first_element(queue);
i <= queue_last_element(queue);
i++)
{
- BUFFPEK *bp= (BUFFPEK *) queue_element(queue, i);
- if (bp->base + bp->max_keys * key_length == reuse->base)
- {
- bp->max_keys+= reuse->max_keys;
+ Merge_chunk *bp= (Merge_chunk *) queue_element(queue, i);
+ if (reuse->merge_freed_buff(bp))
return;
- }
- else if (bp->base == reuse_end)
- {
- bp->base= reuse->base;
- bp->max_keys+= reuse->max_keys;
- return;
- }
}
DBUG_ASSERT(0);
}
@@ -1588,8 +1715,8 @@ void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length)
*/
bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
- IO_CACHE *to_file, uchar *sort_buffer,
- BUFFPEK *lastbuff, BUFFPEK *Fb, BUFFPEK *Tb,
+ IO_CACHE *to_file, Sort_buffer sort_buffer,
+ Merge_chunk *lastbuff, Merge_chunk *Fb, Merge_chunk *Tb,
int flag)
{
bool error= 0;
@@ -1599,7 +1726,7 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
ha_rows max_rows,org_max_rows;
my_off_t to_start_filepos;
uchar *strpos;
- BUFFPEK *buffpek;
+ Merge_chunk *buffpek;
QUEUE queue;
qsort2_cmp cmp;
void *first_cmp_arg;
@@ -1625,7 +1752,7 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
uint wr_offset= flag ? offset : 0;
maxcount= (ulong) (param->max_keys_per_buffer/((uint) (Tb-Fb) +1));
to_start_filepos= my_b_tell(to_file);
- strpos= sort_buffer;
+ strpos= sort_buffer.array();
org_max_rows=max_rows= param->max_rows;
set_if_bigger(maxcount, 1);
@@ -1640,19 +1767,23 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
cmp= get_ptr_compare(sort_length);
first_cmp_arg= (void*) &sort_length;
}
- if (unlikely(init_queue(&queue, (uint) (Tb-Fb)+1, offsetof(BUFFPEK,key), 0,
+ if (unlikely(init_queue(&queue, (uint) (Tb-Fb)+1,
+ offsetof(Merge_chunk,m_current_key), 0,
(queue_compare) cmp, first_cmp_arg, 0, 0)))
DBUG_RETURN(1); /* purecov: inspected */
for (buffpek= Fb ; buffpek <= Tb ; buffpek++)
{
- buffpek->base= strpos;
- buffpek->max_keys= maxcount;
- bytes_read= read_to_buffer(from_file, buffpek, rec_length);
+ buffpek->set_buffer(strpos,
+ strpos + (sort_buffer.size()/((uint) (Tb-Fb) +1)));
+
+ buffpek->set_max_keys(maxcount);
+ bytes_read= read_to_buffer(from_file, buffpek, param);
if (unlikely(bytes_read == (ulong) -1))
goto err; /* purecov: inspected */
-
strpos+= bytes_read;
- buffpek->max_keys= buffpek->mem_count; // If less data in buffers than expected
+ buffpek->set_buffer_end(strpos);
+ // If less data in buffers than expected
+ buffpek->set_max_keys(buffpek->mem_count());
queue_insert(&queue, (uchar*) buffpek);
}
@@ -1663,16 +1794,17 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
Copy the first argument to unique_buff for unique removal.
Store it also in 'to_file'.
*/
- buffpek= (BUFFPEK*) queue_top(&queue);
- memcpy(unique_buff, buffpek->key, rec_length);
+ buffpek= (Merge_chunk*) queue_top(&queue);
+ memcpy(unique_buff, buffpek->current_key(), rec_length);
if (min_dupl_count)
memcpy(&dupl_count, unique_buff+dupl_count_ofs,
sizeof(dupl_count));
- buffpek->key+= rec_length;
- if (! --buffpek->mem_count)
+ buffpek->advance_current_key(rec_length);
+ buffpek->decrement_mem_count();
+ if (buffpek->mem_count() == 0)
{
if (unlikely(!(bytes_read= read_to_buffer(from_file, buffpek,
- rec_length))))
+ param))))
{
(void) queue_remove_top(&queue);
reuse_freed_buff(&queue, buffpek, rec_length);
@@ -1692,61 +1824,68 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
for (;;)
{
- buffpek= (BUFFPEK*) queue_top(&queue);
- src= buffpek->key;
+ buffpek= (Merge_chunk*) queue_top(&queue);
+ src= buffpek->current_key();
if (cmp) // Remove duplicates
{
- if (!(*cmp)(first_cmp_arg, &unique_buff,
- (uchar**) &buffpek->key))
- {
+ uchar *current_key= buffpek->current_key();
+ if (!(*cmp)(first_cmp_arg, &unique_buff, ¤t_key))
+ {
if (min_dupl_count)
- {
+ {
element_count cnt;
- memcpy(&cnt, (uchar *) buffpek->key+dupl_count_ofs, sizeof(cnt));
+ memcpy(&cnt, buffpek->current_key() + dupl_count_ofs, sizeof(cnt));
dupl_count+= cnt;
}
goto skip_duplicate;
}
if (min_dupl_count)
- {
+ {
memcpy(unique_buff+dupl_count_ofs, &dupl_count,
sizeof(dupl_count));
}
- src= unique_buff;
- }
-
- /*
- Do not write into the output file if this is the final merge called
- for a Unique object used for intersection and dupl_count is less
- than min_dupl_count.
- If the Unique object is used to intersect N sets of unique elements
- then for any element:
- dupl_count >= N <=> the element is occurred in each of these N sets.
- */
- if (!check_dupl_count || dupl_count >= min_dupl_count)
- {
- if (my_b_write(to_file, src+wr_offset, wr_len))
- goto err; /* purecov: inspected */
+ src= unique_buff;
}
- if (cmp)
- {
- memcpy(unique_buff, (uchar*) buffpek->key, rec_length);
- if (min_dupl_count)
- memcpy(&dupl_count, unique_buff+dupl_count_ofs,
- sizeof(dupl_count));
- }
- if (!--max_rows)
+
{
- /* Nothing more to do */
- goto end; /* purecov: inspected */
- }
+ param->get_rec_and_res_len(buffpek->current_key(),
+ &rec_length, &res_length);
+ const uint bytes_to_write= (flag == 0) ? rec_length : res_length;
+ /*
+ Do not write into the output file if this is the final merge called
+ for a Unique object used for intersection and dupl_count is less
+ than min_dupl_count.
+ If the Unique object is used to intersect N sets of unique elements
+ then for any element:
+ dupl_count >= N <=> the element occurs in each of these N sets.
+ */
+ if (!check_dupl_count || dupl_count >= min_dupl_count)
+ {
+ if (my_b_write(to_file, src + wr_offset, bytes_to_write))
+ goto err; /* purecov: inspected */
+ }
+ if (cmp)
+ {
+ memcpy(unique_buff, buffpek->current_key(), rec_length);
+ if (min_dupl_count)
+ memcpy(&dupl_count, unique_buff+dupl_count_ofs,
+ sizeof(dupl_count));
+ }
+ if (!--max_rows)
+ {
+ /* Nothing more to do */
+ goto end; /* purecov: inspected */
+ }
+ }
skip_duplicate:
- buffpek->key+= rec_length;
- if (! --buffpek->mem_count)
+ buffpek->advance_current_key(rec_length);
+ buffpek->decrement_mem_count();
+
+ if (buffpek->mem_count() == 0)
{
if (unlikely(!(bytes_read= read_to_buffer(from_file, buffpek,
- rec_length))))
+ param))))
{
(void) queue_remove_top(&queue);
reuse_freed_buff(&queue, buffpek, rec_length);
@@ -1758,9 +1897,10 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
queue_replace_top(&queue); /* Top element has been replaced */
}
}
- buffpek= (BUFFPEK*) queue_top(&queue);
- buffpek->base= (uchar*) sort_buffer;
- buffpek->max_keys= param->max_keys_per_buffer;
+ buffpek= (Merge_chunk*) queue_top(&queue);
+ buffpek->set_buffer(sort_buffer.array(),
+ sort_buffer.array() + sort_buffer.size());
+ buffpek->set_max_keys(param->max_keys_per_buffer);
/*
As we know all entries in the buffer are unique, we only have to
@@ -1768,16 +1908,17 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
*/
if (cmp)
{
- if (!(*cmp)(first_cmp_arg, &unique_buff, (uchar**) &buffpek->key))
+ uchar *current_key= buffpek->current_key();
+ if (!(*cmp)(first_cmp_arg, &unique_buff, ¤t_key))
{
if (min_dupl_count)
{
element_count cnt;
- memcpy(&cnt, (uchar *) buffpek->key+dupl_count_ofs, sizeof(cnt));
+ memcpy(&cnt, buffpek->current_key() + dupl_count_ofs, sizeof(cnt));
dupl_count+= cnt;
}
- buffpek->key+= rec_length;
- --buffpek->mem_count;
+ buffpek->advance_current_key(rec_length);
+ buffpek->decrement_mem_count();
}
if (min_dupl_count)
@@ -1796,45 +1937,40 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
do
{
- if ((ha_rows) buffpek->mem_count > max_rows)
+ if (buffpek->mem_count() > max_rows)
{ /* Don't write too many records */
- buffpek->mem_count= (uint) max_rows;
- buffpek->count= 0; /* Don't read more */
+ buffpek->set_mem_count(max_rows);
+ buffpek->set_rowcount(0); /* Don't read more */
}
- max_rows-= buffpek->mem_count;
- if (flag == 0)
+ max_rows-= buffpek->mem_count();
+ for (uint ix= 0; ix < buffpek->mem_count(); ++ix)
{
- if (my_b_write(to_file, (uchar*) buffpek->key,
- (size_t)(rec_length*buffpek->mem_count)))
- goto err; /* purecov: inspected */
- }
- else
- {
- uchar *end;
- src= buffpek->key+offset;
- for (end= src+buffpek->mem_count*rec_length ;
- src != end ;
- src+= rec_length)
+ param->get_rec_and_res_len(buffpek->current_key(),
+ &rec_length, &res_length);
+ const uint bytes_to_write= (flag == 0) ? rec_length : res_length;
+ if (check_dupl_count)
{
- if (check_dupl_count)
- {
- memcpy((uchar *) &dupl_count, src+dupl_count_ofs, sizeof(dupl_count));
- if (dupl_count < min_dupl_count)
- continue;
- }
- if (my_b_write(to_file, src, wr_len))
- goto err;
+ memcpy((uchar *) &dupl_count,
+ buffpek->current_key() + offset + dupl_count_ofs,
+ sizeof(dupl_count));
+ if (dupl_count < min_dupl_count)
+ continue;
}
+ if (my_b_write(to_file, buffpek->current_key() + wr_offset,
+ bytes_to_write))
+ goto err;
+ buffpek->advance_current_key(rec_length);
}
}
while (likely(!(error=
(bytes_read= read_to_buffer(from_file, buffpek,
- rec_length)) == (ulong) -1)) &&
+ param)) == (ulong) -1)) &&
bytes_read != 0);
end:
- lastbuff->count= MY_MIN(org_max_rows-max_rows, param->max_rows);
- lastbuff->file_pos= to_start_filepos;
+ lastbuff->set_rowcount(MY_MIN(org_max_rows-max_rows, param->max_rows));
+ lastbuff->set_file_position(to_start_filepos);
+
cleanup:
delete_queue(&queue);
DBUG_RETURN(error);
@@ -1848,13 +1984,13 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
/* Do a merge to output-file (save only positions) */
-int merge_index(Sort_param *param, uchar *sort_buffer,
- BUFFPEK *buffpek, uint maxbuffer,
- IO_CACHE *tempfile, IO_CACHE *outfile)
+int merge_index(Sort_param *param, Sort_buffer sort_buffer,
+ Merge_chunk *buffpek, uint maxbuffer,
+ IO_CACHE *tempfile, IO_CACHE *outfile)
{
DBUG_ENTER("merge_index");
- if (merge_buffers(param,tempfile,outfile,sort_buffer,buffpek,buffpek,
- buffpek+maxbuffer,1))
+ if (merge_buffers(param, tempfile, outfile, sort_buffer, buffpek, buffpek,
+ buffpek + maxbuffer, 1))
DBUG_RETURN(1); /* purecov: inspected */
DBUG_RETURN(0);
} /* merge_index */
@@ -1977,7 +2113,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
sortorder->length= (uint)cs->coll->strnxfrmlen(cs, sortorder->length);
}
if (sortorder->field->maybe_null())
- length++; // Place for NULL marker
+ length++; // Place for NULL marker
}
else
{
@@ -1988,21 +2124,40 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
*multi_byte_charset= true;
}
if (sortorder->item->maybe_null)
- length++; // Place for NULL marker
+ length++; // Place for NULL marker
}
set_if_smaller(sortorder->length, thd->variables.max_sort_length);
length+=sortorder->length;
}
- sortorder->field= (Field*) 0; // end marker
+ sortorder->field= NULL; // end marker
DBUG_PRINT("info",("sort_length: %d",length));
return length;
}
+
+/*
+ Check whether addon fields can be used or not.
+
+ @param table Table structure
+ @param sortlength Length of sort key
+ @param length [OUT] Max length of addon fields
+ @param fields [OUT] Number of addon fields
+ @param null_fields [OUT] Number of nullable addon fields
+ @param packable_length [OUT] Max length of addon fields that can be
+ packed
+
+ @retval
+ TRUE Addon fields can be used
+ FALSE Otherwise
+*/
+
bool filesort_use_addons(TABLE *table, uint sortlength,
- uint *length, uint *fields, uint *null_fields)
+ uint *length, uint *fields, uint *null_fields,
+ uint *packable_length)
{
Field **pfield, *field;
- *length= *fields= *null_fields= 0;
+ *length= *fields= *null_fields= *packable_length= 0;
+ uint field_length=0;
for (pfield= table->field; (field= *pfield) ; pfield++)
{
@@ -2010,7 +2165,12 @@ bool filesort_use_addons(TABLE *table, uint sortlength,
continue;
if (field->flags & BLOB_FLAG)
return false;
- (*length)+= field->max_packed_col_length(field->pack_length());
+ field_length= field->max_packed_col_length(field->pack_length());
+ (*length)+= field_length;
+
+ if (field->maybe_null() || field->is_packable())
+ (*packable_length)+= field_length;
+
if (field->maybe_null())
(*null_fields)++;
(*fields)++;
@@ -2035,11 +2195,11 @@ bool filesort_use_addons(TABLE *table, uint sortlength,
layouts for the values of the non-sorted fields in the buffer and
fills them.
- @param thd Current thread
- @param ptabfield Array of references to the table fields
- @param sortlength Total length of sorted fields
- @param [out] addon_buf Buffer to us for appended fields
-
+ @param table Table structure
+ @param sortlength Total length of sorted fields
+ @param addon_length [OUT] Length of addon fields
+ @param m_packable_length [OUT] Length of the addon fields that can be
+ packed
@note
The null bits for the appended values are supposed to be put together
and stored the buffer just ahead of the value of the first field.
@@ -2050,13 +2210,13 @@ bool filesort_use_addons(TABLE *table, uint sortlength,
NULL if we do not store field values with sort data.
*/
-static SORT_ADDON_FIELD *
-get_addon_fields(TABLE *table, uint sortlength, LEX_STRING *addon_buf)
+static Addon_fields*
+get_addon_fields(TABLE *table, uint sortlength,
+ uint *addon_length, uint *m_packable_length)
{
Field **pfield;
Field *field;
- SORT_ADDON_FIELD *addonf;
- uint length, fields, null_fields;
+ uint length, fields, null_fields, packable_length;
MY_BITMAP *read_set= table->read_set;
DBUG_ENTER("get_addon_fields");
@@ -2070,23 +2230,34 @@ get_addon_fields(TABLE *table, uint sortlength, LEX_STRING *addon_buf)
the values directly from sorted fields.
But beware the case when item->cmp_type() != item->result_type()
*/
- addon_buf->str= 0;
- addon_buf->length= 0;
// see remove_const() for HA_SLOW_RND_POS explanation
if (table->file->ha_table_flags() & HA_SLOW_RND_POS)
sortlength= 0;
- if (!filesort_use_addons(table, sortlength, &length, &fields, &null_fields) ||
- !my_multi_malloc(MYF(MY_WME | MY_THREAD_SPECIFIC), &addonf,
- sizeof(SORT_ADDON_FIELD) * (fields+1),
- &addon_buf->str, length, NullS))
+ void *raw_mem_addon_field, *raw_mem;
+ if (!filesort_use_addons(table, sortlength, &length, &fields, &null_fields,
+ &packable_length) ||
+ !(my_multi_malloc(MYF(MY_WME | MY_THREAD_SPECIFIC),
+ &raw_mem, sizeof(Addon_fields),
+ &raw_mem_addon_field,
+ sizeof(SORT_ADDON_FIELD) * fields,
+ NullS)))
DBUG_RETURN(0);
- addon_buf->length= length;
+ Addon_fields_array
+ addon_array(static_cast<SORT_ADDON_FIELD*>(raw_mem_addon_field), fields);
+ Addon_fields *addon_fields= new (raw_mem) Addon_fields(addon_array);
+
+ DBUG_ASSERT(addon_fields);
+
+ (*addon_length)= length;
+ (*m_packable_length)= packable_length;
+
length= (null_fields+7)/8;
null_fields= 0;
+ SORT_ADDON_FIELD* addonf= addon_fields->begin();
for (pfield= table->field; (field= *pfield) ; pfield++)
{
if (!bitmap_is_set(read_set, field->field_index))
@@ -2108,10 +2279,9 @@ get_addon_fields(TABLE *table, uint sortlength, LEX_STRING *addon_buf)
length+= addonf->length;
addonf++;
}
- addonf->field= 0; // Put end marker
DBUG_PRINT("info",("addon_length: %d",length));
- DBUG_RETURN(addonf-fields);
+ DBUG_RETURN(addon_fields);
}
@@ -2130,24 +2300,7 @@ get_addon_fields(TABLE *table, uint sortlength, LEX_STRING *addon_buf)
void.
*/
-static void
-unpack_addon_fields(struct st_sort_addon_field *addon_field, uchar *buff,
- uchar *buff_end)
-{
- Field *field;
- SORT_ADDON_FIELD *addonf= addon_field;
- for ( ; (field= addonf->field) ; addonf++)
- {
- if (addonf->null_bit && (addonf->null_bit & buff[addonf->null_offset]))
- {
- field->set_null();
- continue;
- }
- field->set_notnull();
- field->unpack(field->ptr, buff + addonf->offset, buff_end, 0);
- }
-}
/*
** functions to change a double or float to a sortable string
@@ -2197,6 +2350,17 @@ void change_double_for_sort(double nr,uchar *to)
}
}
+bool SORT_INFO::using_packed_addons()
+{
+ return addon_fields != NULL && addon_fields->using_packed_addons();
+}
+
+void SORT_INFO::free_addon_buff()
+{
+ if (addon_fields)
+ addon_fields->free_addon_buff();
+}
+
/**
Free SORT_INFO
*/
diff --git a/sql/filesort.h b/sql/filesort.h
index 5f79a5095cc..5102ee2326f 100644
--- a/sql/filesort.h
+++ b/sql/filesort.h
@@ -27,7 +27,7 @@ class Filesort_tracker;
struct SORT_FIELD;
typedef struct st_order ORDER;
class JOIN;
-
+class Addon_fields;
/**
Sorting related info.
@@ -87,7 +87,8 @@ class SORT_INFO
public:
SORT_INFO()
- :addon_field(0), record_pointers(0)
+ :addon_fields(NULL), record_pointers(0),
+ sorted_result_in_fsbuf(FALSE)
{
buffpek.str= 0;
my_b_clear(&io_cache);
@@ -98,9 +99,11 @@ class SORT_INFO
void free_data()
{
close_cached_file(&io_cache);
+ free_addon_buff();
my_free(record_pointers);
my_free(buffpek.str);
- my_free(addon_field);
+ my_free(addon_fields);
+ free_sort_buffer();
}
void reset()
@@ -108,17 +111,26 @@ class SORT_INFO
free_data();
record_pointers= 0;
buffpek.str= 0;
- addon_field= 0;
+ addon_fields= 0;
+ sorted_result_in_fsbuf= false;
}
+ void free_addon_buff();
IO_CACHE io_cache; /* If sorted through filesort */
LEX_STRING buffpek; /* Buffer for buffpek structures */
- LEX_STRING addon_buf; /* Pointer to a buffer if sorted with fields */
- struct st_sort_addon_field *addon_field; /* Pointer to the fields info */
- /* To unpack back */
- void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *);
+ Addon_fields *addon_fields; /* Addon field descriptors */
uchar *record_pointers; /* If sorted in memory */
+
+ /**
+ If the entire result of filesort fits in memory, we skip the merge phase.
+ We may leave the result in filesort_buffer
+ (indicated by sorted_result_in_fsbuf), or we may strip away
+ the sort keys, and copy the sorted result into a new buffer.
+ @see save_index()
+ */
+ bool sorted_result_in_fsbuf;
+
/*
How many rows in final result.
Also how many rows in record_pointers, if used
@@ -131,27 +143,65 @@ class SORT_INFO
void sort_buffer(Sort_param *param, uint count)
{ filesort_buffer.sort_buffer(param, count); }
- /**
- Accessors for Filesort_buffer (which @c).
- */
- uchar *get_record_buffer(uint idx)
- { return filesort_buffer.get_record_buffer(idx); }
-
uchar **get_sort_keys()
{ return filesort_buffer.get_sort_keys(); }
- uchar **alloc_sort_buffer(uint num_records, uint record_length)
+ uchar *get_sorted_record(uint ix)
+ { return filesort_buffer.get_sorted_record(ix); }
+
+ uchar *alloc_sort_buffer(uint num_records, uint record_length)
{ return filesort_buffer.alloc_sort_buffer(num_records, record_length); }
void free_sort_buffer()
{ filesort_buffer.free_sort_buffer(); }
+ bool isfull() const
+ { return filesort_buffer.isfull(); }
void init_record_pointers()
{ filesort_buffer.init_record_pointers(); }
+ void init_next_record_pointer()
+ { filesort_buffer.init_next_record_pointer(); }
+ uchar *get_next_record_pointer()
+ { return filesort_buffer.get_next_record_pointer(); }
+ void adjust_next_record_pointer(uint val)
+ { filesort_buffer.adjust_next_record_pointer(val); }
+
+ Bounds_checked_array<uchar> get_raw_buf()
+ { return filesort_buffer.get_raw_buf(); }
size_t sort_buffer_size() const
{ return filesort_buffer.sort_buffer_size(); }
+ bool is_allocated() const
+ { return filesort_buffer.is_allocated(); }
+ void set_sort_length(uint val)
+ { filesort_buffer.set_sort_length(val); }
+ uint get_sort_length() const
+ { return filesort_buffer.get_sort_length(); }
+
+ bool has_filesort_result_in_memory() const
+ {
+ return record_pointers || sorted_result_in_fsbuf;
+ }
+
+ /// Are we using "addon fields"?
+ bool using_addon_fields() const
+ {
+ return addon_fields != NULL;
+ }
+
+ /// Are we using "packed addon fields"?
+ bool using_packed_addons();
+
+ /**
+ Copies (unpacks) values appended to sorted fields from a buffer back to
+ their regular positions specified by the Field::ptr pointers.
+ @param buff Buffer which to unpack the value from
+ */
+ template<bool Packed_addon_fields>
+ inline void unpack_addon_fields(uchar *buff);
+
+
friend SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
Filesort_tracker* tracker, JOIN *join,
table_map first_table_bit);
@@ -162,7 +212,8 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
table_map first_table_bit=0);
bool filesort_use_addons(TABLE *table, uint sortlength,
- uint *length, uint *fields, uint *null_fields);
+ uint *length, uint *fields, uint *null_fields,
+ uint *m_packable_length);
void change_double_for_sort(double nr,uchar *to);
diff --git a/sql/filesort_utils.cc b/sql/filesort_utils.cc
index 703db84495f..06e3f477993 100644
--- a/sql/filesort_utils.cc
+++ b/sql/filesort_utils.cc
@@ -96,82 +96,92 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows,
# Pointer to allocated buffer
*/
-uchar **Filesort_buffer::alloc_sort_buffer(uint num_records,
- uint record_length)
+uchar *Filesort_buffer::alloc_sort_buffer(uint num_records,
+ uint record_length)
{
size_t buff_size;
- uchar **sort_keys, **start_of_data;
DBUG_ENTER("alloc_sort_buffer");
DBUG_EXECUTE_IF("alloc_sort_buffer_fail",
DBUG_SET("+d,simulate_out_of_memory"););
- buff_size= ((size_t)num_records) * (record_length + sizeof(uchar*));
- set_if_bigger(buff_size, record_length * MERGEBUFF2);
+ buff_size= ALIGN_SIZE(num_records * (record_length + sizeof(uchar*)));
- if (!m_idx_array.is_null())
+ /*
+ The minimum memory required is such that each merge buffer can hold
+ at most one key.
+ TODO varun: move this to the place where min_sort_memory is used.
+ */
+ set_if_bigger(buff_size, (record_length +sizeof(uchar*)) * MERGEBUFF2);
+
+ if (m_rawmem)
{
/*
Reuse old buffer if exists and is large enough
Note that we don't make the buffer smaller, as we want to be
prepared for next subquery iteration.
*/
-
- sort_keys= m_idx_array.array();
- if (buff_size > allocated_size)
+ if (buff_size > m_size_in_bytes)
{
/*
Better to free and alloc than realloc as we don't have to remember
the old values
*/
- my_free(sort_keys);
- if (!(sort_keys= (uchar**) my_malloc(buff_size,
- MYF(MY_THREAD_SPECIFIC))))
+ my_free(m_rawmem);
+ if (!(m_rawmem= (uchar*) my_malloc(buff_size, MYF(MY_THREAD_SPECIFIC))))
{
- reset();
+ m_size_in_bytes= 0;
DBUG_RETURN(0);
}
- allocated_size= buff_size;
}
}
else
{
- if (!(sort_keys= (uchar**) my_malloc(buff_size, MYF(MY_THREAD_SPECIFIC))))
+ if (!(m_rawmem= (uchar*) my_malloc(buff_size, MYF(MY_THREAD_SPECIFIC))))
+ {
+ m_size_in_bytes= 0;
DBUG_RETURN(0);
- allocated_size= buff_size;
+ }
+
}
- m_idx_array= Idx_array(sort_keys, num_records);
+ m_size_in_bytes= buff_size;
+ m_record_pointers= reinterpret_cast<uchar**>(m_rawmem) +
+ ((m_size_in_bytes / sizeof(uchar*)) - 1);
+ m_num_records= num_records;
m_record_length= record_length;
- start_of_data= m_idx_array.array() + m_idx_array.size();
- m_start_of_data= reinterpret_cast<uchar*>(start_of_data);
-
- DBUG_RETURN(m_idx_array.array());
+ m_idx= 0;
+ DBUG_RETURN(m_rawmem);
}
void Filesort_buffer::free_sort_buffer()
{
- my_free(m_idx_array.array());
- m_idx_array.reset();
- m_start_of_data= NULL;
+ my_free(m_rawmem);
+ *this= Filesort_buffer();
}
void Filesort_buffer::sort_buffer(const Sort_param *param, uint count)
{
size_t size= param->sort_length;
+ m_sort_keys= get_sort_keys();
+
if (count <= 1 || size == 0)
return;
- uchar **keys= get_sort_keys();
+
+ // dont reverse for PQ, it is already done
+ if (!param->using_pq)
+ reverse_record_pointers();
+
uchar **buffer= NULL;
if (radixsort_is_appliccable(count, param->sort_length) &&
(buffer= (uchar**) my_malloc(count*sizeof(char*),
MYF(MY_THREAD_SPECIFIC))))
{
- radixsort_for_str_ptr(keys, count, param->sort_length, buffer);
+ radixsort_for_str_ptr(m_sort_keys, count, param->sort_length, buffer);
my_free(buffer);
return;
}
- my_qsort2(keys, count, sizeof(uchar*), get_ptr_compare(size), &size);
+ my_qsort2(m_sort_keys, count, sizeof(uchar*), get_ptr_compare(size), &size);
}
diff --git a/sql/filesort_utils.h b/sql/filesort_utils.h
index 1ab1ba2daa8..e8b93940abf 100644
--- a/sql/filesort_utils.h
+++ b/sql/filesort_utils.h
@@ -46,68 +46,194 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows,
/**
A wrapper class around the buffer used by filesort().
- The buffer is a contiguous chunk of memory,
- where the first part is <num_records> pointers to the actual data.
+ The sort buffer is a contiguous chunk of memory,
+ containing both records to be sorted, and pointers to said records:
+
+ <start of buffer | still unused | end of buffer>
+ |rec 0|record 1 |rec 2| ............ |ptr to rec2|ptr to rec1|ptr to rec0|
+
+ Records will be inserted "left-to-right". Records are not necessarily
+ fixed-size, they can be packed and stored without any "gaps".
+
+ Record pointers will be inserted "right-to-left", as a side-effect
+ of inserting the actual records.
We wrap the buffer in order to be able to do lazy initialization of the
pointers: the buffer is often much larger than what we actually need.
+ With this allocation scheme, and lazy initialization of the pointers,
+ we are able to pack variable-sized records in the buffer,
+ and thus possibly have space for more records than we initially estimated.
+
The buffer must be kept available for multiple executions of the
same sort operation, so we have explicit allocate and free functions,
rather than doing alloc/free in CTOR/DTOR.
*/
+
class Filesort_buffer
{
public:
- Filesort_buffer()
- : m_idx_array(), m_start_of_data(NULL), allocated_size(0)
+ Filesort_buffer() :
+ m_next_rec_ptr(NULL), m_rawmem(NULL), m_record_pointers(NULL),
+ m_sort_keys(NULL),
+ m_num_records(0), m_record_length(0),
+ m_sort_length(0),
+ m_size_in_bytes(0), m_idx(0)
{}
-
- ~Filesort_buffer()
+
+ /** Sort me... */
+ void sort_buffer(const Sort_param *param, uint count);
+
+ /**
+ Reverses the record pointer array, to avoid recording new results for
+ non-deterministic mtr tests.
+ */
+ void reverse_record_pointers()
{
- my_free(m_idx_array.array());
+ if (m_idx < 2) // There is nothing to swap.
+ return;
+ uchar **keys= get_sort_keys();
+ const longlong count= m_idx - 1;
+ for (longlong ix= 0; ix <= count/2; ++ix)
+ {
+ uchar *tmp= keys[count - ix];
+ keys[count - ix] = keys[ix];
+ keys[ix]= tmp;
+ }
}
- bool is_allocated()
+ /**
+ Initializes all the record pointers.
+ */
+ void init_record_pointers()
{
- return m_idx_array.array() != 0;
+ init_next_record_pointer();
+ while (m_idx < m_num_records)
+ (void) get_next_record_pointer();
+ reverse_record_pointers();
}
- void reset()
+
+ /**
+ Prepares the buffer for the next batch of records to process.
+ */
+ void init_next_record_pointer()
{
- m_idx_array.reset();
+ m_idx= 0;
+ m_next_rec_ptr= m_rawmem;
+ m_sort_keys= NULL;
}
- /** Sort me... */
- void sort_buffer(const Sort_param *param, uint count);
+ /**
+ @returns the number of bytes currently in use for data.
+ */
+ size_t space_used_for_data() const
+ {
+ return m_next_rec_ptr ? m_next_rec_ptr - m_rawmem : 0;
+ }
- /// Initializes a record pointer.
- uchar *get_record_buffer(uint idx)
+ /**
+ @returns the number of bytes left in the buffer.
+ */
+ size_t spaceleft() const
{
- m_idx_array[idx]= m_start_of_data + (idx * m_record_length);
- return m_idx_array[idx];
+ DBUG_ASSERT(m_next_rec_ptr >= m_rawmem);
+ const size_t spaceused=
+ (m_next_rec_ptr - m_rawmem) +
+ (static_cast<size_t>(m_idx) * sizeof(uchar*));
+ return m_size_in_bytes - spaceused;
}
- /// Initializes all the record pointers.
- void init_record_pointers()
+ /**
+ Is the buffer full?
+ */
+ bool isfull() const
+ {
+ if (m_idx < m_num_records)
+ return false;
+ return spaceleft() < (m_record_length + sizeof(uchar*));
+ }
+
+ /**
+ Where should the next record be stored?
+ */
+ uchar *get_next_record_pointer()
+ {
+ uchar *retval= m_next_rec_ptr;
+ // Save the return value in the record pointer array.
+ m_record_pointers[-m_idx]= m_next_rec_ptr;
+ // Prepare for the subsequent request.
+ m_idx++;
+ m_next_rec_ptr+= m_record_length;
+ return retval;
+ }
+
+ /**
+ Adjusts for actual record length. get_next_record_pointer() above was
+ pessimistic, and assumed that the record could not be packed.
+ */
+ void adjust_next_record_pointer(uint val)
{
- for (uint ix= 0; ix < m_idx_array.size(); ++ix)
- (void) get_record_buffer(ix);
+ m_next_rec_ptr-= (m_record_length - val);
}
/// Returns total size: pointer array + record buffers.
size_t sort_buffer_size() const
{
- return allocated_size;
+ return m_size_in_bytes;
}
- /// Allocates the buffer, but does *not* initialize pointers.
- uchar **alloc_sort_buffer(uint num_records, uint record_length);
+ bool is_allocated() const
+ {
+ return m_rawmem;
+ }
+
+ /**
+ Allocates the buffer, but does *not* initialize pointers.
+ Total size = (num_records * record_length) + (num_records * sizeof(pointer))
+ space for records space for pointer to records
+ Caller is responsible for raising an error if allocation fails.
+
+ @param num_records Number of records.
+ @param record_length (maximum) size of each record.
+ @returns Pointer to allocated area, or NULL in case of out-of-memory.
+ */
+ uchar *alloc_sort_buffer(uint num_records, uint record_length);
/// Frees the buffer.
void free_sort_buffer();
- /// Getter, for calling routines which still use the uchar** interface.
- uchar **get_sort_keys() { return m_idx_array.array(); }
+ void reset()
+ {
+ m_rawmem= NULL;
+ }
+ /**
+ Used to access the "right-to-left" array of record pointers as an ordinary
+ "left-to-right" array, so that we can pass it directly on to std::sort().
+ */
+ uchar **get_sort_keys()
+ {
+ if (m_idx == 0)
+ return NULL;
+ return &m_record_pointers[1 - m_idx];
+ }
+
+ /**
+ Gets sorted record number ix. @see get_sort_keys()
+ Only valid after buffer has been sorted!
+ */
+ uchar *get_sorted_record(uint ix)
+ {
+ return m_sort_keys[ix];
+ }
+
+ /**
+ @returns The entire buffer, as a character array.
+ This is for reusing the memory for merge buffers.
+ */
+ Bounds_checked_array<uchar> get_raw_buf()
+ {
+ return Bounds_checked_array<uchar>(m_rawmem, m_size_in_bytes);
+ }
/**
We need an assignment operator, see filesort().
@@ -117,20 +243,40 @@ class Filesort_buffer
*/
Filesort_buffer &operator=(const Filesort_buffer &rhs)
{
- m_idx_array= rhs.m_idx_array;
+ m_next_rec_ptr= rhs.m_next_rec_ptr;
+ m_rawmem= rhs.m_rawmem;
+ m_record_pointers= rhs.m_record_pointers;
+ m_sort_keys= rhs.m_sort_keys;
+ m_num_records= rhs.m_num_records;
m_record_length= rhs.m_record_length;
- m_start_of_data= rhs.m_start_of_data;
- allocated_size= rhs.allocated_size;
+ m_sort_length= rhs.m_sort_length;
+ m_size_in_bytes= rhs.m_size_in_bytes;
+ m_idx= rhs.m_idx;
return *this;
}
+ uint get_sort_length() const { return m_sort_length; }
+ void set_sort_length(uint val) { m_sort_length= val; }
+
private:
- typedef Bounds_checked_array<uchar*> Idx_array;
+ uchar *m_next_rec_ptr; /// The next record will be inserted here.
+ uchar *m_rawmem; /// The raw memory buffer.
+ uchar **m_record_pointers; /// The "right-to-left" array of record pointers.
+ uchar **m_sort_keys; /// Caches the value of get_sort_keys()
+ uint m_num_records; /// Saved value from alloc_sort_buffer()
+ uint m_record_length; /// Saved value from alloc_sort_buffer()
+ uint m_sort_length; /// The length of the sort key.
+ size_t m_size_in_bytes; /// Size of raw buffer, in bytes.
- Idx_array m_idx_array; /* Pointers to key data */
- uint m_record_length;
- uchar *m_start_of_data; /* Start of key data */
- size_t allocated_size;
+ /**
+ This is the index in the "right-to-left" array of the next record to
+ be inserted into the buffer. It is signed, because we use it in signed
+ expressions like:
+ m_record_pointers[-m_idx];
+ It is longlong rather than int, to ensure that it covers UINT_MAX32
+ without any casting/warning.
+ */
+ longlong m_idx;
};
#endif // FILESORT_UTILS_INCLUDED
diff --git a/sql/records.cc b/sql/records.cc
index 3d709182a4e..2b146abb005 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -38,8 +38,8 @@
static int rr_quick(READ_RECORD *info);
int rr_sequential(READ_RECORD *info);
static int rr_from_tempfile(READ_RECORD *info);
-static int rr_unpack_from_tempfile(READ_RECORD *info);
-static int rr_unpack_from_buffer(READ_RECORD *info);
+template<bool> static int rr_unpack_from_tempfile(READ_RECORD *info);
+template<bool> static int rr_unpack_from_buffer(READ_RECORD *info);
int rr_from_pointers(READ_RECORD *info);
static int rr_from_cache(READ_RECORD *info);
static int init_rr_cache(THD *thd, READ_RECORD *info);
@@ -187,23 +187,23 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
bool disable_rr_cache)
{
IO_CACHE *tempfile;
- SORT_ADDON_FIELD *addon_field= filesort ? filesort->addon_field : 0;
DBUG_ENTER("init_read_record");
+ const bool using_addon_fields= filesort && filesort->using_addon_fields();
+
bzero((char*) info,sizeof(*info));
info->thd=thd;
info->table=table;
- info->addon_field= addon_field;
+ info->sort_info= filesort;
if ((table->s->tmp_table == INTERNAL_TMP_TABLE) &&
- !addon_field)
+ !using_addon_fields)
(void) table->file->extra(HA_EXTRA_MMAP);
- if (addon_field)
+ if (using_addon_fields)
{
- info->rec_buf= (uchar*) filesort->addon_buf.str;
- info->ref_length= (uint)filesort->addon_buf.length;
- info->unpack= filesort->unpack;
+ info->rec_buf= filesort->addon_fields->get_addon_buf();
+ info->ref_length= filesort->addon_fields->get_addon_buf_length();
}
else
{
@@ -223,9 +223,20 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
if (tempfile && !(select && select->quick))
{
- DBUG_PRINT("info",("using rr_from_tempfile"));
- info->read_record_func=
- addon_field ? rr_unpack_from_tempfile : rr_from_tempfile;
+ if (using_addon_fields)
+ {
+ DBUG_PRINT("info",("using rr_unpack_from_tempfile"));
+ if (filesort->addon_fields->using_packed_addons())
+ info->read_record_func= rr_unpack_from_tempfile<true>;
+ else
+ info->read_record_func= rr_unpack_from_tempfile<false>;
+ }
+ else
+ {
+ DBUG_PRINT("info",("using rr_from_tempfile"));
+ info->read_record_func= rr_from_tempfile;
+ }
+
info->io_cache= tempfile;
reinit_io_cache(info->io_cache,READ_CACHE,0L,0,0);
info->ref_pos=table->file->ref;
@@ -239,7 +250,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
and filesort->io_cache is read sequentially
*/
if (!disable_rr_cache &&
- !addon_field &&
+ !using_addon_fields &&
thd->variables.read_rnd_buff_size &&
!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
(table->db_stat & HA_READ_ONLY ||
@@ -264,16 +275,29 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
DBUG_PRINT("info",("using rr_quick"));
info->read_record_func= rr_quick;
}
- else if (filesort && filesort->record_pointers)
+ else if (filesort && filesort->has_filesort_result_in_memory())
{
DBUG_PRINT("info",("using record_pointers"));
if (unlikely(table->file->ha_rnd_init_with_error(0)))
DBUG_RETURN(1);
+
info->cache_pos= filesort->record_pointers;
- info->cache_end= (info->cache_pos+
- filesort->return_rows * info->ref_length);
- info->read_record_func=
- addon_field ? rr_unpack_from_buffer : rr_from_pointers;
+ if (using_addon_fields)
+ {
+ DBUG_PRINT("info",("using rr_unpack_from_buffer"));
+ DBUG_ASSERT(filesort->sorted_result_in_fsbuf);
+ info->unpack_counter= 0;
+ if (filesort->using_packed_addons())
+ info->read_record_func= rr_unpack_from_buffer<true>;
+ else
+ info->read_record_func= rr_unpack_from_buffer<false>;
+ }
+ else
+ {
+ info->cache_end= (info->cache_pos+
+ filesort->return_rows * info->ref_length);
+ info->read_record_func= rr_from_pointers;
+ }
}
else if (table->file->keyread_enabled())
{
@@ -510,7 +534,11 @@ static int rr_from_tempfile(READ_RECORD *info)
the fields values use in the result set from this buffer into their
positions in the regular record buffer.
- @param info Reference to the context including record descriptors
+ @param info Reference to the context including record
+ descriptors
+ @param Packed_addon_fields Are the addon fields packed?
+ This is a compile-time constant, to
+ avoid if (....) tests during execution.
@retval
0 Record successfully read.
@@ -518,12 +546,38 @@ static int rr_from_tempfile(READ_RECORD *info)
-1 There is no record to be read anymore.
*/
+template<bool Packed_addon_fields>
static int rr_unpack_from_tempfile(READ_RECORD *info)
{
- if (my_b_read(info->io_cache, info->rec_buf, info->ref_length))
- return -1;
- (*info->unpack)(info->addon_field, info->rec_buf,
- info->rec_buf + info->ref_length);
+ uchar *destination= info->rec_buf;
+#ifndef DBUG_OFF
+ my_off_t where= my_b_tell(info->io_cache);
+#endif
+ if (Packed_addon_fields)
+ {
+ const uint len_sz= Addon_fields::size_of_length_field;
+
+ // First read length of the record.
+ if (my_b_read(info->io_cache, destination, len_sz))
+ return -1;
+ uint res_length= Addon_fields::read_addon_length(destination);
+ DBUG_PRINT("info", ("rr_unpack from %llu to %p sz %u",
+ static_cast<ulonglong>(where),
+ destination, res_length));
+ DBUG_ASSERT(res_length > len_sz);
+ DBUG_ASSERT(info->sort_info->using_addon_fields());
+
+ // Then read the rest of the record.
+ if (my_b_read(info->io_cache, destination + len_sz, res_length - len_sz))
+ return -1; /* purecov: inspected */
+ }
+ else
+ {
+ if (my_b_read(info->io_cache, destination, info->ref_length))
+ return -1;
+ }
+
+ info->sort_info->unpack_addon_fields<Packed_addon_fields>(destination);
return 0;
}
@@ -560,7 +614,11 @@ int rr_from_pointers(READ_RECORD *info)
the fields values use in the result set from this buffer into their
positions in the regular record buffer.
- @param info Reference to the context including record descriptors
+ @param info Reference to the context including record
+ descriptors
+ @param Packed_addon_fields Are the addon fields packed?
+ This is a compile-time constant, to
+ avoid if (....) tests during execution.
@retval
0 Record successfully read.
@@ -568,13 +626,17 @@ int rr_from_pointers(READ_RECORD *info)
-1 There is no record to be read anymore.
*/
+template<bool Packed_addon_fields>
static int rr_unpack_from_buffer(READ_RECORD *info)
{
- if (info->cache_pos == info->cache_end)
+ if (info->unpack_counter == info->sort_info->return_rows)
return -1; /* End of buffer */
- (*info->unpack)(info->addon_field, info->cache_pos,
- info->cache_end);
- info->cache_pos+= info->ref_length;
+
+ uchar *record= info->sort_info->get_sorted_record(
+ static_cast<uint>(info->unpack_counter));
+ uchar *plen= record + info->sort_info->get_sort_length();
+ info->sort_info->unpack_addon_fields<Packed_addon_fields>(plen);
+ info->unpack_counter++;
return 0;
}
/* cacheing of records from a database */
@@ -709,3 +771,26 @@ static int rr_cmp(uchar *a,uchar *b)
return (int) a[7] - (int) b[7];
#endif
}
+
+template<bool Packed_addon_fields>
+inline void SORT_INFO::unpack_addon_fields(uchar *buff)
+{
+ SORT_ADDON_FIELD *addonf= addon_fields->begin();
+ uchar *buff_end= buff + sort_buffer_size();
+ const uchar *start_of_record= buff + addonf->offset;
+
+ for ( ; addonf != addon_fields->end() ; addonf++)
+ {
+ Field *field= addonf->field;
+ if (addonf->null_bit && (addonf->null_bit & buff[addonf->null_offset]))
+ {
+ field->set_null();
+ continue;
+ }
+ field->set_notnull();
+ if (Packed_addon_fields)
+ start_of_record= field->unpack(field->ptr, start_of_record, buff_end, 0);
+ else
+ field->unpack(field->ptr, buff + addonf->offset, buff_end, 0);
+ }
+}
diff --git a/sql/records.h b/sql/records.h
index faf0d13c9a9..04dc06b3c74 100644
--- a/sql/records.h
+++ b/sql/records.h
@@ -58,13 +58,23 @@ struct READ_RECORD
THD *thd;
SQL_SELECT *select;
uint ref_length, reclength, rec_cache_size, error_offset;
+
+ /**
+ Counting records when reading result from filesort().
+ Used when filesort leaves the result in the filesort buffer.
+ */
+ ha_rows unpack_counter;
+
uchar *ref_pos; /* pointer to form->refpos */
uchar *rec_buf; /* to read field values after filesort */
uchar *cache,*cache_pos,*cache_end,*read_positions;
- struct st_sort_addon_field *addon_field; /* Pointer to the fields info */
+
+ /*
+ Structure storing information about sorting
+ */
+ SORT_INFO *sort_info;
struct st_io_cache *io_cache;
bool print_error;
- void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *);
int read_record() { return read_record_func(this); }
uchar *record() const { return table->record[0]; }
diff --git a/sql/sql_array.h b/sql/sql_array.h
index bcfbb98ef19..b05e8f779bd 100644
--- a/sql/sql_array.h
+++ b/sql/sql_array.h
@@ -85,6 +85,10 @@ template <typename Element_type> class Bounds_checked_array
Element_type *array() const { return m_array; }
+ Element_type *begin() const { return array(); }
+ Element_type *end() const { return array() + m_size; }
+
+
bool operator==(const Bounds_checked_array<Element_type>&rhs) const
{
return m_array == rhs.m_array && m_size == rhs.m_size;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index bece1f9bee2..4bd1ddf7164 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -13987,7 +13987,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
*simple_order= head->on_expr_ref[0] == NULL;
if (*simple_order && head->table->file->ha_table_flags() & HA_SLOW_RND_POS)
{
- uint u1, u2, u3;
+ uint u1, u2, u3, u4;
/*
normally the condition is (see filesort_use_addons())
@@ -13998,7 +13998,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
TODO proper cost estimations
*/
- *simple_order= filesort_use_addons(head->table, 0, &u1, &u2, &u3);
+ *simple_order= filesort_use_addons(head->table, 0, &u1, &u2, &u3, &u4);
}
}
else
diff --git a/sql/sql_sort.h b/sql/sql_sort.h
index 7abbc808632..5aa8f4545a4 100644
--- a/sql/sql_sort.h
+++ b/sql/sql_sort.h
@@ -20,8 +20,6 @@
#include <my_sys.h> /* qsort2_cmp */
#include "queues.h"
-typedef struct st_buffpek BUFFPEK;
-
struct SORT_FIELD;
class Field;
struct TABLE;
@@ -64,21 +62,236 @@ struct BUFFPEK_COMPARE_CONTEXT
};
+/**
+ Descriptor for a merge chunk to be sort-merged.
+ A merge chunk is a sequence of pre-sorted records, written to a
+ temporary file. A Merge_chunk instance describes where this chunk is stored
+ in the file, and where it is located when it is in memory.
+
+ It is a POD because
+ - we read/write them from/to files.
+
+ We have accessors (getters/setters) for all struct members.
+ */
+
+struct Merge_chunk {
+public:
+ Merge_chunk(): m_current_key(NULL),
+ m_file_position(0),
+ m_buffer_start(NULL),
+ m_buffer_end(NULL),
+ m_rowcount(0),
+ m_mem_count(0),
+ m_max_keys(0)
+ {}
+
+ my_off_t file_position() const { return m_file_position; }
+ void set_file_position(my_off_t val) { m_file_position= val; }
+ void advance_file_position(my_off_t val) { m_file_position+= val; }
+
+ uchar *buffer_start() { return m_buffer_start; }
+ const uchar *buffer_end() const { return m_buffer_end; }
+
+ void set_buffer(uchar *start, uchar *end)
+ {
+ m_buffer_start= start;
+ m_buffer_end= end;
+ }
+ void set_buffer_start(uchar *start)
+ {
+ m_buffer_start= start;
+ }
+ void set_buffer_end(uchar *end)
+ {
+ DBUG_ASSERT(m_buffer_end == NULL || end <= m_buffer_end);
+ m_buffer_end= end;
+ }
+
+ void init_current_key() { m_current_key= m_buffer_start; }
+ uchar *current_key() { return m_current_key; }
+ void advance_current_key(uint val) { m_current_key+= val; }
+
+ void decrement_rowcount(ha_rows val) { m_rowcount-= val; }
+ void set_rowcount(ha_rows val) { m_rowcount= val; }
+ ha_rows rowcount() const { return m_rowcount; }
+
+ ha_rows mem_count() const { return m_mem_count; }
+ void set_mem_count(ha_rows val) { m_mem_count= val; }
+ ha_rows decrement_mem_count() { return --m_mem_count; }
+
+ ha_rows max_keys() const { return m_max_keys; }
+ void set_max_keys(ha_rows val) { m_max_keys= val; }
+
+ size_t buffer_size() const { return m_buffer_end - m_buffer_start; }
+
+ /**
+ Tries to merge *this with *mc, returns true if successful.
+ The assumption is that *this is no longer in use,
+ and the space it has been allocated can be handed over to a
+ buffer which is adjacent to it.
+ */
+ bool merge_freed_buff(Merge_chunk *mc) const
+ {
+ if (mc->m_buffer_end == m_buffer_start)
+ {
+ mc->m_buffer_end= m_buffer_end;
+ mc->m_max_keys+= m_max_keys;
+ return true;
+ }
+ else if (mc->m_buffer_start == m_buffer_end)
+ {
+ mc->m_buffer_start= m_buffer_start;
+ mc->m_max_keys+= m_max_keys;
+ return true;
+ }
+ return false;
+ }
+
+ uchar *m_current_key; /// The current key for this chunk.
+ my_off_t m_file_position;/// Current position in the file to be sorted.
+ uchar *m_buffer_start; /// Start of main-memory buffer for this chunk.
+ uchar *m_buffer_end; /// End of main-memory buffer for this chunk.
+ ha_rows m_rowcount; /// Number of unread rows in this chunk.
+ ha_rows m_mem_count; /// Number of rows in the main-memory buffer.
+ ha_rows m_max_keys; /// If we have fixed-size rows:
+ /// max number of rows in buffer.
+};
+
+typedef Bounds_checked_array<SORT_ADDON_FIELD> Addon_fields_array;
+
+/**
+ This class wraps information about usage of addon fields.
+ An Addon_fields object is used both during packing of data in the filesort
+ buffer, and later during unpacking in 'SORT_INFO::unpack_addon_fields'.
+
+ @see documentation for the SORT_ADDON_FIELD struct.
+ @see documentation for get_addon_fields()
+ */
+class Addon_fields {
+public:
+ Addon_fields(Addon_fields_array arr)
+ : m_field_descriptors(arr),
+ m_addon_buf(),
+ m_addon_buf_length(),
+ m_using_packed_addons(false)
+ {
+ DBUG_ASSERT(!arr.is_null());
+ }
+
+ SORT_ADDON_FIELD *begin() { return m_field_descriptors.begin(); }
+ SORT_ADDON_FIELD *end() { return m_field_descriptors.end(); }
+
+ /// rr_unpack_from_tempfile needs an extra buffer when unpacking.
+ uchar *allocate_addon_buf(uint sz)
+ {
+ m_addon_buf= (uchar *)my_malloc(sz, MYF(MY_WME | MY_THREAD_SPECIFIC));
+ if (m_addon_buf)
+ m_addon_buf_length= sz;
+ return m_addon_buf;
+ }
+
+ void free_addon_buff()
+ {
+ my_free(m_addon_buf);
+ m_addon_buf= NULL;
+ m_addon_buf_length= 0;
+ }
+
+ uchar *get_addon_buf() { return m_addon_buf; }
+ uint get_addon_buf_length() const { return m_addon_buf_length; }
+
+ void set_using_packed_addons(bool val)
+ {
+ m_using_packed_addons= val;
+ }
+
+ bool using_packed_addons() const
+ {
+ return m_using_packed_addons;
+ }
+
+ static bool can_pack_addon_fields(uint record_length)
+ {
+ return (record_length <= (0xFFFF));
+ }
+
+ /**
+ @returns Total number of bytes used for packed addon fields.
+ the size of the length field + size of null bits + sum of field sizes.
+ */
+ static uint read_addon_length(uchar *p)
+ {
+ return size_of_length_field + uint2korr(p);
+ }
+
+ /**
+ Stores the number of bytes used for packed addon fields.
+ */
+ static void store_addon_length(uchar *p, uint sz)
+ {
+ // We actually store the length of everything *after* the length field.
+ int2store(p, sz - size_of_length_field);
+ }
+
+ static const uint size_of_length_field= 2;
+
+private:
+ Addon_fields_array m_field_descriptors;
+
+ uchar *m_addon_buf; ///< Buffer for unpacking addon fields.
+ uint m_addon_buf_length; ///< Length of the buffer.
+ bool m_using_packed_addons; ///< Are we packing the addon fields?
+};
+
+
+/**
+ There are two record formats for sorting:
+ |<key a><key b>...|<rowid>|
+ / sort_length / ref_l /
+
+ or with "addon fields"
+ |<key a><key b>...|<null bits>|<field a><field b>...|
+ / sort_length / addon_length /
+
+ The packed format for "addon fields"
+ |<key a><key b>...|<length>|<null bits>|<field a><field b>...|
+ / sort_length / addon_length /
+
+ <key> Fields are fixed-size, specially encoded with
+ Field::make_sort_key() so we can do byte-by-byte compare.
+ <length> Contains the *actual* packed length (after packing) of
+ everything after the sort keys.
+ The size of the length field is 2 bytes,
+ which should cover most use cases: addon data <= 65535 bytes.
+ This is the same as max record size in MySQL.
+ <null bits> One bit for each nullable field, indicating whether the field
+ is null or not. May have size zero if no fields are nullable.
+ <field xx> Are stored with field->pack(), and retrieved with
+ field->unpack(). Addon fields within a record are stored
+ consecutively, with no "holes" or padding. They will have zero
+ size for NULL values.
+
+*/
+
class Sort_param {
public:
uint rec_length; // Length of sorted records.
uint sort_length; // Length of sorted columns.
uint ref_length; // Length of record ref.
+ uint addon_length; // Length of addon_fields
uint res_length; // Length of records in final sorted file/buffer.
uint max_keys_per_buffer; // Max keys / buffer.
uint min_dupl_count;
ha_rows max_rows; // Select limit, or HA_POS_ERROR if unlimited.
ha_rows examined_rows; // Number of examined rows.
TABLE *sort_form; // For quicker make_sortkey.
- SORT_FIELD *local_sortorder;
- SORT_FIELD *end;
- SORT_ADDON_FIELD *addon_field; // Descriptors for companion fields.
- LEX_STRING addon_buf; // Buffer & length of added packed fields.
+ /**
+ ORDER BY list with some precalculated info for filesort.
+ Array is created and owned by a Filesort instance.
+ */
+ Bounds_checked_array<SORT_FIELD> local_sortorder;
+ Addon_fields *addon_fields; // Descriptors for companion fields.
+ bool using_pq;
uchar *unique_buff;
bool not_killable;
@@ -93,21 +306,63 @@ class Sort_param {
}
void init_for_filesort(uint sortlen, TABLE *table,
ha_rows maxrows, bool sort_positions);
+ /// Enables the packing of addons if possible.
+ void try_to_pack_addons(ulong max_length_for_sort_data);
+
+ /// Are we packing the "addon fields"?
+ bool using_packed_addons() const
+ {
+ DBUG_ASSERT(m_using_packed_addons ==
+ (addon_fields != NULL &&
+ addon_fields->using_packed_addons()));
+ return m_using_packed_addons;
+ }
+
+ /// Are we using "addon fields"?
+ bool using_addon_fields() const
+ {
+ return addon_fields != NULL;
+ }
+
+ /**
+ Getter for record length and result length.
+ @param record_start Pointer to record.
+ @param [out] recl Store record length here.
+ @param [out] resl Store result length here.
+ */
+ void get_rec_and_res_len(uchar *record_start, uint *recl, uint *resl)
+ {
+ if (!using_packed_addons())
+ {
+ *recl= rec_length;
+ *resl= res_length;
+ return;
+ }
+ uchar *plen= record_start + sort_length;
+ *resl= Addon_fields::read_addon_length(plen);
+ DBUG_ASSERT(*resl <= res_length);
+ const uchar *record_end= plen + *resl;
+ *recl= static_cast<uint>(record_end - record_start);
+ }
+
+private:
+ uint m_packable_length;
+ bool m_using_packed_addons; ///< caches the value of using_packed_addons()
};
+typedef Bounds_checked_array<uchar> Sort_buffer;
-int merge_many_buff(Sort_param *param, uchar *sort_buffer,
- BUFFPEK *buffpek,
- uint *maxbuffer, IO_CACHE *t_file);
-ulong read_to_buffer(IO_CACHE *fromfile,BUFFPEK *buffpek,
- uint sort_length);
+int merge_many_buff(Sort_param *param, Sort_buffer sort_buffer,
+ Merge_chunk *buffpek, uint *maxbuffer, IO_CACHE *t_file);
+ulong read_to_buffer(IO_CACHE *fromfile, Merge_chunk *buffpek,
+ Sort_param *param);
bool merge_buffers(Sort_param *param,IO_CACHE *from_file,
- IO_CACHE *to_file, uchar *sort_buffer,
- BUFFPEK *lastbuff,BUFFPEK *Fb,
- BUFFPEK *Tb,int flag);
-int merge_index(Sort_param *param, uchar *sort_buffer,
- BUFFPEK *buffpek, uint maxbuffer,
- IO_CACHE *tempfile, IO_CACHE *outfile);
-void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length);
+ IO_CACHE *to_file, Sort_buffer sort_buffer,
+ Merge_chunk *lastbuff, Merge_chunk *Fb,
+ Merge_chunk *Tb, int flag);
+int merge_index(Sort_param *param, Sort_buffer sort_buffer,
+ Merge_chunk *buffpek, uint maxbuffer,
+ IO_CACHE *tempfile, IO_CACHE *outfile);
+void reuse_freed_buff(QUEUE *queue, Merge_chunk *reuse, uint key_length);
#endif /* SQL_SORT_INCLUDED */
diff --git a/sql/uniques.cc b/sql/uniques.cc
index fafb44b56a0..a8170951e88 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -39,7 +39,6 @@
#include "my_tree.h" // element_count
#include "uniques.h" // Unique
#include "sql_sort.h"
-#include "myisamchk.h" // BUFFPEK
int unique_write_to_file(uchar* key, element_count count, Unique *unique)
{
@@ -94,7 +93,7 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
init_tree(&tree, (max_in_memory_size / 16), 0, size, comp_func,
NULL, comp_func_fixed_arg, MYF(MY_THREAD_SPECIFIC));
/* If the following fail's the next add will also fail */
- my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16,
+ my_init_dynamic_array(&file_ptrs, sizeof(Merge_chunk), 16, 16,
MYF(MY_THREAD_SPECIFIC));
/*
If you change the following, change it in get_max_elements function, too.
@@ -375,10 +374,10 @@ Unique::~Unique()
/* Write tree to disk; clear tree */
bool Unique::flush()
{
- BUFFPEK file_ptr;
+ Merge_chunk file_ptr;
elements+= tree.elements_in_tree;
- file_ptr.count=tree.elements_in_tree;
- file_ptr.file_pos=my_b_tell(&file);
+ file_ptr.set_rowcount(tree.elements_in_tree);
+ file_ptr.set_file_position(my_b_tell(&file));
tree_walk_action action= min_dupl_count ?
(tree_walk_action) unique_write_to_file_with_count :
@@ -490,7 +489,7 @@ void put_counter_into_merged_element(void *ptr, uint ofs, element_count cnt)
*/
static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
- uint key_length, BUFFPEK *begin, BUFFPEK *end,
+ uint key_length, Merge_chunk *begin, Merge_chunk *end,
tree_walk_action walk_action, void *walk_action_arg,
qsort_cmp2 compare, void *compare_arg,
IO_CACHE *file, bool with_counters)
@@ -499,7 +498,8 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
QUEUE queue;
if (end <= begin ||
merge_buffer_size < (size_t) (key_length * (end - begin + 1)) ||
- init_queue(&queue, (uint) (end - begin), offsetof(BUFFPEK, key), 0,
+ init_queue(&queue, (uint) (end - begin),
+ offsetof(Merge_chunk, m_current_key), 0,
buffpek_compare, &compare_context, 0, 0))
return 1;
/* we need space for one key when a piece of merge buffer is re-read */
@@ -510,10 +510,16 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
/* if piece_size is aligned reuse_freed_buffer will always hit */
uint piece_size= max_key_count_per_piece * key_length;
ulong bytes_read; /* to hold return value of read_to_buffer */
- BUFFPEK *top;
+ Merge_chunk *top;
int res= 1;
uint cnt_ofs= key_length - (with_counters ? sizeof(element_count) : 0);
element_count cnt;
+
+ // read_to_buffer() needs only rec_length.
+ Sort_param sort_param;
+ sort_param.rec_length= key_length;
+ DBUG_ASSERT(!sort_param.using_addon_fields());
+
/*
Invariant: queue must contain top element from each tree, until a tree
is not completely walked through.
@@ -522,15 +528,16 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
*/
for (top= begin; top != end; ++top)
{
- top->base= merge_buffer + (top - begin) * piece_size;
- top->max_keys= max_key_count_per_piece;
- bytes_read= read_to_buffer(file, top, key_length);
+ top->set_buffer_start(merge_buffer + (top - begin) * piece_size);
+ top->set_buffer_end(top->buffer_start() + piece_size);
+ top->set_max_keys(max_key_count_per_piece);
+ bytes_read= read_to_buffer(file, top, &sort_param);
if (unlikely(bytes_read == (ulong) -1))
goto end;
DBUG_ASSERT(bytes_read);
queue_insert(&queue, (uchar *) top);
}
- top= (BUFFPEK *) queue_top(&queue);
+ top= (Merge_chunk *) queue_top(&queue);
while (queue.elements > 1)
{
/*
@@ -540,20 +547,21 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
elements in each tree are unique. Action is applied only to unique
elements.
*/
- void *old_key= top->key;
+ void *old_key= top->current_key();
/*
read next key from the cache or from the file and push it to the
queue; this gives new top.
*/
- top->key+= key_length;
- if (--top->mem_count)
+ top->advance_current_key(key_length);
+ top->decrement_mem_count();
+ if (top->mem_count())
queue_replace_top(&queue);
else /* next piece should be read */
{
/* save old_key not to overwrite it in read_to_buffer */
memcpy(save_key_buff, old_key, key_length);
old_key= save_key_buff;
- bytes_read= read_to_buffer(file, top, key_length);
+ bytes_read= read_to_buffer(file, top, &sort_param);
if (unlikely(bytes_read == (ulong) -1))
goto end;
else if (bytes_read) /* top->key, top->mem_count are reset */
@@ -568,9 +576,9 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
reuse_freed_buff(&queue, top, key_length);
}
}
- top= (BUFFPEK *) queue_top(&queue);
+ top= (Merge_chunk *) queue_top(&queue);
/* new top has been obtained; if old top is unique, apply the action */
- if (compare(compare_arg, old_key, top->key))
+ if (compare(compare_arg, old_key, top->current_key()))
{
cnt= with_counters ?
get_counter_from_merged_element(old_key, cnt_ofs) : 1;
@@ -579,9 +587,9 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
}
else if (with_counters)
{
- cnt= get_counter_from_merged_element(top->key, cnt_ofs);
+ cnt= get_counter_from_merged_element(top->current_key(), cnt_ofs);
cnt+= get_counter_from_merged_element(old_key, cnt_ofs);
- put_counter_into_merged_element(top->key, cnt_ofs, cnt);
+ put_counter_into_merged_element(top->current_key(), cnt_ofs, cnt);
}
}
/*
@@ -595,13 +603,13 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
{
cnt= with_counters ?
- get_counter_from_merged_element(top->key, cnt_ofs) : 1;
- if (walk_action(top->key, cnt, walk_action_arg))
+ get_counter_from_merged_element(top->current_key(), cnt_ofs) : 1;
+ if (walk_action(top->current_key(), cnt, walk_action_arg))
goto end;
- top->key+= key_length;
+ top->advance_current_key(key_length);
}
- while (--top->mem_count);
- bytes_read= read_to_buffer(file, top, key_length);
+ while (top->decrement_mem_count());
+ bytes_read= read_to_buffer(file, top, &sort_param);
if (unlikely(bytes_read == (ulong) -1))
goto end;
}
@@ -657,13 +665,14 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
if (!(merge_buffer = (uchar *)my_malloc(buff_sz, MYF(MY_WME))))
return 1;
if (buff_sz < full_size * (file_ptrs.elements + 1UL))
- res= merge(table, merge_buffer, buff_sz >= full_size * MERGEBUFF2) ;
+ res= merge(table, merge_buffer, buff_sz,
+ buff_sz >= full_size * MERGEBUFF2) ;
if (!res)
{
res= merge_walk(merge_buffer, buff_sz, full_size,
- (BUFFPEK *) file_ptrs.buffer,
- (BUFFPEK *) file_ptrs.buffer + file_ptrs.elements,
+ (Merge_chunk *) file_ptrs.buffer,
+ (Merge_chunk *) file_ptrs.buffer + file_ptrs.elements,
action, walk_action_arg,
tree.compare, tree.custom_arg, &file, with_counters);
}
@@ -684,16 +693,18 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
All params are 'IN':
table the parameter to access sort context
buff merge buffer
+ buff_size size of merge buffer
without_last_merge TRUE <=> do not perform the last merge
RETURN VALUE
0 OK
<> 0 error
*/
-bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge)
+bool Unique::merge(TABLE *table, uchar *buff, size_t buff_size,
+ bool without_last_merge)
{
IO_CACHE *outfile= &sort.io_cache;
- BUFFPEK *file_ptr= (BUFFPEK*) file_ptrs.buffer;
+ Merge_chunk *file_ptr= (Merge_chunk*) file_ptrs.buffer;
uint maxbuffer= file_ptrs.elements - 1;
my_off_t save_pos;
bool error= 1;
@@ -724,7 +735,9 @@ bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge)
sort_param.cmp_context.key_compare_arg= tree.custom_arg;
/* Merge the buffers to one file, removing duplicates */
- if (merge_many_buff(&sort_param,buff,file_ptr,&maxbuffer,&file))
+ if (merge_many_buff(&sort_param,
+ Bounds_checked_array<uchar>(buff, buff_size),
+ file_ptr,&maxbuffer,&file))
goto err;
if (flush_io_cache(&file) ||
reinit_io_cache(&file,READ_CACHE,0L,0,0))
@@ -736,7 +749,8 @@ bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge)
file_ptrs.elements= maxbuffer+1;
return 0;
}
- if (merge_index(&sort_param, buff, file_ptr, maxbuffer, &file, outfile))
+ if (merge_index(&sort_param, Bounds_checked_array<uchar>(buff, buff_size),
+ file_ptr, maxbuffer, &file, outfile))
goto err;
error= 0;
err:
@@ -791,7 +805,7 @@ bool Unique::get(TABLE *table)
MYF(MY_THREAD_SPECIFIC|MY_WME))))
DBUG_RETURN(1);
- if (merge(table, sort_buffer, FALSE))
+ if (merge(table, sort_buffer, buff_sz, FALSE))
goto err;
rc= 0;
diff --git a/sql/uniques.h b/sql/uniques.h
index 654b3692aaa..f83eac36855 100644
--- a/sql/uniques.h
+++ b/sql/uniques.h
@@ -39,7 +39,7 @@ class Unique :public Sql_alloc
uint min_dupl_count; /* always 0 for unions, > 0 for intersections */
bool with_counters;
- bool merge(TABLE *table, uchar *buff, bool without_last_merge);
+ bool merge(TABLE *table, uchar *buff, size_t size, bool without_last_merge);
bool flush();
public:
diff --git a/storage/connect/mysql-test/connect/r/mysql_index.result b/storage/connect/mysql-test/connect/r/mysql_index.result
index b0c88b16fef..5f8f41f6218 100644
--- a/storage/connect/mysql-test/connect/r/mysql_index.result
+++ b/storage/connect/mysql-test/connect/r/mysql_index.result
@@ -299,11 +299,11 @@ matricule nom prenom
7626 HENIN PHILIPPE
403 HERMITTE PHILIPPE
9096 HELENA PHILIPPE
-SELECT matricule, nom, prenom FROM t2 ORDER BY nom LIMIT 10;
+SELECT matricule, nom, prenom FROM t2 ORDER BY nom,prenom LIMIT 10;
matricule nom prenom
4552 ABBADIE MONIQUE
-6627 ABBAYE GERALD
307 ABBAYE ANNICK
+6627 ABBAYE GERALD
7961 ABBE KATIA
1340 ABBE MICHELE
9270 ABBE SOPHIE
diff --git a/storage/connect/mysql-test/connect/t/mysql_index.test b/storage/connect/mysql-test/connect/t/mysql_index.test
index 74dc48f42c8..e36a827ac3c 100644
--- a/storage/connect/mysql-test/connect/t/mysql_index.test
+++ b/storage/connect/mysql-test/connect/t/mysql_index.test
@@ -120,7 +120,7 @@ SELECT matricule, nom, prenom FROM t2 WHERE nom <= 'ABEL' OR nom > 'YVON';
SELECT matricule, nom, prenom FROM t2 WHERE nom > 'HELEN' AND nom < 'HEROS';
SELECT matricule, nom, prenom FROM t2 WHERE nom BETWEEN 'HELEN' AND 'HEROS';
SELECT matricule, nom, prenom FROM t2 WHERE nom BETWEEN 'HELEN' AND 'HEROS' AND prenom = 'PHILIPPE';
-SELECT matricule, nom, prenom FROM t2 ORDER BY nom LIMIT 10;
+SELECT matricule, nom, prenom FROM t2 ORDER BY nom,prenom LIMIT 10;
SELECT a.nom, a.prenom, b.nom FROM t1 a STRAIGHT_JOIN t2 b ON a.prenom = b.prenom WHERE a.nom = 'FOCH' AND a.nom != b.nom;
DROP TABLE t2;
1
0
revision-id: e709eb9bf712006d070767629518f827cd2f6bed (mariadb-10.3.21-23-ge709eb9bf71)
parent(s): d531b4ee3a9bcd89a2fa6b49a2207eaf966f53e3 b04429434ad1ee7f49d263762a560f4ff31dd111
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2020-01-17 00:46:40 +0300
message:
Merge branch '10.2' into 10.3
# Conflicts:
# mysql-test/suite/galera/r/MW-388.result
# mysql-test/suite/galera/t/MW-388.test
# mysql-test/suite/innodb/r/truncate_inject.result
# mysql-test/suite/innodb/t/truncate_inject.test
# mysql-test/suite/rpl/r/rpl_stop_slave.result
# mysql-test/suite/rpl/t/rpl_stop_slave.test
# sql/sp_head.cc
# sql/sp_head.h
# sql/sql_lex.cc
# sql/sql_yacc.yy
# storage/xtradb/buf/buf0dblwr.cc
mysql-test/include/binlog_inject_error.inc | 4 +-
mysql-test/main/cache_temporal_4265.result | 3 +-
mysql-test/main/cache_temporal_4265.test | 6 +-
mysql-test/main/create_or_replace2.result | 4 +-
mysql-test/main/create_or_replace2.test | 4 +-
mysql-test/main/drop_bad_db_type.result | 6 +-
mysql-test/main/drop_bad_db_type.test | 6 +-
mysql-test/main/engine_error_in_alter-8453.result | 5 +-
mysql-test/main/engine_error_in_alter-8453.test | 5 +-
mysql-test/main/error_simulation.result | 11 +-
mysql-test/main/error_simulation.test | 11 +-
mysql-test/main/func_regexp_pcre_debug.result | 3 +-
mysql-test/main/func_regexp_pcre_debug.test | 3 +-
mysql-test/main/log_slow_debug.result | 4 +-
mysql-test/main/log_slow_debug.test | 4 +-
mysql-test/main/mdev6830.result | 2 +
mysql-test/main/mdev6830.test | 4 +-
mysql-test/main/merge-big.result | 2 +-
mysql-test/main/merge-big.test | 2 +-
mysql-test/main/merge_debug.result | 2 +-
mysql-test/main/merge_debug.test | 2 +-
mysql-test/main/myisam_debug.result | 4 +-
mysql-test/main/myisam_debug.test | 4 +-
mysql-test/main/range_innodb.result | 3 +-
mysql-test/main/range_innodb.test | 3 +-
mysql-test/main/range_interrupted-13751.result | 4 +-
mysql-test/main/range_interrupted-13751.test | 4 +-
mysql-test/main/select_debug.result | 2 +
mysql-test/main/select_debug.test | 2 +
mysql-test/main/show_explain.result | 178 ++++++++++----------
mysql-test/main/show_explain.test | 182 ++++++++++-----------
mysql-test/main/show_explain_non_select.result | 8 +-
mysql-test/main/show_explain_non_select.test | 8 +-
mysql-test/main/show_explain_ps.result | 5 +-
mysql-test/main/show_explain_ps.test | 5 +-
mysql-test/main/slowlog_enospace-10508.result | 5 +-
mysql-test/main/slowlog_enospace-10508.test | 5 +-
mysql-test/main/stat_tables-enospc.result | 5 +-
mysql-test/main/stat_tables-enospc.test | 5 +-
mysql-test/main/union_crash-714.result | 4 +-
mysql-test/main/union_crash-714.test | 4 +-
mysql-test/main/warnings_debug.result | 2 +
mysql-test/main/warnings_debug.test | 2 +
mysql-test/suite/binlog/include/binlog_ioerr.inc | 3 +-
mysql-test/suite/binlog/r/binlog_ioerr.result | 3 +-
.../suite/binlog/r/binlog_write_error.result | 51 ++++--
.../suite/binlog_encryption/binlog_ioerr.result | 3 +-
.../binlog_encryption/binlog_write_error.result | 51 ++++--
.../suite/binlog_encryption/rpl_checksum.result | 11 +-
.../suite/binlog_encryption/rpl_corruption.result | 5 +-
.../suite/binlog_encryption/rpl_incident.result | 2 +
.../binlog_encryption/rpl_init_slave_errors.result | 3 +-
mysql-test/suite/galera/disabled.def | 17 +-
mysql-test/suite/galera/r/MW-388.result | 4 +-
mysql-test/suite/galera/r/galera_events2.result | 1 +
.../galera/r/galera_ist_restart_joiner.result | 1 -
mysql-test/suite/galera/t/MW-388.test | 8 +-
mysql-test/suite/galera/t/galera_events2.test | 4 +
.../suite/gcol/r/innodb_virtual_debug_purge.result | 4 +-
.../suite/gcol/t/innodb_virtual_debug_purge.test | 4 +-
mysql-test/suite/innodb/r/blob-update-debug.result | 4 +-
.../suite/innodb/r/innodb-replace-debug.result | 4 +-
.../r/innodb-stats-initialize-failure.result | 3 +-
mysql-test/suite/innodb/r/innodb-wl5522-1.result | 2 +-
mysql-test/suite/innodb/r/innodb-wl5522.result | 2 +-
.../suite/innodb/r/innodb_bug11754376.result | 2 +
mysql-test/suite/innodb/r/innodb_bug56947.result | 2 +
.../suite/innodb/r/innodb_corrupt_bit.result | 2 +-
.../innodb/r/innodb_sys_semaphore_waits.result | 3 +-
.../innodb/r/redo_log_during_checkpoint.result | 6 +-
mysql-test/suite/innodb/t/blob-update-debug.test | 4 +-
.../suite/innodb/t/innodb-replace-debug.test | 4 +-
.../innodb/t/innodb-stats-initialize-failure.test | 5 +-
mysql-test/suite/innodb/t/innodb_bug11754376.test | 3 +-
mysql-test/suite/innodb/t/innodb_bug56947.test | 2 +
mysql-test/suite/innodb/t/innodb_corrupt_bit.test | 2 +-
.../suite/innodb/t/innodb_sys_semaphore_waits.test | 3 +-
.../suite/innodb/t/redo_log_during_checkpoint.test | 6 +-
.../suite/innodb_fts/r/concurrent_insert.result | 3 +-
.../suite/innodb_fts/t/concurrent_insert.test | 3 +-
mysql-test/suite/innodb_gis/r/rtree_debug.result | 2 +-
mysql-test/suite/innodb_gis/t/rtree_debug.test | 2 +-
mysql-test/suite/innodb_zip/r/wl5522_zip.result | 2 +-
.../suite/optimizer_unfixed_bugs/r/bug36981.result | 2 +
.../suite/optimizer_unfixed_bugs/r/bug40992.result | 2 +
.../suite/optimizer_unfixed_bugs/r/bug41996.result | 2 +
.../suite/optimizer_unfixed_bugs/r/bug42991.result | 2 +
.../suite/optimizer_unfixed_bugs/r/bug43249.result | 2 +
.../suite/optimizer_unfixed_bugs/r/bug43360.result | 2 +
.../suite/optimizer_unfixed_bugs/r/bug43448.result | 2 +
.../suite/optimizer_unfixed_bugs/r/bug43617.result | 2 +
.../suite/optimizer_unfixed_bugs/t/bug36981.test | 2 +
.../suite/optimizer_unfixed_bugs/t/bug40992.test | 2 +
.../suite/optimizer_unfixed_bugs/t/bug41996.test | 2 +
.../suite/optimizer_unfixed_bugs/t/bug42991.test | 2 +
.../suite/optimizer_unfixed_bugs/t/bug43249.test | 2 +
.../suite/optimizer_unfixed_bugs/t/bug43360.test | 2 +
.../suite/optimizer_unfixed_bugs/t/bug43448.test | 2 +
.../suite/optimizer_unfixed_bugs/t/bug43617.test | 3 +-
mysql-test/suite/parts/r/partition_debug.result | 4 +-
mysql-test/suite/parts/t/partition_debug.test | 4 +-
.../suite/parts/t/partition_debug_innodb.test | 4 +-
.../r/hostcache_ipv4_addrinfo_again_allow.result | 3 +-
.../r/hostcache_ipv4_addrinfo_again_deny.result | 3 +-
.../r/hostcache_ipv4_addrinfo_bad_allow.result | 3 +-
.../r/hostcache_ipv4_addrinfo_bad_deny.result | 3 +-
.../r/hostcache_ipv4_addrinfo_good_allow.result | 3 +-
.../r/hostcache_ipv4_addrinfo_good_deny.result | 3 +-
.../r/hostcache_ipv4_addrinfo_noname_allow.result | 3 +-
.../r/hostcache_ipv4_addrinfo_noname_deny.result | 3 +-
.../perfschema/r/hostcache_ipv4_auth_plugin.result | 3 +-
.../perfschema/r/hostcache_ipv4_blocked.result | 3 +-
.../perfschema/r/hostcache_ipv4_format.result | 3 +-
.../perfschema/r/hostcache_ipv4_max_con.result | 3 +-
.../r/hostcache_ipv4_nameinfo_again_allow.result | 3 +-
.../r/hostcache_ipv4_nameinfo_again_deny.result | 3 +-
.../r/hostcache_ipv4_nameinfo_noname_allow.result | 3 +-
.../r/hostcache_ipv4_nameinfo_noname_deny.result | 3 +-
.../perfschema/r/hostcache_ipv4_passwd.result | 3 +-
.../suite/perfschema/r/hostcache_ipv4_ssl.result | 3 +-
.../r/hostcache_ipv6_addrinfo_again_allow.result | 3 +-
.../r/hostcache_ipv6_addrinfo_again_deny.result | 3 +-
.../r/hostcache_ipv6_addrinfo_bad_allow.result | 3 +-
.../r/hostcache_ipv6_addrinfo_bad_deny.result | 3 +-
.../r/hostcache_ipv6_addrinfo_good_allow.result | 3 +-
.../r/hostcache_ipv6_addrinfo_good_deny.result | 3 +-
.../r/hostcache_ipv6_addrinfo_noname_allow.result | 3 +-
.../r/hostcache_ipv6_addrinfo_noname_deny.result | 3 +-
.../perfschema/r/hostcache_ipv6_auth_plugin.result | 3 +-
.../perfschema/r/hostcache_ipv6_blocked.result | 3 +-
.../perfschema/r/hostcache_ipv6_max_con.result | 3 +-
.../r/hostcache_ipv6_nameinfo_again_allow.result | 3 +-
.../r/hostcache_ipv6_nameinfo_again_deny.result | 3 +-
.../r/hostcache_ipv6_nameinfo_noname_allow.result | 3 +-
.../r/hostcache_ipv6_nameinfo_noname_deny.result | 3 +-
.../perfschema/r/hostcache_ipv6_passwd.result | 3 +-
.../suite/perfschema/r/hostcache_ipv6_ssl.result | 3 +-
.../suite/perfschema/r/hostcache_peer_addr.result | 3 +-
.../t/hostcache_ipv4_addrinfo_again_allow.test | 3 +-
.../t/hostcache_ipv4_addrinfo_again_deny.test | 3 +-
.../t/hostcache_ipv4_addrinfo_bad_allow.test | 3 +-
.../t/hostcache_ipv4_addrinfo_bad_deny.test | 3 +-
.../t/hostcache_ipv4_addrinfo_good_allow.test | 3 +-
.../t/hostcache_ipv4_addrinfo_good_deny.test | 3 +-
.../t/hostcache_ipv4_addrinfo_noname_allow.test | 3 +-
.../t/hostcache_ipv4_addrinfo_noname_deny.test | 3 +-
.../perfschema/t/hostcache_ipv4_auth_plugin.test | 3 +-
.../suite/perfschema/t/hostcache_ipv4_blocked.test | 3 +-
.../suite/perfschema/t/hostcache_ipv4_format.test | 3 +-
.../suite/perfschema/t/hostcache_ipv4_max_con.test | 3 +-
.../t/hostcache_ipv4_nameinfo_again_allow.test | 3 +-
.../t/hostcache_ipv4_nameinfo_again_deny.test | 3 +-
.../t/hostcache_ipv4_nameinfo_noname_allow.test | 3 +-
.../t/hostcache_ipv4_nameinfo_noname_deny.test | 3 +-
.../suite/perfschema/t/hostcache_ipv4_passwd.test | 3 +-
.../suite/perfschema/t/hostcache_ipv4_ssl.test | 3 +-
.../t/hostcache_ipv6_addrinfo_again_allow.test | 3 +-
.../t/hostcache_ipv6_addrinfo_again_deny.test | 3 +-
.../t/hostcache_ipv6_addrinfo_bad_allow.test | 3 +-
.../t/hostcache_ipv6_addrinfo_bad_deny.test | 3 +-
.../t/hostcache_ipv6_addrinfo_good_allow.test | 3 +-
.../t/hostcache_ipv6_addrinfo_good_deny.test | 3 +-
.../t/hostcache_ipv6_addrinfo_noname_allow.test | 4 +-
.../t/hostcache_ipv6_addrinfo_noname_deny.test | 4 +-
.../perfschema/t/hostcache_ipv6_auth_plugin.test | 3 +-
.../suite/perfschema/t/hostcache_ipv6_blocked.test | 3 +-
.../suite/perfschema/t/hostcache_ipv6_max_con.test | 3 +-
.../t/hostcache_ipv6_nameinfo_again_allow.test | 3 +-
.../t/hostcache_ipv6_nameinfo_again_deny.test | 3 +-
.../t/hostcache_ipv6_nameinfo_noname_allow.test | 3 +-
.../t/hostcache_ipv6_nameinfo_noname_deny.test | 3 +-
.../suite/perfschema/t/hostcache_ipv6_passwd.test | 3 +-
.../suite/perfschema/t/hostcache_ipv6_ssl.test | 3 +-
.../suite/perfschema/t/hostcache_peer_addr.test | 3 +-
mysql-test/suite/rpl/include/rpl_checksum.inc | 12 +-
mysql-test/suite/rpl/include/rpl_corruption.inc | 5 +-
mysql-test/suite/rpl/include/rpl_incident.inc | 6 +-
.../suite/rpl/include/rpl_init_slave_errors.inc | 3 +-
mysql-test/suite/rpl/r/kill_race_condition.result | 3 +-
.../suite/rpl/r/rpl_binlog_rollback_cleanup.result | 9 +
mysql-test/suite/rpl/r/rpl_bug33931.result | 3 +-
mysql-test/suite/rpl/r/rpl_bug41902.result | 3 +
mysql-test/suite/rpl/r/rpl_checksum.result | 11 +-
mysql-test/suite/rpl/r/rpl_corruption.result | 5 +-
mysql-test/suite/rpl/r/rpl_heartbeat_debug.result | 4 +-
mysql-test/suite/rpl/r/rpl_incident.result | 2 +
.../suite/rpl/r/rpl_init_slave_errors.result | 3 +-
mysql-test/suite/rpl/r/rpl_row_big_table_id.result | 2 +
.../suite/rpl/r/rpl_row_find_row_debug.result | 3 +-
.../suite/rpl/r/rpl_semi_sync_skip_repl.result | 3 +-
.../suite/rpl/r/rpl_show_slave_running.result | 3 +-
.../rpl/r/rpl_slave_load_remove_tmpfile.result | 3 +-
mysql-test/suite/rpl/r/rpl_stop_slave.result | 6 +-
mysql-test/suite/rpl/r/rpl_view_debug.result | 2 +
mysql-test/suite/rpl/t/kill_race_condition.test | 3 +-
.../suite/rpl/t/rpl_binlog_rollback_cleanup.test | 46 ++++++
mysql-test/suite/rpl/t/rpl_bug33931.test | 3 +-
mysql-test/suite/rpl/t/rpl_bug41902.test | 5 +-
.../rpl/t/rpl_get_master_version_and_clock.test | 6 +-
mysql-test/suite/rpl/t/rpl_heartbeat_debug.test | 4 +-
mysql-test/suite/rpl/t/rpl_row_big_table_id.test | 3 +-
mysql-test/suite/rpl/t/rpl_row_find_row_debug.test | 4 +-
mysql-test/suite/rpl/t/rpl_row_index_choice.test | 4 +-
.../suite/rpl/t/rpl_semi_sync_skip_repl.test | 4 +-
mysql-test/suite/rpl/t/rpl_show_slave_running.test | 4 +-
.../suite/rpl/t/rpl_slave_load_remove_tmpfile.test | 4 +-
mysql-test/suite/rpl/t/rpl_stop_slave.test | 12 +-
mysql-test/suite/rpl/t/rpl_view_debug.test | 3 +-
mysql-test/suite/sys_vars/r/debug_dbug_func.result | 6 +-
mysql-test/suite/sys_vars/t/debug_dbug_func.test | 6 +-
mysql-test/suite/wsrep/disabled.def | 2 +-
mysql-test/suite/wsrep/t/binlog_format.opt | 2 +-
sql/item_func.cc | 3 +-
sql/log.cc | 3 +-
sql/sp_head.cc | 12 +-
sql/sp_head.h | 2 -
sql/sql_select.cc | 13 +-
storage/innobase/buf/buf0dblwr.cc | 4 +-
storage/innobase/row/row0import.cc | 70 +++++---
219 files changed, 841 insertions(+), 524 deletions(-)
diff --cc mysql-test/main/log_slow_debug.result
index 71a76342aba,00000000000..16d67d7fdc7
mode 100644,000000..100644
--- a/mysql-test/main/log_slow_debug.result
+++ b/mysql-test/main/log_slow_debug.result
@@@ -1,226 -1,0 +1,226 @@@
+SET @org_slow_query_log= @@global.slow_query_log;
+SET @org_log_output= @@global.log_output;
+SET @org_log_slow_admin_statements= @@global.log_slow_admin_statements;
+SET @@GLOBAL.slow_query_log=OFF;
+SET @@GLOBAL.log_output='TABLE';
+FLUSH SLOW LOGS;
+SET @@GLOBAL.slow_query_log=ON;
+SET @@GLOBAL.log_slow_admin_statements=ON;
- SET @save_dbug = @@debug_dbug;
++SET @saved_dbug = @@debug_dbug;
+SET SESSION debug_dbug="+d,simulate_slow_query";
+CREATE PROCEDURE show_slow_log()
+BEGIN
+SELECT CONCAT('[slow] ', sql_text) AS sql_text
+FROM mysql.slow_log
+WHERE sql_text NOT LIKE '%debug_dbug%';
+END
+$$
+#
+# Expect all admin statements in the slow log (ON,DEFAULT)
+#
+SET @@SESSION.log_slow_admin_statements=ON;
+SET log_slow_filter=DEFAULT;
+TRUNCATE TABLE mysql.slow_log;
+CREATE TABLE t1 (a INT);
+CREATE INDEX t1a ON t1 (a);
+DROP INDEX t1a ON t1;
+DROP TABLE t1;
+CREATE TABLE t2 (a INT);
+ALTER TABLE t2 RENAME t2;
+RENAME TABLE t2 TO t3;
+DROP TABLE t3;
+CREATE TABLE t4 (a INT);
+PREPARE stmt FROM 'ALTER TABLE t4 MODIFY a INT DEFAULT 1';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP TABLE t4;
+CREATE SEQUENCE s4;
+ALTER SEQUENCE s4 MAXVALUE 100;
+PREPARE stmt FROM 'ALTER SEQUENCE s4 MAXVALUE=101';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP SEQUENCE s4;
+CALL show_slow_log();
+sql_text
+[slow] TRUNCATE TABLE mysql.slow_log
+[slow] CREATE TABLE t1 (a INT)
+[slow] CREATE INDEX t1a ON t1 (a)
+[slow] DROP INDEX t1a ON t1
+[slow] DROP TABLE t1
+[slow] CREATE TABLE t2 (a INT)
+[slow] ALTER TABLE t2 RENAME t2
+[slow] RENAME TABLE t2 TO t3
+[slow] DROP TABLE t3
+[slow] CREATE TABLE t4 (a INT)
+[slow] PREPARE stmt FROM 'ALTER TABLE t4 MODIFY a INT DEFAULT 1'
+[slow] ALTER TABLE t4 MODIFY a INT DEFAULT 1
+[slow] DEALLOCATE PREPARE stmt
+[slow] DROP TABLE t4
+[slow] CREATE SEQUENCE s4
+[slow] ALTER SEQUENCE s4 MAXVALUE 100
+[slow] PREPARE stmt FROM 'ALTER SEQUENCE s4 MAXVALUE=101'
+[slow] ALTER SEQUENCE s4 MAXVALUE=101
+[slow] DEALLOCATE PREPARE stmt
+[slow] DROP SEQUENCE s4
+#
+# Expect all admin statements in the slow log (ON,admin)
+#
+SET @@SESSION.log_slow_admin_statements=ON;
+SET log_slow_filter=admin;
+TRUNCATE TABLE mysql.slow_log;
+CREATE TABLE t1 (a INT);
+CREATE INDEX t1a ON t1 (a);
+DROP INDEX t1a ON t1;
+DROP TABLE t1;
+CREATE TABLE t2 (a INT);
+ALTER TABLE t2 RENAME t2;
+RENAME TABLE t2 TO t3;
+DROP TABLE t3;
+CREATE TABLE t4 (a INT);
+PREPARE stmt FROM 'ALTER TABLE t4 MODIFY a INT DEFAULT 1';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP TABLE t4;
+CREATE SEQUENCE s4;
+ALTER SEQUENCE s4 MAXVALUE 100;
+PREPARE stmt FROM 'ALTER SEQUENCE s4 MAXVALUE=101';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP SEQUENCE s4;
+CALL show_slow_log();
+sql_text
+[slow] CREATE INDEX t1a ON t1 (a)
+[slow] DROP INDEX t1a ON t1
+[slow] ALTER TABLE t2 RENAME t2
+[slow] RENAME TABLE t2 TO t3
+[slow] ALTER TABLE t4 MODIFY a INT DEFAULT 1
+[slow] ALTER SEQUENCE s4 MAXVALUE 100
+[slow] ALTER SEQUENCE s4 MAXVALUE=101
+#
+# Expect none of admin DDL statements in the slow log (ON,filesort)
+#
+SET @@SESSION.log_slow_admin_statements=ON;
+SET log_slow_filter=filesort;
+TRUNCATE TABLE mysql.slow_log;
+CREATE TABLE t1 (a INT);
+CREATE INDEX t1a ON t1 (a);
+DROP INDEX t1a ON t1;
+DROP TABLE t1;
+CREATE TABLE t2 (a INT);
+ALTER TABLE t2 RENAME t2;
+RENAME TABLE t2 TO t3;
+DROP TABLE t3;
+CREATE TABLE t4 (a INT);
+PREPARE stmt FROM 'ALTER TABLE t4 MODIFY a INT DEFAULT 1';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP TABLE t4;
+CREATE SEQUENCE s4;
+ALTER SEQUENCE s4 MAXVALUE 100;
+PREPARE stmt FROM 'ALTER SEQUENCE s4 MAXVALUE=101';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP SEQUENCE s4;
+CALL show_slow_log();
+sql_text
+#
+# Expect none of admin statements in the slow log (OFF,DEFAULT)
+#
+SET @@SESSION.log_slow_admin_statements=OFF;
+SET log_slow_filter=DEFAULT;
+TRUNCATE TABLE mysql.slow_log;
+CREATE TABLE t1 (a INT);
+CREATE INDEX t1a ON t1 (a);
+DROP INDEX t1a ON t1;
+DROP TABLE t1;
+CREATE TABLE t2 (a INT);
+ALTER TABLE t2 RENAME t2;
+RENAME TABLE t2 TO t3;
+DROP TABLE t3;
+CREATE TABLE t4 (a INT);
+PREPARE stmt FROM 'ALTER TABLE t4 MODIFY a INT DEFAULT 1';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP TABLE t4;
+CREATE SEQUENCE s4;
+ALTER SEQUENCE s4 MAXVALUE 100;
+PREPARE stmt FROM 'ALTER SEQUENCE s4 MAXVALUE=101';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP SEQUENCE s4;
+CALL show_slow_log();
+sql_text
+[slow] TRUNCATE TABLE mysql.slow_log
+[slow] CREATE TABLE t1 (a INT)
+[slow] DROP TABLE t1
+[slow] CREATE TABLE t2 (a INT)
+[slow] DROP TABLE t3
+[slow] CREATE TABLE t4 (a INT)
+[slow] PREPARE stmt FROM 'ALTER TABLE t4 MODIFY a INT DEFAULT 1'
+[slow] DEALLOCATE PREPARE stmt
+[slow] DROP TABLE t4
+[slow] CREATE SEQUENCE s4
+[slow] PREPARE stmt FROM 'ALTER SEQUENCE s4 MAXVALUE=101'
+[slow] DEALLOCATE PREPARE stmt
+[slow] DROP SEQUENCE s4
+#
+# Expect all admin statements in the slow log (GLOBAL OFF,LOCAL ON,DEFAULT)
+# In the original implementation, this combination disabled slow log for admin commands.
+# However, instead of this exception in GLOBAL vs LOCAL variable behaviour,
+# we should make max_system_variables.log_slow_admin_statements=0
+# prevent enabling globally suppressed logging by setting the session variable to ON.
+#
+SET @@GLOBAL.log_slow_admin_statements=OFF;
+SET @@SESSION.log_slow_admin_statements=ON;
+SET log_slow_filter=DEFAULT;
+TRUNCATE TABLE mysql.slow_log;
+CREATE TABLE t1 (a INT);
+CREATE INDEX t1a ON t1 (a);
+DROP INDEX t1a ON t1;
+DROP TABLE t1;
+CREATE TABLE t2 (a INT);
+ALTER TABLE t2 RENAME t2;
+RENAME TABLE t2 TO t3;
+DROP TABLE t3;
+CREATE TABLE t4 (a INT);
+PREPARE stmt FROM 'ALTER TABLE t4 MODIFY a INT DEFAULT 1';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP TABLE t4;
+CREATE SEQUENCE s4;
+ALTER SEQUENCE s4 MAXVALUE 100;
+PREPARE stmt FROM 'ALTER SEQUENCE s4 MAXVALUE=101';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP SEQUENCE s4;
+CALL show_slow_log();
+sql_text
+[slow] TRUNCATE TABLE mysql.slow_log
+[slow] CREATE TABLE t1 (a INT)
+[slow] CREATE INDEX t1a ON t1 (a)
+[slow] DROP INDEX t1a ON t1
+[slow] DROP TABLE t1
+[slow] CREATE TABLE t2 (a INT)
+[slow] ALTER TABLE t2 RENAME t2
+[slow] RENAME TABLE t2 TO t3
+[slow] DROP TABLE t3
+[slow] CREATE TABLE t4 (a INT)
+[slow] PREPARE stmt FROM 'ALTER TABLE t4 MODIFY a INT DEFAULT 1'
+[slow] ALTER TABLE t4 MODIFY a INT DEFAULT 1
+[slow] DEALLOCATE PREPARE stmt
+[slow] DROP TABLE t4
+[slow] CREATE SEQUENCE s4
+[slow] ALTER SEQUENCE s4 MAXVALUE 100
+[slow] PREPARE stmt FROM 'ALTER SEQUENCE s4 MAXVALUE=101'
+[slow] ALTER SEQUENCE s4 MAXVALUE=101
+[slow] DEALLOCATE PREPARE stmt
+[slow] DROP SEQUENCE s4
+#
+# Clean up
+#
- SET SESSION debug_dbug=@save_dbug;
++SET SESSION debug_dbug=@saved_dbug;
+TRUNCATE mysql.slow_log;
+SET @@global.slow_query_log= @org_slow_query_log;
+SET @@global.log_output= @org_log_output;
+SET @@global.log_slow_admin_statements= @org_log_slow_admin_statements;
+DROP PROCEDURE show_slow_log;
diff --cc mysql-test/main/log_slow_debug.test
index 6b7cdd62f8a,00000000000..aba4cbc8dcb
mode 100644,000000..100644
--- a/mysql-test/main/log_slow_debug.test
+++ b/mysql-test/main/log_slow_debug.test
@@@ -1,95 -1,0 +1,95 @@@
+-- source include/have_debug.inc
+
+SET @org_slow_query_log= @@global.slow_query_log;
+SET @org_log_output= @@global.log_output;
+SET @org_log_slow_admin_statements= @@global.log_slow_admin_statements;
+
+SET @@GLOBAL.slow_query_log=OFF;
+SET @@GLOBAL.log_output='TABLE';
+FLUSH SLOW LOGS;
+SET @@GLOBAL.slow_query_log=ON;
+SET @@GLOBAL.log_slow_admin_statements=ON;
- SET @save_dbug = @@debug_dbug;
++SET @saved_dbug = @@debug_dbug;
+SET SESSION debug_dbug="+d,simulate_slow_query";
+
+DELIMITER $$;
+CREATE PROCEDURE show_slow_log()
+BEGIN
+ SELECT CONCAT('[slow] ', sql_text) AS sql_text
+ FROM mysql.slow_log
+ WHERE sql_text NOT LIKE '%debug_dbug%';
+END
+$$
+DELIMITER ;$$
+
+
+--echo #
+--echo # Expect all admin statements in the slow log (ON,DEFAULT)
+--echo #
+
+SET @@SESSION.log_slow_admin_statements=ON;
+SET log_slow_filter=DEFAULT;
+TRUNCATE TABLE mysql.slow_log;
+--source include/log_slow_debug_common.inc
+CALL show_slow_log();
+
+
+--echo #
+--echo # Expect all admin statements in the slow log (ON,admin)
+--echo #
+
+SET @@SESSION.log_slow_admin_statements=ON;
+SET log_slow_filter=admin;
+TRUNCATE TABLE mysql.slow_log;
+--source include/log_slow_debug_common.inc
+CALL show_slow_log();
+
+
+--echo #
+--echo # Expect none of admin DDL statements in the slow log (ON,filesort)
+--echo #
+
+SET @@SESSION.log_slow_admin_statements=ON;
+SET log_slow_filter=filesort;
+TRUNCATE TABLE mysql.slow_log;
+--source include/log_slow_debug_common.inc
+CALL show_slow_log();
+
+
+--echo #
+--echo # Expect none of admin statements in the slow log (OFF,DEFAULT)
+--echo #
+
+SET @@SESSION.log_slow_admin_statements=OFF;
+SET log_slow_filter=DEFAULT;
+TRUNCATE TABLE mysql.slow_log;
+--source include/log_slow_debug_common.inc
+CALL show_slow_log();
+
+
+--echo #
+--echo # Expect all admin statements in the slow log (GLOBAL OFF,LOCAL ON,DEFAULT)
+--echo # In the original implementation, this combination disabled slow log for admin commands.
+--echo # However, instead of this exception in GLOBAL vs LOCAL variable behaviour,
+--echo # we should make max_system_variables.log_slow_admin_statements=0
+--echo # prevent enabling globally suppressed logging by setting the session variable to ON.
+--echo #
+
+SET @@GLOBAL.log_slow_admin_statements=OFF;
+SET @@SESSION.log_slow_admin_statements=ON;
+SET log_slow_filter=DEFAULT;
+TRUNCATE TABLE mysql.slow_log;
+--source include/log_slow_debug_common.inc
+CALL show_slow_log();
+
+
+--echo #
+--echo # Clean up
+--echo #
+
- SET SESSION debug_dbug=@save_dbug;
++SET SESSION debug_dbug=@saved_dbug;
+TRUNCATE mysql.slow_log;
+SET @@global.slow_query_log= @org_slow_query_log;
+SET @@global.log_output= @org_log_output;
+SET @@global.log_slow_admin_statements= @org_log_slow_admin_statements;
+DROP PROCEDURE show_slow_log;
diff --cc mysql-test/main/show_explain_ps.result
index 69ee2cab31d,00000000000..1c3be0d5953
mode 100644,000000..100644
--- a/mysql-test/main/show_explain_ps.result
+++ b/mysql-test/main/show_explain_ps.result
@@@ -1,41 -1,0 +1,42 @@@
+truncate table performance_schema.events_statements_history_long;
+truncate table performance_schema.events_stages_history_long;
+drop table if exists t0, t1;
+select * from performance_schema.setup_instruments where name like '%show_explain%';
+NAME ENABLED TIMED
+wait/synch/cond/sql/show_explain YES YES
+stage/sql/Show explain YES YES
+statement/sql/show_explain YES YES
+# We've got no instances
+select * from performance_schema.cond_instances where name like '%show_explain%';
+NAME OBJECT_INSTANCE_BEGIN
+# Check out if our cond is hit.
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+connect con1, localhost, root,,;
+connection con1;
+connection default;
+connection con1;
+set @show_explain_probe_select_id=1;
- set debug_dbug='d,show_explain_probe_join_exec_start';
++SET @saved_dbug = @@SESSION.debug_dbug;
++SET debug_dbug='d,show_explain_probe_join_exec_start';
+select count(*) from t0 where a < 100000;
+connection default;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using where
+Warnings:
+Note 1003 select count(*) from t0 where a < 100000
+connection con1;
+count(*)
+10
- set debug_dbug='';
++SET debug_dbug= @saved_dbug;
+select event_name
+from
+performance_schema.events_stages_history_long join
+performance_schema.threads using (thread_id)
+where
+event_name like '%show explain' and
+processlist_id=$thr1;
+event_name
+stage/sql/Show explain
+drop table t0;
diff --cc mysql-test/suite/galera/r/MW-388.result
index a2cf02712bb,2785c1d0326..59d499e117d
--- a/mysql-test/suite/galera/r/MW-388.result
+++ b/mysql-test/suite/galera/r/MW-388.result
@@@ -18,12 -18,10 +18,10 @@@ connection node_1a
SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
connection node_1;
SET SESSION wsrep_sync_wait = 0;
- SET SESSION DEBUG_SYNC = 'wsrep_after_replication SIGNAL wsrep_after_replication_reached WAIT_FOR wsrep_after_replication_continue';
CALL insert_proc ();;
connection node_1a;
- SET SESSION DEBUG_SYNC = "now WAIT_FOR wsrep_after_replication_reached";
+ SET SESSION wsrep_sync_wait = 0;
-SET GLOBAL debug_dbug = "";
+SET GLOBAL DEBUG_DBUG = "";
- SET DEBUG_SYNC = "now SIGNAL wsrep_after_replication_continue";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
connection node_2;
connection node_1;
diff --cc mysql-test/suite/galera/t/MW-388.test
index 042b7e2fee5,40522f30abb..09fc8a8bfc9
--- a/mysql-test/suite/galera/t/MW-388.test
+++ b/mysql-test/suite/galera/t/MW-388.test
@@@ -29,8 -29,10 +29,10 @@@ DELIMITER ;
# local monitor, and our INSERT remains stuck there.
SET GLOBAL wsrep_slave_threads = 2;
-SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb";
+ --let $expected_cert_failures = `SELECT VARIABLE_VALUE + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures'`
+
--connection node_2
--send INSERT INTO t1 VALUES (1, 'node 2');
@@@ -43,12 -45,11 +45,12 @@@ SET SESSION wsrep_sync_wait = 0
--send CALL insert_proc ();
--connection node_1a
- SET SESSION DEBUG_SYNC = "now WAIT_FOR wsrep_after_replication_reached";
+ SET SESSION wsrep_sync_wait = 0;
+ --let $wait_condition = SELECT VARIABLE_VALUE = $expected_cert_failures FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures'
+ --source include/wait_condition.inc
-SET GLOBAL debug_dbug = "";
+
+SET GLOBAL DEBUG_DBUG = "";
- SET DEBUG_SYNC = "now SIGNAL wsrep_after_replication_continue";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
--connection node_2
diff --cc mysql-test/suite/rpl/r/rpl_row_find_row_debug.result
index 269dc16733a,650f6eeee51..f1a0059a04f
--- a/mysql-test/suite/rpl/r/rpl_row_find_row_debug.result
+++ b/mysql-test/suite/rpl/r/rpl_row_find_row_debug.result
@@@ -15,10 -16,9 +16,10 @@@ DELETE FROM t1
DROP TABLE t1;
connection slave;
# Check if any note related to long DELETE_ROWS and UPDATE_ROWS appears in the error log
-Occurrences: update=1, delete=1
+FOUND 1 /The slave is applying a ROW event on behalf of an UPDATE statement on table t1 and is currently taking a considerable amount/ in mysqld.2.err
+FOUND 1 /The slave is applying a ROW event on behalf of a DELETE statement on table t1 and is currently taking a considerable amount/ in mysqld.2.err
include/stop_slave.inc
- SET GLOBAL debug_dbug = '';
+ SET @@GLOBAL.debug_dbug = @saved_dbug;
SET GLOBAL log_warnings = 2;
include/start_slave.inc
include/rpl_end.inc
diff --cc mysql-test/suite/rpl/r/rpl_stop_slave.result
index 4b9c544527b,597df34c302..a4dbf13290a
--- a/mysql-test/suite/rpl/r/rpl_stop_slave.result
+++ b/mysql-test/suite/rpl/r/rpl_stop_slave.result
@@@ -15,8 -15,8 +15,8 @@@ include/stop_slave.in
# Suspend the INSERT statement in current transaction on SQL thread.
# It guarantees that SQL thread is applying the transaction when
# STOP SLAVE command launchs.
- set @old_debug=@@global.debug;
+ SET @saved_dbug = @@GLOBAL.debug_dbug;
-SET GLOBAL debug_dbug= '+d,after_mysql_insert,*';
+set global debug_dbug= '+d,after_mysql_insert';
include/start_slave.inc
# CREATE TEMPORARY TABLE with InnoDB engine
@@@ -99,8 -100,8 +100,8 @@@ connection slave
include/stop_slave.inc
connection master;
include/stop_dump_threads.inc
- set @old_debug=@@global.debug;
+ SET @saved_dbug = @@GLOBAL.debug_dbug;
-SET GLOBAL debug_dbug= '+d,dump_thread_wait_before_send_xid,*';
+set global debug_dbug= '+d,dump_thread_wait_before_send_xid';
connection slave;
include/start_slave.inc
BEGIN;
diff --cc mysql-test/suite/rpl/r/rpl_view_debug.result
index 497f4303698,e23f33c035f..1ec3dda9daa
--- a/mysql-test/suite/rpl/r/rpl_view_debug.result
+++ b/mysql-test/suite/rpl/r/rpl_view_debug.result
@@@ -21,9 -21,10 +21,10 @@@ Tables_in_tes
t1
v1
connection master;
+ SET @saved_dbug = @@SESSION.debug_dbug;
set @@debug_dbug="d,simulate_register_view_failure";
CREATE VIEW v2 as SELECT * FROM t1;
-ERROR HY000: Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space
+ERROR HY000: Out of memory.
show tables;
Tables_in_test
t1
diff --cc mysql-test/suite/rpl/t/rpl_get_master_version_and_clock.test
index 9c4cc1bc34d,a61c06a92e5..02665c70b9e
--- a/mysql-test/suite/rpl/t/rpl_get_master_version_and_clock.test
+++ b/mysql-test/suite/rpl/t/rpl_get_master_version_and_clock.test
@@@ -26,25 -26,23 +26,23 @@@ call mtr.add_suppression("Fatal error:
call mtr.add_suppression("Slave I/O thread .* register on master");
#Test case 1: Try to get the value of the UNIX_TIMESTAMP from master under network disconnection
- let $debug_saved= `select @@global.debug`;
+ SET @saved_dbug = @@GLOBAL.debug_dbug;
-# set up two parameters to pass into extra/rpl_tests/rpl_get_master_version_and_clock
+# set up two parameters to pass into include/rpl_get_master_version_and_clock
let $dbug_sync_point= 'debug_lock.before_get_UNIX_TIMESTAMP';
let $debug_sync_action= 'now SIGNAL signal.get_unix_timestamp';
-source extra/rpl_tests/rpl_get_master_version_and_clock.test;
+source include/rpl_get_master_version_and_clock.test;
#Test case 2: Try to get the value of the SERVER_ID from master under network disconnection
connection slave;
let $dbug_sync_point= 'debug_lock.before_get_SERVER_ID';
let $debug_sync_action= 'now SIGNAL signal.get_server_id';
-source extra/rpl_tests/rpl_get_master_version_and_clock.test;
+source include/rpl_get_master_version_and_clock.test;
- eval set global debug_dbug= '$debug_saved';
-
# cleanup
-
+ SET @@GLOBAL.debug_dbug = @saved_dbug;
# is not really necessary but avoids mtr post-run env check warnings
SET DEBUG_SYNC= 'RESET';
diff --cc mysql-test/suite/rpl/t/rpl_stop_slave.test
index 291524126ec,afe36334f18..17efa7ade3b
--- a/mysql-test/suite/rpl/t/rpl_stop_slave.test
+++ b/mysql-test/suite/rpl/t/rpl_stop_slave.test
@@@ -22,8 -22,8 +22,8 @@@ source include/stop_slave.inc
--echo # Suspend the INSERT statement in current transaction on SQL thread.
--echo # It guarantees that SQL thread is applying the transaction when
--echo # STOP SLAVE command launchs.
- set @old_debug=@@global.debug;
+ SET @saved_dbug = @@GLOBAL.debug_dbug;
-SET GLOBAL debug_dbug= '+d,after_mysql_insert,*';
+set global debug_dbug= '+d,after_mysql_insert';
source include/start_slave.inc;
--echo
@@@ -79,8 -77,8 +77,8 @@@ connection master
# make sure that there are no zombie threads
--source include/stop_dump_threads.inc
- set @old_debug=@@global.debug;
+ SET @saved_dbug = @@GLOBAL.debug_dbug;
-SET GLOBAL debug_dbug= '+d,dump_thread_wait_before_send_xid,*';
+set global debug_dbug= '+d,dump_thread_wait_before_send_xid';
connection slave;
--source include/start_slave.inc
diff --cc sql/log.cc
index 900cc1b8197,91dfac07993..0aac2ee61fb
--- a/sql/log.cc
+++ b/sql/log.cc
@@@ -481,18 -445,19 +481,19 @@@ private
It truncates the cache to a certain position. This includes deleting the
pending event.
*/
- void truncate(my_off_t pos)
+ void truncate(my_off_t pos, bool reset_cache=0)
{
DBUG_PRINT("info", ("truncating to position %lu", (ulong) pos));
+ cache_log.error=0;
if (pending())
{
delete pending();
set_pending(0);
}
- reinit_io_cache(&cache_log, WRITE_CACHE, pos, 0, 0);
+ reinit_io_cache(&cache_log, WRITE_CACHE, pos, 0, reset_cache);
cache_log.end_of_file= saved_max_binlog_cache_size;
}
-
+
binlog_cache_data& operator=(const binlog_cache_data& info);
binlog_cache_data(const binlog_cache_data& info);
};
diff --cc sql/sp_head.cc
index af4316085b7,1f5c6e96906..3a113de9dd5
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@@ -468,19 -532,12 +468,19 @@@ check_routine_name(const LEX_CSTRING *i
}
-sp_head* sp_head::create()
+/*
+ *
+ * sp_head
+ *
+ */
-
++
+sp_head *sp_head::create(sp_package *parent, const Sp_handler *handler)
{
MEM_ROOT own_root;
- init_sql_alloc(&own_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC, MYF(0));
+ init_sql_alloc(&own_root, "sp_head", MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC,
+ MYF(0));
sp_head *sp;
- if (!(sp= new (&own_root) sp_head(&own_root)))
+ if (!(sp= new (&own_root) sp_head(&own_root, parent, handler)))
free_root(&own_root, MYF(0));
return sp;
@@@ -493,35 -550,23 +493,41 @@@ void sp_head::destroy(sp_head *sp
{
/* Make a copy of main_mem_root as free_root will free the sp */
MEM_ROOT own_root= sp->main_mem_root;
- delete sp;
-
- DBUG_PRINT("info", ("mem_root %p moved to %p",
- &sp->main_mem_root, &own_root));
+ DBUG_PRINT("info", ("mem_root 0x%lx moved to 0x%lx",
+ (ulong) &sp->mem_root, (ulong) &own_root));
+ delete sp;
++
++
free_root(&own_root, MYF(0));
}
}
+ /*
+ *
+ * sp_head
+ *
+ */
-sp_head::sp_head(MEM_ROOT *mem_root_arg)
+sp_head::sp_head(MEM_ROOT *mem_root_arg, sp_package *parent,
+ const Sp_handler *sph)
:Query_arena(NULL, STMT_INITIALIZED_FOR_SP),
- main_mem_root(*mem_root_arg), // todo: std::move operator.
+ Database_qualified_name(&null_clex_str, &null_clex_str),
+ main_mem_root(*mem_root_arg),
+ m_parent(parent),
+ m_handler(sph),
m_flags(0),
+ m_tmp_query(NULL),
+ m_explicit_name(false),
+ /*
+ FIXME: the only use case when name is NULL is events, and it should
+ be rewritten soon. Remove the else part and replace 'if' with
+ an assert when this is done.
+ */
+ m_qname(null_clex_str),
+ m_params(null_clex_str),
+ m_body(null_clex_str),
+ m_body_utf8(null_clex_str),
+ m_defstr(null_clex_str),
m_sp_cache_version(0),
m_creation_ctx(0),
unsafe_flags(0),
diff --cc sql/sp_head.h
index 7e00cf7a0d8,882ff32f5e6..493bb777bdf
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@@ -316,16 -298,17 +316,14 @@@ public
being opened is probably enough).
*/
SQL_I_List<Item_trigger_field> m_trg_table_fields;
-private:
- // users must use sp= sp_head::create()
- sp_head(MEM_ROOT *mem_root_arg);
- // users must use sp_head::destroy(sp)
+protected:
+ sp_head(MEM_ROOT *mem_root, sp_package *parent, const Sp_handler *handler);
virtual ~sp_head();
--
public:
- static sp_head* create();
static void destroy(sp_head *sp);
+ static sp_head *create(sp_package *parent, const Sp_handler *handler);
-
/// Initialize after we have reset mem_root
void
init(LEX *lex);
1
0
revision-id: b04429434ad1ee7f49d263762a560f4ff31dd111 (mariadb-10.2.30-58-gb04429434ad)
parent(s): bb8226deabd177d70151d5e0729bf08533954ffd bde7e0ba6e94d576c4563022f38e8d81b1f6d54a
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2020-01-17 00:24:17 +0300
message:
Merge branch '10.1' into 10.2
# Conflicts:
# sql/sp_head.cc
# sql/sql_select.cc
# sql/sql_trigger.cc
sql/item_func.cc | 3 +-
sql/sp.cc | 2 +-
sql/sp_cache.cc | 2 +-
sql/sp_head.cc | 63 +++++++++++++++++----------------------
sql/sp_head.h | 17 ++++++-----
sql/sql_lex.cc | 4 +--
sql/sql_parse.cc | 2 +-
sql/sql_prepare.cc | 2 +-
sql/sql_select.cc | 13 ++++----
sql/sql_show.cc | 6 ++--
sql/sql_trigger.cc | 2 +-
sql/sql_yacc.yy | 2 +-
storage/innobase/buf/buf0dblwr.cc | 4 ++-
storage/xtradb/buf/buf0dblwr.cc | 4 ++-
14 files changed, 63 insertions(+), 63 deletions(-)
diff --cc sql/sp_head.cc
index 0179214e508,f940040b480..1f5c6e96906
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@@ -532,51 -550,41 +532,40 @@@ check_routine_name(LEX_STRING *ident
}
- /*
- *
- * sp_head
- *
- */
-
- void *
- sp_head::operator new(size_t size) throw()
+ sp_head* sp_head::create()
{
- DBUG_ENTER("sp_head::operator new");
MEM_ROOT own_root;
+ init_sql_alloc(&own_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC, MYF(0));
sp_head *sp;
+ if (!(sp= new (&own_root) sp_head(&own_root)))
+ free_root(&own_root, MYF(0));
- init_sql_alloc(&own_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC, MYF(0));
- sp= (sp_head *) alloc_root(&own_root, size);
- if (sp == NULL)
- DBUG_RETURN(NULL);
- sp->main_mem_root= own_root;
- DBUG_PRINT("info", ("mem_root %p", &sp->mem_root));
- DBUG_RETURN(sp);
+ return sp;
}
- void
- sp_head::operator delete(void *ptr, size_t size) throw()
- {
- DBUG_ENTER("sp_head::operator delete");
- MEM_ROOT own_root;
-
- if (ptr == NULL)
- DBUG_VOID_RETURN;
-
- sp_head *sp= (sp_head *) ptr;
- /* Make a copy of main_mem_root as free_root will free the sp */
- own_root= sp->main_mem_root;
- DBUG_PRINT("info", ("mem_root %p moved to %p",
- &sp->mem_root, &own_root));
- free_root(&own_root, MYF(0));
-
- DBUG_VOID_RETURN;
+ void sp_head::destroy(sp_head *sp)
+ {
+ if (sp)
+ {
+ /* Make a copy of main_mem_root as free_root will free the sp */
+ MEM_ROOT own_root= sp->main_mem_root;
++ DBUG_PRINT("info", ("mem_root %p moved to %p",
++ &sp->main_mem_root, &own_root));
+ delete sp;
-
- DBUG_PRINT("info", ("mem_root 0x%lx moved to 0x%lx",
- (ulong) &sp->mem_root, (ulong) &own_root));
+ free_root(&own_root, MYF(0));
+ }
}
+ /*
+ *
+ * sp_head
+ *
+ */
- sp_head::sp_head()
- :Query_arena(&main_mem_root, STMT_INITIALIZED_FOR_SP),
+ sp_head::sp_head(MEM_ROOT *mem_root_arg)
+ :Query_arena(NULL, STMT_INITIALIZED_FOR_SP),
+ main_mem_root(*mem_root_arg), // todo: std::move operator.
m_flags(0),
m_sp_cache_version(0),
m_creation_ctx(0),
diff --cc sql/sql_select.cc
index 72c1e876359,2358615affc..44ac1056a8a
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@@ -13863,19 -13669,23 +13863,22 @@@ static int compare_fields_by_table_orde
{
int cmp= 0;
bool outer_ref= 0;
- Item_field *f1= (Item_field *) (field1->real_item());
- Item_field *f2= (Item_field *) (field2->real_item());
- if (field1->const_item() || f1->const_item())
+ Item *field1_real= field1->real_item();
+ Item *field2_real= field2->real_item();
+
+ if (field1->const_item() || field1_real->const_item())
- return 1;
- if (field2->const_item() || field2_real->const_item())
return -1;
- if (field2->const_item() || f2->const_item())
-
++ if (field2->const_item() || field2_real->const_item())
+ return 1;
+ Item_field *f1= (Item_field *) field1_real;
+ Item_field *f2= (Item_field *) field2_real;
- if (f2->used_tables() & OUTER_REF_TABLE_BIT)
- {
- outer_ref= 1;
- cmp= -1;
- }
if (f1->used_tables() & OUTER_REF_TABLE_BIT)
{
- outer_ref= 1;
++ outer_ref= -1;
+ cmp= -1;
+ }
+ if (f2->used_tables() & OUTER_REF_TABLE_BIT)
+ {
outer_ref= 1;
cmp++;
}
diff --cc sql/sql_trigger.cc
index daea60c26bc,c4d348ce400..2d1c3e1cafb
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@@ -335,38 -356,6 +335,38 @@@ public
};
+Trigger::~Trigger()
+{
- delete body;
++ sp_head::destroy(body);
+}
+
+
+/**
+ Call a Table_triggers_list function for all triggers
+
+ @return 0 ok
+ @return # Something went wrong. Pointer to the trigger that mailfuncted
+ returned
+*/
+
+Trigger* Table_triggers_list::for_all_triggers(Triggers_processor func,
+ void *arg)
+{
+ for (uint i= 0; i < (uint)TRG_EVENT_MAX; i++)
+ {
+ for (uint j= 0; j < (uint)TRG_ACTION_MAX; j++)
+ {
+ for (Trigger *trigger= get_trigger(i,j) ;
+ trigger ;
+ trigger= trigger->next)
+ if ((trigger->*func)(arg))
+ return trigger;
+ }
+ }
+ return 0;
+}
+
+
/**
Create or drop trigger for table.
diff --cc storage/innobase/buf/buf0dblwr.cc
index 93201c3793f,32b4399b41d..d44bfbf2b9e
--- a/storage/innobase/buf/buf0dblwr.cc
+++ b/storage/innobase/buf/buf0dblwr.cc
@@@ -989,9 -960,10 +989,10 @@@ try_again
if (buf_dblwr->batch_running) {
/* Another thread is running the batch right now. Wait
for it to finish. */
- ib_int64_t sig_count = os_event_reset(buf_dblwr->b_event);
+ int64_t sig_count = os_event_reset(buf_dblwr->b_event);
mutex_exit(&buf_dblwr->mutex);
+ os_aio_simulated_wake_handler_threads();
os_event_wait_low(buf_dblwr->b_event, sig_count);
goto try_again;
}
@@@ -1119,8 -1095,9 +1120,9 @@@ try_again
point. The only exception is when a user thread is
forced to do a flush batch because of a sync
checkpoint. */
- ib_int64_t sig_count = os_event_reset(buf_dblwr->b_event);
+ int64_t sig_count = os_event_reset(buf_dblwr->b_event);
mutex_exit(&buf_dblwr->mutex);
+ os_aio_simulated_wake_handler_threads();
os_event_wait_low(buf_dblwr->b_event, sig_count);
goto try_again;
1
0
revision-id: afdd6191d5dcb004ec9ac0b908871ad8a370da34 (mariadb-10.4.11-18-gafdd6191d5d)
parent(s): 59d4f2a373a7960a533e653877ab69a97e91444a
author: Varun Gupta
committer: Varun Gupta
timestamp: 2020-01-03 02:26:58 +0530
message:
Big Test added for sorting
---
mysql-test/main/order_by_pack_big.result | 194 +++++++++++++++++++++++++++++++
mysql-test/main/order_by_pack_big.test | 107 +++++++++++++++++
2 files changed, 301 insertions(+)
diff --git a/mysql-test/main/order_by_pack_big.result b/mysql-test/main/order_by_pack_big.result
new file mode 100644
index 00000000000..66aad449c38
--- /dev/null
+++ b/mysql-test/main/order_by_pack_big.result
@@ -0,0 +1,194 @@
+set @save_rand_seed1= @@RAND_SEED1;
+set @save_rand_seed2= @@RAND_SEED2;
+set @@RAND_SEED1=810763568, @@RAND_SEED2=600681772;
+create table t1(a int);
+insert into t1 select seq from seq_1_to_10000 order by rand();
+#
+# parameters:
+# mean mean for the column to be considered
+# max_val max_value for the column to be considered
+#
+# This function also calculates the standard deviation
+# which is required to convert standard normal distribution
+# to normal distribution
+#
+CREATE FUNCTION f1(mean DOUBLE, max_val DOUBLE) RETURNS DOUBLE
+BEGIN
+DECLARE std_dev DOUBLE DEFAULT 0;
+SET @z= (rand() + rand() + rand() + rand() + rand() + rand() +
+rand() + rand() + rand() + rand() + rand() + rand() - 6);
+SET std_dev= (max_val - mean)/6;
+SET @z= std_dev*@z + mean;
+return @z;
+END|
+#
+# parameters:
+# len length of the random string to be generated
+#
+# This function generates a random string for the length passed
+# as an argument with characters in the range of [A,Z]
+#
+CREATE function f2(len INT) RETURNS varchar(128)
+BEGIN
+DECLARE str VARCHAR(256) DEFAULT '';
+DECLARE x INT DEFAULT 0;
+WHILE (len > 0) DO
+SET x =round(rand()*25);
+SET str= CONCAT(str, CHAR(65 + x));
+SET len= len-1;
+END WHILE;
+RETURN str;
+END|
+#
+# parameters:
+# mean mean for the column to be considered
+# min_val min_value for the column to be considered
+# max_val max_value for the column to be considered
+#
+CREATE function f3(mean DOUBLE, min_val DOUBLE, max_val DOUBLE) RETURNS INT
+BEGIN
+DECLARE r DOUBLE DEFAULT 0;
+WHILE 1=1 DO
+set r= f1(mean, max_val);
+IF (r >= min_val) THEN
+RETURN round(r);
+end if;
+END WHILE;
+RETURN 0;
+END|
+create table t2 (id INT NOT NULL, a INT, b int);
+insert into t2 select a, f3(12, 0, 64), f3(32, 0, 128) from t1;
+CREATE TABLE t3(
+id INT NOT NULL,
+names VARCHAR(64),
+address VARCHAR(128),
+PRIMARY KEY (id)
+);
+#
+# table t3 stores string calculated from the length stored in
+# table t2
+#
+insert into t3 select id, f2(a), f2(b) from t2;
+set sort_buffer_size=262144*10;
+flush status;
+select id,
+MD5(group_concat(substring(names,1,3), substring(address,1,3)))
+FROM t3
+GROUP BY id DIV 100
+ORDER BY id;
+id MD5(group_concat(substring(names,1,3), substring(address,1,3)))
+10 351239227a41de08388ea422f928cc29
+149 67299eb34e363edabe31576890087e97
+232 7ac931ef07a24ebe1293093ec6fa8f3d
+311 8625cade62c8b45c63d8978f8968ebb5
+430 362761f4180d40372667c8dd7cdcc436
+502 5380af74db071a35fb1d2491368e641b
+665 d3e3e2a2cb4e0de17c4f12e5b7745802
+719 5d93632d4c30ec99802f7be7582f4f2d
+883 27747ef400898c7eeeba3ebea8c42fb1
+942 d1e4ae80ca57b99ee49201b658a7b040
+1007 fceb25160237c8a3c262735b81d027ac
+1134 cfa9c86c901aaace0e9e94dc6a837468
+1226 4fb8e9ab9acdd251e7bc51db9e4d2f3b
+1367 e17fa4948562b3411f0b64084de0c605
+1486 85dd0f507e660600820f106dc8887edf
+1502 5bf6015f936908eed31f5769ad4b0d72
+1674 01f6c54ea21c4acd26f6c1df6abd793c
+1781 6d38cd061db1f30e2e37cd7d9ac600ad
+1803 2ac17a3853677ffde105735c92a9f2ea
+1969 e1e2e39e9d26baebe23232a429783feb
+2087 af67a443d21665bbb425a783f4e434fa
+2111 1906e379e9ae0b3b580fa134d2a5a146
+2268 2afaf9091f92fb8e409142552724a85e
+2328 5a8fd5d24c9f7c7bcfbcde84a5b0cfe2
+2416 d9a69c46523f71fce606c6d6c92ca516
+2599 55a436a6fb744eefd6878473c34fa41e
+2602 98317430fe15bcc9bb5968b5052c9106
+2777 8b5c30ae940ff7f31839309b535e3a15
+2858 0db2f3bcb138c2f91445c4205374a3b4
+2922 fed051b9185591bc0aaebd1e1471944d
+3027 f0cff102210e7fa32db222ac3444e4cf
+3131 c2f3f5a92d4c2b45cadd9c8cbf04d1be
+3220 8db6dfcca0461654dcb963fe2e1d8f41
+3331 42031ed42643c755dfd936eb96b28ed5
+3452 09f418c82012ff6789a6429be0c10f98
+3519 7d26aac1dbbcff68b528b8c1d80a2c7b
+3680 0ff5b4295168db71b997f6001bba7015
+3799 3460724c5fc7271a0a3189bf275b9b89
+3876 13f21a3dfc2bad54c12fffae7cdf8326
+3937 a240132ca8905b8165bf6e16fa6e7b3a
+4029 5fabf8408215c5bf896eda4e173a8a98
+4158 c7829b1eeda97ff8c9b2a24ead3f6df6
+4291 0d24e7e9da38dc44ffb43976560c4730
+4355 bc804d019300149cb891b8fe8afbe445
+4461 bb5a658677030b64ca3fd095f8a054fd
+4544 e04f6bfc8dcb8d8014ce39e1b707ed0b
+4646 06af0dd12faee32a07e785c4d56856b8
+4714 d0c99cc1aead7d06e5323867867d4b00
+4848 208d1ca5ade34053d92f96937f76380b
+4935 3b62eb6129970e714bdc74565183e183
+5014 9e19c021b79e32ea6fceb7ced26a3a68
+5184 41fa16423738302b2fdd6cda8e52f2c9
+5219 3ab8090c30c0206c1e30ce6cd76cb617
+5349 bd3e73dd60fbd1819aa468d3d0e6999c
+5400 80dc0e71fcbd2abfec9b585cc04a7545
+5507 96ed16d40a9e6a1231bc88bd6b3f9c3e
+5672 764347fc7e265a1478c890fa38d8c892
+5725 6767ae39fec9b789b8b542080162af46
+5849 41df99caa43ee3f3b162c66c3eb61a44
+5941 0725e779ca53da50461ef0d3758d819d
+6064 06d28bf28138d5726ab61e51a2e87edc
+6135 b2567b682dd449e358e11c4fb7f7bb72
+6289 8aa8131d32436add670fed1e7628b297
+6329 127b1600d2a9f857501f0263536d200b
+6404 266b87348831b9cc5b570e2b16c3006a
+6580 f70b98a00f6adb163c0f89bb6bb6d1ad
+6653 a13a591ba0c88985040c51fda2af7a72
+6773 ee4306ceb6a3266617707a1ca637c328
+6822 a8c368cc486b650f6254614535b5b051
+6938 a7c160cec86018b78942b60b62b5b7fd
+7085 eb360d65bc8080cd5879fb8ddee830cd
+7180 c54bebbb560d9e9196a6f986022d4253
+7290 4d1820f520483d785ba4e1c89b938f20
+7390 0d3cd69b8e02fde232df802f3e9fc7a2
+7449 7328ee3fe9383f891b9af5244c63a0e0
+7589 467169481de385077ebcad083dd36b0b
+7686 ae22b711e21ba0e0fe20ba713408263a
+7713 e20cd84a1ee8bd1d743947c9c381731d
+7844 bc3f0534e283616d6a4dbb0902c03fa6
+7935 146ea350d8f1cfef44aa7470cf9e02f8
+8059 3a88201a77ccbd8ce651eeb555c29fe5
+8153 9db1e67ef602768b7182401905bacc26
+8245 c5e6c51763b0bbc1a7e72fe1615f9440
+8310 ee37ab957141c733350e21a6ed2176f5
+8432 34ae43ecbfa6c96e12a8c315937d511f
+8596 710f7c0bc4fadbdd859352b584b19d66
+8647 df6f807e47599027749e1b09b04f6083
+8742 5efcaddfa993721074a1691947ca611e
+8856 40ad2459d26129770ac6ac2da757ad7e
+8967 344f6b2c8242b9b3bbd09898a80ba4ee
+9057 3084c365110820be5bbfc721f4b2f37d
+9148 13b2a5aa09a1f107f656e848a963e8ea
+9275 908187dba9416102a566b955b29f709e
+9311 d6c8096f5763c6ebdaccb3e2cc3ae686
+9488 62deb4d1a8900ea7cd7daa1909917490
+9518 730ecae84924d86922c82152c191d0f6
+9696 0a15d3446ba3d4b7ca8224633fbab666
+9752 a74f840a4e599466799d4e0879533da0
+9887 a7c29b0e5edfcd20572e0fda12a9e9aa
+9903 e89c3ab708646a5d73683ea68c4e366a
+10000 9cc0d2b033602eaea73fa9b2201b01b6
+show status like '%sort%';
+Variable_name Value
+Sort_merge_passes 0
+Sort_priority_queue_sorts 0
+Sort_range 0
+Sort_rows 10101
+Sort_scan 2
+set sort_buffer_size=default;
+set @@RAND_SEED1= @save_rand_seed1;
+set @@RAND_SEED2= @save_rand_seed2;
+drop function f1;
+drop function f2;
+drop function f3;
+drop table t1, t2, t3;
diff --git a/mysql-test/main/order_by_pack_big.test b/mysql-test/main/order_by_pack_big.test
new file mode 100644
index 00000000000..021edfee13f
--- /dev/null
+++ b/mysql-test/main/order_by_pack_big.test
@@ -0,0 +1,107 @@
+--source include/big_test.inc
+--source include/have_sequence.inc
+--source include/have_64bit.inc
+
+set @save_rand_seed1= @@RAND_SEED1;
+set @save_rand_seed2= @@RAND_SEED2;
+set @@RAND_SEED1=810763568, @@RAND_SEED2=600681772;
+
+create table t1(a int);
+insert into t1 select seq from seq_1_to_10000 order by rand();
+delimiter |;
+
+--echo #
+--echo # parameters:
+--echo # mean mean for the column to be considered
+--echo # max_val max_value for the column to be considered
+--echo #
+--echo # This function also calculates the standard deviation
+--echo # which is required to convert standard normal distribution
+--echo # to normal distribution
+--echo #
+
+CREATE FUNCTION f1(mean DOUBLE, max_val DOUBLE) RETURNS DOUBLE
+BEGIN
+ DECLARE std_dev DOUBLE DEFAULT 0;
+ SET @z= (rand() + rand() + rand() + rand() + rand() + rand() +
+ rand() + rand() + rand() + rand() + rand() + rand() - 6);
+ SET std_dev= (max_val - mean)/6;
+ SET @z= std_dev*@z + mean;
+ return @z;
+END|
+
+--echo #
+--echo # parameters:
+--echo # len length of the random string to be generated
+--echo #
+--echo # This function generates a random string for the length passed
+--echo # as an argument with characters in the range of [A,Z]
+--echo #
+
+CREATE function f2(len INT) RETURNS varchar(128)
+BEGIN
+ DECLARE str VARCHAR(256) DEFAULT '';
+ DECLARE x INT DEFAULT 0;
+ WHILE (len > 0) DO
+ SET x =round(rand()*25);
+ SET str= CONCAT(str, CHAR(65 + x));
+ SET len= len-1;
+ END WHILE;
+RETURN str;
+END|
+
+--echo #
+--echo # parameters:
+--echo # mean mean for the column to be considered
+--echo # min_val min_value for the column to be considered
+--echo # max_val max_value for the column to be considered
+--echo #
+
+CREATE function f3(mean DOUBLE, min_val DOUBLE, max_val DOUBLE) RETURNS INT
+BEGIN
+ DECLARE r DOUBLE DEFAULT 0;
+ WHILE 1=1 DO
+ set r= f1(mean, max_val);
+ IF (r >= min_val) THEN
+ RETURN round(r);
+ end if;
+ END WHILE;
+ RETURN 0;
+END|
+
+delimiter ;|
+
+create table t2 (id INT NOT NULL, a INT, b int);
+insert into t2 select a, f3(12, 0, 64), f3(32, 0, 128) from t1;
+
+CREATE TABLE t3(
+ id INT NOT NULL,
+ names VARCHAR(64),
+ address VARCHAR(128),
+ PRIMARY KEY (id)
+);
+
+--echo #
+--echo # table t3 stores string calculated from the length stored in
+--echo # table t2
+--echo #
+
+insert into t3 select id, f2(a), f2(b) from t2;
+
+set sort_buffer_size=262144*10;
+flush status;
+select id,
+ MD5(group_concat(substring(names,1,3), substring(address,1,3)))
+FROM t3
+GROUP BY id DIV 100
+ORDER BY id;
+show status like '%sort%';
+set sort_buffer_size=default;
+
+set @@RAND_SEED1= @save_rand_seed1;
+set @@RAND_SEED2= @save_rand_seed2;
+
+drop function f1;
+drop function f2;
+drop function f3;
+drop table t1, t2, t3;
2
1
[Commits] cabc7b62de1: MDEV-21377: Eq_ref access not picked by query with optimizer_use_condition_selectivity > 1
by Varun 13 Jan '20
by Varun 13 Jan '20
13 Jan '20
revision-id: cabc7b62de11d13b631cd643cacfd7f1e7b379e3 (mariadb-10.4.11-27-gcabc7b62de1)
parent(s): 983163209d026bfd979b4298053fcbdb373efa9d
author: Varun Gupta
committer: Varun Gupta
timestamp: 2020-01-11 18:47:16 +0530
message:
MDEV-21377: Eq_ref access not picked by query with optimizer_use_condition_selectivity > 1
The issue here is the estimate of the number of distinct ref access made by the optimizer
are greater than the records in the join prefix. This is incorrect, we need to make
sure that the number of distinct ref accesses are less than the records in the join
prefix
---
mysql-test/main/join_outer_innodb.result | 28 ++++++++--------
mysql-test/main/selectivity.result | 56 +++++++++++++++++++++++++++++++
mysql-test/main/selectivity.test | 38 +++++++++++++++++++++
mysql-test/main/selectivity_innodb.result | 56 +++++++++++++++++++++++++++++++
mysql-test/main/subselect_mat.result | 16 ++++-----
mysql-test/main/subselect_sj2_mat.result | 16 ++++-----
mysql-test/main/subselect_sj_mat.result | 16 ++++-----
mysql-test/main/subselect_sj_mat.test | 4 +--
sql/sql_select.cc | 3 ++
9 files changed, 190 insertions(+), 43 deletions(-)
diff --git a/mysql-test/main/join_outer_innodb.result b/mysql-test/main/join_outer_innodb.result
index a0358094baa..5d0c4f4afde 100644
--- a/mysql-test/main/join_outer_innodb.result
+++ b/mysql-test/main/join_outer_innodb.result
@@ -434,46 +434,46 @@ where t1.a10 = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a4,a6,a5,a7 NULL NULL NULL 3 Using where
1 SIMPLE t8 eq_ref PRIMARY PRIMARY 1 test.t1.a4 1 Using index
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 1 test.t1.a7 1
1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t1.a5 1
1 SIMPLE t12 eq_ref PRIMARY PRIMARY 4 test.t11.k3 1 Using where
1 SIMPLE l2 eq_ref PRIMARY PRIMARY 4 test.t11.k4 1 Using where
1 SIMPLE t13 ref PRIMARY,m3 PRIMARY 4 test.t1.a1 1 Using where; Using index
+1 SIMPLE t10 eq_ref PRIMARY PRIMARY 1 test.t1.a6 1
1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1 Using index
+1 SIMPLE t7 eq_ref PRIMARY PRIMARY 1 test.t1.a7 1
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where; Using index
+1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1 Using index
+1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.d1 1 Using where
+1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where; Using index
1 SIMPLE l4 eq_ref PRIMARY PRIMARY 4 test.t13.m2 1 Using where; Using index
1 SIMPLE m2 ref PRIMARY,m3 PRIMARY 4 test.t1.a1 1 Using where; Using index
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where; Using index
1 SIMPLE t9 ref PRIMARY PRIMARY 1 test.t1.a4 1
1 SIMPLE l3 eq_ref PRIMARY PRIMARY 4 test.m2.m2 1 Using where
-1 SIMPLE t14 ALL PRIMARY NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1 Using index
+1 SIMPLE t14 eq_ref PRIMARY PRIMARY 2 test.t1.a8 1 Using where
1 SIMPLE t15 eq_ref PRIMARY PRIMARY 2 test.t1.a9 1 Using where; Using index
1 SIMPLE t16 ref PRIMARY PRIMARY 2 test.t15.o1 1 Using where
-1 SIMPLE t10 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.d1 1 Using where
-1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where; Using index
explain select * from v1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a4,a6,a5,a7 NULL NULL NULL 3 Using where
1 SIMPLE t8 eq_ref PRIMARY PRIMARY 1 test.t1.a4 1 Using index
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 1 test.t1.a7 1
1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t1.a5 1
1 SIMPLE t12 eq_ref PRIMARY PRIMARY 4 test.t11.k3 1 Using where
1 SIMPLE l2 eq_ref PRIMARY PRIMARY 4 test.t11.k4 1 Using where
1 SIMPLE t13 ref PRIMARY,m3 PRIMARY 4 test.t1.a1 1 Using where; Using index
+1 SIMPLE t10 eq_ref PRIMARY PRIMARY 1 test.t1.a6 1
1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1 Using index
+1 SIMPLE t7 eq_ref PRIMARY PRIMARY 1 test.t1.a7 1
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where; Using index
+1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1 Using index
+1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.d1 1 Using where
+1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where; Using index
1 SIMPLE l4 eq_ref PRIMARY PRIMARY 4 test.t13.m2 1 Using where; Using index
1 SIMPLE m2 ref PRIMARY,m3 PRIMARY 4 test.t1.a1 1 Using where; Using index
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where; Using index
1 SIMPLE t9 ref PRIMARY PRIMARY 1 test.t1.a4 1
1 SIMPLE l3 eq_ref PRIMARY PRIMARY 4 test.m2.m2 1 Using where
-1 SIMPLE t14 ALL PRIMARY NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1 Using index
+1 SIMPLE t14 eq_ref PRIMARY PRIMARY 2 test.t1.a8 1 Using where
1 SIMPLE t15 eq_ref PRIMARY PRIMARY 2 test.t1.a9 1 Using where; Using index
1 SIMPLE t16 ref PRIMARY PRIMARY 2 test.t15.o1 1 Using where
-1 SIMPLE t10 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.d1 1 Using where
-1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where; Using index
drop view v1;
drop table t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12,t13,t14,t15,t16;
#
diff --git a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result
index 4366ef6a564..95069c74391 100644
--- a/mysql-test/main/selectivity.result
+++ b/mysql-test/main/selectivity.result
@@ -1867,4 +1867,60 @@ set optimizer_switch= @save_optimizer_switch;
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
drop table t1,t2;
# End of 10.1 tests
+#
+# MDEV-21377: Eq_ref access not picked by query with
+# optimizer_use_condition_selectivity > 1
+#
+create table t1(a int, b int,c int, primary key(a), key(b), key(c));
+insert into t1 select seq, seq, seq from seq_1_to_100;
+create table t2(a int, b int,c int, primary key(a), key(b), key(c));
+insert into t2 select seq, seq, seq from seq_1_to_1000;
+create table t3(a int, b int, primary key(a));
+insert into t3 select seq, seq from seq_1_to_100;
+analyze table t1,t2,t3;
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='rowid_filter=off';
+set optimizer_use_condition_selectivity=1;
+explain SELECT * FROM t1, t2, t3 WHERE t1.c= t2.c and t1.a = t3.a and
+t2.b < 10 AND t1.b < 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY,b,c b 5 NULL 5 Using index condition; Using where
+1 SIMPLE t2 ref b,c c 5 test.t1.c 1 Using where
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.a 1
+SELECT * FROM t1, t2, t3 WHERE t1.c= t2.c and t1.a = t3.a and
+t2.b < 10 AND t1.b < 10;
+a b c a b c a b
+1 1 1 1 1 1 1 1
+2 2 2 2 2 2 2 2
+3 3 3 3 3 3 3 3
+4 4 4 4 4 4 4 4
+5 5 5 5 5 5 5 5
+6 6 6 6 6 6 6 6
+7 7 7 7 7 7 7 7
+8 8 8 8 8 8 8 8
+9 9 9 9 9 9 9 9
+set optimizer_use_condition_selectivity=4;
+explain SELECT * FROM t1, t2, t3 WHERE t1.c= t2.c and t1.a = t3.a and
+t2.b < 10 AND t1.b < 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY,b,c b 5 NULL 5 Using index condition; Using where
+1 SIMPLE t2 ref b,c c 5 test.t1.c 1 Using where
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.a 1
+SELECT * FROM t1, t2, t3 WHERE t1.c= t2.c and t1.a = t3.a and
+t2.b < 10 AND t1.b < 10;
+a b c a b c a b
+1 1 1 1 1 1 1 1
+2 2 2 2 2 2 2 2
+3 3 3 3 3 3 3 3
+4 4 4 4 4 4 4 4
+5 5 5 5 5 5 5 5
+6 6 6 6 6 6 6 6
+7 7 7 7 7 7 7 7
+8 8 8 8 8 8 8 8
+9 9 9 9 9 9 9 9
+set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+set optimizer_switch=@save_optimizer_switch;
+drop table t1,t2,t3;
+# End of 10.4 tests
set @@global.histogram_size=@save_histogram_size;
diff --git a/mysql-test/main/selectivity.test b/mysql-test/main/selectivity.test
index d0158fb717e..6ed3ae67d83 100644
--- a/mysql-test/main/selectivity.test
+++ b/mysql-test/main/selectivity.test
@@ -1269,6 +1269,44 @@ drop table t1,t2;
--echo # End of 10.1 tests
+--echo #
+--echo # MDEV-21377: Eq_ref access not picked by query with
+--echo # optimizer_use_condition_selectivity > 1
+--echo #
+
+create table t1(a int, b int,c int, primary key(a), key(b), key(c));
+insert into t1 select seq, seq, seq from seq_1_to_100;
+
+create table t2(a int, b int,c int, primary key(a), key(b), key(c));
+insert into t2 select seq, seq, seq from seq_1_to_1000;
+
+create table t3(a int, b int, primary key(a));
+insert into t3 select seq, seq from seq_1_to_100;
+--disable_result_log
+analyze table t1,t2,t3;
+--enable_result_log
+
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='rowid_filter=off';
+let $q= SELECT * FROM t1, t2, t3 WHERE t1.c= t2.c and t1.a = t3.a and
+ t2.b < 10 AND t1.b < 10;
+
+set optimizer_use_condition_selectivity=1;
+eval explain $q;
+eval $q;
+
+set optimizer_use_condition_selectivity=4;
+eval explain $q;
+eval $q;
+
+set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+set optimizer_switch=@save_optimizer_switch;
+
+drop table t1,t2,t3;
+
+--echo # End of 10.4 tests
+
#
# Clean up
#
diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result
index 062b2da7b4f..d48848a92bf 100644
--- a/mysql-test/main/selectivity_innodb.result
+++ b/mysql-test/main/selectivity_innodb.result
@@ -1877,6 +1877,62 @@ set optimizer_switch= @save_optimizer_switch;
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
drop table t1,t2;
# End of 10.1 tests
+#
+# MDEV-21377: Eq_ref access not picked by query with
+# optimizer_use_condition_selectivity > 1
+#
+create table t1(a int, b int,c int, primary key(a), key(b), key(c));
+insert into t1 select seq, seq, seq from seq_1_to_100;
+create table t2(a int, b int,c int, primary key(a), key(b), key(c));
+insert into t2 select seq, seq, seq from seq_1_to_1000;
+create table t3(a int, b int, primary key(a));
+insert into t3 select seq, seq from seq_1_to_100;
+analyze table t1,t2,t3;
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='rowid_filter=off';
+set optimizer_use_condition_selectivity=1;
+explain SELECT * FROM t1, t2, t3 WHERE t1.c= t2.c and t1.a = t3.a and
+t2.b < 10 AND t1.b < 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY,b,c b 5 NULL 9 Using index condition; Using where
+1 SIMPLE t2 ref b,c c 5 test.t1.c 1 Using where
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.a 1
+SELECT * FROM t1, t2, t3 WHERE t1.c= t2.c and t1.a = t3.a and
+t2.b < 10 AND t1.b < 10;
+a b c a b c a b
+1 1 1 1 1 1 1 1
+2 2 2 2 2 2 2 2
+3 3 3 3 3 3 3 3
+4 4 4 4 4 4 4 4
+5 5 5 5 5 5 5 5
+6 6 6 6 6 6 6 6
+7 7 7 7 7 7 7 7
+8 8 8 8 8 8 8 8
+9 9 9 9 9 9 9 9
+set optimizer_use_condition_selectivity=4;
+explain SELECT * FROM t1, t2, t3 WHERE t1.c= t2.c and t1.a = t3.a and
+t2.b < 10 AND t1.b < 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY,b,c b 5 NULL 9 Using index condition; Using where
+1 SIMPLE t2 ref b,c c 5 test.t1.c 1 Using where
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.a 1
+SELECT * FROM t1, t2, t3 WHERE t1.c= t2.c and t1.a = t3.a and
+t2.b < 10 AND t1.b < 10;
+a b c a b c a b
+1 1 1 1 1 1 1 1
+2 2 2 2 2 2 2 2
+3 3 3 3 3 3 3 3
+4 4 4 4 4 4 4 4
+5 5 5 5 5 5 5 5
+6 6 6 6 6 6 6 6
+7 7 7 7 7 7 7 7
+8 8 8 8 8 8 8 8
+9 9 9 9 9 9 9 9
+set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+set optimizer_switch=@save_optimizer_switch;
+drop table t1,t2,t3;
+# End of 10.4 tests
set @@global.histogram_size=@save_histogram_size;
set optimizer_switch=@save_optimizer_switch_for_selectivity_test;
set @tmp_ust= @@use_stat_tables;
diff --git a/mysql-test/main/subselect_mat.result b/mysql-test/main/subselect_mat.result
index 34b58daa50e..c2f78a8453a 100644
--- a/mysql-test/main/subselect_mat.result
+++ b/mysql-test/main/subselect_mat.result
@@ -2368,9 +2368,9 @@ t1.dispatch_group IN
FROM t2, t3 t3_i
WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
-t2.user = '86826bf03710200044e0bfc8bcbe5d79');
+t2.user = '931644d4d773020058c92cf65e61034c');
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where; Start temporary
+1 PRIMARY t2 ref idx3,idx4 idx4 35 const 1 Using index condition; Using where; Start temporary
1 PRIMARY t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t3_i.sys_id 2 Using index condition; Using where; End temporary
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 32 test.t1.assignment_group 1 Using where; Using index
@@ -2382,13 +2382,12 @@ t1.dispatch_group IN
FROM t2, t3 t3_i
WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
-t2.user = '86826bf03710200044e0bfc8bcbe5d79');
+t2.user = '931644d4d773020058c92cf65e61034c');
assignment_group
df50316637232000158bbfc8bcbe5d23
e08fad2637232000158bbfc8bcbe5d39
ec70316637232000158bbfc8bcbe5d60
7b10fd2637232000158bbfc8bcbe5d30
-ebb4620037332000158bbfc8bcbe5d89
set optimizer_switch='materialization=on';
explain SELECT t1.assignment_group
FROM t1, t3
@@ -2398,12 +2397,12 @@ t1.dispatch_group IN
FROM t2, t3 t3_i
WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
-t2.user = '86826bf03710200044e0bfc8bcbe5d79');
+t2.user = '931644d4d773020058c92cf65e61034c');
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 1
1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t2.ugroup 2 Using where
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 32 test.t1.assignment_group 1 Using where; Using index
-2 MATERIALIZED t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where
+2 MATERIALIZED t2 ref idx3,idx4 idx4 35 const 1 Using index condition; Using where
2 MATERIALIZED t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
SELECT t1.assignment_group
FROM t1, t3
@@ -2413,13 +2412,12 @@ t1.dispatch_group IN
FROM t2, t3 t3_i
WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
-t2.user = '86826bf03710200044e0bfc8bcbe5d79');
+t2.user = '931644d4d773020058c92cf65e61034c');
assignment_group
df50316637232000158bbfc8bcbe5d23
e08fad2637232000158bbfc8bcbe5d39
ec70316637232000158bbfc8bcbe5d60
7b10fd2637232000158bbfc8bcbe5d30
-ebb4620037332000158bbfc8bcbe5d89
DROP TABLE t1,t2,t3;
set optimizer_switch=@local_optimizer_switch;
#
diff --git a/mysql-test/main/subselect_sj2_mat.result b/mysql-test/main/subselect_sj2_mat.result
index 589144f1238..388b6d5e901 100644
--- a/mysql-test/main/subselect_sj2_mat.result
+++ b/mysql-test/main/subselect_sj2_mat.result
@@ -1825,16 +1825,15 @@ explain
SELECT t2.id FROM t2,t1
WHERE t2.id IN (SELECT t3.ref_id FROM t3,t1 where t3.id = t1.id) and t2.id = t1.id;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 30 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 Using where
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.id 1 Using index
-2 MATERIALIZED t3 ALL NULL NULL NULL NULL 14
-2 MATERIALIZED t1 eq_ref PRIMARY PRIMARY 4 test.t3.id 1 Using index
+1 PRIMARY t3 ALL NULL NULL NULL NULL 14 Using where; Start temporary
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t3.ref_id 1 Using where; Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t3.id 1 Using index; End temporary
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t3.ref_id 1 Using where; Using index
SELECT t2.id FROM t2,t1
WHERE t2.id IN (SELECT t3.ref_id FROM t3,t1 where t3.id = t1.id) and t2.id = t1.id;
id
-10
11
+10
set optimizer_switch='materialization=off';
SELECT t2.id FROM t2,t1
WHERE t2.id IN (SELECT t3.ref_id FROM t3,t1 where t3.id = t1.id) and t2.id = t1.id;
@@ -1934,15 +1933,14 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <subquery3> ALL distinct_key NULL NULL NULL 12
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t2_2.id_product 1 Using where; Using index
1 PRIMARY <subquery5> eq_ref distinct_key distinct_key 4 func 1 Using where
-1 PRIMARY t5 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY <subquery6> eq_ref distinct_key distinct_key 4 func 1 Using where
1 PRIMARY t4 eq_ref PRIMARY PRIMARY 8 test.t3.id_product,const 1 Using where; Using index
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 Using where
+1 PRIMARY t2_5 ref id_t2,id_product id_product 5 test.t3.id_product 44 Using index condition; Using where; Start temporary; End temporary
+1 PRIMARY t5 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 4 func 1 Using where
1 PRIMARY t1 index NULL PRIMARY 8 NULL 73 Using where; Using index; Using join buffer (flat, BNL join)
3 MATERIALIZED t2_2 ref id_t2,id_product id_t2 5 const 12 Using where
5 MATERIALIZED t2_4 range id_t2,id_product id_t2 5 NULL 18 Using index condition; Using where
-6 MATERIALIZED t2_5 range id_t2,id_product id_t2 5 NULL 31 Using index condition; Using where
2 MATERIALIZED t2_1 ref id_t2,id_product id_t2 5 const 51
4 MATERIALIZED t2_3 range id_t2,id_product id_t2 5 NULL 33 Using index condition; Using where
set optimizer_switch='rowid_filter=default';
diff --git a/mysql-test/main/subselect_sj_mat.result b/mysql-test/main/subselect_sj_mat.result
index afc75a22962..fe9bf0015bb 100644
--- a/mysql-test/main/subselect_sj_mat.result
+++ b/mysql-test/main/subselect_sj_mat.result
@@ -2404,9 +2404,9 @@ t1.dispatch_group IN
FROM t2, t3 t3_i
WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
-t2.user = '86826bf03710200044e0bfc8bcbe5d79');
+t2.user = '931644d4d773020058c92cf65e61034c');
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where; Start temporary
+1 PRIMARY t2 ref idx3,idx4 idx4 35 const 1 Using index condition; Using where; Start temporary
1 PRIMARY t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t3_i.sys_id 2 Using index condition; Using where; End temporary
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 32 test.t1.assignment_group 1 Using where; Using index
@@ -2418,13 +2418,12 @@ t1.dispatch_group IN
FROM t2, t3 t3_i
WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
-t2.user = '86826bf03710200044e0bfc8bcbe5d79');
+t2.user = '931644d4d773020058c92cf65e61034c');
assignment_group
df50316637232000158bbfc8bcbe5d23
e08fad2637232000158bbfc8bcbe5d39
ec70316637232000158bbfc8bcbe5d60
7b10fd2637232000158bbfc8bcbe5d30
-ebb4620037332000158bbfc8bcbe5d89
set optimizer_switch='materialization=on';
explain SELECT t1.assignment_group
FROM t1, t3
@@ -2434,12 +2433,12 @@ t1.dispatch_group IN
FROM t2, t3 t3_i
WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
-t2.user = '86826bf03710200044e0bfc8bcbe5d79');
+t2.user = '931644d4d773020058c92cf65e61034c');
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 1
1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t2.ugroup 2 Using where
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 32 test.t1.assignment_group 1 Using where; Using index
-2 MATERIALIZED t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where
+2 MATERIALIZED t2 ref idx3,idx4 idx4 35 const 1 Using index condition; Using where
2 MATERIALIZED t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
SELECT t1.assignment_group
FROM t1, t3
@@ -2449,13 +2448,12 @@ t1.dispatch_group IN
FROM t2, t3 t3_i
WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
-t2.user = '86826bf03710200044e0bfc8bcbe5d79');
+t2.user = '931644d4d773020058c92cf65e61034c');
assignment_group
df50316637232000158bbfc8bcbe5d23
e08fad2637232000158bbfc8bcbe5d39
ec70316637232000158bbfc8bcbe5d60
7b10fd2637232000158bbfc8bcbe5d30
-ebb4620037332000158bbfc8bcbe5d89
DROP TABLE t1,t2,t3;
set optimizer_switch=@local_optimizer_switch;
#
diff --git a/mysql-test/main/subselect_sj_mat.test b/mysql-test/main/subselect_sj_mat.test
index 1de8701ecbb..22f60622756 100644
--- a/mysql-test/main/subselect_sj_mat.test
+++ b/mysql-test/main/subselect_sj_mat.test
@@ -2047,7 +2047,7 @@ INSERT INTO t1 VALUES
INSERT INTO t2 VALUES
('17801ac21b13200050fdfbcd2c0713e8','8e826bf03710200044e0bfc8bcbe5d86',
'14c19a061b13200050fdfbcd2c07134b'),
-('577ed708d773020058c92cf65e61037a','931644d4d773020058c92cf65e61034c',
+('96fb652637232000158bbfc8bcbe5db4','931644d4d773020058c92cf65e61034c',
'339888d4d773020058c92cf65e6103aa'),
('df50316637232000158bbfc8bcbe5d23','92826bf03710200044e0bfc8bcbe5da9',
'3682f56637232000158bbfc8bcbe5d44'),
@@ -2156,7 +2156,7 @@ WHERE t1.assignment_group = t3.sys_id AND
FROM t2, t3 t3_i
WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
- t2.user = '86826bf03710200044e0bfc8bcbe5d79');
+ t2.user = '931644d4d773020058c92cf65e61034c');
set optimizer_switch='materialization=off';
eval explain $q;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index d9d9c229c2f..f5b08dcdd79 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -7370,6 +7370,7 @@ best_access_path(JOIN *join,
double tmp2= prev_record_reads(join_positions, idx,
(found_ref | keyuse->used_tables));
+ tmp2= MY_MIN(tmp2, record_count);
if (tmp2 < best_prev_record_reads)
{
best_part_found_ref= keyuse->used_tables & ~join->const_table_map;
@@ -7411,6 +7412,7 @@ best_access_path(JOIN *join,
but 1.0 would be probably safer
*/
tmp= prev_record_reads(join_positions, idx, found_ref);
+ tmp= MY_MIN(tmp, record_count);
records= 1.0;
type= JT_FT;
trace_access_idx.add("access_type", join_type_str[type])
@@ -7440,6 +7442,7 @@ best_access_path(JOIN *join,
trace_access_idx.add("access_type", join_type_str[type])
.add("index", keyinfo->name);
tmp = prev_record_reads(join_positions, idx, found_ref);
+ tmp= MY_MIN(tmp, record_count);
records=1.0;
}
else
1
0
12 Jan '20
revision-id: 9271843e31adef1f8c50e1d9622d07dee3386758 (mariadb-10.1.43-36-g9271843e31a)
parent(s): 984b3c15449e0b5c7b3d66047a3c490c7be40faf
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2020-01-12 20:50:12 +0200
message:
MDEV-21341: Fix UBSAN failures: Issue Six
(Variant #2 of the patch, which keeps the sp_head object inside the
MEM_ROOT that sp_head object owns)
(10.3 requires extra work due to sp_package, will commit a separate
patch for it)
sp_head::operator new() and operator delete() were dereferencing sp_head*
pointers to memory that did not hold a valid sp_head object (the object had
either not yet been constructed or had already been destroyed).
This caused UBSan to crash when looking up type information.
Fixed by providing static sp_head::create() and sp_head::destroy() methods.
---
sql/sp.cc | 2 +-
sql/sp_cache.cc | 2 +-
sql/sp_head.cc | 62 ++++++++++++++++++++++++------------------------------
sql/sp_head.h | 17 ++++++++-------
sql/sql_lex.cc | 4 ++--
sql/sql_parse.cc | 2 +-
sql/sql_prepare.cc | 2 +-
sql/sql_show.cc | 6 +++---
sql/sql_trigger.cc | 2 +-
sql/sql_yacc.yy | 2 +-
10 files changed, 47 insertions(+), 54 deletions(-)
diff --git a/sql/sp.cc b/sql/sp.cc
index 966ea0280b4..1d340644ba1 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -754,7 +754,7 @@ static sp_head *sp_compile(THD *thd, String *defstr, ulonglong sql_mode,
if (parse_sql(thd, & parser_state, creation_ctx) || thd->lex == NULL)
{
sp= thd->lex->sphead;
- delete sp;
+ sp_head::destroy(sp);
sp= 0;
}
else
diff --git a/sql/sp_cache.cc b/sql/sp_cache.cc
index f99c0bd0b6e..bc91634eb32 100644
--- a/sql/sp_cache.cc
+++ b/sql/sp_cache.cc
@@ -284,7 +284,7 @@ uchar *hash_get_key_for_sp_head(const uchar *ptr, size_t *plen,
void hash_free_sp_head(void *p)
{
sp_head *sp= (sp_head *)p;
- delete sp;
+ sp_head::destroy(sp);
}
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 5c5688be4a3..f940040b480 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -550,51 +550,41 @@ check_routine_name(LEX_STRING *ident)
}
-/*
- *
- * sp_head
- *
- */
-
-void *
-sp_head::operator new(size_t size) throw()
+sp_head* sp_head::create()
{
- DBUG_ENTER("sp_head::operator new");
MEM_ROOT own_root;
+ init_sql_alloc(&own_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC, MYF(0));
sp_head *sp;
+ if (!(sp= new (&own_root) sp_head(&own_root)))
+ free_root(&own_root, MYF(0));
- init_sql_alloc(&own_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC, MYF(0));
- sp= (sp_head *) alloc_root(&own_root, size);
- if (sp == NULL)
- DBUG_RETURN(NULL);
- sp->main_mem_root= own_root;
- DBUG_PRINT("info", ("mem_root 0x%lx", (ulong) &sp->mem_root));
- DBUG_RETURN(sp);
+ return sp;
}
-void
-sp_head::operator delete(void *ptr, size_t size) throw()
-{
- DBUG_ENTER("sp_head::operator delete");
- MEM_ROOT own_root;
- if (ptr == NULL)
- DBUG_VOID_RETURN;
-
- sp_head *sp= (sp_head *) ptr;
-
- /* Make a copy of main_mem_root as free_root will free the sp */
- own_root= sp->main_mem_root;
- DBUG_PRINT("info", ("mem_root 0x%lx moved to 0x%lx",
- (ulong) &sp->mem_root, (ulong) &own_root));
- free_root(&own_root, MYF(0));
+void sp_head::destroy(sp_head *sp)
+{
+ if (sp)
+ {
+ /* Make a copy of main_mem_root as free_root will free the sp */
+ MEM_ROOT own_root= sp->main_mem_root;
+ delete sp;
- DBUG_VOID_RETURN;
+ DBUG_PRINT("info", ("mem_root 0x%lx moved to 0x%lx",
+ (ulong) &sp->mem_root, (ulong) &own_root));
+ free_root(&own_root, MYF(0));
+ }
}
+/*
+ *
+ * sp_head
+ *
+ */
-sp_head::sp_head()
- :Query_arena(&main_mem_root, STMT_INITIALIZED_FOR_SP),
+sp_head::sp_head(MEM_ROOT *mem_root_arg)
+ :Query_arena(NULL, STMT_INITIALIZED_FOR_SP),
+ main_mem_root(*mem_root_arg), // todo: std::move operator.
m_flags(0),
m_sp_cache_version(0),
m_creation_ctx(0),
@@ -603,6 +593,8 @@ sp_head::sp_head()
m_next_cached_sp(0),
m_cont_level(0)
{
+ mem_root= &main_mem_root;
+
m_first_instance= this;
m_first_free_instance= this;
m_last_cached_sp= this;
@@ -848,7 +840,7 @@ sp_head::~sp_head()
my_hash_free(&m_sptabs);
my_hash_free(&m_sroutines);
- delete m_next_cached_sp;
+ sp_head::destroy(m_next_cached_sp);
DBUG_VOID_RETURN;
}
diff --git a/sql/sp_head.h b/sql/sp_head.h
index 2b3e568fb9a..47cb0985b05 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -142,7 +142,7 @@ class sp_name : public Sql_alloc
bool
check_routine_name(LEX_STRING *ident);
-class sp_head :private Query_arena
+class sp_head :private Query_arena, public Sql_alloc
{
sp_head(const sp_head &); /**< Prevent use of these */
void operator=(sp_head &);
@@ -301,14 +301,16 @@ class sp_head :private Query_arena
being opened is probably enough).
*/
SQL_I_List<Item_trigger_field> m_trg_table_fields;
+private:
+ // users must use sp= sp_head::create()
+ sp_head(MEM_ROOT *mem_root_arg);
- static void *
- operator new(size_t size) throw ();
-
- static void
- operator delete(void *ptr, size_t size) throw ();
+ // users must use sp_head::destroy(sp)
+ virtual ~sp_head();
- sp_head();
+public:
+ static sp_head* create();
+ static void destroy(sp_head *sp);
/// Initialize after we have reset mem_root
void
@@ -326,7 +328,6 @@ class sp_head :private Query_arena
void
set_stmt_end(THD *thd);
- virtual ~sp_head();
bool
execute_trigger(THD *thd,
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index a36a19357eb..a0d9cbea211 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -785,7 +785,7 @@ void lex_end_stage1(LEX *lex)
}
else
{
- delete lex->sphead;
+ sp_head::destroy(lex->sphead);
lex->sphead= NULL;
}
@@ -2781,7 +2781,7 @@ void LEX::cleanup_lex_after_parse_error(THD *thd)
if (thd->lex->sphead)
{
thd->lex->sphead->restore_thd_mem_root(thd);
- delete thd->lex->sphead;
+ sp_head::destroy(thd->lex->sphead);
thd->lex->sphead= NULL;
}
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 218d0dbd357..e5626ccbd7c 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -4347,7 +4347,7 @@ mysql_execute_command(THD *thd)
/* Don't do it, if we are inside a SP */
if (!thd->spcont)
{
- delete lex->sphead;
+ sp_head::destroy(lex->sphead);
lex->sphead= NULL;
}
/* lex->unit.cleanup() is called outside, no need to call it here */
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 2c6aeda794a..f5adaeaa956 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -3607,7 +3607,7 @@ Prepared_statement::~Prepared_statement()
free_items();
if (lex)
{
- delete lex->sphead;
+ sp_head::destroy(lex->sphead);
delete lex->result;
delete (st_lex_local *) lex;
}
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index fbdb76e9e71..4f217159e5c 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -5863,7 +5863,7 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
{
free_table_share(&share);
if (free_sp_head)
- delete sp;
+ sp_head::destroy(sp);
DBUG_RETURN(1);
}
}
@@ -5919,7 +5919,7 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
}
}
if (free_sp_head)
- delete sp;
+ sp_head::destroy(sp);
}
free_table_share(&share);
DBUG_RETURN(error);
@@ -6012,7 +6012,7 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
store_column_type(table, field, cs, 5);
free_table_share(&share);
if (free_sp_head)
- delete sp;
+ sp_head::destroy(sp);
}
}
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 4ecd8139921..c4d348ce400 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -1063,7 +1063,7 @@ Table_triggers_list::~Table_triggers_list()
{
for (int i= 0; i < (int)TRG_EVENT_MAX; i++)
for (int j= 0; j < (int)TRG_ACTION_MAX; j++)
- delete bodies[i][j];
+ sp_head::destroy(bodies[i][j]);
/* Free blobs used in insert */
if (record0_field)
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 71e0a18b1a3..2a46bb2a027 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -224,7 +224,7 @@ static sp_head *make_sp_head(THD *thd, sp_name *name,
sp_head *sp;
/* Order is important here: new - reset - init */
- if ((sp= new sp_head()))
+ if ((sp= sp_head::create()))
{
sp->reset_thd_mem_root(thd);
sp->init(lex);
1
0