lists.mariadb.org
Sign In Sign Up
Manage this list Sign In Sign Up

Keyboard Shortcuts

Thread View

  • j: Next unread message
  • k: Previous unread message
  • j a: Jump to all threads
  • j l: Jump to MailingList overview

commits

Thread Start a new thread
Threads by month
  • ----- 2025 -----
  • July
  • June
  • May
  • April
  • March
  • February
  • January
  • ----- 2024 -----
  • December
  • November
  • October
  • September
  • August
  • July
  • June
  • May
  • April
  • March
  • February
  • January
  • ----- 2023 -----
  • December
  • November
  • October
  • September
  • August
  • July
commits@lists.mariadb.org

  • 14605 discussions
[Commits] 91e925e199c: MDEV-24325: Optimizer trace doesn't cover LATERAL DERIVED
by Sergei Petrunia 28 Jul '21

28 Jul '21
revision-id: 91e925e199ce61623f8413bfa789d0e7098c3d72 (mariadb-10.5.11-54-g91e925e199c) parent(s): 0805cdebd33eb39515d05041a924df12396ebc69 author: Sergei Petrunia committer: Sergei Petrunia timestamp: 2021-07-28 19:56:51 +0300 message: MDEV-24325: Optimizer trace doesn't cover LATERAL DERIVED (cherry-pick into 10.5) Provide basic coverage in the Optimizer Trace --- mysql-test/main/opt_trace.result | 116 +++++++++++++++++++++++++++++++++++++++ mysql-test/main/opt_trace.test | 49 +++++++++++++++++ sql/opt_split.cc | 17 ++++++ sql/sql_select.cc | 2 +- 4 files changed, 183 insertions(+), 1 deletion(-) diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result index 8f912e0500d..9bf1cda18a3 100644 --- a/mysql-test/main/opt_trace.result +++ b/mysql-test/main/opt_trace.result @@ -8794,4 +8794,120 @@ json_detailed(json_extract(trace, '$**.in_to_subquery_conversion')) set in_predicate_conversion_threshold=@tmp; drop table t0; # End of 10.5 tests +# +# MDEV-24325: Optimizer trace doesn't cover LATERAL DERIVED +# +create table t1 (a int, b int, index idx_b(b)) engine=myisam; +insert into t1 values +(8,3), (5,7), (1,2), (2,1), (9,7), (7,5), (2,2), (7,3), +(9,3), (8,1), (4,5), (2,3); +create table t2 (a int, b int, c char(127), index idx_a(a)) engine=myisam; +insert into t2 values +(7,10,'x'), (1,20,'a'), (2,23,'b'), (7,18,'z'), (1,30,'c'), +(4,71,'d'), (3,15,'x'), (7,82,'y'), (8,12,'t'), (4,15,'b'), +(11,33,'a'), (10,42,'u'), (4,53,'p'), (10,17,'r'), (2,90,'x'), +(17,10,'s'), (11,20,'v'), (12,23,'y'), (17,18,'a'), (11,30,'d'), +(24,71,'h'), (23,15,'i'), (27,82,'k'), (28,12,'p'), (24,15,'q'), +(31,33,'f'), (30,42,'h'), (40,53,'m'), (30,17,'o'), (21,90,'b'), +(37,10,'e'), (31,20,'g'), (32,23,'f'), (37,18,'n'), (41,30,'l'), +(54,71,'j'), (53,15,'w'), (57,82,'z'), (58,12,'k'), (54,15,'p'), +(61,33,'c'), (60,42,'a'), (62,53,'x'), (67,17,'g'), (64,90,'v'); +insert into t2 select a+10, b+10, concat(c,'f') from t2; +analyze table t1,t2; +Table Op Msg_type 
Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +explain +select t1.a,t.s,t.m +from t1 join +(select a, sum(t2.b) as s, min(t2.c) as m from t2 group by t2.a) t +on t1.a=t.a +where t1.b < 3; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 range idx_b idx_b 5 NULL 4 Using index condition; Using where +1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 +2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 1 +select +json_detailed(json_extract(trace, '$**.choose_best_splitting')) +from +information_schema.optimizer_trace; +json_detailed(json_extract(trace, '$**.choose_best_splitting')) +[ + + [ + + { + "considered_execution_plans": + [ + + { + "plan_prefix": + [ + ], + "table": "t2", + "best_access_path": + { + "considered_access_paths": + [ + + { + "access_type": "ref", + "index": "idx_a", + "used_range_estimates": false, + "cause": "not available", + "rows": 1.8367, + "cost": 2.000585794, + "chosen": true + }, + + { + "type": "scan", + "chosen": false, + "cause": "cost" + } + ], + "chosen_access_method": + { + "type": "ref", + "records": 1.8367, + "cost": 2.000585794, + "uses_join_buffering": false + } + }, + "rows_for_plan": 1.8367, + "cost_for_plan": 2.367925794, + "cost_for_sorting": 1.8367, + "estimated_join_cardinality": 1.8367 + } + ] + }, + + { + "best_splitting": + { + "table": "t2", + "key": "idx_a", + "record_count": 4, + "cost": 2.488945919, + "unsplit_cost": 25.72361682 + } + } + ] +] +select +json_detailed(json_extract(trace, '$**.lateral_derived')) +from +information_schema.optimizer_trace; +json_detailed(json_extract(trace, '$**.lateral_derived')) +[ + + { + "startup_cost": 9.955783677, + "splitting_cost": 2.488945919, + "records": 1 + } +] +drop table t1,t2; set optimizer_trace='enabled=off'; diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test index 
ecb6658e338..66a333d7dc5 100644 --- a/mysql-test/main/opt_trace.test +++ b/mysql-test/main/opt_trace.test @@ -697,4 +697,53 @@ set in_predicate_conversion_threshold=@tmp; drop table t0; --echo # End of 10.5 tests + +--echo # +--echo # MDEV-24325: Optimizer trace doesn't cover LATERAL DERIVED +--echo # +create table t1 (a int, b int, index idx_b(b)) engine=myisam; +insert into t1 values +(8,3), (5,7), (1,2), (2,1), (9,7), (7,5), (2,2), (7,3), +(9,3), (8,1), (4,5), (2,3); + +create table t2 (a int, b int, c char(127), index idx_a(a)) engine=myisam; +insert into t2 values + (7,10,'x'), (1,20,'a'), (2,23,'b'), (7,18,'z'), (1,30,'c'), + (4,71,'d'), (3,15,'x'), (7,82,'y'), (8,12,'t'), (4,15,'b'), + (11,33,'a'), (10,42,'u'), (4,53,'p'), (10,17,'r'), (2,90,'x'), + (17,10,'s'), (11,20,'v'), (12,23,'y'), (17,18,'a'), (11,30,'d'), + (24,71,'h'), (23,15,'i'), (27,82,'k'), (28,12,'p'), (24,15,'q'), + (31,33,'f'), (30,42,'h'), (40,53,'m'), (30,17,'o'), (21,90,'b'), + (37,10,'e'), (31,20,'g'), (32,23,'f'), (37,18,'n'), (41,30,'l'), + (54,71,'j'), (53,15,'w'), (57,82,'z'), (58,12,'k'), (54,15,'p'), + (61,33,'c'), (60,42,'a'), (62,53,'x'), (67,17,'g'), (64,90,'v'); + +insert into t2 select a+10, b+10, concat(c,'f') from t2; + +analyze table t1,t2; + +explain +select t1.a,t.s,t.m +from t1 join + (select a, sum(t2.b) as s, min(t2.c) as m from t2 group by t2.a) t + on t1.a=t.a +where t1.b < 3; + +# +# Just show that choose_best_splitting function has coverage in the +# optimizer trace and re-optmization of child select inside it is distinct +# from the rest of join optimization. +select + json_detailed(json_extract(trace, '$**.choose_best_splitting')) +from + information_schema.optimizer_trace; + +# Same as above. just to show that splitting plan has some coverage in the +# trace. 
+select + json_detailed(json_extract(trace, '$**.lateral_derived')) +from + information_schema.optimizer_trace; + +drop table t1,t2; set optimizer_trace='enabled=off'; diff --git a/sql/opt_split.cc b/sql/opt_split.cc index 316919c4a81..41b8acf5dcb 100644 --- a/sql/opt_split.cc +++ b/sql/opt_split.cc @@ -187,6 +187,7 @@ #include "mariadb.h" #include "sql_select.h" +#include "opt_trace.h" /* Info on a splitting field */ struct SplM_field_info @@ -957,6 +958,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count, The key for splitting was chosen, look for the plan for this key in the cache */ + Json_writer_array spl_trace(thd, "choose_best_splitting"); spl_plan= spl_opt_info->find_plan(best_table, best_key, best_key_parts); if (!spl_plan) { @@ -1005,6 +1007,16 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count, spl_plan->cost= join->best_positions[join->table_count-1].read_time + + oper_cost; + if (unlikely(thd->trace_started())) + { + Json_writer_object wrapper(thd); + Json_writer_object find_trace(thd, "best_splitting"); + find_trace.add("table", best_table->alias.c_ptr()); + find_trace.add("key", best_table->key_info[best_key].name); + find_trace.add("record_count", record_count); + find_trace.add("cost", spl_plan->cost); + find_trace.add("unsplit_cost", spl_opt_info->unsplit_cost); + } memcpy((char *) spl_plan->best_positions, (char *) join->best_positions, sizeof(POSITION) * join->table_count); @@ -1031,6 +1043,11 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count, { startup_cost= record_count * spl_plan->cost; records= (ha_rows) (records * spl_plan->split_sel); + + Json_writer_object trace(thd, "lateral_derived"); + trace.add("startup_cost", startup_cost); + trace.add("splitting_cost", spl_plan->cost); + trace.add("records", records); } else startup_cost= spl_opt_info->unsplit_cost; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 80bc24cd3cd..7057ed1b5e1 100644 --- a/sql/sql_select.cc +++ 
b/sql/sql_select.cc @@ -7449,7 +7449,6 @@ best_access_path(JOIN *join, DBUG_ENTER("best_access_path"); Json_writer_object trace_wrapper(thd, "best_access_path"); - Json_writer_array trace_paths(thd, "considered_access_paths"); bitmap_clear_all(eq_join_set); @@ -7457,6 +7456,7 @@ best_access_path(JOIN *join, if (s->table->is_splittable()) spl_plan= s->choose_best_splitting(record_count, remaining_tables); + Json_writer_array trace_paths(thd, "considered_access_paths"); if (s->keyuse) { /* Use key if possible */
1 0
0 0
[Commits] c276daf8d35: MDEV-24325: Optimizer trace doesn't cover LATERAL DERIVED
by Sergei Petrunia 28 Jul '21

28 Jul '21
revision-id: c276daf8d3528b05784bba9706fab4b307c18426 (mariadb-10.4.20-49-gc276daf8d35) parent(s): 2173f382ca5e02b8c05ae2d75b040df701bf497e author: Sergei Petrunia committer: Sergei Petrunia timestamp: 2021-07-28 19:54:11 +0300 message: MDEV-24325: Optimizer trace doesn't cover LATERAL DERIVED (cherry-pick into 10.5) Provide basic coverage in the Optimizer Trace --- mysql-test/main/opt_trace.result | 116 +++++++++++++++++++++++++++++++++++++++ mysql-test/main/opt_trace.test | 50 +++++++++++++++++ sql/opt_split.cc | 17 ++++++ sql/sql_select.cc | 2 +- 4 files changed, 184 insertions(+), 1 deletion(-) diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result index 0ced5f19e14..023f2a30745 100644 --- a/mysql-test/main/opt_trace.result +++ b/mysql-test/main/opt_trace.result @@ -8601,3 +8601,119 @@ set max_session_mem_used=default; # # End of 10.4 tests # +# +# MDEV-24325: Optimizer trace doesn't cover LATERAL DERIVED +# +create table t1 (a int, b int, index idx_b(b)) engine=myisam; +insert into t1 values +(8,3), (5,7), (1,2), (2,1), (9,7), (7,5), (2,2), (7,3), +(9,3), (8,1), (4,5), (2,3); +create table t2 (a int, b int, c char(127), index idx_a(a)) engine=myisam; +insert into t2 values +(7,10,'x'), (1,20,'a'), (2,23,'b'), (7,18,'z'), (1,30,'c'), +(4,71,'d'), (3,15,'x'), (7,82,'y'), (8,12,'t'), (4,15,'b'), +(11,33,'a'), (10,42,'u'), (4,53,'p'), (10,17,'r'), (2,90,'x'), +(17,10,'s'), (11,20,'v'), (12,23,'y'), (17,18,'a'), (11,30,'d'), +(24,71,'h'), (23,15,'i'), (27,82,'k'), (28,12,'p'), (24,15,'q'), +(31,33,'f'), (30,42,'h'), (40,53,'m'), (30,17,'o'), (21,90,'b'), +(37,10,'e'), (31,20,'g'), (32,23,'f'), (37,18,'n'), (41,30,'l'), +(54,71,'j'), (53,15,'w'), (57,82,'z'), (58,12,'k'), (54,15,'p'), +(61,33,'c'), (60,42,'a'), (62,53,'x'), (67,17,'g'), (64,90,'v'); +insert into t2 select a+10, b+10, concat(c,'f') from t2; +analyze table t1,t2; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze 
status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +explain +select t1.a,t.s,t.m +from t1 join +(select a, sum(t2.b) as s, min(t2.c) as m from t2 group by t2.a) t +on t1.a=t.a +where t1.b < 3; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 range idx_b idx_b 5 NULL 4 Using index condition; Using where +1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 +2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 1 +select +json_detailed(json_extract(trace, '$**.choose_best_splitting')) +from +information_schema.optimizer_trace; +json_detailed(json_extract(trace, '$**.choose_best_splitting')) +[ + + [ + + { + "considered_execution_plans": + [ + + { + "plan_prefix": + [ + ], + "table": "t2", + "best_access_path": + { + "considered_access_paths": + [ + + { + "access_type": "ref", + "index": "idx_a", + "used_range_estimates": false, + "cause": "not available", + "rows": 1.8367, + "cost": 2.000585794, + "chosen": true + }, + + { + "type": "scan", + "chosen": false, + "cause": "cost" + } + ], + "chosen_access_method": + { + "type": "ref", + "records": 1.8367, + "cost": 2.000585794, + "uses_join_buffering": false + } + }, + "rows_for_plan": 1.8367, + "cost_for_plan": 2.367925794, + "cost_for_sorting": 1.8367, + "estimated_join_cardinality": 1.8367 + } + ] + }, + + { + "best_splitting": + { + "table": "t2", + "key": "idx_a", + "record_count": 4, + "cost": 2.488945919, + "unsplit_cost": 25.72361682 + } + } + ] +] +select +json_detailed(json_extract(trace, '$**.lateral_derived')) +from +information_schema.optimizer_trace; +json_detailed(json_extract(trace, '$**.lateral_derived')) +[ + + { + "startup_cost": 9.955783677, + "splitting_cost": 2.488945919, + "records": 1 + } +] +drop table t1,t2; diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test index 3fae7f34750..b3ba937e958 100644 --- a/mysql-test/main/opt_trace.test +++ b/mysql-test/main/opt_trace.test @@ -637,3 +637,53 @@ set 
max_session_mem_used=default; --echo # --echo # End of 10.4 tests --echo # + + +--echo # +--echo # MDEV-24325: Optimizer trace doesn't cover LATERAL DERIVED +--echo # +create table t1 (a int, b int, index idx_b(b)) engine=myisam; +insert into t1 values +(8,3), (5,7), (1,2), (2,1), (9,7), (7,5), (2,2), (7,3), +(9,3), (8,1), (4,5), (2,3); + +create table t2 (a int, b int, c char(127), index idx_a(a)) engine=myisam; +insert into t2 values + (7,10,'x'), (1,20,'a'), (2,23,'b'), (7,18,'z'), (1,30,'c'), + (4,71,'d'), (3,15,'x'), (7,82,'y'), (8,12,'t'), (4,15,'b'), + (11,33,'a'), (10,42,'u'), (4,53,'p'), (10,17,'r'), (2,90,'x'), + (17,10,'s'), (11,20,'v'), (12,23,'y'), (17,18,'a'), (11,30,'d'), + (24,71,'h'), (23,15,'i'), (27,82,'k'), (28,12,'p'), (24,15,'q'), + (31,33,'f'), (30,42,'h'), (40,53,'m'), (30,17,'o'), (21,90,'b'), + (37,10,'e'), (31,20,'g'), (32,23,'f'), (37,18,'n'), (41,30,'l'), + (54,71,'j'), (53,15,'w'), (57,82,'z'), (58,12,'k'), (54,15,'p'), + (61,33,'c'), (60,42,'a'), (62,53,'x'), (67,17,'g'), (64,90,'v'); + +insert into t2 select a+10, b+10, concat(c,'f') from t2; + +analyze table t1,t2; + +explain +select t1.a,t.s,t.m +from t1 join + (select a, sum(t2.b) as s, min(t2.c) as m from t2 group by t2.a) t + on t1.a=t.a +where t1.b < 3; + +# +# Just show that choose_best_splitting function has coverage in the +# optimizer trace and re-optmization of child select inside it is distinct +# from the rest of join optimization. +select + json_detailed(json_extract(trace, '$**.choose_best_splitting')) +from + information_schema.optimizer_trace; + +# Same as above. just to show that splitting plan has some coverage in the +# trace. 
+select + json_detailed(json_extract(trace, '$**.lateral_derived')) +from + information_schema.optimizer_trace; + +drop table t1,t2; \ No newline at end of file diff --git a/sql/opt_split.cc b/sql/opt_split.cc index d272638f00c..fc584d94c45 100644 --- a/sql/opt_split.cc +++ b/sql/opt_split.cc @@ -187,6 +187,7 @@ #include "mariadb.h" #include "sql_select.h" +#include "opt_trace.h" /* Info on a splitting field */ struct SplM_field_info @@ -959,6 +960,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count, The key for splitting was chosen, look for the plan for this key in the cache */ + Json_writer_array spl_trace(thd, "choose_best_splitting"); spl_plan= spl_opt_info->find_plan(best_table, best_key, best_key_parts); if (!spl_plan) { @@ -1007,6 +1009,16 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count, spl_plan->cost= join->best_positions[join->table_count-1].read_time + + oper_cost; + if (unlikely(thd->trace_started())) + { + Json_writer_object wrapper(thd); + Json_writer_object find_trace(thd, "best_splitting"); + find_trace.add("table", best_table->alias.c_ptr()); + find_trace.add("key", best_table->key_info[best_key].name); + find_trace.add("record_count", record_count); + find_trace.add("cost", spl_plan->cost); + find_trace.add("unsplit_cost", spl_opt_info->unsplit_cost); + } memcpy((char *) spl_plan->best_positions, (char *) join->best_positions, sizeof(POSITION) * join->table_count); @@ -1033,6 +1045,11 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count, { startup_cost= record_count * spl_plan->cost; records= (ha_rows) (records * spl_plan->split_sel); + + Json_writer_object trace(thd, "lateral_derived"); + trace.add("startup_cost", startup_cost); + trace.add("splitting_cost", spl_plan->cost); + trace.add("records", records); } else startup_cost= spl_opt_info->unsplit_cost; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 8a251ae339e..dcbed6cba25 100644 --- a/sql/sql_select.cc +++ 
b/sql/sql_select.cc @@ -7375,7 +7375,6 @@ best_access_path(JOIN *join, DBUG_ENTER("best_access_path"); Json_writer_object trace_wrapper(thd, "best_access_path"); - Json_writer_array trace_paths(thd, "considered_access_paths"); bitmap_clear_all(eq_join_set); @@ -7383,6 +7382,7 @@ best_access_path(JOIN *join, if (s->table->is_splittable()) spl_plan= s->choose_best_splitting(record_count, remaining_tables); + Json_writer_array trace_paths(thd, "considered_access_paths"); if (s->keyuse) { /* Use key if possible */
1 0
0 0
[Commits] 8bb3a6e5bd4: Range Locking: make LockingIterator not lock the same range twice.
by psergey 26 Jul '21

26 Jul '21
revision-id: 8bb3a6e5bd499f81d704ed1f764c43517f14d390 (percona-202103-72-g8bb3a6e5bd4) parent(s): 7c8da0555e60c4eb5bdfda1d77016fa283eb8e49 author: Sergei Petrunia committer: Sergei Petrunia timestamp: 2021-07-26 21:33:56 +0300 message: Range Locking: make LockingIterator not lock the same range twice. LockingIterator may attempt to lock the same range multiple times if another transaction is inserting records into the range it is scanning through. Make the code avoid this. --- .../r/range_locking_seek_for_update2.result | 42 ++++++++++++++++ .../r/range_locking_seek_for_update2_rev_cf.result | 42 ++++++++++++++++ .../rocksdb/t/range_locking_seek_for_update2.inc | 50 +++++++++++++++++++ .../rocksdb/t/range_locking_seek_for_update2.test | 4 ++ .../t/range_locking_seek_for_update2_rev_cf.test | 4 ++ storage/rocksdb/rdb_locking_iter.cc | 2 + storage/rocksdb/rdb_locking_iter.h | 57 ++++++++++++++++------ 7 files changed, 185 insertions(+), 16 deletions(-) diff --git a/mysql-test/suite/rocksdb/r/range_locking_seek_for_update2.result b/mysql-test/suite/rocksdb/r/range_locking_seek_for_update2.result new file mode 100644 index 00000000000..329fd0063fc --- /dev/null +++ b/mysql-test/suite/rocksdb/r/range_locking_seek_for_update2.result @@ -0,0 +1,42 @@ +show variables like 'rocksdb_use_range_locking'; +Variable_name Value +rocksdb_use_range_locking ON +create table t0(a int primary key); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +pk int, +a int, +primary key (pk) comment 'rlsfu_test' +) engine=rocksdb; +insert into t1 (pk) +select +A.a + B.a*10 + C.a*100 +from +t0 A, t0 B, t0 C; +delete from t1 where pk<100; +connect con1,localhost,root,,; +connection con1; +begin; +set debug_sync='rocksdb.locking_iter_scan SIGNAL about_to_lock_range WAIT_FOR spoiler_inserted'; +select * from t1 where pk >=5 order by pk limit 5 for update; +connection default; +set debug_sync='now WAIT_FOR about_to_lock_range'; +insert into t1 (pk) values 
+(10),(20),(30),(40),(50); +set debug_sync='now SIGNAL spoiler_inserted'; +connection con1; +pk a +10 NULL +20 NULL +30 NULL +40 NULL +50 NULL +# This must return 1, no 5: +select lock_count from information_schema.rocksdb_trx +where thread_id=CONNECTION_ID(); +lock_count +1 +rollback; +disconnect con1; +connection default; +drop table t0, t1; diff --git a/mysql-test/suite/rocksdb/r/range_locking_seek_for_update2_rev_cf.result b/mysql-test/suite/rocksdb/r/range_locking_seek_for_update2_rev_cf.result new file mode 100644 index 00000000000..3177f94821b --- /dev/null +++ b/mysql-test/suite/rocksdb/r/range_locking_seek_for_update2_rev_cf.result @@ -0,0 +1,42 @@ +show variables like 'rocksdb_use_range_locking'; +Variable_name Value +rocksdb_use_range_locking ON +create table t0(a int primary key); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +pk int, +a int, +primary key (pk) comment 'rev:rlsfu_test' +) engine=rocksdb; +insert into t1 (pk) +select +A.a + B.a*10 + C.a*100 +from +t0 A, t0 B, t0 C; +delete from t1 where pk<100; +connect con1,localhost,root,,; +connection con1; +begin; +set debug_sync='rocksdb.locking_iter_scan SIGNAL about_to_lock_range WAIT_FOR spoiler_inserted'; +select * from t1 where pk >=5 order by pk limit 5 for update; +connection default; +set debug_sync='now WAIT_FOR about_to_lock_range'; +insert into t1 (pk) values +(10),(20),(30),(40),(50); +set debug_sync='now SIGNAL spoiler_inserted'; +connection con1; +pk a +10 NULL +20 NULL +30 NULL +40 NULL +50 NULL +# This must return 1, no 5: +select lock_count from information_schema.rocksdb_trx +where thread_id=CONNECTION_ID(); +lock_count +1 +rollback; +disconnect con1; +connection default; +drop table t0, t1; diff --git a/mysql-test/suite/rocksdb/t/range_locking_seek_for_update2.inc b/mysql-test/suite/rocksdb/t/range_locking_seek_for_update2.inc new file mode 100644 index 00000000000..8287cd59030 --- /dev/null +++ 
b/mysql-test/suite/rocksdb/t/range_locking_seek_for_update2.inc @@ -0,0 +1,50 @@ + + +--source include/have_rocksdb.inc +--source include/have_debug_sync.inc +--source suite/rocksdb/include/have_range_locking.inc +--enable_connect_log +show variables like 'rocksdb_use_range_locking'; + + +create table t0(a int primary key); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +eval create table t1 ( + pk int, + a int, + primary key (pk) comment '$cf' +) engine=rocksdb; + +insert into t1 (pk) +select + A.a + B.a*10 + C.a*100 +from + t0 A, t0 B, t0 C; +delete from t1 where pk<100; + +connect (con1,localhost,root,,); +connection con1; + +begin; +set debug_sync='rocksdb.locking_iter_scan SIGNAL about_to_lock_range WAIT_FOR spoiler_inserted'; +send +select * from t1 where pk >=5 order by pk limit 5 for update; + +connection default; +set debug_sync='now WAIT_FOR about_to_lock_range'; +insert into t1 (pk) values +(10),(20),(30),(40),(50); +set debug_sync='now SIGNAL spoiler_inserted'; + +connection con1; +reap; +--echo # This must return 1, no 5: +select lock_count from information_schema.rocksdb_trx +where thread_id=CONNECTION_ID(); + +rollback; +disconnect con1; +connection default; +drop table t0, t1; + diff --git a/mysql-test/suite/rocksdb/t/range_locking_seek_for_update2.test b/mysql-test/suite/rocksdb/t/range_locking_seek_for_update2.test new file mode 100644 index 00000000000..703331cab9a --- /dev/null +++ b/mysql-test/suite/rocksdb/t/range_locking_seek_for_update2.test @@ -0,0 +1,4 @@ + +--let cf=rlsfu_test +--source range_locking_seek_for_update2.inc + diff --git a/mysql-test/suite/rocksdb/t/range_locking_seek_for_update2_rev_cf.test b/mysql-test/suite/rocksdb/t/range_locking_seek_for_update2_rev_cf.test new file mode 100644 index 00000000000..0620593b4e7 --- /dev/null +++ b/mysql-test/suite/rocksdb/t/range_locking_seek_for_update2_rev_cf.test @@ -0,0 +1,4 @@ + +--let cf=rev:rlsfu_test + +--source range_locking_seek_for_update2.inc diff --git 
a/storage/rocksdb/rdb_locking_iter.cc b/storage/rocksdb/rdb_locking_iter.cc index 1378de6f655..39858bd6f35 100644 --- a/storage/rocksdb/rdb_locking_iter.cc +++ b/storage/rocksdb/rdb_locking_iter.cc @@ -27,12 +27,14 @@ rocksdb::Iterator* GetLockingIterator( */ void LockingIterator::Seek(const rocksdb::Slice& target) { + m_have_locked_until = false; m_iter = m_txn->GetIterator(m_read_opts, m_cfh); m_iter->Seek(target); ScanForward(target, false); } void LockingIterator::SeekForPrev(const rocksdb::Slice& target) { + m_have_locked_until = false; m_iter = m_txn->GetIterator(m_read_opts, m_cfh); m_iter->SeekForPrev(target); ScanBackward(target, false); diff --git a/storage/rocksdb/rdb_locking_iter.h b/storage/rocksdb/rdb_locking_iter.h index 433e2230c7d..25ad4b7fd5a 100644 --- a/storage/rocksdb/rdb_locking_iter.h +++ b/storage/rocksdb/rdb_locking_iter.h @@ -44,6 +44,14 @@ class LockingIterator : public rocksdb::Iterator { bool m_valid; ulonglong *m_lock_count; + + // If true, m_locked_until has a valid key value. + bool m_have_locked_until; + + // The key value until we've locked the range. That is, we have a range lock + // on [current_position ... m_locked_until]. + // This is used to avoid making extra GetRangeLock() calls. + std::string m_locked_until; public: LockingIterator(rocksdb::Transaction *txn, rocksdb::ColumnFamilyHandle *cfh, @@ -53,7 +61,7 @@ class LockingIterator : public rocksdb::Iterator { ) : m_txn(txn), m_cfh(cfh), m_is_rev_cf(is_rev_cf), m_read_opts(opts), m_iter(nullptr), m_status(rocksdb::Status::InvalidArgument()), m_valid(false), - m_lock_count(lock_count) {} + m_lock_count(lock_count), m_have_locked_until(false) {} ~LockingIterator() { delete m_iter; @@ -111,24 +119,43 @@ class LockingIterator : public rocksdb::Iterator { return; } + const int inv = forward ? 
1 : -1; + auto cmp= m_cfh->GetComparator(); + auto end_key = m_iter->key(); bool endp_arg= m_is_rev_cf; - if (forward) { - m_status = m_txn->GetRangeLock(m_cfh, - rocksdb::Endpoint(target, endp_arg), - rocksdb::Endpoint(end_key, endp_arg)); + + if (m_have_locked_until && + cmp->Compare(end_key, rocksdb::Slice(m_locked_until))*inv <= 0) { + // We've already locked this range. The following has happened: + // - m_iter->key() returned $KEY + // - we got a range lock on [range_start, $KEY] + // - other transaction(s) have inserted row $ROW before the $KEY. + // - we've read $ROW and returned. + // Now, we're looking to lock [$ROW, $KEY] but we don't need to, + // we already have a lock on this range. } else { - m_status = m_txn->GetRangeLock(m_cfh, - rocksdb::Endpoint(end_key, endp_arg), - rocksdb::Endpoint(target, endp_arg)); - } + if (forward) { + m_status = m_txn->GetRangeLock(m_cfh, + rocksdb::Endpoint(target, endp_arg), + rocksdb::Endpoint(end_key, endp_arg)); + } else { + m_status = m_txn->GetRangeLock(m_cfh, + rocksdb::Endpoint(end_key, endp_arg), + rocksdb::Endpoint(target, endp_arg)); + } - if (!m_status.ok()) { - // Failed to get a lock (most likely lock wait timeout) - m_valid = false; - return; + // Save the bound where we locked until: + m_have_locked_until= true; + m_locked_until.assign(end_key.data(), end_key.size()); + if (!m_status.ok()) { + // Failed to get a lock (most likely lock wait timeout) + m_valid = false; + return; + } + if (m_lock_count) (*m_lock_count)++; } - if (m_lock_count) (*m_lock_count)++; + std::string end_key_copy= end_key.ToString(); //Ok, now we have a lock which is inhibiting modifications in the range @@ -146,7 +173,6 @@ class LockingIterator : public rocksdb::Iterator { else m_iter->SeekForPrev(target); - auto cmp= m_cfh->GetComparator(); if (call_next && m_iter->Valid() && !cmp->Compare(m_iter->key(), target)) { if (forward) @@ -156,7 +182,6 @@ class LockingIterator : public rocksdb::Iterator { } if (m_iter->Valid()) { - int 
inv = forward ? 1 : -1; if (cmp->Compare(m_iter->key(), rocksdb::Slice(end_key_copy))*inv <= 0) { // Ok, the found key is within the range. m_status = rocksdb::Status::OK();
1 0
0 0
[Commits] d930847: MDEV-25484 Crash when parsing query using derived table containing TVC
by IgorBabaev 23 Jul '21

23 Jul '21
revision-id: d930847b540c7ae37c2d33f449341bb3f050d338 (mariadb-10.3.26-206-gd930847) parent(s): 6190a02f3593f82e2d4916ed660afb5e5d631a5a author: Igor Babaev committer: Igor Babaev timestamp: 2021-07-23 06:42:50 -0700 message: MDEV-25484 Crash when parsing query using derived table containing TVC This patch fixes parsing problems concerning derived tables that use table value constructors (TVC) with LIMIT and ORDER BY clauses of the form ((VALUES ... LIMIT ...) ORDER BY ...) as dt The fix has to be applied only to 10.3 as 10.4 that employs a different grammar rules has no such problems. The test cases should be merged upstream. Approved by Oleksandr Byelkin <sanja(a)mariadb.com> --- mysql-test/main/table_value_constr.result | 38 +++++++++++++++++++++++++++++++ mysql-test/main/table_value_constr.test | 22 ++++++++++++++++++ sql/sql_lex.cc | 9 +++++++- sql/sql_yacc.yy | 12 ++++++---- 4 files changed, 76 insertions(+), 5 deletions(-) diff --git a/mysql-test/main/table_value_constr.result b/mysql-test/main/table_value_constr.result index ff6d19a..1d1cd05 100644 --- a/mysql-test/main/table_value_constr.result +++ b/mysql-test/main/table_value_constr.result @@ -3062,4 +3062,42 @@ a 2 3 drop table t1; +# +# MDEV-25484: Derived table using TVC with LIMIT and ORDER BY +# +create table t1 (a int); +insert into t1 values (3), (7), (1); +select * from ( (select * from t1 limit 2) order by 1 desc) as dt; +a +7 +3 +(values (3), (7), (1) limit 2) order by 1 desc; +3 +7 +3 +select * from ( (values (3), (7), (1) limit 2) order by 1 desc) as dt; +3 +7 +3 +select * from ( select * from t1 order by 1 limit 2 ) as dt; +a +1 +3 +values (3),(7),(1) order by 1 limit 2; +3 +1 +3 +select * from ( values (3),(7),(1) order by 1 limit 2 ) as dt; +3 +1 +3 +values (3),(7),(1) union values (2),(4) order by 1 limit 2; +3 +1 +2 +select * from (values (3),(7),(1) union values (2),(4) order by 1 limit 2) as dt; +3 +1 +2 +drop table t1; End of 10.3 tests diff --git 
a/mysql-test/main/table_value_constr.test b/mysql-test/main/table_value_constr.test index 3e976f8..d139625 100644 --- a/mysql-test/main/table_value_constr.test +++ b/mysql-test/main/table_value_constr.test @@ -1628,4 +1628,26 @@ select * from t1; drop table t1; + +--echo # +--echo # MDEV-25484: Derived table using TVC with LIMIT and ORDER BY +--echo # + +create table t1 (a int); +insert into t1 values (3), (7), (1); + +select * from ( (select * from t1 limit 2) order by 1 desc) as dt; +(values (3), (7), (1) limit 2) order by 1 desc; +select * from ( (values (3), (7), (1) limit 2) order by 1 desc) as dt; + + +select * from ( select * from t1 order by 1 limit 2 ) as dt; +values (3),(7),(1) order by 1 limit 2; +select * from ( values (3),(7),(1) order by 1 limit 2 ) as dt; + +values (3),(7),(1) union values (2),(4) order by 1 limit 2; +select * from (values (3),(7),(1) union values (2),(4) order by 1 limit 2) as dt; + +drop table t1; + --echo End of 10.3 tests diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 2a337b0..8de645a 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -8396,8 +8396,15 @@ bool LEX::tvc_finalize_derived() thd->parse_error(); return true; } + if (unlikely(!(current_select->tvc= + new (thd->mem_root) + table_value_constr(many_values, + current_select, + current_select->options)))) + return true; + restore_values_list_state(); current_select->linkage= DERIVED_TABLE_TYPE; - return tvc_finalize(); + return false; } diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index b915667..7d13dd8 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -12855,10 +12855,13 @@ order_clause: created yet. 
*/ SELECT_LEX *first_sl= unit->first_select(); - if (unlikely(!unit->is_unit_op() && - (first_sl->order_list.elements || - first_sl->select_limit) && + if (unlikely(!first_sl->next_select() && first_sl->tvc && unit->add_fake_select_lex(thd))) + MYSQL_YYABORT; + else if (unlikely(!unit->is_unit_op() && + (first_sl->order_list.elements || + first_sl->select_limit) && + unit->add_fake_select_lex(thd))) MYSQL_YYABORT; } if (sel->master_unit()->is_unit_op() && !sel->braces) @@ -12907,7 +12910,8 @@ limit_clause_init: LIMIT { SELECT_LEX *sel= Select; - if (sel->master_unit()->is_unit_op() && !sel->braces) + if (sel->master_unit()->is_unit_op() && !sel->braces && + sel->master_unit()->fake_select_lex) { /* Move LIMIT that belongs to UNION to fake_select_lex */ Lex->current_select= sel->master_unit()->fake_select_lex;
1 0
0 0
[Commits] c164835: MDEV-26202 Unexpected failure with query using indirectly a recursive CTE twice
by IgorBabaev 23 Jul '21

23 Jul '21
revision-id: c164835a45ae3fea59eb49ff0cb00fbfbccc12d0 (mariadb-10.2.31-1075-gc164835) parent(s): 4aeb2b1c6c542e09be2474644358038279197529 author: Igor Babaev committer: Igor Babaev timestamp: 2021-07-22 17:07:17 -0700 message: MDEV-26202 Unexpected failure with query using indirectly a recursive CTE twice This bug was fixed by the patch for bug MDEV-26025. Only a new test case is added here. --- mysql-test/r/cte_recursive.result | 22 ++++++++++++++++++++++ mysql-test/t/cte_recursive.test | 14 ++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index 1b1fd8b..168777f 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -4519,5 +4519,27 @@ drop procedure sp1; drop procedure sp2; drop table t1; # +# MDEV-26202: Recursive CTE used indirectly twice +# (fixed by the patch forMDEV-26025) +# +with recursive +rcte as ( SELECT 1 AS a +UNION ALL +SELECT cast(a + 1 as unsigned int) FROM rcte WHERE a < 3), +cte1 AS (SELECT a FROM rcte), +cte2 AS (SELECT a FROM cte1), +cte3 AS ( SELECT a FROM cte2) +SELECT * FROM cte2, cte3; +a a +1 1 +2 1 +3 1 +1 2 +2 2 +3 2 +1 3 +2 3 +3 3 +# # End of 10.2 tests # diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index cdd3a07..3c845f9 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -2885,5 +2885,19 @@ drop procedure sp2; drop table t1; --echo # +--echo # MDEV-26202: Recursive CTE used indirectly twice +--echo # (fixed by the patch forMDEV-26025) +--echo # + +with recursive + rcte as ( SELECT 1 AS a + UNION ALL + SELECT cast(a + 1 as unsigned int) FROM rcte WHERE a < 3), + cte1 AS (SELECT a FROM rcte), + cte2 AS (SELECT a FROM cte1), + cte3 AS ( SELECT a FROM cte2) +SELECT * FROM cte2, cte3; + +--echo # --echo # End of 10.2 tests --echo #
1 0
0 0
[Commits] 3ac32917ab6: MDEV-21130: Histograms: use JSON as on-disk format
by Sergei Petrunia 22 Jul '21

22 Jul '21
revision-id: 3ac32917ab6c42a5a0f9ed817dd8d3c7e20ce34d (mariadb-10.6.2-68-g3ac32917ab6) parent(s): 5ddb8069145b518426be7fd31881d1d3fa5f53b4 author: Sergei Petrunia committer: Sergei Petrunia timestamp: 2021-07-23 01:26:50 +0300 message: MDEV-21130: Histograms: use JSON as on-disk format Preparation for handling different kinds of histograms: - In Column_statistics, change "Histogram histogram" into "Histogram *histogram_". This allows for different kinds of Histogram classes with virtual functions. - [Almost] remove the usage of Histogram->set_values and Histogram->set_size. The code outside the histogram should not make any assumptions about what/how is stored in the Histogram. - Introduce drafts of methods to read/save histograms to/from disk. --- sql/sql_statistics.cc | 214 ++++++++++++++++++++++++++++++++++++-------------- sql/sql_statistics.h | 90 ++++++++++++++------- sql/table.h | 10 ++- 3 files changed, 226 insertions(+), 88 deletions(-) diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index 56e20ecf48e..2ec9aaa5965 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -311,7 +311,7 @@ class Column_statistics_collected :public Column_statistics inline void init(THD *thd, Field * table_field); inline bool add(); - inline void finish(ha_rows rows, double sample_fraction); + inline void finish(MEM_ROOT *mem_root, ha_rows rows, double sample_fraction); inline void cleanup(); }; @@ -1068,21 +1068,22 @@ class Column_stat: public Stat_table stat_field->store(stats->get_avg_frequency()); break; case COLUMN_STAT_HIST_SIZE: - stat_field->store(stats->histogram.get_size()); + // Note: this is dumb. the histogram size is stored with the + // histogram! + stat_field->store(stats->histogram_? 
+ stats->histogram_->get_size() : 0); break; case COLUMN_STAT_HIST_TYPE: - stat_field->store(stats->histogram.get_type() + 1); + if (stats->histogram_) + stat_field->store(stats->histogram_->get_type() + 1); + else + stat_field->set_null(); break; case COLUMN_STAT_HISTOGRAM: - if (stats->histogram.get_type() == JSON) - { - stat_field->store((char *) stats->histogram.get_values(), - strlen((char *) stats->histogram.get_values()), &my_charset_bin); - } else - { - stat_field->store((char *) stats->histogram.get_values(), - stats->histogram.get_size(), &my_charset_bin); - } + if (stats->histogram_) + stats->histogram_->serialize(stat_field); + else + stat_field->set_null(); break; } } @@ -1111,6 +1112,7 @@ class Column_stat: public Stat_table void get_stat_values() { table_field->read_stats->set_all_nulls(); + table_field->read_stats->histogram_type_on_disk= INVALID_HISTOGRAM; if (table_field->read_stats->min_value) table_field->read_stats->min_value->set_null(); @@ -1122,7 +1124,7 @@ class Column_stat: public Stat_table char buff[MAX_FIELD_WIDTH]; String val(buff, sizeof(buff), &my_charset_bin); - for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_HIST_TYPE; i++) + for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_HISTOGRAM; i++) { Field *stat_field= stat_table->field[i]; @@ -1166,13 +1168,22 @@ class Column_stat: public Stat_table table_field->read_stats->set_avg_frequency(stat_field->val_real()); break; case COLUMN_STAT_HIST_SIZE: - table_field->read_stats->histogram.set_size(stat_field->val_int()); + //TODO: ignore this. The size is a part of histogram! + //table_field->read_stats->histogram.set_size(stat_field->val_int()); break; case COLUMN_STAT_HIST_TYPE: - Histogram_type hist_type= (Histogram_type) (stat_field->val_int() - - 1); - table_field->read_stats->histogram.set_type(hist_type); - break; + // TODO: save this next to histogram. 
+ // For some reason, the histogram itself is read in + // read_histograms_for_table + { + Histogram_type hist_type= (Histogram_type) (stat_field->val_int() - + 1); + table_field->read_stats->histogram_type_on_disk= hist_type; + break; + } + case COLUMN_STAT_HISTOGRAM: + //TODO: if stat_field->length() == 0 then histogram_type_on_disk is set to INVALID_HISTOGRAM + break; } } } @@ -1195,7 +1206,7 @@ class Column_stat: public Stat_table of read_stats->histogram. */ - void get_histogram_value() + Histogram * load_histogram(MEM_ROOT *mem_root) { if (find_stat()) { @@ -1205,13 +1216,54 @@ class Column_stat: public Stat_table Field *stat_field= stat_table->field[fldno]; table_field->read_stats->set_not_null(fldno); stat_field->val_str(&val); - memcpy(table_field->read_stats->histogram.get_values(), - val.ptr(), table_field->read_stats->histogram.get_size()); + // histogram-todo: here, create the histogram of appropriate type. + Histogram *hist= new (mem_root) Histogram(); + if (!hist->parse(mem_root, table_field->read_stats->histogram_type_on_disk, + (const uchar*)val.ptr(), val.length())) + { + table_field->read_stats->histogram_= hist; + return hist; + } + //memcpy(table_field->read_stats->histogram_.get_values(), + // val.ptr(), table_field->read_stats->histogram.get_size()); } + return NULL; } - }; +bool Histogram::parse(MEM_ROOT *mem_root, Histogram_type type_arg, const uchar *ptr_arg, uint size_arg) +{ + // Just copy the data + size = (uint8) size_arg; + type = type_arg; + values = (uchar*)alloc_root(mem_root, size_arg); + memcpy(values, ptr_arg, size_arg); + return false; +} + + +/* + Save the histogram data info a table field. 
+*/ +void Histogram::serialize(Field *field) +{ + if (get_type() == JSON) + { + field->store((char*)get_values(), strlen((char*)get_values()), + &my_charset_bin); + } + else + field->store((char*)get_values(), get_size(), &my_charset_bin); +} + +void Histogram::init_for_collection(MEM_ROOT *mem_root, + Histogram_type htype_arg, + ulonglong size_arg) +{ + type= htype_arg; + values = (uchar*)alloc_root(mem_root, size_arg); + size= (uint8) size_arg; +} /* An object of the class Index_stat is created to read statistical @@ -1552,7 +1604,7 @@ class Histogram_builder Column_statistics *col_stats= col->collected_stats; min_value= col_stats->min_value; max_value= col_stats->max_value; - histogram= &col_stats->histogram; + histogram= col_stats->histogram_; hist_width= histogram->get_width(); bucket_capacity= (double) records / (hist_width + 1); curr_bucket= 0; @@ -1605,7 +1657,7 @@ std::vector<std::string> bucket_bounds = {}; Column_statistics *col_stats= col->collected_stats; min_value= col_stats->min_value; max_value= col_stats->max_value; - histogram= &col_stats->histogram; + histogram= col_stats->histogram_; hist_width= histogram->get_width(); bucket_capacity= (double) records / (hist_width + 1); curr_bucket= 0; @@ -1765,9 +1817,9 @@ class Count_distinct_field: public Sql_alloc @brief Calculate a histogram of the tree */ - void walk_tree_with_histogram(ha_rows rows) + void walk_tree_with_histogram(ha_rows rows) { - if(table_field->collected_stats->histogram.get_type() == JSON) + if (table_field->collected_stats->histogram_->get_type() == JSON) { Histogram_builder_json hist_builder(table_field, tree_key_length, rows); tree->walk(table_field->table, json_histogram_build_walk, @@ -1775,7 +1827,8 @@ class Count_distinct_field: public Sql_alloc hist_builder.build(); distincts= hist_builder.get_count_distinct(); distincts_single_occurence= hist_builder.get_count_single_occurence(); - } else + } + else { Histogram_builder hist_builder(table_field, tree_key_length, rows); 
tree->walk(table_field->table, histogram_build_walk, @@ -1799,18 +1852,19 @@ class Count_distinct_field: public Sql_alloc @brief Get the size of the histogram in bytes built for table_field */ + /* uint get_hist_size() { return table_field->collected_stats->histogram.get_size(); - } + }*/ /* @brief Get the pointer to the histogram built for table_field */ - uchar *get_histogram() + Histogram *get_histogram() { - return table_field->collected_stats->histogram.get_values(); + return table_field->collected_stats->histogram_; } }; @@ -2209,7 +2263,7 @@ int alloc_statistics_for_table(THD* thd, TABLE *table) uint key_parts= table->s->ext_key_parts; ulonglong *idx_avg_frequency= (ulonglong*) alloc_root(&table->mem_root, sizeof(ulonglong) * key_parts); - +/* uint hist_size= thd->variables.histogram_size; Histogram_type hist_type= (Histogram_type) (thd->variables.histogram_type); uchar *histogram= NULL; @@ -2220,16 +2274,16 @@ int alloc_statistics_for_table(THD* thd, TABLE *table) bzero(histogram, hist_size * columns); } - - if (!table_stats || !column_stats || !index_stats || !idx_avg_frequency || - (hist_size && !histogram)) +*/ + if (!table_stats || !column_stats || !index_stats || !idx_avg_frequency) + //|| (hist_size && !histogram)) DBUG_RETURN(1); table->collected_stats= table_stats; table_stats->column_stats= column_stats; table_stats->index_stats= index_stats; table_stats->idx_avg_frequency= idx_avg_frequency; - table_stats->histograms= histogram; + //table_stats->histograms= histogram; memset(column_stats, 0, sizeof(Column_statistics) * columns); @@ -2237,10 +2291,12 @@ int alloc_statistics_for_table(THD* thd, TABLE *table) { if (bitmap_is_set(table->read_set, (*field_ptr)->field_index)) { + column_stats->histogram_ = NULL; + /* column_stats->histogram.set_size(hist_size); column_stats->histogram.set_type(hist_type); column_stats->histogram.set_values(histogram); - histogram+= hist_size; + histogram+= hist_size;*/ (*field_ptr)->collected_stats= column_stats++; } } 
@@ -2459,6 +2515,25 @@ bool Column_statistics_collected::add() } +/* + Create an empty Histogram object from histogram_type. + + Note: it is not yet clear whether collection-time histogram should be the same + as lookup-time histogram. At the moment, they are. +*/ + +Histogram* get_histogram_by_type(MEM_ROOT *mem_root, Histogram_type hist_type) { + switch (hist_type) { + case SINGLE_PREC_HB: + case DOUBLE_PREC_HB: + case JSON: + return new Histogram(); + default: + DBUG_ASSERT(0); + } + return NULL; +}; + /** @brief Get the results of aggregation when collecting the statistics on a column @@ -2468,7 +2543,7 @@ bool Column_statistics_collected::add() */ inline -void Column_statistics_collected::finish(ha_rows rows, double sample_fraction) +void Column_statistics_collected::finish(MEM_ROOT *mem_root, ha_rows rows, double sample_fraction) { double val; @@ -2486,10 +2561,19 @@ void Column_statistics_collected::finish(ha_rows rows, double sample_fraction) } if (count_distinct) { - uint hist_size= count_distinct->get_hist_size(); + //uint hist_size= count_distinct->get_hist_size(); + uint hist_size= current_thd->variables.histogram_size; + Histogram_type hist_type= (Histogram_type) (current_thd->variables.histogram_type); + bool have_histogram= false; + if (hist_size != 0 && hist_type != INVALID_HISTOGRAM) + { + have_histogram= true; + histogram_= new Histogram; + histogram_->init_for_collection(mem_root, hist_type, hist_size); + } /* Compute cardinality statistics and optionally histogram. */ - if (hist_size == 0) + if (!have_histogram) count_distinct->walk_tree(); else count_distinct->walk_tree_with_histogram(rows - nulls); @@ -2527,13 +2611,14 @@ void Column_statistics_collected::finish(ha_rows rows, double sample_fraction) set_not_null(COLUMN_STAT_AVG_FREQUENCY); } else - hist_size= 0; - histogram.set_size(hist_size); + have_histogram= false ; // TODO: need this? 
+ //histogram.set_size(hist_size); set_not_null(COLUMN_STAT_HIST_SIZE); - if (hist_size && distincts) + if (have_histogram && distincts) { set_not_null(COLUMN_STAT_HIST_TYPE); - histogram.set_values(count_distinct->get_histogram()); + //histogram.set_values(count_distinct->get_histogram()); + histogram_= count_distinct->get_histogram(); set_not_null(COLUMN_STAT_HISTOGRAM); } delete count_distinct; @@ -2795,7 +2880,7 @@ int collect_statistics_for_table(THD *thd, TABLE *table) continue; bitmap_set_bit(table->write_set, table_field->field_index); if (!rc) - table_field->collected_stats->finish(rows, sample_fraction); + table_field->collected_stats->finish(&table->mem_root, rows, sample_fraction); else table_field->collected_stats->cleanup(); } @@ -3001,16 +3086,19 @@ int read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables) /* Read statistics from the statistical table column_stats */ stat_table= stat_tables[COLUMN_STAT].table; - ulong total_hist_size= 0; + //ulong total_hist_size= 0; + bool have_histograms= false; Column_stat column_stat(stat_table, table); for (field_ptr= table_share->field; *field_ptr; field_ptr++) { table_field= *field_ptr; column_stat.set_key_fields(table_field); column_stat.get_stat_values(); - total_hist_size+= table_field->read_stats->histogram.get_size(); + //total_hist_size+= table_field->read_stats->histogram.get_size(); + if (table_field->read_stats->histogram_type_on_disk != INVALID_HISTOGRAM) + have_histograms= true; } - table_share->stats_cb.total_hist_size= total_hist_size; + table_share->stats_cb.total_hist_size= have_histograms? 
1:0; // total_hist_size /* Read statistics from the statistical table index_stats */ stat_table= stat_tables[INDEX_STAT].table; @@ -3147,28 +3235,36 @@ int read_histograms_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables) { TABLE_STATISTICS_CB *stats_cb= &table->s->stats_cb; DBUG_ENTER("read_histograms_for_table"); - + + // histograms-todo: why do we use synchronization here, when we load + // histogram for the TABLE object, not TABLE_SHARE? + // is it because of the use of stats_cb->mem_root? if (stats_cb->start_histograms_load()) { - uchar *histogram= (uchar *) alloc_root(&stats_cb->mem_root, - stats_cb->total_hist_size); + //uchar *histogram= (uchar *) alloc_root(&stats_cb->mem_root, + // stats_cb->total_hist_size); + /* if (!histogram) { stats_cb->abort_histograms_load(); DBUG_RETURN(1); } - memset(histogram, 0, stats_cb->total_hist_size); + */ + //memset(histogram, 0, stats_cb->total_hist_size); Column_stat column_stat(stat_tables[COLUMN_STAT].table, table); for (Field **field_ptr= table->s->field; *field_ptr; field_ptr++) { Field *table_field= *field_ptr; - if (uint hist_size= table_field->read_stats->histogram.get_size()) + //if (uint hist_size= table_field->read_stats->histogram.get_size()) + if (table_field->read_stats->histogram_type_on_disk != INVALID_HISTOGRAM) { column_stat.set_key_fields(table_field); - table_field->read_stats->histogram.set_values(histogram); - column_stat.get_histogram_value(); - histogram+= hist_size; + //table_field->read_stats->histogram.set_values(histogram); + + table_field->read_stats->histogram_= + column_stat.load_histogram(&stats_cb->mem_root); + //histogram+= hist_size; } } stats_cb->end_histograms_load(); @@ -3860,8 +3956,8 @@ double get_column_range_cardinality(Field *field, if (avg_frequency > 1.0 + 0.000001 && col_stats->min_max_values_are_provided()) { - Histogram *hist= &col_stats->histogram; - if (hist->is_usable(thd)) + Histogram *hist= col_stats->histogram_; + if (hist && hist->is_usable(thd)) { 
store_key_image_to_rec(field, (uchar *) min_endp->key, field->key_length()); @@ -3904,8 +4000,8 @@ double get_column_range_cardinality(Field *field, else max_mp_pos= 1.0; - Histogram *hist= &col_stats->histogram; - if (hist->is_usable(thd)) + Histogram *hist= col_stats->histogram_; + if (hist && hist->is_usable(thd)) sel= hist->range_selectivity(min_mp_pos, max_mp_pos); else sel= (max_mp_pos - min_mp_pos); diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h index a554721d50b..178bc11a278 100644 --- a/sql/sql_statistics.h +++ b/sql/sql_statistics.h @@ -43,7 +43,8 @@ enum enum_histogram_type { SINGLE_PREC_HB, DOUBLE_PREC_HB, - JSON + JSON, + INVALID_HISTOGRAM } Histogram_type; enum enum_stat_tables @@ -141,40 +142,70 @@ double get_column_range_cardinality(Field *field, bool is_stat_table(const LEX_CSTRING *db, LEX_CSTRING *table); bool is_eits_usable(Field* field); -class Histogram +/* + Common base for all histograms +*/ +class Histogram_base : public Sql_alloc { +public: + virtual bool parse(MEM_ROOT *mem_root, Histogram_type type_arg, + const uchar *ptr, uint size)= 0; + virtual void serialize(Field *to_field)= 0; -private: - Histogram_type type; - uint8 size; /* Size of values array, in bytes */ - uchar *values; + virtual Histogram_type get_type()=0; + + // Legacy: return the size of the histogram on disk. + // This will be stored in mysql.column_stats.hist_size column. + // Newer, JSON-based histograms may return 0. 
+ virtual uint get_size()=0; - uint prec_factor() + virtual ~Histogram_base(){} +}; + +class Histogram : public Histogram_base +{ +public: + bool parse(MEM_ROOT *mem_root, Histogram_type type_arg, + const uchar *ptr_arg, uint size_arg) override; + void serialize(Field *to_field) override; + Histogram_type get_type() override { return type; } + + uint get_size() override { return (uint) size; } + + // returns number of buckets in the histogram + uint get_width() { switch (type) { case SINGLE_PREC_HB: case JSON: - return ((uint) (1 << 8) - 1); + return size; case DOUBLE_PREC_HB: - return ((uint) (1 << 16) - 1); + return size / 2; + default: + DBUG_ASSERT(0); } - return 1; + return 0; } -public: - uint get_width() +private: + Histogram_type type; + uint8 size; /* Size of values array, in bytes */ + uchar *values; + + uint prec_factor() { switch (type) { case SINGLE_PREC_HB: case JSON: - return size; + return ((uint) (1 << 8) - 1); case DOUBLE_PREC_HB: - return size / 2; + return ((uint) (1 << 16) - 1); + default: + DBUG_ASSERT(0); } - return 0; + return 1; } -private: uint get_value(uint i) { DBUG_ASSERT(i < get_width()); @@ -184,6 +215,8 @@ class Histogram return (uint) (((uint8 *) values)[i]); case DOUBLE_PREC_HB: return (uint) uint2korr(values + i * 2); + default: + DBUG_ASSERT(0); } return 0; } @@ -227,19 +260,13 @@ class Histogram return i; } -public: - - uint get_size() { return (uint) size; } - - Histogram_type get_type() { return type; } - uchar *get_values() { return (uchar *) values; } +public: + void init_for_collection(MEM_ROOT *mem_root, Histogram_type htype_arg, ulonglong size); - void set_size (ulonglong sz) { size= (uint8) sz; } - - void set_type (Histogram_type t) { type= t; } - + // Note: these two are used only for saving the JSON text: void set_values (uchar *vals) { values= (uchar *) vals; } + void set_size (ulonglong sz) { size= (uint8) sz; } bool is_available() { return get_size() > 0 && get_values(); } @@ -264,6 +291,9 @@ class Histogram case 
DOUBLE_PREC_HB: int2store(values + i * 2, val * prec_factor()); return; + default: + DBUG_ASSERT(0); + return; } } @@ -277,6 +307,9 @@ class Histogram case DOUBLE_PREC_HB: int2store(values + i * 2, uint2korr(values + i * 2 - 2)); return; + default: + DBUG_ASSERT(0); + return; } } @@ -314,7 +347,7 @@ class Table_statistics /* Array of records per key for index prefixes */ ulonglong *idx_avg_frequency; - uchar *histograms; /* Sequence of histograms */ + //uchar *histograms; /* Sequence of histograms */ }; @@ -377,7 +410,8 @@ class Column_statistics public: - Histogram histogram; + Histogram_type histogram_type_on_disk; + Histogram *histogram_; uint32 no_values_provided_bitmap() { diff --git a/sql/table.h b/sql/table.h index 2e074abcea0..f557f4ca59e 100644 --- a/sql/table.h +++ b/sql/table.h @@ -679,7 +679,15 @@ class TABLE_STATISTICS_CB public: MEM_ROOT mem_root; /* MEM_ROOT to allocate statistical data for the table */ Table_statistics *table_stats; /* Structure to access the statistical data */ - ulong total_hist_size; /* Total size of all histograms */ + + /* + Total size of all histograms. A value of 0 means historams are not present, + and histograms_are_ready() can finish sooner. + + Currently we just set it to 1 when we expect to load histograms. + histogram-todo: rename this or even remove? + */ + ulong total_hist_size; bool histograms_are_ready() const {
1 0
0 0
[Commits] 5289376: MDEV-25484 Crash when parsing query using derived table containing TVC
by IgorBabaev 22 Jul '21

22 Jul '21
revision-id: 528937607f8da8b99ef72d28ead839d7f319b980 (mariadb-10.3.26-206-g5289376) parent(s): 6190a02f3593f82e2d4916ed660afb5e5d631a5a author: Igor Babaev committer: Igor Babaev timestamp: 2021-07-22 11:21:39 -0700 message: MDEV-25484 Crash when parsing query using derived table containing TVC This patch fixes parsing problems concerning derived tables that use table value constructors (TVC) with LIMIT and ORDER BY clauses of the form ((VALUES ... LIMIT ...) ORDER BY ...) as dt The fix has to be applied only to 10.3 as 10.4 that employs a different grammar rules has no such problems. The test cases should be merged upstream. Approved by Oleksandr Byelkin <sanja(a)mariadb.com> --- mysql-test/main/table_value_constr.test | 22 ++++++++++++++++++++++ sql/sql_lex.cc | 9 ++++++++- sql/sql_yacc.yy | 12 ++++++++---- 3 files changed, 38 insertions(+), 5 deletions(-) diff --git a/mysql-test/main/table_value_constr.test b/mysql-test/main/table_value_constr.test index 3e976f8..d139625 100644 --- a/mysql-test/main/table_value_constr.test +++ b/mysql-test/main/table_value_constr.test @@ -1628,4 +1628,26 @@ select * from t1; drop table t1; + +--echo # +--echo # MDEV-25484: Derived table using TVC with LIMIT and ORDER BY +--echo # + +create table t1 (a int); +insert into t1 values (3), (7), (1); + +select * from ( (select * from t1 limit 2) order by 1 desc) as dt; +(values (3), (7), (1) limit 2) order by 1 desc; +select * from ( (values (3), (7), (1) limit 2) order by 1 desc) as dt; + + +select * from ( select * from t1 order by 1 limit 2 ) as dt; +values (3),(7),(1) order by 1 limit 2; +select * from ( values (3),(7),(1) order by 1 limit 2 ) as dt; + +values (3),(7),(1) union values (2),(4) order by 1 limit 2; +select * from (values (3),(7),(1) union values (2),(4) order by 1 limit 2) as dt; + +drop table t1; + --echo End of 10.3 tests diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 2a337b0..8de645a 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -8396,8 +8396,15 @@ bool 
LEX::tvc_finalize_derived() thd->parse_error(); return true; } + if (unlikely(!(current_select->tvc= + new (thd->mem_root) + table_value_constr(many_values, + current_select, + current_select->options)))) + return true; + restore_values_list_state(); current_select->linkage= DERIVED_TABLE_TYPE; - return tvc_finalize(); + return false; } diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index b915667..7d13dd8 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -12855,10 +12855,13 @@ order_clause: created yet. */ SELECT_LEX *first_sl= unit->first_select(); - if (unlikely(!unit->is_unit_op() && - (first_sl->order_list.elements || - first_sl->select_limit) && + if (unlikely(!first_sl->next_select() && first_sl->tvc && unit->add_fake_select_lex(thd))) + MYSQL_YYABORT; + else if (unlikely(!unit->is_unit_op() && + (first_sl->order_list.elements || + first_sl->select_limit) && + unit->add_fake_select_lex(thd))) MYSQL_YYABORT; } if (sel->master_unit()->is_unit_op() && !sel->braces) @@ -12907,7 +12910,8 @@ limit_clause_init: LIMIT { SELECT_LEX *sel= Select; - if (sel->master_unit()->is_unit_op() && !sel->braces) + if (sel->master_unit()->is_unit_op() && !sel->braces && + sel->master_unit()->fake_select_lex) { /* Move LIMIT that belongs to UNION to fake_select_lex */ Lex->current_select= sel->master_unit()->fake_select_lex;
1 0
0 0
[Commits] 9545837: MDEV-26189 Missing handling of unknown column in WHERE of recursive CTE
by IgorBabaev 21 Jul '21

21 Jul '21
revision-id: 9545837ad14c64e2c4491eaa1bd1c56bc2cbc589 (mariadb-10.2.31-1070-g9545837) parent(s): 872422dcbbe3681a794935fb2cae422d9d5f4108 author: Igor Babaev committer: Igor Babaev timestamp: 2021-07-20 23:14:43 -0700 message: MDEV-26189 Missing handling of unknown column in WHERE of recursive CTE SQL processor failed to catch references to unknown columns and other errors of the phase of semantic analysis in the specification of a hanging recursive CTE. This happened because the function With_clause::prepare_unreferenced_elements() failed to detect a CTE as a hanging CTE if the CTE was recursive. Fixing this problem in the code of the mentioned function opened another problem: EXPLAIN started including the lines for the specifications of hanging recursive CTEs in its output. This problem also was fixed in this patch. Approved by Dmitry Shulga <dmitry.shulga(a)mariadb.com> --- mysql-test/r/cte_recursive.result | 46 +++++++++++++++++++++++++++++++++---- mysql-test/t/cte_recursive.test | 48 +++++++++++++++++++++++++++++++++++++-- sql/sql_cte.cc | 3 ++- sql/sql_cte.h | 2 ++ sql/sql_select.cc | 12 ++++++---- 5 files changed, 100 insertions(+), 11 deletions(-) diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index a4d821e..1b1fd8b 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -3689,7 +3689,7 @@ select * from t1 as t; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t ALL NULL NULL NULL NULL 4 100.00 Warnings: -Note 1003 with recursive cte as (select `*` AS `*` from `test`.`t1` where `a` = 1 union select `a` + 1 AS `a+1` from `cte` where `a` < 3)select `test`.`t`.`a` AS `a` from `test`.`t1` `t` +Note 1003 with recursive cte as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 1 union select `cte`.`a` + 1 AS `a+1` from `cte` where `cte`.`a` < 3)select `test`.`t`.`a` AS `a` from `test`.`t1` `t` with recursive cte as (select * from t1 
where a=1 union select a+1 from cte where a<3) select * from t1 as t; @@ -3702,10 +3702,10 @@ create table t2 ( i1 int, i2 int); insert into t2 values (1,1),(2,2); explain with recursive cte as -( select * from t1 union select s1.* from t1 as s1, cte where s1.i1 = cte.i2 ) -select * from t1 as t; +( select * from t2 union select s1.* from t2 as s1, cte where s1.i1 = cte.i2 ) +select * from t2 as t; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t ALL NULL NULL NULL NULL 4 +1 PRIMARY t ALL NULL NULL NULL NULL 2 drop table t1,t2; # # MDEV-22042: ANALYZE of query using stored function and recursive CTE @@ -4481,5 +4481,43 @@ b deallocate prepare stmt; drop table t1,t2; # +# MDEV-26189: Unknown column reference within hanging recursive CTE +# +create table t1 (a int); +insert into t1 values (3), (7), (1); +with recursive +r as (select * from t1 union select s1.* from t1 as s1, r where s1.a = r.b) +select * from t1 as t; +ERROR 42S22: Unknown column 'r.b' in 'where clause' +explain with recursive +r as (select * from t1 union select s1.* from t1 as s1, r where s1.a = r.b) +select * from t1 as t; +ERROR 42S22: Unknown column 'r.b' in 'where clause' +create procedure sp1() with recursive +r as (select * from t1 union select s1.* from t1 as s1, r where s1.a = r.b) +select * from t1 as t; +call sp1(); +ERROR 42S22: Unknown column 'r.b' in 'where clause' +call sp1(); +ERROR 42S22: Unknown column 'r.b' in 'where clause' +with recursive +r as (select * from t1 union select s1.* from t1 as s1, r where s1.b = r.a) +select * from t1 as t; +ERROR 42S22: Unknown column 's1.b' in 'where clause' +explain with recursive +r as (select * from t1 union select s1.* from t1 as s1, r where s1.b = r.a) +select * from t1 as t; +ERROR 42S22: Unknown column 's1.b' in 'where clause' +create procedure sp2() with recursive +r as (select * from t1 union select s1.* from t1 as s1, r where s1.b = r.a) +select * from t1 as t; +call sp2(); +ERROR 42S22: Unknown column 
's1.b' in 'where clause' +call sp2(); +ERROR 42S22: Unknown column 's1.b' in 'where clause' +drop procedure sp1; +drop procedure sp2; +drop table t1; +# # End of 10.2 tests # diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index 49f9c1f..cdd3a07 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -2556,8 +2556,8 @@ insert into t2 values (1,1),(2,2); explain with recursive cte as - ( select * from t1 union select s1.* from t1 as s1, cte where s1.i1 = cte.i2 ) -select * from t1 as t; + ( select * from t2 union select s1.* from t2 as s1, cte where s1.i1 = cte.i2 ) +select * from t2 as t; drop table t1,t2; @@ -2841,5 +2841,49 @@ deallocate prepare stmt; drop table t1,t2; --echo # +--echo # MDEV-26189: Unknown column reference within hanging recursive CTE +--echo # + +create table t1 (a int); +insert into t1 values (3), (7), (1); + +let $q1= +with recursive + r as (select * from t1 union select s1.* from t1 as s1, r where s1.a = r.b) +select * from t1 as t; + +--ERROR ER_BAD_FIELD_ERROR +eval $q1; +--ERROR ER_BAD_FIELD_ERROR +eval explain $q1; + +eval create procedure sp1() $q1; +--ERROR ER_BAD_FIELD_ERROR +call sp1(); +--ERROR ER_BAD_FIELD_ERROR +call sp1(); + +let $q2= +with recursive + r as (select * from t1 union select s1.* from t1 as s1, r where s1.b = r.a) +select * from t1 as t; + +--ERROR ER_BAD_FIELD_ERROR +eval $q2; +--ERROR ER_BAD_FIELD_ERROR +eval explain $q2; + +eval create procedure sp2() $q2; +--ERROR ER_BAD_FIELD_ERROR +call sp2(); +--ERROR ER_BAD_FIELD_ERROR +call sp2(); + +drop procedure sp1; +drop procedure sp2; + +drop table t1; + +--echo # --echo # End of 10.2 tests --echo # diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index b720eac..22a9984 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -911,7 +911,8 @@ bool With_clause::prepare_unreferenced_elements(THD *thd) with_elem; with_elem= with_elem->next) { - if (!with_elem->is_referenced() && with_elem->prepare_unreferenced(thd)) + if 
((with_elem->is_hanging_recursive() || !with_elem->is_referenced()) && + with_elem->prepare_unreferenced(thd)) return true; } diff --git a/sql/sql_cte.h b/sql/sql_cte.h index 5f30894..d484dcf 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -242,6 +242,8 @@ class With_element : public Sql_alloc bool is_referenced() { return referenced; } + bool is_hanging_recursive() { return is_recursive && !rec_outer_references; } + void inc_references() { references++; } bool rename_columns_of_derived_unit(THD *thd, st_select_lex_unit *unit); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 2bb01ee..ff584e9 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -25276,8 +25276,10 @@ int JOIN::save_explain_data_intern(Explain_query *output, if (!(tmp_unit->item && tmp_unit->item->eliminated) && // (1) (!tmp_unit->derived || tmp_unit->derived->is_materialized_derived()) && // (2) - !(tmp_unit->with_element && - (!tmp_unit->derived || !tmp_unit->derived->derived_result))) // (3) + (!tmp_unit->with_element || + (tmp_unit->derived && + tmp_unit->derived->derived_result && + !tmp_unit->with_element->is_hanging_recursive()))) // (3) { explain->add_child(tmp_unit->first_select()->select_number); } @@ -25342,8 +25344,10 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, */ if (!(unit->item && unit->item->eliminated) && // (1) !(unit->derived && unit->derived->merged_for_insert) && // (2) - !(unit->with_element && - (!unit->derived || !unit->derived->derived_result))) // (3) + (!unit->with_element || + (unit->derived && + unit->derived->derived_result && + !unit->with_element->is_hanging_recursive()))) // (3) { if (mysql_explain_union(thd, unit, result)) DBUG_VOID_RETURN;
1 0
0 0
[Commits] 6f1628b: MDEV-25565 Crash on 2-nd execution of SP/PS for query calculating window functions
by IgorBabaev 20 Jul '21

20 Jul '21
revision-id: 6f1628b917d365ecfc9c4c9951011613f4212592 (mariadb-10.2.31-1048-g6f1628b) parent(s): fb0b28932ce82903f2fcfb690a71bff52355507f author: Igor Babaev committer: Igor Babaev timestamp: 2021-07-09 18:56:34 -0700 message: MDEV-25565 Crash on 2-nd execution of SP/PS for query calculating window functions from view A crash of the server happened when executing a stored procedure whose only query calculated window functions over a mergeable view specified as a select from a non-mergeable view. The crash could be reproduced if the window specifications of the window functions were identical and both contained PARTITION lists and ORDER BY lists. A crash also happened on the second execution of the prepared statement created for such a query. If derived tables or CTEs are used instead of views, the problem still manifests itself, crashing the server. When optimizing the window specifications of a window function the server can substitute the partition lists and the order lists for the corresponding lists from another window specification in the case when the lists are identical. This substitution is not permanent and should be rolled back before the second execution. This was not done, which ultimately led to a crash when resolving the column names at the second execution of SP/PS. 
--- mysql-test/r/win.result | 287 ++++++++++++++++++++++++++++++++++++++++++++++++ mysql-test/t/win.test | 147 +++++++++++++++++++++++++ sql/sql_union.cc | 26 +++++ sql/sql_window.cc | 12 ++ sql/sql_window.h | 5 +- 5 files changed, 476 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/win.result b/mysql-test/r/win.result index 8a31dcc..bc017ea 100644 --- a/mysql-test/r/win.result +++ b/mysql-test/r/win.result @@ -3911,5 +3911,292 @@ sum(i) over () IN ( SELECT 1 FROM t1 a) 0 DROP TABLE t1; # +# MDEV-25565: 2-nd call of SP with SELECT from view / derived table / CTE +# returning the result of calculation of 2 window +# functions that use the same window specification +# +create table t1 (a int); +insert into t1 values (3), (7), (1), (7), (1), (1), (3), (1), (5); +create view v2 as select a from t1 group by a; +create view v1 as select * from v2; +create procedure sp1() select v1.a, +sum(v1.a) over (partition by v1.a order by v1.a) as k, +avg(v1.a) over (partition by v1.a order by v1.a) as m +from v1; +call sp1(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +call sp1(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +prepare stmt from "select v1.a, +sum(v1.a) over (partition by v1.a order by v1.a) as k, +avg(v1.a) over (partition by v1.a order by v1.a) as m +from v1"; +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +deallocate prepare stmt; +create procedure sp2() select * from +( select dt1.a, +sum(dt1.a) over (partition by dt1.a order by dt1.a) as k, +avg(dt1.a) over (partition by dt1.a order by dt1.a) as m +from (select * from v2) as dt1 +) as dt; +call sp2(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +call sp2(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +prepare stmt from "select * from +( select dt1.a, +sum(dt1.a) over (partition by dt1.a order by dt1.a) as k, +avg(dt1.a) over (partition by dt1.a order by dt1.a) as m +from 
(select * from v2) as dt1 +) as dt"; +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +deallocate prepare stmt; +create procedure sp3() select * from +( select dt1.a, +sum(dt1.a) over (partition by dt1.a order by dt1.a) as k, +avg(dt1.a) over (partition by dt1.a order by dt1.a) as m +from ( select * from (select * from t1 group by a) as dt2 ) as dt1 +) as dt; +call sp3(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +call sp3(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +prepare stmt from "select * from +( select dt1.a, +sum(dt1.a) over (partition by dt1.a order by dt1.a) as k, +avg(dt1.a) over (partition by dt1.a order by dt1.a) as m +from ( select * from (select * from t1 group by a) as dt2 ) as dt1 +) as dt"; +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +deallocate prepare stmt; +create procedure sp4() with cte1 as (select * from (select * from t1 group by a) as dt2), +cte as +( select cte1.a, +sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, +avg(cte1.a) over (partition by cte1.a order by cte1.a) as m +from cte1 ) +select * from cte; +call sp4(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +call sp4(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +prepare stmt from "with cte1 as (select * from (select * from t1 group by a) as dt2), +cte as +( select cte1.a, +sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, +avg(cte1.a) over (partition by cte1.a order by cte1.a) as m +from cte1 ) +select * from cte"; +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +deallocate prepare stmt; +create procedure sp5() with cte1 as (select * from v2), +cte as +( select cte1.a, +sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, 
+avg(cte1.a) over (partition by cte1.a order by cte1.a) as m +from cte1 ) +select * from cte; +call sp5(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +call sp5(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +prepare stmt from "with cte1 as (select * from v2), +cte as +( select cte1.a, +sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, +avg(cte1.a) over (partition by cte1.a order by cte1.a) as m +from cte1 ) +select * from cte"; +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +deallocate prepare stmt; +create procedure sp6() with +cte1 as (with cte2 as (select * from t1 group by a) select * from cte2), +cte as +( select cte1.a, +sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, +avg(cte1.a) over (partition by cte1.a order by cte1.a) as m +from cte1 ) +select * from cte; +call sp6(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +call sp6(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +prepare stmt from "with +cte1 as (with cte2 as (select * from t1 group by a) select * from cte2), +cte as +( select cte1.a, +sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, +avg(cte1.a) over (partition by cte1.a order by cte1.a) as m +from cte1 ) +select * from cte"; +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +deallocate prepare stmt; +create procedure sp7() with +cte2 as (select * from v1), +cte1 as (select * from cte2), +cte as +( select cte1.a, +sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, +avg(cte1.a) over (partition by cte1.a order by cte1.a) as m +from cte1 ) +select * from cte; +call sp7(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +call sp7(); +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +prepare stmt from "with +cte2 as (select * from v1), +cte1 as (select * from cte2), +cte 
as +( select cte1.a, +sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, +avg(cte1.a) over (partition by cte1.a order by cte1.a) as m +from cte1 ) +select * from cte"; +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +execute stmt; +a k m +1 1 1.0000 +3 3 3.0000 +5 5 5.0000 +7 7 7.0000 +deallocate prepare stmt; +drop procedure sp1; +drop procedure sp2; +drop procedure sp3; +drop procedure sp4; +drop procedure sp5; +drop procedure sp6; +drop procedure sp7; +drop view v1,v2; +drop table t1; +# # End of 10.2 tests # diff --git a/mysql-test/t/win.test b/mysql-test/t/win.test index c07a81f..72e789d 100644 --- a/mysql-test/t/win.test +++ b/mysql-test/t/win.test @@ -2557,5 +2557,152 @@ SELECT sum(i) over () IN ( SELECT 1 FROM t1 a) FROM t1; DROP TABLE t1; --echo # +--echo # MDEV-25565: 2-nd call of SP with SELECT from view / derived table / CTE +--echo # returning the result of calculation of 2 window +--echo # functions that use the same window specification +--echo # + +create table t1 (a int); +insert into t1 values (3), (7), (1), (7), (1), (1), (3), (1), (5); + +create view v2 as select a from t1 group by a; +create view v1 as select * from v2; + +let $q1= +select v1.a, + sum(v1.a) over (partition by v1.a order by v1.a) as k, + avg(v1.a) over (partition by v1.a order by v1.a) as m +from v1; + +eval create procedure sp1() $q1; +call sp1(); +call sp1(); + +eval prepare stmt from "$q1"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +let $q2= +select * from + ( select dt1.a, + sum(dt1.a) over (partition by dt1.a order by dt1.a) as k, + avg(dt1.a) over (partition by dt1.a order by dt1.a) as m + from (select * from v2) as dt1 + ) as dt; + +eval create procedure sp2() $q2; +call sp2(); +call sp2(); + +eval prepare stmt from "$q2"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +let $q3= +select * from + ( select dt1.a, + sum(dt1.a) over (partition by dt1.a order by dt1.a) as k, + avg(dt1.a) over (partition by dt1.a order 
by dt1.a) as m + from ( select * from (select * from t1 group by a) as dt2 ) as dt1 + ) as dt; + +eval create procedure sp3() $q3; +call sp3(); +call sp3(); + +eval prepare stmt from "$q3"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +let $q4= +with cte1 as (select * from (select * from t1 group by a) as dt2), + cte as + ( select cte1.a, + sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, + avg(cte1.a) over (partition by cte1.a order by cte1.a) as m + from cte1 ) +select * from cte; + +eval create procedure sp4() $q4; +call sp4(); +call sp4(); + +eval prepare stmt from "$q4"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +let $q5= +with cte1 as (select * from v2), + cte as + ( select cte1.a, + sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, + avg(cte1.a) over (partition by cte1.a order by cte1.a) as m + from cte1 ) +select * from cte; + +eval create procedure sp5() $q5; +call sp5(); +call sp5(); + +eval prepare stmt from "$q5"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +let $q6= +with +cte1 as (with cte2 as (select * from t1 group by a) select * from cte2), + cte as + ( select cte1.a, + sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, + avg(cte1.a) over (partition by cte1.a order by cte1.a) as m + from cte1 ) +select * from cte; + +eval create procedure sp6() $q6; +call sp6(); +call sp6(); + +eval prepare stmt from "$q6"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +let $q7= +with + cte2 as (select * from v1), + cte1 as (select * from cte2), + cte as + ( select cte1.a, + sum(cte1.a) over (partition by cte1.a order by cte1.a) as k, + avg(cte1.a) over (partition by cte1.a order by cte1.a) as m + from cte1 ) +select * from cte; + +eval create procedure sp7() $q7; +call sp7(); +call sp7(); + +eval prepare stmt from "$q7"; +execute stmt; +execute stmt; +deallocate prepare stmt; + + +drop procedure sp1; +drop procedure sp2; +drop procedure sp3; +drop procedure sp4; +drop procedure 
sp5; +drop procedure sp6; +drop procedure sp7; +drop view v1,v2; +drop table t1; + +--echo # --echo # End of 10.2 tests --echo # diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 7baedfb..f3c90b8 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -30,6 +30,7 @@ #include "filesort.h" // filesort_free_buffers #include "sql_view.h" #include "sql_cte.h" +#include "item_windowfunc.h" bool mysql_union(THD *thd, LEX *lex, select_result *result, SELECT_LEX_UNIT *unit, ulong setup_tables_done_option) @@ -1550,6 +1551,29 @@ static void cleanup_order(ORDER *order) } +static void cleanup_window_funcs(List<Item_window_func> &win_funcs) +{ + List_iterator_fast<Item_window_func> it(win_funcs); + Item_window_func *win_func; + while ((win_func= it++)) + { + Window_spec *win_spec= win_func->window_spec; + if (!win_spec) + continue; + if (win_spec->save_partition_list) + { + win_spec->partition_list= win_spec->save_partition_list; + win_spec->save_partition_list= NULL; + } + if (win_spec->save_order_list) + { + win_spec->order_list= win_spec->save_order_list; + win_spec->save_order_list= NULL; + } + } +} + + bool st_select_lex::cleanup() { bool error= FALSE; @@ -1558,6 +1582,8 @@ bool st_select_lex::cleanup() cleanup_order(order_list.first); cleanup_order(group_list.first); + cleanup_window_funcs(window_funcs); + if (join) { List_iterator<TABLE_LIST> ti(leaf_tables); diff --git a/sql/sql_window.cc b/sql/sql_window.cc index 612c6e6..3ef751b 100644 --- a/sql/sql_window.cc +++ b/sql/sql_window.cc @@ -479,9 +479,15 @@ int compare_window_funcs_by_window_specs(Item_window_func *win_func1, Let's use only one of the lists. 
*/ if (!win_spec1->name() && win_spec2->name()) + { + win_spec1->save_partition_list= win_spec1->partition_list; win_spec1->partition_list= win_spec2->partition_list; + } else + { + win_spec2->save_partition_list= win_spec2->partition_list; win_spec2->partition_list= win_spec1->partition_list; + } cmp= compare_order_lists(win_spec1->order_list, win_spec2->order_list); @@ -494,9 +500,15 @@ int compare_window_funcs_by_window_specs(Item_window_func *win_func1, Let's use only one of the lists. */ if (!win_spec1->name() && win_spec2->name()) + { + win_spec1->save_order_list= win_spec2->order_list; win_spec1->order_list= win_spec2->order_list; + } else + { + win_spec1->save_order_list= win_spec2->order_list; win_spec2->order_list= win_spec1->order_list; + } cmp= compare_window_frames(win_spec1->window_frame, win_spec2->window_frame); diff --git a/sql/sql_window.h b/sql/sql_window.h index e0c1563..417d0bc 100644 --- a/sql/sql_window.h +++ b/sql/sql_window.h @@ -99,8 +99,10 @@ class Window_spec : public Sql_alloc LEX_STRING *window_ref; SQL_I_List<ORDER> *partition_list; + SQL_I_List<ORDER> *save_partition_list; SQL_I_List<ORDER> *order_list; + SQL_I_List<ORDER> *save_order_list; Window_frame *window_frame; @@ -111,7 +113,8 @@ class Window_spec : public Sql_alloc SQL_I_List<ORDER> *ord_list, Window_frame *win_frame) : window_names_are_checked(false), window_ref(win_ref), - partition_list(part_list), order_list(ord_list), + partition_list(part_list), save_partition_list(NULL), + order_list(ord_list), save_order_list(NULL), window_frame(win_frame), referenced_win_spec(NULL) {} virtual char *name() { return NULL; }
2 1
0 0
[Commits] 0eaeb28: MDEV-26025 Server crashes while executing query with CTE in PS/SP
by IgorBabaev 20 Jul '21

20 Jul '21
revision-id: 0eaeb28c4d1914b6fa648214c18d111fe86ef773 (mariadb-10.2.31-1062-g0eaeb28) parent(s): f053349797a1dca5206a3b8d5ff33353f45430d8 author: Igor Babaev committer: Igor Babaev timestamp: 2021-07-20 00:07:31 -0700 message: MDEV-26025 Server crashes while executing query with CTE in PS/SP This bug appeared after the patch for bug MDEV-23886. Due to this bug execution of queries with CTEs used the same CTE at least twice via prepared statements or with stored procedures caused crashes of the server. It happened because the select created for any of not the first usage of a CTE erroneously was not included into all_selects_list. This patch corrects the patch applied to fix the bug MDEV-26108. Approved by Oleksandr Byelkin <sanja(a)mariadb.com> --- mysql-test/r/cte_nonrecursive.result | 42 ++++++++++++++++++++++++++++++++++++ mysql-test/t/cte_nonrecursive.test | 27 +++++++++++++++++++++++ sql/sql_cte.cc | 15 ++++++++----- 3 files changed, 79 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result index 5cc5a25..2504e55 100644 --- a/mysql-test/r/cte_nonrecursive.result +++ b/mysql-test/r/cte_nonrecursive.result @@ -2044,4 +2044,46 @@ select a from t1 union select a+1 as a from cte_r r where a < 10 ) select * from cte_e; ERROR 42S02: Table 'test.cte_r' doesn't exist drop table t1; +# +# MDEV-26025: query with two usage of a CTE executing via PS /SP +# +create table t1 (a int, b int); +insert into t1 value (1,3), (3,2), (1,3), (4,1); +prepare stmt from "with +cte1 as ( select a,b from t1 where a = 1 AND b = 3 ), +cte2 as ( select a,b from cte1 ), +cte3 as ( select a,b from cte2 ) +select * from cte3, cte2"; +execute stmt; +a b a b +1 3 1 3 +1 3 1 3 +1 3 1 3 +1 3 1 3 +execute stmt; +a b a b +1 3 1 3 +1 3 1 3 +1 3 1 3 +1 3 1 3 +deallocate prepare stmt; +create procedure sp() with +cte1 as ( select a,b from t1 where a = 1 AND b = 3 ), +cte2 as ( select a,b from cte1 ), +cte3 as ( select a,b from cte2 ) 
+select * from cte3, cte2; +call sp(); +a b a b +1 3 1 3 +1 3 1 3 +1 3 1 3 +1 3 1 3 +call sp(); +a b a b +1 3 1 3 +1 3 1 3 +1 3 1 3 +1 3 1 3 +drop procedure sp; +drop table t1; # End of 10.2 tests diff --git a/mysql-test/t/cte_nonrecursive.test b/mysql-test/t/cte_nonrecursive.test index 68dbc0c..c20a0dc 100644 --- a/mysql-test/t/cte_nonrecursive.test +++ b/mysql-test/t/cte_nonrecursive.test @@ -1515,4 +1515,31 @@ with cte_e as ( drop table t1; +--echo # +--echo # MDEV-26025: query with two usage of a CTE executing via PS /SP +--echo # + +create table t1 (a int, b int); +insert into t1 value (1,3), (3,2), (1,3), (4,1); + +let $q= +with + cte1 as ( select a,b from t1 where a = 1 AND b = 3 ), + cte2 as ( select a,b from cte1 ), + cte3 as ( select a,b from cte2 ) +select * from cte3, cte2; + +eval prepare stmt from "$q"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +eval create procedure sp() $q; + +call sp(); +call sp(); + +drop procedure sp; +drop table t1; + --echo # End of 10.2 tests diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index 702db8f..b720eac 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -1012,6 +1012,7 @@ st_select_lex_unit *With_element::clone_parsed_spec(LEX *old_lex, bool parse_status= false; st_select_lex *with_select; + st_select_lex *last_clone_select; char save_end= unparsed_spec.str[unparsed_spec.length]; unparsed_spec.str[unparsed_spec.length]= '\0'; @@ -1099,11 +1100,6 @@ st_select_lex_unit *With_element::clone_parsed_spec(LEX *old_lex, lex->unit.include_down(with_table->select_lex); lex->unit.set_slave(with_select); lex->unit.cloned_from= spec; - old_lex->all_selects_list= - (st_select_lex*) (lex->all_selects_list-> - insert_chain_before( - (st_select_lex_node **) &(old_lex->all_selects_list), - with_select)); /* Now all references to the CTE defined outside of the cloned specification @@ -1119,6 +1115,15 @@ st_select_lex_unit *With_element::clone_parsed_spec(LEX *old_lex, goto err; } + last_clone_select= 
lex->all_selects_list; + while (last_clone_select->next_select_in_list()) + last_clone_select= last_clone_select->next_select_in_list(); + old_lex->all_selects_list= + (st_select_lex*) (lex->all_selects_list-> + insert_chain_before( + (st_select_lex_node **) &(old_lex->all_selects_list), + last_clone_select)); + lex->sphead= NULL; // in order not to delete lex->sphead lex_end(lex); err:
1 0
0 0
  • ← Newer
  • 1
  • ...
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • ...
  • 1461
  • Older →

HyperKitty Powered by HyperKitty version 1.3.12.