revision-id: c64ff168f2fdb3c3675908f9fe6565653cce867d (mariadb-10.1.35-37-gc64ff168f2f)
parent(s): 6ca6f25d4e96a479eb144a8da1066a27d0abce40
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-07 08:04:01 +0300
message:
Run #2.
---
mysql-test/suite/galera/r/MW-44.result | 7 ++++---
mysql-test/suite/galera/t/MW-44.test | 5 ++++-
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/mysql-test/suite/galera/r/MW-44.result b/mysql-test/suite/galera/r/MW-44.result
index 394c749b253..7e3d2f4b7ec 100644
--- a/mysql-test/suite/galera/r/MW-44.result
+++ b/mysql-test/suite/galera/r/MW-44.result
@@ -6,9 +6,10 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
SET SESSION wsrep_osu_method=RSU;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
SET SESSION wsrep_osu_method=TOI;
-SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
-COUNT(*) = 2
-1
+SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+argument
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB
+ALTER TABLE t1 ADD COLUMN f2 INTEGER
SET GLOBAL general_log='ON';
SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%';
COUNT(*) = 0
diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test
index cb5db1b208a..6defa432879 100644
--- a/mysql-test/suite/galera/t/MW-44.test
+++ b/mysql-test/suite/galera/t/MW-44.test
@@ -19,7 +19,10 @@ SET SESSION wsrep_osu_method=RSU;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
SET SESSION wsrep_osu_method=TOI;
-SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+--let $wait_condition = SELECT COUNT(argument) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+--source include/wait_condition.inc
+
+SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
--connection node_2
SET GLOBAL general_log='ON';
revision-id: 96572b7aa1698ddd1ff64672aac920e667b35790 (mariadb-10.3.6-117-g96572b7)
parent(s): f6694b62447454028dd087802cd3b326ed721dd7
author: Igor Babaev
committer: Igor Babaev
timestamp: 2018-09-06 20:19:37 -0700
message:
MDEV-16992 Assertion `table_ref->table || table_ref->view' failed in
Field_iterator_table_ref::set_field_iterator
Several functions that handled the prepare phase of different statements missed
the DT_INIT flag in the last parameter of their open_normal_and_derived_tables()
calls. This made the context analysis of derived tables dependent on the order
in which the derived tables were processed by mysql_handle_derived(). That
order was induced by the order of SELECTs in all_select_list.
In 10.4 the order of SELECTs in all_select_list became different, and the lack
of the DT_INIT flag in some open_normal_and_derived_tables() calls became
critical, as some derived tables were not identified as such.
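
The same correction is applied at every affected call site in sql/sql_prepare.cc.
As an illustration only (a simplified sketch, not a complete function; the exact
flag set varies per call site, as the diff below shows), the corrected call
pattern in the mysql_test_*() helpers looks like this:

    /*
      Request the derived-table initialization phase (DT_INIT) explicitly,
      together with the phases that were already requested. This way every
      derived table is initialized here, instead of relying on the order in
      which mysql_handle_derived() happens to visit the SELECTs.
    */
    if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
                                       DT_INIT | DT_PREPARE | DT_CREATE))
      DBUG_RETURN(TRUE);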
---
mysql-test/main/ps.result | 43 +++++++++++++++++++++++++++++++++++++++++++
mysql-test/main/ps.test | 46 ++++++++++++++++++++++++++++++++++++++++++++++
sql/sql_prepare.cc | 13 +++++++------
3 files changed, 96 insertions(+), 6 deletions(-)
diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result
index 540315b..d619b21 100644
--- a/mysql-test/main/ps.result
+++ b/mysql-test/main/ps.result
@@ -5299,5 +5299,48 @@ DROP PROCEDURE p2;
DROP PROCEDURE p1;
DROP ROLE testrole;
#
+# MDEV-16992: prepare of CREATE TABLE, CREATE VIEW, DO, SET, CALL
+# statements with CTE containing materialized derived
+# (the bug is reproducible on 10.4)
+#
+prepare stmt from
+"CREATE TABLE t1 AS
+ WITH cte(a) AS (SELECT * FROM (SELECT 1) AS t) SELECT * FROM cte;";
+execute stmt;
+select * from t1;
+a
+1
+prepare stmt from
+"CREATE VIEW v1 AS
+ WITH cte(a) AS (SELECT * FROM (SELECT 1) AS t) SELECT * FROM cte;";
+execute stmt;
+select * from v1;
+a
+1
+prepare stmt from
+"DO (SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS tt);";
+execute stmt;
+prepare stmt from
+"SET @a = (SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS t);";
+execute stmt;
+create procedure p (i int) insert into t1 values(i);
+prepare stmt from
+"CALL p
+ ((SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS tt));";
+execute stmt;
+select * from t1;
+a
+1
+1
+drop procedure p;
+drop view v1;
+drop table t1;
+#
# End of 10.2 tests
#
diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test
index ff93c46..86ae11c 100644
--- a/mysql-test/main/ps.test
+++ b/mysql-test/main/ps.test
@@ -4777,5 +4777,51 @@ DROP PROCEDURE p1;
DROP ROLE testrole;
--echo #
+--echo # MDEV-16992: prepare of CREATE TABLE, CREATE VIEW, DO, SET, CALL
+--echo # statements with CTE containing materialized derived
+--echo # (the bug is reproducible on 10.4)
+--echo #
+
+--enable_result_log
+
+prepare stmt from
+"CREATE TABLE t1 AS
+ WITH cte(a) AS (SELECT * FROM (SELECT 1) AS t) SELECT * FROM cte;";
+execute stmt;
+select * from t1;
+
+prepare stmt from
+"CREATE VIEW v1 AS
+ WITH cte(a) AS (SELECT * FROM (SELECT 1) AS t) SELECT * FROM cte;";
+execute stmt;
+select * from v1;
+
+prepare stmt from
+"DO (SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS tt);";
+execute stmt;
+
+prepare stmt from
+"SET @a = (SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS t);";
+execute stmt;
+
+create procedure p (i int) insert into t1 values(i);
+
+prepare stmt from
+"CALL p
+ ((SELECT 1
+ FROM (WITH cte AS (SELECT * FROM (SELECT 1) AS t)
+ SELECT * FROM cte) AS tt));";
+execute stmt;
+select * from t1;
+
+drop procedure p;
+drop view v1;
+drop table t1;
+
+--echo #
--echo # End of 10.2 tests
--echo #
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index c615356..b0b0c81 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1582,7 +1582,7 @@ static bool mysql_test_do_fields(Prepared_statement *stmt,
DBUG_RETURN(TRUE);
if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE | DT_CREATE))
DBUG_RETURN(TRUE);
DBUG_RETURN(setup_fields(thd, Ref_ptr_array(),
*values, COLUMNS_READ, 0, NULL, 0));
@@ -1614,7 +1614,7 @@ static bool mysql_test_set_fields(Prepared_statement *stmt,
if ((tables &&
check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)) ||
open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE | DT_CREATE))
goto error;
while ((var= it++))
@@ -1651,7 +1651,8 @@ static bool mysql_test_call_fields(Prepared_statement *stmt,
if ((tables &&
check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)) ||
- open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL, DT_PREPARE))
+ open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
+ DT_INIT | DT_PREPARE))
goto err;
while ((item= it++))
@@ -1777,7 +1778,7 @@ static bool mysql_test_create_table(Prepared_statement *stmt)
if (open_normal_and_derived_tables(stmt->thd, lex->query_tables,
MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE | DT_CREATE))
+ DT_INIT | DT_PREPARE | DT_CREATE))
DBUG_RETURN(TRUE);
select_lex->context.resolve_in_select_list= TRUE;
@@ -1798,7 +1799,7 @@ static bool mysql_test_create_table(Prepared_statement *stmt)
*/
if (open_normal_and_derived_tables(stmt->thd, lex->query_tables,
MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE))
+ DT_INIT | DT_PREPARE))
DBUG_RETURN(TRUE);
}
@@ -2025,7 +2026,7 @@ static bool mysql_test_create_view(Prepared_statement *stmt)
lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW;
if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
- DT_PREPARE))
+ DT_INIT | DT_PREPARE))
goto err;
res= select_like_stmt_test(stmt, 0, 0);
revision-id: 31081593aabb116b6d8f86b6c7e76126edb392b4 (mariadb-10.1.35-38-g31081593aab)
parent(s): b0026e33af8fc3b25a42099c096a84591fd550e2 3a4242fd57b3a2235d2478ed080941b67a82ad1b
author: Oleksandr Byelkin
committer: Oleksandr Byelkin
timestamp: 2018-09-06 22:45:19 +0200
message:
Merge branch '10.0' into 10.1
mysql-test/extra/rpl_tests/rpl_foreign_key.test | 60 -
mysql-test/mysql-test-run.pl | 2 +-
mysql-test/r/gis.result | 16 +
mysql-test/r/group_min_max.result | 28 +
mysql-test/r/join.result | 6 +-
mysql-test/r/selectivity.result | 44 +-
mysql-test/r/selectivity_innodb.result | 44 +-
mysql-test/r/sp.result | 17 +
mysql-test/r/stat_tables.result | 13 +
mysql-test/r/stat_tables_innodb.result | 13 +
mysql-test/suite/innodb/r/foreign-keys.result | 73 +
mysql-test/suite/innodb/r/foreign_key.result | 19 +
mysql-test/suite/innodb/t/foreign-keys.test | 87 +
mysql-test/suite/innodb/t/foreign_key.test | 25 +
mysql-test/suite/maria/create.result | 33 +
mysql-test/suite/maria/create.test | 42 +
mysql-test/suite/maria/maria.result | 4 +
mysql-test/suite/maria/maria.test | 10 +
.../suite/rpl/r/rpl_foreign_key_innodb.result | 3 +-
mysql-test/suite/rpl/t/rpl_foreign_key_innodb.test | 62 +-
mysql-test/t/gis.test | 15 +
mysql-test/t/group_min_max.test | 17 +
mysql-test/t/join.test | 3 +-
mysql-test/t/selectivity.test | 36 +
mysql-test/t/sp.test | 21 +
mysql-test/t/stat_tables.test | 12 +
scripts/mysql_install_db.sh | 2 +-
sql/field.cc | 17 +-
sql/item.cc | 10 +-
sql/lex.h | 6 +-
sql/opt_range.cc | 27 +-
sql/sp_head.cc | 33 +-
sql/sql_base.cc | 79 +-
sql/sql_base.h | 2 +
sql/sql_class.cc | 3 +-
sql/sql_class.h | 2 -
sql/sql_lex.h | 4 +-
sql/sql_select.cc | 2 +-
sql/sql_show.cc | 9 +-
sql/sql_statistics.cc | 3 +
sql/sql_statistics.h | 29 +-
sql/sql_table.cc | 62 +-
sql/sql_yacc.yy | 38 +-
sql/table.cc | 21 +
sql/table.h | 59 +-
sql/table_cache.cc | 2 -
storage/connect/javaconn.cpp | 6 +-
storage/connect/jdbconn.cpp | 10 +-
storage/connect/jmgoconn.cpp | 4 +-
storage/connect/tabjmg.cpp | 2 +-
storage/innobase/handler/ha_innodb.cc | 36 +-
storage/innobase/handler/handler0alter.cc | 20 +-
storage/maria/ma_blockrec.c | 14 +-
storage/mroonga/ha_mroonga.cpp | 7 -
storage/mroonga/ha_mroonga.hpp | 4 -
storage/tokudb/CMakeLists.txt | 8 +-
storage/tokudb/PerconaFT/CMakeLists.txt | 3 +-
.../cmake_modules/TokuSetupCompiler.cmake | 3 +
.../tokudb/PerconaFT/ft/cachetable/cachetable.cc | 21 +-
.../tokudb/PerconaFT/ft/cachetable/cachetable.h | 8 +-
.../tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft-test-helpers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft.h | 3 +
storage/tokudb/PerconaFT/ft/node.cc | 2 +
.../PerconaFT/ft/serialize/block_allocator.cc | 2 +-
.../tokudb/PerconaFT/ft/tests/cachetable-4357.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-4365.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-5097.cc | 6 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc | 7 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-all-write.cc | 5 +-
.../ft/tests/cachetable-checkpoint-pending.cc | 8 +-
.../ft/tests/cachetable-checkpoint-pinned-nodes.cc | 6 +-
.../ft/tests/cachetable-cleaner-checkpoint.cc | 5 +-
.../ft/tests/cachetable-cleaner-checkpoint2.cc | 5 +-
.../cachetable-cleaner-thread-attrs-accumulate.cc | 8 +-
.../cachetable-cleaner-thread-everything-pinned.cc | 5 +-
...etable-cleaner-thread-nothing-needs-flushing.cc | 5 +-
.../cachetable-cleaner-thread-same-fullhash.cc | 7 +-
.../ft/tests/cachetable-cleaner-thread-simple.cc | 7 +-
.../ft/tests/cachetable-clock-eviction.cc | 9 +-
.../ft/tests/cachetable-clock-eviction2.cc | 9 +-
.../ft/tests/cachetable-clock-eviction3.cc | 9 +-
.../ft/tests/cachetable-clock-eviction4.cc | 9 +-
.../ft/tests/cachetable-clone-checkpoint.cc | 5 +-
.../cachetable-clone-partial-fetch-pinned-node.cc | 7 +-
.../ft/tests/cachetable-clone-partial-fetch.cc | 7 +-
.../ft/tests/cachetable-clone-pin-nonblocking.cc | 7 +-
.../ft/tests/cachetable-clone-unpin-remove.cc | 5 +-
.../ft/tests/cachetable-eviction-close-test.cc | 4 -
.../ft/tests/cachetable-eviction-close-test2.cc | 4 -
.../ft/tests/cachetable-eviction-getandpin-test.cc | 14 +-
.../tests/cachetable-eviction-getandpin-test2.cc | 12 +-
.../ft/tests/cachetable-fetch-inducing-evictor.cc | 15 +-
.../ft/tests/cachetable-flush-during-cleaner.cc | 3 +-
.../ft/tests/cachetable-getandpin-test.cc | 8 +-
.../cachetable-kibbutz_and_flush_cachefile.cc | 3 +-
.../PerconaFT/ft/tests/cachetable-partial-fetch.cc | 18 +-
.../ft/tests/cachetable-pin-checkpoint.cc | 6 -
.../cachetable-pin-nonblocking-checkpoint-clean.cc | 9 +-
.../ft/tests/cachetable-prefetch-close-test.cc | 2 -
.../ft/tests/cachetable-prefetch-getandpin-test.cc | 12 +-
.../ft/tests/cachetable-put-checkpoint.cc | 9 -
.../PerconaFT/ft/tests/cachetable-simple-clone.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-clone2.cc | 5 +-
.../PerconaFT/ft/tests/cachetable-simple-close.cc | 20 +-
.../ft/tests/cachetable-simple-maybe-get-pin.cc | 3 +-
.../ft/tests/cachetable-simple-pin-cheap.cc | 9 +-
.../ft/tests/cachetable-simple-pin-dep-nodes.cc | 8 +-
.../cachetable-simple-pin-nonblocking-cheap.cc | 19 +-
.../ft/tests/cachetable-simple-pin-nonblocking.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-simple-pin.cc | 11 +-
.../ft/tests/cachetable-simple-put-dep-nodes.cc | 6 +-
.../cachetable-simple-read-pin-nonblocking.cc | 13 +-
.../ft/tests/cachetable-simple-read-pin.cc | 13 +-
.../cachetable-simple-unpin-remove-checkpoint.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-verify.cc | 5 +-
.../tokudb/PerconaFT/ft/tests/cachetable-test.cc | 22 +-
.../ft/tests/cachetable-unpin-and-remove-test.cc | 4 +-
.../cachetable-unpin-remove-and-checkpoint.cc | 6 +-
.../PerconaFT/ft/tests/cachetable-unpin-test.cc | 2 -
storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc | 178 +
storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc | 208 +
storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc | 2 +
storage/tokudb/PerconaFT/ft/txn/rollback.cc | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp | 2 +-
storage/tokudb/PerconaFT/portability/memory.cc | 14 +-
storage/tokudb/PerconaFT/portability/toku_assert.h | 2 +-
.../tokudb/PerconaFT/portability/toku_debug_sync.h | 3 +-
.../PerconaFT/portability/toku_instr_mysql.cc | 6 +-
.../PerconaFT/portability/toku_instrumentation.h | 6 +-
.../PerconaFT/portability/toku_portability.h | 2 +-
.../tokudb/PerconaFT/portability/toku_race_tools.h | 2 +-
storage/tokudb/PerconaFT/src/tests/get_last_key.cc | 32 +-
storage/tokudb/PerconaFT/src/ydb.cc | 3 +
storage/tokudb/PerconaFT/src/ydb_lib.cc | 2 +-
storage/tokudb/PerconaFT/util/dmt.cc | 4 +-
storage/tokudb/PerconaFT/util/minicron.cc | 3 +-
storage/tokudb/PerconaFT/util/scoped_malloc.cc | 2 +-
.../util/tests/minicron-change-period-data-race.cc | 66 +
storage/tokudb/ha_tokudb.cc | 325 +-
storage/tokudb/ha_tokudb.h | 92 +-
storage/tokudb/ha_tokudb_admin.cc | 8 +-
storage/tokudb/ha_tokudb_alter_55.cc | 4 +
storage/tokudb/ha_tokudb_alter_56.cc | 265 +-
storage/tokudb/ha_tokudb_alter_common.cc | 6 +-
storage/tokudb/ha_tokudb_update.cc | 96 +-
storage/tokudb/hatoku_cmp.cc | 33 +-
storage/tokudb/hatoku_cmp.h | 14 +-
storage/tokudb/hatoku_defines.h | 51 +-
storage/tokudb/hatoku_hton.cc | 183 +-
storage/tokudb/hatoku_hton.h | 25 +-
storage/tokudb/mysql-test/rpl/disabled.def | 1 +
.../r/rpl_mixed_replace_into.result | 0
.../rpl/r/rpl_parallel_tokudb_delete_pk.result | 5 -
...pl_parallel_tokudb_update_pk_uc0_lookup0.result | 5 -
.../rpl/r/rpl_parallel_tokudb_write_pk.result | 2 -
.../r/rpl_row_replace_into.result | 0
.../r/rpl_stmt_replace_into.result | 0
.../mysql-test/rpl/r/rpl_xa_interleave.result | 59 +
.../t/rpl_mixed_replace_into.test | 0
.../t/rpl_row_replace_into.test | 0
.../t/rpl_stmt_replace_into.test | 0
.../tokudb/mysql-test/rpl/t/rpl_xa_interleave.test | 103 +
.../tokudb/include/fast_update_gen_footer.inc | 2 +
.../include/fast_update_gen_footer_silent.inc | 9 +
.../tokudb/include/fast_update_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_update_int.inc | 48 +
.../tokudb/include/fast_upsert_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_upsert_int.inc | 19 +
.../tokudb/mysql-test/tokudb/include/have_mrr.inc | 0
.../tokudb/include/setup_fast_update_upsert.inc | 8 +
.../tokudb/mysql-test/tokudb/r/compressions.result | 11 +
.../tokudb/r/fast_update_binlog_mixed.result | 225 +-
.../tokudb/r/fast_update_binlog_row.result | 19 +-
.../tokudb/r/fast_update_binlog_statement.result | 222 +-
.../mysql-test/tokudb/r/fast_update_blobs.result | 18253 +---------
.../r/fast_update_blobs_fixed_varchar.result | 33026 ------------------
.../tokudb/r/fast_update_blobs_with_varchar.result | 32771 +-----------------
.../mysql-test/tokudb/r/fast_update_char.result | 60 +-
.../tokudb/r/fast_update_deadlock.result | 19 +-
.../tokudb/r/fast_update_decr_floor.result | 314 +-
.../r/fast_update_disable_slow_update.result | 7 -
.../mysql-test/tokudb/r/fast_update_error.result | 12 +-
.../mysql-test/tokudb/r/fast_update_int.result | 562 +-
.../tokudb/r/fast_update_int_bounds.result | 52 +-
.../mysql-test/tokudb/r/fast_update_key.result | 54 +-
.../mysql-test/tokudb/r/fast_update_sqlmode.result | 21 +-
.../tokudb/r/fast_update_uint_bounds.result | 36 +-
.../mysql-test/tokudb/r/fast_update_varchar.result | 13575 +-------
.../mysql-test/tokudb/r/fast_upsert_bin_pad.result | Bin 659 -> 738 bytes
.../mysql-test/tokudb/r/fast_upsert_char.result | 24 +-
.../tokudb/r/fast_upsert_deadlock.result | 19 +-
.../mysql-test/tokudb/r/fast_upsert_int.result | 428 +-
.../mysql-test/tokudb/r/fast_upsert_key.result | 43 +-
.../mysql-test/tokudb/r/fast_upsert_sqlmode.result | 23 +-
.../mysql-test/tokudb/r/fast_upsert_values.result | 18 +-
.../tokudb/mysql-test/tokudb/r/tokudb_mrr.result | 326 +
storage/tokudb/mysql-test/tokudb/suite.pm | 6 +
.../tokudb/mysql-test/tokudb/t/compressions.test | 68 +
storage/tokudb/mysql-test/tokudb/t/disabled.def | 24 -
.../tokudb/t/fast_update_binlog_mixed-master.opt | 2 +
.../tokudb/t/fast_update_binlog_mixed.test | 15 +-
.../tokudb/t/fast_update_binlog_row-master.opt | 2 +
.../tokudb/t/fast_update_binlog_row.test | 19 +-
.../t/fast_update_binlog_statement-master.opt | 2 +
.../tokudb/t/fast_update_binlog_statement.test | 15 +-
.../mysql-test/tokudb/t/fast_update_blobs.py | 57 -
.../mysql-test/tokudb/t/fast_update_blobs.test | 18575 +----------
.../tokudb/t/fast_update_blobs_fixed_varchar.py | 63 -
.../tokudb/t/fast_update_blobs_fixed_varchar.test | 33287 -------------------
.../tokudb/t/fast_update_blobs_with_varchar.py | 62 -
.../tokudb/t/fast_update_blobs_with_varchar.test | 33115 +-----------------
.../mysql-test/tokudb/t/fast_update_char.test | 66 +-
.../mysql-test/tokudb/t/fast_update_deadlock.test | 21 +-
.../mysql-test/tokudb/t/fast_update_decr_floor.py | 58 -
.../tokudb/t/fast_update_decr_floor.test | 409 +-
.../tokudb/t/fast_update_disable_slow_update.test | 17 -
.../mysql-test/tokudb/t/fast_update_error.test | 16 +-
.../tokudb/mysql-test/tokudb/t/fast_update_int.py | 77 -
.../mysql-test/tokudb/t/fast_update_int.test | 682 +-
.../tokudb/t/fast_update_int_bounds.test | 55 +-
.../mysql-test/tokudb/t/fast_update_key.test | 63 +-
.../mysql-test/tokudb/t/fast_update_sqlmode.test | 25 +-
.../tokudb/t/fast_update_uint_bounds.test | 42 +-
.../mysql-test/tokudb/t/fast_update_varchar.py | 63 -
.../mysql-test/tokudb/t/fast_update_varchar.test | 7390 +---
.../mysql-test/tokudb/t/fast_upsert_bin_pad.test | 19 +-
.../mysql-test/tokudb/t/fast_upsert_char.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_deadlock.test | 22 +-
.../tokudb/mysql-test/tokudb/t/fast_upsert_int.py | 50 -
.../mysql-test/tokudb/t/fast_upsert_int.test | 486 +-
.../mysql-test/tokudb/t/fast_upsert_key.test | 46 +-
.../mysql-test/tokudb/t/fast_upsert_sqlmode.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_values.test | 21 +-
storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test | 73 +
.../tokudb/mysql-test/tokudb_bugs/r/PS-3773.result | 8 +
.../r/alter_table_comment_rebuild_data.result | 177 +
.../tokudb/mysql-test/tokudb_bugs/t/PS-3773.test | 26 +
.../t/alter_table_comment_rebuild_data.test | 188 +
storage/tokudb/tokudb_debug.h | 5 -
storage/tokudb/tokudb_dir_cmd.h | 6 +-
storage/tokudb/tokudb_information_schema.cc | 74 +-
storage/tokudb/tokudb_sysvars.cc | 122 +-
storage/tokudb/tokudb_sysvars.h | 16 +-
storage/tokudb/tokudb_thread.h | 26 +-
storage/tokudb/tokudb_update_fun.cc | 230 +-
storage/xtradb/handler/ha_innodb.cc | 36 +-
storage/xtradb/handler/handler0alter.cc | 20 +-
250 files changed, 4958 insertions(+), 194807 deletions(-)
diff --cc mysql-test/r/gis.result
index 76f4f6accdb,fc69c13516f..f89cceb3664
--- a/mysql-test/r/gis.result
+++ b/mysql-test/r/gis.result
@@@ -1733,131 -1659,21 +1733,147 @@@ c
DROP TABLE t1;
SET optimizer_switch=@save_optimizer_switch;
#
+ # MDEV-16995: ER_CANT_CREATE_GEOMETRY_OBJECT encountered for a query with
+ # optimizer_use_condition_selectivity>=3
+ #
+ CREATE TABLE t1 (a POINT);
+ INSERT INTO t1 VALUES (POINT(1,1)),(POINT(1,2)),(POINT(1,3));
+ set @save_use_stat_tables= @@use_stat_tables;
+ set @save_optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
+ set @@use_stat_tables= PREFERABLY;
+ set @@optimizer_use_condition_selectivity=3;
+ SELECT COUNT(*) FROM t1 WHERE a IN ('test','test1');
+ COUNT(*)
+ 0
+ set @@use_stat_tables= @save_use_stat_tables;
+ set @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+ drop table t1;
+ #
# End 10.0 tests
#
+SHOW CREATE TABLE information_schema.geometry_columns;
+Table Create Table
+GEOMETRY_COLUMNS CREATE TEMPORARY TABLE `GEOMETRY_COLUMNS` (
+ `F_TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',
+ `F_TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',
+ `F_TABLE_NAME` varchar(64) NOT NULL DEFAULT '',
+ `F_GEOMETRY_COLUMN` varchar(64) NOT NULL DEFAULT '',
+ `G_TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',
+ `G_TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',
+ `G_TABLE_NAME` varchar(64) NOT NULL DEFAULT '',
+ `G_GEOMETRY_COLUMN` varchar(64) NOT NULL DEFAULT '',
+ `STORAGE_TYPE` tinyint(2) NOT NULL DEFAULT '0',
+ `GEOMETRY_TYPE` int(7) NOT NULL DEFAULT '0',
+ `COORD_DIMENSION` tinyint(2) NOT NULL DEFAULT '0',
+ `MAX_PPR` tinyint(2) NOT NULL DEFAULT '0',
+ `SRID` smallint(5) NOT NULL DEFAULT '0'
+) ENGINE=MEMORY DEFAULT CHARSET=utf8
+SHOW CREATE TABLE information_schema.spatial_ref_sys;
+Table Create Table
+SPATIAL_REF_SYS CREATE TEMPORARY TABLE `SPATIAL_REF_SYS` (
+ `SRID` smallint(5) NOT NULL DEFAULT '0',
+ `AUTH_NAME` varchar(512) NOT NULL DEFAULT '',
+ `AUTH_SRID` int(5) NOT NULL DEFAULT '0',
+ `SRTEXT` varchar(2048) NOT NULL DEFAULT ''
+) ENGINE=MEMORY DEFAULT CHARSET=utf8
+create table t1(g GEOMETRY, pt POINT);
+create table t2(g LINESTRING, pl POLYGON);
+select * from information_schema.geometry_columns where f_table_schema='test';
+F_TABLE_CATALOG F_TABLE_SCHEMA F_TABLE_NAME F_GEOMETRY_COLUMN G_TABLE_CATALOG G_TABLE_SCHEMA G_TABLE_NAME G_GEOMETRY_COLUMN STORAGE_TYPE GEOMETRY_TYPE COORD_DIMENSION MAX_PPR SRID
+def test t1 def test t1 g 1 0 2 0 0
+def test t1 def test t1 pt 1 1 2 0 0
+def test t2 def test t2 g 1 2 2 0 0
+def test t2 def test t2 pl 1 3 2 0 0
+drop table t1, t2;
+10.1 tests
+create table t1(g GEOMETRY(9,4) REF_SYSTEM_ID=101, pt POINT(8,2), pg GEOMETRY REF_SYSTEM_ID=102);
+SELECT SRID from information_schema.geometry_columns WHERE f_table_schema='test' and G_TABLE_NAME='t1';
+SRID
+101
+0
+102
+drop table t1;
+# Expect an int(1) column to be created
+CREATE TABLE t1 AS SELECT CONTAINS(NULL, NULL);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `CONTAINS(NULL, NULL)` int(1) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+#
+# MDEV-7334 valgrind warning "unitialized bytes" in 10.1.
+#
+CREATE TABLE t1 (
+gp point,
+ln linestring,
+pg polygon,
+mp multipoint,
+mln multilinestring,
+mpg multipolygon,
+gc geometrycollection,
+gm geometry
+);
+ALTER TABLE t1 ADD fid INT NOT NULL;
+select SRID from information_schema.geometry_columns where F_TABLE_NAME='t1';
+SRID
+0
+0
+0
+0
+0
+0
+0
+0
+drop table t1;
+#
+# MDEV-7510 GIS: IsRing returns false for a primitive triangle.
+#
+select ST_IsRing(ST_LineFromText('LINESTRING(0 0,0 10,10 10,0 0)'));
+ST_IsRing(ST_LineFromText('LINESTRING(0 0,0 10,10 10,0 0)'))
+1
+select ST_IsRing(ST_LineFromText('LINESTRING(0 0,0 10,10 10,-10 -10, 0 -10, 0 0)'));
+ST_IsRing(ST_LineFromText('LINESTRING(0 0,0 10,10 10,-10 -10, 0 -10, 0 0)'))
+0
+#
+# MDEV-7514 GIS: PointOnSurface returns NULL instead of the point.
+#
+SELECT ST_GEOMETRYTYPE(ST_PointOnSurface(ST_PolyFromText('POLYGON((-70.916 42.1002,-70.9468 42.0946,-70.9754 42.0875,-70.9749 42.0879,-70.9759 42.0897,-70.916 42.1002))')));
+ST_GEOMETRYTYPE(ST_PointOnSurface(ST_PolyFromText('POLYGON((-70.916 42.1002,-70.9468 42.0946,-70.9754 42.0875,-70.9749 42.0879,-70.9759 42.0897,-70.916 42.1002))')))
+NULL
+#
+# MDEV-7529 GIS: ST_Relate returns unexpected results for POINT relations
+#
+select ST_Relate(ST_PointFromText('POINT(0 0)'),ST_PointFromText('POINT(0 0)'),'T*F**FFF*') AS equals;
+equals
+1
+select ST_Relate(ST_PointFromText('POINT(0 0)'),ST_PointFromText('POINT(0 0)'),'T*****FF*') AS contains;
+contains
+1
+select ST_Relate(ST_PointFromText('POINT(0 0)'),ST_PointFromText('POINT(0 0)'),'T*F**F***') AS within;
+within
+1
+select ST_Relate(ST_PointFromText('POINT(0 0)'),ST_PointFromText('POINT(1 1)'),'FF*FF****') as disjoint;
+disjoint
+1
+select ST_Relate(ST_PointFromText('POINT(0 0)'),ST_PointFromText('POINT(0 0)'),'FF*FF****') as disjoint;
+disjoint
+0
+#
+# MDEV-7528 GIS: Functions return NULL instead of specified -1 for NULL arguments.
+#
+select ST_IsRing(NULL);
+ST_IsRing(NULL)
+-1
+#
+# MDEV-8675 Different results of GIS functions on NULL vs NOT NULL columns
+#
+CREATE TABLE t1 (g1 GEOMETRY NOT NULL,g2 GEOMETRY NULL);
+CREATE TABLE t2 AS SELECT WITHIN(g1,g1) as w1,WITHIN(g2,g2) AS w2 FROM t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `w1` int(1) DEFAULT NULL,
+ `w2` int(1) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1,t2;
diff --cc mysql-test/r/group_min_max.result
index ec3f4d9bf99,a4cb8da5cce..777780f8400
--- a/mysql-test/r/group_min_max.result
+++ b/mysql-test/r/group_min_max.result
@@@ -3733,182 -3733,33 +3733,210 @@@ id MIN(a) MAX(a
4 2001-01-04 2001-01-04
DROP TABLE t1;
#
+ # MDEV-17039: Query plan changes when we use GROUP BY optimization with optimizer_use_condition_selectivity=4
+ # and use_stat_tables= PREFERABLY
+ #
+ CREATE TABLE t1 (a INT, b INT,c INT DEFAULT 0, INDEX (a,b));
+ INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3), (1,4), (1,5),
+ (2,2), (2,3), (2,1), (3,1), (4,1), (4,2), (4,3), (4,4), (4,5), (4,6);
+ set @save_optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
+ set @save_use_stat_tables= @@use_stat_tables;
+ set @@optimizer_use_condition_selectivity=4;
+ set @@use_stat_tables=PREFERABLY;
+ explain extended SELECT a FROM t1 AS t1_outer WHERE a IN (SELECT max(b) FROM t1 GROUP BY a);
+ id select_type table type possible_keys key key_len ref rows filtered Extra
+ 1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 8 100.00
+ 1 PRIMARY t1_outer ref a a 5 <subquery2>.max(b) 2 100.00 Using index
+ 2 MATERIALIZED t1 range NULL a 5 NULL 8 100.00 Using index for group-by
+ Warnings:
+ Note 1003 select `test`.`t1_outer`.`a` AS `a` from <materialize> (select max(`test`.`t1`.`b`) from `test`.`t1` group by `test`.`t1`.`a`) join `test`.`t1` `t1_outer` where (`test`.`t1_outer`.`a` = `<subquery2>`.`max(b)`)
+ set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+ set @@use_stat_tables=@save_use_stat_tables;
+ explain extended SELECT a FROM t1 AS t1_outer WHERE a IN (SELECT max(b) FROM t1 GROUP BY a);
+ id select_type table type possible_keys key key_len ref rows filtered Extra
+ 1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 8 100.00
+ 1 PRIMARY t1_outer ref a a 5 <subquery2>.max(b) 2 100.00 Using index
+ 2 MATERIALIZED t1 range NULL a 5 NULL 8 100.00 Using index for group-by
+ Warnings:
+ Note 1003 select `test`.`t1_outer`.`a` AS `a` from <materialize> (select max(`test`.`t1`.`b`) from `test`.`t1` group by `test`.`t1`.`a`) join `test`.`t1` `t1_outer` where (`test`.`t1_outer`.`a` = `<subquery2>`.`max(b)`)
+ drop table t1;
+ #
# End of 10.0 tests
#
+#
+# Start of 10.1 tests
+#
+#
+# MDEV-6990 GROUP_MIN_MAX optimization is not applied in some cases when it could
+#
+CREATE TABLE t1 (id INT NOT NULL, a DATE, KEY(id,a)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,'2001-01-01');
+INSERT INTO t1 VALUES (1,'2001-01-02');
+INSERT INTO t1 VALUES (1,'2001-01-03');
+INSERT INTO t1 VALUES (1,'2001-01-04');
+INSERT INTO t1 VALUES (2,'2001-01-01');
+INSERT INTO t1 VALUES (2,'2001-01-02');
+INSERT INTO t1 VALUES (2,'2001-01-03');
+INSERT INTO t1 VALUES (2,'2001-01-04');
+INSERT INTO t1 VALUES (3,'2001-01-01');
+INSERT INTO t1 VALUES (3,'2001-01-02');
+INSERT INTO t1 VALUES (3,'2001-01-03');
+INSERT INTO t1 VALUES (3,'2001-01-04');
+INSERT INTO t1 VALUES (4,'2001-01-01');
+INSERT INTO t1 VALUES (4,'2001-01-02');
+INSERT INTO t1 VALUES (4,'2001-01-03');
+INSERT INTO t1 VALUES (4,'2001-01-04');
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>='2001-01-04' GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL id 8 NULL 9 Using where; Using index for group-by
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104.0 GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL id 8 NULL 9 Using where; Using index for group-by
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL id 8 NULL 9 Using where; Using index for group-by
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>='2001-01-04' GROUP BY id;
+id MIN(a) MAX(a)
+1 2001-01-04 2001-01-04
+2 2001-01-04 2001-01-04
+3 2001-01-04 2001-01-04
+4 2001-01-04 2001-01-04
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104.0 GROUP BY id;
+id MIN(a) MAX(a)
+1 2001-01-04 2001-01-04
+2 2001-01-04 2001-01-04
+3 2001-01-04 2001-01-04
+4 2001-01-04 2001-01-04
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id;
+id MIN(a) MAX(a)
+1 2001-01-04 2001-01-04
+2 2001-01-04 2001-01-04
+3 2001-01-04 2001-01-04
+4 2001-01-04 2001-01-04
+DROP TABLE t1;
+#
+# MDEV-8229 GROUP_MIN_MAX is erroneously applied for BETWEEN in some cases
+#
+SET NAMES latin1;
+CREATE TABLE t1 (id INT NOT NULL, a VARCHAR(20)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,'2001-01-01');
+INSERT INTO t1 VALUES (1,'2001-01-02');
+INSERT INTO t1 VALUES (1,'2001-01-03');
+INSERT INTO t1 VALUES (1,' 2001-01-04');
+INSERT INTO t1 VALUES (2,'2001-01-01');
+INSERT INTO t1 VALUES (2,'2001-01-02');
+INSERT INTO t1 VALUES (2,'2001-01-03');
+INSERT INTO t1 VALUES (2,' 2001-01-04');
+INSERT INTO t1 VALUES (3,'2001-01-01');
+INSERT INTO t1 VALUES (3,'2001-01-02');
+INSERT INTO t1 VALUES (3,'2001-01-03');
+INSERT INTO t1 VALUES (3,' 2001-01-04');
+INSERT INTO t1 VALUES (4,'2001-01-01');
+INSERT INTO t1 VALUES (4,'2001-01-02');
+INSERT INTO t1 VALUES (4,'2001-01-03');
+INSERT INTO t1 VALUES (4,' 2001-01-04');
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN ' 2001-01-04' AND '2001-01-05' GROUP BY id;
+id MIN(a) MAX(a)
+1 2001-01-04 2001-01-03
+2 2001-01-04 2001-01-03
+3 2001-01-04 2001-01-03
+4 2001-01-04 2001-01-03
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN '2001-01-04' AND '2001-01-05' GROUP BY id;
+id MIN(a) MAX(a)
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN DATE'2001-01-04' AND DATE'2001-01-05' GROUP BY id;
+id MIN(a) MAX(a)
+1 2001-01-04 2001-01-04
+2 2001-01-04 2001-01-04
+3 2001-01-04 2001-01-04
+4 2001-01-04 2001-01-04
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN DATE'2001-01-04' AND '2001-01-05' GROUP BY id;
+id MIN(a) MAX(a)
+1 2001-01-04 2001-01-04
+2 2001-01-04 2001-01-04
+3 2001-01-04 2001-01-04
+4 2001-01-04 2001-01-04
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN '2001-01-04' AND DATE'2001-01-05' GROUP BY id;
+id MIN(a) MAX(a)
+1 2001-01-04 2001-01-04
+2 2001-01-04 2001-01-04
+3 2001-01-04 2001-01-04
+4 2001-01-04 2001-01-04
+ALTER TABLE t1 ADD KEY(id,a);
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN ' 2001-01-04' AND '2001-01-05' GROUP BY id;
+id MIN(a) MAX(a)
+1 2001-01-04 2001-01-03
+2 2001-01-04 2001-01-03
+3 2001-01-04 2001-01-03
+4 2001-01-04 2001-01-03
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN '2001-01-04' AND '2001-01-05' GROUP BY id;
+id MIN(a) MAX(a)
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN DATE'2001-01-04' AND DATE'2001-01-05' GROUP BY id;
+id MIN(a) MAX(a)
+1 2001-01-04 2001-01-04
+2 2001-01-04 2001-01-04
+3 2001-01-04 2001-01-04
+4 2001-01-04 2001-01-04
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN DATE'2001-01-04' AND '2001-01-05' GROUP BY id;
+id MIN(a) MAX(a)
+1 2001-01-04 2001-01-04
+2 2001-01-04 2001-01-04
+3 2001-01-04 2001-01-04
+4 2001-01-04 2001-01-04
+SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN '2001-01-04' AND DATE'2001-01-05' GROUP BY id;
+id MIN(a) MAX(a)
+1 2001-01-04 2001-01-04
+2 2001-01-04 2001-01-04
+3 2001-01-04 2001-01-04
+4 2001-01-04 2001-01-04
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN ' 2001-01-04' AND '2001-01-05' GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL id 27 NULL 9 Using where; Using index for group-by
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN '2001-01-04' AND '2001-01-05' GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL id 27 NULL 9 Using where; Using index for group-by
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN DATE'2001-01-04' AND DATE'2001-01-05' GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL id 27 NULL 16 Using where; Using index
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN DATE'2001-01-04' AND '2001-01-05' GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL id 27 NULL 16 Using where; Using index
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN '2001-01-04' AND DATE'2001-01-05' GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL id 27 NULL 16 Using where; Using index
+DROP TABLE t1;
+#
+# MIN() optimization didn't work correctly with BETWEEN when using too
+# long strings.
+#
+create table t1 (a varchar(10), key (a)) engine=myisam;
+insert into t1 values("bar"),("Cafe");
+explain select min(a) from t1 where a between "a" and "Cafe2";
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+explain select min(a) from t1 where a between "a" and "Cafeeeeeeeeeeeeeeeeeeeeeeeeee";
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index a a 13 NULL 2 Using where; Using index
+explain select min(a) from t1 where a between "abbbbbbbbbbbbbbbbbbbb" and "Cafe2";
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index a a 13 NULL 2 Using where; Using index
+drop table t1;
+#
+# MDEV-15433: Optimizer does not use group by optimization with distinct
+#
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a INT NOT NULL, KEY(a));
+OPTIMIZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 optimize status OK
+EXPLAIN SELECT DISTINCT a FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL a 4 NULL 5 Using index for group-by
+SELECT DISTINCT a FROM t1;
+a
+1
+2
+3
+4
+drop table t1;
+#
+# End of 10.1 tests
+#
diff --cc mysql-test/r/stat_tables.result
index ceadb61feea,cd78d44462e..224c734118b
--- a/mysql-test/r/stat_tables.result
+++ b/mysql-test/r/stat_tables.result
@@@ -578,71 -578,15 +578,84 @@@ db_name table_name column_name min_valu
DROP TABLE t1;
set use_stat_tables=@save_use_stat_tables;
#
+ # MDEV-17023: Crash during read_histogram_for_table with optimizer_use_condition_selectivity set to 4
+ #
+ set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+ set @@optimizer_use_condition_selectivity=4;
+ set @@use_stat_tables= PREFERABLY;
+ explain
+ SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.user;
+ id select_type table type possible_keys key key_len ref rows Extra
+ 1 SIMPLE PROFILING ALL NULL NULL NULL NULL NULL
+ 1 SIMPLE user ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+ set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+ set use_stat_tables=@save_use_stat_tables;
++#
+# MDEV-16757: manual addition of min/max statistics for BLOB
+#
+SET use_stat_tables= PREFERABLY;
+CREATE TABLE t1 (pk INT PRIMARY KEY, t TEXT);
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 't'
+test.t1 analyze status OK
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+test t1 pk 1 2 0.0000 4.0000 1.0000 0 NULL NULL
+DELETE FROM mysql.column_stats
+WHERE db_name='test' AND table_name='t1' AND column_name='t';
+INSERT INTO mysql.column_stats VALUES
+('test','t1','t','bar','foo', 0.0, 3.0, 1.0, 0, NULL, NULL);
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+test t1 pk 1 2 0.0000 4.0000 1.0000 0 NULL NULL
+test t1 t bar foo 0.0000 3.0000 1.0000 0 NULL NULL
+SELECT pk FROM t1;
+pk
+1
+2
+DROP TABLE t1;
+set use_stat_tables=@save_use_stat_tables;
+#
+# MDEV-16760: CREATE OR REPLACE TABLE after ANALYZE TABLE
+#
+SET use_stat_tables= PREFERABLY;
+CREATE TABLE t1 (pk int PRIMARY KEY, c varchar(32));
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+SELECT * FROM t1;
+pk c
+1 foo
+2 bar
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+test t1 pk 1 2 0.0000 4.0000 1.0000 0 NULL NULL
+test t1 c bar foo 0.0000 3.0000 1.0000 0 NULL NULL
+CREATE OR REPLACE TABLE t1 (pk int PRIMARY KEY, a char(7));
+SELECT * FROM t1;
+pk a
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+DROP TABLE t1;
+set use_stat_tables=@save_use_stat_tables;
+#
+# MDEV-16711:CREATE OR REPLACE TABLE introducing BLOB column
+#
+SET use_stat_tables= PREFERABLY;
+CREATE TABLE t1 (pk INT PRIMARY KEY, t CHAR(60));
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+CREATE OR REPLACE TABLE t1 (pk INT PRIMARY KEY, t TEXT);
+SELECT MAX(pk) FROM t1;
+MAX(pk)
+NULL
+DROP TABLE t1;
+set use_stat_tables=@save_use_stat_tables;
diff --cc mysql-test/r/stat_tables_innodb.result
index c5e7309861c,02a07fa8bbb..ba1dee3106d
--- a/mysql-test/r/stat_tables_innodb.result
+++ b/mysql-test/r/stat_tables_innodb.result
@@@ -605,73 -605,17 +605,86 @@@ db_name table_name column_name min_valu
DROP TABLE t1;
set use_stat_tables=@save_use_stat_tables;
#
+ # MDEV-17023: Crash during read_histogram_for_table with optimizer_use_condition_selectivity set to 4
+ #
+ set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+ set @@optimizer_use_condition_selectivity=4;
+ set @@use_stat_tables= PREFERABLY;
+ explain
+ SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.user;
+ id select_type table type possible_keys key key_len ref rows Extra
+ 1 SIMPLE PROFILING ALL NULL NULL NULL NULL NULL
+ 1 SIMPLE user ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+ set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+ set use_stat_tables=@save_use_stat_tables;
++#
+# MDEV-16757: manual addition of min/max statistics for BLOB
+#
+SET use_stat_tables= PREFERABLY;
+CREATE TABLE t1 (pk INT PRIMARY KEY, t TEXT);
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 't'
+test.t1 analyze status OK
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+test t1 pk 1 2 0.0000 4.0000 1.0000 0 NULL NULL
+DELETE FROM mysql.column_stats
+WHERE db_name='test' AND table_name='t1' AND column_name='t';
+INSERT INTO mysql.column_stats VALUES
+('test','t1','t','bar','foo', 0.0, 3.0, 1.0, 0, NULL, NULL);
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+test t1 pk 1 2 0.0000 4.0000 1.0000 0 NULL NULL
+test t1 t bar foo 0.0000 3.0000 1.0000 0 NULL NULL
+SELECT pk FROM t1;
+pk
+1
+2
+DROP TABLE t1;
+set use_stat_tables=@save_use_stat_tables;
+#
+# MDEV-16760: CREATE OR REPLACE TABLE after ANALYZE TABLE
+#
+SET use_stat_tables= PREFERABLY;
+CREATE TABLE t1 (pk int PRIMARY KEY, c varchar(32));
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+SELECT * FROM t1;
+pk c
+1 foo
+2 bar
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+test t1 pk 1 2 0.0000 4.0000 1.0000 0 NULL NULL
+test t1 c bar foo 0.0000 3.0000 1.0000 0 NULL NULL
+CREATE OR REPLACE TABLE t1 (pk int PRIMARY KEY, a char(7));
+SELECT * FROM t1;
+pk a
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+DROP TABLE t1;
+set use_stat_tables=@save_use_stat_tables;
+#
+# MDEV-16711:CREATE OR REPLACE TABLE introducing BLOB column
+#
+SET use_stat_tables= PREFERABLY;
+CREATE TABLE t1 (pk INT PRIMARY KEY, t CHAR(60));
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+CREATE OR REPLACE TABLE t1 (pk INT PRIMARY KEY, t TEXT);
+SELECT MAX(pk) FROM t1;
+MAX(pk)
+NULL
+DROP TABLE t1;
+set use_stat_tables=@save_use_stat_tables;
set optimizer_switch=@save_optimizer_switch_for_stat_tables_test;
SET SESSION STORAGE_ENGINE=DEFAULT;
diff --cc mysql-test/t/stat_tables.test
index 2c9c1eca7d3,a0b2a22b946..c318cc5e75f
--- a/mysql-test/t/stat_tables.test
+++ b/mysql-test/t/stat_tables.test
@@@ -357,64 -357,14 +357,76 @@@ DROP TABLE t1
set use_stat_tables=@save_use_stat_tables;
+ --echo #
+ --echo # MDEV-17023: Crash during read_histogram_for_table with optimizer_use_condition_selectivity set to 4
+ --echo #
+
+ set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+ set @@optimizer_use_condition_selectivity=4;
+ set @@use_stat_tables= PREFERABLY;
+ explain
+ SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.user;
+ set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+ set use_stat_tables=@save_use_stat_tables;
++
+--echo #
+--echo # MDEV-16757: manual addition of min/max statistics for BLOB
+--echo #
+
+SET use_stat_tables= PREFERABLY;
+
+CREATE TABLE t1 (pk INT PRIMARY KEY, t TEXT);
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+ANALYZE TABLE t1;
+--sorted_result
+SELECT * FROM mysql.column_stats;
+DELETE FROM mysql.column_stats
+ WHERE db_name='test' AND table_name='t1' AND column_name='t';
+INSERT INTO mysql.column_stats VALUES
+ ('test','t1','t','bar','foo', 0.0, 3.0, 1.0, 0, NULL, NULL);
+--sorted_result
+SELECT * FROM mysql.column_stats;
+
+SELECT pk FROM t1;
+
+DROP TABLE t1;
+
+set use_stat_tables=@save_use_stat_tables;
+
+--echo #
+--echo # MDEV-16760: CREATE OR REPLACE TABLE after ANALYZE TABLE
+--echo #
+
+SET use_stat_tables= PREFERABLY;
+
+CREATE TABLE t1 (pk int PRIMARY KEY, c varchar(32));
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+ANALYZE TABLE t1;
+SELECT * FROM t1;
+SELECT * FROM mysql.column_stats;
+
+CREATE OR REPLACE TABLE t1 (pk int PRIMARY KEY, a char(7));
+SELECT * FROM t1;
+SELECT * FROM mysql.column_stats;
+
+DROP TABLE t1;
+
+set use_stat_tables=@save_use_stat_tables;
+
+
+--echo #
+--echo # MDEV-16711:CREATE OR REPLACE TABLE introducing BLOB column
+--echo #
+
+SET use_stat_tables= PREFERABLY;
+
+CREATE TABLE t1 (pk INT PRIMARY KEY, t CHAR(60));
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+ANALYZE TABLE t1;
+CREATE OR REPLACE TABLE t1 (pk INT PRIMARY KEY, t TEXT);
+
+SELECT MAX(pk) FROM t1;
+
+DROP TABLE t1;
+
+set use_stat_tables=@save_use_stat_tables;
diff --cc sql/lex.h
index 6a1cb6653e9,868f19ed9c4..87c87d03fb3
--- a/sql/lex.h
+++ b/sql/lex.h
@@@ -633,8 -630,9 +633,8 @@@ static SYMBOL symbols[] =
{ "UPGRADE", SYM(UPGRADE_SYM)},
{ "USAGE", SYM(USAGE)},
{ "USE", SYM(USE_SYM)},
- { "USER", SYM(USER)},
+ { "USER", SYM(USER_SYM)},
{ "USER_RESOURCES", SYM(RESOURCES)},
- { "USER_STATISTICS", SYM(USER_STATS_SYM)},
{ "USE_FRM", SYM(USE_FRM)},
{ "USING", SYM(USING)},
{ "UTC_DATE", SYM(UTC_DATE_SYM)},
diff --cc sql/opt_range.cc
index 1e29efda0d1,0fd2cd267fc..d8ecf8077b8
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@@ -2913,7 -3510,18 +2922,18 @@@ bool calculate_cond_selectivity_for_tab
table->cond_selectivity= 1.0;
- if (!*cond || table_records == 0)
+ if (table_records == 0)
+ DBUG_RETURN(FALSE);
+
+ QUICK_SELECT_I *quick;
+ if ((quick=table->reginfo.join_tab->quick) &&
+ quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
+ {
+ table->cond_selectivity*= (quick->records/table_records);
+ DBUG_RETURN(FALSE);
+ }
+
- if (!cond)
++ if (!*cond)
DBUG_RETURN(FALSE);
if (table->pos_in_table_list->schema_table)
diff --cc sql/sql_lex.h
index 3b47b1d25c9,d58be8b336a..05e31c28277
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@@ -2590,10 -2479,11 +2590,10 @@@ public
uint profile_query_id;
uint profile_options;
- uint uint_geom_type;
uint grant, grant_tot_col, which_columns;
enum Foreign_key::fk_match_opt fk_match_option;
- enum Foreign_key::fk_option fk_update_opt;
- enum Foreign_key::fk_option fk_delete_opt;
+ enum_fk_option fk_update_opt;
+ enum_fk_option fk_delete_opt;
uint slave_thd_opt, start_transaction_opt;
int nest_level;
/*
diff --cc sql/sql_table.cc
index dc55754ff01,5d20ad3967c..da65f168d84
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@@ -9136,13 -9066,62 +9142,63 @@@ bool mysql_alter_table(THD *thd,char *n
{
/* table is a normal table: Create temporary table in same directory */
/* Open our intermediate table. */
- new_table= open_table_uncached(thd, new_db_type, alter_ctx.get_tmp_path(),
+ new_table= open_table_uncached(thd, new_db_type, &frm,
+ alter_ctx.get_tmp_path(),
alter_ctx.new_db, alter_ctx.tmp_name,
true, true);
+ if (!new_table)
+ goto err_new_table_cleanup;
+
+ /*
+ Normally, an attempt to modify an FK parent table will cause
+ FK children to be prelocked, so the table-being-altered cannot
+ be modified by a cascade FK action, because ALTER holds a lock
+ and prelocking will wait.
+
+ But if a new FK is being added by this very ALTER, then the target
+ table is not locked yet (it's a temporary table). So, we have to
+ lock FK parents explicitly.
+ */
+ if (alter_info->flags & Alter_info::ADD_FOREIGN_KEY)
+ {
+ List <FOREIGN_KEY_INFO> fk_list;
+ List_iterator<FOREIGN_KEY_INFO> fk_list_it(fk_list);
+ FOREIGN_KEY_INFO *fk;
+
+ /* tables_opened can be > 1 only for MERGE tables */
+ DBUG_ASSERT(tables_opened == 1);
+ DBUG_ASSERT(&table_list->next_global == thd->lex->query_tables_last);
+
+ new_table->file->get_foreign_key_list(thd, &fk_list);
+ while ((fk= fk_list_it++))
+ {
+ if (lower_case_table_names)
+ {
+ char buf[NAME_LEN];
+ uint len;
+ strmake_buf(buf, fk->referenced_db->str);
+ len = my_casedn_str(files_charset_info, buf);
+ thd->make_lex_string(fk->referenced_db, buf, len);
+ strmake_buf(buf, fk->referenced_table->str);
+ len = my_casedn_str(files_charset_info, buf);
+ thd->make_lex_string(fk->referenced_table, buf, len);
+ }
+ if (table_already_fk_prelocked(table_list, fk->referenced_db,
+ fk->referenced_table, TL_READ_NO_INSERT))
+ continue;
+
+ TABLE_LIST *tl= (TABLE_LIST *) thd->alloc(sizeof(TABLE_LIST));
+ tl->init_one_table_for_prelocking(fk->referenced_db->str, fk->referenced_db->length,
+ fk->referenced_table->str, fk->referenced_table->length,
+ NULL, TL_READ_NO_INSERT, false, NULL, 0,
+ &thd->lex->query_tables_last);
+ }
+
+ if (open_tables(thd, &table_list->next_global, &tables_opened, 0,
+ &alter_prelocking_strategy))
+ goto err_new_table_cleanup;
+ }
}
- if (!new_table)
- goto err_new_table_cleanup;
/*
Note: In case of MERGE table, we do not attach children. We do not
copy data for MERGE tables. Only the children have data.
diff --cc sql/sql_yacc.yy
index fcfc63439cb,1ae964ac7f8..6ae65e0c50f
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@@ -950,72 -899,60 +950,72 @@@ bool LEX::set_bincmp(CHARSET_INFO *cs,
ulong ulong_num;
ulonglong ulonglong_number;
longlong longlong_number;
+
+ /* structs */
LEX_STRING lex_str;
- LEX_STRING *lex_str_ptr;
LEX_SYMBOL symbol;
- LEX_TYPE lex_type;
- Table_ident *table;
- char *simple_string;
+ struct sys_var_with_base variable;
+ struct { int vars, conds, hndlrs, curs; } spblock;
+
+ /* pointers */
+ CHARSET_INFO *charset;
+ Condition_information_item *cond_info_item;
+ DYNCALL_CREATE_DEF *dyncol_def;
+ Diagnostics_information *diag_info;
Item *item;
Item_num *item_num;
+ Item_param *item_param;
+ Key_part_spec *key_part;
+ LEX *lex;
+ LEX_STRING *lex_str_ptr;
+ LEX_USER *lex_user;
+ List<Condition_information_item> *cond_info_list;
+ List<DYNCALL_CREATE_DEF> *dyncol_def_list;
List<Item> *item_list;
+ List<Statement_information_item> *stmt_info_list;
List<String> *string_list;
+ Statement_information_item *stmt_info_item;
String *string;
- Key_part_spec *key_part;
TABLE_LIST *table_list;
- udf_func *udf;
- LEX_USER *lex_user;
- struct sys_var_with_base variable;
- enum enum_var_type var_type;
- Key::Keytype key_type;
- enum ha_key_alg key_alg;
- handlerton *db_type;
- enum row_type row_type;
- enum ha_rkey_function ha_rkey_mode;
- enum enum_tx_isolation tx_isolation;
- enum Cast_target cast_type;
- enum Item_udftype udf_type;
- enum ha_choice choice;
- CHARSET_INFO *charset;
- thr_lock_type lock_type;
- interval_type interval, interval_time_st;
- timestamp_type date_time_type;
- st_select_lex *select_lex;
+ Table_ident *table;
+ char *simple_string;
chooser_compare_func_creator boolfunc2creator;
+ class my_var *myvar;
class sp_condition_value *spcondvalue;
- struct { int vars, conds, hndlrs, curs; } spblock;
- sp_name *spname;
- LEX *lex;
- sp_head *sphead;
+ class sp_head *sphead;
+ class sp_label *splabel;
+ class sp_name *spname;
+ class sp_variable *spvar;
+ handlerton *db_type;
+ st_select_lex *select_lex;
struct p_elem_val *p_elem_value;
- enum index_hint_type index_hint;
- enum enum_filetype filetype;
+ udf_func *udf;
+
+ /* enums */
+ enum Cast_target cast_type;
+ enum Condition_information_item::Name cond_info_item_name;
+ enum enum_diag_condition_item_name diag_condition_item_name;
+ enum Diagnostics_information::Which_area diag_area;
+ enum Field::geometry_type geom_type;
- enum Foreign_key::fk_option m_fk_option;
+ enum enum_fk_option m_fk_option;
+ enum Item_udftype udf_type;
+ enum Key::Keytype key_type;
+ enum Statement_information_item::Name stmt_info_item_name;
+ enum enum_field_types field_type;
+ enum enum_filetype filetype;
+ enum enum_tx_isolation tx_isolation;
+ enum enum_var_type var_type;
enum enum_yes_no_unknown m_yes_no_unk;
- Diag_condition_item_name diag_condition_item_name;
- Diagnostics_information::Which_area diag_area;
- Diagnostics_information *diag_info;
- Statement_information_item *stmt_info_item;
- Statement_information_item::Name stmt_info_item_name;
- List<Statement_information_item> *stmt_info_list;
- Condition_information_item *cond_info_item;
- Condition_information_item::Name cond_info_item_name;
- List<Condition_information_item> *cond_info_list;
- DYNCALL_CREATE_DEF *dyncol_def;
- List<DYNCALL_CREATE_DEF> *dyncol_def_list;
- bool is_not_empty;
+ enum ha_choice choice;
+ enum ha_key_alg key_alg;
+ enum ha_rkey_function ha_rkey_mode;
+ enum index_hint_type index_hint;
+ enum interval_type interval, interval_time_st;
+ enum row_type row_type;
+ enum sp_variable::enum_mode spvar_mode;
+ enum thr_lock_type lock_type;
+ enum enum_mysql_timestamp_type date_time_type;
+ DDL_options_st object_ddl_options;
}
%{
@@@ -1651,7 -1584,8 +1651,7 @@@ bool my_yyoverflow(short **a, YYSTYPE *
%token UPDATE_SYM /* SQL-2003-R */
%token UPGRADE_SYM
%token USAGE /* SQL-2003-N */
- %token USER /* SQL-2003-R */
+ %token USER_SYM /* SQL-2003-R */
-%token USER_STATS_SYM
%token USE_FRM
%token USE_SYM
%token USING /* SQL-2003-R */
@@@ -2538,17 -2446,22 +2538,17 @@@ create
Lex->create_view_algorithm= DTYPE_ALGORITHM_UNDEFINED;
Lex->create_view_suid= TRUE;
}
- view_or_trigger_or_sp_or_event
- {
- if ($1 && Lex->sql_command != SQLCOM_CREATE_VIEW)
- {
- my_error(ER_WRONG_USAGE, MYF(0), "OR REPLACE",
- "TRIGGERS / SP / EVENT");
- MYSQL_YYABORT;
- }
- }
- | CREATE USER_SYM clear_privileges grant_list
+ view_or_trigger_or_sp_or_event { }
- | create_or_replace USER opt_if_not_exists clear_privileges grant_list
++ | create_or_replace USER_SYM opt_if_not_exists clear_privileges grant_list
{
- Lex->sql_command = SQLCOM_CREATE_USER;
+ if (Lex->set_command_with_check(SQLCOM_CREATE_USER, $1 | $3))
+ MYSQL_YYABORT;
}
- | CREATE ROLE_SYM clear_privileges role_list opt_with_admin
+ | create_or_replace ROLE_SYM opt_if_not_exists
+ clear_privileges role_list opt_with_admin
{
- Lex->sql_command = SQLCOM_CREATE_ROLE;
+ if (Lex->set_command_with_check(SQLCOM_CREATE_ROLE, $1 | $3))
+ MYSQL_YYABORT;
}
| CREATE LOGFILE_SYM GROUP_SYM logfile_group_info
{
@@@ -2581,10 -2496,9 +2581,10 @@@ server_options_list
;
server_option:
- USER TEXT_STRING_sys
+ USER_SYM TEXT_STRING_sys
{
- Lex->server_options.username= $2.str;
+ MYSQL_YYABORT_UNLESS(Lex->server_options.username.str == 0);
+ Lex->server_options.username= $2;
}
| HOST_SYM TEXT_STRING_sys
{
@@@ -6736,13 -6784,17 +6736,13 @@@ opt_on_update_delete
;
delete_option:
- RESTRICT { $$= Foreign_key::FK_OPTION_RESTRICT; }
- | CASCADE { $$= Foreign_key::FK_OPTION_CASCADE; }
- | SET NULL_SYM { $$= Foreign_key::FK_OPTION_SET_NULL; }
- | NO_SYM ACTION { $$= Foreign_key::FK_OPTION_NO_ACTION; }
- | SET DEFAULT { $$= Foreign_key::FK_OPTION_DEFAULT; }
+ RESTRICT { $$= FK_OPTION_RESTRICT; }
+ | CASCADE { $$= FK_OPTION_CASCADE; }
+ | SET NULL_SYM { $$= FK_OPTION_SET_NULL; }
+ | NO_SYM ACTION { $$= FK_OPTION_NO_ACTION; }
+ | SET DEFAULT { $$= FK_OPTION_SET_DEFAULT; }
;
-normal_key_type:
- key_or_index { $$= Key::MULTIPLE; }
- ;
-
constraint_key_type:
PRIMARY_SYM KEY_SYM { $$= Key::PRIMARY; }
| UNIQUE_SYM opt_key_or_index { $$= Key::UNIQUE; }
@@@ -9383,9 -9470,9 +9383,9 @@@ function_call_keyword
if ($$ == NULL)
MYSQL_YYABORT;
}
- | USER '(' ')'
+ | USER_SYM '(' ')'
{
- $$= new (thd->mem_root) Item_func_user();
+ $$= new (thd->mem_root) Item_func_user(thd);
if ($$ == NULL)
MYSQL_YYABORT;
Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION);
@@@ -11635,17 -11752,21 +11635,17 @@@ drop
{
LEX *lex=Lex;
if (lex->sphead)
- {
- my_error(ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE");
- MYSQL_YYABORT;
- }
- lex->sql_command = SQLCOM_DROP_PROCEDURE;
- lex->check_exists= $3;
+ my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE"));
+ lex->set_command(SQLCOM_DROP_PROCEDURE, $3);
lex->spname= $4;
}
- | DROP USER opt_if_exists clear_privileges user_list
- | DROP USER_SYM clear_privileges user_list
++ | DROP USER_SYM opt_if_exists clear_privileges user_list
{
- Lex->sql_command = SQLCOM_DROP_USER;
+ Lex->set_command(SQLCOM_DROP_USER, $3);
}
- | DROP ROLE_SYM clear_privileges role_list
+ | DROP ROLE_SYM opt_if_exists clear_privileges role_list
{
- Lex->sql_command = SQLCOM_DROP_ROLE;
+ Lex->set_command(SQLCOM_DROP_ROLE, $3);
}
| DROP VIEW_SYM opt_if_exists
{
@@@ -12945,11 -13016,11 +12945,11 @@@ kill_option
kill_expr:
expr
{
- Lex->value_list.push_front($$);
+ Lex->value_list.push_front($$, thd->mem_root);
}
- | USER user
+ | USER_SYM user
{
- Lex->users_list.push_back($2);
+ Lex->users_list.push_back($2, thd->mem_root);
Lex->kill_type= KILL_TYPE_USER;
}
;
@@@ -14274,7 -14366,8 +14274,7 @@@ keyword_sp
| UNDOFILE_SYM {}
| UNKNOWN_SYM {}
| UNTIL_SYM {}
- | USER {}
+ | USER_SYM {}
- | USER_STATS_SYM {}
| USE_FRM {}
| VARIABLES {}
| VIEW_SYM {}
diff --cc sql/table_cache.cc
index b911252019f,bdb7914c32b..16a47b37417
--- a/sql/table_cache.cc
+++ b/sql/table_cache.cc
@@@ -49,12 -49,10 +49,11 @@@
*/
#include "my_global.h"
-#include "hash.h"
+#include "lf.h"
#include "table.h"
#include "sql_base.h"
- #include "sql_statistics.h"
+
/** Configuration. */
ulong tdc_size; /**< Table definition cache threshold for LRU eviction. */
ulong tc_size; /**< Table cache threshold for LRU eviction. */
@@@ -762,34 -852,34 +761,33 @@@ void tdc_release_share(TABLE_SHARE *sha
DBUG_PRINT("enter",
("share: 0x%lx table: %s.%s ref_count: %u version: %lu",
(ulong) share, share->db.str, share->table_name.str,
- share->tdc.ref_count, share->tdc.version));
- DBUG_ASSERT(share->tdc.ref_count);
+ share->tdc->ref_count, share->tdc->version));
+ DBUG_ASSERT(share->tdc->ref_count);
- if (share->tdc.ref_count > 1)
+ if (share->tdc->ref_count > 1)
{
- share->tdc.ref_count--;
+ share->tdc->ref_count--;
if (!share->is_view)
- mysql_cond_broadcast(&share->tdc.COND_release);
- mysql_mutex_unlock(&share->tdc.LOCK_table_share);
+ mysql_cond_broadcast(&share->tdc->COND_release);
+ mysql_mutex_unlock(&share->tdc->LOCK_table_share);
DBUG_VOID_RETURN;
}
- mysql_mutex_unlock(&share->tdc.LOCK_table_share);
+ mysql_mutex_unlock(&share->tdc->LOCK_table_share);
mysql_mutex_lock(&LOCK_unused_shares);
- mysql_mutex_lock(&share->tdc.LOCK_table_share);
- if (share->tdc.flushed)
+ mysql_mutex_lock(&share->tdc->LOCK_table_share);
+ if (--share->tdc->ref_count)
{
- mysql_mutex_unlock(&share->tdc.LOCK_table_share);
+ if (!share->is_view)
+ mysql_cond_broadcast(&share->tdc->COND_release);
+ mysql_mutex_unlock(&share->tdc->LOCK_table_share);
mysql_mutex_unlock(&LOCK_unused_shares);
- tdc_delete_share_from_hash(share);
DBUG_VOID_RETURN;
}
- if (--share->tdc.ref_count)
+ if (share->tdc->flushed || tdc_records() > tdc_size)
{
- delete_stat_values_for_table_share(share);
- if (!share->is_view)
- mysql_cond_broadcast(&share->tdc.COND_release);
- mysql_mutex_unlock(&share->tdc.LOCK_table_share);
mysql_mutex_unlock(&LOCK_unused_shares);
+ tdc_delete_share_from_hash(share->tdc);
DBUG_VOID_RETURN;
}
/* Link share last in used_table_share list */
diff --cc storage/maria/ma_blockrec.c
index c0a93415379,367c8da26c0..8ef463d2918
--- a/storage/maria/ma_blockrec.c
+++ b/storage/maria/ma_blockrec.c
@@@ -5161,11 -5123,19 +5161,19 @@@ int _ma_read_block_record(MARIA_HA *inf
info->buff, share->page_type,
PAGECACHE_LOCK_LEFT_UNLOCKED, 0)))
DBUG_RETURN(my_errno);
+
+ /*
+ Unallocated page access can happen if this is an access to a page where
+ all rows were deleted as part of this statement.
+ */
+ DBUG_ASSERT((buff[PAGE_TYPE_OFFSET] & PAGE_TYPE_MASK) == HEAD_PAGE ||
+ (buff[PAGE_TYPE_OFFSET] & PAGE_TYPE_MASK) == UNALLOCATED_PAGE);
+
- if (((buff[PAGE_TYPE_OFFSET] & PAGE_TYPE_MASK) == UNALLOCATED_PAGE) ||
- !(data= get_record_position(buff, block_size, offset, &end_of_data)))
+ DBUG_ASSERT((buff[PAGE_TYPE_OFFSET] & PAGE_TYPE_MASK) == HEAD_PAGE);
+ if (!(data= get_record_position(share, buff, offset, &end_of_data)))
{
DBUG_ASSERT(!maria_assert_if_crashed_table);
- DBUG_PRINT("error", ("Wrong directory entry in data block"));
+ DBUG_PRINT("warning", ("Wrong directory entry in data block"));
my_errno= HA_ERR_RECORD_DELETED; /* File crashed */
DBUG_RETURN(HA_ERR_RECORD_DELETED);
}
diff --cc storage/mroonga/ha_mroonga.cpp
index cbf902b7c0e,e4b6698cdf6..d317fef44fb
--- a/storage/mroonga/ha_mroonga.cpp
+++ b/storage/mroonga/ha_mroonga.cpp
@@@ -16761,22 -15755,12 +16761,15 @@@ int ha_mroonga::storage_get_foreign_key
ref_table_buff,
ref_table_name_length,
TRUE);
- #ifdef MRN_FOREIGN_KEY_USE_METHOD_ENUM
f_key_info.update_method = FK_OPTION_RESTRICT;
f_key_info.delete_method = FK_OPTION_RESTRICT;
- #else
- f_key_info.update_method = thd_make_lex_string(thd, NULL, "RESTRICT",
- 8, TRUE);
- f_key_info.delete_method = thd_make_lex_string(thd, NULL, "RESTRICT",
- 8, TRUE);
- #endif
f_key_info.referenced_key_name = thd_make_lex_string(thd, NULL, "PRIMARY",
7, TRUE);
- LEX_STRING *field_name = thd_make_lex_string(thd, NULL, column_name,
- column_name_size, TRUE);
+ LEX_STRING *field_name = thd_make_lex_string(thd,
+ NULL,
+ column_name.c_str(),
+ column_name.length(),
+ TRUE);
f_key_info.foreign_fields.push_back(field_name);
char ref_path[FN_REFLEN + 1];
diff --cc storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
index 50d35ee4906,d8796db8cbe..8f65895cc9c
--- a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
@@@ -106,7 -138,13 +106,10 @@@ endif (
set_cflags_if_supported(
-Wno-error=strict-overflow
)
-set_ldflags_if_supported(
- -Wno-error=strict-overflow
- )
+ # new flag sets in MySQL 8.0 seem to explicitly disable this
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fexceptions")
+
## set extra debugging flags and preprocessor definitions
set(CMAKE_C_FLAGS_DEBUG "-g3 -O0 ${CMAKE_C_FLAGS_DEBUG}")
set(CMAKE_CXX_FLAGS_DEBUG "-g3 -O0 ${CMAKE_CXX_FLAGS_DEBUG}")
diff --cc storage/tokudb/PerconaFT/portability/toku_race_tools.h
index 96712ffffdc,c70ee1969a6..eee08185513
--- a/storage/tokudb/PerconaFT/portability/toku_race_tools.h
+++ b/storage/tokudb/PerconaFT/portability/toku_race_tools.h
@@@ -40,12 -40,7 +40,12 @@@ Copyright (c) 2006, 2015, Percona and/o
#include <portability/toku_config.h>
+#ifdef HAVE_valgrind
+#undef USE_VALGRIND
+#define USE_VALGRIND 1
+#endif
+
- #if defined(__linux__) && USE_VALGRIND
+ #if defined(__linux__) && defined(USE_VALGRIND) && USE_VALGRIND
# include <valgrind/helgrind.h>
# include <valgrind/drd.h>
diff --cc storage/tokudb/ha_tokudb_admin.cc
index e1443101bb6,c400e5bd22c..8d4a67186fe
--- a/storage/tokudb/ha_tokudb_admin.cc
+++ b/storage/tokudb/ha_tokudb_admin.cc
@@@ -1000,10 -1001,11 +1001,11 @@@ struct check_context
THD* thd;
};
- static int ha_tokudb_check_progress(void* extra, float progress) {
+ static int ha_tokudb_check_progress(void* extra,
+ TOKUDB_UNUSED(float progress)) {
struct check_context* context = (struct check_context*)extra;
int result = 0;
- if (thd_killed(context->thd))
+ if (thd_kill_level(context->thd))
result = ER_ABORTING_CONNECTION;
return result;
}
diff --cc storage/tokudb/ha_tokudb_alter_56.cc
index 473c4984eb6,293086b897e..b579d00f67b
--- a/storage/tokudb/ha_tokudb_alter_56.cc
+++ b/storage/tokudb/ha_tokudb_alter_56.cc
@@@ -911,9 -929,10 +929,10 @@@ bool ha_tokudb::commit_inplace_alter_ta
ha_alter_info->group_commit_ctx = NULL;
}
#endif
+ #if defined(TOKU_INCLUDE_WRITE_FRM_DATA) && TOKU_INCLUDE_WRITE_FRM_DATA
#if (50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599) || \
- (100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099)
+ (100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100199)
- #if WITH_PARTITION_STORAGE_ENGINE
+ #if defined(WITH_PARTITION_STORAGE_ENGINE) && WITH_PARTITION_STORAGE_ENGINE
if (TOKU_PARTITION_WRITE_FRM_DATA || altered_table->part_info == NULL) {
#else
if (true) {
diff --cc storage/tokudb/hatoku_defines.h
index 360272969e5,66a8fa5d982..ab9e0f79ef8
--- a/storage/tokudb/hatoku_defines.h
+++ b/storage/tokudb/hatoku_defines.h
@@@ -69,8 -74,20 +74,20 @@@ Copyright (c) 2006, 2015, Percona and/o
#pragma interface /* gcc class implementation */
#endif
- #if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100199
+ // TOKU_INCLUDE_WRITE_FRM_DATA, TOKU_PARTITION_WRITE_FRM_DATA, and
+ // TOKU_INCLUDE_DISCOVER_FRM all work together as two opposing sides
+ // of the same functionality. The 'WRITE' includes functionality to
+ // write a copy of every tables .frm data into the tables status dictionary on
+ // CREATE or ALTER. When WRITE is in, the .frm data is also verified whenever a
+ // table is opened.
+ //
+ // The 'DISCOVER' then implements the MySQL table discovery API which reads
+ // this same data and returns it back to MySQL.
+ // In most cases, they should all be in or out without mixing. There may be
+ // extreme cases though where one side (WRITE) is supported but perhaps
+ // 'DISCOVERY' may not be, thus the need for individual indicators.
-#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
++#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100199
// mariadb 10.0
#define TOKU_USE_DB_TYPE_TOKUDB 1
#define TOKU_INCLUDE_ALTER_56 1
diff --cc storage/tokudb/hatoku_hton.cc
index 1016ae83ad2,39ffa6daa70..50fffd0a999
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@@ -913,12 -922,12 +928,12 @@@ static int tokudb_commit(handlerton * h
DB_TXN *this_txn = *txn;
if (this_txn) {
uint32_t syncflag =
- tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
+ tokudb_sync_on_commit(thd, this_txn) ? 0 : DB_TXN_NOSYNC;
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_TXN,
- "commit trx %u txn %p syncflag %u",
+ "commit trx %u txn %p %" PRIu64 " syncflag %u",
all,
- this_txn,
+ this_txn, this_txn->id64(this_txn),
syncflag);
// test hook to induce a crash on a debug build
DBUG_EXECUTE_IF("tokudb_crash_commit_before", DBUG_SUICIDE(););
diff --cc storage/tokudb/hatoku_hton.h
index 80e13fa9b0c,c5b6aab1769..e632a9afe88
--- a/storage/tokudb/hatoku_hton.h
+++ b/storage/tokudb/hatoku_hton.h
@@@ -175,12 -181,13 +181,13 @@@ inline uint64_t tokudb_get_killed_time_
inline int tokudb_killed_callback(void) {
THD *thd = current_thd;
- return thd_killed(thd);
+ return thd_kill_level(thd);
}
- inline bool tokudb_killed_thd_callback(void *extra, uint64_t deleted_rows) {
+ inline bool tokudb_killed_thd_callback(void* extra,
+ TOKUDB_UNUSED(uint64_t deleted_rows)) {
THD *thd = static_cast<THD *>(extra);
- return thd_killed(thd) != 0;
+ return thd_kill_level(thd) != 0;
}
extern HASH tokudb_open_tables;
revision-id: d527bf5390aa0a7810ebafbe15fd96310062e44e (mariadb-10.0.36-25-gd527bf5390a)
parent(s): 0ccba62db385139caae514f70b31187bdce0de88 a816eac92ac2381e1b9cd4d655e733bdeafb173e
author: Oleksandr Byelkin
committer: Oleksandr Byelkin
timestamp: 2018-09-06 21:04:56 +0200
message:
Merge branch 'merge-tokudb-5.6' into 10.0
storage/tokudb/CMakeLists.txt | 8 +-
storage/tokudb/PerconaFT/CMakeLists.txt | 3 +-
.../cmake_modules/TokuSetupCompiler.cmake | 3 +
.../tokudb/PerconaFT/ft/cachetable/cachetable.cc | 21 +-
.../tokudb/PerconaFT/ft/cachetable/cachetable.h | 8 +-
.../tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft-test-helpers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft.h | 3 +
storage/tokudb/PerconaFT/ft/node.cc | 2 +
.../PerconaFT/ft/serialize/block_allocator.cc | 2 +-
.../tokudb/PerconaFT/ft/tests/cachetable-4357.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-4365.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-5097.cc | 6 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc | 7 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-all-write.cc | 5 +-
.../ft/tests/cachetable-checkpoint-pending.cc | 8 +-
.../ft/tests/cachetable-checkpoint-pinned-nodes.cc | 6 +-
.../ft/tests/cachetable-cleaner-checkpoint.cc | 5 +-
.../ft/tests/cachetable-cleaner-checkpoint2.cc | 5 +-
.../cachetable-cleaner-thread-attrs-accumulate.cc | 8 +-
.../cachetable-cleaner-thread-everything-pinned.cc | 5 +-
...etable-cleaner-thread-nothing-needs-flushing.cc | 5 +-
.../cachetable-cleaner-thread-same-fullhash.cc | 7 +-
.../ft/tests/cachetable-cleaner-thread-simple.cc | 7 +-
.../ft/tests/cachetable-clock-eviction.cc | 9 +-
.../ft/tests/cachetable-clock-eviction2.cc | 9 +-
.../ft/tests/cachetable-clock-eviction3.cc | 9 +-
.../ft/tests/cachetable-clock-eviction4.cc | 9 +-
.../ft/tests/cachetable-clone-checkpoint.cc | 5 +-
.../cachetable-clone-partial-fetch-pinned-node.cc | 7 +-
.../ft/tests/cachetable-clone-partial-fetch.cc | 7 +-
.../ft/tests/cachetable-clone-pin-nonblocking.cc | 7 +-
.../ft/tests/cachetable-clone-unpin-remove.cc | 5 +-
.../ft/tests/cachetable-eviction-close-test.cc | 4 -
.../ft/tests/cachetable-eviction-close-test2.cc | 4 -
.../ft/tests/cachetable-eviction-getandpin-test.cc | 14 +-
.../tests/cachetable-eviction-getandpin-test2.cc | 12 +-
.../ft/tests/cachetable-fetch-inducing-evictor.cc | 15 +-
.../ft/tests/cachetable-flush-during-cleaner.cc | 3 +-
.../ft/tests/cachetable-getandpin-test.cc | 8 +-
.../cachetable-kibbutz_and_flush_cachefile.cc | 3 +-
.../PerconaFT/ft/tests/cachetable-partial-fetch.cc | 18 +-
.../ft/tests/cachetable-pin-checkpoint.cc | 6 -
.../cachetable-pin-nonblocking-checkpoint-clean.cc | 9 +-
.../ft/tests/cachetable-prefetch-close-test.cc | 2 -
.../ft/tests/cachetable-prefetch-getandpin-test.cc | 12 +-
.../ft/tests/cachetable-put-checkpoint.cc | 9 -
.../PerconaFT/ft/tests/cachetable-simple-clone.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-clone2.cc | 5 +-
.../PerconaFT/ft/tests/cachetable-simple-close.cc | 20 +-
.../ft/tests/cachetable-simple-maybe-get-pin.cc | 3 +-
.../ft/tests/cachetable-simple-pin-cheap.cc | 9 +-
.../ft/tests/cachetable-simple-pin-dep-nodes.cc | 8 +-
.../cachetable-simple-pin-nonblocking-cheap.cc | 19 +-
.../ft/tests/cachetable-simple-pin-nonblocking.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-simple-pin.cc | 11 +-
.../ft/tests/cachetable-simple-put-dep-nodes.cc | 6 +-
.../cachetable-simple-read-pin-nonblocking.cc | 13 +-
.../ft/tests/cachetable-simple-read-pin.cc | 13 +-
.../cachetable-simple-unpin-remove-checkpoint.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-verify.cc | 5 +-
.../tokudb/PerconaFT/ft/tests/cachetable-test.cc | 22 +-
.../ft/tests/cachetable-unpin-and-remove-test.cc | 4 +-
.../cachetable-unpin-remove-and-checkpoint.cc | 6 +-
.../PerconaFT/ft/tests/cachetable-unpin-test.cc | 2 -
storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc | 178 +
storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc | 208 +
storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc | 2 +
storage/tokudb/PerconaFT/ft/txn/rollback.cc | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp | 2 +-
storage/tokudb/PerconaFT/portability/memory.cc | 14 +-
storage/tokudb/PerconaFT/portability/toku_assert.h | 2 +-
.../tokudb/PerconaFT/portability/toku_debug_sync.h | 3 +-
.../PerconaFT/portability/toku_instr_mysql.cc | 6 +-
.../PerconaFT/portability/toku_instrumentation.h | 6 +-
.../PerconaFT/portability/toku_portability.h | 2 +-
.../tokudb/PerconaFT/portability/toku_race_tools.h | 2 +-
storage/tokudb/PerconaFT/src/tests/get_last_key.cc | 32 +-
storage/tokudb/PerconaFT/src/ydb.cc | 3 +
storage/tokudb/PerconaFT/util/dmt.cc | 4 +-
storage/tokudb/PerconaFT/util/minicron.cc | 3 +-
storage/tokudb/PerconaFT/util/scoped_malloc.cc | 2 +-
.../util/tests/minicron-change-period-data-race.cc | 66 +
storage/tokudb/ha_tokudb.cc | 325 +-
storage/tokudb/ha_tokudb.h | 92 +-
storage/tokudb/ha_tokudb_admin.cc | 8 +-
storage/tokudb/ha_tokudb_alter_55.cc | 4 +
storage/tokudb/ha_tokudb_alter_56.cc | 265 +-
storage/tokudb/ha_tokudb_alter_common.cc | 6 +-
storage/tokudb/ha_tokudb_update.cc | 96 +-
storage/tokudb/hatoku_cmp.cc | 33 +-
storage/tokudb/hatoku_cmp.h | 14 +-
storage/tokudb/hatoku_defines.h | 50 +-
storage/tokudb/hatoku_hton.cc | 169 +-
storage/tokudb/hatoku_hton.h | 25 +-
storage/tokudb/mysql-test/rpl/disabled.def | 1 +
.../r/rpl_mixed_replace_into.result | 0
.../rpl/r/rpl_parallel_tokudb_delete_pk.result | 5 -
...pl_parallel_tokudb_update_pk_uc0_lookup0.result | 5 -
.../rpl/r/rpl_parallel_tokudb_write_pk.result | 2 -
.../r/rpl_row_replace_into.result | 0
.../r/rpl_stmt_replace_into.result | 0
.../mysql-test/rpl/r/rpl_xa_interleave.result | 59 +
.../t/rpl_mixed_replace_into.test | 0
.../t/rpl_row_replace_into.test | 0
.../t/rpl_stmt_replace_into.test | 0
.../tokudb/mysql-test/rpl/t/rpl_xa_interleave.test | 103 +
.../tokudb/include/fast_update_gen_footer.inc | 2 +
.../include/fast_update_gen_footer_silent.inc | 9 +
.../tokudb/include/fast_update_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_update_int.inc | 48 +
.../tokudb/include/fast_upsert_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_upsert_int.inc | 19 +
.../tokudb/mysql-test/tokudb/include/have_mrr.inc | 0
.../tokudb/include/setup_fast_update_upsert.inc | 8 +
.../tokudb/mysql-test/tokudb/r/compressions.result | 11 +
.../tokudb/r/fast_update_binlog_mixed.result | 225 +-
.../tokudb/r/fast_update_binlog_row.result | 19 +-
.../tokudb/r/fast_update_binlog_statement.result | 222 +-
.../mysql-test/tokudb/r/fast_update_blobs.result | 18253 +---------
.../r/fast_update_blobs_fixed_varchar.result | 33026 ------------------
.../tokudb/r/fast_update_blobs_with_varchar.result | 32771 +-----------------
.../mysql-test/tokudb/r/fast_update_char.result | 60 +-
.../tokudb/r/fast_update_deadlock.result | 19 +-
.../tokudb/r/fast_update_decr_floor.result | 314 +-
.../r/fast_update_disable_slow_update.result | 7 -
.../mysql-test/tokudb/r/fast_update_error.result | 12 +-
.../mysql-test/tokudb/r/fast_update_int.result | 562 +-
.../tokudb/r/fast_update_int_bounds.result | 52 +-
.../mysql-test/tokudb/r/fast_update_key.result | 54 +-
.../mysql-test/tokudb/r/fast_update_sqlmode.result | 21 +-
.../tokudb/r/fast_update_uint_bounds.result | 36 +-
.../mysql-test/tokudb/r/fast_update_varchar.result | 13575 +-------
.../mysql-test/tokudb/r/fast_upsert_bin_pad.result | Bin 659 -> 738 bytes
.../mysql-test/tokudb/r/fast_upsert_char.result | 24 +-
.../tokudb/r/fast_upsert_deadlock.result | 19 +-
.../mysql-test/tokudb/r/fast_upsert_int.result | 428 +-
.../mysql-test/tokudb/r/fast_upsert_key.result | 43 +-
.../mysql-test/tokudb/r/fast_upsert_sqlmode.result | 23 +-
.../mysql-test/tokudb/r/fast_upsert_values.result | 18 +-
.../tokudb/mysql-test/tokudb/r/tokudb_mrr.result | 326 +
storage/tokudb/mysql-test/tokudb/suite.pm | 6 +
.../tokudb/mysql-test/tokudb/t/compressions.test | 68 +
storage/tokudb/mysql-test/tokudb/t/disabled.def | 24 -
.../tokudb/t/fast_update_binlog_mixed-master.opt | 2 +
.../tokudb/t/fast_update_binlog_mixed.test | 15 +-
.../tokudb/t/fast_update_binlog_row-master.opt | 2 +
.../tokudb/t/fast_update_binlog_row.test | 19 +-
.../t/fast_update_binlog_statement-master.opt | 2 +
.../tokudb/t/fast_update_binlog_statement.test | 15 +-
.../mysql-test/tokudb/t/fast_update_blobs.py | 57 -
.../mysql-test/tokudb/t/fast_update_blobs.test | 18575 +----------
.../tokudb/t/fast_update_blobs_fixed_varchar.py | 63 -
.../tokudb/t/fast_update_blobs_fixed_varchar.test | 33287 -------------------
.../tokudb/t/fast_update_blobs_with_varchar.py | 62 -
.../tokudb/t/fast_update_blobs_with_varchar.test | 33115 +-----------------
.../mysql-test/tokudb/t/fast_update_char.test | 66 +-
.../mysql-test/tokudb/t/fast_update_deadlock.test | 21 +-
.../mysql-test/tokudb/t/fast_update_decr_floor.py | 58 -
.../tokudb/t/fast_update_decr_floor.test | 409 +-
.../tokudb/t/fast_update_disable_slow_update.test | 17 -
.../mysql-test/tokudb/t/fast_update_error.test | 16 +-
.../tokudb/mysql-test/tokudb/t/fast_update_int.py | 77 -
.../mysql-test/tokudb/t/fast_update_int.test | 682 +-
.../tokudb/t/fast_update_int_bounds.test | 55 +-
.../mysql-test/tokudb/t/fast_update_key.test | 63 +-
.../mysql-test/tokudb/t/fast_update_sqlmode.test | 25 +-
.../tokudb/t/fast_update_uint_bounds.test | 42 +-
.../mysql-test/tokudb/t/fast_update_varchar.py | 63 -
.../mysql-test/tokudb/t/fast_update_varchar.test | 7390 +---
.../mysql-test/tokudb/t/fast_upsert_bin_pad.test | 19 +-
.../mysql-test/tokudb/t/fast_upsert_char.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_deadlock.test | 22 +-
.../tokudb/mysql-test/tokudb/t/fast_upsert_int.py | 50 -
.../mysql-test/tokudb/t/fast_upsert_int.test | 486 +-
.../mysql-test/tokudb/t/fast_upsert_key.test | 46 +-
.../mysql-test/tokudb/t/fast_upsert_sqlmode.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_values.test | 21 +-
storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test | 73 +
.../tokudb/mysql-test/tokudb_bugs/r/PS-3773.result | 8 +
.../r/alter_table_comment_rebuild_data.result | 177 +
.../tokudb/mysql-test/tokudb_bugs/t/PS-3773.test | 26 +
.../t/alter_table_comment_rebuild_data.test | 188 +
storage/tokudb/tokudb_debug.h | 5 -
storage/tokudb/tokudb_dir_cmd.h | 6 +-
storage/tokudb/tokudb_information_schema.cc | 74 +-
storage/tokudb/tokudb_sysvars.cc | 122 +-
storage/tokudb/tokudb_sysvars.h | 16 +-
storage/tokudb/tokudb_thread.h | 26 +-
storage/tokudb/tokudb_update_fun.cc | 230 +-
192 files changed, 3936 insertions(+), 194538 deletions(-)
diff --cc storage/tokudb/CMakeLists.txt
index 3099e704497,0ac3c20bf16..72fbe45cfc9
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@@ -1,11 -1,7 +1,11 @@@
- SET(TOKUDB_VERSION 5.6.39-83.1)
-SET(TOKUDB_VERSION )
++SET(TOKUDB_VERSION 5.6.41-84.1)
# PerconaFT only supports x86-64 and cmake-2.8.9+
-IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
- NOT CMAKE_VERSION VERSION_LESS "2.8.9")
+IF(CMAKE_VERSION VERSION_LESS "2.8.9")
+ MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
+ELSEIF(NOT HAVE_DLOPEN)
+ MESSAGE(STATUS "dlopen is required by TokuDB")
+ELSEIF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR
+ CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
CHECK_CXX_SOURCE_COMPILES(
"
struct a {int b; int c; };
diff --cc storage/tokudb/PerconaFT/ft/ft.h
index 7a3c4fa783c,7a3c4fa783c..ff0b63b2b12
--- a/storage/tokudb/PerconaFT/ft/ft.h
+++ b/storage/tokudb/PerconaFT/ft/ft.h
@@@ -44,6 -44,6 +44,9 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "ft/ft-ops.h"
#include "ft/logger/log.h"
#include "util/dbt.h"
++#ifndef TOKU_MYSQL_WITH_PFS
++#include <my_global.h>
++#endif
typedef struct ft *FT;
typedef struct ft_options *FT_OPTIONS;
diff --cc storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
index b7b4c0ab233,6f69c3c31b9..d742555f878
--- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
+++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
@@@ -18,7 -18,7 +18,7 @@@ int toku_pthread_create(const toku_inst
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg) {
- #if (MYSQL_VERSION_MAJOR >= 5) && (MYSQL_VERSION_MINOR >= 7)
-#if (MYSQL_VERSION_ID >= 50700)
++#if (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
return PSI_THREAD_CALL(spawn_thread)(
key.id(), reinterpret_cast<my_thread_handle *>(thread),
attr, start_routine, arg);
diff --cc storage/tokudb/ha_tokudb.cc
index 7a328e31261,548ac5c7b09..4637ac1bf5f
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@@ -34,20 -34,7 +34,20 @@@ Copyright (c) 2006, 2015, Percona and/o
pfs_key_t ha_tokudb_mutex_key;
pfs_key_t num_DBs_lock_key;
- #if TOKU_INCLUDE_EXTENDED_KEYS
++#if defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+static inline uint get_ext_key_parts(const KEY *key) {
+#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
+ (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
+ return key->actual_key_parts;
+#elif defined(MARIADB_BASE_VERSION)
+ return key->ext_key_parts;
+#else
+#error
+#endif
+}
- #endif
++#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+
- HASH TOKUDB_SHARE::_open_tables;
+ std::unordered_map<std::string, TOKUDB_SHARE*> TOKUDB_SHARE::_open_tables;
tokudb::thread::mutex_t TOKUDB_SHARE::_open_tables_mutex;
static const char* ha_tokudb_exts[] = {
@@@ -7221,8 -7262,8 +7263,8 @@@ int ha_tokudb::create
form->s->write_frm_image();
#endif
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- const tokudb::sysvars::format_t row_format =
+ const tokudb::sysvars::row_format_t row_format =
(tokudb::sysvars::row_format_t)form->s->option_struct->row_format;
#else
// TDB-76 : CREATE TABLE ... LIKE ... does not use source row_format on
diff --cc storage/tokudb/ha_tokudb.h
index a2fd747bb92,1f47308c978..6f592617b76
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@@ -1072,7 -1085,28 +1085,8 @@@ private
bool in_rpl_write_rows;
bool in_rpl_delete_rows;
bool in_rpl_update_rows;
+ #endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
};
-#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-struct ha_table_option_struct {
- uint row_format;
-};
-
-struct ha_index_option_struct {
- bool clustering;
-};
-
-static inline bool key_is_clustering(const KEY *key) {
- return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
-}
-
-#else
-
-static inline bool key_is_clustering(const KEY *key) {
- return key->flags & HA_CLUSTERING;
-}
-#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-
#endif // _HA_TOKUDB_H
diff --cc storage/tokudb/ha_tokudb_update.cc
index 9fe5e729ec4,5b09279afc5..bae19ba8b16
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@@ -52,6 -50,6 +50,7 @@@ Copyright (c) 2006, 2015, Percona and/o
// Support more complicated update expressions
// Replace field_offset
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
// Debug function to dump an Item
static void dump_item(Item* item) {
fprintf(stderr, "%u", item->type());
@@@ -1131,5 -1127,3 +1128,4 @@@ int ha_tokudb::send_upsert_message
return error;
}
-
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
diff --cc storage/tokudb/hatoku_defines.h
index 92d7da86edf,e2fbe85b3b1..66a8fa5d982
--- a/storage/tokudb/hatoku_defines.h
+++ b/storage/tokudb/hatoku_defines.h
@@@ -35,8 -35,8 +35,8 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "log.h"
#include "sql_class.h"
#include "sql_show.h"
- #include "discover.h"
+ #include "item_cmpfunc.h"
-#include <binlog.h>
+//#include <binlog.h>
#include "debug_sync.h"
#undef PACKAGE
@@@ -117,20 -142,21 +142,22 @@@
#endif
#endif
#define TOKU_OPTIMIZE_WITH_RECREATE 1
+ #define TOKU_INCLUDE_RFR 1
#elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599
-// mysql 5.5 and mariadb 5.5
#define TOKU_USE_DB_TYPE_TOKUDB 1
-#define TOKU_INCLUDE_ALTER_56 1
-#define TOKU_INCLUDE_ALTER_55 1
-#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1
+#define TOKU_INCLUDE_ALTER_56 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ALTER_55 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0 /* MariaDB 5.5 */
#define TOKU_INCLUDE_XA 1
-#define TOKU_INCLUDE_WRITE_FRM_DATA 1
-#define TOKU_PARTITION_WRITE_FRM_DATA 1
+#define TOKU_PARTITION_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+ #define TOKU_INCLUDE_DISCOVER_FRM 1
-#define TOKU_INCLUDE_UPSERT 1
+#define TOKU_INCLUDE_UPSERT 0 /* MariaDB 5.5 */
#if defined(MARIADB_BASE_VERSION)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
+#define TOKU_INCLUDE_OPTION_STRUCTS 1
+#define TOKU_CLUSTERING_IS_COVERING 1
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
#else
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
diff --cc storage/tokudb/hatoku_hton.cc
index 693f9d28a9a,610c9e07be0..ce5e396146b
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@@ -62,14 -76,16 +64,16 @@@ static bool tokudb_show_status
THD* thd,
stat_print_fn* print,
enum ha_stat_type);
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig);
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static int tokudb_close_connection(handlerton* hton, THD* thd);
-static void tokudb_kill_connection(handlerton *hton, THD *thd);
+static void tokudb_kill_connection(handlerton *hton, THD *thd, enum thd_kill_levels level);
static int tokudb_commit(handlerton* hton, THD* thd, bool all);
static int tokudb_rollback(handlerton* hton, THD* thd, bool all);
- #if TOKU_INCLUDE_XA
+ #if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all);
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len);
static int tokudb_commit_by_xid(handlerton* hton, XID* xid);
@@@ -120,8 -138,8 +126,8 @@@ handlerton* tokudb_hton
const char* ha_tokudb_ext = ".tokudb";
DB_ENV* db_env;
-#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static tokudb::thread::mutex_t tokudb_map_mutex;
- #if TOKU_THDVAR_MEMALLOC_BUG
++#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static TREE tokudb_map;
struct tokudb_map_pair {
THD* thd;
@@@ -388,14 -408,16 +396,16 @@@ static int tokudb_init_func(void *p)
tokudb_hton->panic = tokudb_end;
tokudb_hton->flush_logs = tokudb_flush_logs;
tokudb_hton->show_status = tokudb_show_status;
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
tokudb_hton->handle_fatal_signal = tokudb_handle_fatal_signal;
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- tokudb_hton->table_options = tokudb_table_options;
- tokudb_hton->index_options = tokudb_index_options;
+ tokudb_hton->table_options = tokudb::sysvars::tokudb_table_options;
+ tokudb_hton->index_options = tokudb::sysvars::tokudb_index_options;
- #endif
+ #endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
if (!tokudb_home)
tokudb_home = mysql_real_data_home;
@@@ -784,8 -807,7 +795,8 @@@ static int tokudb_close_connection(TOKU
return error;
}
- void tokudb_kill_connection(handlerton *hton, THD *thd,
- enum thd_kill_levels level) {
-void tokudb_kill_connection(TOKUDB_UNUSED(handlerton* hton), THD* thd) {
++void tokudb_kill_connection(TOKUDB_UNUSED(handlerton *hton), THD *thd,
++ TOKUDB_UNUSED(enum thd_kill_levels level)) {
TOKUDB_DBUG_ENTER("");
db_env->kill_waiter(db_env, thd);
DBUG_VOID_RETURN;
@@@ -883,7 -905,7 +894,7 @@@ extern "C" enum durability_properties t
#endif
// Determine if an fsync is used when a transaction is committed.
- static bool tokudb_sync_on_commit(THD* thd, tokudb_trx_data* trx, DB_TXN* txn) {
-static bool tokudb_sync_on_commit(THD* thd) {
++static bool tokudb_sync_on_commit(THD* thd, DB_TXN* txn) {
#if MYSQL_VERSION_ID >= 50600
// Check the client durability property which is set during 2PC
if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
@@@ -906,8 -928,7 +917,8 @@@ static int tokudb_commit(handlerton * h
DB_TXN **txn = all ? &trx->all : &trx->stmt;
DB_TXN *this_txn = *txn;
if (this_txn) {
- uint32_t syncflag = tokudb_sync_on_commit(thd) ? 0 : DB_TXN_NOSYNC;
+ uint32_t syncflag =
- tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
++ tokudb_sync_on_commit(thd, this_txn) ? 0 : DB_TXN_NOSYNC;
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_TXN,
"commit trx %u txn %p syncflag %u",
diff --cc storage/tokudb/mysql-test/rpl/disabled.def
index 4c1a9a3e785,00000000000..282e343d57f
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/rpl/disabled.def
+++ b/storage/tokudb/mysql-test/rpl/disabled.def
@@@ -1,15 -1,0 +1,16 @@@
+rpl_tokudb_delete_pk: unreliable, uses timestamp differences
+rpl_tokudb_delete_pk_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_write_pk: unreliable, uses timestamp differences
+rpl_tokudb_write_pk_uc1: unreliable, uses timestamp differences
+rpl_tokudb_write_unique: unreliable, uses timestamp differences
+rpl_tokudb_write_unique_uc1: unreliable, uses timestamp differences
+rpl_tokudb_read_only_ff: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tf: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tt: unreliable, uses timestamp differences
++rpl_tokudb_read_only_ft: no TOKU_INCLUDE_RFR
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
index 5935e5ddcbd,afbc4b50da8..48ea60013ad
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
index 8bb426d9448,7aab8947940..10375677c8d
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
index ca547e34be2,64b495350c2..1cb047bbf62
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
@@@ -3,9 -8,10 +3,7 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_unique_checks%';
Variable_name Value
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
-select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
index 00000000000,72e8644f7f2..53564ab0fe4
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
@@@ -1,0 -1,62 +1,59 @@@
+ include/master-slave.inc
-Warnings:
-Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
-Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+ [connection master]
+ CREATE TABLE t1(`a` INT) ENGINE=TokuDB;
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA COMMIT 'x1';
+ XA COMMIT 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA COMMIT 'x2';
+ XA COMMIT 'y2';
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA ROLLBACK 'x1';
+ XA ROLLBACK 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA ROLLBACK 'x2';
+ XA ROLLBACK 'y2';
+ TABLES t1 and t2 must be equal otherwise an error will be thrown.
+ include/diff_tables.inc [master:test.t1, slave:test.t1]
+ DROP TABLE t1;
+ include/rpl_end.inc
diff --cc storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
index 00000000000,00000000000..e69de29bb2d
new file mode 100644
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
diff --cc storage/tokudb/mysql-test/tokudb/r/compressions.result
index 00000000000,87ba94ebbe8..03e0d18e9eb
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/compressions.result
+++ b/storage/tokudb/mysql-test/tokudb/r/compressions.result
@@@ -1,0 -1,6 +1,11 @@@
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
++FOUND /compression_method=0/ in dump
++FOUND /compression_method=7/ in dump
++FOUND /compression_method=9/ in dump
++FOUND /compression_method=10/ in dump
++FOUND /compression_method=11/ in dump
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
index 00000000000,9eb0c2f5e34..ba469a3ac96
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
+++ b/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
@@@ -1,0 -1,706 +1,326 @@@
-set optimizer_switch='mrr=on,mrr_cost_based=off';
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+ create table t1(a int);
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
+ insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+ create table t2(a int);
+ insert into t2 select A.a + 10*(B.a + 10*C.a) from t1 A, t1 B, t1 C;
+ create table t3 (
+ a char(8) not null, b char(8) not null, filler char(200),
+ key(a)
+ );
+ insert into t3 select @a:=concat('c-', 1000+ A.a, '=w'), @a, 'filler' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 2000+A.a, '=w'),
+ 'filler-1' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 3000+A.a, '=w'),
+ 'filler-2' from t2 A;
+ select a,filler from t3 where a >= 'c-9011=w';
+ a filler
+ select a,filler from t3 where a >= 'c-1011=w' and a <= 'c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=z', 'c-1013=z', 'err');
+ insert into t3 values ('a-1014=w', 'a-1014=w', 'err');
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ delete from t3 where b in ('c-1013=z', 'a-1014=w');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=w', 'del-me', 'inserted');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
-c-1013=w inserted
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
++c-1013=w inserted
+ delete from t3 where b='del-me';
+ alter table t3 add primary key(b);
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w', 'c-1021=w',
+ 'c-1022=w', 'c-1023=w', 'c-1024=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ c-1024=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1020=w') or
+ b IN ('c-1021=w', 'c-1022=w', 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w') or
+ (b>='c-1021=w' and b<= 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
++drop table if exists t4;
+ create table t4 (a varchar(10), b int, c char(10), filler char(200),
+ key idx1 (a, b, c));
+ insert into t4 (filler) select concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-1', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-222', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'bb-1',NULL,'cc-2', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'zz-1',NULL,'cc-2', 'filler-data' from t2 order by a limit 500;
+ explain
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Rowid-ordered scan
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ a b c filler
+ NULL NULL NULL NULL-15
+ NULL NULL NULL NULL-14
+ NULL NULL NULL NULL-13
+ NULL NULL NULL NULL-12
+ NULL NULL NULL NULL-11
+ NULL NULL NULL NULL-10
+ NULL NULL NULL NULL-9
+ NULL NULL NULL NULL-8
+ NULL NULL NULL NULL-7
+ NULL NULL NULL NULL-6
+ NULL NULL NULL NULL-5
+ NULL NULL NULL NULL-4
+ NULL NULL NULL NULL-3
+ NULL NULL NULL NULL-2
+ NULL NULL NULL NULL-1
+ explain
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Rowid-ordered scan
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ select * from t4 ignore index(idx1) where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ drop table t1, t2, t3, t4;
+ create table t1 (a int, b int not null,unique key (a,b),index(b));
+ insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6);
++Warnings:
++Warning 1062 Duplicate entry '6-6' for key 'a'
+ create table t2 like t1;
+ insert into t2 select * from t1;
+ alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
+ select * from t1 where a is null;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3;
+ a b c
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ drop table t1, t2;
-CREATE TABLE t1 (
-ID int(10) unsigned NOT NULL AUTO_INCREMENT,
-col1 int(10) unsigned DEFAULT NULL,
-key1 int(10) unsigned NOT NULL DEFAULT '0',
-key2 int(10) unsigned DEFAULT NULL,
-text1 text,
-text2 text,
-col2 smallint(6) DEFAULT '100',
-col3 enum('headers','bodyandsubject') NOT NULL DEFAULT 'bodyandsubject',
-col4 tinyint(3) unsigned NOT NULL DEFAULT '0',
-PRIMARY KEY (ID),
-KEY (key1),
-KEY (key2)
-) AUTO_INCREMENT=6 DEFAULT CHARSET=utf8;
-INSERT INTO t1 VALUES
-(1,NULL,1130,NULL,'Hello',NULL,100,'bodyandsubject',0),
-(2,NULL,1130,NULL,'bye',NULL,100,'bodyandsubject',0),
-(3,NULL,1130,NULL,'red',NULL,100,'bodyandsubject',0),
-(4,NULL,1130,NULL,'yellow',NULL,100,'bodyandsubject',0),
-(5,NULL,1130,NULL,'blue',NULL,100,'bodyandsubject',0);
-select * FROM t1 WHERE key1=1130 AND col1 IS NULL ORDER BY text1;
-ID col1 key1 key2 text1 text2 col2 col3 col4
-5 NULL 1130 NULL blue NULL 100 bodyandsubject 0
-2 NULL 1130 NULL bye NULL 100 bodyandsubject 0
-1 NULL 1130 NULL Hello NULL 100 bodyandsubject 0
-3 NULL 1130 NULL red NULL 100 bodyandsubject 0
-4 NULL 1130 NULL yellow NULL 100 bodyandsubject 0
-drop table t1;
-
-BUG#37851: Crash in test_if_skip_sort_order tab->select is zero
-
-CREATE TABLE t1 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1 VALUES (1);
-CREATE TABLE t2 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-int_key int(11) DEFAULT NULL,
-PRIMARY KEY (pk),
-KEY int_key (int_key)
-);
-INSERT INTO t2 VALUES (1,1),(2,6),(3,0);
-EXPLAIN EXTENDED
-SELECT MIN(t1.pk)
-FROM t1 WHERE EXISTS (
-SELECT t2.pk
-FROM t2
-WHERE t2.int_key IS NULL
-GROUP BY t2.pk
-);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t2 ref int_key int_key 5 const 1 100.00 Using where; Using index
-Warnings:
-Note 1003 /* select#1 */ select min(`test`.`t1`.`pk`) AS `MIN(t1.pk)` from `test`.`t1` where 0
-DROP TABLE t1, t2;
-#
-# BUG#42048 Discrepancy between MyISAM and Maria's ICP implementation
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b char(20), filler char(200), key(a,b(10)));
-insert into t1 select A.a + 10*(B.a + 10*C.a), 'bbb','filler' from t0 A, t0 B, t0 C;
-update t1 set b=repeat(char(65+a), 20) where a < 25;
-This must show range + using index condition:
-explain select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL x Using where
-select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-a b filler
-0 AAAAAAAAAAAAAAAAAAAA filler
-1 BBBBBBBBBBBBBBBBBBBB filler
-2 CCCCCCCCCCCCCCCCCCCC filler
-3 DDDDDDDDDDDDDDDDDDDD filler
-4 EEEEEEEEEEEEEEEEEEEE filler
-5 FFFFFFFFFFFFFFFFFFFF filler
-6 GGGGGGGGGGGGGGGGGGGG filler
-7 HHHHHHHHHHHHHHHHHHHH filler
-8 IIIIIIIIIIIIIIIIIIII filler
-9 JJJJJJJJJJJJJJJJJJJJ filler
-drop table t0,t1;
-#
-# BUG#41136: ORDER BY + range access: EXPLAIN shows "Using MRR" while MRR is actually not used
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b int, key(a));
-insert into t1 select A.a + 10 *(B.a + 10*C.a), A.a + 10 *(B.a + 10*C.a) from t0 A, t0 B, t0 C;
-This mustn't show "Using MRR":
-explain select * from t1 where a < 20 order by a;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 20 Using where
-drop table t0, t1;
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=64;
-create table t1(a int);
-insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t2(a char(8), b char(8), c char(8), filler char(100), key k1(a,b,c) );
-insert into t2 select
-concat('a-', 1000 + A.a, '-a'),
-concat('b-', 1000 + B.a, '-b'),
-concat('c-', 1000 + C.a, '-c'),
-'filler'
-from t1 A, t1 B, t1 C;
-EXPLAIN select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k1 k1 9 NULL 100 Using where; Using MRR
-select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-count(length(a) + length(filler))
-100
-drop table t2;
-create table t2 (a char(100), b char(100), c char(100), d int,
-filler char(10), key(d), primary key (a,b,c));
-insert into t2 select A.a, B.a, B.a, A.a, 'filler' from t1 A, t1 B;
-explain select * from t2 force index (d) where d < 10;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range d d 5 NULL # Using where
-drop table t2;
-drop table t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-create table t1 (f1 int not null, f2 int not null,f3 int not null, f4 char(1), primary key (f1,f2), key ix(f3));
-select * from t1 where (f3>=5 and f3<=10) or (f3>=1 and f3<=4);
-f1 f2 f3 f4
-1 1 1 A
-10 10 10 A
-2 2 2 A
-3 3 3 A
-4 4 4 A
-5 5 5 A
-6 6 6 A
-7 7 7 A
-8 8 8 A
-9 9 9 A
-drop table t1;
-
-BUG#37977: Wrong result returned on GROUP BY + OR + Innodb
-
-CREATE TABLE t1 (
-`pk` int(11) NOT NULL AUTO_INCREMENT,
-`int_nokey` int(11) NOT NULL,
-`int_key` int(11) NOT NULL,
-`date_key` date NOT NULL,
-`date_nokey` date NOT NULL,
-`time_key` time NOT NULL,
-`time_nokey` time NOT NULL,
-`datetime_key` datetime NOT NULL,
-`datetime_nokey` datetime NOT NULL,
-`varchar_key` varchar(5) DEFAULT NULL,
-`varchar_nokey` varchar(5) DEFAULT NULL,
-PRIMARY KEY (`pk`),
-KEY `int_key` (`int_key`),
-KEY `date_key` (`date_key`),
-KEY `time_key` (`time_key`),
-KEY `datetime_key` (`datetime_key`),
-KEY `varchar_key` (`varchar_key`)
-);
-INSERT INTO t1 VALUES
-(1,5,5,'2009-10-16','2009-10-16','09:28:15','09:28:15','2007-09-14 05:34:08','2007-09-14 05:34:08','qk','qk'),
-(2,6,6,'0000-00-00','0000-00-00','23:06:39','23:06:39','0000-00-00 00:00:00','0000-00-00 00:00:00','j','j'),
-(3,10,10,'2000-12-18','2000-12-18','22:16:19','22:16:19','2006-11-04 15:42:50','2006-11-04 15:42:50','aew','aew'),
-(4,0,0,'2001-09-18','2001-09-18','00:00:00','00:00:00','2004-03-23 13:23:35','2004-03-23 13:23:35',NULL,NULL),
-(5,6,6,'2007-08-16','2007-08-16','22:13:38','22:13:38','2004-08-19 11:01:28','2004-08-19 11:01:28','qu','qu');
-select pk from t1 WHERE `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-select pk from t1 WHERE `int_nokey` IS NULL OR `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-drop table t1;
-#
-# BUG#39447: Error with NOT NULL condition and LIMIT 1
-#
-CREATE TABLE t1 (
-id int(11) NOT NULL,
-parent_id int(11) DEFAULT NULL,
-name varchar(10) DEFAULT NULL,
-PRIMARY KEY (id),
-KEY ind_parent_id (parent_id)
-);
-insert into t1 (id, parent_id, name) values
-(10,NULL,'A'),
-(20,10,'B'),
-(30,10,'C'),
-(40,NULL,'D'),
-(50,40,'E'),
-(60,40,'F'),
-(70,NULL,'J');
-SELECT id FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id
-60
-This must show type=index, extra=Using where
-explain SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index ind_parent_id PRIMARY 4 NULL 1 Using where
-SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id parent_id name
-60 40 F
-drop table t1;
-#
-# Bug#50381 "Assertion failing in handler.h:1283:
-# void COST_VECT::add_io(double, double)"
-#
-CREATE TABLE t1 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-CREATE TABLE t2 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-INSERT INTO t2 VALUES (10,'v');
-INSERT INTO t2 VALUES (11,'r');
-SELECT t1.c2
-FROM t2 STRAIGHT_JOIN t1 ON t1.c1 < t2.c1;
-c2
-DROP TABLE t1, t2;
-#
-# Bug#58463: Error Can't find record on SELECT with JOIN and ORDER BY
-#
-CREATE TABLE t1 (
-pk INT NOT NULL,
-PRIMARY KEY (pk)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (2);
-CREATE TABLE t2 (
-pk INT NOT NULL,
-i1 INT NOT NULL,
-i2 INT NOT NULL,
-c1 VARCHAR(1024) CHARACTER SET utf8,
-PRIMARY KEY (pk),
-KEY k1 (i1)
-);
-INSERT INTO t2 VALUES (3, 9, 1, NULL);
-EXPLAIN SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 NULL
-1 SIMPLE t2 const PRIMARY,k1 PRIMARY 4 const 1 Using where
-SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-i1
-DROP TABLE t1, t2;
-#
-# Bug#12321461: CRASH IN DSMRR_IMPL::DSMRR_INIT ON SELECT STRAIGHT_JOIN
-#
-set @save_optimizer_switch = @@optimizer_switch;
-set optimizer_switch='block_nested_loop=off,batched_key_access=off';
-CREATE TABLE t1 (
-pk INTEGER,
-c1 VARCHAR(1) NOT NULL,
-PRIMARY KEY (pk)
-);
-CREATE TABLE t2 (
-c1 VARCHAR(1) NOT NULL
-);
-INSERT INTO t2 VALUES ('v'), ('c');
-EXPLAIN SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 2 NULL
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where
-SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-c1
-DROP TABLE t1,t2;
-set optimizer_switch= @save_optimizer_switch;
-#
-# Bug#13249966 MRR: RANDOM ERROR DUE TO UNINITIALIZED RES WITH
-# SMALL READ_RND_BUFFER_SIZE
-#
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=1;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-1
-CREATE TABLE t1 (
-i1 INTEGER NOT NULL,
-i2 INTEGER NOT NULL,
-KEY (i2)
-);
-INSERT INTO t1 VALUES (0,1),(1,2),(2,3);
-EXPLAIN SELECT i1
-FROM t1
-WHERE i2 > 2;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i2 i2 4 NULL 1 Using where
-SELECT i1
-FROM t1
-WHERE i2 > 2;
-i1
-2
-DROP TABLE t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-262144
-#
-# Bug 12365385 STRAIGHT_JOIN QUERY QUICKLY EXHAUSTS SYSTEM+VIRT.
-# MEMORY LEADING TO SYSTEM CRASH
-#
-CREATE TABLE ten (a INTEGER);
-INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-CREATE TABLE t1 (
-pk INTEGER NOT NULL,
-i1 INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1
-SELECT a, 1, 'MySQL' FROM ten;
-CREATE TABLE t2 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-c2 varchar(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t2
-SELECT a, 'MySQL', 'MySQL' FROM ten;
-CREATE TABLE t3 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t3
-SELECT a, 'MySQL' FROM ten;
-CREATE TABLE t4 (
-pk int(11) NOT NULL,
-c1_key varchar(10) CHARACTER SET utf8 NOT NULL,
-c2 varchar(10) NOT NULL,
-c3 varchar(10) NOT NULL,
-PRIMARY KEY (pk),
-KEY k1 (c1_key)
-);
-CREATE TABLE t5 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t5
-SELECT a, 'MySQL' FROM ten;
-EXPLAIN SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t5 ALL NULL NULL NULL NULL 10 NULL
-1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t3 ALL NULL NULL NULL NULL 10 Using join buffer (Block Nested Loop)
-1 SIMPLE t4 ALL k1 NULL NULL NULL 1 Range checked for each record (index map: 0x2)
-SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-pk i1 c1 pk c1 c2 pk c1 pk c1_key c2 c3 pk c1
-DROP TABLE ten, t1, t2, t3, t4, t5;
+ #
+ # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ #
+ SET AUTOCOMMIT=0;
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ @@tx_isolation
+ REPEATABLE-READ
+ START TRANSACTION;
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ id select_type table type possible_keys key key_len ref rows Extra
+ 1 SIMPLE t1 range a a 5 NULL 2 Using where
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ dummy a b
+ 3 3 3
+ 5 5 5
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+ INSERT INTO t1 VALUES (2,2,2);
+ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ ROLLBACK;
+ ROLLBACK;
+ DROP TABLE t1;
diff --cc storage/tokudb/mysql-test/tokudb/suite.pm
index 6c52d0110fe,00000000000..58694112e63
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/tokudb/suite.pm
+++ b/storage/tokudb/mysql-test/tokudb/suite.pm
@@@ -1,14 -1,0 +1,20 @@@
+package My::Suite::TokuDB;
+use File::Basename;
+@ISA = qw(My::Suite);
+
+# Ensure we can run the TokuDB tests even if hugepages are enabled
+$ENV{TOKU_HUGE_PAGES_OK}=1;
++my $exe_tokuftdump=
++ ::mtr_exe_maybe_exists(
++ ::vs_config_dirs('storage/tokudb/PerconaFT/tools', 'tokuftdump'),
++ "$::path_client_bindir/tokuftdump",
++ "$::bindir/storage/tokudb/PerconaFT/tools/tokuftdump");
++$ENV{'MYSQL_TOKUFTDUMP'}= ::native_path($exe_tokuftdump);
+
+#return "Not run for embedded server" if $::opt_embedded_server;
+return "No TokuDB engine" unless $ENV{HA_TOKUDB_SO} or $::mysqld_variables{tokudb};
+
+sub is_default { not $::opt_embedded_server }
+
+bless { };
+
diff --cc storage/tokudb/mysql-test/tokudb/t/compressions.test
index 00000000000,3e83cdb8b68..cd2e405c13a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/compressions.test
+++ b/storage/tokudb/mysql-test/tokudb/t/compressions.test
@@@ -1,0 -1,68 +1,68 @@@
+ --source include/have_tokudb.inc
+
+ # The purpose of this test is to perform about as full of an end-to-end
+ # validation that the requested compression algo at the SQL layer is actually
+ # applied to the FT data files. The only practical way to check this is to use
+ # tokuftdump and look at the data files header value for compression_method.
+ # A side effect of this is that the existance of this test will ensure that at
+ # no time will the compression method IDs ever change, if they do, this test
+ # will fail and users data will be irreparably damaged.
+
+ # uncompressed - compression_method=0
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
+ --let $t1_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t1-main'`
+
+ # SNAPPY - compression_method=7
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
+ --let $t2_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t2-main'`
+
+ # QUICKLZ - compression_method=9
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
+ --let $t3_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t3-main'`
+
+ # LZMA - compression_method=10
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
+ --let $t4_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t4-main'`
+
+ # ZLIB (without checksum) - compression_method=11
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
+ --let $t5_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t5-main'`
+
+ --let $datadir = `SELECT @@global.datadir`
+
+ # To ensure we have correct headers written to FT data files and no chance of a
+ # race between header rotation and tokuftdump, lets just perform a clean server
+ # shutdown before we go rooting around in the FT files.
+ --source include/shutdown_mysqld.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/dump
+
+ # uncompressed - compression_method=0
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t1_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=0
+ --source include/search_pattern_in_file.inc
+
+ # SNAPPY - compression_method=7
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t2_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=7
+ --source include/search_pattern_in_file.inc
+
+ # QUICKLZ - compression_method=9
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t3_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=9
+ --source include/search_pattern_in_file.inc
+
+ # LZMA - compression_method=10
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t4_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=10
+ --source include/search_pattern_in_file.inc
+
+ # ZLIB (without checksum) - compression_method=11
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t5_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=11
+ --source include/search_pattern_in_file.inc
+
+ --remove_file $SEARCH_FILE
+ --source include/start_mysqld.inc
+
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
index 00000000000,b30bc18d759..6130933b279
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
+++ b/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
@@@ -1,0 -1,89 +1,73 @@@
+ #
+ # MRR/Tokudb tests, taken from mysqltest/t/innodb_mrr.test
+ # (Turns off all other 6.0 optimizer switches than MRR)
+ #
+
+ --source include/have_tokudb.inc
+ --source include/have_mrr.inc
+
-set optimizer_switch='mrr=on,mrr_cost_based=off';
-
---disable_query_log
-if (`select locate('semijoin', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='semijoin=off';
-}
-if (`select locate('materialization', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='materialization=off';
-}
-if (`select locate('index_condition_pushdown', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='index_condition_pushdown=off';
-}
---enable_query_log
-
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+
+ --source include/mrr_tests.inc
+
+
+ # taken from include/mrr_innodb_tests.inc
+
+ --source include/count_sessions.inc
+
+ # MRR tests that are special for InnoDB (and copied for TokuDB)
+
+ --echo #
+ --echo # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ --echo #
+
+ # This test verifies that a SELECT FOR UPDATE statement executed in
+ # REPEATABLE READ isolation will lock the entire read interval by verifying
+ # that a second transaction trying to update data within this interval will
+ # be blocked.
+
+ connect (con1,localhost,root,,);
+ connect (con2,localhost,root,,);
+
+ connection con1;
+
+ SET AUTOCOMMIT=0;
+
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ START TRANSACTION;
+
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ connection con2;
+
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+
+ --error ER_LOCK_WAIT_TIMEOUT
+ INSERT INTO t1 VALUES (2,2,2);
+ ROLLBACK;
+
+ connection con1;
+
+ ROLLBACK;
+ DROP TABLE t1;
+
+ connection default;
+ disconnect con1;
+ disconnect con2;
+
+ --source include/wait_until_count_sessions.inc
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
index 00000000000,e2e695611b5..49c61790837
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
@@@ -1,0 -1,7 +1,8 @@@
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ SELECT * FROM t1 WHERE b = 2;
+ ERROR HY000: Incorrect key file for table 't1'; try to repair it
+ DROP TABLE t1;
++FOUND /ha_tokudb::read_full_row on table/ in tokudb.bugs.PS-3773.log
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
index 00000000000,f536f5163ef..1bd5aee087a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
@@@ -1,0 -1,186 +1,177 @@@
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ alter table t1 min_rows = 8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter min_rows]
+ alter table t1 max_rows = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter max_rows]
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter avg_row_length]
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter pack_keys]
+ alter table t1 character set = utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter character set]
+ alter table t1 data directory = '/tmp';
+ Warnings:
+ Warning 1618 <DATA DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter data directory]
+ alter table t1 index directory = '/tmp';
+ Warnings:
+ Warning 1618 <INDEX DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter index directory]
+ alter table t1 checksum = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter checksum]
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter delay_key_write]
+ alter table t1 comment = 'test table';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter comment]
+ alter table t1 password = '123456';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter password]
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter connection]
-alter table t1 key_block_size=32;
-show create table t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
- PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
-include/assert.inc [underlying ft file name not changed after alter key_block_size]
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_persistent]
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_auto_recalc]
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_sample_pages]
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter auto_increment]
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name not changed after alter compression method]
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter engine type]
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter convert character]
+ drop table t1;
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
index 00000000000,684f9cbf8d5..e9490e91c33
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
@@@ -1,0 -1,26 +1,26 @@@
+ --source include/have_tokudb.inc
+ --source include/have_debug.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/tokudb.bugs.PS-3773.log
---let $restart_parameters="restart: --log-error=$SEARCH_FILE"
++--let $restart_parameters="--log-error=$SEARCH_FILE"
+ --source include/restart_mysqld.inc
+
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
+
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ --error ER_NOT_KEYFILE
+ SELECT * FROM t1 WHERE b = 2;
+
+ DROP TABLE t1;
+
+ --let SEARCH_PATTERN=ha_tokudb::read_full_row on table
+ --source include/search_pattern_in_file.inc
+
+ --let $restart_parameters=
+ --source include/restart_mysqld.inc
+
+ --remove_file $SEARCH_FILE
+ --let SEARCH_PATTERN=
+ --let SEARCH_FILE=
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
index 00000000000,fc4f3e0fd3d..e0e043f96ab
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
@@@ -1,0 -1,184 +1,188 @@@
+ --source include/have_tokudb.inc
+
+ #
+ # Create a table and get the underlying main ft file name
+ #
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ --let $ori_file= `select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+
+ #
+ # Case 1: alter create options that are ignored by TokuDB
+ #
+
+ # Alter table with min_rows
+ alter table t1 min_rows = 8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter min_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with max_rows
+ alter table t1 max_rows = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter max_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with avg_row_length
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter avg_row_length
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with pack_keys
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter pack_keys
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with default character set
+ alter table t1 character set = utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter character set
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with data directory
+ alter table t1 data directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter data directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with index directory
+ alter table t1 index directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter index directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with checksum
+ alter table t1 checksum = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter checksum
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with delay_key_write
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter delay_key_write
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with comment
+ alter table t1 comment = 'test table';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter comment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with password
+ alter table t1 password = '123456';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter password
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with connection
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter connection
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
++
++#
++# In mariadb changing of key_block_size treated as index change
++#
+ # Alter table with key_block_size
-alter table t1 key_block_size=32;
-show create table t1;
---let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
---let $assert_text= underlying ft file name not changed after alter key_block_size
---let $assert_cond= "$ori_file" = "$new_file"
---source include/assert.inc
++#alter table t1 key_block_size=32;
++#show create table t1;
++#--let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
++#--let $assert_text= underlying ft file name not changed after alter key_block_size
++#--let $assert_cond= "$ori_file" = "$new_file"
++#--source include/assert.inc
+
+ # Alter table with stats_persistent
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_persistent
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_auto_recalc
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_auto_recalc
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_sample_pages
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_sample_pages
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 2: alter create options that only update meta info, i.e inplace
+ #
+
+ # Alter table with auto_increment
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter auto_increment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with compression method
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter compression method
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 3: alter create options that rebuild table using copy algorithm
+ #
+
+ # Alter table with engine type
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter engine type
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ # Alter table with convert character
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter convert character
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ #
+ # clean up
+ #
+ drop table t1;
diff --cc storage/tokudb/tokudb_sysvars.cc
index bbc39dc550a,e8e9f908275..7771204dc11
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@@ -1006,9 -1075,12 +1002,9 @@@ st_mysql_sys_var* system_variables[] =
MYSQL_SYSVAR(support_xa),
#endif
- #if TOKUDB_DEBUG
+ #if defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(debug_pause_background_job_manager),
-#endif // defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(dir_cmd_last_error),
- MYSQL_SYSVAR(dir_cmd_last_error_string),
- MYSQL_SYSVAR(dir_cmd),
+ MYSQL_SYSVAR(debug_pause_background_job_manager),
+#endif // TOKUDB_DEBUG
NULL
};
@@@ -1055,14 -1127,12 +1051,14 @@@ my_bool disable_prefetching(THD* thd)
my_bool disable_slow_alter(THD* thd) {
return (THDVAR(thd, disable_slow_alter) != 0);
}
- #if TOKU_INCLUDE_UPSERT
- my_bool disable_slow_update(THD* thd) {
- return (THDVAR(thd, disable_slow_update) != 0);
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
+ my_bool enable_fast_update(THD* thd) {
+ return (THDVAR(thd, enable_fast_update) != 0);
}
- my_bool disable_slow_upsert(THD* thd) {
- return (THDVAR(thd, disable_slow_upsert) != 0);
+ my_bool enable_fast_upsert(THD* thd) {
+ return (THDVAR(thd, enable_fast_upsert) != 0);
}
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
empty_scan_mode_t empty_scan(THD* thd) {
return (empty_scan_mode_t)THDVAR(thd, empty_scan);
}
@@@ -1139,17 -1211,5 +1137,17 @@@ my_bool support_xa(THD* thd)
return (THDVAR(thd, support_xa) != 0);
}
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+ha_create_table_option tokudb_table_options[] = {
+ HA_TOPTION_SYSVAR("compression", row_format, row_format),
+ HA_TOPTION_END
+};
+
+ha_create_table_option tokudb_index_options[] = {
+ HA_IOPTION_BOOL("clustering", clustering, 0),
+ HA_IOPTION_END
+};
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
} // namespace sysvars
} // namespace tokudb
diff --cc storage/tokudb/tokudb_sysvars.h
index 7701f211729,d81d5fd7999..2454f8fefd2
--- a/storage/tokudb/tokudb_sysvars.h
+++ b/storage/tokudb/tokudb_sysvars.h
@@@ -26,26 -26,6 +26,26 @@@ Copyright (c) 2006, 2015, Percona and/o
#ifndef _TOKUDB_SYSVARS_H
#define _TOKUDB_SYSVARS_H
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+struct ha_table_option_struct {
+ uint row_format;
+};
+
+struct ha_index_option_struct {
+ bool clustering;
+};
+
+static inline bool key_is_clustering(const KEY *key) {
+ return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
+}
+
+#else
+
+static inline bool key_is_clustering(const KEY *key) {
+ return key->flags & HA_CLUSTERING;
+}
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
namespace tokudb {
namespace sysvars {
[Commits] 3d82f0c: Merge branch 'merge-tokudb-5.6' into 10.0
by Oleksandr Byelkin 06 Sep '18
revision-id: 3d82f0c76377718567f1583b5d38de57c569f94d (mariadb-10.0.36-25-g3d82f0c7637)
parent(s): 0ccba62db385139caae514f70b31187bdce0de88 a816eac92ac2381e1b9cd4d655e733bdeafb173e
author: Oleksandr Byelkin
committer: Oleksandr Byelkin
timestamp: 2018-09-06 18:51:41 +0200
message:
Merge branch 'merge-tokudb-5.6' into 10.0
storage/tokudb/CMakeLists.txt | 8 +-
storage/tokudb/PerconaFT/CMakeLists.txt | 3 +-
.../cmake_modules/TokuSetupCompiler.cmake | 3 +
.../tokudb/PerconaFT/ft/cachetable/cachetable.cc | 21 +-
.../tokudb/PerconaFT/ft/cachetable/cachetable.h | 8 +-
.../tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft-test-helpers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft.h | 3 +
storage/tokudb/PerconaFT/ft/node.cc | 2 +
.../PerconaFT/ft/serialize/block_allocator.cc | 2 +-
.../tokudb/PerconaFT/ft/tests/cachetable-4357.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-4365.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-5097.cc | 6 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc | 7 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-all-write.cc | 5 +-
.../ft/tests/cachetable-checkpoint-pending.cc | 8 +-
.../ft/tests/cachetable-checkpoint-pinned-nodes.cc | 6 +-
.../ft/tests/cachetable-cleaner-checkpoint.cc | 5 +-
.../ft/tests/cachetable-cleaner-checkpoint2.cc | 5 +-
.../cachetable-cleaner-thread-attrs-accumulate.cc | 8 +-
.../cachetable-cleaner-thread-everything-pinned.cc | 5 +-
...etable-cleaner-thread-nothing-needs-flushing.cc | 5 +-
.../cachetable-cleaner-thread-same-fullhash.cc | 7 +-
.../ft/tests/cachetable-cleaner-thread-simple.cc | 7 +-
.../ft/tests/cachetable-clock-eviction.cc | 9 +-
.../ft/tests/cachetable-clock-eviction2.cc | 9 +-
.../ft/tests/cachetable-clock-eviction3.cc | 9 +-
.../ft/tests/cachetable-clock-eviction4.cc | 9 +-
.../ft/tests/cachetable-clone-checkpoint.cc | 5 +-
.../cachetable-clone-partial-fetch-pinned-node.cc | 7 +-
.../ft/tests/cachetable-clone-partial-fetch.cc | 7 +-
.../ft/tests/cachetable-clone-pin-nonblocking.cc | 7 +-
.../ft/tests/cachetable-clone-unpin-remove.cc | 5 +-
.../ft/tests/cachetable-eviction-close-test.cc | 4 -
.../ft/tests/cachetable-eviction-close-test2.cc | 4 -
.../ft/tests/cachetable-eviction-getandpin-test.cc | 14 +-
.../tests/cachetable-eviction-getandpin-test2.cc | 12 +-
.../ft/tests/cachetable-fetch-inducing-evictor.cc | 15 +-
.../ft/tests/cachetable-flush-during-cleaner.cc | 3 +-
.../ft/tests/cachetable-getandpin-test.cc | 8 +-
.../cachetable-kibbutz_and_flush_cachefile.cc | 3 +-
.../PerconaFT/ft/tests/cachetable-partial-fetch.cc | 18 +-
.../ft/tests/cachetable-pin-checkpoint.cc | 6 -
.../cachetable-pin-nonblocking-checkpoint-clean.cc | 9 +-
.../ft/tests/cachetable-prefetch-close-test.cc | 2 -
.../ft/tests/cachetable-prefetch-getandpin-test.cc | 12 +-
.../ft/tests/cachetable-put-checkpoint.cc | 9 -
.../PerconaFT/ft/tests/cachetable-simple-clone.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-clone2.cc | 5 +-
.../PerconaFT/ft/tests/cachetable-simple-close.cc | 20 +-
.../ft/tests/cachetable-simple-maybe-get-pin.cc | 3 +-
.../ft/tests/cachetable-simple-pin-cheap.cc | 9 +-
.../ft/tests/cachetable-simple-pin-dep-nodes.cc | 8 +-
.../cachetable-simple-pin-nonblocking-cheap.cc | 19 +-
.../ft/tests/cachetable-simple-pin-nonblocking.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-simple-pin.cc | 11 +-
.../ft/tests/cachetable-simple-put-dep-nodes.cc | 6 +-
.../cachetable-simple-read-pin-nonblocking.cc | 13 +-
.../ft/tests/cachetable-simple-read-pin.cc | 13 +-
.../cachetable-simple-unpin-remove-checkpoint.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-verify.cc | 5 +-
.../tokudb/PerconaFT/ft/tests/cachetable-test.cc | 22 +-
.../ft/tests/cachetable-unpin-and-remove-test.cc | 4 +-
.../cachetable-unpin-remove-and-checkpoint.cc | 6 +-
.../PerconaFT/ft/tests/cachetable-unpin-test.cc | 2 -
storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc | 178 +
storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc | 208 +
storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc | 2 +
storage/tokudb/PerconaFT/ft/txn/rollback.cc | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp | 2 +-
storage/tokudb/PerconaFT/portability/memory.cc | 14 +-
storage/tokudb/PerconaFT/portability/toku_assert.h | 2 +-
.../tokudb/PerconaFT/portability/toku_debug_sync.h | 3 +-
.../PerconaFT/portability/toku_instr_mysql.cc | 6 +-
.../PerconaFT/portability/toku_instrumentation.h | 6 +-
.../PerconaFT/portability/toku_portability.h | 2 +-
.../tokudb/PerconaFT/portability/toku_race_tools.h | 2 +-
storage/tokudb/PerconaFT/src/tests/get_last_key.cc | 32 +-
storage/tokudb/PerconaFT/src/ydb.cc | 3 +
storage/tokudb/PerconaFT/util/dmt.cc | 4 +-
storage/tokudb/PerconaFT/util/minicron.cc | 3 +-
storage/tokudb/PerconaFT/util/scoped_malloc.cc | 2 +-
.../util/tests/minicron-change-period-data-race.cc | 66 +
storage/tokudb/ha_tokudb.cc | 325 +-
storage/tokudb/ha_tokudb.h | 92 +-
storage/tokudb/ha_tokudb_admin.cc | 8 +-
storage/tokudb/ha_tokudb_alter_55.cc | 4 +
storage/tokudb/ha_tokudb_alter_56.cc | 265 +-
storage/tokudb/ha_tokudb_alter_common.cc | 6 +-
storage/tokudb/ha_tokudb_update.cc | 96 +-
storage/tokudb/hatoku_cmp.cc | 33 +-
storage/tokudb/hatoku_cmp.h | 14 +-
storage/tokudb/hatoku_defines.h | 50 +-
storage/tokudb/hatoku_hton.cc | 169 +-
storage/tokudb/hatoku_hton.h | 25 +-
storage/tokudb/mysql-test/rpl/disabled.def | 1 +
.../r/rpl_mixed_replace_into.result | 0
.../rpl/r/rpl_parallel_tokudb_delete_pk.result | 5 -
...pl_parallel_tokudb_update_pk_uc0_lookup0.result | 5 -
.../rpl/r/rpl_parallel_tokudb_write_pk.result | 2 -
.../r/rpl_row_replace_into.result | 0
.../r/rpl_stmt_replace_into.result | 0
.../mysql-test/rpl/r/rpl_xa_interleave.result | 59 +
.../t/rpl_mixed_replace_into.test | 0
.../t/rpl_row_replace_into.test | 0
.../t/rpl_stmt_replace_into.test | 0
.../tokudb/mysql-test/rpl/t/rpl_xa_interleave.test | 103 +
.../tokudb/include/fast_update_gen_footer.inc | 2 +
.../include/fast_update_gen_footer_silent.inc | 9 +
.../tokudb/include/fast_update_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_update_int.inc | 48 +
.../tokudb/include/fast_upsert_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_upsert_int.inc | 19 +
.../tokudb/mysql-test/tokudb/include/have_mrr.inc | 0
.../tokudb/include/setup_fast_update_upsert.inc | 8 +
.../tokudb/mysql-test/tokudb/r/compressions.result | 11 +
.../tokudb/r/fast_update_binlog_mixed.result | 225 +-
.../tokudb/r/fast_update_binlog_row.result | 19 +-
.../tokudb/r/fast_update_binlog_statement.result | 222 +-
.../mysql-test/tokudb/r/fast_update_blobs.result | 18253 +---------
.../r/fast_update_blobs_fixed_varchar.result | 33026 ------------------
.../tokudb/r/fast_update_blobs_with_varchar.result | 32771 +-----------------
.../mysql-test/tokudb/r/fast_update_char.result | 60 +-
.../tokudb/r/fast_update_deadlock.result | 19 +-
.../tokudb/r/fast_update_decr_floor.result | 314 +-
.../r/fast_update_disable_slow_update.result | 7 -
.../mysql-test/tokudb/r/fast_update_error.result | 12 +-
.../mysql-test/tokudb/r/fast_update_int.result | 562 +-
.../tokudb/r/fast_update_int_bounds.result | 52 +-
.../mysql-test/tokudb/r/fast_update_key.result | 54 +-
.../mysql-test/tokudb/r/fast_update_sqlmode.result | 21 +-
.../tokudb/r/fast_update_uint_bounds.result | 36 +-
.../mysql-test/tokudb/r/fast_update_varchar.result | 13575 +-------
.../mysql-test/tokudb/r/fast_upsert_bin_pad.result | Bin 659 -> 738 bytes
.../mysql-test/tokudb/r/fast_upsert_char.result | 24 +-
.../tokudb/r/fast_upsert_deadlock.result | 19 +-
.../mysql-test/tokudb/r/fast_upsert_int.result | 428 +-
.../mysql-test/tokudb/r/fast_upsert_key.result | 43 +-
.../mysql-test/tokudb/r/fast_upsert_sqlmode.result | 23 +-
.../mysql-test/tokudb/r/fast_upsert_values.result | 18 +-
.../tokudb/mysql-test/tokudb/r/tokudb_mrr.result | 326 +
storage/tokudb/mysql-test/tokudb/suite.pm | 6 +
.../tokudb/mysql-test/tokudb/t/compressions.test | 68 +
storage/tokudb/mysql-test/tokudb/t/disabled.def | 24 -
.../tokudb/t/fast_update_binlog_mixed-master.opt | 2 +
.../tokudb/t/fast_update_binlog_mixed.test | 15 +-
.../tokudb/t/fast_update_binlog_row-master.opt | 2 +
.../tokudb/t/fast_update_binlog_row.test | 19 +-
.../t/fast_update_binlog_statement-master.opt | 2 +
.../tokudb/t/fast_update_binlog_statement.test | 15 +-
.../mysql-test/tokudb/t/fast_update_blobs.py | 57 -
.../mysql-test/tokudb/t/fast_update_blobs.test | 18575 +----------
.../tokudb/t/fast_update_blobs_fixed_varchar.py | 63 -
.../tokudb/t/fast_update_blobs_fixed_varchar.test | 33287 -------------------
.../tokudb/t/fast_update_blobs_with_varchar.py | 62 -
.../tokudb/t/fast_update_blobs_with_varchar.test | 33115 +-----------------
.../mysql-test/tokudb/t/fast_update_char.test | 66 +-
.../mysql-test/tokudb/t/fast_update_deadlock.test | 21 +-
.../mysql-test/tokudb/t/fast_update_decr_floor.py | 58 -
.../tokudb/t/fast_update_decr_floor.test | 409 +-
.../tokudb/t/fast_update_disable_slow_update.test | 17 -
.../mysql-test/tokudb/t/fast_update_error.test | 16 +-
.../tokudb/mysql-test/tokudb/t/fast_update_int.py | 77 -
.../mysql-test/tokudb/t/fast_update_int.test | 682 +-
.../tokudb/t/fast_update_int_bounds.test | 55 +-
.../mysql-test/tokudb/t/fast_update_key.test | 63 +-
.../mysql-test/tokudb/t/fast_update_sqlmode.test | 25 +-
.../tokudb/t/fast_update_uint_bounds.test | 42 +-
.../mysql-test/tokudb/t/fast_update_varchar.py | 63 -
.../mysql-test/tokudb/t/fast_update_varchar.test | 7390 +---
.../mysql-test/tokudb/t/fast_upsert_bin_pad.test | 19 +-
.../mysql-test/tokudb/t/fast_upsert_char.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_deadlock.test | 22 +-
.../tokudb/mysql-test/tokudb/t/fast_upsert_int.py | 50 -
.../mysql-test/tokudb/t/fast_upsert_int.test | 486 +-
.../mysql-test/tokudb/t/fast_upsert_key.test | 46 +-
.../mysql-test/tokudb/t/fast_upsert_sqlmode.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_values.test | 21 +-
storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test | 73 +
.../tokudb/mysql-test/tokudb_bugs/r/PS-3773.result | 8 +
.../r/alter_table_comment_rebuild_data.result | 177 +
.../tokudb/mysql-test/tokudb_bugs/t/PS-3773.test | 26 +
.../t/alter_table_comment_rebuild_data.test | 188 +
storage/tokudb/tokudb_debug.h | 5 -
storage/tokudb/tokudb_dir_cmd.h | 6 +-
storage/tokudb/tokudb_information_schema.cc | 74 +-
storage/tokudb/tokudb_sysvars.cc | 122 +-
storage/tokudb/tokudb_sysvars.h | 16 +-
storage/tokudb/tokudb_thread.h | 26 +-
storage/tokudb/tokudb_update_fun.cc | 230 +-
192 files changed, 3936 insertions(+), 194538 deletions(-)
diff --cc storage/tokudb/CMakeLists.txt
index 3099e704497,0ac3c20bf16..72fbe45cfc9
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@@ -1,11 -1,7 +1,11 @@@
- SET(TOKUDB_VERSION 5.6.39-83.1)
-SET(TOKUDB_VERSION )
++SET(TOKUDB_VERSION 5.6.41-84.1)
# PerconaFT only supports x86-64 and cmake-2.8.9+
-IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
- NOT CMAKE_VERSION VERSION_LESS "2.8.9")
+IF(CMAKE_VERSION VERSION_LESS "2.8.9")
+ MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
+ELSEIF(NOT HAVE_DLOPEN)
+ MESSAGE(STATUS "dlopen is required by TokuDB")
+ELSEIF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR
+ CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
CHECK_CXX_SOURCE_COMPILES(
"
struct a {int b; int c; };
diff --cc storage/tokudb/PerconaFT/ft/ft.h
index 7a3c4fa783c,7a3c4fa783c..ff0b63b2b12
--- a/storage/tokudb/PerconaFT/ft/ft.h
+++ b/storage/tokudb/PerconaFT/ft/ft.h
@@@ -44,6 -44,6 +44,9 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "ft/ft-ops.h"
#include "ft/logger/log.h"
#include "util/dbt.h"
++#ifndef TOKU_MYSQL_WITH_PFS
++#include <my_global.h>
++#endif
typedef struct ft *FT;
typedef struct ft_options *FT_OPTIONS;
diff --cc storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
index b7b4c0ab233,6f69c3c31b9..d742555f878
--- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
+++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
@@@ -18,7 -18,7 +18,7 @@@ int toku_pthread_create(const toku_inst
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg) {
- #if (MYSQL_VERSION_MAJOR >= 5) && (MYSQL_VERSION_MINOR >= 7)
-#if (MYSQL_VERSION_ID >= 50700)
++#if (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
return PSI_THREAD_CALL(spawn_thread)(
key.id(), reinterpret_cast<my_thread_handle *>(thread),
attr, start_routine, arg);
diff --cc storage/tokudb/ha_tokudb.cc
index 7a328e31261,548ac5c7b09..4637ac1bf5f
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@@ -34,20 -34,7 +34,20 @@@ Copyright (c) 2006, 2015, Percona and/o
pfs_key_t ha_tokudb_mutex_key;
pfs_key_t num_DBs_lock_key;
- #if TOKU_INCLUDE_EXTENDED_KEYS
++#if defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+static inline uint get_ext_key_parts(const KEY *key) {
+#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
+ (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
+ return key->actual_key_parts;
+#elif defined(MARIADB_BASE_VERSION)
+ return key->ext_key_parts;
+#else
+#error
+#endif
+}
- #endif
++#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+
- HASH TOKUDB_SHARE::_open_tables;
+ std::unordered_map<std::string, TOKUDB_SHARE*> TOKUDB_SHARE::_open_tables;
tokudb::thread::mutex_t TOKUDB_SHARE::_open_tables_mutex;
static const char* ha_tokudb_exts[] = {
@@@ -7221,8 -7262,8 +7263,8 @@@ int ha_tokudb::create
form->s->write_frm_image();
#endif
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- const tokudb::sysvars::format_t row_format =
+ const tokudb::sysvars::row_format_t row_format =
(tokudb::sysvars::row_format_t)form->s->option_struct->row_format;
#else
// TDB-76 : CREATE TABLE ... LIKE ... does not use source row_format on
diff --cc storage/tokudb/ha_tokudb.h
index a2fd747bb92,1f47308c978..6f592617b76
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@@ -1072,7 -1085,28 +1085,8 @@@ private
bool in_rpl_write_rows;
bool in_rpl_delete_rows;
bool in_rpl_update_rows;
+ #endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
};
-#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-struct ha_table_option_struct {
- uint row_format;
-};
-
-struct ha_index_option_struct {
- bool clustering;
-};
-
-static inline bool key_is_clustering(const KEY *key) {
- return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
-}
-
-#else
-
-static inline bool key_is_clustering(const KEY *key) {
- return key->flags & HA_CLUSTERING;
-}
-#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-
#endif // _HA_TOKUDB_H
diff --cc storage/tokudb/ha_tokudb_update.cc
index 9fe5e729ec4,5b09279afc5..bae19ba8b16
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@@ -52,6 -50,6 +50,7 @@@ Copyright (c) 2006, 2015, Percona and/o
// Support more complicated update expressions
// Replace field_offset
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
// Debug function to dump an Item
static void dump_item(Item* item) {
fprintf(stderr, "%u", item->type());
@@@ -1131,5 -1127,3 +1128,4 @@@ int ha_tokudb::send_upsert_message
return error;
}
-
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
diff --cc storage/tokudb/hatoku_defines.h
index 92d7da86edf,e2fbe85b3b1..66a8fa5d982
--- a/storage/tokudb/hatoku_defines.h
+++ b/storage/tokudb/hatoku_defines.h
@@@ -35,8 -35,8 +35,8 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "log.h"
#include "sql_class.h"
#include "sql_show.h"
- #include "discover.h"
+ #include "item_cmpfunc.h"
-#include <binlog.h>
+//#include <binlog.h>
#include "debug_sync.h"
#undef PACKAGE
@@@ -117,20 -142,21 +142,22 @@@
#endif
#endif
#define TOKU_OPTIMIZE_WITH_RECREATE 1
+ #define TOKU_INCLUDE_RFR 1
#elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599
-// mysql 5.5 and mariadb 5.5
#define TOKU_USE_DB_TYPE_TOKUDB 1
-#define TOKU_INCLUDE_ALTER_56 1
-#define TOKU_INCLUDE_ALTER_55 1
-#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1
+#define TOKU_INCLUDE_ALTER_56 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ALTER_55 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0 /* MariaDB 5.5 */
#define TOKU_INCLUDE_XA 1
-#define TOKU_INCLUDE_WRITE_FRM_DATA 1
-#define TOKU_PARTITION_WRITE_FRM_DATA 1
+#define TOKU_PARTITION_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+ #define TOKU_INCLUDE_DISCOVER_FRM 1
-#define TOKU_INCLUDE_UPSERT 1
+#define TOKU_INCLUDE_UPSERT 0 /* MariaDB 5.5 */
#if defined(MARIADB_BASE_VERSION)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
+#define TOKU_INCLUDE_OPTION_STRUCTS 1
+#define TOKU_CLUSTERING_IS_COVERING 1
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
#else
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
diff --cc storage/tokudb/hatoku_hton.cc
index 693f9d28a9a,610c9e07be0..ce5e396146b
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@@ -62,14 -76,16 +64,16 @@@ static bool tokudb_show_status
THD* thd,
stat_print_fn* print,
enum ha_stat_type);
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig);
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static int tokudb_close_connection(handlerton* hton, THD* thd);
-static void tokudb_kill_connection(handlerton *hton, THD *thd);
+static void tokudb_kill_connection(handlerton *hton, THD *thd, enum thd_kill_levels level);
static int tokudb_commit(handlerton* hton, THD* thd, bool all);
static int tokudb_rollback(handlerton* hton, THD* thd, bool all);
- #if TOKU_INCLUDE_XA
+ #if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all);
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len);
static int tokudb_commit_by_xid(handlerton* hton, XID* xid);
@@@ -120,8 -138,8 +126,8 @@@ handlerton* tokudb_hton
const char* ha_tokudb_ext = ".tokudb";
DB_ENV* db_env;
-#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static tokudb::thread::mutex_t tokudb_map_mutex;
- #if TOKU_THDVAR_MEMALLOC_BUG
++#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static TREE tokudb_map;
struct tokudb_map_pair {
THD* thd;
@@@ -388,14 -408,16 +396,16 @@@ static int tokudb_init_func(void *p)
tokudb_hton->panic = tokudb_end;
tokudb_hton->flush_logs = tokudb_flush_logs;
tokudb_hton->show_status = tokudb_show_status;
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
tokudb_hton->handle_fatal_signal = tokudb_handle_fatal_signal;
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- tokudb_hton->table_options = tokudb_table_options;
- tokudb_hton->index_options = tokudb_index_options;
+ tokudb_hton->table_options = tokudb::sysvars::tokudb_table_options;
+ tokudb_hton->index_options = tokudb::sysvars::tokudb_index_options;
- #endif
+ #endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
if (!tokudb_home)
tokudb_home = mysql_real_data_home;
@@@ -784,8 -807,7 +795,8 @@@ static int tokudb_close_connection(TOKU
return error;
}
- void tokudb_kill_connection(handlerton *hton, THD *thd,
- enum thd_kill_levels level) {
-void tokudb_kill_connection(TOKUDB_UNUSED(handlerton* hton), THD* thd) {
++void tokudb_kill_connection(TOKUDB_UNUSED(handlerton *hton), THD *thd,
++ TOKUDB_UNUSED(enum thd_kill_levels level)) {
TOKUDB_DBUG_ENTER("");
db_env->kill_waiter(db_env, thd);
DBUG_VOID_RETURN;
@@@ -883,7 -905,7 +894,7 @@@ extern "C" enum durability_properties t
#endif
// Determine if an fsync is used when a transaction is committed.
- static bool tokudb_sync_on_commit(THD* thd, tokudb_trx_data* trx, DB_TXN* txn) {
-static bool tokudb_sync_on_commit(THD* thd) {
++static bool tokudb_sync_on_commit(THD* thd, DB_TXN* txn) {
#if MYSQL_VERSION_ID >= 50600
// Check the client durability property which is set during 2PC
if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
@@@ -906,8 -928,7 +917,8 @@@ static int tokudb_commit(handlerton * h
DB_TXN **txn = all ? &trx->all : &trx->stmt;
DB_TXN *this_txn = *txn;
if (this_txn) {
- uint32_t syncflag = tokudb_sync_on_commit(thd) ? 0 : DB_TXN_NOSYNC;
+ uint32_t syncflag =
- tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
++ tokudb_sync_on_commit(thd, this_txn) ? 0 : DB_TXN_NOSYNC;
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_TXN,
"commit trx %u txn %p syncflag %u",
diff --cc storage/tokudb/mysql-test/rpl/disabled.def
index 4c1a9a3e785,00000000000..282e343d57f
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/rpl/disabled.def
+++ b/storage/tokudb/mysql-test/rpl/disabled.def
@@@ -1,15 -1,0 +1,16 @@@
+rpl_tokudb_delete_pk: unreliable, uses timestamp differences
+rpl_tokudb_delete_pk_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_write_pk: unreliable, uses timestamp differences
+rpl_tokudb_write_pk_uc1: unreliable, uses timestamp differences
+rpl_tokudb_write_unique: unreliable, uses timestamp differences
+rpl_tokudb_write_unique_uc1: unreliable, uses timestamp differences
+rpl_tokudb_read_only_ff: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tf: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tt: unreliable, uses timestamp differences
++rpl_tokudb_read_only_ft: no TOKU_INCLUDE_RFR
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
index 5935e5ddcbd,afbc4b50da8..48ea60013ad
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
index 8bb426d9448,7aab8947940..10375677c8d
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
index ca547e34be2,64b495350c2..1cb047bbf62
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
@@@ -3,9 -8,10 +3,7 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_unique_checks%';
Variable_name Value
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
-select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
index 00000000000,72e8644f7f2..53564ab0fe4
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
@@@ -1,0 -1,62 +1,59 @@@
+ include/master-slave.inc
-Warnings:
-Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
-Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+ [connection master]
+ CREATE TABLE t1(`a` INT) ENGINE=TokuDB;
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA COMMIT 'x1';
+ XA COMMIT 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA COMMIT 'x2';
+ XA COMMIT 'y2';
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA ROLLBACK 'x1';
+ XA ROLLBACK 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA ROLLBACK 'x2';
+ XA ROLLBACK 'y2';
+ TABLES t1 and t2 must be equal otherwise an error will be thrown.
+ include/diff_tables.inc [master:test.t1, slave:test.t1]
+ DROP TABLE t1;
+ include/rpl_end.inc
diff --cc storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
index 00000000000,00000000000..e69de29bb2d
new file mode 100644
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
diff --cc storage/tokudb/mysql-test/tokudb/r/compressions.result
index 00000000000,87ba94ebbe8..03e0d18e9eb
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/compressions.result
+++ b/storage/tokudb/mysql-test/tokudb/r/compressions.result
@@@ -1,0 -1,6 +1,11 @@@
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
++FOUND /compression_method=0/ in dump
++FOUND /compression_method=7/ in dump
++FOUND /compression_method=9/ in dump
++FOUND /compression_method=10/ in dump
++FOUND /compression_method=11/ in dump
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
index 00000000000,9eb0c2f5e34..ba469a3ac96
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
+++ b/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
@@@ -1,0 -1,706 +1,326 @@@
-set optimizer_switch='mrr=on,mrr_cost_based=off';
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+ create table t1(a int);
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
+ insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+ create table t2(a int);
+ insert into t2 select A.a + 10*(B.a + 10*C.a) from t1 A, t1 B, t1 C;
+ create table t3 (
+ a char(8) not null, b char(8) not null, filler char(200),
+ key(a)
+ );
+ insert into t3 select @a:=concat('c-', 1000+ A.a, '=w'), @a, 'filler' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 2000+A.a, '=w'),
+ 'filler-1' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 3000+A.a, '=w'),
+ 'filler-2' from t2 A;
+ select a,filler from t3 where a >= 'c-9011=w';
+ a filler
+ select a,filler from t3 where a >= 'c-1011=w' and a <= 'c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=z', 'c-1013=z', 'err');
+ insert into t3 values ('a-1014=w', 'a-1014=w', 'err');
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ delete from t3 where b in ('c-1013=z', 'a-1014=w');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=w', 'del-me', 'inserted');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
-c-1013=w inserted
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
++c-1013=w inserted
+ delete from t3 where b='del-me';
+ alter table t3 add primary key(b);
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w', 'c-1021=w',
+ 'c-1022=w', 'c-1023=w', 'c-1024=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ c-1024=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1020=w') or
+ b IN ('c-1021=w', 'c-1022=w', 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w') or
+ (b>='c-1021=w' and b<= 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
++drop table if exists t4;
+ create table t4 (a varchar(10), b int, c char(10), filler char(200),
+ key idx1 (a, b, c));
+ insert into t4 (filler) select concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-1', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-222', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'bb-1',NULL,'cc-2', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'zz-1',NULL,'cc-2', 'filler-data' from t2 order by a limit 500;
+ explain
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Rowid-ordered scan
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ a b c filler
+ NULL NULL NULL NULL-15
+ NULL NULL NULL NULL-14
+ NULL NULL NULL NULL-13
+ NULL NULL NULL NULL-12
+ NULL NULL NULL NULL-11
+ NULL NULL NULL NULL-10
+ NULL NULL NULL NULL-9
+ NULL NULL NULL NULL-8
+ NULL NULL NULL NULL-7
+ NULL NULL NULL NULL-6
+ NULL NULL NULL NULL-5
+ NULL NULL NULL NULL-4
+ NULL NULL NULL NULL-3
+ NULL NULL NULL NULL-2
+ NULL NULL NULL NULL-1
+ explain
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Rowid-ordered scan
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ select * from t4 ignore index(idx1) where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ drop table t1, t2, t3, t4;
+ create table t1 (a int, b int not null,unique key (a,b),index(b));
+ insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6);
++Warnings:
++Warning 1062 Duplicate entry '6-6' for key 'a'
+ create table t2 like t1;
+ insert into t2 select * from t1;
+ alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
+ select * from t1 where a is null;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3;
+ a b c
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ drop table t1, t2;
-CREATE TABLE t1 (
-ID int(10) unsigned NOT NULL AUTO_INCREMENT,
-col1 int(10) unsigned DEFAULT NULL,
-key1 int(10) unsigned NOT NULL DEFAULT '0',
-key2 int(10) unsigned DEFAULT NULL,
-text1 text,
-text2 text,
-col2 smallint(6) DEFAULT '100',
-col3 enum('headers','bodyandsubject') NOT NULL DEFAULT 'bodyandsubject',
-col4 tinyint(3) unsigned NOT NULL DEFAULT '0',
-PRIMARY KEY (ID),
-KEY (key1),
-KEY (key2)
-) AUTO_INCREMENT=6 DEFAULT CHARSET=utf8;
-INSERT INTO t1 VALUES
-(1,NULL,1130,NULL,'Hello',NULL,100,'bodyandsubject',0),
-(2,NULL,1130,NULL,'bye',NULL,100,'bodyandsubject',0),
-(3,NULL,1130,NULL,'red',NULL,100,'bodyandsubject',0),
-(4,NULL,1130,NULL,'yellow',NULL,100,'bodyandsubject',0),
-(5,NULL,1130,NULL,'blue',NULL,100,'bodyandsubject',0);
-select * FROM t1 WHERE key1=1130 AND col1 IS NULL ORDER BY text1;
-ID col1 key1 key2 text1 text2 col2 col3 col4
-5 NULL 1130 NULL blue NULL 100 bodyandsubject 0
-2 NULL 1130 NULL bye NULL 100 bodyandsubject 0
-1 NULL 1130 NULL Hello NULL 100 bodyandsubject 0
-3 NULL 1130 NULL red NULL 100 bodyandsubject 0
-4 NULL 1130 NULL yellow NULL 100 bodyandsubject 0
-drop table t1;
-
-BUG#37851: Crash in test_if_skip_sort_order tab->select is zero
-
-CREATE TABLE t1 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1 VALUES (1);
-CREATE TABLE t2 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-int_key int(11) DEFAULT NULL,
-PRIMARY KEY (pk),
-KEY int_key (int_key)
-);
-INSERT INTO t2 VALUES (1,1),(2,6),(3,0);
-EXPLAIN EXTENDED
-SELECT MIN(t1.pk)
-FROM t1 WHERE EXISTS (
-SELECT t2.pk
-FROM t2
-WHERE t2.int_key IS NULL
-GROUP BY t2.pk
-);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t2 ref int_key int_key 5 const 1 100.00 Using where; Using index
-Warnings:
-Note 1003 /* select#1 */ select min(`test`.`t1`.`pk`) AS `MIN(t1.pk)` from `test`.`t1` where 0
-DROP TABLE t1, t2;
-#
-# BUG#42048 Discrepancy between MyISAM and Maria's ICP implementation
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b char(20), filler char(200), key(a,b(10)));
-insert into t1 select A.a + 10*(B.a + 10*C.a), 'bbb','filler' from t0 A, t0 B, t0 C;
-update t1 set b=repeat(char(65+a), 20) where a < 25;
-This must show range + using index condition:
-explain select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL x Using where
-select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-a b filler
-0 AAAAAAAAAAAAAAAAAAAA filler
-1 BBBBBBBBBBBBBBBBBBBB filler
-2 CCCCCCCCCCCCCCCCCCCC filler
-3 DDDDDDDDDDDDDDDDDDDD filler
-4 EEEEEEEEEEEEEEEEEEEE filler
-5 FFFFFFFFFFFFFFFFFFFF filler
-6 GGGGGGGGGGGGGGGGGGGG filler
-7 HHHHHHHHHHHHHHHHHHHH filler
-8 IIIIIIIIIIIIIIIIIIII filler
-9 JJJJJJJJJJJJJJJJJJJJ filler
-drop table t0,t1;
-#
-# BUG#41136: ORDER BY + range access: EXPLAIN shows "Using MRR" while MRR is actually not used
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b int, key(a));
-insert into t1 select A.a + 10 *(B.a + 10*C.a), A.a + 10 *(B.a + 10*C.a) from t0 A, t0 B, t0 C;
-This mustn't show "Using MRR":
-explain select * from t1 where a < 20 order by a;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 20 Using where
-drop table t0, t1;
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=64;
-create table t1(a int);
-insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t2(a char(8), b char(8), c char(8), filler char(100), key k1(a,b,c) );
-insert into t2 select
-concat('a-', 1000 + A.a, '-a'),
-concat('b-', 1000 + B.a, '-b'),
-concat('c-', 1000 + C.a, '-c'),
-'filler'
-from t1 A, t1 B, t1 C;
-EXPLAIN select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k1 k1 9 NULL 100 Using where; Using MRR
-select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-count(length(a) + length(filler))
-100
-drop table t2;
-create table t2 (a char(100), b char(100), c char(100), d int,
-filler char(10), key(d), primary key (a,b,c));
-insert into t2 select A.a, B.a, B.a, A.a, 'filler' from t1 A, t1 B;
-explain select * from t2 force index (d) where d < 10;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range d d 5 NULL # Using where
-drop table t2;
-drop table t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-create table t1 (f1 int not null, f2 int not null,f3 int not null, f4 char(1), primary key (f1,f2), key ix(f3));
-select * from t1 where (f3>=5 and f3<=10) or (f3>=1 and f3<=4);
-f1 f2 f3 f4
-1 1 1 A
-10 10 10 A
-2 2 2 A
-3 3 3 A
-4 4 4 A
-5 5 5 A
-6 6 6 A
-7 7 7 A
-8 8 8 A
-9 9 9 A
-drop table t1;
-
-BUG#37977: Wrong result returned on GROUP BY + OR + Innodb
-
-CREATE TABLE t1 (
-`pk` int(11) NOT NULL AUTO_INCREMENT,
-`int_nokey` int(11) NOT NULL,
-`int_key` int(11) NOT NULL,
-`date_key` date NOT NULL,
-`date_nokey` date NOT NULL,
-`time_key` time NOT NULL,
-`time_nokey` time NOT NULL,
-`datetime_key` datetime NOT NULL,
-`datetime_nokey` datetime NOT NULL,
-`varchar_key` varchar(5) DEFAULT NULL,
-`varchar_nokey` varchar(5) DEFAULT NULL,
-PRIMARY KEY (`pk`),
-KEY `int_key` (`int_key`),
-KEY `date_key` (`date_key`),
-KEY `time_key` (`time_key`),
-KEY `datetime_key` (`datetime_key`),
-KEY `varchar_key` (`varchar_key`)
-);
-INSERT INTO t1 VALUES
-(1,5,5,'2009-10-16','2009-10-16','09:28:15','09:28:15','2007-09-14 05:34:08','2007-09-14 05:34:08','qk','qk'),
-(2,6,6,'0000-00-00','0000-00-00','23:06:39','23:06:39','0000-00-00 00:00:00','0000-00-00 00:00:00','j','j'),
-(3,10,10,'2000-12-18','2000-12-18','22:16:19','22:16:19','2006-11-04 15:42:50','2006-11-04 15:42:50','aew','aew'),
-(4,0,0,'2001-09-18','2001-09-18','00:00:00','00:00:00','2004-03-23 13:23:35','2004-03-23 13:23:35',NULL,NULL),
-(5,6,6,'2007-08-16','2007-08-16','22:13:38','22:13:38','2004-08-19 11:01:28','2004-08-19 11:01:28','qu','qu');
-select pk from t1 WHERE `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-select pk from t1 WHERE `int_nokey` IS NULL OR `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-drop table t1;
-#
-# BUG#39447: Error with NOT NULL condition and LIMIT 1
-#
-CREATE TABLE t1 (
-id int(11) NOT NULL,
-parent_id int(11) DEFAULT NULL,
-name varchar(10) DEFAULT NULL,
-PRIMARY KEY (id),
-KEY ind_parent_id (parent_id)
-);
-insert into t1 (id, parent_id, name) values
-(10,NULL,'A'),
-(20,10,'B'),
-(30,10,'C'),
-(40,NULL,'D'),
-(50,40,'E'),
-(60,40,'F'),
-(70,NULL,'J');
-SELECT id FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id
-60
-This must show type=index, extra=Using where
-explain SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index ind_parent_id PRIMARY 4 NULL 1 Using where
-SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id parent_id name
-60 40 F
-drop table t1;
-#
-# Bug#50381 "Assertion failing in handler.h:1283:
-# void COST_VECT::add_io(double, double)"
-#
-CREATE TABLE t1 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-CREATE TABLE t2 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-INSERT INTO t2 VALUES (10,'v');
-INSERT INTO t2 VALUES (11,'r');
-SELECT t1.c2
-FROM t2 STRAIGHT_JOIN t1 ON t1.c1 < t2.c1;
-c2
-DROP TABLE t1, t2;
-#
-# Bug#58463: Error Can't find record on SELECT with JOIN and ORDER BY
-#
-CREATE TABLE t1 (
-pk INT NOT NULL,
-PRIMARY KEY (pk)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (2);
-CREATE TABLE t2 (
-pk INT NOT NULL,
-i1 INT NOT NULL,
-i2 INT NOT NULL,
-c1 VARCHAR(1024) CHARACTER SET utf8,
-PRIMARY KEY (pk),
-KEY k1 (i1)
-);
-INSERT INTO t2 VALUES (3, 9, 1, NULL);
-EXPLAIN SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 NULL
-1 SIMPLE t2 const PRIMARY,k1 PRIMARY 4 const 1 Using where
-SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-i1
-DROP TABLE t1, t2;
-#
-# Bug#12321461: CRASH IN DSMRR_IMPL::DSMRR_INIT ON SELECT STRAIGHT_JOIN
-#
-set @save_optimizer_switch = @@optimizer_switch;
-set optimizer_switch='block_nested_loop=off,batched_key_access=off';
-CREATE TABLE t1 (
-pk INTEGER,
-c1 VARCHAR(1) NOT NULL,
-PRIMARY KEY (pk)
-);
-CREATE TABLE t2 (
-c1 VARCHAR(1) NOT NULL
-);
-INSERT INTO t2 VALUES ('v'), ('c');
-EXPLAIN SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 2 NULL
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where
-SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-c1
-DROP TABLE t1,t2;
-set optimizer_switch= @save_optimizer_switch;
-#
-# Bug#13249966 MRR: RANDOM ERROR DUE TO UNINITIALIZED RES WITH
-# SMALL READ_RND_BUFFER_SIZE
-#
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=1;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-1
-CREATE TABLE t1 (
-i1 INTEGER NOT NULL,
-i2 INTEGER NOT NULL,
-KEY (i2)
-);
-INSERT INTO t1 VALUES (0,1),(1,2),(2,3);
-EXPLAIN SELECT i1
-FROM t1
-WHERE i2 > 2;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i2 i2 4 NULL 1 Using where
-SELECT i1
-FROM t1
-WHERE i2 > 2;
-i1
-2
-DROP TABLE t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-262144
-#
-# Bug 12365385 STRAIGHT_JOIN QUERY QUICKLY EXHAUSTS SYSTEM+VIRT.
-# MEMORY LEADING TO SYSTEM CRASH
-#
-CREATE TABLE ten (a INTEGER);
-INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-CREATE TABLE t1 (
-pk INTEGER NOT NULL,
-i1 INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1
-SELECT a, 1, 'MySQL' FROM ten;
-CREATE TABLE t2 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-c2 varchar(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t2
-SELECT a, 'MySQL', 'MySQL' FROM ten;
-CREATE TABLE t3 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t3
-SELECT a, 'MySQL' FROM ten;
-CREATE TABLE t4 (
-pk int(11) NOT NULL,
-c1_key varchar(10) CHARACTER SET utf8 NOT NULL,
-c2 varchar(10) NOT NULL,
-c3 varchar(10) NOT NULL,
-PRIMARY KEY (pk),
-KEY k1 (c1_key)
-);
-CREATE TABLE t5 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t5
-SELECT a, 'MySQL' FROM ten;
-EXPLAIN SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t5 ALL NULL NULL NULL NULL 10 NULL
-1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t3 ALL NULL NULL NULL NULL 10 Using join buffer (Block Nested Loop)
-1 SIMPLE t4 ALL k1 NULL NULL NULL 1 Range checked for each record (index map: 0x2)
-SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-pk i1 c1 pk c1 c2 pk c1 pk c1_key c2 c3 pk c1
-DROP TABLE ten, t1, t2, t3, t4, t5;
+ #
+ # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ #
+ SET AUTOCOMMIT=0;
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ @@tx_isolation
+ REPEATABLE-READ
+ START TRANSACTION;
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ id select_type table type possible_keys key key_len ref rows Extra
+ 1 SIMPLE t1 range a a 5 NULL 2 Using where
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ dummy a b
+ 3 3 3
+ 5 5 5
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+ INSERT INTO t1 VALUES (2,2,2);
+ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ ROLLBACK;
+ ROLLBACK;
+ DROP TABLE t1;
diff --cc storage/tokudb/mysql-test/tokudb/suite.pm
index 6c52d0110fe,00000000000..70a65de4a2a
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/tokudb/suite.pm
+++ b/storage/tokudb/mysql-test/tokudb/suite.pm
@@@ -1,14 -1,0 +1,20 @@@
+package My::Suite::TokuDB;
+use File::Basename;
+@ISA = qw(My::Suite);
+
+# Ensure we can run the TokuDB tests even if hugepages are enabled
+$ENV{TOKU_HUGE_PAGES_OK}=1;
++my $exe_tokuftdump=
++ ::mtr_exe_maybe_exists(
++ ::vs_config_dirs('storage/tokudb/PerconaFT/tools', 'tokuftdump'),
++ "$::path_client_bindir/tokuftdump",
++ "$::basedir/storage/tokudb/PerconaFT/tools/tokuftdump");
++$ENV{'MYSQL_TOKUFTDUMP'}= ::native_path($exe_tokuftdump);
+
+#return "Not run for embedded server" if $::opt_embedded_server;
+return "No TokuDB engine" unless $ENV{HA_TOKUDB_SO} or $::mysqld_variables{tokudb};
+
+sub is_default { not $::opt_embedded_server }
+
+bless { };
+
diff --cc storage/tokudb/mysql-test/tokudb/t/compressions.test
index 00000000000,3e83cdb8b68..cd2e405c13a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/compressions.test
+++ b/storage/tokudb/mysql-test/tokudb/t/compressions.test
@@@ -1,0 -1,68 +1,68 @@@
+ --source include/have_tokudb.inc
+
+ # The purpose of this test is to perform as complete an end-to-end validation
+ # as practical that the compression algorithm requested at the SQL layer is
+ # actually applied to the FT data files. The only practical way to check this
+ # is to use tokuftdump and look at the data file's header value for
+ # compression_method. A side effect of this test's existence is that the
+ # compression method IDs can never change; if they do, this test will fail
+ # and users' data will be irreparably damaged.
+
+ # uncompressed - compression_method=0
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
+ --let $t1_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t1-main'`
+
+ # SNAPPY - compression_method=7
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
+ --let $t2_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t2-main'`
+
+ # QUICKLZ - compression_method=9
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
+ --let $t3_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t3-main'`
+
+ # LZMA - compression_method=10
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
+ --let $t4_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t4-main'`
+
+ # ZLIB (without checksum) - compression_method=11
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
+ --let $t5_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t5-main'`
+
+ --let $datadir = `SELECT @@global.datadir`
+
+ # To ensure we have correct headers written to the FT data files and no chance
+ # of a race between header rotation and tokuftdump, let's just perform a clean
+ # server shutdown before we go rooting around in the FT files.
+ --source include/shutdown_mysqld.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/dump
+
+ # uncompressed - compression_method=0
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t1_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=0
+ --source include/search_pattern_in_file.inc
+
+ # SNAPPY - compression_method=7
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t2_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=7
+ --source include/search_pattern_in_file.inc
+
+ # QUICKLZ - compression_method=9
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t3_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=9
+ --source include/search_pattern_in_file.inc
+
+ # LZMA - compression_method=10
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t4_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=10
+ --source include/search_pattern_in_file.inc
+
+ # ZLIB (without checksum) - compression_method=11
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t5_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=11
+ --source include/search_pattern_in_file.inc
+
+ --remove_file $SEARCH_FILE
+ --source include/start_mysqld.inc
+
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
index 00000000000,b30bc18d759..6130933b279
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
+++ b/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
@@@ -1,0 -1,89 +1,73 @@@
+ #
+ # MRR/Tokudb tests, taken from mysqltest/t/innodb_mrr.test
+ # (Turns off all 6.0 optimizer switches other than MRR)
+ #
+
+ --source include/have_tokudb.inc
+ --source include/have_mrr.inc
+
-set optimizer_switch='mrr=on,mrr_cost_based=off';
-
---disable_query_log
-if (`select locate('semijoin', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='semijoin=off';
-}
-if (`select locate('materialization', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='materialization=off';
-}
-if (`select locate('index_condition_pushdown', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='index_condition_pushdown=off';
-}
---enable_query_log
-
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+
+ --source include/mrr_tests.inc
+
+
+ # taken from include/mrr_innodb_tests.inc
+
+ --source include/count_sessions.inc
+
+ # MRR tests that are special for InnoDB (and copied for TokuDB)
+
+ --echo #
+ --echo # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ --echo #
+
+ # This test verifies that a SELECT FOR UPDATE statement executed in
+ # REPEATABLE READ isolation locks the entire read interval, by checking
+ # that a second transaction trying to update data within this interval
+ # is blocked.
+
+ connect (con1,localhost,root,,);
+ connect (con2,localhost,root,,);
+
+ connection con1;
+
+ SET AUTOCOMMIT=0;
+
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ START TRANSACTION;
+
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ connection con2;
+
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+
+ --error ER_LOCK_WAIT_TIMEOUT
+ INSERT INTO t1 VALUES (2,2,2);
+ ROLLBACK;
+
+ connection con1;
+
+ ROLLBACK;
+ DROP TABLE t1;
+
+ connection default;
+ disconnect con1;
+ disconnect con2;
+
+ --source include/wait_until_count_sessions.inc
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
index 00000000000,e2e695611b5..49c61790837
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
@@@ -1,0 -1,7 +1,8 @@@
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ SELECT * FROM t1 WHERE b = 2;
+ ERROR HY000: Incorrect key file for table 't1'; try to repair it
+ DROP TABLE t1;
++FOUND /ha_tokudb::read_full_row on table/ in tokudb.bugs.PS-3773.log
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
index 00000000000,f536f5163ef..1bd5aee087a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
@@@ -1,0 -1,186 +1,177 @@@
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ alter table t1 min_rows = 8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter min_rows]
+ alter table t1 max_rows = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter max_rows]
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter avg_row_length]
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter pack_keys]
+ alter table t1 character set = utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter character set]
+ alter table t1 data directory = '/tmp';
+ Warnings:
+ Warning 1618 <DATA DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter data directory]
+ alter table t1 index directory = '/tmp';
+ Warnings:
+ Warning 1618 <INDEX DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter index directory]
+ alter table t1 checksum = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter checksum]
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter delay_key_write]
+ alter table t1 comment = 'test table';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter comment]
+ alter table t1 password = '123456';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter password]
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter connection]
-alter table t1 key_block_size=32;
-show create table t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
- PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
-include/assert.inc [underlying ft file name not changed after alter key_block_size]
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_persistent]
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_auto_recalc]
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_sample_pages]
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter auto_increment]
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name not changed after alter compression method]
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter engine type]
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter convert character]
+ drop table t1;
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
index 00000000000,684f9cbf8d5..e9490e91c33
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
@@@ -1,0 -1,26 +1,26 @@@
+ --source include/have_tokudb.inc
+ --source include/have_debug.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/tokudb.bugs.PS-3773.log
---let $restart_parameters="restart: --log-error=$SEARCH_FILE"
++--let $restart_parameters="--log-error=$SEARCH_FILE"
+ --source include/restart_mysqld.inc
+
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
+
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ --error ER_NOT_KEYFILE
+ SELECT * FROM t1 WHERE b = 2;
+
+ DROP TABLE t1;
+
+ --let SEARCH_PATTERN=ha_tokudb::read_full_row on table
+ --source include/search_pattern_in_file.inc
+
+ --let $restart_parameters=
+ --source include/restart_mysqld.inc
+
+ --remove_file $SEARCH_FILE
+ --let SEARCH_PATTERN=
+ --let SEARCH_FILE=
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
index 00000000000,fc4f3e0fd3d..e0e043f96ab
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
@@@ -1,0 -1,184 +1,188 @@@
+ --source include/have_tokudb.inc
+
+ #
+ # Create a table and get the underlying main ft file name
+ #
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ --let $ori_file= `select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+
+ #
+ # Case 1: alter create options that are ignored by TokuDB
+ #
+
+ # Alter table with min_rows
+ alter table t1 min_rows = 8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter min_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with max_rows
+ alter table t1 max_rows = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter max_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with avg_row_length
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter avg_row_length
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with pack_keys
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter pack_keys
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with default character set
+ alter table t1 character set = utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter character set
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with data directory
+ alter table t1 data directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter data directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with index directory
+ alter table t1 index directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter index directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with checksum
+ alter table t1 checksum = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter checksum
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with delay_key_write
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter delay_key_write
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with comment
+ alter table t1 comment = 'test table';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter comment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with password
+ alter table t1 password = '123456';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter password
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with connection
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter connection
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
++
++#
++# In mariadb changing of key_block_size treated as index change
++#
+ # Alter table with key_block_size
-alter table t1 key_block_size=32;
-show create table t1;
---let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
---let $assert_text= underlying ft file name not changed after alter key_block_size
---let $assert_cond= "$ori_file" = "$new_file"
---source include/assert.inc
++#alter table t1 key_block_size=32;
++#show create table t1;
++#--let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
++#--let $assert_text= underlying ft file name not changed after alter key_block_size
++#--let $assert_cond= "$ori_file" = "$new_file"
++#--source include/assert.inc
+
+ # Alter table with stats_persistent
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_persistent
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_auto_recalc
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_auto_recalc
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_sample_pages
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_sample_pages
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 2: alter create options that only update meta info, i.e inplace
+ #
+
+ # Alter table with auto_increment
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter auto_increment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with compression method
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter compression method
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 3: alter create options that rebuild table using copy algorithm
+ #
+
+ # Alter table with engine type
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter engine type
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ # Alter table with convert character
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter convert character
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ #
+ # clean up
+ #
+ drop table t1;
diff --cc storage/tokudb/tokudb_sysvars.cc
index bbc39dc550a,e8e9f908275..7771204dc11
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@@ -1006,9 -1075,12 +1002,9 @@@ st_mysql_sys_var* system_variables[] =
MYSQL_SYSVAR(support_xa),
#endif
- #if TOKUDB_DEBUG
+ #if defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(debug_pause_background_job_manager),
-#endif // defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(dir_cmd_last_error),
- MYSQL_SYSVAR(dir_cmd_last_error_string),
- MYSQL_SYSVAR(dir_cmd),
+ MYSQL_SYSVAR(debug_pause_background_job_manager),
+#endif // TOKUDB_DEBUG
NULL
};
@@@ -1055,14 -1127,12 +1051,14 @@@ my_bool disable_prefetching(THD* thd)
my_bool disable_slow_alter(THD* thd) {
return (THDVAR(thd, disable_slow_alter) != 0);
}
- #if TOKU_INCLUDE_UPSERT
- my_bool disable_slow_update(THD* thd) {
- return (THDVAR(thd, disable_slow_update) != 0);
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
+ my_bool enable_fast_update(THD* thd) {
+ return (THDVAR(thd, enable_fast_update) != 0);
}
- my_bool disable_slow_upsert(THD* thd) {
- return (THDVAR(thd, disable_slow_upsert) != 0);
+ my_bool enable_fast_upsert(THD* thd) {
+ return (THDVAR(thd, enable_fast_upsert) != 0);
}
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
empty_scan_mode_t empty_scan(THD* thd) {
return (empty_scan_mode_t)THDVAR(thd, empty_scan);
}
@@@ -1139,17 -1211,5 +1137,17 @@@ my_bool support_xa(THD* thd)
return (THDVAR(thd, support_xa) != 0);
}
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+ha_create_table_option tokudb_table_options[] = {
+ HA_TOPTION_SYSVAR("compression", row_format, row_format),
+ HA_TOPTION_END
+};
+
+ha_create_table_option tokudb_index_options[] = {
+ HA_IOPTION_BOOL("clustering", clustering, 0),
+ HA_IOPTION_END
+};
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
} // namespace sysvars
} // namespace tokudb
diff --cc storage/tokudb/tokudb_sysvars.h
index 7701f211729,d81d5fd7999..2454f8fefd2
--- a/storage/tokudb/tokudb_sysvars.h
+++ b/storage/tokudb/tokudb_sysvars.h
@@@ -26,26 -26,6 +26,26 @@@ Copyright (c) 2006, 2015, Percona and/o
#ifndef _TOKUDB_SYSVARS_H
#define _TOKUDB_SYSVARS_H
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+struct ha_table_option_struct {
+ uint row_format;
+};
+
+struct ha_index_option_struct {
+ bool clustering;
+};
+
+static inline bool key_is_clustering(const KEY *key) {
+ return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
+}
+
+#else
+
+static inline bool key_is_clustering(const KEY *key) {
+ return key->flags & HA_CLUSTERING;
+}
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
namespace tokudb {
namespace sysvars {
by Oleksandr Byelkin 06 Sep '18
revision-id: 3d82f0c76377718567f1583b5d38de57c569f94d (mariadb-10.0.36-25-g3d82f0c7637)
parent(s): 0ccba62db385139caae514f70b31187bdce0de88 a816eac92ac2381e1b9cd4d655e733bdeafb173e
author: Oleksandr Byelkin
committer: Oleksandr Byelkin
timestamp: 2018-09-06 18:51:41 +0200
message:
Merge branch 'merge-tokudb-5.6' into 10.0
storage/tokudb/CMakeLists.txt | 8 +-
storage/tokudb/PerconaFT/CMakeLists.txt | 3 +-
.../cmake_modules/TokuSetupCompiler.cmake | 3 +
.../tokudb/PerconaFT/ft/cachetable/cachetable.cc | 21 +-
.../tokudb/PerconaFT/ft/cachetable/cachetable.h | 8 +-
.../tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft-test-helpers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft.h | 3 +
storage/tokudb/PerconaFT/ft/node.cc | 2 +
.../PerconaFT/ft/serialize/block_allocator.cc | 2 +-
.../tokudb/PerconaFT/ft/tests/cachetable-4357.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-4365.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-5097.cc | 6 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc | 7 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-all-write.cc | 5 +-
.../ft/tests/cachetable-checkpoint-pending.cc | 8 +-
.../ft/tests/cachetable-checkpoint-pinned-nodes.cc | 6 +-
.../ft/tests/cachetable-cleaner-checkpoint.cc | 5 +-
.../ft/tests/cachetable-cleaner-checkpoint2.cc | 5 +-
.../cachetable-cleaner-thread-attrs-accumulate.cc | 8 +-
.../cachetable-cleaner-thread-everything-pinned.cc | 5 +-
...etable-cleaner-thread-nothing-needs-flushing.cc | 5 +-
.../cachetable-cleaner-thread-same-fullhash.cc | 7 +-
.../ft/tests/cachetable-cleaner-thread-simple.cc | 7 +-
.../ft/tests/cachetable-clock-eviction.cc | 9 +-
.../ft/tests/cachetable-clock-eviction2.cc | 9 +-
.../ft/tests/cachetable-clock-eviction3.cc | 9 +-
.../ft/tests/cachetable-clock-eviction4.cc | 9 +-
.../ft/tests/cachetable-clone-checkpoint.cc | 5 +-
.../cachetable-clone-partial-fetch-pinned-node.cc | 7 +-
.../ft/tests/cachetable-clone-partial-fetch.cc | 7 +-
.../ft/tests/cachetable-clone-pin-nonblocking.cc | 7 +-
.../ft/tests/cachetable-clone-unpin-remove.cc | 5 +-
.../ft/tests/cachetable-eviction-close-test.cc | 4 -
.../ft/tests/cachetable-eviction-close-test2.cc | 4 -
.../ft/tests/cachetable-eviction-getandpin-test.cc | 14 +-
.../tests/cachetable-eviction-getandpin-test2.cc | 12 +-
.../ft/tests/cachetable-fetch-inducing-evictor.cc | 15 +-
.../ft/tests/cachetable-flush-during-cleaner.cc | 3 +-
.../ft/tests/cachetable-getandpin-test.cc | 8 +-
.../cachetable-kibbutz_and_flush_cachefile.cc | 3 +-
.../PerconaFT/ft/tests/cachetable-partial-fetch.cc | 18 +-
.../ft/tests/cachetable-pin-checkpoint.cc | 6 -
.../cachetable-pin-nonblocking-checkpoint-clean.cc | 9 +-
.../ft/tests/cachetable-prefetch-close-test.cc | 2 -
.../ft/tests/cachetable-prefetch-getandpin-test.cc | 12 +-
.../ft/tests/cachetable-put-checkpoint.cc | 9 -
.../PerconaFT/ft/tests/cachetable-simple-clone.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-clone2.cc | 5 +-
.../PerconaFT/ft/tests/cachetable-simple-close.cc | 20 +-
.../ft/tests/cachetable-simple-maybe-get-pin.cc | 3 +-
.../ft/tests/cachetable-simple-pin-cheap.cc | 9 +-
.../ft/tests/cachetable-simple-pin-dep-nodes.cc | 8 +-
.../cachetable-simple-pin-nonblocking-cheap.cc | 19 +-
.../ft/tests/cachetable-simple-pin-nonblocking.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-simple-pin.cc | 11 +-
.../ft/tests/cachetable-simple-put-dep-nodes.cc | 6 +-
.../cachetable-simple-read-pin-nonblocking.cc | 13 +-
.../ft/tests/cachetable-simple-read-pin.cc | 13 +-
.../cachetable-simple-unpin-remove-checkpoint.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-verify.cc | 5 +-
.../tokudb/PerconaFT/ft/tests/cachetable-test.cc | 22 +-
.../ft/tests/cachetable-unpin-and-remove-test.cc | 4 +-
.../cachetable-unpin-remove-and-checkpoint.cc | 6 +-
.../PerconaFT/ft/tests/cachetable-unpin-test.cc | 2 -
storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc | 178 +
storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc | 208 +
storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc | 2 +
storage/tokudb/PerconaFT/ft/txn/rollback.cc | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp | 2 +-
storage/tokudb/PerconaFT/portability/memory.cc | 14 +-
storage/tokudb/PerconaFT/portability/toku_assert.h | 2 +-
.../tokudb/PerconaFT/portability/toku_debug_sync.h | 3 +-
.../PerconaFT/portability/toku_instr_mysql.cc | 6 +-
.../PerconaFT/portability/toku_instrumentation.h | 6 +-
.../PerconaFT/portability/toku_portability.h | 2 +-
.../tokudb/PerconaFT/portability/toku_race_tools.h | 2 +-
storage/tokudb/PerconaFT/src/tests/get_last_key.cc | 32 +-
storage/tokudb/PerconaFT/src/ydb.cc | 3 +
storage/tokudb/PerconaFT/util/dmt.cc | 4 +-
storage/tokudb/PerconaFT/util/minicron.cc | 3 +-
storage/tokudb/PerconaFT/util/scoped_malloc.cc | 2 +-
.../util/tests/minicron-change-period-data-race.cc | 66 +
storage/tokudb/ha_tokudb.cc | 325 +-
storage/tokudb/ha_tokudb.h | 92 +-
storage/tokudb/ha_tokudb_admin.cc | 8 +-
storage/tokudb/ha_tokudb_alter_55.cc | 4 +
storage/tokudb/ha_tokudb_alter_56.cc | 265 +-
storage/tokudb/ha_tokudb_alter_common.cc | 6 +-
storage/tokudb/ha_tokudb_update.cc | 96 +-
storage/tokudb/hatoku_cmp.cc | 33 +-
storage/tokudb/hatoku_cmp.h | 14 +-
storage/tokudb/hatoku_defines.h | 50 +-
storage/tokudb/hatoku_hton.cc | 169 +-
storage/tokudb/hatoku_hton.h | 25 +-
storage/tokudb/mysql-test/rpl/disabled.def | 1 +
.../r/rpl_mixed_replace_into.result | 0
.../rpl/r/rpl_parallel_tokudb_delete_pk.result | 5 -
...pl_parallel_tokudb_update_pk_uc0_lookup0.result | 5 -
.../rpl/r/rpl_parallel_tokudb_write_pk.result | 2 -
.../r/rpl_row_replace_into.result | 0
.../r/rpl_stmt_replace_into.result | 0
.../mysql-test/rpl/r/rpl_xa_interleave.result | 59 +
.../t/rpl_mixed_replace_into.test | 0
.../t/rpl_row_replace_into.test | 0
.../t/rpl_stmt_replace_into.test | 0
.../tokudb/mysql-test/rpl/t/rpl_xa_interleave.test | 103 +
.../tokudb/include/fast_update_gen_footer.inc | 2 +
.../include/fast_update_gen_footer_silent.inc | 9 +
.../tokudb/include/fast_update_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_update_int.inc | 48 +
.../tokudb/include/fast_upsert_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_upsert_int.inc | 19 +
.../tokudb/mysql-test/tokudb/include/have_mrr.inc | 0
.../tokudb/include/setup_fast_update_upsert.inc | 8 +
.../tokudb/mysql-test/tokudb/r/compressions.result | 11 +
.../tokudb/r/fast_update_binlog_mixed.result | 225 +-
.../tokudb/r/fast_update_binlog_row.result | 19 +-
.../tokudb/r/fast_update_binlog_statement.result | 222 +-
.../mysql-test/tokudb/r/fast_update_blobs.result | 18253 +---------
.../r/fast_update_blobs_fixed_varchar.result | 33026 ------------------
.../tokudb/r/fast_update_blobs_with_varchar.result | 32771 +-----------------
.../mysql-test/tokudb/r/fast_update_char.result | 60 +-
.../tokudb/r/fast_update_deadlock.result | 19 +-
.../tokudb/r/fast_update_decr_floor.result | 314 +-
.../r/fast_update_disable_slow_update.result | 7 -
.../mysql-test/tokudb/r/fast_update_error.result | 12 +-
.../mysql-test/tokudb/r/fast_update_int.result | 562 +-
.../tokudb/r/fast_update_int_bounds.result | 52 +-
.../mysql-test/tokudb/r/fast_update_key.result | 54 +-
.../mysql-test/tokudb/r/fast_update_sqlmode.result | 21 +-
.../tokudb/r/fast_update_uint_bounds.result | 36 +-
.../mysql-test/tokudb/r/fast_update_varchar.result | 13575 +-------
.../mysql-test/tokudb/r/fast_upsert_bin_pad.result | Bin 659 -> 738 bytes
.../mysql-test/tokudb/r/fast_upsert_char.result | 24 +-
.../tokudb/r/fast_upsert_deadlock.result | 19 +-
.../mysql-test/tokudb/r/fast_upsert_int.result | 428 +-
.../mysql-test/tokudb/r/fast_upsert_key.result | 43 +-
.../mysql-test/tokudb/r/fast_upsert_sqlmode.result | 23 +-
.../mysql-test/tokudb/r/fast_upsert_values.result | 18 +-
.../tokudb/mysql-test/tokudb/r/tokudb_mrr.result | 326 +
storage/tokudb/mysql-test/tokudb/suite.pm | 6 +
.../tokudb/mysql-test/tokudb/t/compressions.test | 68 +
storage/tokudb/mysql-test/tokudb/t/disabled.def | 24 -
.../tokudb/t/fast_update_binlog_mixed-master.opt | 2 +
.../tokudb/t/fast_update_binlog_mixed.test | 15 +-
.../tokudb/t/fast_update_binlog_row-master.opt | 2 +
.../tokudb/t/fast_update_binlog_row.test | 19 +-
.../t/fast_update_binlog_statement-master.opt | 2 +
.../tokudb/t/fast_update_binlog_statement.test | 15 +-
.../mysql-test/tokudb/t/fast_update_blobs.py | 57 -
.../mysql-test/tokudb/t/fast_update_blobs.test | 18575 +----------
.../tokudb/t/fast_update_blobs_fixed_varchar.py | 63 -
.../tokudb/t/fast_update_blobs_fixed_varchar.test | 33287 -------------------
.../tokudb/t/fast_update_blobs_with_varchar.py | 62 -
.../tokudb/t/fast_update_blobs_with_varchar.test | 33115 +-----------------
.../mysql-test/tokudb/t/fast_update_char.test | 66 +-
.../mysql-test/tokudb/t/fast_update_deadlock.test | 21 +-
.../mysql-test/tokudb/t/fast_update_decr_floor.py | 58 -
.../tokudb/t/fast_update_decr_floor.test | 409 +-
.../tokudb/t/fast_update_disable_slow_update.test | 17 -
.../mysql-test/tokudb/t/fast_update_error.test | 16 +-
.../tokudb/mysql-test/tokudb/t/fast_update_int.py | 77 -
.../mysql-test/tokudb/t/fast_update_int.test | 682 +-
.../tokudb/t/fast_update_int_bounds.test | 55 +-
.../mysql-test/tokudb/t/fast_update_key.test | 63 +-
.../mysql-test/tokudb/t/fast_update_sqlmode.test | 25 +-
.../tokudb/t/fast_update_uint_bounds.test | 42 +-
.../mysql-test/tokudb/t/fast_update_varchar.py | 63 -
.../mysql-test/tokudb/t/fast_update_varchar.test | 7390 +---
.../mysql-test/tokudb/t/fast_upsert_bin_pad.test | 19 +-
.../mysql-test/tokudb/t/fast_upsert_char.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_deadlock.test | 22 +-
.../tokudb/mysql-test/tokudb/t/fast_upsert_int.py | 50 -
.../mysql-test/tokudb/t/fast_upsert_int.test | 486 +-
.../mysql-test/tokudb/t/fast_upsert_key.test | 46 +-
.../mysql-test/tokudb/t/fast_upsert_sqlmode.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_values.test | 21 +-
storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test | 73 +
.../tokudb/mysql-test/tokudb_bugs/r/PS-3773.result | 8 +
.../r/alter_table_comment_rebuild_data.result | 177 +
.../tokudb/mysql-test/tokudb_bugs/t/PS-3773.test | 26 +
.../t/alter_table_comment_rebuild_data.test | 188 +
storage/tokudb/tokudb_debug.h | 5 -
storage/tokudb/tokudb_dir_cmd.h | 6 +-
storage/tokudb/tokudb_information_schema.cc | 74 +-
storage/tokudb/tokudb_sysvars.cc | 122 +-
storage/tokudb/tokudb_sysvars.h | 16 +-
storage/tokudb/tokudb_thread.h | 26 +-
storage/tokudb/tokudb_update_fun.cc | 230 +-
192 files changed, 3936 insertions(+), 194538 deletions(-)
diff --cc storage/tokudb/CMakeLists.txt
index 3099e704497,0ac3c20bf16..72fbe45cfc9
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@@ -1,11 -1,7 +1,11 @@@
- SET(TOKUDB_VERSION 5.6.39-83.1)
-SET(TOKUDB_VERSION )
++SET(TOKUDB_VERSION 5.6.41-84.1)
# PerconaFT only supports x86-64 and cmake-2.8.9+
-IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
- NOT CMAKE_VERSION VERSION_LESS "2.8.9")
+IF(CMAKE_VERSION VERSION_LESS "2.8.9")
+ MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
+ELSEIF(NOT HAVE_DLOPEN)
+ MESSAGE(STATUS "dlopen is required by TokuDB")
+ELSEIF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR
+ CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
CHECK_CXX_SOURCE_COMPILES(
"
struct a {int b; int c; };
diff --cc storage/tokudb/PerconaFT/ft/ft.h
index 7a3c4fa783c,7a3c4fa783c..ff0b63b2b12
--- a/storage/tokudb/PerconaFT/ft/ft.h
+++ b/storage/tokudb/PerconaFT/ft/ft.h
@@@ -44,6 -44,6 +44,9 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "ft/ft-ops.h"
#include "ft/logger/log.h"
#include "util/dbt.h"
++#ifndef TOKU_MYSQL_WITH_PFS
++#include <my_global.h>
++#endif
typedef struct ft *FT;
typedef struct ft_options *FT_OPTIONS;
diff --cc storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
index b7b4c0ab233,6f69c3c31b9..d742555f878
--- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
+++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
@@@ -18,7 -18,7 +18,7 @@@ int toku_pthread_create(const toku_inst
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg) {
- #if (MYSQL_VERSION_MAJOR >= 5) && (MYSQL_VERSION_MINOR >= 7)
-#if (MYSQL_VERSION_ID >= 50700)
++#if (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
return PSI_THREAD_CALL(spawn_thread)(
key.id(), reinterpret_cast<my_thread_handle *>(thread),
attr, start_routine, arg);
diff --cc storage/tokudb/ha_tokudb.cc
index 7a328e31261,548ac5c7b09..4637ac1bf5f
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@@ -34,20 -34,7 +34,20 @@@ Copyright (c) 2006, 2015, Percona and/o
pfs_key_t ha_tokudb_mutex_key;
pfs_key_t num_DBs_lock_key;
- #if TOKU_INCLUDE_EXTENDED_KEYS
++#if defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+static inline uint get_ext_key_parts(const KEY *key) {
+#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
+ (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
+ return key->actual_key_parts;
+#elif defined(MARIADB_BASE_VERSION)
+ return key->ext_key_parts;
+#else
+#error
+#endif
+}
- #endif
++#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+
- HASH TOKUDB_SHARE::_open_tables;
+ std::unordered_map<std::string, TOKUDB_SHARE*> TOKUDB_SHARE::_open_tables;
tokudb::thread::mutex_t TOKUDB_SHARE::_open_tables_mutex;
static const char* ha_tokudb_exts[] = {
@@@ -7221,8 -7262,8 +7263,8 @@@ int ha_tokudb::create
form->s->write_frm_image();
#endif
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- const tokudb::sysvars::format_t row_format =
+ const tokudb::sysvars::row_format_t row_format =
(tokudb::sysvars::row_format_t)form->s->option_struct->row_format;
#else
// TDB-76 : CREATE TABLE ... LIKE ... does not use source row_format on
diff --cc storage/tokudb/ha_tokudb.h
index a2fd747bb92,1f47308c978..6f592617b76
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@@ -1072,7 -1085,28 +1085,8 @@@ private
bool in_rpl_write_rows;
bool in_rpl_delete_rows;
bool in_rpl_update_rows;
+ #endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
};
-#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-struct ha_table_option_struct {
- uint row_format;
-};
-
-struct ha_index_option_struct {
- bool clustering;
-};
-
-static inline bool key_is_clustering(const KEY *key) {
- return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
-}
-
-#else
-
-static inline bool key_is_clustering(const KEY *key) {
- return key->flags & HA_CLUSTERING;
-}
-#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-
#endif // _HA_TOKUDB_H
diff --cc storage/tokudb/ha_tokudb_update.cc
index 9fe5e729ec4,5b09279afc5..bae19ba8b16
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@@ -52,6 -50,6 +50,7 @@@ Copyright (c) 2006, 2015, Percona and/o
// Support more complicated update expressions
// Replace field_offset
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
// Debug function to dump an Item
static void dump_item(Item* item) {
fprintf(stderr, "%u", item->type());
@@@ -1131,5 -1127,3 +1128,4 @@@ int ha_tokudb::send_upsert_message
return error;
}
-
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
diff --cc storage/tokudb/hatoku_defines.h
index 92d7da86edf,e2fbe85b3b1..66a8fa5d982
--- a/storage/tokudb/hatoku_defines.h
+++ b/storage/tokudb/hatoku_defines.h
@@@ -35,8 -35,8 +35,8 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "log.h"
#include "sql_class.h"
#include "sql_show.h"
- #include "discover.h"
+ #include "item_cmpfunc.h"
-#include <binlog.h>
+//#include <binlog.h>
#include "debug_sync.h"
#undef PACKAGE
@@@ -117,20 -142,21 +142,22 @@@
#endif
#endif
#define TOKU_OPTIMIZE_WITH_RECREATE 1
+ #define TOKU_INCLUDE_RFR 1
#elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599
-// mysql 5.5 and mariadb 5.5
#define TOKU_USE_DB_TYPE_TOKUDB 1
-#define TOKU_INCLUDE_ALTER_56 1
-#define TOKU_INCLUDE_ALTER_55 1
-#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1
+#define TOKU_INCLUDE_ALTER_56 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ALTER_55 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0 /* MariaDB 5.5 */
#define TOKU_INCLUDE_XA 1
-#define TOKU_INCLUDE_WRITE_FRM_DATA 1
-#define TOKU_PARTITION_WRITE_FRM_DATA 1
+#define TOKU_PARTITION_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+ #define TOKU_INCLUDE_DISCOVER_FRM 1
-#define TOKU_INCLUDE_UPSERT 1
+#define TOKU_INCLUDE_UPSERT 0 /* MariaDB 5.5 */
#if defined(MARIADB_BASE_VERSION)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
+#define TOKU_INCLUDE_OPTION_STRUCTS 1
+#define TOKU_CLUSTERING_IS_COVERING 1
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
#else
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
diff --cc storage/tokudb/hatoku_hton.cc
index 693f9d28a9a,610c9e07be0..ce5e396146b
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@@ -62,14 -76,16 +64,16 @@@ static bool tokudb_show_status
THD* thd,
stat_print_fn* print,
enum ha_stat_type);
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig);
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static int tokudb_close_connection(handlerton* hton, THD* thd);
-static void tokudb_kill_connection(handlerton *hton, THD *thd);
+static void tokudb_kill_connection(handlerton *hton, THD *thd, enum thd_kill_levels level);
static int tokudb_commit(handlerton* hton, THD* thd, bool all);
static int tokudb_rollback(handlerton* hton, THD* thd, bool all);
- #if TOKU_INCLUDE_XA
+ #if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all);
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len);
static int tokudb_commit_by_xid(handlerton* hton, XID* xid);
@@@ -120,8 -138,8 +126,8 @@@ handlerton* tokudb_hton
const char* ha_tokudb_ext = ".tokudb";
DB_ENV* db_env;
-#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static tokudb::thread::mutex_t tokudb_map_mutex;
- #if TOKU_THDVAR_MEMALLOC_BUG
++#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static TREE tokudb_map;
struct tokudb_map_pair {
THD* thd;
@@@ -388,14 -408,16 +396,16 @@@ static int tokudb_init_func(void *p)
tokudb_hton->panic = tokudb_end;
tokudb_hton->flush_logs = tokudb_flush_logs;
tokudb_hton->show_status = tokudb_show_status;
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
tokudb_hton->handle_fatal_signal = tokudb_handle_fatal_signal;
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- tokudb_hton->table_options = tokudb_table_options;
- tokudb_hton->index_options = tokudb_index_options;
+ tokudb_hton->table_options = tokudb::sysvars::tokudb_table_options;
+ tokudb_hton->index_options = tokudb::sysvars::tokudb_index_options;
- #endif
+ #endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
if (!tokudb_home)
tokudb_home = mysql_real_data_home;
@@@ -784,8 -807,7 +795,8 @@@ static int tokudb_close_connection(TOKU
return error;
}
- void tokudb_kill_connection(handlerton *hton, THD *thd,
- enum thd_kill_levels level) {
-void tokudb_kill_connection(TOKUDB_UNUSED(handlerton* hton), THD* thd) {
++void tokudb_kill_connection(TOKUDB_UNUSED(handlerton *hton), THD *thd,
++ TOKUDB_UNUSED(enum thd_kill_levels level)) {
TOKUDB_DBUG_ENTER("");
db_env->kill_waiter(db_env, thd);
DBUG_VOID_RETURN;
@@@ -883,7 -905,7 +894,7 @@@ extern "C" enum durability_properties t
#endif
// Determine if an fsync is used when a transaction is committed.
- static bool tokudb_sync_on_commit(THD* thd, tokudb_trx_data* trx, DB_TXN* txn) {
-static bool tokudb_sync_on_commit(THD* thd) {
++static bool tokudb_sync_on_commit(THD* thd, DB_TXN* txn) {
#if MYSQL_VERSION_ID >= 50600
// Check the client durability property which is set during 2PC
if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
@@@ -906,8 -928,7 +917,8 @@@ static int tokudb_commit(handlerton * h
DB_TXN **txn = all ? &trx->all : &trx->stmt;
DB_TXN *this_txn = *txn;
if (this_txn) {
- uint32_t syncflag = tokudb_sync_on_commit(thd) ? 0 : DB_TXN_NOSYNC;
+ uint32_t syncflag =
- tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
++ tokudb_sync_on_commit(thd, this_txn) ? 0 : DB_TXN_NOSYNC;
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_TXN,
"commit trx %u txn %p syncflag %u",
diff --cc storage/tokudb/mysql-test/rpl/disabled.def
index 4c1a9a3e785,00000000000..282e343d57f
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/rpl/disabled.def
+++ b/storage/tokudb/mysql-test/rpl/disabled.def
@@@ -1,15 -1,0 +1,16 @@@
+rpl_tokudb_delete_pk: unreliable, uses timestamp differences
+rpl_tokudb_delete_pk_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_write_pk: unreliable, uses timestamp differences
+rpl_tokudb_write_pk_uc1: unreliable, uses timestamp differences
+rpl_tokudb_write_unique: unreliable, uses timestamp differences
+rpl_tokudb_write_unique_uc1: unreliable, uses timestamp differences
+rpl_tokudb_read_only_ff: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tf: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tt: unreliable, uses timestamp differences
++rpl_tokudb_read_only_ft: no TOKU_INCLUDE_RFR
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
index 5935e5ddcbd,afbc4b50da8..48ea60013ad
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
index 8bb426d9448,7aab8947940..10375677c8d
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
index ca547e34be2,64b495350c2..1cb047bbf62
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
@@@ -3,9 -8,10 +3,7 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_unique_checks%';
Variable_name Value
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
-select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
index 00000000000,72e8644f7f2..53564ab0fe4
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
@@@ -1,0 -1,62 +1,59 @@@
+ include/master-slave.inc
-Warnings:
-Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
-Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+ [connection master]
+ CREATE TABLE t1(`a` INT) ENGINE=TokuDB;
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA COMMIT 'x1';
+ XA COMMIT 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA COMMIT 'x2';
+ XA COMMIT 'y2';
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA ROLLBACK 'x1';
+ XA ROLLBACK 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA ROLLBACK 'x2';
+ XA ROLLBACK 'y2';
+ TABLES t1 and t2 must be equal otherwise an error will be thrown.
+ include/diff_tables.inc [master:test.t1, slave:test.t1]
+ DROP TABLE t1;
+ include/rpl_end.inc
diff --cc storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
index 00000000000,00000000000..e69de29bb2d
new file mode 100644
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
diff --cc storage/tokudb/mysql-test/tokudb/r/compressions.result
index 00000000000,87ba94ebbe8..03e0d18e9eb
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/compressions.result
+++ b/storage/tokudb/mysql-test/tokudb/r/compressions.result
@@@ -1,0 -1,6 +1,11 @@@
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
++FOUND /compression_method=0/ in dump
++FOUND /compression_method=7/ in dump
++FOUND /compression_method=9/ in dump
++FOUND /compression_method=10/ in dump
++FOUND /compression_method=11/ in dump
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
index 00000000000,9eb0c2f5e34..ba469a3ac96
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
+++ b/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
@@@ -1,0 -1,706 +1,326 @@@
-set optimizer_switch='mrr=on,mrr_cost_based=off';
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+ create table t1(a int);
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
+ insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+ create table t2(a int);
+ insert into t2 select A.a + 10*(B.a + 10*C.a) from t1 A, t1 B, t1 C;
+ create table t3 (
+ a char(8) not null, b char(8) not null, filler char(200),
+ key(a)
+ );
+ insert into t3 select @a:=concat('c-', 1000+ A.a, '=w'), @a, 'filler' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 2000+A.a, '=w'),
+ 'filler-1' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 3000+A.a, '=w'),
+ 'filler-2' from t2 A;
+ select a,filler from t3 where a >= 'c-9011=w';
+ a filler
+ select a,filler from t3 where a >= 'c-1011=w' and a <= 'c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=z', 'c-1013=z', 'err');
+ insert into t3 values ('a-1014=w', 'a-1014=w', 'err');
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ delete from t3 where b in ('c-1013=z', 'a-1014=w');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=w', 'del-me', 'inserted');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
-c-1013=w inserted
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
++c-1013=w inserted
+ delete from t3 where b='del-me';
+ alter table t3 add primary key(b);
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w', 'c-1021=w',
+ 'c-1022=w', 'c-1023=w', 'c-1024=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ c-1024=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1020=w') or
+ b IN ('c-1021=w', 'c-1022=w', 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w') or
+ (b>='c-1021=w' and b<= 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
++drop table if exists t4;
+ create table t4 (a varchar(10), b int, c char(10), filler char(200),
+ key idx1 (a, b, c));
+ insert into t4 (filler) select concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-1', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-222', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'bb-1',NULL,'cc-2', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'zz-1',NULL,'cc-2', 'filler-data' from t2 order by a limit 500;
+ explain
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Rowid-ordered scan
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ a b c filler
+ NULL NULL NULL NULL-15
+ NULL NULL NULL NULL-14
+ NULL NULL NULL NULL-13
+ NULL NULL NULL NULL-12
+ NULL NULL NULL NULL-11
+ NULL NULL NULL NULL-10
+ NULL NULL NULL NULL-9
+ NULL NULL NULL NULL-8
+ NULL NULL NULL NULL-7
+ NULL NULL NULL NULL-6
+ NULL NULL NULL NULL-5
+ NULL NULL NULL NULL-4
+ NULL NULL NULL NULL-3
+ NULL NULL NULL NULL-2
+ NULL NULL NULL NULL-1
+ explain
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Rowid-ordered scan
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ select * from t4 ignore index(idx1) where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ drop table t1, t2, t3, t4;
+ create table t1 (a int, b int not null,unique key (a,b),index(b));
+ insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6);
++Warnings:
++Warning 1062 Duplicate entry '6-6' for key 'a'
+ create table t2 like t1;
+ insert into t2 select * from t1;
+ alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
+ select * from t1 where a is null;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3;
+ a b c
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ drop table t1, t2;
-CREATE TABLE t1 (
-ID int(10) unsigned NOT NULL AUTO_INCREMENT,
-col1 int(10) unsigned DEFAULT NULL,
-key1 int(10) unsigned NOT NULL DEFAULT '0',
-key2 int(10) unsigned DEFAULT NULL,
-text1 text,
-text2 text,
-col2 smallint(6) DEFAULT '100',
-col3 enum('headers','bodyandsubject') NOT NULL DEFAULT 'bodyandsubject',
-col4 tinyint(3) unsigned NOT NULL DEFAULT '0',
-PRIMARY KEY (ID),
-KEY (key1),
-KEY (key2)
-) AUTO_INCREMENT=6 DEFAULT CHARSET=utf8;
-INSERT INTO t1 VALUES
-(1,NULL,1130,NULL,'Hello',NULL,100,'bodyandsubject',0),
-(2,NULL,1130,NULL,'bye',NULL,100,'bodyandsubject',0),
-(3,NULL,1130,NULL,'red',NULL,100,'bodyandsubject',0),
-(4,NULL,1130,NULL,'yellow',NULL,100,'bodyandsubject',0),
-(5,NULL,1130,NULL,'blue',NULL,100,'bodyandsubject',0);
-select * FROM t1 WHERE key1=1130 AND col1 IS NULL ORDER BY text1;
-ID col1 key1 key2 text1 text2 col2 col3 col4
-5 NULL 1130 NULL blue NULL 100 bodyandsubject 0
-2 NULL 1130 NULL bye NULL 100 bodyandsubject 0
-1 NULL 1130 NULL Hello NULL 100 bodyandsubject 0
-3 NULL 1130 NULL red NULL 100 bodyandsubject 0
-4 NULL 1130 NULL yellow NULL 100 bodyandsubject 0
-drop table t1;
-
-BUG#37851: Crash in test_if_skip_sort_order tab->select is zero
-
-CREATE TABLE t1 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1 VALUES (1);
-CREATE TABLE t2 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-int_key int(11) DEFAULT NULL,
-PRIMARY KEY (pk),
-KEY int_key (int_key)
-);
-INSERT INTO t2 VALUES (1,1),(2,6),(3,0);
-EXPLAIN EXTENDED
-SELECT MIN(t1.pk)
-FROM t1 WHERE EXISTS (
-SELECT t2.pk
-FROM t2
-WHERE t2.int_key IS NULL
-GROUP BY t2.pk
-);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t2 ref int_key int_key 5 const 1 100.00 Using where; Using index
-Warnings:
-Note 1003 /* select#1 */ select min(`test`.`t1`.`pk`) AS `MIN(t1.pk)` from `test`.`t1` where 0
-DROP TABLE t1, t2;
-#
-# BUG#42048 Discrepancy between MyISAM and Maria's ICP implementation
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b char(20), filler char(200), key(a,b(10)));
-insert into t1 select A.a + 10*(B.a + 10*C.a), 'bbb','filler' from t0 A, t0 B, t0 C;
-update t1 set b=repeat(char(65+a), 20) where a < 25;
-This must show range + using index condition:
-explain select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL x Using where
-select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-a b filler
-0 AAAAAAAAAAAAAAAAAAAA filler
-1 BBBBBBBBBBBBBBBBBBBB filler
-2 CCCCCCCCCCCCCCCCCCCC filler
-3 DDDDDDDDDDDDDDDDDDDD filler
-4 EEEEEEEEEEEEEEEEEEEE filler
-5 FFFFFFFFFFFFFFFFFFFF filler
-6 GGGGGGGGGGGGGGGGGGGG filler
-7 HHHHHHHHHHHHHHHHHHHH filler
-8 IIIIIIIIIIIIIIIIIIII filler
-9 JJJJJJJJJJJJJJJJJJJJ filler
-drop table t0,t1;
-#
-# BUG#41136: ORDER BY + range access: EXPLAIN shows "Using MRR" while MRR is actually not used
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b int, key(a));
-insert into t1 select A.a + 10 *(B.a + 10*C.a), A.a + 10 *(B.a + 10*C.a) from t0 A, t0 B, t0 C;
-This mustn't show "Using MRR":
-explain select * from t1 where a < 20 order by a;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 20 Using where
-drop table t0, t1;
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=64;
-create table t1(a int);
-insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t2(a char(8), b char(8), c char(8), filler char(100), key k1(a,b,c) );
-insert into t2 select
-concat('a-', 1000 + A.a, '-a'),
-concat('b-', 1000 + B.a, '-b'),
-concat('c-', 1000 + C.a, '-c'),
-'filler'
-from t1 A, t1 B, t1 C;
-EXPLAIN select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k1 k1 9 NULL 100 Using where; Using MRR
-select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-count(length(a) + length(filler))
-100
-drop table t2;
-create table t2 (a char(100), b char(100), c char(100), d int,
-filler char(10), key(d), primary key (a,b,c));
-insert into t2 select A.a, B.a, B.a, A.a, 'filler' from t1 A, t1 B;
-explain select * from t2 force index (d) where d < 10;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range d d 5 NULL # Using where
-drop table t2;
-drop table t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-create table t1 (f1 int not null, f2 int not null,f3 int not null, f4 char(1), primary key (f1,f2), key ix(f3));
-select * from t1 where (f3>=5 and f3<=10) or (f3>=1 and f3<=4);
-f1 f2 f3 f4
-1 1 1 A
-10 10 10 A
-2 2 2 A
-3 3 3 A
-4 4 4 A
-5 5 5 A
-6 6 6 A
-7 7 7 A
-8 8 8 A
-9 9 9 A
-drop table t1;
-
-BUG#37977: Wrong result returned on GROUP BY + OR + Innodb
-
-CREATE TABLE t1 (
-`pk` int(11) NOT NULL AUTO_INCREMENT,
-`int_nokey` int(11) NOT NULL,
-`int_key` int(11) NOT NULL,
-`date_key` date NOT NULL,
-`date_nokey` date NOT NULL,
-`time_key` time NOT NULL,
-`time_nokey` time NOT NULL,
-`datetime_key` datetime NOT NULL,
-`datetime_nokey` datetime NOT NULL,
-`varchar_key` varchar(5) DEFAULT NULL,
-`varchar_nokey` varchar(5) DEFAULT NULL,
-PRIMARY KEY (`pk`),
-KEY `int_key` (`int_key`),
-KEY `date_key` (`date_key`),
-KEY `time_key` (`time_key`),
-KEY `datetime_key` (`datetime_key`),
-KEY `varchar_key` (`varchar_key`)
-);
-INSERT INTO t1 VALUES
-(1,5,5,'2009-10-16','2009-10-16','09:28:15','09:28:15','2007-09-14 05:34:08','2007-09-14 05:34:08','qk','qk'),
-(2,6,6,'0000-00-00','0000-00-00','23:06:39','23:06:39','0000-00-00 00:00:00','0000-00-00 00:00:00','j','j'),
-(3,10,10,'2000-12-18','2000-12-18','22:16:19','22:16:19','2006-11-04 15:42:50','2006-11-04 15:42:50','aew','aew'),
-(4,0,0,'2001-09-18','2001-09-18','00:00:00','00:00:00','2004-03-23 13:23:35','2004-03-23 13:23:35',NULL,NULL),
-(5,6,6,'2007-08-16','2007-08-16','22:13:38','22:13:38','2004-08-19 11:01:28','2004-08-19 11:01:28','qu','qu');
-select pk from t1 WHERE `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-select pk from t1 WHERE `int_nokey` IS NULL OR `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-drop table t1;
-#
-# BUG#39447: Error with NOT NULL condition and LIMIT 1
-#
-CREATE TABLE t1 (
-id int(11) NOT NULL,
-parent_id int(11) DEFAULT NULL,
-name varchar(10) DEFAULT NULL,
-PRIMARY KEY (id),
-KEY ind_parent_id (parent_id)
-);
-insert into t1 (id, parent_id, name) values
-(10,NULL,'A'),
-(20,10,'B'),
-(30,10,'C'),
-(40,NULL,'D'),
-(50,40,'E'),
-(60,40,'F'),
-(70,NULL,'J');
-SELECT id FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id
-60
-This must show type=index, extra=Using where
-explain SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index ind_parent_id PRIMARY 4 NULL 1 Using where
-SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id parent_id name
-60 40 F
-drop table t1;
-#
-# Bug#50381 "Assertion failing in handler.h:1283:
-# void COST_VECT::add_io(double, double)"
-#
-CREATE TABLE t1 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-CREATE TABLE t2 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-INSERT INTO t2 VALUES (10,'v');
-INSERT INTO t2 VALUES (11,'r');
-SELECT t1.c2
-FROM t2 STRAIGHT_JOIN t1 ON t1.c1 < t2.c1;
-c2
-DROP TABLE t1, t2;
-#
-# Bug#58463: Error Can't find record on SELECT with JOIN and ORDER BY
-#
-CREATE TABLE t1 (
-pk INT NOT NULL,
-PRIMARY KEY (pk)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (2);
-CREATE TABLE t2 (
-pk INT NOT NULL,
-i1 INT NOT NULL,
-i2 INT NOT NULL,
-c1 VARCHAR(1024) CHARACTER SET utf8,
-PRIMARY KEY (pk),
-KEY k1 (i1)
-);
-INSERT INTO t2 VALUES (3, 9, 1, NULL);
-EXPLAIN SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 NULL
-1 SIMPLE t2 const PRIMARY,k1 PRIMARY 4 const 1 Using where
-SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-i1
-DROP TABLE t1, t2;
-#
-# Bug#12321461: CRASH IN DSMRR_IMPL::DSMRR_INIT ON SELECT STRAIGHT_JOIN
-#
-set @save_optimizer_switch = @@optimizer_switch;
-set optimizer_switch='block_nested_loop=off,batched_key_access=off';
-CREATE TABLE t1 (
-pk INTEGER,
-c1 VARCHAR(1) NOT NULL,
-PRIMARY KEY (pk)
-);
-CREATE TABLE t2 (
-c1 VARCHAR(1) NOT NULL
-);
-INSERT INTO t2 VALUES ('v'), ('c');
-EXPLAIN SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 2 NULL
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where
-SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-c1
-DROP TABLE t1,t2;
-set optimizer_switch= @save_optimizer_switch;
-#
-# Bug#13249966 MRR: RANDOM ERROR DUE TO UNINITIALIZED RES WITH
-# SMALL READ_RND_BUFFER_SIZE
-#
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=1;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-1
-CREATE TABLE t1 (
-i1 INTEGER NOT NULL,
-i2 INTEGER NOT NULL,
-KEY (i2)
-);
-INSERT INTO t1 VALUES (0,1),(1,2),(2,3);
-EXPLAIN SELECT i1
-FROM t1
-WHERE i2 > 2;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i2 i2 4 NULL 1 Using where
-SELECT i1
-FROM t1
-WHERE i2 > 2;
-i1
-2
-DROP TABLE t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-262144
-#
-# Bug 12365385 STRAIGHT_JOIN QUERY QUICKLY EXHAUSTS SYSTEM+VIRT.
-# MEMORY LEADING TO SYSTEM CRASH
-#
-CREATE TABLE ten (a INTEGER);
-INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-CREATE TABLE t1 (
-pk INTEGER NOT NULL,
-i1 INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1
-SELECT a, 1, 'MySQL' FROM ten;
-CREATE TABLE t2 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-c2 varchar(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t2
-SELECT a, 'MySQL', 'MySQL' FROM ten;
-CREATE TABLE t3 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t3
-SELECT a, 'MySQL' FROM ten;
-CREATE TABLE t4 (
-pk int(11) NOT NULL,
-c1_key varchar(10) CHARACTER SET utf8 NOT NULL,
-c2 varchar(10) NOT NULL,
-c3 varchar(10) NOT NULL,
-PRIMARY KEY (pk),
-KEY k1 (c1_key)
-);
-CREATE TABLE t5 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t5
-SELECT a, 'MySQL' FROM ten;
-EXPLAIN SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t5 ALL NULL NULL NULL NULL 10 NULL
-1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t3 ALL NULL NULL NULL NULL 10 Using join buffer (Block Nested Loop)
-1 SIMPLE t4 ALL k1 NULL NULL NULL 1 Range checked for each record (index map: 0x2)
-SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-pk i1 c1 pk c1 c2 pk c1 pk c1_key c2 c3 pk c1
-DROP TABLE ten, t1, t2, t3, t4, t5;
+ #
+ # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ #
+ SET AUTOCOMMIT=0;
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ @@tx_isolation
+ REPEATABLE-READ
+ START TRANSACTION;
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ id select_type table type possible_keys key key_len ref rows Extra
+ 1 SIMPLE t1 range a a 5 NULL 2 Using where
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ dummy a b
+ 3 3 3
+ 5 5 5
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+ INSERT INTO t1 VALUES (2,2,2);
+ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ ROLLBACK;
+ ROLLBACK;
+ DROP TABLE t1;
diff --cc storage/tokudb/mysql-test/tokudb/suite.pm
index 6c52d0110fe,00000000000..70a65de4a2a
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/tokudb/suite.pm
+++ b/storage/tokudb/mysql-test/tokudb/suite.pm
@@@ -1,14 -1,0 +1,20 @@@
+package My::Suite::TokuDB;
+use File::Basename;
+@ISA = qw(My::Suite);
+
+# Ensure we can run the TokuDB tests even if hugepages are enabled
+$ENV{TOKU_HUGE_PAGES_OK}=1;
++my $exe_tokuftdump=
++ ::mtr_exe_maybe_exists(
++ ::vs_config_dirs('storage/tokudb/PerconaFT/tools', 'tokuftdump'),
++ "$::path_client_bindir/tokuftdump",
++ "$::basedir/storage/tokudb/PerconaFT/tools/tokuftdump");
++$ENV{'MYSQL_TOKUFTDUMP'}= ::native_path($exe_tokuftdump);
+
+#return "Not run for embedded server" if $::opt_embedded_server;
+return "No TokuDB engine" unless $ENV{HA_TOKUDB_SO} or $::mysqld_variables{tokudb};
+
+sub is_default { not $::opt_embedded_server }
+
+bless { };
+
diff --cc storage/tokudb/mysql-test/tokudb/t/compressions.test
index 00000000000,3e83cdb8b68..cd2e405c13a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/compressions.test
+++ b/storage/tokudb/mysql-test/tokudb/t/compressions.test
@@@ -1,0 -1,68 +1,68 @@@
+ --source include/have_tokudb.inc
+
+ # The purpose of this test is to perform about as full of an end-to-end
+ # validation that the requested compression algo at the SQL layer is actually
+ # applied to the FT data files. The only practical way to check this is to use
+ # tokuftdump and look at the data files header value for compression_method.
+ # A side effect of this is that the existence of this test will ensure that at
+ # no time will the compression method IDs ever change; if they do, this test
+ # will fail and users' data will be irreparably damaged.
+
+ # uncompressed - compression_method=0
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
+ --let $t1_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t1-main'`
+
+ # SNAPPY - compression_method=7
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
+ --let $t2_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t2-main'`
+
+ # QUICKLZ - compression_method=9
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
+ --let $t3_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t3-main'`
+
+ # LZMA - compression_method=10
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
+ --let $t4_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t4-main'`
+
+ # ZLIB (without checksum) - compression_method=11
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
+ --let $t5_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t5-main'`
+
+ --let $datadir = `SELECT @@global.datadir`
+
+ # To ensure we have correct headers written to FT data files and no chance of a
+ # race between header rotation and tokuftdump, let's just perform a clean server
+ # shutdown before we go rooting around in the FT files.
+ --source include/shutdown_mysqld.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/dump
+
+ # uncompressed - compression_method=0
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t1_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=0
+ --source include/search_pattern_in_file.inc
+
+ # SNAPPY - compression_method=7
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t2_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=7
+ --source include/search_pattern_in_file.inc
+
+ # QUICKLZ - compression_method=9
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t3_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=9
+ --source include/search_pattern_in_file.inc
+
+ # LZMA - compression_method=10
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t4_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=10
+ --source include/search_pattern_in_file.inc
+
+ # ZLIB (without checksum) - compression_method=11
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t5_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=11
+ --source include/search_pattern_in_file.inc
+
+ --remove_file $SEARCH_FILE
+ --source include/start_mysqld.inc
+
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
index 00000000000,b30bc18d759..6130933b279
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
+++ b/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
@@@ -1,0 -1,89 +1,73 @@@
+ #
+ # MRR/Tokudb tests, taken from mysqltest/t/innodb_mrr.test
+ # (Turns off all other 6.0 optimizer switches than MRR)
+ #
+
+ --source include/have_tokudb.inc
+ --source include/have_mrr.inc
+
-set optimizer_switch='mrr=on,mrr_cost_based=off';
-
---disable_query_log
-if (`select locate('semijoin', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='semijoin=off';
-}
-if (`select locate('materialization', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='materialization=off';
-}
-if (`select locate('index_condition_pushdown', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='index_condition_pushdown=off';
-}
---enable_query_log
-
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+
+ --source include/mrr_tests.inc
+
+
+ # taken from include/mrr_innodb_tests.inc
+
+ --source include/count_sessions.inc
+
+ # MRR tests that are special for InnoDB (and copied for TokuDB)
+
+ --echo #
+ --echo # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ --echo #
+
+ # This test verifies that a SELECT FOR UPDATE statement executed in
+ # REPEATABLE READ isolation will lock the entire read interval by verifying
+ # that a second transaction trying to update data within this interval will
+ # be blocked.
+
+ connect (con1,localhost,root,,);
+ connect (con2,localhost,root,,);
+
+ connection con1;
+
+ SET AUTOCOMMIT=0;
+
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ START TRANSACTION;
+
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ connection con2;
+
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+
+ --error ER_LOCK_WAIT_TIMEOUT
+ INSERT INTO t1 VALUES (2,2,2);
+ ROLLBACK;
+
+ connection con1;
+
+ ROLLBACK;
+ DROP TABLE t1;
+
+ connection default;
+ disconnect con1;
+ disconnect con2;
+
+ --source include/wait_until_count_sessions.inc
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
index 00000000000,e2e695611b5..49c61790837
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
@@@ -1,0 -1,7 +1,8 @@@
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ SELECT * FROM t1 WHERE b = 2;
+ ERROR HY000: Incorrect key file for table 't1'; try to repair it
+ DROP TABLE t1;
++FOUND /ha_tokudb::read_full_row on table/ in tokudb.bugs.PS-3773.log
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
index 00000000000,f536f5163ef..1bd5aee087a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
@@@ -1,0 -1,186 +1,177 @@@
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ alter table t1 min_rows = 8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter min_rows]
+ alter table t1 max_rows = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter max_rows]
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter avg_row_length]
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter pack_keys]
+ alter table t1 character set = utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter character set]
+ alter table t1 data directory = '/tmp';
+ Warnings:
+ Warning 1618 <DATA DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter data directory]
+ alter table t1 index directory = '/tmp';
+ Warnings:
+ Warning 1618 <INDEX DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter index directory]
+ alter table t1 checksum = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter checksum]
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter delay_key_write]
+ alter table t1 comment = 'test table';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter comment]
+ alter table t1 password = '123456';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter password]
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter connection]
-alter table t1 key_block_size=32;
-show create table t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
- PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
-include/assert.inc [underlying ft file name not changed after alter key_block_size]
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_persistent]
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_auto_recalc]
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_sample_pages]
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter auto_increment]
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name not changed after alter compression method]
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter engine type]
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter convert character]
+ drop table t1;
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
index 00000000000,684f9cbf8d5..e9490e91c33
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
@@@ -1,0 -1,26 +1,26 @@@
+ --source include/have_tokudb.inc
+ --source include/have_debug.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/tokudb.bugs.PS-3773.log
---let $restart_parameters="restart: --log-error=$SEARCH_FILE"
++--let $restart_parameters="--log-error=$SEARCH_FILE"
+ --source include/restart_mysqld.inc
+
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
+
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ --error ER_NOT_KEYFILE
+ SELECT * FROM t1 WHERE b = 2;
+
+ DROP TABLE t1;
+
+ --let SEARCH_PATTERN=ha_tokudb::read_full_row on table
+ --source include/search_pattern_in_file.inc
+
+ --let $restart_parameters=
+ --source include/restart_mysqld.inc
+
+ --remove_file $SEARCH_FILE
+ --let SEARCH_PATTERN=
+ --let SEARCH_FILE=
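
The hunk above also switches the fault-injection variable from debug to MariaDB's debug_dbug. For reference, a minimal sketch of the usual enable/restore cycle on a debug build; the DBUG keyword is the one used by this test, while the save/restore through a user variable is an assumption about tidy teardown rather than something the patch does:

SET @saved_dbug = @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
# ... run the statement that is expected to hit the injected DB_NOTFOUND error ...
SET GLOBAL debug_dbug = @saved_dbug;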
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
index 00000000000,fc4f3e0fd3d..e0e043f96ab
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
@@@ -1,0 -1,184 +1,188 @@@
+ --source include/have_tokudb.inc
+
+ #
+ # Create a table and get the underlying main ft file name
+ #
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ --let $ori_file= `select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+
+ #
+ # Case 1: alter create options that are ignored by TokuDB
+ #
+
+ # Alter table with min_rows
+ alter table t1 min_rows = 8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter min_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with max_rows
+ alter table t1 max_rows = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter max_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with avg_row_length
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter avg_row_length
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with pack_keys
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter pack_keys
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with default character set
+ alter table t1 character set = utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter character set
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with data directory
+ alter table t1 data directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter data directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with index directory
+ alter table t1 index directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter index directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with checksum
+ alter table t1 checksum = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter checksum
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with delay_key_write
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter delay_key_write
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with comment
+ alter table t1 comment = 'test table';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter comment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with password
+ alter table t1 password = '123456';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter password
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with connection
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter connection
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
++
++#
++# In MariaDB, changing key_block_size is treated as an index change
++#
+ # Alter table with key_block_size
-alter table t1 key_block_size=32;
-show create table t1;
---let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
---let $assert_text= underlying ft file name not changed after alter key_block_size
---let $assert_cond= "$ori_file" = "$new_file"
---source include/assert.inc
++#alter table t1 key_block_size=32;
++#show create table t1;
++#--let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
++#--let $assert_text= underlying ft file name not changed after alter key_block_size
++#--let $assert_cond= "$ori_file" = "$new_file"
++#--source include/assert.inc
+
+ # Alter table with stats_persistent
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_persistent
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_auto_recalc
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_auto_recalc
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_sample_pages
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_sample_pages
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 2: alter create options that only update meta info, i.e inplace
+ #
+
+ # Alter table with auto_increment
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter auto_increment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with compression method
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter compression method
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 3: alter create options that rebuild table using copy algorithm
+ #
+
+ # Alter table with engine type
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter engine type
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ # Alter table with convert character
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter convert character
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ #
+ # clean up
+ #
+ drop table t1;
diff --cc storage/tokudb/tokudb_sysvars.cc
index bbc39dc550a,e8e9f908275..7771204dc11
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@@ -1006,9 -1075,12 +1002,9 @@@ st_mysql_sys_var* system_variables[] =
MYSQL_SYSVAR(support_xa),
#endif
- #if TOKUDB_DEBUG
+ #if defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(debug_pause_background_job_manager),
-#endif // defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(dir_cmd_last_error),
- MYSQL_SYSVAR(dir_cmd_last_error_string),
- MYSQL_SYSVAR(dir_cmd),
+ MYSQL_SYSVAR(debug_pause_background_job_manager),
+#endif // TOKUDB_DEBUG
NULL
};
@@@ -1055,14 -1127,12 +1051,14 @@@ my_bool disable_prefetching(THD* thd)
my_bool disable_slow_alter(THD* thd) {
return (THDVAR(thd, disable_slow_alter) != 0);
}
- #if TOKU_INCLUDE_UPSERT
- my_bool disable_slow_update(THD* thd) {
- return (THDVAR(thd, disable_slow_update) != 0);
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
+ my_bool enable_fast_update(THD* thd) {
+ return (THDVAR(thd, enable_fast_update) != 0);
}
- my_bool disable_slow_upsert(THD* thd) {
- return (THDVAR(thd, disable_slow_upsert) != 0);
+ my_bool enable_fast_upsert(THD* thd) {
+ return (THDVAR(thd, enable_fast_upsert) != 0);
}
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
empty_scan_mode_t empty_scan(THD* thd) {
return (empty_scan_mode_t)THDVAR(thd, empty_scan);
}
@@@ -1139,17 -1211,5 +1137,17 @@@ my_bool support_xa(THD* thd)
return (THDVAR(thd, support_xa) != 0);
}
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+ha_create_table_option tokudb_table_options[] = {
+ HA_TOPTION_SYSVAR("compression", row_format, row_format),
+ HA_TOPTION_END
+};
+
+ha_create_table_option tokudb_index_options[] = {
+ HA_IOPTION_BOOL("clustering", clustering, 0),
+ HA_IOPTION_END
+};
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
} // namespace sysvars
} // namespace tokudb
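
The tokudb_table_options and tokudb_index_options arrays registered above are what expose these settings as engine-defined attributes at the SQL layer, which is why the .result files earlier in this patch now show `compression`='tokudb_zlib' in their SHOW CREATE TABLE output. A minimal sketch of the resulting syntax; the table, column and index names are made up, and the CLUSTERING=YES spelling is assumed from the HA_IOPTION_BOOL("clustering", ...) registration:

CREATE TABLE opt_demo (
  id INT PRIMARY KEY,
  payload VARCHAR(100),
  KEY k_payload (payload) CLUSTERING=YES
) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;

SHOW CREATE TABLE opt_demo;                      # expected to show the compression (and clustering) options
ALTER TABLE opt_demo COMPRESSION=TOKUDB_ZLIB;    # metadata-only change, per the alter_table_comment_rebuild_data test above
DROP TABLE opt_demo;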
diff --cc storage/tokudb/tokudb_sysvars.h
index 7701f211729,d81d5fd7999..2454f8fefd2
--- a/storage/tokudb/tokudb_sysvars.h
+++ b/storage/tokudb/tokudb_sysvars.h
@@@ -26,26 -26,6 +26,26 @@@ Copyright (c) 2006, 2015, Percona and/o
#ifndef _TOKUDB_SYSVARS_H
#define _TOKUDB_SYSVARS_H
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+struct ha_table_option_struct {
+ uint row_format;
+};
+
+struct ha_index_option_struct {
+ bool clustering;
+};
+
+static inline bool key_is_clustering(const KEY *key) {
+ return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
+}
+
+#else
+
+static inline bool key_is_clustering(const KEY *key) {
+ return key->flags & HA_CLUSTERING;
+}
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
namespace tokudb {
namespace sysvars {
revision-id: db9c1fb454af71cf564aac8a6901238e66190549 (mariadb-10.1.35-37-gdb9c1fb454a)
parent(s): 6ca6f25d4e96a479eb144a8da1066a27d0abce40
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-06 19:31:25 +0300
message:
Add wait and output.
---
mysql-test/suite/galera/r/MW-44.result | 7 ++++---
mysql-test/suite/galera/t/MW-44.test | 5 ++++-
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/mysql-test/suite/galera/r/MW-44.result b/mysql-test/suite/galera/r/MW-44.result
index 394c749b253..7e3d2f4b7ec 100644
--- a/mysql-test/suite/galera/r/MW-44.result
+++ b/mysql-test/suite/galera/r/MW-44.result
@@ -6,9 +6,10 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
SET SESSION wsrep_osu_method=RSU;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
SET SESSION wsrep_osu_method=TOI;
-SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
-COUNT(*) = 2
-1
+SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+argument
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB
+ALTER TABLE t1 ADD COLUMN f2 INTEGER
SET GLOBAL general_log='ON';
SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%';
COUNT(*) = 0
diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test
index cb5db1b208a..6defa432879 100644
--- a/mysql-test/suite/galera/t/MW-44.test
+++ b/mysql-test/suite/galera/t/MW-44.test
@@ -19,7 +19,10 @@ SET SESSION wsrep_osu_method=RSU;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
SET SESSION wsrep_osu_method=TOI;
-SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+--let $wait_condition = SELECT COUNT(argument) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+--source include/wait_condition.inc
+
+SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
--connection node_2
SET GLOBAL general_log='ON';
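
The wait_condition added above appears to close a race: the CREATE and ALTER rows may not yet be visible in mysql.general_log at the moment the SELECT runs, so the test now polls until both rows appear before listing them. For reference, the general shape of that mtr idiom; wait_condition.inc also accepts an optional $wait_timeout override in seconds, which this patch leaves at its default:

--let $wait_condition = SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%'
--let $wait_timeout = 60
--source include/wait_condition.inc
SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';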
revision-id: b0026e33af8fc3b25a42099c096a84591fd550e2 (mariadb-10.1.35-37-gb0026e33af8)
parent(s): fba683c0697af8fb1c79af4d4e2e739e01e8147d
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-06 18:55:57 +0300
message:
Disable failing galera test for now.
---
mysql-test/suite/galera/disabled.def | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def
index 704cb753f0d..40b4065c3db 100644
--- a/mysql-test/suite/galera/disabled.def
+++ b/mysql-test/suite/galera/disabled.def
@@ -32,4 +32,5 @@ galera_wan : MDEV-13549 Galera test failures
MW-388 : MDEV-13549 Galera test failures
galera_sst_mysqldump_with_key : MDEV-16890 Galera test failure
galera.galera_binlog_stmt_autoinc : MDEV-17106 Test failure on galera.galera_binlog_stmt_autoinc
-galera.galera_kill_ddl : MDEV-17108 Test failure on galera.galera_kill_ddl
\ No newline at end of file
+galera.galera_kill_ddl : MDEV-17108 Test failure on galera.galera_kill_ddl
+galera.galera_var_node_address : MDEV-17151 Galera test failure on galera.galera_var_node_address
revision-id: 42100230ea35587e4bfb7d87d433ca1c32ca4db4 (mariadb-10.1.35-37-g42100230ea3)
parent(s): 13c4e2bc10471dda9ec3886e7413a3669a5358da
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-06 16:03:56 +0300
message:
Remove.
---
mysql-test/suite/galera/r/galera_defaults.result | 71 ------------------------
mysql-test/suite/galera/t/galera_defaults.test | 11 ----
2 files changed, 82 deletions(-)
diff --git a/mysql-test/suite/galera/r/galera_defaults.result b/mysql-test/suite/galera/r/galera_defaults.result
index 38fb224d6e0..8083784bb01 100644
--- a/mysql-test/suite/galera/r/galera_defaults.result
+++ b/mysql-test/suite/galera/r/galera_defaults.result
@@ -53,74 +53,3 @@ WSREP_SST_DONOR_REJECTS_QUERIES OFF
WSREP_SST_METHOD rsync
WSREP_SYNC_WAIT 15
<BASE_DIR>; <BASE_HOST>; <BASE_PORT>; cert.log_conflicts = no; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT30S; evs.info_log_mask = 0; evs.install_timeout = PT15S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT10S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; <GCACHE_DIR>; gcache.keep_pages_size = 0; gcache.mem_size = 0; <GCACHE_NAME>; gcache.page_size = 128M; gcache.recover = no; gcache.size = 10M; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; <GCS_RECV_Q_HARD_LIMIT>; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; <GMCAST_LISTEN_ADDR>; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; <IST_RECV_ADDR>; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = PT30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT90S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; <REPL_PROTO_MAX>;socket.checksum = 2; socket.recv_buf_size = 212992;
-SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_STATUS
-WHERE VARIABLE_NAME LIKE 'wsrep_%'
-AND VARIABLE_NAME != 'wsrep_debug_sync_waiters';
-COUNT(*)
-61
-SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_STATUS
-WHERE VARIABLE_NAME LIKE 'wsrep_%'
-AND VARIABLE_NAME != 'wsrep_debug_sync_waiters'
-ORDER BY VARIABLE_NAME;
-VARIABLE_NAME
-WSREP_APPLY_OOOE
-WSREP_APPLY_OOOL
-WSREP_APPLY_WINDOW
-WSREP_CAUSAL_READS
-WSREP_CERT_DEPS_DISTANCE
-WSREP_CERT_INDEX_SIZE
-WSREP_CERT_INTERVAL
-WSREP_CLUSTER_CONF_ID
-WSREP_CLUSTER_SIZE
-WSREP_CLUSTER_STATE_UUID
-WSREP_CLUSTER_STATUS
-WSREP_CLUSTER_WEIGHT
-WSREP_COMMIT_OOOE
-WSREP_COMMIT_OOOL
-WSREP_COMMIT_WINDOW
-WSREP_CONNECTED
-WSREP_DESYNC_COUNT
-WSREP_EVS_DELAYED
-WSREP_EVS_EVICT_LIST
-WSREP_EVS_REPL_LATENCY
-WSREP_EVS_STATE
-WSREP_FLOW_CONTROL_PAUSED
-WSREP_FLOW_CONTROL_PAUSED_NS
-WSREP_FLOW_CONTROL_RECV
-WSREP_FLOW_CONTROL_SENT
-WSREP_GCOMM_UUID
-WSREP_INCOMING_ADDRESSES
-WSREP_LAST_COMMITTED
-WSREP_LOCAL_BF_ABORTS
-WSREP_LOCAL_CACHED_DOWNTO
-WSREP_LOCAL_CERT_FAILURES
-WSREP_LOCAL_COMMITS
-WSREP_LOCAL_INDEX
-WSREP_LOCAL_RECV_QUEUE
-WSREP_LOCAL_RECV_QUEUE_AVG
-WSREP_LOCAL_RECV_QUEUE_MAX
-WSREP_LOCAL_RECV_QUEUE_MIN
-WSREP_LOCAL_REPLAYS
-WSREP_LOCAL_SEND_QUEUE
-WSREP_LOCAL_SEND_QUEUE_AVG
-WSREP_LOCAL_SEND_QUEUE_MAX
-WSREP_LOCAL_SEND_QUEUE_MIN
-WSREP_LOCAL_STATE
-WSREP_LOCAL_STATE_COMMENT
-WSREP_LOCAL_STATE_UUID
-WSREP_OPEN_CONNECTIONS
-WSREP_OPEN_TRANSACTIONS
-WSREP_PROTOCOL_VERSION
-WSREP_PROVIDER_NAME
-WSREP_PROVIDER_VENDOR
-WSREP_PROVIDER_VERSION
-WSREP_READY
-WSREP_RECEIVED
-WSREP_RECEIVED_BYTES
-WSREP_REPLICATED
-WSREP_REPLICATED_BYTES
-WSREP_REPL_DATA_BYTES
-WSREP_REPL_KEYS
-WSREP_REPL_KEYS_BYTES
-WSREP_REPL_OTHER_BYTES
-WSREP_THREAD_COUNT
diff --git a/mysql-test/suite/galera/t/galera_defaults.test b/mysql-test/suite/galera/t/galera_defaults.test
index 70929f547f8..0ad97916302 100644
--- a/mysql-test/suite/galera/t/galera_defaults.test
+++ b/mysql-test/suite/galera/t/galera_defaults.test
@@ -59,14 +59,3 @@ ORDER BY VARIABLE_NAME;
$wsrep_provider_options =~ s/repl.proto_max = .*?;\s*/<REPL_PROTO_MAX>;/sgio;
print $wsrep_provider_options."\n";
EOF
-
-# Global Status
-
-SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_STATUS
-WHERE VARIABLE_NAME LIKE 'wsrep_%'
-AND VARIABLE_NAME != 'wsrep_debug_sync_waiters';
-
-SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_STATUS
-WHERE VARIABLE_NAME LIKE 'wsrep_%'
-AND VARIABLE_NAME != 'wsrep_debug_sync_waiters'
-ORDER BY VARIABLE_NAME;
revision-id: 6ca6f25d4e96a479eb144a8da1066a27d0abce40 (mariadb-10.1.35-36-g6ca6f25d4e9)
parent(s): 653038ccad1d91f3fff516f6b22462ab83e2b6f8
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-06 14:16:09 +0300
message:
MDEV-17143: Galera test failure on galera.MW-44
Test changes only.
---
mysql-test/suite/galera/r/MW-44.result | 6 +-----
mysql-test/suite/galera/t/MW-44.test | 8 --------
2 files changed, 1 insertion(+), 13 deletions(-)
diff --git a/mysql-test/suite/galera/r/MW-44.result b/mysql-test/suite/galera/r/MW-44.result
index a1e55318422..394c749b253 100644
--- a/mysql-test/suite/galera/r/MW-44.result
+++ b/mysql-test/suite/galera/r/MW-44.result
@@ -1,10 +1,6 @@
-SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
-SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
SET GLOBAL general_log='ON';
-SELECT argument from mysql.general_log WHERE argument NOT LIKE 'SELECT%';
-argument
SET SESSION wsrep_osu_method=TOI;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
SET SESSION wsrep_osu_method=RSU;
@@ -16,5 +12,5 @@ COUNT(*) = 2
SET GLOBAL general_log='ON';
SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%';
COUNT(*) = 0
-1
+0
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test
index 6b5eb823985..cb5db1b208a 100644
--- a/mysql-test/suite/galera/t/MW-44.test
+++ b/mysql-test/suite/galera/t/MW-44.test
@@ -6,21 +6,13 @@
--source include/have_innodb.inc
--connection node_1
-SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
---let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log;
---source include/wait_condition.inc
--connection node_2
-SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
---let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log;
---source include/wait_condition.inc
--connection node_1
SET GLOBAL general_log='ON';
-SELECT argument from mysql.general_log WHERE argument NOT LIKE 'SELECT%';
-
SET SESSION wsrep_osu_method=TOI;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
SET SESSION wsrep_osu_method=RSU;