
06 Sep '18
revision-id: d527bf5390aa0a7810ebafbe15fd96310062e44e (mariadb-10.0.36-25-gd527bf5390a)
parent(s): 0ccba62db385139caae514f70b31187bdce0de88 a816eac92ac2381e1b9cd4d655e733bdeafb173e
author: Oleksandr Byelkin
committer: Oleksandr Byelkin
timestamp: 2018-09-06 21:04:56 +0200
message:
Merge branch 'merge-tokudb-5.6' into 10.0
storage/tokudb/CMakeLists.txt | 8 +-
storage/tokudb/PerconaFT/CMakeLists.txt | 3 +-
.../cmake_modules/TokuSetupCompiler.cmake | 3 +
.../tokudb/PerconaFT/ft/cachetable/cachetable.cc | 21 +-
.../tokudb/PerconaFT/ft/cachetable/cachetable.h | 8 +-
.../tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft-test-helpers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft.h | 3 +
storage/tokudb/PerconaFT/ft/node.cc | 2 +
.../PerconaFT/ft/serialize/block_allocator.cc | 2 +-
.../tokudb/PerconaFT/ft/tests/cachetable-4357.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-4365.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-5097.cc | 6 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc | 7 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-all-write.cc | 5 +-
.../ft/tests/cachetable-checkpoint-pending.cc | 8 +-
.../ft/tests/cachetable-checkpoint-pinned-nodes.cc | 6 +-
.../ft/tests/cachetable-cleaner-checkpoint.cc | 5 +-
.../ft/tests/cachetable-cleaner-checkpoint2.cc | 5 +-
.../cachetable-cleaner-thread-attrs-accumulate.cc | 8 +-
.../cachetable-cleaner-thread-everything-pinned.cc | 5 +-
...etable-cleaner-thread-nothing-needs-flushing.cc | 5 +-
.../cachetable-cleaner-thread-same-fullhash.cc | 7 +-
.../ft/tests/cachetable-cleaner-thread-simple.cc | 7 +-
.../ft/tests/cachetable-clock-eviction.cc | 9 +-
.../ft/tests/cachetable-clock-eviction2.cc | 9 +-
.../ft/tests/cachetable-clock-eviction3.cc | 9 +-
.../ft/tests/cachetable-clock-eviction4.cc | 9 +-
.../ft/tests/cachetable-clone-checkpoint.cc | 5 +-
.../cachetable-clone-partial-fetch-pinned-node.cc | 7 +-
.../ft/tests/cachetable-clone-partial-fetch.cc | 7 +-
.../ft/tests/cachetable-clone-pin-nonblocking.cc | 7 +-
.../ft/tests/cachetable-clone-unpin-remove.cc | 5 +-
.../ft/tests/cachetable-eviction-close-test.cc | 4 -
.../ft/tests/cachetable-eviction-close-test2.cc | 4 -
.../ft/tests/cachetable-eviction-getandpin-test.cc | 14 +-
.../tests/cachetable-eviction-getandpin-test2.cc | 12 +-
.../ft/tests/cachetable-fetch-inducing-evictor.cc | 15 +-
.../ft/tests/cachetable-flush-during-cleaner.cc | 3 +-
.../ft/tests/cachetable-getandpin-test.cc | 8 +-
.../cachetable-kibbutz_and_flush_cachefile.cc | 3 +-
.../PerconaFT/ft/tests/cachetable-partial-fetch.cc | 18 +-
.../ft/tests/cachetable-pin-checkpoint.cc | 6 -
.../cachetable-pin-nonblocking-checkpoint-clean.cc | 9 +-
.../ft/tests/cachetable-prefetch-close-test.cc | 2 -
.../ft/tests/cachetable-prefetch-getandpin-test.cc | 12 +-
.../ft/tests/cachetable-put-checkpoint.cc | 9 -
.../PerconaFT/ft/tests/cachetable-simple-clone.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-clone2.cc | 5 +-
.../PerconaFT/ft/tests/cachetable-simple-close.cc | 20 +-
.../ft/tests/cachetable-simple-maybe-get-pin.cc | 3 +-
.../ft/tests/cachetable-simple-pin-cheap.cc | 9 +-
.../ft/tests/cachetable-simple-pin-dep-nodes.cc | 8 +-
.../cachetable-simple-pin-nonblocking-cheap.cc | 19 +-
.../ft/tests/cachetable-simple-pin-nonblocking.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-simple-pin.cc | 11 +-
.../ft/tests/cachetable-simple-put-dep-nodes.cc | 6 +-
.../cachetable-simple-read-pin-nonblocking.cc | 13 +-
.../ft/tests/cachetable-simple-read-pin.cc | 13 +-
.../cachetable-simple-unpin-remove-checkpoint.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-verify.cc | 5 +-
.../tokudb/PerconaFT/ft/tests/cachetable-test.cc | 22 +-
.../ft/tests/cachetable-unpin-and-remove-test.cc | 4 +-
.../cachetable-unpin-remove-and-checkpoint.cc | 6 +-
.../PerconaFT/ft/tests/cachetable-unpin-test.cc | 2 -
storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc | 178 +
storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc | 208 +
storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc | 2 +
storage/tokudb/PerconaFT/ft/txn/rollback.cc | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp | 2 +-
storage/tokudb/PerconaFT/portability/memory.cc | 14 +-
storage/tokudb/PerconaFT/portability/toku_assert.h | 2 +-
.../tokudb/PerconaFT/portability/toku_debug_sync.h | 3 +-
.../PerconaFT/portability/toku_instr_mysql.cc | 6 +-
.../PerconaFT/portability/toku_instrumentation.h | 6 +-
.../PerconaFT/portability/toku_portability.h | 2 +-
.../tokudb/PerconaFT/portability/toku_race_tools.h | 2 +-
storage/tokudb/PerconaFT/src/tests/get_last_key.cc | 32 +-
storage/tokudb/PerconaFT/src/ydb.cc | 3 +
storage/tokudb/PerconaFT/util/dmt.cc | 4 +-
storage/tokudb/PerconaFT/util/minicron.cc | 3 +-
storage/tokudb/PerconaFT/util/scoped_malloc.cc | 2 +-
.../util/tests/minicron-change-period-data-race.cc | 66 +
storage/tokudb/ha_tokudb.cc | 325 +-
storage/tokudb/ha_tokudb.h | 92 +-
storage/tokudb/ha_tokudb_admin.cc | 8 +-
storage/tokudb/ha_tokudb_alter_55.cc | 4 +
storage/tokudb/ha_tokudb_alter_56.cc | 265 +-
storage/tokudb/ha_tokudb_alter_common.cc | 6 +-
storage/tokudb/ha_tokudb_update.cc | 96 +-
storage/tokudb/hatoku_cmp.cc | 33 +-
storage/tokudb/hatoku_cmp.h | 14 +-
storage/tokudb/hatoku_defines.h | 50 +-
storage/tokudb/hatoku_hton.cc | 169 +-
storage/tokudb/hatoku_hton.h | 25 +-
storage/tokudb/mysql-test/rpl/disabled.def | 1 +
.../r/rpl_mixed_replace_into.result | 0
.../rpl/r/rpl_parallel_tokudb_delete_pk.result | 5 -
...pl_parallel_tokudb_update_pk_uc0_lookup0.result | 5 -
.../rpl/r/rpl_parallel_tokudb_write_pk.result | 2 -
.../r/rpl_row_replace_into.result | 0
.../r/rpl_stmt_replace_into.result | 0
.../mysql-test/rpl/r/rpl_xa_interleave.result | 59 +
.../t/rpl_mixed_replace_into.test | 0
.../t/rpl_row_replace_into.test | 0
.../t/rpl_stmt_replace_into.test | 0
.../tokudb/mysql-test/rpl/t/rpl_xa_interleave.test | 103 +
.../tokudb/include/fast_update_gen_footer.inc | 2 +
.../include/fast_update_gen_footer_silent.inc | 9 +
.../tokudb/include/fast_update_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_update_int.inc | 48 +
.../tokudb/include/fast_upsert_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_upsert_int.inc | 19 +
.../tokudb/mysql-test/tokudb/include/have_mrr.inc | 0
.../tokudb/include/setup_fast_update_upsert.inc | 8 +
.../tokudb/mysql-test/tokudb/r/compressions.result | 11 +
.../tokudb/r/fast_update_binlog_mixed.result | 225 +-
.../tokudb/r/fast_update_binlog_row.result | 19 +-
.../tokudb/r/fast_update_binlog_statement.result | 222 +-
.../mysql-test/tokudb/r/fast_update_blobs.result | 18253 +---------
.../r/fast_update_blobs_fixed_varchar.result | 33026 ------------------
.../tokudb/r/fast_update_blobs_with_varchar.result | 32771 +-----------------
.../mysql-test/tokudb/r/fast_update_char.result | 60 +-
.../tokudb/r/fast_update_deadlock.result | 19 +-
.../tokudb/r/fast_update_decr_floor.result | 314 +-
.../r/fast_update_disable_slow_update.result | 7 -
.../mysql-test/tokudb/r/fast_update_error.result | 12 +-
.../mysql-test/tokudb/r/fast_update_int.result | 562 +-
.../tokudb/r/fast_update_int_bounds.result | 52 +-
.../mysql-test/tokudb/r/fast_update_key.result | 54 +-
.../mysql-test/tokudb/r/fast_update_sqlmode.result | 21 +-
.../tokudb/r/fast_update_uint_bounds.result | 36 +-
.../mysql-test/tokudb/r/fast_update_varchar.result | 13575 +-------
.../mysql-test/tokudb/r/fast_upsert_bin_pad.result | Bin 659 -> 738 bytes
.../mysql-test/tokudb/r/fast_upsert_char.result | 24 +-
.../tokudb/r/fast_upsert_deadlock.result | 19 +-
.../mysql-test/tokudb/r/fast_upsert_int.result | 428 +-
.../mysql-test/tokudb/r/fast_upsert_key.result | 43 +-
.../mysql-test/tokudb/r/fast_upsert_sqlmode.result | 23 +-
.../mysql-test/tokudb/r/fast_upsert_values.result | 18 +-
.../tokudb/mysql-test/tokudb/r/tokudb_mrr.result | 326 +
storage/tokudb/mysql-test/tokudb/suite.pm | 6 +
.../tokudb/mysql-test/tokudb/t/compressions.test | 68 +
storage/tokudb/mysql-test/tokudb/t/disabled.def | 24 -
.../tokudb/t/fast_update_binlog_mixed-master.opt | 2 +
.../tokudb/t/fast_update_binlog_mixed.test | 15 +-
.../tokudb/t/fast_update_binlog_row-master.opt | 2 +
.../tokudb/t/fast_update_binlog_row.test | 19 +-
.../t/fast_update_binlog_statement-master.opt | 2 +
.../tokudb/t/fast_update_binlog_statement.test | 15 +-
.../mysql-test/tokudb/t/fast_update_blobs.py | 57 -
.../mysql-test/tokudb/t/fast_update_blobs.test | 18575 +----------
.../tokudb/t/fast_update_blobs_fixed_varchar.py | 63 -
.../tokudb/t/fast_update_blobs_fixed_varchar.test | 33287 -------------------
.../tokudb/t/fast_update_blobs_with_varchar.py | 62 -
.../tokudb/t/fast_update_blobs_with_varchar.test | 33115 +-----------------
.../mysql-test/tokudb/t/fast_update_char.test | 66 +-
.../mysql-test/tokudb/t/fast_update_deadlock.test | 21 +-
.../mysql-test/tokudb/t/fast_update_decr_floor.py | 58 -
.../tokudb/t/fast_update_decr_floor.test | 409 +-
.../tokudb/t/fast_update_disable_slow_update.test | 17 -
.../mysql-test/tokudb/t/fast_update_error.test | 16 +-
.../tokudb/mysql-test/tokudb/t/fast_update_int.py | 77 -
.../mysql-test/tokudb/t/fast_update_int.test | 682 +-
.../tokudb/t/fast_update_int_bounds.test | 55 +-
.../mysql-test/tokudb/t/fast_update_key.test | 63 +-
.../mysql-test/tokudb/t/fast_update_sqlmode.test | 25 +-
.../tokudb/t/fast_update_uint_bounds.test | 42 +-
.../mysql-test/tokudb/t/fast_update_varchar.py | 63 -
.../mysql-test/tokudb/t/fast_update_varchar.test | 7390 +---
.../mysql-test/tokudb/t/fast_upsert_bin_pad.test | 19 +-
.../mysql-test/tokudb/t/fast_upsert_char.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_deadlock.test | 22 +-
.../tokudb/mysql-test/tokudb/t/fast_upsert_int.py | 50 -
.../mysql-test/tokudb/t/fast_upsert_int.test | 486 +-
.../mysql-test/tokudb/t/fast_upsert_key.test | 46 +-
.../mysql-test/tokudb/t/fast_upsert_sqlmode.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_values.test | 21 +-
storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test | 73 +
.../tokudb/mysql-test/tokudb_bugs/r/PS-3773.result | 8 +
.../r/alter_table_comment_rebuild_data.result | 177 +
.../tokudb/mysql-test/tokudb_bugs/t/PS-3773.test | 26 +
.../t/alter_table_comment_rebuild_data.test | 188 +
storage/tokudb/tokudb_debug.h | 5 -
storage/tokudb/tokudb_dir_cmd.h | 6 +-
storage/tokudb/tokudb_information_schema.cc | 74 +-
storage/tokudb/tokudb_sysvars.cc | 122 +-
storage/tokudb/tokudb_sysvars.h | 16 +-
storage/tokudb/tokudb_thread.h | 26 +-
storage/tokudb/tokudb_update_fun.cc | 230 +-
192 files changed, 3936 insertions(+), 194538 deletions(-)
diff --cc storage/tokudb/CMakeLists.txt
index 3099e704497,0ac3c20bf16..72fbe45cfc9
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@@ -1,11 -1,7 +1,11 @@@
- SET(TOKUDB_VERSION 5.6.39-83.1)
-SET(TOKUDB_VERSION )
++SET(TOKUDB_VERSION 5.6.41-84.1)
# PerconaFT only supports x86-64 and cmake-2.8.9+
-IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
- NOT CMAKE_VERSION VERSION_LESS "2.8.9")
+IF(CMAKE_VERSION VERSION_LESS "2.8.9")
+ MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
+ELSEIF(NOT HAVE_DLOPEN)
+ MESSAGE(STATUS "dlopen is required by TokuDB")
+ELSEIF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR
+ CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
CHECK_CXX_SOURCE_COMPILES(
"
struct a {int b; int c; };
diff --cc storage/tokudb/PerconaFT/ft/ft.h
index 7a3c4fa783c,7a3c4fa783c..ff0b63b2b12
--- a/storage/tokudb/PerconaFT/ft/ft.h
+++ b/storage/tokudb/PerconaFT/ft/ft.h
@@@ -44,6 -44,6 +44,9 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "ft/ft-ops.h"
#include "ft/logger/log.h"
#include "util/dbt.h"
++#ifndef TOKU_MYSQL_WITH_PFS
++#include <my_global.h>
++#endif
typedef struct ft *FT;
typedef struct ft_options *FT_OPTIONS;
diff --cc storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
index b7b4c0ab233,6f69c3c31b9..d742555f878
--- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
+++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
@@@ -18,7 -18,7 +18,7 @@@ int toku_pthread_create(const toku_inst
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg) {
- #if (MYSQL_VERSION_MAJOR >= 5) && (MYSQL_VERSION_MINOR >= 7)
-#if (MYSQL_VERSION_ID >= 50700)
++#if (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
return PSI_THREAD_CALL(spawn_thread)(
key.id(), reinterpret_cast<my_thread_handle *>(thread),
attr, start_routine, arg);
diff --cc storage/tokudb/ha_tokudb.cc
index 7a328e31261,548ac5c7b09..4637ac1bf5f
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@@ -34,20 -34,7 +34,20 @@@ Copyright (c) 2006, 2015, Percona and/o
pfs_key_t ha_tokudb_mutex_key;
pfs_key_t num_DBs_lock_key;
- #if TOKU_INCLUDE_EXTENDED_KEYS
++#if defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+static inline uint get_ext_key_parts(const KEY *key) {
+#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
+ (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
+ return key->actual_key_parts;
+#elif defined(MARIADB_BASE_VERSION)
+ return key->ext_key_parts;
+#else
+#error
+#endif
+}
- #endif
++#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+
- HASH TOKUDB_SHARE::_open_tables;
+ std::unordered_map<std::string, TOKUDB_SHARE*> TOKUDB_SHARE::_open_tables;
tokudb::thread::mutex_t TOKUDB_SHARE::_open_tables_mutex;
static const char* ha_tokudb_exts[] = {
@@@ -7221,8 -7262,8 +7263,8 @@@ int ha_tokudb::create
form->s->write_frm_image();
#endif
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- const tokudb::sysvars::format_t row_format =
+ const tokudb::sysvars::row_format_t row_format =
(tokudb::sysvars::row_format_t)form->s->option_struct->row_format;
#else
// TDB-76 : CREATE TABLE ... LIKE ... does not use source row_format on
diff --cc storage/tokudb/ha_tokudb.h
index a2fd747bb92,1f47308c978..6f592617b76
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@@ -1072,7 -1085,28 +1085,8 @@@ private
bool in_rpl_write_rows;
bool in_rpl_delete_rows;
bool in_rpl_update_rows;
+ #endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
};
-#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-struct ha_table_option_struct {
- uint row_format;
-};
-
-struct ha_index_option_struct {
- bool clustering;
-};
-
-static inline bool key_is_clustering(const KEY *key) {
- return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
-}
-
-#else
-
-static inline bool key_is_clustering(const KEY *key) {
- return key->flags & HA_CLUSTERING;
-}
-#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-
#endif // _HA_TOKUDB_H
diff --cc storage/tokudb/ha_tokudb_update.cc
index 9fe5e729ec4,5b09279afc5..bae19ba8b16
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@@ -52,6 -50,6 +50,7 @@@ Copyright (c) 2006, 2015, Percona and/o
// Support more complicated update expressions
// Replace field_offset
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
// Debug function to dump an Item
static void dump_item(Item* item) {
fprintf(stderr, "%u", item->type());
@@@ -1131,5 -1127,3 +1128,4 @@@ int ha_tokudb::send_upsert_message
return error;
}
-
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
diff --cc storage/tokudb/hatoku_defines.h
index 92d7da86edf,e2fbe85b3b1..66a8fa5d982
--- a/storage/tokudb/hatoku_defines.h
+++ b/storage/tokudb/hatoku_defines.h
@@@ -35,8 -35,8 +35,8 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "log.h"
#include "sql_class.h"
#include "sql_show.h"
- #include "discover.h"
+ #include "item_cmpfunc.h"
-#include <binlog.h>
+//#include <binlog.h>
#include "debug_sync.h"
#undef PACKAGE
@@@ -117,20 -142,21 +142,22 @@@
#endif
#endif
#define TOKU_OPTIMIZE_WITH_RECREATE 1
+ #define TOKU_INCLUDE_RFR 1
#elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599
-// mysql 5.5 and mariadb 5.5
#define TOKU_USE_DB_TYPE_TOKUDB 1
-#define TOKU_INCLUDE_ALTER_56 1
-#define TOKU_INCLUDE_ALTER_55 1
-#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1
+#define TOKU_INCLUDE_ALTER_56 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ALTER_55 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0 /* MariaDB 5.5 */
#define TOKU_INCLUDE_XA 1
-#define TOKU_INCLUDE_WRITE_FRM_DATA 1
-#define TOKU_PARTITION_WRITE_FRM_DATA 1
+#define TOKU_PARTITION_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+ #define TOKU_INCLUDE_DISCOVER_FRM 1
-#define TOKU_INCLUDE_UPSERT 1
+#define TOKU_INCLUDE_UPSERT 0 /* MariaDB 5.5 */
#if defined(MARIADB_BASE_VERSION)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
+#define TOKU_INCLUDE_OPTION_STRUCTS 1
+#define TOKU_CLUSTERING_IS_COVERING 1
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
#else
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
diff --cc storage/tokudb/hatoku_hton.cc
index 693f9d28a9a,610c9e07be0..ce5e396146b
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@@ -62,14 -76,16 +64,16 @@@ static bool tokudb_show_status
THD* thd,
stat_print_fn* print,
enum ha_stat_type);
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig);
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static int tokudb_close_connection(handlerton* hton, THD* thd);
-static void tokudb_kill_connection(handlerton *hton, THD *thd);
+static void tokudb_kill_connection(handlerton *hton, THD *thd, enum thd_kill_levels level);
static int tokudb_commit(handlerton* hton, THD* thd, bool all);
static int tokudb_rollback(handlerton* hton, THD* thd, bool all);
- #if TOKU_INCLUDE_XA
+ #if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all);
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len);
static int tokudb_commit_by_xid(handlerton* hton, XID* xid);
@@@ -120,8 -138,8 +126,8 @@@ handlerton* tokudb_hton
const char* ha_tokudb_ext = ".tokudb";
DB_ENV* db_env;
-#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static tokudb::thread::mutex_t tokudb_map_mutex;
- #if TOKU_THDVAR_MEMALLOC_BUG
++#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static TREE tokudb_map;
struct tokudb_map_pair {
THD* thd;
@@@ -388,14 -408,16 +396,16 @@@ static int tokudb_init_func(void *p)
tokudb_hton->panic = tokudb_end;
tokudb_hton->flush_logs = tokudb_flush_logs;
tokudb_hton->show_status = tokudb_show_status;
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
tokudb_hton->handle_fatal_signal = tokudb_handle_fatal_signal;
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- tokudb_hton->table_options = tokudb_table_options;
- tokudb_hton->index_options = tokudb_index_options;
+ tokudb_hton->table_options = tokudb::sysvars::tokudb_table_options;
+ tokudb_hton->index_options = tokudb::sysvars::tokudb_index_options;
- #endif
+ #endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
if (!tokudb_home)
tokudb_home = mysql_real_data_home;
@@@ -784,8 -807,7 +795,8 @@@ static int tokudb_close_connection(TOKU
return error;
}
- void tokudb_kill_connection(handlerton *hton, THD *thd,
- enum thd_kill_levels level) {
-void tokudb_kill_connection(TOKUDB_UNUSED(handlerton* hton), THD* thd) {
++void tokudb_kill_connection(TOKUDB_UNUSED(handlerton *hton), THD *thd,
++ TOKUDB_UNUSED(enum thd_kill_levels level)) {
TOKUDB_DBUG_ENTER("");
db_env->kill_waiter(db_env, thd);
DBUG_VOID_RETURN;
@@@ -883,7 -905,7 +894,7 @@@ extern "C" enum durability_properties t
#endif
// Determine if an fsync is used when a transaction is committed.
- static bool tokudb_sync_on_commit(THD* thd, tokudb_trx_data* trx, DB_TXN* txn) {
-static bool tokudb_sync_on_commit(THD* thd) {
++static bool tokudb_sync_on_commit(THD* thd, DB_TXN* txn) {
#if MYSQL_VERSION_ID >= 50600
// Check the client durability property which is set during 2PC
if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
@@@ -906,8 -928,7 +917,8 @@@ static int tokudb_commit(handlerton * h
DB_TXN **txn = all ? &trx->all : &trx->stmt;
DB_TXN *this_txn = *txn;
if (this_txn) {
- uint32_t syncflag = tokudb_sync_on_commit(thd) ? 0 : DB_TXN_NOSYNC;
+ uint32_t syncflag =
- tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
++ tokudb_sync_on_commit(thd, this_txn) ? 0 : DB_TXN_NOSYNC;
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_TXN,
"commit trx %u txn %p syncflag %u",
diff --cc storage/tokudb/mysql-test/rpl/disabled.def
index 4c1a9a3e785,00000000000..282e343d57f
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/rpl/disabled.def
+++ b/storage/tokudb/mysql-test/rpl/disabled.def
@@@ -1,15 -1,0 +1,16 @@@
+rpl_tokudb_delete_pk: unreliable, uses timestamp differences
+rpl_tokudb_delete_pk_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_write_pk: unreliable, uses timestamp differences
+rpl_tokudb_write_pk_uc1: unreliable, uses timestamp differences
+rpl_tokudb_write_unique: unreliable, uses timestamp differences
+rpl_tokudb_write_unique_uc1: unreliable, uses timestamp differences
+rpl_tokudb_read_only_ff: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tf: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tt: unreliable, uses timestamp differences
++rpl_tokudb_read_only_ft: no TOKU_INCLUDE_RFR
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
index 5935e5ddcbd,afbc4b50da8..48ea60013ad
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
index 8bb426d9448,7aab8947940..10375677c8d
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
index ca547e34be2,64b495350c2..1cb047bbf62
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
@@@ -3,9 -8,10 +3,7 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_unique_checks%';
Variable_name Value
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
-select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
index 00000000000,72e8644f7f2..53564ab0fe4
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
@@@ -1,0 -1,62 +1,59 @@@
+ include/master-slave.inc
-Warnings:
-Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
-Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+ [connection master]
+ CREATE TABLE t1(`a` INT) ENGINE=TokuDB;
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA COMMIT 'x1';
+ XA COMMIT 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA COMMIT 'x2';
+ XA COMMIT 'y2';
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA ROLLBACK 'x1';
+ XA ROLLBACK 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA ROLLBACK 'x2';
+ XA ROLLBACK 'y2';
+ TABLES t1 and t2 must be equal otherwise an error will be thrown.
+ include/diff_tables.inc [master:test.t1, slave:test.t1]
+ DROP TABLE t1;
+ include/rpl_end.inc
diff --cc storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
index 00000000000,00000000000..e69de29bb2d
new file mode 100644
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
diff --cc storage/tokudb/mysql-test/tokudb/r/compressions.result
index 00000000000,87ba94ebbe8..03e0d18e9eb
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/compressions.result
+++ b/storage/tokudb/mysql-test/tokudb/r/compressions.result
@@@ -1,0 -1,6 +1,11 @@@
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
++FOUND /compression_method=0/ in dump
++FOUND /compression_method=7/ in dump
++FOUND /compression_method=9/ in dump
++FOUND /compression_method=10/ in dump
++FOUND /compression_method=11/ in dump
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
index 00000000000,9eb0c2f5e34..ba469a3ac96
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
+++ b/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
@@@ -1,0 -1,706 +1,326 @@@
-set optimizer_switch='mrr=on,mrr_cost_based=off';
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+ create table t1(a int);
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
+ insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+ create table t2(a int);
+ insert into t2 select A.a + 10*(B.a + 10*C.a) from t1 A, t1 B, t1 C;
+ create table t3 (
+ a char(8) not null, b char(8) not null, filler char(200),
+ key(a)
+ );
+ insert into t3 select @a:=concat('c-', 1000+ A.a, '=w'), @a, 'filler' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 2000+A.a, '=w'),
+ 'filler-1' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 3000+A.a, '=w'),
+ 'filler-2' from t2 A;
+ select a,filler from t3 where a >= 'c-9011=w';
+ a filler
+ select a,filler from t3 where a >= 'c-1011=w' and a <= 'c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=z', 'c-1013=z', 'err');
+ insert into t3 values ('a-1014=w', 'a-1014=w', 'err');
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ delete from t3 where b in ('c-1013=z', 'a-1014=w');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=w', 'del-me', 'inserted');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
-c-1013=w inserted
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
++c-1013=w inserted
+ delete from t3 where b='del-me';
+ alter table t3 add primary key(b);
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w', 'c-1021=w',
+ 'c-1022=w', 'c-1023=w', 'c-1024=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ c-1024=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1020=w') or
+ b IN ('c-1021=w', 'c-1022=w', 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w') or
+ (b>='c-1021=w' and b<= 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
++drop table if exists t4;
+ create table t4 (a varchar(10), b int, c char(10), filler char(200),
+ key idx1 (a, b, c));
+ insert into t4 (filler) select concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-1', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-222', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'bb-1',NULL,'cc-2', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'zz-1',NULL,'cc-2', 'filler-data' from t2 order by a limit 500;
+ explain
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Rowid-ordered scan
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ a b c filler
+ NULL NULL NULL NULL-15
+ NULL NULL NULL NULL-14
+ NULL NULL NULL NULL-13
+ NULL NULL NULL NULL-12
+ NULL NULL NULL NULL-11
+ NULL NULL NULL NULL-10
+ NULL NULL NULL NULL-9
+ NULL NULL NULL NULL-8
+ NULL NULL NULL NULL-7
+ NULL NULL NULL NULL-6
+ NULL NULL NULL NULL-5
+ NULL NULL NULL NULL-4
+ NULL NULL NULL NULL-3
+ NULL NULL NULL NULL-2
+ NULL NULL NULL NULL-1
+ explain
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Rowid-ordered scan
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ select * from t4 ignore index(idx1) where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ drop table t1, t2, t3, t4;
+ create table t1 (a int, b int not null,unique key (a,b),index(b));
+ insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6);
++Warnings:
++Warning 1062 Duplicate entry '6-6' for key 'a'
+ create table t2 like t1;
+ insert into t2 select * from t1;
+ alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
+ select * from t1 where a is null;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3;
+ a b c
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ drop table t1, t2;
-CREATE TABLE t1 (
-ID int(10) unsigned NOT NULL AUTO_INCREMENT,
-col1 int(10) unsigned DEFAULT NULL,
-key1 int(10) unsigned NOT NULL DEFAULT '0',
-key2 int(10) unsigned DEFAULT NULL,
-text1 text,
-text2 text,
-col2 smallint(6) DEFAULT '100',
-col3 enum('headers','bodyandsubject') NOT NULL DEFAULT 'bodyandsubject',
-col4 tinyint(3) unsigned NOT NULL DEFAULT '0',
-PRIMARY KEY (ID),
-KEY (key1),
-KEY (key2)
-) AUTO_INCREMENT=6 DEFAULT CHARSET=utf8;
-INSERT INTO t1 VALUES
-(1,NULL,1130,NULL,'Hello',NULL,100,'bodyandsubject',0),
-(2,NULL,1130,NULL,'bye',NULL,100,'bodyandsubject',0),
-(3,NULL,1130,NULL,'red',NULL,100,'bodyandsubject',0),
-(4,NULL,1130,NULL,'yellow',NULL,100,'bodyandsubject',0),
-(5,NULL,1130,NULL,'blue',NULL,100,'bodyandsubject',0);
-select * FROM t1 WHERE key1=1130 AND col1 IS NULL ORDER BY text1;
-ID col1 key1 key2 text1 text2 col2 col3 col4
-5 NULL 1130 NULL blue NULL 100 bodyandsubject 0
-2 NULL 1130 NULL bye NULL 100 bodyandsubject 0
-1 NULL 1130 NULL Hello NULL 100 bodyandsubject 0
-3 NULL 1130 NULL red NULL 100 bodyandsubject 0
-4 NULL 1130 NULL yellow NULL 100 bodyandsubject 0
-drop table t1;
-
-BUG#37851: Crash in test_if_skip_sort_order tab->select is zero
-
-CREATE TABLE t1 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1 VALUES (1);
-CREATE TABLE t2 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-int_key int(11) DEFAULT NULL,
-PRIMARY KEY (pk),
-KEY int_key (int_key)
-);
-INSERT INTO t2 VALUES (1,1),(2,6),(3,0);
-EXPLAIN EXTENDED
-SELECT MIN(t1.pk)
-FROM t1 WHERE EXISTS (
-SELECT t2.pk
-FROM t2
-WHERE t2.int_key IS NULL
-GROUP BY t2.pk
-);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t2 ref int_key int_key 5 const 1 100.00 Using where; Using index
-Warnings:
-Note 1003 /* select#1 */ select min(`test`.`t1`.`pk`) AS `MIN(t1.pk)` from `test`.`t1` where 0
-DROP TABLE t1, t2;
-#
-# BUG#42048 Discrepancy between MyISAM and Maria's ICP implementation
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b char(20), filler char(200), key(a,b(10)));
-insert into t1 select A.a + 10*(B.a + 10*C.a), 'bbb','filler' from t0 A, t0 B, t0 C;
-update t1 set b=repeat(char(65+a), 20) where a < 25;
-This must show range + using index condition:
-explain select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL x Using where
-select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-a b filler
-0 AAAAAAAAAAAAAAAAAAAA filler
-1 BBBBBBBBBBBBBBBBBBBB filler
-2 CCCCCCCCCCCCCCCCCCCC filler
-3 DDDDDDDDDDDDDDDDDDDD filler
-4 EEEEEEEEEEEEEEEEEEEE filler
-5 FFFFFFFFFFFFFFFFFFFF filler
-6 GGGGGGGGGGGGGGGGGGGG filler
-7 HHHHHHHHHHHHHHHHHHHH filler
-8 IIIIIIIIIIIIIIIIIIII filler
-9 JJJJJJJJJJJJJJJJJJJJ filler
-drop table t0,t1;
-#
-# BUG#41136: ORDER BY + range access: EXPLAIN shows "Using MRR" while MRR is actually not used
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b int, key(a));
-insert into t1 select A.a + 10 *(B.a + 10*C.a), A.a + 10 *(B.a + 10*C.a) from t0 A, t0 B, t0 C;
-This mustn't show "Using MRR":
-explain select * from t1 where a < 20 order by a;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 20 Using where
-drop table t0, t1;
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=64;
-create table t1(a int);
-insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t2(a char(8), b char(8), c char(8), filler char(100), key k1(a,b,c) );
-insert into t2 select
-concat('a-', 1000 + A.a, '-a'),
-concat('b-', 1000 + B.a, '-b'),
-concat('c-', 1000 + C.a, '-c'),
-'filler'
-from t1 A, t1 B, t1 C;
-EXPLAIN select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k1 k1 9 NULL 100 Using where; Using MRR
-select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-count(length(a) + length(filler))
-100
-drop table t2;
-create table t2 (a char(100), b char(100), c char(100), d int,
-filler char(10), key(d), primary key (a,b,c));
-insert into t2 select A.a, B.a, B.a, A.a, 'filler' from t1 A, t1 B;
-explain select * from t2 force index (d) where d < 10;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range d d 5 NULL # Using where
-drop table t2;
-drop table t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-create table t1 (f1 int not null, f2 int not null,f3 int not null, f4 char(1), primary key (f1,f2), key ix(f3));
-select * from t1 where (f3>=5 and f3<=10) or (f3>=1 and f3<=4);
-f1 f2 f3 f4
-1 1 1 A
-10 10 10 A
-2 2 2 A
-3 3 3 A
-4 4 4 A
-5 5 5 A
-6 6 6 A
-7 7 7 A
-8 8 8 A
-9 9 9 A
-drop table t1;
-
-BUG#37977: Wrong result returned on GROUP BY + OR + Innodb
-
-CREATE TABLE t1 (
-`pk` int(11) NOT NULL AUTO_INCREMENT,
-`int_nokey` int(11) NOT NULL,
-`int_key` int(11) NOT NULL,
-`date_key` date NOT NULL,
-`date_nokey` date NOT NULL,
-`time_key` time NOT NULL,
-`time_nokey` time NOT NULL,
-`datetime_key` datetime NOT NULL,
-`datetime_nokey` datetime NOT NULL,
-`varchar_key` varchar(5) DEFAULT NULL,
-`varchar_nokey` varchar(5) DEFAULT NULL,
-PRIMARY KEY (`pk`),
-KEY `int_key` (`int_key`),
-KEY `date_key` (`date_key`),
-KEY `time_key` (`time_key`),
-KEY `datetime_key` (`datetime_key`),
-KEY `varchar_key` (`varchar_key`)
-);
-INSERT INTO t1 VALUES
-(1,5,5,'2009-10-16','2009-10-16','09:28:15','09:28:15','2007-09-14 05:34:08','2007-09-14 05:34:08','qk','qk'),
-(2,6,6,'0000-00-00','0000-00-00','23:06:39','23:06:39','0000-00-00 00:00:00','0000-00-00 00:00:00','j','j'),
-(3,10,10,'2000-12-18','2000-12-18','22:16:19','22:16:19','2006-11-04 15:42:50','2006-11-04 15:42:50','aew','aew'),
-(4,0,0,'2001-09-18','2001-09-18','00:00:00','00:00:00','2004-03-23 13:23:35','2004-03-23 13:23:35',NULL,NULL),
-(5,6,6,'2007-08-16','2007-08-16','22:13:38','22:13:38','2004-08-19 11:01:28','2004-08-19 11:01:28','qu','qu');
-select pk from t1 WHERE `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-select pk from t1 WHERE `int_nokey` IS NULL OR `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-drop table t1;
-#
-# BUG#39447: Error with NOT NULL condition and LIMIT 1
-#
-CREATE TABLE t1 (
-id int(11) NOT NULL,
-parent_id int(11) DEFAULT NULL,
-name varchar(10) DEFAULT NULL,
-PRIMARY KEY (id),
-KEY ind_parent_id (parent_id)
-);
-insert into t1 (id, parent_id, name) values
-(10,NULL,'A'),
-(20,10,'B'),
-(30,10,'C'),
-(40,NULL,'D'),
-(50,40,'E'),
-(60,40,'F'),
-(70,NULL,'J');
-SELECT id FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id
-60
-This must show type=index, extra=Using where
-explain SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index ind_parent_id PRIMARY 4 NULL 1 Using where
-SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id parent_id name
-60 40 F
-drop table t1;
-#
-# Bug#50381 "Assertion failing in handler.h:1283:
-# void COST_VECT::add_io(double, double)"
-#
-CREATE TABLE t1 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-CREATE TABLE t2 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-INSERT INTO t2 VALUES (10,'v');
-INSERT INTO t2 VALUES (11,'r');
-SELECT t1.c2
-FROM t2 STRAIGHT_JOIN t1 ON t1.c1 < t2.c1;
-c2
-DROP TABLE t1, t2;
-#
-# Bug#58463: Error Can't find record on SELECT with JOIN and ORDER BY
-#
-CREATE TABLE t1 (
-pk INT NOT NULL,
-PRIMARY KEY (pk)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (2);
-CREATE TABLE t2 (
-pk INT NOT NULL,
-i1 INT NOT NULL,
-i2 INT NOT NULL,
-c1 VARCHAR(1024) CHARACTER SET utf8,
-PRIMARY KEY (pk),
-KEY k1 (i1)
-);
-INSERT INTO t2 VALUES (3, 9, 1, NULL);
-EXPLAIN SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 NULL
-1 SIMPLE t2 const PRIMARY,k1 PRIMARY 4 const 1 Using where
-SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-i1
-DROP TABLE t1, t2;
-#
-# Bug#12321461: CRASH IN DSMRR_IMPL::DSMRR_INIT ON SELECT STRAIGHT_JOIN
-#
-set @save_optimizer_switch = @@optimizer_switch;
-set optimizer_switch='block_nested_loop=off,batched_key_access=off';
-CREATE TABLE t1 (
-pk INTEGER,
-c1 VARCHAR(1) NOT NULL,
-PRIMARY KEY (pk)
-);
-CREATE TABLE t2 (
-c1 VARCHAR(1) NOT NULL
-);
-INSERT INTO t2 VALUES ('v'), ('c');
-EXPLAIN SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 2 NULL
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where
-SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-c1
-DROP TABLE t1,t2;
-set optimizer_switch= @save_optimizer_switch;
-#
-# Bug#13249966 MRR: RANDOM ERROR DUE TO UNINITIALIZED RES WITH
-# SMALL READ_RND_BUFFER_SIZE
-#
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=1;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-1
-CREATE TABLE t1 (
-i1 INTEGER NOT NULL,
-i2 INTEGER NOT NULL,
-KEY (i2)
-);
-INSERT INTO t1 VALUES (0,1),(1,2),(2,3);
-EXPLAIN SELECT i1
-FROM t1
-WHERE i2 > 2;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i2 i2 4 NULL 1 Using where
-SELECT i1
-FROM t1
-WHERE i2 > 2;
-i1
-2
-DROP TABLE t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-262144
-#
-# Bug 12365385 STRAIGHT_JOIN QUERY QUICKLY EXHAUSTS SYSTEM+VIRT.
-# MEMORY LEADING TO SYSTEM CRASH
-#
-CREATE TABLE ten (a INTEGER);
-INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-CREATE TABLE t1 (
-pk INTEGER NOT NULL,
-i1 INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1
-SELECT a, 1, 'MySQL' FROM ten;
-CREATE TABLE t2 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-c2 varchar(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t2
-SELECT a, 'MySQL', 'MySQL' FROM ten;
-CREATE TABLE t3 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t3
-SELECT a, 'MySQL' FROM ten;
-CREATE TABLE t4 (
-pk int(11) NOT NULL,
-c1_key varchar(10) CHARACTER SET utf8 NOT NULL,
-c2 varchar(10) NOT NULL,
-c3 varchar(10) NOT NULL,
-PRIMARY KEY (pk),
-KEY k1 (c1_key)
-);
-CREATE TABLE t5 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t5
-SELECT a, 'MySQL' FROM ten;
-EXPLAIN SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t5 ALL NULL NULL NULL NULL 10 NULL
-1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t3 ALL NULL NULL NULL NULL 10 Using join buffer (Block Nested Loop)
-1 SIMPLE t4 ALL k1 NULL NULL NULL 1 Range checked for each record (index map: 0x2)
-SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-pk i1 c1 pk c1 c2 pk c1 pk c1_key c2 c3 pk c1
-DROP TABLE ten, t1, t2, t3, t4, t5;
+ #
+ # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ #
+ SET AUTOCOMMIT=0;
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ @@tx_isolation
+ REPEATABLE-READ
+ START TRANSACTION;
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ id select_type table type possible_keys key key_len ref rows Extra
+ 1 SIMPLE t1 range a a 5 NULL 2 Using where
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ dummy a b
+ 3 3 3
+ 5 5 5
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+ INSERT INTO t1 VALUES (2,2,2);
+ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ ROLLBACK;
+ ROLLBACK;
+ DROP TABLE t1;
diff --cc storage/tokudb/mysql-test/tokudb/suite.pm
index 6c52d0110fe,00000000000..58694112e63
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/tokudb/suite.pm
+++ b/storage/tokudb/mysql-test/tokudb/suite.pm
@@@ -1,14 -1,0 +1,20 @@@
+package My::Suite::TokuDB;
+use File::Basename;
+@ISA = qw(My::Suite);
+
+# Ensure we can run the TokuDB tests even if hugepages are enabled
+$ENV{TOKU_HUGE_PAGES_OK}=1;
++my $exe_tokuftdump=
++ ::mtr_exe_maybe_exists(
++ ::vs_config_dirs('storage/tokudb/PerconaFT/tools', 'tokuftdump'),
++ "$::path_client_bindir/tokuftdump",
++ "$::bindir/storage/tokudb/PerconaFT/tools/tokuftdump");
++$ENV{'MYSQL_TOKUFTDUMP'}= ::native_path($exe_tokuftdump);
+
+#return "Not run for embedded server" if $::opt_embedded_server;
+return "No TokuDB engine" unless $ENV{HA_TOKUDB_SO} or $::mysqld_variables{tokudb};
+
+sub is_default { not $::opt_embedded_server }
+
+bless { };
+
diff --cc storage/tokudb/mysql-test/tokudb/t/compressions.test
index 00000000000,3e83cdb8b68..cd2e405c13a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/compressions.test
+++ b/storage/tokudb/mysql-test/tokudb/t/compressions.test
@@@ -1,0 -1,68 +1,68 @@@
+ --source include/have_tokudb.inc
+
+ # The purpose of this test is to perform about as full of an end-to-end
+ # validation that the requested compression algo at the SQL layer is actually
+ # applied to the FT data files. The only practical way to check this is to use
+ # tokuftdump and look at the data files header value for compression_method.
+ # A side effect of this is that the existance of this test will ensure that at
+ # no time will the compression method IDs ever change, if they do, this test
+ # will fail and users data will be irreparably damaged.
+
+ # uncompressed - compression_method=0
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
+ --let $t1_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t1-main'`
+
+ # SNAPPY - compression_method=7
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
+ --let $t2_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t2-main'`
+
+ # QUICKLZ - compression_method=9
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
+ --let $t3_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t3-main'`
+
+ # LZMA - compression_method=10
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
+ --let $t4_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t4-main'`
+
+ # ZLIB (without checksum) - compression_method=11
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
+ --let $t5_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t5-main'`
+
+ --let $datadir = `SELECT @@global.datadir`
+
+ # To ensure we have correct headers written to FT data files and no chance of a
+ # race between header rotation and tokuftdump, lets just perform a clean server
+ # shutdown before we go rooting around in the FT files.
+ --source include/shutdown_mysqld.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/dump
+
+ # uncompressed - compression_method=0
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t1_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=0
+ --source include/search_pattern_in_file.inc
+
+ # SNAPPY - compression_method=7
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t2_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=7
+ --source include/search_pattern_in_file.inc
+
+ # QUICKLZ - compression_method=9
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t3_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=9
+ --source include/search_pattern_in_file.inc
+
+ # LZMA - compression_method=10
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t4_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=10
+ --source include/search_pattern_in_file.inc
+
+ # ZLIB (without checksum) - compression_method=11
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t5_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=11
+ --source include/search_pattern_in_file.inc
+
+ --remove_file $SEARCH_FILE
+ --source include/start_mysqld.inc
+
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
index 00000000000,b30bc18d759..6130933b279
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
+++ b/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
@@@ -1,0 -1,89 +1,73 @@@
+ #
+ # MRR/Tokudb tests, taken from mysqltest/t/innodb_mrr.test
+ # (Turns off all other 6.0 optimizer switches than MRR)
+ #
+
+ --source include/have_tokudb.inc
+ --source include/have_mrr.inc
+
-set optimizer_switch='mrr=on,mrr_cost_based=off';
-
---disable_query_log
-if (`select locate('semijoin', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='semijoin=off';
-}
-if (`select locate('materialization', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='materialization=off';
-}
-if (`select locate('index_condition_pushdown', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='index_condition_pushdown=off';
-}
---enable_query_log
-
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+
+ --source include/mrr_tests.inc
+
+
+ # taken from include/mrr_innodb_tests.inc
+
+ --source include/count_sessions.inc
+
+ # MRR tests that are special for InnoDB (and copied for TokuDB)
+
+ --echo #
+ --echo # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ --echo #
+
+ # This test verifies that a SELECT FOR UPDATE statement executed in
+ # REPEATABLE READ isolation will lock the entire read interval by verifying
+ # that a second transaction trying to update data within this interval will
+ # be blocked.
+
+ connect (con1,localhost,root,,);
+ connect (con2,localhost,root,,);
+
+ connection con1;
+
+ SET AUTOCOMMIT=0;
+
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ START TRANSACTION;
+
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ connection con2;
+
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+
+ --error ER_LOCK_WAIT_TIMEOUT
+ INSERT INTO t1 VALUES (2,2,2);
+ ROLLBACK;
+
+ connection con1;
+
+ ROLLBACK;
+ DROP TABLE t1;
+
+ connection default;
+ disconnect con1;
+ disconnect con2;
+
+ --source include/wait_until_count_sessions.inc
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
index 00000000000,e2e695611b5..49c61790837
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
@@@ -1,0 -1,7 +1,8 @@@
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ SELECT * FROM t1 WHERE b = 2;
+ ERROR HY000: Incorrect key file for table 't1'; try to repair it
+ DROP TABLE t1;
++FOUND /ha_tokudb::read_full_row on table/ in tokudb.bugs.PS-3773.log
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
index 00000000000,f536f5163ef..1bd5aee087a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
@@@ -1,0 -1,186 +1,177 @@@
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ alter table t1 min_rows = 8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter min_rows]
+ alter table t1 max_rows = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter max_rows]
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter avg_row_length]
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter pack_keys]
+ alter table t1 character set = utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter character set]
+ alter table t1 data directory = '/tmp';
+ Warnings:
+ Warning 1618 <DATA DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter data directory]
+ alter table t1 index directory = '/tmp';
+ Warnings:
+ Warning 1618 <INDEX DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter index directory]
+ alter table t1 checksum = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter checksum]
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter delay_key_write]
+ alter table t1 comment = 'test table';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter comment]
+ alter table t1 password = '123456';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter password]
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter connection]
-alter table t1 key_block_size=32;
-show create table t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
- PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
-include/assert.inc [underlying ft file name not changed after alter key_block_size]
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_persistent]
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_auto_recalc]
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_sample_pages]
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter auto_increment]
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name not changed after alter compression method]
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter engine type]
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter convert character]
+ drop table t1;
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
index 00000000000,684f9cbf8d5..e9490e91c33
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
@@@ -1,0 -1,26 +1,26 @@@
+ --source include/have_tokudb.inc
+ --source include/have_debug.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/tokudb.bugs.PS-3773.log
---let $restart_parameters="restart: --log-error=$SEARCH_FILE"
++--let $restart_parameters="--log-error=$SEARCH_FILE"
+ --source include/restart_mysqld.inc
+
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
+
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ --error ER_NOT_KEYFILE
+ SELECT * FROM t1 WHERE b = 2;
+
+ DROP TABLE t1;
+
+ --let SEARCH_PATTERN=ha_tokudb::read_full_row on table
+ --source include/search_pattern_in_file.inc
+
+ --let $restart_parameters=
+ --source include/restart_mysqld.inc
+
+ --remove_file $SEARCH_FILE
+ --let SEARCH_PATTERN=
+ --let SEARCH_FILE=
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
index 00000000000,fc4f3e0fd3d..e0e043f96ab
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
@@@ -1,0 -1,184 +1,188 @@@
+ --source include/have_tokudb.inc
+
+ #
+ # Create a table and get the underlying main ft file name
+ #
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ --let $ori_file= `select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+
+ #
+ # Case 1: alter create options that are ignored by TokuDB
+ #
+
+ # Alter table with min_rows
+ alter table t1 min_rows = 8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter min_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with max_rows
+ alter table t1 max_rows = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter max_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with avg_row_length
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter avg_row_length
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with pack_keys
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter pack_keys
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with default character set
+ alter table t1 character set = utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter character set
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with data directory
+ alter table t1 data directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter data directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with index directory
+ alter table t1 index directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter index directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with checksum
+ alter table t1 checksum = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter checksum
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with delay_key_write
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter delay_key_write
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with comment
+ alter table t1 comment = 'test table';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter comment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with password
+ alter table t1 password = '123456';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter password
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with connection
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter connection
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
++
++#
++# In mariadb changing of key_block_size treated as index change
++#
+ # Alter table with key_block_size
-alter table t1 key_block_size=32;
-show create table t1;
---let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
---let $assert_text= underlying ft file name not changed after alter key_block_size
---let $assert_cond= "$ori_file" = "$new_file"
---source include/assert.inc
++#alter table t1 key_block_size=32;
++#show create table t1;
++#--let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
++#--let $assert_text= underlying ft file name not changed after alter key_block_size
++#--let $assert_cond= "$ori_file" = "$new_file"
++#--source include/assert.inc
+
+ # Alter table with stats_persistent
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_persistent
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_auto_recalc
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_auto_recalc
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_sample_pages
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_sample_pages
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 2: alter create options that only update meta info, i.e inplace
+ #
+
+ # Alter table with auto_increment
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter auto_increment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with compression method
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter compression method
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 3: alter create options that rebuild table using copy algorithm
+ #
+
+ # Alter table with engine type
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter engine type
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ # Alter table with convert character
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter convert character
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ #
+ # clean up
+ #
+ drop table t1;
diff --cc storage/tokudb/tokudb_sysvars.cc
index bbc39dc550a,e8e9f908275..7771204dc11
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@@ -1006,9 -1075,12 +1002,9 @@@ st_mysql_sys_var* system_variables[] =
MYSQL_SYSVAR(support_xa),
#endif
- #if TOKUDB_DEBUG
+ #if defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(debug_pause_background_job_manager),
-#endif // defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(dir_cmd_last_error),
- MYSQL_SYSVAR(dir_cmd_last_error_string),
- MYSQL_SYSVAR(dir_cmd),
+ MYSQL_SYSVAR(debug_pause_background_job_manager),
+#endif // TOKUDB_DEBUG
NULL
};
@@@ -1055,14 -1127,12 +1051,14 @@@ my_bool disable_prefetching(THD* thd)
my_bool disable_slow_alter(THD* thd) {
return (THDVAR(thd, disable_slow_alter) != 0);
}
- #if TOKU_INCLUDE_UPSERT
- my_bool disable_slow_update(THD* thd) {
- return (THDVAR(thd, disable_slow_update) != 0);
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
+ my_bool enable_fast_update(THD* thd) {
+ return (THDVAR(thd, enable_fast_update) != 0);
}
- my_bool disable_slow_upsert(THD* thd) {
- return (THDVAR(thd, disable_slow_upsert) != 0);
+ my_bool enable_fast_upsert(THD* thd) {
+ return (THDVAR(thd, enable_fast_upsert) != 0);
}
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
empty_scan_mode_t empty_scan(THD* thd) {
return (empty_scan_mode_t)THDVAR(thd, empty_scan);
}
@@@ -1139,17 -1211,5 +1137,17 @@@ my_bool support_xa(THD* thd)
return (THDVAR(thd, support_xa) != 0);
}
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+ha_create_table_option tokudb_table_options[] = {
+ HA_TOPTION_SYSVAR("compression", row_format, row_format),
+ HA_TOPTION_END
+};
+
+ha_create_table_option tokudb_index_options[] = {
+ HA_IOPTION_BOOL("clustering", clustering, 0),
+ HA_IOPTION_END
+};
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
} // namespace sysvars
} // namespace tokudb
diff --cc storage/tokudb/tokudb_sysvars.h
index 7701f211729,d81d5fd7999..2454f8fefd2
--- a/storage/tokudb/tokudb_sysvars.h
+++ b/storage/tokudb/tokudb_sysvars.h
@@@ -26,26 -26,6 +26,26 @@@ Copyright (c) 2006, 2015, Percona and/o
#ifndef _TOKUDB_SYSVARS_H
#define _TOKUDB_SYSVARS_H
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+struct ha_table_option_struct {
+ uint row_format;
+};
+
+struct ha_index_option_struct {
+ bool clustering;
+};
+
+static inline bool key_is_clustering(const KEY *key) {
+ return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
+}
+
+#else
+
+static inline bool key_is_clustering(const KEY *key) {
+ return key->flags & HA_CLUSTERING;
+}
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
namespace tokudb {
namespace sysvars {
1
0

06 Sep '18
revision-id: 3d82f0c76377718567f1583b5d38de57c569f94d (mariadb-10.0.36-25-g3d82f0c7637)
parent(s): 0ccba62db385139caae514f70b31187bdce0de88 a816eac92ac2381e1b9cd4d655e733bdeafb173e
author: Oleksandr Byelkin
committer: Oleksandr Byelkin
timestamp: 2018-09-06 18:51:41 +0200
message:
Merge branch 'merge-tokudb-5.6' into 10.0
storage/tokudb/CMakeLists.txt | 8 +-
storage/tokudb/PerconaFT/CMakeLists.txt | 3 +-
.../cmake_modules/TokuSetupCompiler.cmake | 3 +
.../tokudb/PerconaFT/ft/cachetable/cachetable.cc | 21 +-
.../tokudb/PerconaFT/ft/cachetable/cachetable.h | 8 +-
.../tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft-test-helpers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft.h | 3 +
storage/tokudb/PerconaFT/ft/node.cc | 2 +
.../PerconaFT/ft/serialize/block_allocator.cc | 2 +-
.../tokudb/PerconaFT/ft/tests/cachetable-4357.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-4365.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-5097.cc | 6 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc | 7 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-all-write.cc | 5 +-
.../ft/tests/cachetable-checkpoint-pending.cc | 8 +-
.../ft/tests/cachetable-checkpoint-pinned-nodes.cc | 6 +-
.../ft/tests/cachetable-cleaner-checkpoint.cc | 5 +-
.../ft/tests/cachetable-cleaner-checkpoint2.cc | 5 +-
.../cachetable-cleaner-thread-attrs-accumulate.cc | 8 +-
.../cachetable-cleaner-thread-everything-pinned.cc | 5 +-
...etable-cleaner-thread-nothing-needs-flushing.cc | 5 +-
.../cachetable-cleaner-thread-same-fullhash.cc | 7 +-
.../ft/tests/cachetable-cleaner-thread-simple.cc | 7 +-
.../ft/tests/cachetable-clock-eviction.cc | 9 +-
.../ft/tests/cachetable-clock-eviction2.cc | 9 +-
.../ft/tests/cachetable-clock-eviction3.cc | 9 +-
.../ft/tests/cachetable-clock-eviction4.cc | 9 +-
.../ft/tests/cachetable-clone-checkpoint.cc | 5 +-
.../cachetable-clone-partial-fetch-pinned-node.cc | 7 +-
.../ft/tests/cachetable-clone-partial-fetch.cc | 7 +-
.../ft/tests/cachetable-clone-pin-nonblocking.cc | 7 +-
.../ft/tests/cachetable-clone-unpin-remove.cc | 5 +-
.../ft/tests/cachetable-eviction-close-test.cc | 4 -
.../ft/tests/cachetable-eviction-close-test2.cc | 4 -
.../ft/tests/cachetable-eviction-getandpin-test.cc | 14 +-
.../tests/cachetable-eviction-getandpin-test2.cc | 12 +-
.../ft/tests/cachetable-fetch-inducing-evictor.cc | 15 +-
.../ft/tests/cachetable-flush-during-cleaner.cc | 3 +-
.../ft/tests/cachetable-getandpin-test.cc | 8 +-
.../cachetable-kibbutz_and_flush_cachefile.cc | 3 +-
.../PerconaFT/ft/tests/cachetable-partial-fetch.cc | 18 +-
.../ft/tests/cachetable-pin-checkpoint.cc | 6 -
.../cachetable-pin-nonblocking-checkpoint-clean.cc | 9 +-
.../ft/tests/cachetable-prefetch-close-test.cc | 2 -
.../ft/tests/cachetable-prefetch-getandpin-test.cc | 12 +-
.../ft/tests/cachetable-put-checkpoint.cc | 9 -
.../PerconaFT/ft/tests/cachetable-simple-clone.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-clone2.cc | 5 +-
.../PerconaFT/ft/tests/cachetable-simple-close.cc | 20 +-
.../ft/tests/cachetable-simple-maybe-get-pin.cc | 3 +-
.../ft/tests/cachetable-simple-pin-cheap.cc | 9 +-
.../ft/tests/cachetable-simple-pin-dep-nodes.cc | 8 +-
.../cachetable-simple-pin-nonblocking-cheap.cc | 19 +-
.../ft/tests/cachetable-simple-pin-nonblocking.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-simple-pin.cc | 11 +-
.../ft/tests/cachetable-simple-put-dep-nodes.cc | 6 +-
.../cachetable-simple-read-pin-nonblocking.cc | 13 +-
.../ft/tests/cachetable-simple-read-pin.cc | 13 +-
.../cachetable-simple-unpin-remove-checkpoint.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-verify.cc | 5 +-
.../tokudb/PerconaFT/ft/tests/cachetable-test.cc | 22 +-
.../ft/tests/cachetable-unpin-and-remove-test.cc | 4 +-
.../cachetable-unpin-remove-and-checkpoint.cc | 6 +-
.../PerconaFT/ft/tests/cachetable-unpin-test.cc | 2 -
storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc | 178 +
storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc | 208 +
storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc | 2 +
storage/tokudb/PerconaFT/ft/txn/rollback.cc | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp | 2 +-
storage/tokudb/PerconaFT/portability/memory.cc | 14 +-
storage/tokudb/PerconaFT/portability/toku_assert.h | 2 +-
.../tokudb/PerconaFT/portability/toku_debug_sync.h | 3 +-
.../PerconaFT/portability/toku_instr_mysql.cc | 6 +-
.../PerconaFT/portability/toku_instrumentation.h | 6 +-
.../PerconaFT/portability/toku_portability.h | 2 +-
.../tokudb/PerconaFT/portability/toku_race_tools.h | 2 +-
storage/tokudb/PerconaFT/src/tests/get_last_key.cc | 32 +-
storage/tokudb/PerconaFT/src/ydb.cc | 3 +
storage/tokudb/PerconaFT/util/dmt.cc | 4 +-
storage/tokudb/PerconaFT/util/minicron.cc | 3 +-
storage/tokudb/PerconaFT/util/scoped_malloc.cc | 2 +-
.../util/tests/minicron-change-period-data-race.cc | 66 +
storage/tokudb/ha_tokudb.cc | 325 +-
storage/tokudb/ha_tokudb.h | 92 +-
storage/tokudb/ha_tokudb_admin.cc | 8 +-
storage/tokudb/ha_tokudb_alter_55.cc | 4 +
storage/tokudb/ha_tokudb_alter_56.cc | 265 +-
storage/tokudb/ha_tokudb_alter_common.cc | 6 +-
storage/tokudb/ha_tokudb_update.cc | 96 +-
storage/tokudb/hatoku_cmp.cc | 33 +-
storage/tokudb/hatoku_cmp.h | 14 +-
storage/tokudb/hatoku_defines.h | 50 +-
storage/tokudb/hatoku_hton.cc | 169 +-
storage/tokudb/hatoku_hton.h | 25 +-
storage/tokudb/mysql-test/rpl/disabled.def | 1 +
.../r/rpl_mixed_replace_into.result | 0
.../rpl/r/rpl_parallel_tokudb_delete_pk.result | 5 -
...pl_parallel_tokudb_update_pk_uc0_lookup0.result | 5 -
.../rpl/r/rpl_parallel_tokudb_write_pk.result | 2 -
.../r/rpl_row_replace_into.result | 0
.../r/rpl_stmt_replace_into.result | 0
.../mysql-test/rpl/r/rpl_xa_interleave.result | 59 +
.../t/rpl_mixed_replace_into.test | 0
.../t/rpl_row_replace_into.test | 0
.../t/rpl_stmt_replace_into.test | 0
.../tokudb/mysql-test/rpl/t/rpl_xa_interleave.test | 103 +
.../tokudb/include/fast_update_gen_footer.inc | 2 +
.../include/fast_update_gen_footer_silent.inc | 9 +
.../tokudb/include/fast_update_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_update_int.inc | 48 +
.../tokudb/include/fast_upsert_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_upsert_int.inc | 19 +
.../tokudb/mysql-test/tokudb/include/have_mrr.inc | 0
.../tokudb/include/setup_fast_update_upsert.inc | 8 +
.../tokudb/mysql-test/tokudb/r/compressions.result | 11 +
.../tokudb/r/fast_update_binlog_mixed.result | 225 +-
.../tokudb/r/fast_update_binlog_row.result | 19 +-
.../tokudb/r/fast_update_binlog_statement.result | 222 +-
.../mysql-test/tokudb/r/fast_update_blobs.result | 18253 +---------
.../r/fast_update_blobs_fixed_varchar.result | 33026 ------------------
.../tokudb/r/fast_update_blobs_with_varchar.result | 32771 +-----------------
.../mysql-test/tokudb/r/fast_update_char.result | 60 +-
.../tokudb/r/fast_update_deadlock.result | 19 +-
.../tokudb/r/fast_update_decr_floor.result | 314 +-
.../r/fast_update_disable_slow_update.result | 7 -
.../mysql-test/tokudb/r/fast_update_error.result | 12 +-
.../mysql-test/tokudb/r/fast_update_int.result | 562 +-
.../tokudb/r/fast_update_int_bounds.result | 52 +-
.../mysql-test/tokudb/r/fast_update_key.result | 54 +-
.../mysql-test/tokudb/r/fast_update_sqlmode.result | 21 +-
.../tokudb/r/fast_update_uint_bounds.result | 36 +-
.../mysql-test/tokudb/r/fast_update_varchar.result | 13575 +-------
.../mysql-test/tokudb/r/fast_upsert_bin_pad.result | Bin 659 -> 738 bytes
.../mysql-test/tokudb/r/fast_upsert_char.result | 24 +-
.../tokudb/r/fast_upsert_deadlock.result | 19 +-
.../mysql-test/tokudb/r/fast_upsert_int.result | 428 +-
.../mysql-test/tokudb/r/fast_upsert_key.result | 43 +-
.../mysql-test/tokudb/r/fast_upsert_sqlmode.result | 23 +-
.../mysql-test/tokudb/r/fast_upsert_values.result | 18 +-
.../tokudb/mysql-test/tokudb/r/tokudb_mrr.result | 326 +
storage/tokudb/mysql-test/tokudb/suite.pm | 6 +
.../tokudb/mysql-test/tokudb/t/compressions.test | 68 +
storage/tokudb/mysql-test/tokudb/t/disabled.def | 24 -
.../tokudb/t/fast_update_binlog_mixed-master.opt | 2 +
.../tokudb/t/fast_update_binlog_mixed.test | 15 +-
.../tokudb/t/fast_update_binlog_row-master.opt | 2 +
.../tokudb/t/fast_update_binlog_row.test | 19 +-
.../t/fast_update_binlog_statement-master.opt | 2 +
.../tokudb/t/fast_update_binlog_statement.test | 15 +-
.../mysql-test/tokudb/t/fast_update_blobs.py | 57 -
.../mysql-test/tokudb/t/fast_update_blobs.test | 18575 +----------
.../tokudb/t/fast_update_blobs_fixed_varchar.py | 63 -
.../tokudb/t/fast_update_blobs_fixed_varchar.test | 33287 -------------------
.../tokudb/t/fast_update_blobs_with_varchar.py | 62 -
.../tokudb/t/fast_update_blobs_with_varchar.test | 33115 +-----------------
.../mysql-test/tokudb/t/fast_update_char.test | 66 +-
.../mysql-test/tokudb/t/fast_update_deadlock.test | 21 +-
.../mysql-test/tokudb/t/fast_update_decr_floor.py | 58 -
.../tokudb/t/fast_update_decr_floor.test | 409 +-
.../tokudb/t/fast_update_disable_slow_update.test | 17 -
.../mysql-test/tokudb/t/fast_update_error.test | 16 +-
.../tokudb/mysql-test/tokudb/t/fast_update_int.py | 77 -
.../mysql-test/tokudb/t/fast_update_int.test | 682 +-
.../tokudb/t/fast_update_int_bounds.test | 55 +-
.../mysql-test/tokudb/t/fast_update_key.test | 63 +-
.../mysql-test/tokudb/t/fast_update_sqlmode.test | 25 +-
.../tokudb/t/fast_update_uint_bounds.test | 42 +-
.../mysql-test/tokudb/t/fast_update_varchar.py | 63 -
.../mysql-test/tokudb/t/fast_update_varchar.test | 7390 +---
.../mysql-test/tokudb/t/fast_upsert_bin_pad.test | 19 +-
.../mysql-test/tokudb/t/fast_upsert_char.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_deadlock.test | 22 +-
.../tokudb/mysql-test/tokudb/t/fast_upsert_int.py | 50 -
.../mysql-test/tokudb/t/fast_upsert_int.test | 486 +-
.../mysql-test/tokudb/t/fast_upsert_key.test | 46 +-
.../mysql-test/tokudb/t/fast_upsert_sqlmode.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_values.test | 21 +-
storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test | 73 +
.../tokudb/mysql-test/tokudb_bugs/r/PS-3773.result | 8 +
.../r/alter_table_comment_rebuild_data.result | 177 +
.../tokudb/mysql-test/tokudb_bugs/t/PS-3773.test | 26 +
.../t/alter_table_comment_rebuild_data.test | 188 +
storage/tokudb/tokudb_debug.h | 5 -
storage/tokudb/tokudb_dir_cmd.h | 6 +-
storage/tokudb/tokudb_information_schema.cc | 74 +-
storage/tokudb/tokudb_sysvars.cc | 122 +-
storage/tokudb/tokudb_sysvars.h | 16 +-
storage/tokudb/tokudb_thread.h | 26 +-
storage/tokudb/tokudb_update_fun.cc | 230 +-
192 files changed, 3936 insertions(+), 194538 deletions(-)
diff --cc storage/tokudb/CMakeLists.txt
index 3099e704497,0ac3c20bf16..72fbe45cfc9
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@@ -1,11 -1,7 +1,11 @@@
- SET(TOKUDB_VERSION 5.6.39-83.1)
-SET(TOKUDB_VERSION )
++SET(TOKUDB_VERSION 5.6.41-84.1)
# PerconaFT only supports x86-64 and cmake-2.8.9+
-IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
- NOT CMAKE_VERSION VERSION_LESS "2.8.9")
+IF(CMAKE_VERSION VERSION_LESS "2.8.9")
+ MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
+ELSEIF(NOT HAVE_DLOPEN)
+ MESSAGE(STATUS "dlopen is required by TokuDB")
+ELSEIF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR
+ CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
CHECK_CXX_SOURCE_COMPILES(
"
struct a {int b; int c; };
diff --cc storage/tokudb/PerconaFT/ft/ft.h
index 7a3c4fa783c,7a3c4fa783c..ff0b63b2b12
--- a/storage/tokudb/PerconaFT/ft/ft.h
+++ b/storage/tokudb/PerconaFT/ft/ft.h
@@@ -44,6 -44,6 +44,9 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "ft/ft-ops.h"
#include "ft/logger/log.h"
#include "util/dbt.h"
++#ifndef TOKU_MYSQL_WITH_PFS
++#include <my_global.h>
++#endif
typedef struct ft *FT;
typedef struct ft_options *FT_OPTIONS;
diff --cc storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
index b7b4c0ab233,6f69c3c31b9..d742555f878
--- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
+++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
@@@ -18,7 -18,7 +18,7 @@@ int toku_pthread_create(const toku_inst
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg) {
- #if (MYSQL_VERSION_MAJOR >= 5) && (MYSQL_VERSION_MINOR >= 7)
-#if (MYSQL_VERSION_ID >= 50700)
++#if (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
return PSI_THREAD_CALL(spawn_thread)(
key.id(), reinterpret_cast<my_thread_handle *>(thread),
attr, start_routine, arg);
diff --cc storage/tokudb/ha_tokudb.cc
index 7a328e31261,548ac5c7b09..4637ac1bf5f
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@@ -34,20 -34,7 +34,20 @@@ Copyright (c) 2006, 2015, Percona and/o
pfs_key_t ha_tokudb_mutex_key;
pfs_key_t num_DBs_lock_key;
- #if TOKU_INCLUDE_EXTENDED_KEYS
++#if defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+static inline uint get_ext_key_parts(const KEY *key) {
+#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
+ (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
+ return key->actual_key_parts;
+#elif defined(MARIADB_BASE_VERSION)
+ return key->ext_key_parts;
+#else
+#error
+#endif
+}
- #endif
++#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+
- HASH TOKUDB_SHARE::_open_tables;
+ std::unordered_map<std::string, TOKUDB_SHARE*> TOKUDB_SHARE::_open_tables;
tokudb::thread::mutex_t TOKUDB_SHARE::_open_tables_mutex;
static const char* ha_tokudb_exts[] = {
@@@ -7221,8 -7262,8 +7263,8 @@@ int ha_tokudb::create
form->s->write_frm_image();
#endif
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- const tokudb::sysvars::format_t row_format =
+ const tokudb::sysvars::row_format_t row_format =
(tokudb::sysvars::row_format_t)form->s->option_struct->row_format;
#else
// TDB-76 : CREATE TABLE ... LIKE ... does not use source row_format on
diff --cc storage/tokudb/ha_tokudb.h
index a2fd747bb92,1f47308c978..6f592617b76
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@@ -1072,7 -1085,28 +1085,8 @@@ private
bool in_rpl_write_rows;
bool in_rpl_delete_rows;
bool in_rpl_update_rows;
+ #endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
};
-#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-struct ha_table_option_struct {
- uint row_format;
-};
-
-struct ha_index_option_struct {
- bool clustering;
-};
-
-static inline bool key_is_clustering(const KEY *key) {
- return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
-}
-
-#else
-
-static inline bool key_is_clustering(const KEY *key) {
- return key->flags & HA_CLUSTERING;
-}
-#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-
#endif // _HA_TOKUDB_H
diff --cc storage/tokudb/ha_tokudb_update.cc
index 9fe5e729ec4,5b09279afc5..bae19ba8b16
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@@ -52,6 -50,6 +50,7 @@@ Copyright (c) 2006, 2015, Percona and/o
// Support more complicated update expressions
// Replace field_offset
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
// Debug function to dump an Item
static void dump_item(Item* item) {
fprintf(stderr, "%u", item->type());
@@@ -1131,5 -1127,3 +1128,4 @@@ int ha_tokudb::send_upsert_message
return error;
}
-
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
diff --cc storage/tokudb/hatoku_defines.h
index 92d7da86edf,e2fbe85b3b1..66a8fa5d982
--- a/storage/tokudb/hatoku_defines.h
+++ b/storage/tokudb/hatoku_defines.h
@@@ -35,8 -35,8 +35,8 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "log.h"
#include "sql_class.h"
#include "sql_show.h"
- #include "discover.h"
+ #include "item_cmpfunc.h"
-#include <binlog.h>
+//#include <binlog.h>
#include "debug_sync.h"
#undef PACKAGE
@@@ -117,20 -142,21 +142,22 @@@
#endif
#endif
#define TOKU_OPTIMIZE_WITH_RECREATE 1
+ #define TOKU_INCLUDE_RFR 1
#elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599
-// mysql 5.5 and mariadb 5.5
#define TOKU_USE_DB_TYPE_TOKUDB 1
-#define TOKU_INCLUDE_ALTER_56 1
-#define TOKU_INCLUDE_ALTER_55 1
-#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1
+#define TOKU_INCLUDE_ALTER_56 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ALTER_55 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0 /* MariaDB 5.5 */
#define TOKU_INCLUDE_XA 1
-#define TOKU_INCLUDE_WRITE_FRM_DATA 1
-#define TOKU_PARTITION_WRITE_FRM_DATA 1
+#define TOKU_PARTITION_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+ #define TOKU_INCLUDE_DISCOVER_FRM 1
-#define TOKU_INCLUDE_UPSERT 1
+#define TOKU_INCLUDE_UPSERT 0 /* MariaDB 5.5 */
#if defined(MARIADB_BASE_VERSION)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
+#define TOKU_INCLUDE_OPTION_STRUCTS 1
+#define TOKU_CLUSTERING_IS_COVERING 1
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
#else
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
diff --cc storage/tokudb/hatoku_hton.cc
index 693f9d28a9a,610c9e07be0..ce5e396146b
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@@ -62,14 -76,16 +64,16 @@@ static bool tokudb_show_status
THD* thd,
stat_print_fn* print,
enum ha_stat_type);
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig);
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static int tokudb_close_connection(handlerton* hton, THD* thd);
-static void tokudb_kill_connection(handlerton *hton, THD *thd);
+static void tokudb_kill_connection(handlerton *hton, THD *thd, enum thd_kill_levels level);
static int tokudb_commit(handlerton* hton, THD* thd, bool all);
static int tokudb_rollback(handlerton* hton, THD* thd, bool all);
- #if TOKU_INCLUDE_XA
+ #if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all);
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len);
static int tokudb_commit_by_xid(handlerton* hton, XID* xid);
@@@ -120,8 -138,8 +126,8 @@@ handlerton* tokudb_hton
const char* ha_tokudb_ext = ".tokudb";
DB_ENV* db_env;
-#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static tokudb::thread::mutex_t tokudb_map_mutex;
- #if TOKU_THDVAR_MEMALLOC_BUG
++#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static TREE tokudb_map;
struct tokudb_map_pair {
THD* thd;
@@@ -388,14 -408,16 +396,16 @@@ static int tokudb_init_func(void *p)
tokudb_hton->panic = tokudb_end;
tokudb_hton->flush_logs = tokudb_flush_logs;
tokudb_hton->show_status = tokudb_show_status;
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
tokudb_hton->handle_fatal_signal = tokudb_handle_fatal_signal;
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- tokudb_hton->table_options = tokudb_table_options;
- tokudb_hton->index_options = tokudb_index_options;
+ tokudb_hton->table_options = tokudb::sysvars::tokudb_table_options;
+ tokudb_hton->index_options = tokudb::sysvars::tokudb_index_options;
- #endif
+ #endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
if (!tokudb_home)
tokudb_home = mysql_real_data_home;
@@@ -784,8 -807,7 +795,8 @@@ static int tokudb_close_connection(TOKU
return error;
}
- void tokudb_kill_connection(handlerton *hton, THD *thd,
- enum thd_kill_levels level) {
-void tokudb_kill_connection(TOKUDB_UNUSED(handlerton* hton), THD* thd) {
++void tokudb_kill_connection(TOKUDB_UNUSED(handlerton *hton), THD *thd,
++ TOKUDB_UNUSED(enum thd_kill_levels level)) {
TOKUDB_DBUG_ENTER("");
db_env->kill_waiter(db_env, thd);
DBUG_VOID_RETURN;
@@@ -883,7 -905,7 +894,7 @@@ extern "C" enum durability_properties t
#endif
// Determine if an fsync is used when a transaction is committed.
- static bool tokudb_sync_on_commit(THD* thd, tokudb_trx_data* trx, DB_TXN* txn) {
-static bool tokudb_sync_on_commit(THD* thd) {
++static bool tokudb_sync_on_commit(THD* thd, DB_TXN* txn) {
#if MYSQL_VERSION_ID >= 50600
// Check the client durability property which is set during 2PC
if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
@@@ -906,8 -928,7 +917,8 @@@ static int tokudb_commit(handlerton * h
DB_TXN **txn = all ? &trx->all : &trx->stmt;
DB_TXN *this_txn = *txn;
if (this_txn) {
- uint32_t syncflag = tokudb_sync_on_commit(thd) ? 0 : DB_TXN_NOSYNC;
+ uint32_t syncflag =
- tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
++ tokudb_sync_on_commit(thd, this_txn) ? 0 : DB_TXN_NOSYNC;
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_TXN,
"commit trx %u txn %p syncflag %u",
diff --cc storage/tokudb/mysql-test/rpl/disabled.def
index 4c1a9a3e785,00000000000..282e343d57f
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/rpl/disabled.def
+++ b/storage/tokudb/mysql-test/rpl/disabled.def
@@@ -1,15 -1,0 +1,16 @@@
+rpl_tokudb_delete_pk: unreliable, uses timestamp differences
+rpl_tokudb_delete_pk_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_write_pk: unreliable, uses timestamp differences
+rpl_tokudb_write_pk_uc1: unreliable, uses timestamp differences
+rpl_tokudb_write_unique: unreliable, uses timestamp differences
+rpl_tokudb_write_unique_uc1: unreliable, uses timestamp differences
+rpl_tokudb_read_only_ff: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tf: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tt: unreliable, uses timestamp differences
++rpl_tokudb_read_only_ft: no TOKU_INCLUDE_RFR
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
index 5935e5ddcbd,afbc4b50da8..48ea60013ad
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
index 8bb426d9448,7aab8947940..10375677c8d
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
index ca547e34be2,64b495350c2..1cb047bbf62
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
@@@ -3,9 -8,10 +3,7 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_unique_checks%';
Variable_name Value
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
-select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
index 00000000000,72e8644f7f2..53564ab0fe4
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
@@@ -1,0 -1,62 +1,59 @@@
+ include/master-slave.inc
-Warnings:
-Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
-Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+ [connection master]
+ CREATE TABLE t1(`a` INT) ENGINE=TokuDB;
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA COMMIT 'x1';
+ XA COMMIT 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA COMMIT 'x2';
+ XA COMMIT 'y2';
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA ROLLBACK 'x1';
+ XA ROLLBACK 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA ROLLBACK 'x2';
+ XA ROLLBACK 'y2';
+ TABLES t1 and t2 must be equal otherwise an error will be thrown.
+ include/diff_tables.inc [master:test.t1, slave:test.t1]
+ DROP TABLE t1;
+ include/rpl_end.inc
diff --cc storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
index 00000000000,00000000000..e69de29bb2d
new file mode 100644
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
diff --cc storage/tokudb/mysql-test/tokudb/r/compressions.result
index 00000000000,87ba94ebbe8..03e0d18e9eb
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/compressions.result
+++ b/storage/tokudb/mysql-test/tokudb/r/compressions.result
@@@ -1,0 -1,6 +1,11 @@@
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
++FOUND /compression_method=0/ in dump
++FOUND /compression_method=7/ in dump
++FOUND /compression_method=9/ in dump
++FOUND /compression_method=10/ in dump
++FOUND /compression_method=11/ in dump
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
index 00000000000,9eb0c2f5e34..ba469a3ac96
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
+++ b/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
@@@ -1,0 -1,706 +1,326 @@@
-set optimizer_switch='mrr=on,mrr_cost_based=off';
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+ create table t1(a int);
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
+ insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+ create table t2(a int);
+ insert into t2 select A.a + 10*(B.a + 10*C.a) from t1 A, t1 B, t1 C;
+ create table t3 (
+ a char(8) not null, b char(8) not null, filler char(200),
+ key(a)
+ );
+ insert into t3 select @a:=concat('c-', 1000+ A.a, '=w'), @a, 'filler' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 2000+A.a, '=w'),
+ 'filler-1' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 3000+A.a, '=w'),
+ 'filler-2' from t2 A;
+ select a,filler from t3 where a >= 'c-9011=w';
+ a filler
+ select a,filler from t3 where a >= 'c-1011=w' and a <= 'c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=z', 'c-1013=z', 'err');
+ insert into t3 values ('a-1014=w', 'a-1014=w', 'err');
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ delete from t3 where b in ('c-1013=z', 'a-1014=w');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=w', 'del-me', 'inserted');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
-c-1013=w inserted
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
++c-1013=w inserted
+ delete from t3 where b='del-me';
+ alter table t3 add primary key(b);
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w', 'c-1021=w',
+ 'c-1022=w', 'c-1023=w', 'c-1024=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ c-1024=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1020=w') or
+ b IN ('c-1021=w', 'c-1022=w', 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w') or
+ (b>='c-1021=w' and b<= 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
++drop table if exists t4;
+ create table t4 (a varchar(10), b int, c char(10), filler char(200),
+ key idx1 (a, b, c));
+ insert into t4 (filler) select concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-1', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-222', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'bb-1',NULL,'cc-2', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'zz-1',NULL,'cc-2', 'filler-data' from t2 order by a limit 500;
+ explain
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Rowid-ordered scan
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ a b c filler
+ NULL NULL NULL NULL-15
+ NULL NULL NULL NULL-14
+ NULL NULL NULL NULL-13
+ NULL NULL NULL NULL-12
+ NULL NULL NULL NULL-11
+ NULL NULL NULL NULL-10
+ NULL NULL NULL NULL-9
+ NULL NULL NULL NULL-8
+ NULL NULL NULL NULL-7
+ NULL NULL NULL NULL-6
+ NULL NULL NULL NULL-5
+ NULL NULL NULL NULL-4
+ NULL NULL NULL NULL-3
+ NULL NULL NULL NULL-2
+ NULL NULL NULL NULL-1
+ explain
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Rowid-ordered scan
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ select * from t4 ignore index(idx1) where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ drop table t1, t2, t3, t4;
+ create table t1 (a int, b int not null,unique key (a,b),index(b));
+ insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6);
++Warnings:
++Warning 1062 Duplicate entry '6-6' for key 'a'
+ create table t2 like t1;
+ insert into t2 select * from t1;
+ alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
+ select * from t1 where a is null;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3;
+ a b c
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ drop table t1, t2;
-CREATE TABLE t1 (
-ID int(10) unsigned NOT NULL AUTO_INCREMENT,
-col1 int(10) unsigned DEFAULT NULL,
-key1 int(10) unsigned NOT NULL DEFAULT '0',
-key2 int(10) unsigned DEFAULT NULL,
-text1 text,
-text2 text,
-col2 smallint(6) DEFAULT '100',
-col3 enum('headers','bodyandsubject') NOT NULL DEFAULT 'bodyandsubject',
-col4 tinyint(3) unsigned NOT NULL DEFAULT '0',
-PRIMARY KEY (ID),
-KEY (key1),
-KEY (key2)
-) AUTO_INCREMENT=6 DEFAULT CHARSET=utf8;
-INSERT INTO t1 VALUES
-(1,NULL,1130,NULL,'Hello',NULL,100,'bodyandsubject',0),
-(2,NULL,1130,NULL,'bye',NULL,100,'bodyandsubject',0),
-(3,NULL,1130,NULL,'red',NULL,100,'bodyandsubject',0),
-(4,NULL,1130,NULL,'yellow',NULL,100,'bodyandsubject',0),
-(5,NULL,1130,NULL,'blue',NULL,100,'bodyandsubject',0);
-select * FROM t1 WHERE key1=1130 AND col1 IS NULL ORDER BY text1;
-ID col1 key1 key2 text1 text2 col2 col3 col4
-5 NULL 1130 NULL blue NULL 100 bodyandsubject 0
-2 NULL 1130 NULL bye NULL 100 bodyandsubject 0
-1 NULL 1130 NULL Hello NULL 100 bodyandsubject 0
-3 NULL 1130 NULL red NULL 100 bodyandsubject 0
-4 NULL 1130 NULL yellow NULL 100 bodyandsubject 0
-drop table t1;
-
-BUG#37851: Crash in test_if_skip_sort_order tab->select is zero
-
-CREATE TABLE t1 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1 VALUES (1);
-CREATE TABLE t2 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-int_key int(11) DEFAULT NULL,
-PRIMARY KEY (pk),
-KEY int_key (int_key)
-);
-INSERT INTO t2 VALUES (1,1),(2,6),(3,0);
-EXPLAIN EXTENDED
-SELECT MIN(t1.pk)
-FROM t1 WHERE EXISTS (
-SELECT t2.pk
-FROM t2
-WHERE t2.int_key IS NULL
-GROUP BY t2.pk
-);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t2 ref int_key int_key 5 const 1 100.00 Using where; Using index
-Warnings:
-Note 1003 /* select#1 */ select min(`test`.`t1`.`pk`) AS `MIN(t1.pk)` from `test`.`t1` where 0
-DROP TABLE t1, t2;
-#
-# BUG#42048 Discrepancy between MyISAM and Maria's ICP implementation
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b char(20), filler char(200), key(a,b(10)));
-insert into t1 select A.a + 10*(B.a + 10*C.a), 'bbb','filler' from t0 A, t0 B, t0 C;
-update t1 set b=repeat(char(65+a), 20) where a < 25;
-This must show range + using index condition:
-explain select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL x Using where
-select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-a b filler
-0 AAAAAAAAAAAAAAAAAAAA filler
-1 BBBBBBBBBBBBBBBBBBBB filler
-2 CCCCCCCCCCCCCCCCCCCC filler
-3 DDDDDDDDDDDDDDDDDDDD filler
-4 EEEEEEEEEEEEEEEEEEEE filler
-5 FFFFFFFFFFFFFFFFFFFF filler
-6 GGGGGGGGGGGGGGGGGGGG filler
-7 HHHHHHHHHHHHHHHHHHHH filler
-8 IIIIIIIIIIIIIIIIIIII filler
-9 JJJJJJJJJJJJJJJJJJJJ filler
-drop table t0,t1;
-#
-# BUG#41136: ORDER BY + range access: EXPLAIN shows "Using MRR" while MRR is actually not used
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b int, key(a));
-insert into t1 select A.a + 10 *(B.a + 10*C.a), A.a + 10 *(B.a + 10*C.a) from t0 A, t0 B, t0 C;
-This mustn't show "Using MRR":
-explain select * from t1 where a < 20 order by a;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 20 Using where
-drop table t0, t1;
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=64;
-create table t1(a int);
-insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t2(a char(8), b char(8), c char(8), filler char(100), key k1(a,b,c) );
-insert into t2 select
-concat('a-', 1000 + A.a, '-a'),
-concat('b-', 1000 + B.a, '-b'),
-concat('c-', 1000 + C.a, '-c'),
-'filler'
-from t1 A, t1 B, t1 C;
-EXPLAIN select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k1 k1 9 NULL 100 Using where; Using MRR
-select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-count(length(a) + length(filler))
-100
-drop table t2;
-create table t2 (a char(100), b char(100), c char(100), d int,
-filler char(10), key(d), primary key (a,b,c));
-insert into t2 select A.a, B.a, B.a, A.a, 'filler' from t1 A, t1 B;
-explain select * from t2 force index (d) where d < 10;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range d d 5 NULL # Using where
-drop table t2;
-drop table t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-create table t1 (f1 int not null, f2 int not null,f3 int not null, f4 char(1), primary key (f1,f2), key ix(f3));
-select * from t1 where (f3>=5 and f3<=10) or (f3>=1 and f3<=4);
-f1 f2 f3 f4
-1 1 1 A
-10 10 10 A
-2 2 2 A
-3 3 3 A
-4 4 4 A
-5 5 5 A
-6 6 6 A
-7 7 7 A
-8 8 8 A
-9 9 9 A
-drop table t1;
-
-BUG#37977: Wrong result returned on GROUP BY + OR + Innodb
-
-CREATE TABLE t1 (
-`pk` int(11) NOT NULL AUTO_INCREMENT,
-`int_nokey` int(11) NOT NULL,
-`int_key` int(11) NOT NULL,
-`date_key` date NOT NULL,
-`date_nokey` date NOT NULL,
-`time_key` time NOT NULL,
-`time_nokey` time NOT NULL,
-`datetime_key` datetime NOT NULL,
-`datetime_nokey` datetime NOT NULL,
-`varchar_key` varchar(5) DEFAULT NULL,
-`varchar_nokey` varchar(5) DEFAULT NULL,
-PRIMARY KEY (`pk`),
-KEY `int_key` (`int_key`),
-KEY `date_key` (`date_key`),
-KEY `time_key` (`time_key`),
-KEY `datetime_key` (`datetime_key`),
-KEY `varchar_key` (`varchar_key`)
-);
-INSERT INTO t1 VALUES
-(1,5,5,'2009-10-16','2009-10-16','09:28:15','09:28:15','2007-09-14 05:34:08','2007-09-14 05:34:08','qk','qk'),
-(2,6,6,'0000-00-00','0000-00-00','23:06:39','23:06:39','0000-00-00 00:00:00','0000-00-00 00:00:00','j','j'),
-(3,10,10,'2000-12-18','2000-12-18','22:16:19','22:16:19','2006-11-04 15:42:50','2006-11-04 15:42:50','aew','aew'),
-(4,0,0,'2001-09-18','2001-09-18','00:00:00','00:00:00','2004-03-23 13:23:35','2004-03-23 13:23:35',NULL,NULL),
-(5,6,6,'2007-08-16','2007-08-16','22:13:38','22:13:38','2004-08-19 11:01:28','2004-08-19 11:01:28','qu','qu');
-select pk from t1 WHERE `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-select pk from t1 WHERE `int_nokey` IS NULL OR `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-drop table t1;
-#
-# BUG#39447: Error with NOT NULL condition and LIMIT 1
-#
-CREATE TABLE t1 (
-id int(11) NOT NULL,
-parent_id int(11) DEFAULT NULL,
-name varchar(10) DEFAULT NULL,
-PRIMARY KEY (id),
-KEY ind_parent_id (parent_id)
-);
-insert into t1 (id, parent_id, name) values
-(10,NULL,'A'),
-(20,10,'B'),
-(30,10,'C'),
-(40,NULL,'D'),
-(50,40,'E'),
-(60,40,'F'),
-(70,NULL,'J');
-SELECT id FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id
-60
-This must show type=index, extra=Using where
-explain SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index ind_parent_id PRIMARY 4 NULL 1 Using where
-SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id parent_id name
-60 40 F
-drop table t1;
-#
-# Bug#50381 "Assertion failing in handler.h:1283:
-# void COST_VECT::add_io(double, double)"
-#
-CREATE TABLE t1 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-CREATE TABLE t2 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-INSERT INTO t2 VALUES (10,'v');
-INSERT INTO t2 VALUES (11,'r');
-SELECT t1.c2
-FROM t2 STRAIGHT_JOIN t1 ON t1.c1 < t2.c1;
-c2
-DROP TABLE t1, t2;
-#
-# Bug#58463: Error Can't find record on SELECT with JOIN and ORDER BY
-#
-CREATE TABLE t1 (
-pk INT NOT NULL,
-PRIMARY KEY (pk)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (2);
-CREATE TABLE t2 (
-pk INT NOT NULL,
-i1 INT NOT NULL,
-i2 INT NOT NULL,
-c1 VARCHAR(1024) CHARACTER SET utf8,
-PRIMARY KEY (pk),
-KEY k1 (i1)
-);
-INSERT INTO t2 VALUES (3, 9, 1, NULL);
-EXPLAIN SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 NULL
-1 SIMPLE t2 const PRIMARY,k1 PRIMARY 4 const 1 Using where
-SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-i1
-DROP TABLE t1, t2;
-#
-# Bug#12321461: CRASH IN DSMRR_IMPL::DSMRR_INIT ON SELECT STRAIGHT_JOIN
-#
-set @save_optimizer_switch = @@optimizer_switch;
-set optimizer_switch='block_nested_loop=off,batched_key_access=off';
-CREATE TABLE t1 (
-pk INTEGER,
-c1 VARCHAR(1) NOT NULL,
-PRIMARY KEY (pk)
-);
-CREATE TABLE t2 (
-c1 VARCHAR(1) NOT NULL
-);
-INSERT INTO t2 VALUES ('v'), ('c');
-EXPLAIN SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 2 NULL
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where
-SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-c1
-DROP TABLE t1,t2;
-set optimizer_switch= @save_optimizer_switch;
-#
-# Bug#13249966 MRR: RANDOM ERROR DUE TO UNINITIALIZED RES WITH
-# SMALL READ_RND_BUFFER_SIZE
-#
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=1;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-1
-CREATE TABLE t1 (
-i1 INTEGER NOT NULL,
-i2 INTEGER NOT NULL,
-KEY (i2)
-);
-INSERT INTO t1 VALUES (0,1),(1,2),(2,3);
-EXPLAIN SELECT i1
-FROM t1
-WHERE i2 > 2;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i2 i2 4 NULL 1 Using where
-SELECT i1
-FROM t1
-WHERE i2 > 2;
-i1
-2
-DROP TABLE t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-262144
-#
-# Bug 12365385 STRAIGHT_JOIN QUERY QUICKLY EXHAUSTS SYSTEM+VIRT.
-# MEMORY LEADING TO SYSTEM CRASH
-#
-CREATE TABLE ten (a INTEGER);
-INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-CREATE TABLE t1 (
-pk INTEGER NOT NULL,
-i1 INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1
-SELECT a, 1, 'MySQL' FROM ten;
-CREATE TABLE t2 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-c2 varchar(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t2
-SELECT a, 'MySQL', 'MySQL' FROM ten;
-CREATE TABLE t3 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t3
-SELECT a, 'MySQL' FROM ten;
-CREATE TABLE t4 (
-pk int(11) NOT NULL,
-c1_key varchar(10) CHARACTER SET utf8 NOT NULL,
-c2 varchar(10) NOT NULL,
-c3 varchar(10) NOT NULL,
-PRIMARY KEY (pk),
-KEY k1 (c1_key)
-);
-CREATE TABLE t5 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t5
-SELECT a, 'MySQL' FROM ten;
-EXPLAIN SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t5 ALL NULL NULL NULL NULL 10 NULL
-1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t3 ALL NULL NULL NULL NULL 10 Using join buffer (Block Nested Loop)
-1 SIMPLE t4 ALL k1 NULL NULL NULL 1 Range checked for each record (index map: 0x2)
-SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-pk i1 c1 pk c1 c2 pk c1 pk c1_key c2 c3 pk c1
-DROP TABLE ten, t1, t2, t3, t4, t5;
+ #
+ # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ #
+ SET AUTOCOMMIT=0;
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ @@tx_isolation
+ REPEATABLE-READ
+ START TRANSACTION;
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ id select_type table type possible_keys key key_len ref rows Extra
+ 1 SIMPLE t1 range a a 5 NULL 2 Using where
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ dummy a b
+ 3 3 3
+ 5 5 5
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+ INSERT INTO t1 VALUES (2,2,2);
+ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ ROLLBACK;
+ ROLLBACK;
+ DROP TABLE t1;
diff --cc storage/tokudb/mysql-test/tokudb/suite.pm
index 6c52d0110fe,00000000000..70a65de4a2a
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/tokudb/suite.pm
+++ b/storage/tokudb/mysql-test/tokudb/suite.pm
@@@ -1,14 -1,0 +1,20 @@@
+package My::Suite::TokuDB;
+use File::Basename;
+@ISA = qw(My::Suite);
+
+# Ensure we can run the TokuDB tests even if hugepages are enabled
+$ENV{TOKU_HUGE_PAGES_OK}=1;
++my $exe_tokuftdump=
++ ::mtr_exe_maybe_exists(
++ ::vs_config_dirs('storage/tokudb/PerconaFT/tools', 'tokuftdump'),
++ "$::path_client_bindir/tokuftdump",
++ "$::basedir/storage/tokudb/PerconaFT/tools/tokuftdump");
++$ENV{'MYSQL_TOKUFTDUMP'}= ::native_path($exe_tokuftdump);
+
+#return "Not run for embedded server" if $::opt_embedded_server;
+return "No TokuDB engine" unless $ENV{HA_TOKUDB_SO} or $::mysqld_variables{tokudb};
+
+sub is_default { not $::opt_embedded_server }
+
+bless { };
+
diff --cc storage/tokudb/mysql-test/tokudb/t/compressions.test
index 00000000000,3e83cdb8b68..cd2e405c13a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/compressions.test
+++ b/storage/tokudb/mysql-test/tokudb/t/compressions.test
@@@ -1,0 -1,68 +1,68 @@@
+ --source include/have_tokudb.inc
+
+ # The purpose of this test is to perform about as full of an end-to-end
+ # validation that the requested compression algo at the SQL layer is actually
+ # applied to the FT data files. The only practical way to check this is to use
+ # tokuftdump and look at the data files header value for compression_method.
+ # A side effect of this is that the existance of this test will ensure that at
+ # no time will the compression method IDs ever change, if they do, this test
+ # will fail and users data will be irreparably damaged.
+
+ # uncompressed - compression_method=0
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
+ --let $t1_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t1-main'`
+
+ # SNAPPY - compression_method=7
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
+ --let $t2_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t2-main'`
+
+ # QUICKLZ - compression_method=9
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
+ --let $t3_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t3-main'`
+
+ # LZMA - compression_method=10
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
+ --let $t4_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t4-main'`
+
+ # ZLIB (without checksum) - compression_method=11
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
+ --let $t5_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t5-main'`
+
+ --let $datadir = `SELECT @@global.datadir`
+
+ # To ensure we have correct headers written to FT data files and no chance of a
+ # race between header rotation and tokuftdump, lets just perform a clean server
+ # shutdown before we go rooting around in the FT files.
+ --source include/shutdown_mysqld.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/dump
+
+ # uncompressed - compression_method=0
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t1_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=0
+ --source include/search_pattern_in_file.inc
+
+ # SNAPPY - compression_method=7
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t2_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=7
+ --source include/search_pattern_in_file.inc
+
+ # QUICKLZ - compression_method=9
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t3_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=9
+ --source include/search_pattern_in_file.inc
+
+ # LZMA - compression_method=10
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t4_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=10
+ --source include/search_pattern_in_file.inc
+
+ # ZLIB (without checksum) - compression_method=11
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t5_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=11
+ --source include/search_pattern_in_file.inc
+
+ --remove_file $SEARCH_FILE
+ --source include/start_mysqld.inc
+
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
index 00000000000,b30bc18d759..6130933b279
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
+++ b/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
@@@ -1,0 -1,89 +1,73 @@@
+ #
+ # MRR/Tokudb tests, taken from mysqltest/t/innodb_mrr.test
+ # (Turns off all other 6.0 optimizer switches than MRR)
+ #
+
+ --source include/have_tokudb.inc
+ --source include/have_mrr.inc
+
-set optimizer_switch='mrr=on,mrr_cost_based=off';
-
---disable_query_log
-if (`select locate('semijoin', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='semijoin=off';
-}
-if (`select locate('materialization', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='materialization=off';
-}
-if (`select locate('index_condition_pushdown', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='index_condition_pushdown=off';
-}
---enable_query_log
-
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+
+ --source include/mrr_tests.inc
+
+
+ # taken from include/mrr_innodb_tests.inc
+
+ --source include/count_sessions.inc
+
+ # MRR tests that are special for InnoDB (and copied for TokuDB)
+
+ --echo #
+ --echo # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ --echo #
+
+ # This test verifies that a SELECT FOR UPDATE statement executed in
+ # REPEATABLE READ isolation will lock the entire read interval by verifying
+ # that a second transaction trying to update data within this interval will
+ # be blocked.
+
+ connect (con1,localhost,root,,);
+ connect (con2,localhost,root,,);
+
+ connection con1;
+
+ SET AUTOCOMMIT=0;
+
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ START TRANSACTION;
+
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ connection con2;
+
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+
+ --error ER_LOCK_WAIT_TIMEOUT
+ INSERT INTO t1 VALUES (2,2,2);
+ ROLLBACK;
+
+ connection con1;
+
+ ROLLBACK;
+ DROP TABLE t1;
+
+ connection default;
+ disconnect con1;
+ disconnect con2;
+
+ --source include/wait_until_count_sessions.inc
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
index 00000000000,e2e695611b5..49c61790837
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
@@@ -1,0 -1,7 +1,8 @@@
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ SELECT * FROM t1 WHERE b = 2;
+ ERROR HY000: Incorrect key file for table 't1'; try to repair it
+ DROP TABLE t1;
++FOUND /ha_tokudb::read_full_row on table/ in tokudb.bugs.PS-3773.log
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
index 00000000000,f536f5163ef..1bd5aee087a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
@@@ -1,0 -1,186 +1,177 @@@
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ alter table t1 min_rows = 8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter min_rows]
+ alter table t1 max_rows = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter max_rows]
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter avg_row_length]
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter pack_keys]
+ alter table t1 character set = utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter character set]
+ alter table t1 data directory = '/tmp';
+ Warnings:
+ Warning 1618 <DATA DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter data directory]
+ alter table t1 index directory = '/tmp';
+ Warnings:
+ Warning 1618 <INDEX DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter index directory]
+ alter table t1 checksum = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter checksum]
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter delay_key_write]
+ alter table t1 comment = 'test table';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter comment]
+ alter table t1 password = '123456';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter password]
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter connection]
-alter table t1 key_block_size=32;
-show create table t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
- PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
-include/assert.inc [underlying ft file name not changed after alter key_block_size]
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_persistent]
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_auto_recalc]
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_sample_pages]
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter auto_increment]
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name not changed after alter compression method]
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter engine type]
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter convert character]
+ drop table t1;
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
index 00000000000,684f9cbf8d5..e9490e91c33
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
@@@ -1,0 -1,26 +1,26 @@@
+ --source include/have_tokudb.inc
+ --source include/have_debug.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/tokudb.bugs.PS-3773.log
---let $restart_parameters="restart: --log-error=$SEARCH_FILE"
++--let $restart_parameters="--log-error=$SEARCH_FILE"
+ --source include/restart_mysqld.inc
+
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
+
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ --error ER_NOT_KEYFILE
+ SELECT * FROM t1 WHERE b = 2;
+
+ DROP TABLE t1;
+
+ --let SEARCH_PATTERN=ha_tokudb::read_full_row on table
+ --source include/search_pattern_in_file.inc
+
+ --let $restart_parameters=
+ --source include/restart_mysqld.inc
+
+ --remove_file $SEARCH_FILE
+ --let SEARCH_PATTERN=
+ --let SEARCH_FILE=
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
index 00000000000,fc4f3e0fd3d..e0e043f96ab
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
@@@ -1,0 -1,184 +1,188 @@@
+ --source include/have_tokudb.inc
+
+ #
+ # Create a table and get the underlying main ft file name
+ #
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ --let $ori_file= `select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+
+ #
+ # Case 1: alter create options that are ignored by TokuDB
+ #
+
+ # Alter table with min_rows
+ alter table t1 min_rows = 8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter min_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with max_rows
+ alter table t1 max_rows = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter max_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with avg_row_length
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter avg_row_length
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with pack_keys
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter pack_keys
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with default character set
+ alter table t1 character set = utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter character set
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with data directory
+ alter table t1 data directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter data directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with index directory
+ alter table t1 index directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter index directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with checksum
+ alter table t1 checksum = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter checksum
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with delay_key_write
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter delay_key_write
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with comment
+ alter table t1 comment = 'test table';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter comment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with password
+ alter table t1 password = '123456';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter password
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with connection
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter connection
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
++
++#
++# In mariadb changing of key_block_size treated as index change
++#
+ # Alter table with key_block_size
-alter table t1 key_block_size=32;
-show create table t1;
---let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
---let $assert_text= underlying ft file name not changed after alter key_block_size
---let $assert_cond= "$ori_file" = "$new_file"
---source include/assert.inc
++#alter table t1 key_block_size=32;
++#show create table t1;
++#--let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
++#--let $assert_text= underlying ft file name not changed after alter key_block_size
++#--let $assert_cond= "$ori_file" = "$new_file"
++#--source include/assert.inc
+
+ # Alter table with stats_persistent
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_persistent
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_auto_recalc
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_auto_recalc
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_sample_pages
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_sample_pages
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 2: alter create options that only update meta info, i.e inplace
+ #
+
+ # Alter table with auto_increment
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter auto_increment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with compression method
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter compression method
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 3: alter create options that rebuild table using copy algorithm
+ #
+
+ # Alter table with engine type
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter engine type
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ # Alter table with convert character
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter convert character
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ #
+ # clean up
+ #
+ drop table t1;
diff --cc storage/tokudb/tokudb_sysvars.cc
index bbc39dc550a,e8e9f908275..7771204dc11
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@@ -1006,9 -1075,12 +1002,9 @@@ st_mysql_sys_var* system_variables[] =
MYSQL_SYSVAR(support_xa),
#endif
- #if TOKUDB_DEBUG
+ #if defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(debug_pause_background_job_manager),
-#endif // defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(dir_cmd_last_error),
- MYSQL_SYSVAR(dir_cmd_last_error_string),
- MYSQL_SYSVAR(dir_cmd),
+ MYSQL_SYSVAR(debug_pause_background_job_manager),
+#endif // TOKUDB_DEBUG
NULL
};
@@@ -1055,14 -1127,12 +1051,14 @@@ my_bool disable_prefetching(THD* thd)
my_bool disable_slow_alter(THD* thd) {
return (THDVAR(thd, disable_slow_alter) != 0);
}
- #if TOKU_INCLUDE_UPSERT
- my_bool disable_slow_update(THD* thd) {
- return (THDVAR(thd, disable_slow_update) != 0);
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
+ my_bool enable_fast_update(THD* thd) {
+ return (THDVAR(thd, enable_fast_update) != 0);
}
- my_bool disable_slow_upsert(THD* thd) {
- return (THDVAR(thd, disable_slow_upsert) != 0);
+ my_bool enable_fast_upsert(THD* thd) {
+ return (THDVAR(thd, enable_fast_upsert) != 0);
}
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
empty_scan_mode_t empty_scan(THD* thd) {
return (empty_scan_mode_t)THDVAR(thd, empty_scan);
}
@@@ -1139,17 -1211,5 +1137,17 @@@ my_bool support_xa(THD* thd)
return (THDVAR(thd, support_xa) != 0);
}
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+ha_create_table_option tokudb_table_options[] = {
+ HA_TOPTION_SYSVAR("compression", row_format, row_format),
+ HA_TOPTION_END
+};
+
+ha_create_table_option tokudb_index_options[] = {
+ HA_IOPTION_BOOL("clustering", clustering, 0),
+ HA_IOPTION_END
+};
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
} // namespace sysvars
} // namespace tokudb
diff --cc storage/tokudb/tokudb_sysvars.h
index 7701f211729,d81d5fd7999..2454f8fefd2
--- a/storage/tokudb/tokudb_sysvars.h
+++ b/storage/tokudb/tokudb_sysvars.h
@@@ -26,26 -26,6 +26,26 @@@ Copyright (c) 2006, 2015, Percona and/o
#ifndef _TOKUDB_SYSVARS_H
#define _TOKUDB_SYSVARS_H
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+struct ha_table_option_struct {
+ uint row_format;
+};
+
+struct ha_index_option_struct {
+ bool clustering;
+};
+
+static inline bool key_is_clustering(const KEY *key) {
+ return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
+}
+
+#else
+
+static inline bool key_is_clustering(const KEY *key) {
+ return key->flags & HA_CLUSTERING;
+}
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
namespace tokudb {
namespace sysvars {
1
0

06 Sep '18
revision-id: 3d82f0c76377718567f1583b5d38de57c569f94d (mariadb-10.0.36-25-g3d82f0c7637)
parent(s): 0ccba62db385139caae514f70b31187bdce0de88 a816eac92ac2381e1b9cd4d655e733bdeafb173e
author: Oleksandr Byelkin
committer: Oleksandr Byelkin
timestamp: 2018-09-06 18:51:41 +0200
message:
Merge branch 'merge-tokudb-5.6' into 10.0
storage/tokudb/CMakeLists.txt | 8 +-
storage/tokudb/PerconaFT/CMakeLists.txt | 3 +-
.../cmake_modules/TokuSetupCompiler.cmake | 3 +
.../tokudb/PerconaFT/ft/cachetable/cachetable.cc | 21 +-
.../tokudb/PerconaFT/ft/cachetable/cachetable.h | 8 +-
.../tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft-test-helpers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft.h | 3 +
storage/tokudb/PerconaFT/ft/node.cc | 2 +
.../PerconaFT/ft/serialize/block_allocator.cc | 2 +-
.../tokudb/PerconaFT/ft/tests/cachetable-4357.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-4365.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-5097.cc | 6 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc | 7 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-all-write.cc | 5 +-
.../ft/tests/cachetable-checkpoint-pending.cc | 8 +-
.../ft/tests/cachetable-checkpoint-pinned-nodes.cc | 6 +-
.../ft/tests/cachetable-cleaner-checkpoint.cc | 5 +-
.../ft/tests/cachetable-cleaner-checkpoint2.cc | 5 +-
.../cachetable-cleaner-thread-attrs-accumulate.cc | 8 +-
.../cachetable-cleaner-thread-everything-pinned.cc | 5 +-
...etable-cleaner-thread-nothing-needs-flushing.cc | 5 +-
.../cachetable-cleaner-thread-same-fullhash.cc | 7 +-
.../ft/tests/cachetable-cleaner-thread-simple.cc | 7 +-
.../ft/tests/cachetable-clock-eviction.cc | 9 +-
.../ft/tests/cachetable-clock-eviction2.cc | 9 +-
.../ft/tests/cachetable-clock-eviction3.cc | 9 +-
.../ft/tests/cachetable-clock-eviction4.cc | 9 +-
.../ft/tests/cachetable-clone-checkpoint.cc | 5 +-
.../cachetable-clone-partial-fetch-pinned-node.cc | 7 +-
.../ft/tests/cachetable-clone-partial-fetch.cc | 7 +-
.../ft/tests/cachetable-clone-pin-nonblocking.cc | 7 +-
.../ft/tests/cachetable-clone-unpin-remove.cc | 5 +-
.../ft/tests/cachetable-eviction-close-test.cc | 4 -
.../ft/tests/cachetable-eviction-close-test2.cc | 4 -
.../ft/tests/cachetable-eviction-getandpin-test.cc | 14 +-
.../tests/cachetable-eviction-getandpin-test2.cc | 12 +-
.../ft/tests/cachetable-fetch-inducing-evictor.cc | 15 +-
.../ft/tests/cachetable-flush-during-cleaner.cc | 3 +-
.../ft/tests/cachetable-getandpin-test.cc | 8 +-
.../cachetable-kibbutz_and_flush_cachefile.cc | 3 +-
.../PerconaFT/ft/tests/cachetable-partial-fetch.cc | 18 +-
.../ft/tests/cachetable-pin-checkpoint.cc | 6 -
.../cachetable-pin-nonblocking-checkpoint-clean.cc | 9 +-
.../ft/tests/cachetable-prefetch-close-test.cc | 2 -
.../ft/tests/cachetable-prefetch-getandpin-test.cc | 12 +-
.../ft/tests/cachetable-put-checkpoint.cc | 9 -
.../PerconaFT/ft/tests/cachetable-simple-clone.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-clone2.cc | 5 +-
.../PerconaFT/ft/tests/cachetable-simple-close.cc | 20 +-
.../ft/tests/cachetable-simple-maybe-get-pin.cc | 3 +-
.../ft/tests/cachetable-simple-pin-cheap.cc | 9 +-
.../ft/tests/cachetable-simple-pin-dep-nodes.cc | 8 +-
.../cachetable-simple-pin-nonblocking-cheap.cc | 19 +-
.../ft/tests/cachetable-simple-pin-nonblocking.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-simple-pin.cc | 11 +-
.../ft/tests/cachetable-simple-put-dep-nodes.cc | 6 +-
.../cachetable-simple-read-pin-nonblocking.cc | 13 +-
.../ft/tests/cachetable-simple-read-pin.cc | 13 +-
.../cachetable-simple-unpin-remove-checkpoint.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-verify.cc | 5 +-
.../tokudb/PerconaFT/ft/tests/cachetable-test.cc | 22 +-
.../ft/tests/cachetable-unpin-and-remove-test.cc | 4 +-
.../cachetable-unpin-remove-and-checkpoint.cc | 6 +-
.../PerconaFT/ft/tests/cachetable-unpin-test.cc | 2 -
storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc | 178 +
storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc | 208 +
storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc | 2 +
storage/tokudb/PerconaFT/ft/txn/rollback.cc | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp | 2 +-
storage/tokudb/PerconaFT/portability/memory.cc | 14 +-
storage/tokudb/PerconaFT/portability/toku_assert.h | 2 +-
.../tokudb/PerconaFT/portability/toku_debug_sync.h | 3 +-
.../PerconaFT/portability/toku_instr_mysql.cc | 6 +-
.../PerconaFT/portability/toku_instrumentation.h | 6 +-
.../PerconaFT/portability/toku_portability.h | 2 +-
.../tokudb/PerconaFT/portability/toku_race_tools.h | 2 +-
storage/tokudb/PerconaFT/src/tests/get_last_key.cc | 32 +-
storage/tokudb/PerconaFT/src/ydb.cc | 3 +
storage/tokudb/PerconaFT/util/dmt.cc | 4 +-
storage/tokudb/PerconaFT/util/minicron.cc | 3 +-
storage/tokudb/PerconaFT/util/scoped_malloc.cc | 2 +-
.../util/tests/minicron-change-period-data-race.cc | 66 +
storage/tokudb/ha_tokudb.cc | 325 +-
storage/tokudb/ha_tokudb.h | 92 +-
storage/tokudb/ha_tokudb_admin.cc | 8 +-
storage/tokudb/ha_tokudb_alter_55.cc | 4 +
storage/tokudb/ha_tokudb_alter_56.cc | 265 +-
storage/tokudb/ha_tokudb_alter_common.cc | 6 +-
storage/tokudb/ha_tokudb_update.cc | 96 +-
storage/tokudb/hatoku_cmp.cc | 33 +-
storage/tokudb/hatoku_cmp.h | 14 +-
storage/tokudb/hatoku_defines.h | 50 +-
storage/tokudb/hatoku_hton.cc | 169 +-
storage/tokudb/hatoku_hton.h | 25 +-
storage/tokudb/mysql-test/rpl/disabled.def | 1 +
.../r/rpl_mixed_replace_into.result | 0
.../rpl/r/rpl_parallel_tokudb_delete_pk.result | 5 -
...pl_parallel_tokudb_update_pk_uc0_lookup0.result | 5 -
.../rpl/r/rpl_parallel_tokudb_write_pk.result | 2 -
.../r/rpl_row_replace_into.result | 0
.../r/rpl_stmt_replace_into.result | 0
.../mysql-test/rpl/r/rpl_xa_interleave.result | 59 +
.../t/rpl_mixed_replace_into.test | 0
.../t/rpl_row_replace_into.test | 0
.../t/rpl_stmt_replace_into.test | 0
.../tokudb/mysql-test/rpl/t/rpl_xa_interleave.test | 103 +
.../tokudb/include/fast_update_gen_footer.inc | 2 +
.../include/fast_update_gen_footer_silent.inc | 9 +
.../tokudb/include/fast_update_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_update_int.inc | 48 +
.../tokudb/include/fast_upsert_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_upsert_int.inc | 19 +
.../tokudb/mysql-test/tokudb/include/have_mrr.inc | 0
.../tokudb/include/setup_fast_update_upsert.inc | 8 +
.../tokudb/mysql-test/tokudb/r/compressions.result | 11 +
.../tokudb/r/fast_update_binlog_mixed.result | 225 +-
.../tokudb/r/fast_update_binlog_row.result | 19 +-
.../tokudb/r/fast_update_binlog_statement.result | 222 +-
.../mysql-test/tokudb/r/fast_update_blobs.result | 18253 +---------
.../r/fast_update_blobs_fixed_varchar.result | 33026 ------------------
.../tokudb/r/fast_update_blobs_with_varchar.result | 32771 +-----------------
.../mysql-test/tokudb/r/fast_update_char.result | 60 +-
.../tokudb/r/fast_update_deadlock.result | 19 +-
.../tokudb/r/fast_update_decr_floor.result | 314 +-
.../r/fast_update_disable_slow_update.result | 7 -
.../mysql-test/tokudb/r/fast_update_error.result | 12 +-
.../mysql-test/tokudb/r/fast_update_int.result | 562 +-
.../tokudb/r/fast_update_int_bounds.result | 52 +-
.../mysql-test/tokudb/r/fast_update_key.result | 54 +-
.../mysql-test/tokudb/r/fast_update_sqlmode.result | 21 +-
.../tokudb/r/fast_update_uint_bounds.result | 36 +-
.../mysql-test/tokudb/r/fast_update_varchar.result | 13575 +-------
.../mysql-test/tokudb/r/fast_upsert_bin_pad.result | Bin 659 -> 738 bytes
.../mysql-test/tokudb/r/fast_upsert_char.result | 24 +-
.../tokudb/r/fast_upsert_deadlock.result | 19 +-
.../mysql-test/tokudb/r/fast_upsert_int.result | 428 +-
.../mysql-test/tokudb/r/fast_upsert_key.result | 43 +-
.../mysql-test/tokudb/r/fast_upsert_sqlmode.result | 23 +-
.../mysql-test/tokudb/r/fast_upsert_values.result | 18 +-
.../tokudb/mysql-test/tokudb/r/tokudb_mrr.result | 326 +
storage/tokudb/mysql-test/tokudb/suite.pm | 6 +
.../tokudb/mysql-test/tokudb/t/compressions.test | 68 +
storage/tokudb/mysql-test/tokudb/t/disabled.def | 24 -
.../tokudb/t/fast_update_binlog_mixed-master.opt | 2 +
.../tokudb/t/fast_update_binlog_mixed.test | 15 +-
.../tokudb/t/fast_update_binlog_row-master.opt | 2 +
.../tokudb/t/fast_update_binlog_row.test | 19 +-
.../t/fast_update_binlog_statement-master.opt | 2 +
.../tokudb/t/fast_update_binlog_statement.test | 15 +-
.../mysql-test/tokudb/t/fast_update_blobs.py | 57 -
.../mysql-test/tokudb/t/fast_update_blobs.test | 18575 +----------
.../tokudb/t/fast_update_blobs_fixed_varchar.py | 63 -
.../tokudb/t/fast_update_blobs_fixed_varchar.test | 33287 -------------------
.../tokudb/t/fast_update_blobs_with_varchar.py | 62 -
.../tokudb/t/fast_update_blobs_with_varchar.test | 33115 +-----------------
.../mysql-test/tokudb/t/fast_update_char.test | 66 +-
.../mysql-test/tokudb/t/fast_update_deadlock.test | 21 +-
.../mysql-test/tokudb/t/fast_update_decr_floor.py | 58 -
.../tokudb/t/fast_update_decr_floor.test | 409 +-
.../tokudb/t/fast_update_disable_slow_update.test | 17 -
.../mysql-test/tokudb/t/fast_update_error.test | 16 +-
.../tokudb/mysql-test/tokudb/t/fast_update_int.py | 77 -
.../mysql-test/tokudb/t/fast_update_int.test | 682 +-
.../tokudb/t/fast_update_int_bounds.test | 55 +-
.../mysql-test/tokudb/t/fast_update_key.test | 63 +-
.../mysql-test/tokudb/t/fast_update_sqlmode.test | 25 +-
.../tokudb/t/fast_update_uint_bounds.test | 42 +-
.../mysql-test/tokudb/t/fast_update_varchar.py | 63 -
.../mysql-test/tokudb/t/fast_update_varchar.test | 7390 +---
.../mysql-test/tokudb/t/fast_upsert_bin_pad.test | 19 +-
.../mysql-test/tokudb/t/fast_upsert_char.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_deadlock.test | 22 +-
.../tokudb/mysql-test/tokudb/t/fast_upsert_int.py | 50 -
.../mysql-test/tokudb/t/fast_upsert_int.test | 486 +-
.../mysql-test/tokudb/t/fast_upsert_key.test | 46 +-
.../mysql-test/tokudb/t/fast_upsert_sqlmode.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_values.test | 21 +-
storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test | 73 +
.../tokudb/mysql-test/tokudb_bugs/r/PS-3773.result | 8 +
.../r/alter_table_comment_rebuild_data.result | 177 +
.../tokudb/mysql-test/tokudb_bugs/t/PS-3773.test | 26 +
.../t/alter_table_comment_rebuild_data.test | 188 +
storage/tokudb/tokudb_debug.h | 5 -
storage/tokudb/tokudb_dir_cmd.h | 6 +-
storage/tokudb/tokudb_information_schema.cc | 74 +-
storage/tokudb/tokudb_sysvars.cc | 122 +-
storage/tokudb/tokudb_sysvars.h | 16 +-
storage/tokudb/tokudb_thread.h | 26 +-
storage/tokudb/tokudb_update_fun.cc | 230 +-
192 files changed, 3936 insertions(+), 194538 deletions(-)
diff --cc storage/tokudb/CMakeLists.txt
index 3099e704497,0ac3c20bf16..72fbe45cfc9
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@@ -1,11 -1,7 +1,11 @@@
- SET(TOKUDB_VERSION 5.6.39-83.1)
-SET(TOKUDB_VERSION )
++SET(TOKUDB_VERSION 5.6.41-84.1)
# PerconaFT only supports x86-64 and cmake-2.8.9+
-IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
- NOT CMAKE_VERSION VERSION_LESS "2.8.9")
+IF(CMAKE_VERSION VERSION_LESS "2.8.9")
+ MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
+ELSEIF(NOT HAVE_DLOPEN)
+ MESSAGE(STATUS "dlopen is required by TokuDB")
+ELSEIF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR
+ CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
CHECK_CXX_SOURCE_COMPILES(
"
struct a {int b; int c; };
diff --cc storage/tokudb/PerconaFT/ft/ft.h
index 7a3c4fa783c,7a3c4fa783c..ff0b63b2b12
--- a/storage/tokudb/PerconaFT/ft/ft.h
+++ b/storage/tokudb/PerconaFT/ft/ft.h
@@@ -44,6 -44,6 +44,9 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "ft/ft-ops.h"
#include "ft/logger/log.h"
#include "util/dbt.h"
++#ifndef TOKU_MYSQL_WITH_PFS
++#include <my_global.h>
++#endif
typedef struct ft *FT;
typedef struct ft_options *FT_OPTIONS;
diff --cc storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
index b7b4c0ab233,6f69c3c31b9..d742555f878
--- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
+++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
@@@ -18,7 -18,7 +18,7 @@@ int toku_pthread_create(const toku_inst
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg) {
- #if (MYSQL_VERSION_MAJOR >= 5) && (MYSQL_VERSION_MINOR >= 7)
-#if (MYSQL_VERSION_ID >= 50700)
++#if (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
return PSI_THREAD_CALL(spawn_thread)(
key.id(), reinterpret_cast<my_thread_handle *>(thread),
attr, start_routine, arg);
diff --cc storage/tokudb/ha_tokudb.cc
index 7a328e31261,548ac5c7b09..4637ac1bf5f
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@@ -34,20 -34,7 +34,20 @@@ Copyright (c) 2006, 2015, Percona and/o
pfs_key_t ha_tokudb_mutex_key;
pfs_key_t num_DBs_lock_key;
- #if TOKU_INCLUDE_EXTENDED_KEYS
++#if defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+static inline uint get_ext_key_parts(const KEY *key) {
+#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
+ (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
+ return key->actual_key_parts;
+#elif defined(MARIADB_BASE_VERSION)
+ return key->ext_key_parts;
+#else
+#error
+#endif
+}
- #endif
++#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+
- HASH TOKUDB_SHARE::_open_tables;
+ std::unordered_map<std::string, TOKUDB_SHARE*> TOKUDB_SHARE::_open_tables;
tokudb::thread::mutex_t TOKUDB_SHARE::_open_tables_mutex;
static const char* ha_tokudb_exts[] = {
@@@ -7221,8 -7262,8 +7263,8 @@@ int ha_tokudb::create
form->s->write_frm_image();
#endif
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- const tokudb::sysvars::format_t row_format =
+ const tokudb::sysvars::row_format_t row_format =
(tokudb::sysvars::row_format_t)form->s->option_struct->row_format;
#else
// TDB-76 : CREATE TABLE ... LIKE ... does not use source row_format on
diff --cc storage/tokudb/ha_tokudb.h
index a2fd747bb92,1f47308c978..6f592617b76
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@@ -1072,7 -1085,28 +1085,8 @@@ private
bool in_rpl_write_rows;
bool in_rpl_delete_rows;
bool in_rpl_update_rows;
+ #endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
};
-#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-struct ha_table_option_struct {
- uint row_format;
-};
-
-struct ha_index_option_struct {
- bool clustering;
-};
-
-static inline bool key_is_clustering(const KEY *key) {
- return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
-}
-
-#else
-
-static inline bool key_is_clustering(const KEY *key) {
- return key->flags & HA_CLUSTERING;
-}
-#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-
#endif // _HA_TOKUDB_H
diff --cc storage/tokudb/ha_tokudb_update.cc
index 9fe5e729ec4,5b09279afc5..bae19ba8b16
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@@ -52,6 -50,6 +50,7 @@@ Copyright (c) 2006, 2015, Percona and/o
// Support more complicated update expressions
// Replace field_offset
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
// Debug function to dump an Item
static void dump_item(Item* item) {
fprintf(stderr, "%u", item->type());
@@@ -1131,5 -1127,3 +1128,4 @@@ int ha_tokudb::send_upsert_message
return error;
}
-
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
diff --cc storage/tokudb/hatoku_defines.h
index 92d7da86edf,e2fbe85b3b1..66a8fa5d982
--- a/storage/tokudb/hatoku_defines.h
+++ b/storage/tokudb/hatoku_defines.h
@@@ -35,8 -35,8 +35,8 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "log.h"
#include "sql_class.h"
#include "sql_show.h"
- #include "discover.h"
+ #include "item_cmpfunc.h"
-#include <binlog.h>
+//#include <binlog.h>
#include "debug_sync.h"
#undef PACKAGE
@@@ -117,20 -142,21 +142,22 @@@
#endif
#endif
#define TOKU_OPTIMIZE_WITH_RECREATE 1
+ #define TOKU_INCLUDE_RFR 1
#elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599
-// mysql 5.5 and mariadb 5.5
#define TOKU_USE_DB_TYPE_TOKUDB 1
-#define TOKU_INCLUDE_ALTER_56 1
-#define TOKU_INCLUDE_ALTER_55 1
-#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1
+#define TOKU_INCLUDE_ALTER_56 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ALTER_55 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0 /* MariaDB 5.5 */
#define TOKU_INCLUDE_XA 1
-#define TOKU_INCLUDE_WRITE_FRM_DATA 1
-#define TOKU_PARTITION_WRITE_FRM_DATA 1
+#define TOKU_PARTITION_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+ #define TOKU_INCLUDE_DISCOVER_FRM 1
-#define TOKU_INCLUDE_UPSERT 1
+#define TOKU_INCLUDE_UPSERT 0 /* MariaDB 5.5 */
#if defined(MARIADB_BASE_VERSION)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
+#define TOKU_INCLUDE_OPTION_STRUCTS 1
+#define TOKU_CLUSTERING_IS_COVERING 1
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
#else
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
diff --cc storage/tokudb/hatoku_hton.cc
index 693f9d28a9a,610c9e07be0..ce5e396146b
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@@ -62,14 -76,16 +64,16 @@@ static bool tokudb_show_status
THD* thd,
stat_print_fn* print,
enum ha_stat_type);
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig);
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static int tokudb_close_connection(handlerton* hton, THD* thd);
-static void tokudb_kill_connection(handlerton *hton, THD *thd);
+static void tokudb_kill_connection(handlerton *hton, THD *thd, enum thd_kill_levels level);
static int tokudb_commit(handlerton* hton, THD* thd, bool all);
static int tokudb_rollback(handlerton* hton, THD* thd, bool all);
- #if TOKU_INCLUDE_XA
+ #if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all);
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len);
static int tokudb_commit_by_xid(handlerton* hton, XID* xid);
@@@ -120,8 -138,8 +126,8 @@@ handlerton* tokudb_hton
const char* ha_tokudb_ext = ".tokudb";
DB_ENV* db_env;
-#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static tokudb::thread::mutex_t tokudb_map_mutex;
- #if TOKU_THDVAR_MEMALLOC_BUG
++#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static TREE tokudb_map;
struct tokudb_map_pair {
THD* thd;
@@@ -388,14 -408,16 +396,16 @@@ static int tokudb_init_func(void *p)
tokudb_hton->panic = tokudb_end;
tokudb_hton->flush_logs = tokudb_flush_logs;
tokudb_hton->show_status = tokudb_show_status;
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
tokudb_hton->handle_fatal_signal = tokudb_handle_fatal_signal;
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- tokudb_hton->table_options = tokudb_table_options;
- tokudb_hton->index_options = tokudb_index_options;
+ tokudb_hton->table_options = tokudb::sysvars::tokudb_table_options;
+ tokudb_hton->index_options = tokudb::sysvars::tokudb_index_options;
- #endif
+ #endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
if (!tokudb_home)
tokudb_home = mysql_real_data_home;
@@@ -784,8 -807,7 +795,8 @@@ static int tokudb_close_connection(TOKU
return error;
}
- void tokudb_kill_connection(handlerton *hton, THD *thd,
- enum thd_kill_levels level) {
-void tokudb_kill_connection(TOKUDB_UNUSED(handlerton* hton), THD* thd) {
++void tokudb_kill_connection(TOKUDB_UNUSED(handlerton *hton), THD *thd,
++ TOKUDB_UNUSED(enum thd_kill_levels level)) {
TOKUDB_DBUG_ENTER("");
db_env->kill_waiter(db_env, thd);
DBUG_VOID_RETURN;
@@@ -883,7 -905,7 +894,7 @@@ extern "C" enum durability_properties t
#endif
// Determine if an fsync is used when a transaction is committed.
- static bool tokudb_sync_on_commit(THD* thd, tokudb_trx_data* trx, DB_TXN* txn) {
-static bool tokudb_sync_on_commit(THD* thd) {
++static bool tokudb_sync_on_commit(THD* thd, DB_TXN* txn) {
#if MYSQL_VERSION_ID >= 50600
// Check the client durability property which is set during 2PC
if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
@@@ -906,8 -928,7 +917,8 @@@ static int tokudb_commit(handlerton * h
DB_TXN **txn = all ? &trx->all : &trx->stmt;
DB_TXN *this_txn = *txn;
if (this_txn) {
- uint32_t syncflag = tokudb_sync_on_commit(thd) ? 0 : DB_TXN_NOSYNC;
+ uint32_t syncflag =
- tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
++ tokudb_sync_on_commit(thd, this_txn) ? 0 : DB_TXN_NOSYNC;
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_TXN,
"commit trx %u txn %p syncflag %u",
diff --cc storage/tokudb/mysql-test/rpl/disabled.def
index 4c1a9a3e785,00000000000..282e343d57f
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/rpl/disabled.def
+++ b/storage/tokudb/mysql-test/rpl/disabled.def
@@@ -1,15 -1,0 +1,16 @@@
+rpl_tokudb_delete_pk: unreliable, uses timestamp differences
+rpl_tokudb_delete_pk_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_pk_uc1_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup0: unreliable, uses timestamp differences
+rpl_tokudb_update_unique_uc0_lookup1: unreliable, uses timestamp differences
+rpl_tokudb_write_pk: unreliable, uses timestamp differences
+rpl_tokudb_write_pk_uc1: unreliable, uses timestamp differences
+rpl_tokudb_write_unique: unreliable, uses timestamp differences
+rpl_tokudb_write_unique_uc1: unreliable, uses timestamp differences
+rpl_tokudb_read_only_ff: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tf: unreliable, uses timestamp differences
+rpl_tokudb_read_only_tt: unreliable, uses timestamp differences
++rpl_tokudb_read_only_ft: no TOKU_INCLUDE_RFR
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
index 5935e5ddcbd,afbc4b50da8..48ea60013ad
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_delete_pk.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
index 8bb426d9448,7aab8947940..10375677c8d
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_update_pk_uc0_lookup0.result
@@@ -3,11 -8,11 +3,6 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_%';
Variable_name Value
--tokudb_rpl_check_readonly ON
--tokudb_rpl_lookup_rows OFF
--tokudb_rpl_lookup_rows_delay 10000
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 10000
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
index ca547e34be2,64b495350c2..1cb047bbf62
--- a/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_parallel_tokudb_write_pk.result
@@@ -3,9 -8,10 +3,7 @@@ include/master-slave.in
drop table if exists t;
show variables like 'tokudb_rpl_unique_checks%';
Variable_name Value
--tokudb_rpl_unique_checks OFF
--tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
-select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
diff --cc storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
index 00000000000,72e8644f7f2..53564ab0fe4
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_xa_interleave.result
@@@ -1,0 -1,62 +1,59 @@@
+ include/master-slave.inc
-Warnings:
-Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
-Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
+ [connection master]
+ CREATE TABLE t1(`a` INT) ENGINE=TokuDB;
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA COMMIT 'x1';
+ XA COMMIT 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA COMMIT 'x2';
+ XA COMMIT 'y2';
+ XA START 'x1';
+ INSERT INTO t1 VALUES (1);
+ XA END 'x1';
+ XA PREPARE 'x1';
+ BEGIN;
+ INSERT INTO t1 VALUES (10);
+ COMMIT;
+ XA START 'y1';
+ INSERT INTO t1 VALUES (2);
+ XA END 'y1';
+ XA PREPARE 'y1';
+ XA ROLLBACK 'x1';
+ XA ROLLBACK 'y1';
+ BEGIN;
+ INSERT INTO t1 VALUES (11);
+ COMMIT;
+ XA START 'x2';
+ INSERT INTO t1 VALUES (3);
+ XA END 'x2';
+ XA PREPARE 'x2';
+ XA START 'y2';
+ INSERT INTO t1 VALUES (4);
+ XA END 'y2';
+ XA PREPARE 'y2';
+ XA ROLLBACK 'x2';
+ XA ROLLBACK 'y2';
+ TABLES t1 and t2 must be equal otherwise an error will be thrown.
+ include/diff_tables.inc [master:test.t1, slave:test.t1]
+ DROP TABLE t1;
+ include/rpl_end.inc
diff --cc storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
index 00000000000,00000000000..e69de29bb2d
new file mode 100644
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
diff --cc storage/tokudb/mysql-test/tokudb/r/compressions.result
index 00000000000,87ba94ebbe8..03e0d18e9eb
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/compressions.result
+++ b/storage/tokudb/mysql-test/tokudb/r/compressions.result
@@@ -1,0 -1,6 +1,11 @@@
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
++FOUND /compression_method=0/ in dump
++FOUND /compression_method=7/ in dump
++FOUND /compression_method=9/ in dump
++FOUND /compression_method=10/ in dump
++FOUND /compression_method=11/ in dump
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
index 00000000000,9eb0c2f5e34..ba469a3ac96
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
+++ b/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
@@@ -1,0 -1,706 +1,326 @@@
-set optimizer_switch='mrr=on,mrr_cost_based=off';
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+ create table t1(a int);
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
+ insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+ create table t2(a int);
+ insert into t2 select A.a + 10*(B.a + 10*C.a) from t1 A, t1 B, t1 C;
+ create table t3 (
+ a char(8) not null, b char(8) not null, filler char(200),
+ key(a)
+ );
+ insert into t3 select @a:=concat('c-', 1000+ A.a, '=w'), @a, 'filler' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 2000+A.a, '=w'),
+ 'filler-1' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 3000+A.a, '=w'),
+ 'filler-2' from t2 A;
+ select a,filler from t3 where a >= 'c-9011=w';
+ a filler
+ select a,filler from t3 where a >= 'c-1011=w' and a <= 'c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=z', 'c-1013=z', 'err');
+ insert into t3 values ('a-1014=w', 'a-1014=w', 'err');
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ delete from t3 where b in ('c-1013=z', 'a-1014=w');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=w', 'del-me', 'inserted');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
-c-1013=w inserted
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
++c-1013=w inserted
+ delete from t3 where b='del-me';
+ alter table t3 add primary key(b);
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w', 'c-1021=w',
+ 'c-1022=w', 'c-1023=w', 'c-1024=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ c-1024=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1020=w') or
+ b IN ('c-1021=w', 'c-1022=w', 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w') or
+ (b>='c-1021=w' and b<= 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
++drop table if exists t4;
+ create table t4 (a varchar(10), b int, c char(10), filler char(200),
+ key idx1 (a, b, c));
+ insert into t4 (filler) select concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-1', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-222', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'bb-1',NULL,'cc-2', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'zz-1',NULL,'cc-2', 'filler-data' from t2 order by a limit 500;
+ explain
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Rowid-ordered scan
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ a b c filler
+ NULL NULL NULL NULL-15
+ NULL NULL NULL NULL-14
+ NULL NULL NULL NULL-13
+ NULL NULL NULL NULL-12
+ NULL NULL NULL NULL-11
+ NULL NULL NULL NULL-10
+ NULL NULL NULL NULL-9
+ NULL NULL NULL NULL-8
+ NULL NULL NULL NULL-7
+ NULL NULL NULL NULL-6
+ NULL NULL NULL NULL-5
+ NULL NULL NULL NULL-4
+ NULL NULL NULL NULL-3
+ NULL NULL NULL NULL-2
+ NULL NULL NULL NULL-1
+ explain
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Rowid-ordered scan
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ select * from t4 ignore index(idx1) where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ drop table t1, t2, t3, t4;
+ create table t1 (a int, b int not null,unique key (a,b),index(b));
+ insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6);
++Warnings:
++Warning 1062 Duplicate entry '6-6' for key 'a'
+ create table t2 like t1;
+ insert into t2 select * from t1;
+ alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
+ select * from t1 where a is null;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3;
+ a b c
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ drop table t1, t2;
-CREATE TABLE t1 (
-ID int(10) unsigned NOT NULL AUTO_INCREMENT,
-col1 int(10) unsigned DEFAULT NULL,
-key1 int(10) unsigned NOT NULL DEFAULT '0',
-key2 int(10) unsigned DEFAULT NULL,
-text1 text,
-text2 text,
-col2 smallint(6) DEFAULT '100',
-col3 enum('headers','bodyandsubject') NOT NULL DEFAULT 'bodyandsubject',
-col4 tinyint(3) unsigned NOT NULL DEFAULT '0',
-PRIMARY KEY (ID),
-KEY (key1),
-KEY (key2)
-) AUTO_INCREMENT=6 DEFAULT CHARSET=utf8;
-INSERT INTO t1 VALUES
-(1,NULL,1130,NULL,'Hello',NULL,100,'bodyandsubject',0),
-(2,NULL,1130,NULL,'bye',NULL,100,'bodyandsubject',0),
-(3,NULL,1130,NULL,'red',NULL,100,'bodyandsubject',0),
-(4,NULL,1130,NULL,'yellow',NULL,100,'bodyandsubject',0),
-(5,NULL,1130,NULL,'blue',NULL,100,'bodyandsubject',0);
-select * FROM t1 WHERE key1=1130 AND col1 IS NULL ORDER BY text1;
-ID col1 key1 key2 text1 text2 col2 col3 col4
-5 NULL 1130 NULL blue NULL 100 bodyandsubject 0
-2 NULL 1130 NULL bye NULL 100 bodyandsubject 0
-1 NULL 1130 NULL Hello NULL 100 bodyandsubject 0
-3 NULL 1130 NULL red NULL 100 bodyandsubject 0
-4 NULL 1130 NULL yellow NULL 100 bodyandsubject 0
-drop table t1;
-
-BUG#37851: Crash in test_if_skip_sort_order tab->select is zero
-
-CREATE TABLE t1 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1 VALUES (1);
-CREATE TABLE t2 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-int_key int(11) DEFAULT NULL,
-PRIMARY KEY (pk),
-KEY int_key (int_key)
-);
-INSERT INTO t2 VALUES (1,1),(2,6),(3,0);
-EXPLAIN EXTENDED
-SELECT MIN(t1.pk)
-FROM t1 WHERE EXISTS (
-SELECT t2.pk
-FROM t2
-WHERE t2.int_key IS NULL
-GROUP BY t2.pk
-);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t2 ref int_key int_key 5 const 1 100.00 Using where; Using index
-Warnings:
-Note 1003 /* select#1 */ select min(`test`.`t1`.`pk`) AS `MIN(t1.pk)` from `test`.`t1` where 0
-DROP TABLE t1, t2;
-#
-# BUG#42048 Discrepancy between MyISAM and Maria's ICP implementation
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b char(20), filler char(200), key(a,b(10)));
-insert into t1 select A.a + 10*(B.a + 10*C.a), 'bbb','filler' from t0 A, t0 B, t0 C;
-update t1 set b=repeat(char(65+a), 20) where a < 25;
-This must show range + using index condition:
-explain select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL x Using where
-select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-a b filler
-0 AAAAAAAAAAAAAAAAAAAA filler
-1 BBBBBBBBBBBBBBBBBBBB filler
-2 CCCCCCCCCCCCCCCCCCCC filler
-3 DDDDDDDDDDDDDDDDDDDD filler
-4 EEEEEEEEEEEEEEEEEEEE filler
-5 FFFFFFFFFFFFFFFFFFFF filler
-6 GGGGGGGGGGGGGGGGGGGG filler
-7 HHHHHHHHHHHHHHHHHHHH filler
-8 IIIIIIIIIIIIIIIIIIII filler
-9 JJJJJJJJJJJJJJJJJJJJ filler
-drop table t0,t1;
-#
-# BUG#41136: ORDER BY + range access: EXPLAIN shows "Using MRR" while MRR is actually not used
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b int, key(a));
-insert into t1 select A.a + 10 *(B.a + 10*C.a), A.a + 10 *(B.a + 10*C.a) from t0 A, t0 B, t0 C;
-This mustn't show "Using MRR":
-explain select * from t1 where a < 20 order by a;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 20 Using where
-drop table t0, t1;
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=64;
-create table t1(a int);
-insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t2(a char(8), b char(8), c char(8), filler char(100), key k1(a,b,c) );
-insert into t2 select
-concat('a-', 1000 + A.a, '-a'),
-concat('b-', 1000 + B.a, '-b'),
-concat('c-', 1000 + C.a, '-c'),
-'filler'
-from t1 A, t1 B, t1 C;
-EXPLAIN select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k1 k1 9 NULL 100 Using where; Using MRR
-select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-count(length(a) + length(filler))
-100
-drop table t2;
-create table t2 (a char(100), b char(100), c char(100), d int,
-filler char(10), key(d), primary key (a,b,c));
-insert into t2 select A.a, B.a, B.a, A.a, 'filler' from t1 A, t1 B;
-explain select * from t2 force index (d) where d < 10;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range d d 5 NULL # Using where
-drop table t2;
-drop table t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-create table t1 (f1 int not null, f2 int not null,f3 int not null, f4 char(1), primary key (f1,f2), key ix(f3));
-select * from t1 where (f3>=5 and f3<=10) or (f3>=1 and f3<=4);
-f1 f2 f3 f4
-1 1 1 A
-10 10 10 A
-2 2 2 A
-3 3 3 A
-4 4 4 A
-5 5 5 A
-6 6 6 A
-7 7 7 A
-8 8 8 A
-9 9 9 A
-drop table t1;
-
-BUG#37977: Wrong result returned on GROUP BY + OR + Innodb
-
-CREATE TABLE t1 (
-`pk` int(11) NOT NULL AUTO_INCREMENT,
-`int_nokey` int(11) NOT NULL,
-`int_key` int(11) NOT NULL,
-`date_key` date NOT NULL,
-`date_nokey` date NOT NULL,
-`time_key` time NOT NULL,
-`time_nokey` time NOT NULL,
-`datetime_key` datetime NOT NULL,
-`datetime_nokey` datetime NOT NULL,
-`varchar_key` varchar(5) DEFAULT NULL,
-`varchar_nokey` varchar(5) DEFAULT NULL,
-PRIMARY KEY (`pk`),
-KEY `int_key` (`int_key`),
-KEY `date_key` (`date_key`),
-KEY `time_key` (`time_key`),
-KEY `datetime_key` (`datetime_key`),
-KEY `varchar_key` (`varchar_key`)
-);
-INSERT INTO t1 VALUES
-(1,5,5,'2009-10-16','2009-10-16','09:28:15','09:28:15','2007-09-14 05:34:08','2007-09-14 05:34:08','qk','qk'),
-(2,6,6,'0000-00-00','0000-00-00','23:06:39','23:06:39','0000-00-00 00:00:00','0000-00-00 00:00:00','j','j'),
-(3,10,10,'2000-12-18','2000-12-18','22:16:19','22:16:19','2006-11-04 15:42:50','2006-11-04 15:42:50','aew','aew'),
-(4,0,0,'2001-09-18','2001-09-18','00:00:00','00:00:00','2004-03-23 13:23:35','2004-03-23 13:23:35',NULL,NULL),
-(5,6,6,'2007-08-16','2007-08-16','22:13:38','22:13:38','2004-08-19 11:01:28','2004-08-19 11:01:28','qu','qu');
-select pk from t1 WHERE `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-select pk from t1 WHERE `int_nokey` IS NULL OR `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-drop table t1;
-#
-# BUG#39447: Error with NOT NULL condition and LIMIT 1
-#
-CREATE TABLE t1 (
-id int(11) NOT NULL,
-parent_id int(11) DEFAULT NULL,
-name varchar(10) DEFAULT NULL,
-PRIMARY KEY (id),
-KEY ind_parent_id (parent_id)
-);
-insert into t1 (id, parent_id, name) values
-(10,NULL,'A'),
-(20,10,'B'),
-(30,10,'C'),
-(40,NULL,'D'),
-(50,40,'E'),
-(60,40,'F'),
-(70,NULL,'J');
-SELECT id FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id
-60
-This must show type=index, extra=Using where
-explain SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index ind_parent_id PRIMARY 4 NULL 1 Using where
-SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id parent_id name
-60 40 F
-drop table t1;
-#
-# Bug#50381 "Assertion failing in handler.h:1283:
-# void COST_VECT::add_io(double, double)"
-#
-CREATE TABLE t1 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-CREATE TABLE t2 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-INSERT INTO t2 VALUES (10,'v');
-INSERT INTO t2 VALUES (11,'r');
-SELECT t1.c2
-FROM t2 STRAIGHT_JOIN t1 ON t1.c1 < t2.c1;
-c2
-DROP TABLE t1, t2;
-#
-# Bug#58463: Error Can't find record on SELECT with JOIN and ORDER BY
-#
-CREATE TABLE t1 (
-pk INT NOT NULL,
-PRIMARY KEY (pk)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (2);
-CREATE TABLE t2 (
-pk INT NOT NULL,
-i1 INT NOT NULL,
-i2 INT NOT NULL,
-c1 VARCHAR(1024) CHARACTER SET utf8,
-PRIMARY KEY (pk),
-KEY k1 (i1)
-);
-INSERT INTO t2 VALUES (3, 9, 1, NULL);
-EXPLAIN SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 NULL
-1 SIMPLE t2 const PRIMARY,k1 PRIMARY 4 const 1 Using where
-SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-i1
-DROP TABLE t1, t2;
-#
-# Bug#12321461: CRASH IN DSMRR_IMPL::DSMRR_INIT ON SELECT STRAIGHT_JOIN
-#
-set @save_optimizer_switch = @@optimizer_switch;
-set optimizer_switch='block_nested_loop=off,batched_key_access=off';
-CREATE TABLE t1 (
-pk INTEGER,
-c1 VARCHAR(1) NOT NULL,
-PRIMARY KEY (pk)
-);
-CREATE TABLE t2 (
-c1 VARCHAR(1) NOT NULL
-);
-INSERT INTO t2 VALUES ('v'), ('c');
-EXPLAIN SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 2 NULL
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where
-SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-c1
-DROP TABLE t1,t2;
-set optimizer_switch= @save_optimizer_switch;
-#
-# Bug#13249966 MRR: RANDOM ERROR DUE TO UNINITIALIZED RES WITH
-# SMALL READ_RND_BUFFER_SIZE
-#
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=1;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-1
-CREATE TABLE t1 (
-i1 INTEGER NOT NULL,
-i2 INTEGER NOT NULL,
-KEY (i2)
-);
-INSERT INTO t1 VALUES (0,1),(1,2),(2,3);
-EXPLAIN SELECT i1
-FROM t1
-WHERE i2 > 2;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i2 i2 4 NULL 1 Using where
-SELECT i1
-FROM t1
-WHERE i2 > 2;
-i1
-2
-DROP TABLE t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-262144
-#
-# Bug 12365385 STRAIGHT_JOIN QUERY QUICKLY EXHAUSTS SYSTEM+VIRT.
-# MEMORY LEADING TO SYSTEM CRASH
-#
-CREATE TABLE ten (a INTEGER);
-INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-CREATE TABLE t1 (
-pk INTEGER NOT NULL,
-i1 INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1
-SELECT a, 1, 'MySQL' FROM ten;
-CREATE TABLE t2 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-c2 varchar(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t2
-SELECT a, 'MySQL', 'MySQL' FROM ten;
-CREATE TABLE t3 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t3
-SELECT a, 'MySQL' FROM ten;
-CREATE TABLE t4 (
-pk int(11) NOT NULL,
-c1_key varchar(10) CHARACTER SET utf8 NOT NULL,
-c2 varchar(10) NOT NULL,
-c3 varchar(10) NOT NULL,
-PRIMARY KEY (pk),
-KEY k1 (c1_key)
-);
-CREATE TABLE t5 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t5
-SELECT a, 'MySQL' FROM ten;
-EXPLAIN SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t5 ALL NULL NULL NULL NULL 10 NULL
-1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t3 ALL NULL NULL NULL NULL 10 Using join buffer (Block Nested Loop)
-1 SIMPLE t4 ALL k1 NULL NULL NULL 1 Range checked for each record (index map: 0x2)
-SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-pk i1 c1 pk c1 c2 pk c1 pk c1_key c2 c3 pk c1
-DROP TABLE ten, t1, t2, t3, t4, t5;
+ #
+ # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ #
+ SET AUTOCOMMIT=0;
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ @@tx_isolation
+ REPEATABLE-READ
+ START TRANSACTION;
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ id select_type table type possible_keys key key_len ref rows Extra
+ 1 SIMPLE t1 range a a 5 NULL 2 Using where
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ dummy a b
+ 3 3 3
+ 5 5 5
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+ INSERT INTO t1 VALUES (2,2,2);
+ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ ROLLBACK;
+ ROLLBACK;
+ DROP TABLE t1;
diff --cc storage/tokudb/mysql-test/tokudb/suite.pm
index 6c52d0110fe,00000000000..70a65de4a2a
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/tokudb/suite.pm
+++ b/storage/tokudb/mysql-test/tokudb/suite.pm
@@@ -1,14 -1,0 +1,20 @@@
+package My::Suite::TokuDB;
+use File::Basename;
+@ISA = qw(My::Suite);
+
+# Ensure we can run the TokuDB tests even if hugepages are enabled
+$ENV{TOKU_HUGE_PAGES_OK}=1;
++my $exe_tokuftdump=
++ ::mtr_exe_maybe_exists(
++ ::vs_config_dirs('storage/tokudb/PerconaFT/tools', 'tokuftdump'),
++ "$::path_client_bindir/tokuftdump",
++ "$::basedir/storage/tokudb/PerconaFT/tools/tokuftdump");
++$ENV{'MYSQL_TOKUFTDUMP'}= ::native_path($exe_tokuftdump);
+
+#return "Not run for embedded server" if $::opt_embedded_server;
+return "No TokuDB engine" unless $ENV{HA_TOKUDB_SO} or $::mysqld_variables{tokudb};
+
+sub is_default { not $::opt_embedded_server }
+
+bless { };
+
diff --cc storage/tokudb/mysql-test/tokudb/t/compressions.test
index 00000000000,3e83cdb8b68..cd2e405c13a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/compressions.test
+++ b/storage/tokudb/mysql-test/tokudb/t/compressions.test
@@@ -1,0 -1,68 +1,68 @@@
+ --source include/have_tokudb.inc
+
+ # The purpose of this test is to perform about as full of an end-to-end
+ # validation that the requested compression algo at the SQL layer is actually
+ # applied to the FT data files. The only practical way to check this is to use
+ # tokuftdump and look at the data files header value for compression_method.
+ # A side effect of this is that the existance of this test will ensure that at
+ # no time will the compression method IDs ever change, if they do, this test
+ # will fail and users data will be irreparably damaged.
+
+ # uncompressed - compression_method=0
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
+ --let $t1_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t1-main'`
+
+ # SNAPPY - compression_method=7
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
+ --let $t2_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t2-main'`
+
+ # QUICKLZ - compression_method=9
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
+ --let $t3_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t3-main'`
+
+ # LZMA - compression_method=10
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
+ --let $t4_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t4-main'`
+
+ # ZLIB (without checksum) - compression_method=11
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
+ --let $t5_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t5-main'`
+
+ --let $datadir = `SELECT @@global.datadir`
+
+ # To ensure we have correct headers written to FT data files and no chance of a
+ # race between header rotation and tokuftdump, lets just perform a clean server
+ # shutdown before we go rooting around in the FT files.
+ --source include/shutdown_mysqld.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/dump
+
+ # uncompressed - compression_method=0
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t1_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=0
+ --source include/search_pattern_in_file.inc
+
+ # SNAPPY - compression_method=7
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t2_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=7
+ --source include/search_pattern_in_file.inc
+
+ # QUICKLZ - compression_method=9
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t3_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=9
+ --source include/search_pattern_in_file.inc
+
+ # LZMA - compression_method=10
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t4_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=10
+ --source include/search_pattern_in_file.inc
+
+ # ZLIB (without checksum) - compression_method=11
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t5_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=11
+ --source include/search_pattern_in_file.inc
+
+ --remove_file $SEARCH_FILE
+ --source include/start_mysqld.inc
+
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
index 00000000000,b30bc18d759..6130933b279
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
+++ b/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
@@@ -1,0 -1,89 +1,73 @@@
+ #
+ # MRR/Tokudb tests, taken from mysqltest/t/innodb_mrr.test
+ # (Turns off all other 6.0 optimizer switches than MRR)
+ #
+
+ --source include/have_tokudb.inc
+ --source include/have_mrr.inc
+
-set optimizer_switch='mrr=on,mrr_cost_based=off';
-
---disable_query_log
-if (`select locate('semijoin', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='semijoin=off';
-}
-if (`select locate('materialization', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='materialization=off';
-}
-if (`select locate('index_condition_pushdown', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='index_condition_pushdown=off';
-}
---enable_query_log
-
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+
+ --source include/mrr_tests.inc
+
+
+ # taken from include/mrr_innodb_tests.inc
+
+ --source include/count_sessions.inc
+
+ # MRR tests that are special for InnoDB (and copied for TokuDB)
+
+ --echo #
+ --echo # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ --echo #
+
+ # This test verifies that a SELECT FOR UPDATE statement executed in
+ # REPEATABLE READ isolation will lock the entire read interval by verifying
+ # that a second transaction trying to update data within this interval will
+ # be blocked.
+
+ connect (con1,localhost,root,,);
+ connect (con2,localhost,root,,);
+
+ connection con1;
+
+ SET AUTOCOMMIT=0;
+
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ START TRANSACTION;
+
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ connection con2;
+
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+
+ --error ER_LOCK_WAIT_TIMEOUT
+ INSERT INTO t1 VALUES (2,2,2);
+ ROLLBACK;
+
+ connection con1;
+
+ ROLLBACK;
+ DROP TABLE t1;
+
+ connection default;
+ disconnect con1;
+ disconnect con2;
+
+ --source include/wait_until_count_sessions.inc
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
index 00000000000,e2e695611b5..49c61790837
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
@@@ -1,0 -1,7 +1,8 @@@
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ SELECT * FROM t1 WHERE b = 2;
+ ERROR HY000: Incorrect key file for table 't1'; try to repair it
+ DROP TABLE t1;
++FOUND /ha_tokudb::read_full_row on table/ in tokudb.bugs.PS-3773.log
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
index 00000000000,f536f5163ef..1bd5aee087a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
@@@ -1,0 -1,186 +1,177 @@@
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ alter table t1 min_rows = 8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter min_rows]
+ alter table t1 max_rows = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter max_rows]
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter avg_row_length]
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter pack_keys]
+ alter table t1 character set = utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter character set]
+ alter table t1 data directory = '/tmp';
+ Warnings:
+ Warning 1618 <DATA DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter data directory]
+ alter table t1 index directory = '/tmp';
+ Warnings:
+ Warning 1618 <INDEX DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter index directory]
+ alter table t1 checksum = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter checksum]
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter delay_key_write]
+ alter table t1 comment = 'test table';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter comment]
+ alter table t1 password = '123456';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter password]
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter connection]
-alter table t1 key_block_size=32;
-show create table t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
- PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
-include/assert.inc [underlying ft file name not changed after alter key_block_size]
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_persistent]
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_auto_recalc]
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_sample_pages]
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter auto_increment]
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name not changed after alter compression method]
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter engine type]
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter convert character]
+ drop table t1;
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
index 00000000000,684f9cbf8d5..e9490e91c33
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
@@@ -1,0 -1,26 +1,26 @@@
+ --source include/have_tokudb.inc
+ --source include/have_debug.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/tokudb.bugs.PS-3773.log
---let $restart_parameters="restart: --log-error=$SEARCH_FILE"
++--let $restart_parameters="--log-error=$SEARCH_FILE"
+ --source include/restart_mysqld.inc
+
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
+
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ --error ER_NOT_KEYFILE
+ SELECT * FROM t1 WHERE b = 2;
+
+ DROP TABLE t1;
+
+ --let SEARCH_PATTERN=ha_tokudb::read_full_row on table
+ --source include/search_pattern_in_file.inc
+
+ --let $restart_parameters=
+ --source include/restart_mysqld.inc
+
+ --remove_file $SEARCH_FILE
+ --let SEARCH_PATTERN=
+ --let SEARCH_FILE=
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
index 00000000000,fc4f3e0fd3d..e0e043f96ab
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
@@@ -1,0 -1,184 +1,188 @@@
+ --source include/have_tokudb.inc
+
+ #
+ # Create a table and get the underlying main ft file name
+ #
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ --let $ori_file= `select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+
+ #
+ # Case 1: alter create options that are ignored by TokuDB
+ #
+
+ # Alter table with min_rows
+ alter table t1 min_rows = 8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter min_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with max_rows
+ alter table t1 max_rows = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter max_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with avg_row_length
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter avg_row_length
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with pack_keys
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter pack_keys
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with default character set
+ alter table t1 character set = utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter character set
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with data directory
+ alter table t1 data directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter data directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with index directory
+ alter table t1 index directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter index directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with checksum
+ alter table t1 checksum = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter checksum
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with delay_key_write
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter delay_key_write
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with comment
+ alter table t1 comment = 'test table';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter comment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with password
+ alter table t1 password = '123456';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter password
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with connection
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter connection
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
++
++#
++# In mariadb changing of key_block_size treated as index change
++#
+ # Alter table with key_block_size
-alter table t1 key_block_size=32;
-show create table t1;
---let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
---let $assert_text= underlying ft file name not changed after alter key_block_size
---let $assert_cond= "$ori_file" = "$new_file"
---source include/assert.inc
++#alter table t1 key_block_size=32;
++#show create table t1;
++#--let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
++#--let $assert_text= underlying ft file name not changed after alter key_block_size
++#--let $assert_cond= "$ori_file" = "$new_file"
++#--source include/assert.inc
+
+ # Alter table with stats_persistent
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_persistent
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_auto_recalc
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_auto_recalc
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_sample_pages
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_sample_pages
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 2: alter create options that only update meta info, i.e inplace
+ #
+
+ # Alter table with auto_increment
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter auto_increment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with compression method
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter compression method
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 3: alter create options that rebuild table using copy algorithm
+ #
+
+ # Alter table with engine type
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter engine type
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ # Alter table with convert character
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter convert character
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ #
+ # clean up
+ #
+ drop table t1;
diff --cc storage/tokudb/tokudb_sysvars.cc
index bbc39dc550a,e8e9f908275..7771204dc11
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@@ -1006,9 -1075,12 +1002,9 @@@ st_mysql_sys_var* system_variables[] =
MYSQL_SYSVAR(support_xa),
#endif
- #if TOKUDB_DEBUG
+ #if defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(debug_pause_background_job_manager),
-#endif // defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(dir_cmd_last_error),
- MYSQL_SYSVAR(dir_cmd_last_error_string),
- MYSQL_SYSVAR(dir_cmd),
+ MYSQL_SYSVAR(debug_pause_background_job_manager),
+#endif // TOKUDB_DEBUG
NULL
};
@@@ -1055,14 -1127,12 +1051,14 @@@ my_bool disable_prefetching(THD* thd)
my_bool disable_slow_alter(THD* thd) {
return (THDVAR(thd, disable_slow_alter) != 0);
}
- #if TOKU_INCLUDE_UPSERT
- my_bool disable_slow_update(THD* thd) {
- return (THDVAR(thd, disable_slow_update) != 0);
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
+ my_bool enable_fast_update(THD* thd) {
+ return (THDVAR(thd, enable_fast_update) != 0);
}
- my_bool disable_slow_upsert(THD* thd) {
- return (THDVAR(thd, disable_slow_upsert) != 0);
+ my_bool enable_fast_upsert(THD* thd) {
+ return (THDVAR(thd, enable_fast_upsert) != 0);
}
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
empty_scan_mode_t empty_scan(THD* thd) {
return (empty_scan_mode_t)THDVAR(thd, empty_scan);
}
@@@ -1139,17 -1211,5 +1137,17 @@@ my_bool support_xa(THD* thd)
return (THDVAR(thd, support_xa) != 0);
}
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+ha_create_table_option tokudb_table_options[] = {
+ HA_TOPTION_SYSVAR("compression", row_format, row_format),
+ HA_TOPTION_END
+};
+
+ha_create_table_option tokudb_index_options[] = {
+ HA_IOPTION_BOOL("clustering", clustering, 0),
+ HA_IOPTION_END
+};
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
} // namespace sysvars
} // namespace tokudb
diff --cc storage/tokudb/tokudb_sysvars.h
index 7701f211729,d81d5fd7999..2454f8fefd2
--- a/storage/tokudb/tokudb_sysvars.h
+++ b/storage/tokudb/tokudb_sysvars.h
@@@ -26,26 -26,6 +26,26 @@@ Copyright (c) 2006, 2015, Percona and/o
#ifndef _TOKUDB_SYSVARS_H
#define _TOKUDB_SYSVARS_H
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+struct ha_table_option_struct {
+ uint row_format;
+};
+
+struct ha_index_option_struct {
+ bool clustering;
+};
+
+static inline bool key_is_clustering(const KEY *key) {
+ return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
+}
+
+#else
+
+static inline bool key_is_clustering(const KEY *key) {
+ return key->flags & HA_CLUSTERING;
+}
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
namespace tokudb {
namespace sysvars {
1
0
revision-id: db9c1fb454af71cf564aac8a6901238e66190549 (mariadb-10.1.35-37-gdb9c1fb454a)
parent(s): 6ca6f25d4e96a479eb144a8da1066a27d0abce40
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-06 19:31:25 +0300
message:
Add wait and output.
---
mysql-test/suite/galera/r/MW-44.result | 7 ++++---
mysql-test/suite/galera/t/MW-44.test | 5 ++++-
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/mysql-test/suite/galera/r/MW-44.result b/mysql-test/suite/galera/r/MW-44.result
index 394c749b253..7e3d2f4b7ec 100644
--- a/mysql-test/suite/galera/r/MW-44.result
+++ b/mysql-test/suite/galera/r/MW-44.result
@@ -6,9 +6,10 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
SET SESSION wsrep_osu_method=RSU;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
SET SESSION wsrep_osu_method=TOI;
-SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
-COUNT(*) = 2
-1
+SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+argument
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB
+ALTER TABLE t1 ADD COLUMN f2 INTEGER
SET GLOBAL general_log='ON';
SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%';
COUNT(*) = 0
diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test
index cb5db1b208a..6defa432879 100644
--- a/mysql-test/suite/galera/t/MW-44.test
+++ b/mysql-test/suite/galera/t/MW-44.test
@@ -19,7 +19,10 @@ SET SESSION wsrep_osu_method=RSU;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
SET SESSION wsrep_osu_method=TOI;
-SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+--let $wait_condition = SELECT COUNT(argument) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
+--source include/wait_condition.inc
+
+SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
--connection node_2
SET GLOBAL general_log='ON';
1
0
revision-id: b0026e33af8fc3b25a42099c096a84591fd550e2 (mariadb-10.1.35-37-gb0026e33af8)
parent(s): fba683c0697af8fb1c79af4d4e2e739e01e8147d
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-06 18:55:57 +0300
message:
Disable failing galera test for now.
---
mysql-test/suite/galera/disabled.def | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def
index 704cb753f0d..40b4065c3db 100644
--- a/mysql-test/suite/galera/disabled.def
+++ b/mysql-test/suite/galera/disabled.def
@@ -32,4 +32,5 @@ galera_wan : MDEV-13549 Galera test failures
MW-388 : MDEV-13549 Galera test failures
galera_sst_mysqldump_with_key : MDEV-16890 Galera test failure
galera.galera_binlog_stmt_autoinc : MDEV-17106 Test failure on galera.galera_binlog_stmt_autoinc
-galera.galera_kill_ddl : MDEV-17108 Test failure on galera.galera_kill_ddl
\ No newline at end of file
+galera.galera_kill_ddl : MDEV-17108 Test failure on galera.galera_kill_ddl
+galera.galera_var_node_address : MDEV-17151 Galera test failure on galera.galera_var_node_address
1
0
revision-id: 42100230ea35587e4bfb7d87d433ca1c32ca4db4 (mariadb-10.1.35-37-g42100230ea3)
parent(s): 13c4e2bc10471dda9ec3886e7413a3669a5358da
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-06 16:03:56 +0300
message:
Remove.
---
mysql-test/suite/galera/r/galera_defaults.result | 71 ------------------------
mysql-test/suite/galera/t/galera_defaults.test | 11 ----
2 files changed, 82 deletions(-)
diff --git a/mysql-test/suite/galera/r/galera_defaults.result b/mysql-test/suite/galera/r/galera_defaults.result
index 38fb224d6e0..8083784bb01 100644
--- a/mysql-test/suite/galera/r/galera_defaults.result
+++ b/mysql-test/suite/galera/r/galera_defaults.result
@@ -53,74 +53,3 @@ WSREP_SST_DONOR_REJECTS_QUERIES OFF
WSREP_SST_METHOD rsync
WSREP_SYNC_WAIT 15
<BASE_DIR>; <BASE_HOST>; <BASE_PORT>; cert.log_conflicts = no; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT30S; evs.info_log_mask = 0; evs.install_timeout = PT15S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT10S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; <GCACHE_DIR>; gcache.keep_pages_size = 0; gcache.mem_size = 0; <GCACHE_NAME>; gcache.page_size = 128M; gcache.recover = no; gcache.size = 10M; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; <GCS_RECV_Q_HARD_LIMIT>; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; <GMCAST_LISTEN_ADDR>; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; <IST_RECV_ADDR>; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = PT30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT90S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; <REPL_PROTO_MAX>; socket.checksum = 2; socket.recv_buf_size = 212992;
-SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_STATUS
-WHERE VARIABLE_NAME LIKE 'wsrep_%'
-AND VARIABLE_NAME != 'wsrep_debug_sync_waiters';
-COUNT(*)
-61
-SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_STATUS
-WHERE VARIABLE_NAME LIKE 'wsrep_%'
-AND VARIABLE_NAME != 'wsrep_debug_sync_waiters'
-ORDER BY VARIABLE_NAME;
-VARIABLE_NAME
-WSREP_APPLY_OOOE
-WSREP_APPLY_OOOL
-WSREP_APPLY_WINDOW
-WSREP_CAUSAL_READS
-WSREP_CERT_DEPS_DISTANCE
-WSREP_CERT_INDEX_SIZE
-WSREP_CERT_INTERVAL
-WSREP_CLUSTER_CONF_ID
-WSREP_CLUSTER_SIZE
-WSREP_CLUSTER_STATE_UUID
-WSREP_CLUSTER_STATUS
-WSREP_CLUSTER_WEIGHT
-WSREP_COMMIT_OOOE
-WSREP_COMMIT_OOOL
-WSREP_COMMIT_WINDOW
-WSREP_CONNECTED
-WSREP_DESYNC_COUNT
-WSREP_EVS_DELAYED
-WSREP_EVS_EVICT_LIST
-WSREP_EVS_REPL_LATENCY
-WSREP_EVS_STATE
-WSREP_FLOW_CONTROL_PAUSED
-WSREP_FLOW_CONTROL_PAUSED_NS
-WSREP_FLOW_CONTROL_RECV
-WSREP_FLOW_CONTROL_SENT
-WSREP_GCOMM_UUID
-WSREP_INCOMING_ADDRESSES
-WSREP_LAST_COMMITTED
-WSREP_LOCAL_BF_ABORTS
-WSREP_LOCAL_CACHED_DOWNTO
-WSREP_LOCAL_CERT_FAILURES
-WSREP_LOCAL_COMMITS
-WSREP_LOCAL_INDEX
-WSREP_LOCAL_RECV_QUEUE
-WSREP_LOCAL_RECV_QUEUE_AVG
-WSREP_LOCAL_RECV_QUEUE_MAX
-WSREP_LOCAL_RECV_QUEUE_MIN
-WSREP_LOCAL_REPLAYS
-WSREP_LOCAL_SEND_QUEUE
-WSREP_LOCAL_SEND_QUEUE_AVG
-WSREP_LOCAL_SEND_QUEUE_MAX
-WSREP_LOCAL_SEND_QUEUE_MIN
-WSREP_LOCAL_STATE
-WSREP_LOCAL_STATE_COMMENT
-WSREP_LOCAL_STATE_UUID
-WSREP_OPEN_CONNECTIONS
-WSREP_OPEN_TRANSACTIONS
-WSREP_PROTOCOL_VERSION
-WSREP_PROVIDER_NAME
-WSREP_PROVIDER_VENDOR
-WSREP_PROVIDER_VERSION
-WSREP_READY
-WSREP_RECEIVED
-WSREP_RECEIVED_BYTES
-WSREP_REPLICATED
-WSREP_REPLICATED_BYTES
-WSREP_REPL_DATA_BYTES
-WSREP_REPL_KEYS
-WSREP_REPL_KEYS_BYTES
-WSREP_REPL_OTHER_BYTES
-WSREP_THREAD_COUNT
diff --git a/mysql-test/suite/galera/t/galera_defaults.test b/mysql-test/suite/galera/t/galera_defaults.test
index 70929f547f8..0ad97916302 100644
--- a/mysql-test/suite/galera/t/galera_defaults.test
+++ b/mysql-test/suite/galera/t/galera_defaults.test
@@ -59,14 +59,3 @@ ORDER BY VARIABLE_NAME;
$wsrep_provider_options =~ s/repl.proto_max = .*?;\s*/<REPL_PROTO_MAX>;/sgio;
print $wsrep_provider_options."\n";
EOF
-
-# Global Status
-
-SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_STATUS
-WHERE VARIABLE_NAME LIKE 'wsrep_%'
-AND VARIABLE_NAME != 'wsrep_debug_sync_waiters';
-
-SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_STATUS
-WHERE VARIABLE_NAME LIKE 'wsrep_%'
-AND VARIABLE_NAME != 'wsrep_debug_sync_waiters'
-ORDER BY VARIABLE_NAME;
1
0
revision-id: 6ca6f25d4e96a479eb144a8da1066a27d0abce40 (mariadb-10.1.35-36-g6ca6f25d4e9)
parent(s): 653038ccad1d91f3fff516f6b22462ab83e2b6f8
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-06 14:16:09 +0300
message:
MDEV-17143: Galera test failure on galera.MW-44
Test changes only.
---
mysql-test/suite/galera/r/MW-44.result | 6 +-----
mysql-test/suite/galera/t/MW-44.test | 8 --------
2 files changed, 1 insertion(+), 13 deletions(-)
diff --git a/mysql-test/suite/galera/r/MW-44.result b/mysql-test/suite/galera/r/MW-44.result
index a1e55318422..394c749b253 100644
--- a/mysql-test/suite/galera/r/MW-44.result
+++ b/mysql-test/suite/galera/r/MW-44.result
@@ -1,10 +1,6 @@
-SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
-SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
SET GLOBAL general_log='ON';
-SELECT argument from mysql.general_log WHERE argument NOT LIKE 'SELECT%';
-argument
SET SESSION wsrep_osu_method=TOI;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
SET SESSION wsrep_osu_method=RSU;
@@ -16,5 +12,5 @@ COUNT(*) = 2
SET GLOBAL general_log='ON';
SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%';
COUNT(*) = 0
-1
+0
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test
index 6b5eb823985..cb5db1b208a 100644
--- a/mysql-test/suite/galera/t/MW-44.test
+++ b/mysql-test/suite/galera/t/MW-44.test
@@ -6,21 +6,13 @@
--source include/have_innodb.inc
--connection node_1
-SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
---let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log;
---source include/wait_condition.inc
--connection node_2
-SET GLOBAL general_log='OFF';
TRUNCATE TABLE mysql.general_log;
---let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.general_log;
---source include/wait_condition.inc
--connection node_1
SET GLOBAL general_log='ON';
-SELECT argument from mysql.general_log WHERE argument NOT LIKE 'SELECT%';
-
SET SESSION wsrep_osu_method=TOI;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
SET SESSION wsrep_osu_method=RSU;
1
0

06 Sep '18
revision-id: 13c4e2bc10471dda9ec3886e7413a3669a5358da (mariadb-10.1.35-36-g13c4e2bc104)
parent(s): 3b0ac8fa013d0a56bce736fb4283a35409394d71
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-06 13:18:34 +0300
message:
Disable changing output and fix version requirement.
---
mysql-test/suite/galera/r/galera_defaults.result | 5 ++++-
mysql-test/suite/galera/t/galera_defaults.test | 2 +-
mysql-test/suite/wsrep/r/variables.result | 1 -
mysql-test/suite/wsrep/t/variables.test | 2 ++
4 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/mysql-test/suite/galera/r/galera_defaults.result b/mysql-test/suite/galera/r/galera_defaults.result
index 5c5fdabf432..38fb224d6e0 100644
--- a/mysql-test/suite/galera/r/galera_defaults.result
+++ b/mysql-test/suite/galera/r/galera_defaults.result
@@ -57,7 +57,7 @@ SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_STATUS
WHERE VARIABLE_NAME LIKE 'wsrep_%'
AND VARIABLE_NAME != 'wsrep_debug_sync_waiters';
COUNT(*)
-58
+61
SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_STATUS
WHERE VARIABLE_NAME LIKE 'wsrep_%'
AND VARIABLE_NAME != 'wsrep_debug_sync_waiters'
@@ -74,6 +74,7 @@ WSREP_CLUSTER_CONF_ID
WSREP_CLUSTER_SIZE
WSREP_CLUSTER_STATE_UUID
WSREP_CLUSTER_STATUS
+WSREP_CLUSTER_WEIGHT
WSREP_COMMIT_OOOE
WSREP_COMMIT_OOOL
WSREP_COMMIT_WINDOW
@@ -107,6 +108,8 @@ WSREP_LOCAL_SEND_QUEUE_MIN
WSREP_LOCAL_STATE
WSREP_LOCAL_STATE_COMMENT
WSREP_LOCAL_STATE_UUID
+WSREP_OPEN_CONNECTIONS
+WSREP_OPEN_TRANSACTIONS
WSREP_PROTOCOL_VERSION
WSREP_PROVIDER_NAME
WSREP_PROVIDER_VENDOR
diff --git a/mysql-test/suite/galera/t/galera_defaults.test b/mysql-test/suite/galera/t/galera_defaults.test
index facc5180f2f..70929f547f8 100644
--- a/mysql-test/suite/galera/t/galera_defaults.test
+++ b/mysql-test/suite/galera/t/galera_defaults.test
@@ -13,7 +13,7 @@
# Make sure that the test is operating on the right version of galera library.
--disable_query_log
---let $galera_version=3.20
+--let $galera_version=25.3.20
source ../wsrep/include/check_galera_version.inc;
--enable_query_log
diff --git a/mysql-test/suite/wsrep/r/variables.result b/mysql-test/suite/wsrep/r/variables.result
index 7489aa0d914..2e31e761f5f 100644
--- a/mysql-test/suite/wsrep/r/variables.result
+++ b/mysql-test/suite/wsrep/r/variables.result
@@ -19,7 +19,6 @@ SET GLOBAL wsrep_provider=none;
# variables when using "_"
#
CALL mtr.add_suppression("WSREP: Could not open saved state file for reading.*");
-SET GLOBAL wsrep_provider= '/usr/lib/galera/libgalera_smm.so';
SHOW GLOBAL STATUS LIKE 'wsrep%';
Variable_name Value
wsrep_apply_oooe #
diff --git a/mysql-test/suite/wsrep/t/variables.test b/mysql-test/suite/wsrep/t/variables.test
index ca71baaba02..f3cd66e5cf9 100644
--- a/mysql-test/suite/wsrep/t/variables.test
+++ b/mysql-test/suite/wsrep/t/variables.test
@@ -28,10 +28,12 @@ SET GLOBAL wsrep_provider=none;
CALL mtr.add_suppression("WSREP: Could not open saved state file for reading.*");
--disable_result_log
+--disable_query_log
eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
--let $galera_version=25.3.17
source include/check_galera_version.inc;
--enable_result_log
+--enable_query_log
--replace_column 2 #
SHOW GLOBAL STATUS LIKE 'wsrep%';
1
0
revision-id: 3b0ac8fa013d0a56bce736fb4283a35409394d71 (mariadb-10.1.35-35-g3b0ac8fa013)
parent(s): 6695fcead3fb081109f04897c7a865d6c1bee275
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-09-06 10:29:27 +0300
message:
MDEV-17062: Test failure on galera.MW-336
MDEV-17058: Test failure on wsrep.variables
MDEV-17060: Test failure on galera.galera_var_slave_threads
Fix incorrect calculation of increased applier (slave) threads.
Note that increase change takes effect "immediately" but we should
use proper wait condition to wait it. Reducing the number of
slave threads is not immediate as thread will only exit after a
replication event.
---
mysql-test/suite/galera/r/MW-336.result | 42 ++++----
.../suite/galera/r/galera_var_slave_threads.result | 64 ------------
mysql-test/suite/galera/t/MW-336.test | 108 ++++++++++++++++-----
.../suite/galera/t/galera_var_slave_threads.test | 13 +++
.../suite/wsrep/include/check_galera_version.inc | 20 ++--
mysql-test/suite/wsrep/r/variables.result | 5 +
mysql-test/suite/wsrep/t/variables.test | 14 ++-
sql/wsrep_var.cc | 4 +-
8 files changed, 145 insertions(+), 125 deletions(-)
diff --git a/mysql-test/suite/galera/r/MW-336.result b/mysql-test/suite/galera/r/MW-336.result
index 34874198c6f..81e8eae0eb3 100644
--- a/mysql-test/suite/galera/r/MW-336.result
+++ b/mysql-test/suite/galera/r/MW-336.result
@@ -1,33 +1,31 @@
CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
+INSERT INTO t1 values(0);
SET GLOBAL wsrep_slave_threads = 10;
SET GLOBAL wsrep_slave_threads = 1;
-INSERT INTO t1 VALUES (1);
+# Wait 10 slave threads to start 1
+# Generate 12 replication events
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+13
+# Wait 9 slave threads to exit 1
SET GLOBAL wsrep_slave_threads = 10;
+# Wait 10 slave threads to start 2
SET GLOBAL wsrep_slave_threads = 20;
+# Wait 20 slave threads to start 3
SET GLOBAL wsrep_slave_threads = 1;
-INSERT INTO t1 VALUES (1);
-INSERT INTO t1 VALUES (2);
-INSERT INTO t1 VALUES (3);
-INSERT INTO t1 VALUES (4);
-INSERT INTO t1 VALUES (5);
-INSERT INTO t1 VALUES (6);
-INSERT INTO t1 VALUES (7);
-INSERT INTO t1 VALUES (8);
-INSERT INTO t1 VALUES (9);
+# Generate 40 replication events
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+53
+# Wait 10 slave threads to exit 3
SET GLOBAL wsrep_slave_threads = 10;
SET GLOBAL wsrep_slave_threads = 0;
Warnings:
Warning 1292 Truncated incorrect wsrep_slave_threads value: '0'
-INSERT INTO t1 VALUES (10);
-INSERT INTO t1 VALUES (11);
-INSERT INTO t1 VALUES (12);
-INSERT INTO t1 VALUES (13);
-INSERT INTO t1 VALUES (14);
-INSERT INTO t1 VALUES (15);
-INSERT INTO t1 VALUES (16);
-INSERT INTO t1 VALUES (17);
-INSERT INTO t1 VALUES (18);
-INSERT INTO t1 VALUES (19);
-INSERT INTO t1 VALUES (20);
-SET GLOBAL wsrep_slave_threads = 1;
+# Wait 10 slave threads to start 3
+# Generate 12 replication events
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+65
+# Wait 10 slave threads to exit 4
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_var_slave_threads.result b/mysql-test/suite/galera/r/galera_var_slave_threads.result
index 70f3b8ee257..c7c6af2098f 100644
--- a/mysql-test/suite/galera/r/galera_var_slave_threads.result
+++ b/mysql-test/suite/galera/r/galera_var_slave_threads.result
@@ -26,70 +26,6 @@ SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system use
COUNT(*) = 1
1
SET GLOBAL wsrep_slave_threads = 1;
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
-INSERT INTO t2 VALUES (DEFAULT);
SELECT COUNT(*) = 64 FROM t2;
COUNT(*) = 64
1
diff --git a/mysql-test/suite/galera/t/MW-336.test b/mysql-test/suite/galera/t/MW-336.test
index 9572489ebe9..749ffe671be 100644
--- a/mysql-test/suite/galera/t/MW-336.test
+++ b/mysql-test/suite/galera/t/MW-336.test
@@ -6,62 +6,118 @@
--source include/have_innodb.inc
CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
+INSERT INTO t1 values(0);
--connection node_1
+
SET GLOBAL wsrep_slave_threads = 10;
SET GLOBAL wsrep_slave_threads = 1;
---let $wait_condition = SELECT COUNT(*) = 11 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user';
+
+--echo # Wait 10 slave threads to start 1
+--let $wait_timeout=600
+--let $wait_condition = SELECT COUNT(*) = 11 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
--source include/wait_condition.inc
--connection node_2
-INSERT INTO t1 VALUES (1);
+# Wait until inserts are replicated
+--let $wait_condition = SELECT COUNT(*) = 1 FROM t1;
+--source include/wait_condition.inc
+--echo # Generate 12 replication events
+--disable_query_log
+--disable_result_log
+--let $count = 12
+while ($count)
+{
+ INSERT INTO t1 VALUES (1);
+ --dec $count
+}
+--enable_result_log
+--enable_query_log
--connection node_1
+# Wait until inserts are replicated
+--let $wait_condition = SELECT COUNT(*) = 13 FROM t1;
+--source include/wait_condition.inc
+
+SELECT COUNT(*) FROM t1;
+
+--echo # Wait 9 slave threads to exit 1
+# Wait until appliers exit
+--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
+--source include/wait_condition.inc
+
SET GLOBAL wsrep_slave_threads = 10;
+
+--echo # Wait 10 slave threads to start 2
--let $wait_condition = SELECT COUNT(*) = 11 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user';
--source include/wait_condition.inc
SET GLOBAL wsrep_slave_threads = 20;
+
+--echo # Wait 20 slave threads to start 3
--let $wait_condition = SELECT COUNT(*) = 21 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user';
--source include/wait_condition.inc
SET GLOBAL wsrep_slave_threads = 1;
--connection node_2
-INSERT INTO t1 VALUES (1);
-INSERT INTO t1 VALUES (2);
-INSERT INTO t1 VALUES (3);
-INSERT INTO t1 VALUES (4);
-INSERT INTO t1 VALUES (5);
-INSERT INTO t1 VALUES (6);
-INSERT INTO t1 VALUES (7);
-INSERT INTO t1 VALUES (8);
-INSERT INTO t1 VALUES (9);
-
+--echo # Generate 40 replication events
+--disable_query_log
+--disable_result_log
+--let $count = 40
+while ($count)
+{
+ INSERT INTO t1 VALUES (1);
+ --dec $count
+}
+--enable_query_log
+--enable_result_log
--connection node_1
---let $wait_condition = SELECT COUNT(*) = 12 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user';
+
+# Wait until inserts are replicated
+--let $wait_condition = SELECT COUNT(*) = 53 FROM t1;
+--source include/wait_condition.inc
+
+SELECT COUNT(*) FROM t1;
+
+--echo # Wait 10 slave threads to exit 3
+# Wait until appliers exit
+--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
--source include/wait_condition.inc
SET GLOBAL wsrep_slave_threads = 10;
SET GLOBAL wsrep_slave_threads = 0;
+--echo # Wait 10 slave threads to start 3
+--let $wait_timeout=600
+--let $wait_condition = SELECT COUNT(*) = 11 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
+--source include/wait_condition.inc
+
--connection node_2
-INSERT INTO t1 VALUES (10);
-INSERT INTO t1 VALUES (11);
-INSERT INTO t1 VALUES (12);
-INSERT INTO t1 VALUES (13);
-INSERT INTO t1 VALUES (14);
-INSERT INTO t1 VALUES (15);
-INSERT INTO t1 VALUES (16);
-INSERT INTO t1 VALUES (17);
-INSERT INTO t1 VALUES (18);
-INSERT INTO t1 VALUES (19);
-INSERT INTO t1 VALUES (20);
+--echo # Generate 12 replication events
+--disable_query_log
+--disable_result_log
+--let $count = 12
+while ($count)
+{
+ INSERT INTO t1 VALUES (1);
+ --dec $count
+}
+--enable_result_log
+--enable_query_log
--connection node_1
---let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user';
+# Wait until inserts are replicated
+--let $wait_condition = SELECT COUNT(*) = 65 FROM t1;
--source include/wait_condition.inc
-SET GLOBAL wsrep_slave_threads = 1;
+SELECT COUNT(*) FROM t1;
+
+--echo # Wait 10 slave threads to exit 4
+# Wait until appliers exit
+--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
+--source include/wait_condition.inc
+
+--connection node_1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_var_slave_threads.test b/mysql-test/suite/galera/t/galera_var_slave_threads.test
index 0afda948ed5..1cee845b6ab 100644
--- a/mysql-test/suite/galera/t/galera_var_slave_threads.test
+++ b/mysql-test/suite/galera/t/galera_var_slave_threads.test
@@ -34,7 +34,12 @@ SET GLOBAL wsrep_slave_threads = 64;
INSERT INTO t1 VALUES (1);
--connection node_2
+--let $wait_timeout=600
+--let $wait_condition = SELECT COUNT(*) = 1 FROM t1;
+--source include/wait_condition.inc
+
SELECT COUNT(*) = 1 FROM t1;
+
--let $wait_condition = SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user';
--source include/wait_condition.inc
SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%';
@@ -47,6 +52,8 @@ SET GLOBAL wsrep_slave_threads = 1;
--connection node_1
+--disable_result_log
+--disable_query_log
# Generate 64 replication events
--let $count = 64
while ($count)
@@ -54,9 +61,15 @@ while ($count)
INSERT INTO t2 VALUES (DEFAULT);
--dec $count
}
+--enable_query_log
+--enable_result_log
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 64 FROM t2;
+--source include/wait_condition.inc
+
SELECT COUNT(*) = 64 FROM t2;
+
--let $wait_condition = SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user';
--source include/wait_condition.inc
SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%';
diff --git a/mysql-test/suite/wsrep/include/check_galera_version.inc b/mysql-test/suite/wsrep/include/check_galera_version.inc
index e495da8f1ee..fd691161a54 100644
--- a/mysql-test/suite/wsrep/include/check_galera_version.inc
+++ b/mysql-test/suite/wsrep/include/check_galera_version.inc
@@ -17,23 +17,27 @@
eval SET @GALERA_VERSION='$galera_version';
SELECT CAST(REGEXP_REPLACE(@GALERA_VERSION,'^(\\d+)\\.(\\d+).*','\\1') AS UNSIGNED) INTO @GALERA_MAJOR_VERSION;
SELECT CAST(REGEXP_REPLACE(@GALERA_VERSION,'^(\\d+)\\.(\\d+).*','\\2') AS UNSIGNED) INTO @GALERA_MINOR_VERSION;
+SELECT CAST(REGEXP_REPLACE(@GALERA_VERSION,'^(\\d+)\\.(\\d+)\\.(\\d+).*','\\3') AS UNSIGNED) INTO @GALERA_RELEASE_VERSION;
# Actual
SELECT VARIABLE_VALUE INTO @ACTUAL_GALERA_VERSION FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME LIKE 'wsrep_provider_version';
-
-SELECT CAST(REGEXP_REPLACE(@ACTUAL_GALERA_VERSION,'^(\\d+)\\.(\\d+).*','\\1') AS UNSIGNED) INTO @ACTUAL_GALERA_MAJOR_VERSION;
-SELECT CAST(REGEXP_REPLACE(@ACTUAL_GALERA_VERSION,'^(\\d+)\\.(\\d+).*','\\2') AS UNSIGNED) INTO @ACTUAL_GALERA_MINOR_VERSION;
+SELECT CAST(REGEXP_REPLACE(@ACTUAL_GALERA_VERSION,'^(\\d+)\\.(\\d+).*','\\1') AS UNSIGNED) INTO @ACTUAL_GALERA_MINOR_VERSION;
+SELECT CAST(REGEXP_REPLACE(@ACTUAL_GALERA_VERSION,'^(\\d+)\\.(\\d+).*','\\2') AS UNSIGNED) INTO @ACTUAL_GALERA_RELEASE_VERSION;
# For testing
-#SELECT @GALERA_MAJOR_VERSION, @GALERA_MINOR_VERSION;
+#SELECT @GALERA_MAJOR_VERSION;
+#SELECT @GALERA_MINOR_VERSION;
+#SELECT @GALERA_RELEASE_VERSION;
#SELECT @ACTUAL_GALERA_VERSION;
-#SELECT @ACTUAL_GALERA_MAJOR_VERSION, @ACTUAL_GALERA_MINOR_VERSION;
+#SELECT @ACTUAL_GALERA_MINOR_VERSION;
+#SELECT @ACTUAL_GALERA_RELEASE_VERSION;
-if (!`SELECT (@ACTUAL_GALERA_MAJOR_VERSION > @GALERA_MAJOR_VERSION) OR
- (@ACTUAL_GALERA_MAJOR_VERSION = @GALERA_MAJOR_VERSION AND @ACTUAL_GALERA_MINOR_VERSION >= @GALERA_MINOR_VERSION)
+if (!`SELECT (@ACTUAL_GALERA_MINOR_VERSION > @GALERA_MINOR_VERSION) OR
+ (@ACTUAL_GALERA_MINOR_VERSION = @GALERA_MINOR_VERSION AND
+ @ACTUAL_GALERA_RELEASE_VERSION >= @GALERA_RELEASE_VERSION)
`)
{
- skip Test requires Galera library version $galera_version;
+ skip Test requires Galera library version >= $galera_version;
}
--enable_query_log
diff --git a/mysql-test/suite/wsrep/r/variables.result b/mysql-test/suite/wsrep/r/variables.result
index 4ecc1ca3b79..7489aa0d914 100644
--- a/mysql-test/suite/wsrep/r/variables.result
+++ b/mysql-test/suite/wsrep/r/variables.result
@@ -19,6 +19,7 @@ SET GLOBAL wsrep_provider=none;
# variables when using "_"
#
CALL mtr.add_suppression("WSREP: Could not open saved state file for reading.*");
+SET GLOBAL wsrep_provider= '/usr/lib/galera/libgalera_smm.so';
SHOW GLOBAL STATUS LIKE 'wsrep%';
Variable_name Value
wsrep_apply_oooe #
@@ -59,6 +60,8 @@ wsrep_local_send_queue_min #
wsrep_local_state #
wsrep_local_state_comment #
wsrep_local_state_uuid #
+wsrep_open_connections #
+wsrep_open_transactions #
wsrep_protocol_version #
wsrep_provider_name #
wsrep_provider_vendor #
@@ -114,6 +117,8 @@ wsrep_local_send_queue_min #
wsrep_local_state #
wsrep_local_state_comment #
wsrep_local_state_uuid #
+wsrep_open_connections #
+wsrep_open_transactions #
wsrep_protocol_version #
wsrep_provider_name #
wsrep_provider_vendor #
diff --git a/mysql-test/suite/wsrep/t/variables.test b/mysql-test/suite/wsrep/t/variables.test
index 1315f090d5c..ca71baaba02 100644
--- a/mysql-test/suite/wsrep/t/variables.test
+++ b/mysql-test/suite/wsrep/t/variables.test
@@ -27,11 +27,11 @@ SET GLOBAL wsrep_provider=none;
CALL mtr.add_suppression("WSREP: Could not open saved state file for reading.*");
---disable_query_log
+--disable_result_log
eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER';
--let $galera_version=25.3.17
source include/check_galera_version.inc;
---enable_query_log
+--enable_result_log
--replace_column 2 #
SHOW GLOBAL STATUS LIKE 'wsrep%';
@@ -101,8 +101,11 @@ SHOW STATUS LIKE 'wsrep_thread_count';
--echo # Setting wsrep_cluster_address triggers the creation of
--echo # applier/rollbacker threads.
SET GLOBAL wsrep_cluster_address= 'gcomm://';
+
--echo # Wait for applier threads to get created.
-sleep 3;
+--let $wait_timeout=600
+--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
+--source include/wait_condition.inc
--replace_regex /.*libgalera_smm.*/libgalera_smm.so/
SELECT @@global.wsrep_provider;
@@ -113,8 +116,11 @@ SHOW STATUS LIKE 'wsrep_thread_count';
SET @wsrep_slave_threads_saved= @@global.wsrep_slave_threads;
SET GLOBAL wsrep_slave_threads= 10;
+
--echo # Wait for applier threads to get created.
-sleep 3;
+--let $wait_condition = SELECT COUNT(*) = 11 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
+--source include/wait_condition.inc
+
SHOW STATUS LIKE 'threads_connected';
SHOW STATUS LIKE 'wsrep_thread_count';
diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc
index 3826ebed14c..eea1da1d46c 100644
--- a/sql/wsrep_var.cc
+++ b/sql/wsrep_var.cc
@@ -564,7 +564,9 @@ void wsrep_node_address_init (const char* value)
static void wsrep_slave_count_change_update ()
{
- wsrep_slave_count_change += (wsrep_slave_threads - wsrep_prev_slave_threads);
+ wsrep_slave_count_change = (wsrep_slave_threads - wsrep_prev_slave_threads);
+ WSREP_DEBUG("Change on slave threads: New %lu old %lu difference %lu",
+ wsrep_slave_threads, wsrep_prev_slave_threads, wsrep_slave_count_change);
wsrep_prev_slave_threads = wsrep_slave_threads;
}
1
0

05 Sep '18
revision-id: 997dfc2af4abe58c709a5c0b8122b9bbc1090ee3 (mariadb-10.0.36-14-g997dfc2af4a)
parent(s): b9bc3c24630980b260b91fc856689dbad336064e a816eac92ac2381e1b9cd4d655e733bdeafb173e
author: Oleksandr Byelkin
committer: Oleksandr Byelkin
timestamp: 2018-09-05 23:30:28 +0200
message:
Merge branch 'merge-tokudb-5.6' into 10.0
storage/tokudb/CMakeLists.txt | 8 +-
storage/tokudb/PerconaFT/CMakeLists.txt | 3 +-
.../cmake_modules/TokuSetupCompiler.cmake | 3 +
.../tokudb/PerconaFT/ft/cachetable/cachetable.cc | 21 +-
.../tokudb/PerconaFT/ft/cachetable/cachetable.h | 8 +-
.../tokudb/PerconaFT/ft/ft-cachetable-wrappers.cc | 3 -
storage/tokudb/PerconaFT/ft/ft-test-helpers.cc | 3 -
storage/tokudb/PerconaFT/ft/node.cc | 2 +
.../PerconaFT/ft/serialize/block_allocator.cc | 2 +-
.../tokudb/PerconaFT/ft/tests/cachetable-4357.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-4365.cc | 4 -
.../tokudb/PerconaFT/ft/tests/cachetable-5097.cc | 6 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978-2.cc | 7 +-
.../tokudb/PerconaFT/ft/tests/cachetable-5978.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-all-write.cc | 5 +-
.../ft/tests/cachetable-checkpoint-pending.cc | 8 +-
.../ft/tests/cachetable-checkpoint-pinned-nodes.cc | 6 +-
.../ft/tests/cachetable-cleaner-checkpoint.cc | 5 +-
.../ft/tests/cachetable-cleaner-checkpoint2.cc | 5 +-
.../cachetable-cleaner-thread-attrs-accumulate.cc | 8 +-
.../cachetable-cleaner-thread-everything-pinned.cc | 5 +-
...etable-cleaner-thread-nothing-needs-flushing.cc | 5 +-
.../cachetable-cleaner-thread-same-fullhash.cc | 7 +-
.../ft/tests/cachetable-cleaner-thread-simple.cc | 7 +-
.../ft/tests/cachetable-clock-eviction.cc | 9 +-
.../ft/tests/cachetable-clock-eviction2.cc | 9 +-
.../ft/tests/cachetable-clock-eviction3.cc | 9 +-
.../ft/tests/cachetable-clock-eviction4.cc | 9 +-
.../ft/tests/cachetable-clone-checkpoint.cc | 5 +-
.../cachetable-clone-partial-fetch-pinned-node.cc | 7 +-
.../ft/tests/cachetable-clone-partial-fetch.cc | 7 +-
.../ft/tests/cachetable-clone-pin-nonblocking.cc | 7 +-
.../ft/tests/cachetable-clone-unpin-remove.cc | 5 +-
.../ft/tests/cachetable-eviction-close-test.cc | 4 -
.../ft/tests/cachetable-eviction-close-test2.cc | 4 -
.../ft/tests/cachetable-eviction-getandpin-test.cc | 14 +-
.../tests/cachetable-eviction-getandpin-test2.cc | 12 +-
.../ft/tests/cachetable-fetch-inducing-evictor.cc | 15 +-
.../ft/tests/cachetable-flush-during-cleaner.cc | 3 +-
.../ft/tests/cachetable-getandpin-test.cc | 8 +-
.../cachetable-kibbutz_and_flush_cachefile.cc | 3 +-
.../PerconaFT/ft/tests/cachetable-partial-fetch.cc | 18 +-
.../ft/tests/cachetable-pin-checkpoint.cc | 6 -
.../cachetable-pin-nonblocking-checkpoint-clean.cc | 9 +-
.../ft/tests/cachetable-prefetch-close-test.cc | 2 -
.../ft/tests/cachetable-prefetch-getandpin-test.cc | 12 +-
.../ft/tests/cachetable-put-checkpoint.cc | 9 -
.../PerconaFT/ft/tests/cachetable-simple-clone.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-clone2.cc | 5 +-
.../PerconaFT/ft/tests/cachetable-simple-close.cc | 20 +-
.../ft/tests/cachetable-simple-maybe-get-pin.cc | 3 +-
.../ft/tests/cachetable-simple-pin-cheap.cc | 9 +-
.../ft/tests/cachetable-simple-pin-dep-nodes.cc | 8 +-
.../cachetable-simple-pin-nonblocking-cheap.cc | 19 +-
.../ft/tests/cachetable-simple-pin-nonblocking.cc | 13 +-
.../PerconaFT/ft/tests/cachetable-simple-pin.cc | 11 +-
.../ft/tests/cachetable-simple-put-dep-nodes.cc | 6 +-
.../cachetable-simple-read-pin-nonblocking.cc | 13 +-
.../ft/tests/cachetable-simple-read-pin.cc | 13 +-
.../cachetable-simple-unpin-remove-checkpoint.cc | 7 +-
.../PerconaFT/ft/tests/cachetable-simple-verify.cc | 5 +-
.../tokudb/PerconaFT/ft/tests/cachetable-test.cc | 22 +-
.../ft/tests/cachetable-unpin-and-remove-test.cc | 4 +-
.../cachetable-unpin-remove-and-checkpoint.cc | 6 +-
.../PerconaFT/ft/tests/cachetable-unpin-test.cc | 2 -
storage/tokudb/PerconaFT/ft/tests/test-TDB2-pe.cc | 178 +
storage/tokudb/PerconaFT/ft/tests/test-TDB89.cc | 208 +
storage/tokudb/PerconaFT/ft/txn/rollback-apply.cc | 2 +
storage/tokudb/PerconaFT/ft/txn/rollback.cc | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.cpp | 2 +-
storage/tokudb/PerconaFT/ftcxx/malloc_utils.hpp | 2 +-
storage/tokudb/PerconaFT/portability/memory.cc | 14 +-
storage/tokudb/PerconaFT/portability/toku_assert.h | 2 +-
.../tokudb/PerconaFT/portability/toku_debug_sync.h | 3 +-
.../PerconaFT/portability/toku_instr_mysql.cc | 6 +-
.../PerconaFT/portability/toku_instrumentation.h | 6 +-
.../PerconaFT/portability/toku_portability.h | 2 +-
.../tokudb/PerconaFT/portability/toku_race_tools.h | 2 +-
storage/tokudb/PerconaFT/src/tests/get_last_key.cc | 32 +-
storage/tokudb/PerconaFT/src/ydb.cc | 3 +
storage/tokudb/PerconaFT/tools/CMakeLists.txt | 4 +
storage/tokudb/PerconaFT/util/dmt.cc | 4 +-
storage/tokudb/PerconaFT/util/minicron.cc | 3 +-
storage/tokudb/PerconaFT/util/scoped_malloc.cc | 2 +-
.../util/tests/minicron-change-period-data-race.cc | 66 +
storage/tokudb/ha_tokudb.cc | 325 +-
storage/tokudb/ha_tokudb.h | 92 +-
storage/tokudb/ha_tokudb_admin.cc | 8 +-
storage/tokudb/ha_tokudb_alter_55.cc | 4 +
storage/tokudb/ha_tokudb_alter_56.cc | 265 +-
storage/tokudb/ha_tokudb_alter_common.cc | 6 +-
storage/tokudb/ha_tokudb_update.cc | 100 +-
storage/tokudb/hatoku_cmp.cc | 33 +-
storage/tokudb/hatoku_cmp.h | 14 +-
storage/tokudb/hatoku_defines.h | 51 +-
storage/tokudb/hatoku_hton.cc | 169 +-
storage/tokudb/hatoku_hton.h | 25 +-
.../r/rpl_mixed_replace_into.result | 0
.../r/rpl_row_replace_into.result | 3 +
.../r/rpl_stmt_replace_into.result | 3 +
.../mysql-test/rpl/r/rpl_xa_interleave.result | 62 +
.../t/rpl_mixed_replace_into.test | 0
.../t/rpl_row_replace_into.test | 0
.../t/rpl_stmt_replace_into.test | 0
.../tokudb/mysql-test/rpl/t/rpl_xa_interleave.test | 103 +
.../tokudb/include/fast_update_gen_footer.inc | 2 +
.../include/fast_update_gen_footer_silent.inc | 9 +
.../tokudb/include/fast_update_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_update_int.inc | 48 +
.../tokudb/include/fast_upsert_gen_header.inc | 6 +
.../mysql-test/tokudb/include/fast_upsert_int.inc | 19 +
.../tokudb/mysql-test/tokudb/include/have_mrr.inc | 0
.../tokudb/include/setup_fast_update_upsert.inc | 8 +
.../tokudb/mysql-test/tokudb/r/compressions.result | 11 +
.../tokudb/r/fast_update_binlog_mixed.result | 225 +-
.../tokudb/r/fast_update_binlog_row.result | 19 +-
.../tokudb/r/fast_update_binlog_statement.result | 222 +-
.../mysql-test/tokudb/r/fast_update_blobs.result | 18253 +---------
.../r/fast_update_blobs_fixed_varchar.result | 33026 ------------------
.../tokudb/r/fast_update_blobs_with_varchar.result | 32771 +-----------------
.../mysql-test/tokudb/r/fast_update_char.result | 60 +-
.../tokudb/r/fast_update_deadlock.result | 19 +-
.../tokudb/r/fast_update_decr_floor.result | 314 +-
.../r/fast_update_disable_slow_update.result | 7 -
.../mysql-test/tokudb/r/fast_update_error.result | 12 +-
.../mysql-test/tokudb/r/fast_update_int.result | 562 +-
.../tokudb/r/fast_update_int_bounds.result | 52 +-
.../mysql-test/tokudb/r/fast_update_key.result | 54 +-
.../mysql-test/tokudb/r/fast_update_sqlmode.result | 21 +-
.../tokudb/r/fast_update_uint_bounds.result | 36 +-
.../mysql-test/tokudb/r/fast_update_varchar.result | 13575 +-------
.../mysql-test/tokudb/r/fast_upsert_bin_pad.result | Bin 659 -> 738 bytes
.../mysql-test/tokudb/r/fast_upsert_char.result | 24 +-
.../tokudb/r/fast_upsert_deadlock.result | 19 +-
.../mysql-test/tokudb/r/fast_upsert_int.result | 428 +-
.../mysql-test/tokudb/r/fast_upsert_key.result | 43 +-
.../mysql-test/tokudb/r/fast_upsert_sqlmode.result | 23 +-
.../mysql-test/tokudb/r/fast_upsert_values.result | 18 +-
.../tokudb/mysql-test/tokudb/r/tokudb_mrr.result | 326 +
storage/tokudb/mysql-test/tokudb/suite.pm | 6 +
.../tokudb/mysql-test/tokudb/t/compressions.test | 68 +
storage/tokudb/mysql-test/tokudb/t/disabled.def | 24 -
.../tokudb/t/fast_update_binlog_mixed-master.opt | 2 +
.../tokudb/t/fast_update_binlog_mixed.test | 15 +-
.../tokudb/t/fast_update_binlog_row-master.opt | 2 +
.../tokudb/t/fast_update_binlog_row.test | 19 +-
.../t/fast_update_binlog_statement-master.opt | 2 +
.../tokudb/t/fast_update_binlog_statement.test | 15 +-
.../mysql-test/tokudb/t/fast_update_blobs.py | 57 -
.../mysql-test/tokudb/t/fast_update_blobs.test | 18575 +----------
.../tokudb/t/fast_update_blobs_fixed_varchar.py | 63 -
.../tokudb/t/fast_update_blobs_fixed_varchar.test | 33287 -------------------
.../tokudb/t/fast_update_blobs_with_varchar.py | 62 -
.../tokudb/t/fast_update_blobs_with_varchar.test | 33115 +-----------------
.../mysql-test/tokudb/t/fast_update_char.test | 66 +-
.../mysql-test/tokudb/t/fast_update_deadlock.test | 21 +-
.../mysql-test/tokudb/t/fast_update_decr_floor.py | 58 -
.../tokudb/t/fast_update_decr_floor.test | 409 +-
.../tokudb/t/fast_update_disable_slow_update.test | 17 -
.../mysql-test/tokudb/t/fast_update_error.test | 16 +-
.../tokudb/mysql-test/tokudb/t/fast_update_int.py | 77 -
.../mysql-test/tokudb/t/fast_update_int.test | 682 +-
.../tokudb/t/fast_update_int_bounds.test | 55 +-
.../mysql-test/tokudb/t/fast_update_key.test | 63 +-
.../mysql-test/tokudb/t/fast_update_sqlmode.test | 25 +-
.../tokudb/t/fast_update_uint_bounds.test | 42 +-
.../mysql-test/tokudb/t/fast_update_varchar.py | 63 -
.../mysql-test/tokudb/t/fast_update_varchar.test | 7390 +---
.../mysql-test/tokudb/t/fast_upsert_bin_pad.test | 19 +-
.../mysql-test/tokudb/t/fast_upsert_char.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_deadlock.test | 22 +-
.../tokudb/mysql-test/tokudb/t/fast_upsert_int.py | 50 -
.../mysql-test/tokudb/t/fast_upsert_int.test | 486 +-
.../mysql-test/tokudb/t/fast_upsert_key.test | 46 +-
.../mysql-test/tokudb/t/fast_upsert_sqlmode.test | 27 +-
.../mysql-test/tokudb/t/fast_upsert_values.test | 21 +-
storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test | 73 +
.../tokudb/mysql-test/tokudb_bugs/r/PS-3773.result | 8 +
.../r/alter_table_comment_rebuild_data.result | 177 +
.../tokudb/mysql-test/tokudb_bugs/t/PS-3773.test | 26 +
.../t/alter_table_comment_rebuild_data.test | 188 +
storage/tokudb/tokudb_debug.h | 5 -
storage/tokudb/tokudb_dir_cmd.h | 6 +-
storage/tokudb/tokudb_information_schema.cc | 74 +-
storage/tokudb/tokudb_sysvars.cc | 122 +-
storage/tokudb/tokudb_sysvars.h | 16 +-
storage/tokudb/tokudb_thread.h | 26 +-
storage/tokudb/tokudb_update_fun.cc | 230 +-
188 files changed, 3950 insertions(+), 194526 deletions(-)
diff --cc storage/tokudb/CMakeLists.txt
index 3099e704497,0ac3c20bf16..fda6afdc51a
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@@ -1,11 -1,7 +1,11 @@@
- SET(TOKUDB_VERSION 5.6.39-83.1)
+ SET(TOKUDB_VERSION )
# PerconaFT only supports x86-64 and cmake-2.8.9+
-IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
- NOT CMAKE_VERSION VERSION_LESS "2.8.9")
+IF(CMAKE_VERSION VERSION_LESS "2.8.9")
+ MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
+ELSEIF(NOT HAVE_DLOPEN)
+ MESSAGE(STATUS "dlopen is required by TokuDB")
+ELSEIF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR
+ CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
CHECK_CXX_SOURCE_COMPILES(
"
struct a {int b; int c; };
diff --cc storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
index b7b4c0ab233,6f69c3c31b9..d742555f878
--- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
+++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
@@@ -18,7 -18,7 +18,7 @@@ int toku_pthread_create(const toku_inst
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg) {
- #if (MYSQL_VERSION_MAJOR >= 5) && (MYSQL_VERSION_MINOR >= 7)
-#if (MYSQL_VERSION_ID >= 50700)
++#if (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
return PSI_THREAD_CALL(spawn_thread)(
key.id(), reinterpret_cast<my_thread_handle *>(thread),
attr, start_routine, arg);
diff --cc storage/tokudb/PerconaFT/tools/CMakeLists.txt
index af40a838b9a,d54c2c21827..4615e537a8f
--- a/storage/tokudb/PerconaFT/tools/CMakeLists.txt
+++ b/storage/tokudb/PerconaFT/tools/CMakeLists.txt
@@@ -8,11 -8,24 +8,15 @@@ foreach(tool ${tools}
# detect when we are being built as a subproject
if (DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
+ # if we are compiling under a MySQL variant, this macro must be present
+ GET_MYSQL_VERSION()
+
+
if ((CMAKE_BUILD_TYPE MATCHES "Debug") AND
(CMAKE_CXX_FLAGS_DEBUG MATCHES " -DENABLED_DEBUG_SYNC"))
- if (MYSQL_BASE_VERSION VERSION_EQUAL "8.0")
- target_link_libraries(${tool} sql_main sql_gis sql_main binlog rpl master slave ${ICU_LIBRARIES})
- else ()
- target_link_libraries(${tool} sql binlog rpl master slave)
- endif ()
- else ()
- if (MYSQL_BASE_VERSION VERSION_EQUAL "8.0")
- target_link_libraries(${tool} mysqlclient)
- else ()
- target_link_libraries(${tool} perconaserverclient)
- endif ()
- endif ()
+ target_link_libraries(${tool} sql)
+ endif()
+ target_link_libraries(${tool} mysqlclient)
endif ()
add_space_separated_property(TARGET ${tool} COMPILE_FLAGS -fvisibility=hidden)
diff --cc storage/tokudb/ha_tokudb.cc
index 7a328e31261,548ac5c7b09..4637ac1bf5f
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@@ -34,20 -34,7 +34,20 @@@ Copyright (c) 2006, 2015, Percona and/o
pfs_key_t ha_tokudb_mutex_key;
pfs_key_t num_DBs_lock_key;
- #if TOKU_INCLUDE_EXTENDED_KEYS
++#if defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+static inline uint get_ext_key_parts(const KEY *key) {
+#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
+ (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
+ return key->actual_key_parts;
+#elif defined(MARIADB_BASE_VERSION)
+ return key->ext_key_parts;
+#else
+#error
+#endif
+}
- #endif
++#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
+
- HASH TOKUDB_SHARE::_open_tables;
+ std::unordered_map<std::string, TOKUDB_SHARE*> TOKUDB_SHARE::_open_tables;
tokudb::thread::mutex_t TOKUDB_SHARE::_open_tables_mutex;
static const char* ha_tokudb_exts[] = {
@@@ -7221,8 -7262,8 +7263,8 @@@ int ha_tokudb::create
form->s->write_frm_image();
#endif
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- const tokudb::sysvars::format_t row_format =
+ const tokudb::sysvars::row_format_t row_format =
(tokudb::sysvars::row_format_t)form->s->option_struct->row_format;
#else
// TDB-76 : CREATE TABLE ... LIKE ... does not use source row_format on
diff --cc storage/tokudb/ha_tokudb.h
index a2fd747bb92,1f47308c978..6f592617b76
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@@ -1072,7 -1085,28 +1085,8 @@@ private
bool in_rpl_write_rows;
bool in_rpl_delete_rows;
bool in_rpl_update_rows;
+ #endif // defined(TOKU_INCLUDE_RFR) && TOKU_INCLUDE_RFR
};
-#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-struct ha_table_option_struct {
- uint row_format;
-};
-
-struct ha_index_option_struct {
- bool clustering;
-};
-
-static inline bool key_is_clustering(const KEY *key) {
- return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
-}
-
-#else
-
-static inline bool key_is_clustering(const KEY *key) {
- return key->flags & HA_CLUSTERING;
-}
-#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
-
#endif // _HA_TOKUDB_H
diff --cc storage/tokudb/ha_tokudb_update.cc
index 9fe5e729ec4,5b09279afc5..74f313c8493
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@@ -207,6 -202,6 +202,7 @@@ static uint32_t blob_field_index(TABLE
return b_index;
}
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
// Determine if an update operation can be offloaded to the storage engine.
// The update operation consists of a list of update expressions
// (fields[i] = values[i]), and a list of where conditions (conds).
@@@ -241,25 -243,21 +244,22 @@@ int ha_tokudb::fast_update
}
error = send_update_message(
- update_fields,
- update_values,
- conds,
- transaction);
- if (error != 0) {
- goto check_error;
- }
+ update_fields, update_values, conds, transaction);
- check_error:
- if (error != 0) {
- if (tokudb::sysvars::disable_slow_update(thd) != 0)
+ if (error) {
+ int mapped_error = map_to_handler_error(error);
+ if (mapped_error == error)
error = HA_ERR_UNSUPPORTED;
- if (error != ENOTSUP)
- print_error(error, MYF(0));
}
- return_error:
+ exit:
+
+ if (error != 0 && error != ENOTSUP)
+ print_error(error, MYF(0));
+
TOKUDB_HANDLER_DBUG_RETURN(error);
}
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
// Return true if an expression is a simple int expression or a simple function
// of +- int expression.
@@@ -570,6 -568,6 +570,7 @@@ static bool is_strict_mode(THD* thd)
#endif
}
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
// Check if an update operation can be handled by this storage engine.
// Return true if it can.
bool ha_tokudb::check_fast_update(
@@@ -608,6 -606,6 +609,7 @@@
return true;
}
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
static void marshall_varchar_descriptor(
tokudb::buffer& b,
@@@ -833,6 -827,6 +831,7 @@@ static void count_update_types
}
}
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
// Generate an update message for an update operation and send it into the
// primary tree. Return 0 if successful.
int ha_tokudb::send_update_message(
@@@ -1131,5 -1127,3 +1132,4 @@@ int ha_tokudb::send_upsert_message
return error;
}
-
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
diff --cc storage/tokudb/hatoku_defines.h
index 92d7da86edf,e2fbe85b3b1..55d6da6280f
--- a/storage/tokudb/hatoku_defines.h
+++ b/storage/tokudb/hatoku_defines.h
@@@ -35,8 -35,8 +35,8 @@@ Copyright (c) 2006, 2015, Percona and/o
#include "log.h"
#include "sql_class.h"
#include "sql_show.h"
- #include "discover.h"
+ #include "item_cmpfunc.h"
-#include <binlog.h>
+//#include <binlog.h>
#include "debug_sync.h"
#undef PACKAGE
@@@ -75,10 -93,11 +93,12 @@@
#define TOKU_INCLUDE_ALTER_56 1
#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0
#define TOKU_INCLUDE_XA 1
- #define TOKU_INCLUDE_WRITE_FRM_DATA 0
+ #define TOKU_INCLUDE_WRITE_FRM_DATA 1
#define TOKU_PARTITION_WRITE_FRM_DATA 0
+ #define TOKU_INCLUDE_DISCOVER_FRM 1
#if defined(MARIADB_BASE_VERSION)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
++#define TOKU_INCLUDE_RFR 1
#endif
#define TOKU_INCLUDE_OPTION_STRUCTS 1
#define TOKU_OPTIMIZE_WITH_RECREATE 1
@@@ -117,20 -142,21 +143,22 @@@
#endif
#endif
#define TOKU_OPTIMIZE_WITH_RECREATE 1
+ #define TOKU_INCLUDE_RFR 1
#elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599
-// mysql 5.5 and mariadb 5.5
#define TOKU_USE_DB_TYPE_TOKUDB 1
-#define TOKU_INCLUDE_ALTER_56 1
-#define TOKU_INCLUDE_ALTER_55 1
-#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1
+#define TOKU_INCLUDE_ALTER_56 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ALTER_55 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0 /* MariaDB 5.5 */
#define TOKU_INCLUDE_XA 1
-#define TOKU_INCLUDE_WRITE_FRM_DATA 1
-#define TOKU_PARTITION_WRITE_FRM_DATA 1
+#define TOKU_PARTITION_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+#define TOKU_INCLUDE_WRITE_FRM_DATA 0 /* MariaDB 5.5 */
+ #define TOKU_INCLUDE_DISCOVER_FRM 1
-#define TOKU_INCLUDE_UPSERT 1
+#define TOKU_INCLUDE_UPSERT 0 /* MariaDB 5.5 */
#if defined(MARIADB_BASE_VERSION)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
+#define TOKU_INCLUDE_OPTION_STRUCTS 1
+#define TOKU_CLUSTERING_IS_COVERING 1
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
#else
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
diff --cc storage/tokudb/hatoku_hton.cc
index 693f9d28a9a,610c9e07be0..ce5e396146b
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@@ -62,14 -76,16 +64,16 @@@ static bool tokudb_show_status
THD* thd,
stat_print_fn* print,
enum ha_stat_type);
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig);
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
static int tokudb_close_connection(handlerton* hton, THD* thd);
-static void tokudb_kill_connection(handlerton *hton, THD *thd);
+static void tokudb_kill_connection(handlerton *hton, THD *thd, enum thd_kill_levels level);
static int tokudb_commit(handlerton* hton, THD* thd, bool all);
static int tokudb_rollback(handlerton* hton, THD* thd, bool all);
- #if TOKU_INCLUDE_XA
+ #if defined(TOKU_INCLUDE_XA) && TOKU_INCLUDE_XA
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all);
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len);
static int tokudb_commit_by_xid(handlerton* hton, XID* xid);
@@@ -120,8 -138,8 +126,8 @@@ handlerton* tokudb_hton
const char* ha_tokudb_ext = ".tokudb";
DB_ENV* db_env;
-#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static tokudb::thread::mutex_t tokudb_map_mutex;
- #if TOKU_THDVAR_MEMALLOC_BUG
++#if defined(TOKU_THDVAR_MEMALLOC_BUG) && TOKU_THDVAR_MEMALLOC_BUG
static TREE tokudb_map;
struct tokudb_map_pair {
THD* thd;
@@@ -388,14 -408,16 +396,16 @@@ static int tokudb_init_func(void *p)
tokudb_hton->panic = tokudb_end;
tokudb_hton->flush_logs = tokudb_flush_logs;
tokudb_hton->show_status = tokudb_show_status;
- #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
+ #if defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) && \
+ TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
tokudb_hton->handle_fatal_signal = tokudb_handle_fatal_signal;
- #endif
+ #endif // defined(TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL) &&
+ // TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
- #if TOKU_INCLUDE_OPTION_STRUCTS
+ #if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
- tokudb_hton->table_options = tokudb_table_options;
- tokudb_hton->index_options = tokudb_index_options;
+ tokudb_hton->table_options = tokudb::sysvars::tokudb_table_options;
+ tokudb_hton->index_options = tokudb::sysvars::tokudb_index_options;
- #endif
+ #endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
if (!tokudb_home)
tokudb_home = mysql_real_data_home;
@@@ -784,8 -807,7 +795,8 @@@ static int tokudb_close_connection(TOKU
return error;
}
- void tokudb_kill_connection(handlerton *hton, THD *thd,
- enum thd_kill_levels level) {
-void tokudb_kill_connection(TOKUDB_UNUSED(handlerton* hton), THD* thd) {
++void tokudb_kill_connection(TOKUDB_UNUSED(handlerton *hton), THD *thd,
++ TOKUDB_UNUSED(enum thd_kill_levels level)) {
TOKUDB_DBUG_ENTER("");
db_env->kill_waiter(db_env, thd);
DBUG_VOID_RETURN;
@@@ -883,7 -905,7 +894,7 @@@ extern "C" enum durability_properties t
#endif
// Determine if an fsync is used when a transaction is committed.
- static bool tokudb_sync_on_commit(THD* thd, tokudb_trx_data* trx, DB_TXN* txn) {
-static bool tokudb_sync_on_commit(THD* thd) {
++static bool tokudb_sync_on_commit(THD* thd, DB_TXN* txn) {
#if MYSQL_VERSION_ID >= 50600
// Check the client durability property which is set during 2PC
if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
@@@ -906,8 -928,7 +917,8 @@@ static int tokudb_commit(handlerton * h
DB_TXN **txn = all ? &trx->all : &trx->stmt;
DB_TXN *this_txn = *txn;
if (this_txn) {
- uint32_t syncflag = tokudb_sync_on_commit(thd) ? 0 : DB_TXN_NOSYNC;
+ uint32_t syncflag =
- tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
++ tokudb_sync_on_commit(thd, this_txn) ? 0 : DB_TXN_NOSYNC;
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_TXN,
"commit trx %u txn %p syncflag %u",
diff --cc storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
index 00000000000,00000000000..e69de29bb2d
new file mode 100644
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/include/have_mrr.inc
diff --cc storage/tokudb/mysql-test/tokudb/r/compressions.result
index 00000000000,87ba94ebbe8..03e0d18e9eb
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/compressions.result
+++ b/storage/tokudb/mysql-test/tokudb/r/compressions.result
@@@ -1,0 -1,6 +1,11 @@@
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
++FOUND /compression_method=0/ in dump
++FOUND /compression_method=7/ in dump
++FOUND /compression_method=9/ in dump
++FOUND /compression_method=10/ in dump
++FOUND /compression_method=11/ in dump
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
index 00000000000,9eb0c2f5e34..ba469a3ac96
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
+++ b/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
@@@ -1,0 -1,706 +1,326 @@@
-set optimizer_switch='mrr=on,mrr_cost_based=off';
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+ create table t1(a int);
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
+ insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+ create table t2(a int);
+ insert into t2 select A.a + 10*(B.a + 10*C.a) from t1 A, t1 B, t1 C;
+ create table t3 (
+ a char(8) not null, b char(8) not null, filler char(200),
+ key(a)
+ );
+ insert into t3 select @a:=concat('c-', 1000+ A.a, '=w'), @a, 'filler' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 2000+A.a, '=w'),
+ 'filler-1' from t2 A;
+ insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 3000+A.a, '=w'),
+ 'filler-2' from t2 A;
+ select a,filler from t3 where a >= 'c-9011=w';
+ a filler
+ select a,filler from t3 where a >= 'c-1011=w' and a <= 'c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=z', 'c-1013=z', 'err');
+ insert into t3 values ('a-1014=w', 'a-1014=w', 'err');
+ select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or
+ (a>='c-1014=w' and a <= 'c-1015=w');
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ delete from t3 where b in ('c-1013=z', 'a-1014=w');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
+ insert into t3 values ('c-1013=w', 'del-me', 'inserted');
+ select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or
+ a='c-1014=w' or a='c-1015=w';
+ a filler
+ c-1011=w filler
-c-1011=w filler-1
-c-1011=w filler-2
+ c-1012=w filler
-c-1012=w filler-1
-c-1012=w filler-2
+ c-1013=w filler
-c-1013=w filler-1
-c-1013=w filler-2
-c-1013=w inserted
+ c-1014=w filler
-c-1014=w filler-1
-c-1014=w filler-2
+ c-1015=w filler
++c-1011=w filler-1
++c-1012=w filler-1
++c-1013=w filler-1
++c-1014=w filler-1
+ c-1015=w filler-1
++c-1011=w filler-2
++c-1012=w filler-2
++c-1013=w filler-2
++c-1014=w filler-2
+ c-1015=w filler-2
++c-1013=w inserted
+ delete from t3 where b='del-me';
+ alter table t3 add primary key(b);
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w', 'c-1021=w',
+ 'c-1022=w', 'c-1023=w', 'c-1024=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ c-1024=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1020=w') or
+ b IN ('c-1021=w', 'c-1022=w', 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
+ select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or
+ b IN ('c-1019=w', 'c-1020=w') or
+ (b>='c-1021=w' and b<= 'c-1023=w');
+ b filler
+ c-1011=w filler
+ c-1012=w filler
+ c-1013=w filler
+ c-1014=w filler
+ c-1015=w filler
+ c-1016=w filler
+ c-1017=w filler
+ c-1018=w filler
+ c-1019=w filler
+ c-1020=w filler
+ c-1021=w filler
+ c-1022=w filler
+ c-1023=w filler
++drop table if exists t4;
+ create table t4 (a varchar(10), b int, c char(10), filler char(200),
+ key idx1 (a, b, c));
+ insert into t4 (filler) select concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-1', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'b-1',NULL,'c-222', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'bb-1',NULL,'cc-2', concat('NULL-', 15-a) from t2 order by a limit 15;
+ insert into t4 (a,b,c,filler)
+ select 'zz-1',NULL,'cc-2', 'filler-data' from t2 order by a limit 500;
+ explain
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Rowid-ordered scan
+ select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1'
+ or c='no-such-row2');
+ a b c filler
+ NULL NULL NULL NULL-15
+ NULL NULL NULL NULL-14
+ NULL NULL NULL NULL-13
+ NULL NULL NULL NULL-12
+ NULL NULL NULL NULL-11
+ NULL NULL NULL NULL-10
+ NULL NULL NULL NULL-9
+ NULL NULL NULL NULL-8
+ NULL NULL NULL NULL-7
+ NULL NULL NULL NULL-6
+ NULL NULL NULL NULL-5
+ NULL NULL NULL NULL-4
+ NULL NULL NULL NULL-3
+ NULL NULL NULL NULL-2
+ NULL NULL NULL NULL-1
+ explain
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Using MRR
++1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Rowid-ordered scan
+ select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ select * from t4 ignore index(idx1) where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
+ a b c filler
+ b-1 NULL c-1 NULL-15
+ b-1 NULL c-1 NULL-14
+ b-1 NULL c-1 NULL-13
+ b-1 NULL c-1 NULL-12
+ b-1 NULL c-1 NULL-11
+ b-1 NULL c-1 NULL-10
+ b-1 NULL c-1 NULL-9
+ b-1 NULL c-1 NULL-8
+ b-1 NULL c-1 NULL-7
+ b-1 NULL c-1 NULL-6
+ b-1 NULL c-1 NULL-5
+ b-1 NULL c-1 NULL-4
+ b-1 NULL c-1 NULL-3
+ b-1 NULL c-1 NULL-2
+ b-1 NULL c-1 NULL-1
+ bb-1 NULL cc-2 NULL-15
+ bb-1 NULL cc-2 NULL-14
+ bb-1 NULL cc-2 NULL-13
+ bb-1 NULL cc-2 NULL-12
+ bb-1 NULL cc-2 NULL-11
+ bb-1 NULL cc-2 NULL-10
+ bb-1 NULL cc-2 NULL-9
+ bb-1 NULL cc-2 NULL-8
+ bb-1 NULL cc-2 NULL-7
+ bb-1 NULL cc-2 NULL-6
+ bb-1 NULL cc-2 NULL-5
+ bb-1 NULL cc-2 NULL-4
+ bb-1 NULL cc-2 NULL-3
+ bb-1 NULL cc-2 NULL-2
+ bb-1 NULL cc-2 NULL-1
+ drop table t1, t2, t3, t4;
+ create table t1 (a int, b int not null,unique key (a,b),index(b));
+ insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6);
++Warnings:
++Warning 1062 Duplicate entry '6-6' for key 'a'
+ create table t2 like t1;
+ insert into t2 select * from t1;
+ alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10));
+ select * from t1 where a is null;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3;
+ a b c
+ NULL 9 0
+ NULL 9 0
+ select * from t1 where a is null and b=9 or a is null and b=7 limit 3;
+ a b c
+ NULL 7 0
+ NULL 9 0
+ NULL 9 0
+ drop table t1, t2;
-CREATE TABLE t1 (
-ID int(10) unsigned NOT NULL AUTO_INCREMENT,
-col1 int(10) unsigned DEFAULT NULL,
-key1 int(10) unsigned NOT NULL DEFAULT '0',
-key2 int(10) unsigned DEFAULT NULL,
-text1 text,
-text2 text,
-col2 smallint(6) DEFAULT '100',
-col3 enum('headers','bodyandsubject') NOT NULL DEFAULT 'bodyandsubject',
-col4 tinyint(3) unsigned NOT NULL DEFAULT '0',
-PRIMARY KEY (ID),
-KEY (key1),
-KEY (key2)
-) AUTO_INCREMENT=6 DEFAULT CHARSET=utf8;
-INSERT INTO t1 VALUES
-(1,NULL,1130,NULL,'Hello',NULL,100,'bodyandsubject',0),
-(2,NULL,1130,NULL,'bye',NULL,100,'bodyandsubject',0),
-(3,NULL,1130,NULL,'red',NULL,100,'bodyandsubject',0),
-(4,NULL,1130,NULL,'yellow',NULL,100,'bodyandsubject',0),
-(5,NULL,1130,NULL,'blue',NULL,100,'bodyandsubject',0);
-select * FROM t1 WHERE key1=1130 AND col1 IS NULL ORDER BY text1;
-ID col1 key1 key2 text1 text2 col2 col3 col4
-5 NULL 1130 NULL blue NULL 100 bodyandsubject 0
-2 NULL 1130 NULL bye NULL 100 bodyandsubject 0
-1 NULL 1130 NULL Hello NULL 100 bodyandsubject 0
-3 NULL 1130 NULL red NULL 100 bodyandsubject 0
-4 NULL 1130 NULL yellow NULL 100 bodyandsubject 0
-drop table t1;
-
-BUG#37851: Crash in test_if_skip_sort_order tab->select is zero
-
-CREATE TABLE t1 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1 VALUES (1);
-CREATE TABLE t2 (
-pk int(11) NOT NULL AUTO_INCREMENT,
-int_key int(11) DEFAULT NULL,
-PRIMARY KEY (pk),
-KEY int_key (int_key)
-);
-INSERT INTO t2 VALUES (1,1),(2,6),(3,0);
-EXPLAIN EXTENDED
-SELECT MIN(t1.pk)
-FROM t1 WHERE EXISTS (
-SELECT t2.pk
-FROM t2
-WHERE t2.int_key IS NULL
-GROUP BY t2.pk
-);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t2 ref int_key int_key 5 const 1 100.00 Using where; Using index
-Warnings:
-Note 1003 /* select#1 */ select min(`test`.`t1`.`pk`) AS `MIN(t1.pk)` from `test`.`t1` where 0
-DROP TABLE t1, t2;
-#
-# BUG#42048 Discrepancy between MyISAM and Maria's ICP implementation
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b char(20), filler char(200), key(a,b(10)));
-insert into t1 select A.a + 10*(B.a + 10*C.a), 'bbb','filler' from t0 A, t0 B, t0 C;
-update t1 set b=repeat(char(65+a), 20) where a < 25;
-This must show range + using index condition:
-explain select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL x Using where
-select * from t1 where a < 10 and b = repeat(char(65+a), 20);
-a b filler
-0 AAAAAAAAAAAAAAAAAAAA filler
-1 BBBBBBBBBBBBBBBBBBBB filler
-2 CCCCCCCCCCCCCCCCCCCC filler
-3 DDDDDDDDDDDDDDDDDDDD filler
-4 EEEEEEEEEEEEEEEEEEEE filler
-5 FFFFFFFFFFFFFFFFFFFF filler
-6 GGGGGGGGGGGGGGGGGGGG filler
-7 HHHHHHHHHHHHHHHHHHHH filler
-8 IIIIIIIIIIIIIIIIIIII filler
-9 JJJJJJJJJJJJJJJJJJJJ filler
-drop table t0,t1;
-#
-# BUG#41136: ORDER BY + range access: EXPLAIN shows "Using MRR" while MRR is actually not used
-#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t1 (a int, b int, key(a));
-insert into t1 select A.a + 10 *(B.a + 10*C.a), A.a + 10 *(B.a + 10*C.a) from t0 A, t0 B, t0 C;
-This mustn't show "Using MRR":
-explain select * from t1 where a < 20 order by a;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 20 Using where
-drop table t0, t1;
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=64;
-create table t1(a int);
-insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t2(a char(8), b char(8), c char(8), filler char(100), key k1(a,b,c) );
-insert into t2 select
-concat('a-', 1000 + A.a, '-a'),
-concat('b-', 1000 + B.a, '-b'),
-concat('c-', 1000 + C.a, '-c'),
-'filler'
-from t1 A, t1 B, t1 C;
-EXPLAIN select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k1 k1 9 NULL 100 Using where; Using MRR
-select count(length(a) + length(filler))
-from t2 force index (k1)
-where a>='a-1000-a' and a <'a-1001-a';
-count(length(a) + length(filler))
-100
-drop table t2;
-create table t2 (a char(100), b char(100), c char(100), d int,
-filler char(10), key(d), primary key (a,b,c));
-insert into t2 select A.a, B.a, B.a, A.a, 'filler' from t1 A, t1 B;
-explain select * from t2 force index (d) where d < 10;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range d d 5 NULL # Using where
-drop table t2;
-drop table t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-create table t1 (f1 int not null, f2 int not null,f3 int not null, f4 char(1), primary key (f1,f2), key ix(f3));
-select * from t1 where (f3>=5 and f3<=10) or (f3>=1 and f3<=4);
-f1 f2 f3 f4
-1 1 1 A
-10 10 10 A
-2 2 2 A
-3 3 3 A
-4 4 4 A
-5 5 5 A
-6 6 6 A
-7 7 7 A
-8 8 8 A
-9 9 9 A
-drop table t1;
-
-BUG#37977: Wrong result returned on GROUP BY + OR + Innodb
-
-CREATE TABLE t1 (
-`pk` int(11) NOT NULL AUTO_INCREMENT,
-`int_nokey` int(11) NOT NULL,
-`int_key` int(11) NOT NULL,
-`date_key` date NOT NULL,
-`date_nokey` date NOT NULL,
-`time_key` time NOT NULL,
-`time_nokey` time NOT NULL,
-`datetime_key` datetime NOT NULL,
-`datetime_nokey` datetime NOT NULL,
-`varchar_key` varchar(5) DEFAULT NULL,
-`varchar_nokey` varchar(5) DEFAULT NULL,
-PRIMARY KEY (`pk`),
-KEY `int_key` (`int_key`),
-KEY `date_key` (`date_key`),
-KEY `time_key` (`time_key`),
-KEY `datetime_key` (`datetime_key`),
-KEY `varchar_key` (`varchar_key`)
-);
-INSERT INTO t1 VALUES
-(1,5,5,'2009-10-16','2009-10-16','09:28:15','09:28:15','2007-09-14 05:34:08','2007-09-14 05:34:08','qk','qk'),
-(2,6,6,'0000-00-00','0000-00-00','23:06:39','23:06:39','0000-00-00 00:00:00','0000-00-00 00:00:00','j','j'),
-(3,10,10,'2000-12-18','2000-12-18','22:16:19','22:16:19','2006-11-04 15:42:50','2006-11-04 15:42:50','aew','aew'),
-(4,0,0,'2001-09-18','2001-09-18','00:00:00','00:00:00','2004-03-23 13:23:35','2004-03-23 13:23:35',NULL,NULL),
-(5,6,6,'2007-08-16','2007-08-16','22:13:38','22:13:38','2004-08-19 11:01:28','2004-08-19 11:01:28','qu','qu');
-select pk from t1 WHERE `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-select pk from t1 WHERE `int_nokey` IS NULL OR `varchar_key` > 'kr' group by pk;
-pk
-1
-5
-drop table t1;
-#
-# BUG#39447: Error with NOT NULL condition and LIMIT 1
-#
-CREATE TABLE t1 (
-id int(11) NOT NULL,
-parent_id int(11) DEFAULT NULL,
-name varchar(10) DEFAULT NULL,
-PRIMARY KEY (id),
-KEY ind_parent_id (parent_id)
-);
-insert into t1 (id, parent_id, name) values
-(10,NULL,'A'),
-(20,10,'B'),
-(30,10,'C'),
-(40,NULL,'D'),
-(50,40,'E'),
-(60,40,'F'),
-(70,NULL,'J');
-SELECT id FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id
-60
-This must show type=index, extra=Using where
-explain SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index ind_parent_id PRIMARY 4 NULL 1 Using where
-SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1;
-id parent_id name
-60 40 F
-drop table t1;
-#
-# Bug#50381 "Assertion failing in handler.h:1283:
-# void COST_VECT::add_io(double, double)"
-#
-CREATE TABLE t1 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-CREATE TABLE t2 (
-c1 INT NOT NULL,
-c2 VARCHAR(1) DEFAULT NULL,
-PRIMARY KEY (c1)
-);
-INSERT INTO t2 VALUES (10,'v');
-INSERT INTO t2 VALUES (11,'r');
-SELECT t1.c2
-FROM t2 STRAIGHT_JOIN t1 ON t1.c1 < t2.c1;
-c2
-DROP TABLE t1, t2;
-#
-# Bug#58463: Error Can't find record on SELECT with JOIN and ORDER BY
-#
-CREATE TABLE t1 (
-pk INT NOT NULL,
-PRIMARY KEY (pk)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (2);
-CREATE TABLE t2 (
-pk INT NOT NULL,
-i1 INT NOT NULL,
-i2 INT NOT NULL,
-c1 VARCHAR(1024) CHARACTER SET utf8,
-PRIMARY KEY (pk),
-KEY k1 (i1)
-);
-INSERT INTO t2 VALUES (3, 9, 1, NULL);
-EXPLAIN SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 NULL
-1 SIMPLE t2 const PRIMARY,k1 PRIMARY 4 const 1 Using where
-SELECT i1
-FROM t1 LEFT JOIN t2 ON t1.pk = t2.i2
-WHERE t2.i1 > 5
-AND t2.pk IS NULL
-ORDER BY i1;
-i1
-DROP TABLE t1, t2;
-#
-# Bug#12321461: CRASH IN DSMRR_IMPL::DSMRR_INIT ON SELECT STRAIGHT_JOIN
-#
-set @save_optimizer_switch = @@optimizer_switch;
-set optimizer_switch='block_nested_loop=off,batched_key_access=off';
-CREATE TABLE t1 (
-pk INTEGER,
-c1 VARCHAR(1) NOT NULL,
-PRIMARY KEY (pk)
-);
-CREATE TABLE t2 (
-c1 VARCHAR(1) NOT NULL
-);
-INSERT INTO t2 VALUES ('v'), ('c');
-EXPLAIN SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 2 NULL
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where
-SELECT STRAIGHT_JOIN t1.c1
-FROM t1 RIGHT OUTER JOIN t2 ON t1.c1 = t2.c1
-WHERE t1.pk > 176;
-c1
-DROP TABLE t1,t2;
-set optimizer_switch= @save_optimizer_switch;
-#
-# Bug#13249966 MRR: RANDOM ERROR DUE TO UNINITIALIZED RES WITH
-# SMALL READ_RND_BUFFER_SIZE
-#
-set @read_rnd_buffer_size_save= @@read_rnd_buffer_size;
-set read_rnd_buffer_size=1;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-1
-CREATE TABLE t1 (
-i1 INTEGER NOT NULL,
-i2 INTEGER NOT NULL,
-KEY (i2)
-);
-INSERT INTO t1 VALUES (0,1),(1,2),(2,3);
-EXPLAIN SELECT i1
-FROM t1
-WHERE i2 > 2;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i2 i2 4 NULL 1 Using where
-SELECT i1
-FROM t1
-WHERE i2 > 2;
-i1
-2
-DROP TABLE t1;
-set @@read_rnd_buffer_size= @read_rnd_buffer_size_save;
-select @@read_rnd_buffer_size;
-@@read_rnd_buffer_size
-262144
-#
-# Bug 12365385 STRAIGHT_JOIN QUERY QUICKLY EXHAUSTS SYSTEM+VIRT.
-# MEMORY LEADING TO SYSTEM CRASH
-#
-CREATE TABLE ten (a INTEGER);
-INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-CREATE TABLE t1 (
-pk INTEGER NOT NULL,
-i1 INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t1
-SELECT a, 1, 'MySQL' FROM ten;
-CREATE TABLE t2 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-c2 varchar(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t2
-SELECT a, 'MySQL', 'MySQL' FROM ten;
-CREATE TABLE t3 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t3
-SELECT a, 'MySQL' FROM ten;
-CREATE TABLE t4 (
-pk int(11) NOT NULL,
-c1_key varchar(10) CHARACTER SET utf8 NOT NULL,
-c2 varchar(10) NOT NULL,
-c3 varchar(10) NOT NULL,
-PRIMARY KEY (pk),
-KEY k1 (c1_key)
-);
-CREATE TABLE t5 (
-pk INTEGER NOT NULL,
-c1 VARCHAR(10) NOT NULL,
-PRIMARY KEY (pk)
-);
-INSERT INTO t5
-SELECT a, 'MySQL' FROM ten;
-EXPLAIN SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t5 ALL NULL NULL NULL NULL 10 NULL
-1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (Block Nested Loop)
-1 SIMPLE t3 ALL NULL NULL NULL NULL 10 Using join buffer (Block Nested Loop)
-1 SIMPLE t4 ALL k1 NULL NULL NULL 1 Range checked for each record (index map: 0x2)
-SELECT STRAIGHT_JOIN *
-FROM
-(t1 LEFT JOIN
-(t2 LEFT JOIN
-(t3 LEFT OUTER JOIN t4 ON t3.c1 <= t4.c1_key)
-ON t2.c1 = t4.c3)
-ON t1.c1 = t4.c2)
-RIGHT OUTER JOIN t5 ON t2.c2 <= t5.c1
-WHERE t1.i1 = 1;
-pk i1 c1 pk c1 c2 pk c1 pk c1_key c2 c3 pk c1
-DROP TABLE ten, t1, t2, t3, t4, t5;
+ #
+ # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ #
+ SET AUTOCOMMIT=0;
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ @@tx_isolation
+ REPEATABLE-READ
+ START TRANSACTION;
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ id select_type table type possible_keys key key_len ref rows Extra
+ 1 SIMPLE t1 range a a 5 NULL 2 Using where
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+ dummy a b
+ 3 3 3
+ 5 5 5
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+ INSERT INTO t1 VALUES (2,2,2);
+ ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ ROLLBACK;
+ ROLLBACK;
+ DROP TABLE t1;
diff --cc storage/tokudb/mysql-test/tokudb/suite.pm
index 6c52d0110fe,00000000000..70a65de4a2a
mode 100644,000000..100644
--- a/storage/tokudb/mysql-test/tokudb/suite.pm
+++ b/storage/tokudb/mysql-test/tokudb/suite.pm
@@@ -1,14 -1,0 +1,20 @@@
+package My::Suite::TokuDB;
+use File::Basename;
+@ISA = qw(My::Suite);
+
+# Ensure we can run the TokuDB tests even if hugepages are enabled
+$ENV{TOKU_HUGE_PAGES_OK}=1;
++my $exe_tokuftdump=
++ ::mtr_exe_maybe_exists(
++ ::vs_config_dirs('storage/tokudb/PerconaFT/tools', 'tokuftdump'),
++ "$::path_client_bindir/tokuftdump",
++ "$::basedir/storage/tokudb/PerconaFT/tools/tokuftdump");
++$ENV{'MYSQL_TOKUFTDUMP'}= ::native_path($exe_tokuftdump);
+
+#return "Not run for embedded server" if $::opt_embedded_server;
+return "No TokuDB engine" unless $ENV{HA_TOKUDB_SO} or $::mysqld_variables{tokudb};
+
+sub is_default { not $::opt_embedded_server }
+
+bless { };
+
diff --cc storage/tokudb/mysql-test/tokudb/t/compressions.test
index 00000000000,3e83cdb8b68..cd2e405c13a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/compressions.test
+++ b/storage/tokudb/mysql-test/tokudb/t/compressions.test
@@@ -1,0 -1,68 +1,68 @@@
+ --source include/have_tokudb.inc
+
+ # The purpose of this test is to perform about as full of an end-to-end
+ # validation that the requested compression algo at the SQL layer is actually
+ # applied to the FT data files. The only practical way to check this is to use
+ # tokuftdump and look at the data files header value for compression_method.
+ # A side effect of this is that the existance of this test will ensure that at
+ # no time will the compression method IDs ever change, if they do, this test
+ # will fail and users data will be irreparably damaged.
+
+ # uncompressed - compression_method=0
-CREATE TABLE t1 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_UNCOMPRESSED;
++CREATE TABLE t1 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_UNCOMPRESSED;
+ --let $t1_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t1-main'`
+
+ # SNAPPY - compression_method=7
-CREATE TABLE t2 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_SNAPPY;
++CREATE TABLE t2 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_SNAPPY;
+ --let $t2_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t2-main'`
+
+ # QUICKLZ - compression_method=9
-CREATE TABLE t3 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_QUICKLZ;
++CREATE TABLE t3 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_QUICKLZ;
+ --let $t3_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t3-main'`
+
+ # LZMA - compression_method=10
-CREATE TABLE t4 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_LZMA;
++CREATE TABLE t4 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_LZMA;
+ --let $t4_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t4-main'`
+
+ # ZLIB (without checksum) - compression_method=11
-CREATE TABLE t5 (a INT) ENGINE=TokuDB ROW_FORMAT=TOKUDB_ZLIB;
++CREATE TABLE t5 (a INT) ENGINE=TokuDB COMPRESSION=TOKUDB_ZLIB;
+ --let $t5_file = `SELECT internal_file_name FROM information_schema.tokudb_file_map WHERE dictionary_name = './test/t5-main'`
+
+ --let $datadir = `SELECT @@global.datadir`
+
+ # To ensure we have correct headers written to FT data files and no chance of a
+ # race between header rotation and tokuftdump, lets just perform a clean server
+ # shutdown before we go rooting around in the FT files.
+ --source include/shutdown_mysqld.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/dump
+
+ # uncompressed - compression_method=0
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t1_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=0
+ --source include/search_pattern_in_file.inc
+
+ # SNAPPY - compression_method=7
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t2_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=7
+ --source include/search_pattern_in_file.inc
+
+ # QUICKLZ - compression_method=9
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t3_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=9
+ --source include/search_pattern_in_file.inc
+
+ # LZMA - compression_method=10
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t4_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=10
+ --source include/search_pattern_in_file.inc
+
+ # ZLIB (without checksum) - compression_method=11
+ --exec $MYSQL_TOKUFTDUMP $datadir/$t5_file > $SEARCH_FILE
+ --let SEARCH_PATTERN=compression_method=11
+ --source include/search_pattern_in_file.inc
+
+ --remove_file $SEARCH_FILE
+ --source include/start_mysqld.inc
+
+ DROP TABLE t1, t2, t3, t4, t5;
diff --cc storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
index 00000000000,b30bc18d759..6130933b279
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
+++ b/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
@@@ -1,0 -1,89 +1,73 @@@
+ #
+ # MRR/Tokudb tests, taken from mysqltest/t/innodb_mrr.test
+ # (Turns off all other 6.0 optimizer switches than MRR)
+ #
+
+ --source include/have_tokudb.inc
+ --source include/have_mrr.inc
+
-set optimizer_switch='mrr=on,mrr_cost_based=off';
-
---disable_query_log
-if (`select locate('semijoin', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='semijoin=off';
-}
-if (`select locate('materialization', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='materialization=off';
-}
-if (`select locate('index_condition_pushdown', @@optimizer_switch) > 0`)
-{
- set optimizer_switch='index_condition_pushdown=off';
-}
---enable_query_log
-
++set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+ set default_storage_engine=TokuDB;
+
+ --source include/mrr_tests.inc
+
+
+ # taken from include/mrr_innodb_tests.inc
+
+ --source include/count_sessions.inc
+
+ # MRR tests that are special for InnoDB (and copied for TokuDB)
+
+ --echo #
+ --echo # Bug#41029 "MRR: SELECT FOR UPDATE fails to lock gaps (InnoDB table)"
+ --echo #
+
+ # This test verifies that a SELECT FOR UPDATE statement executed in
+ # REPEATABLE READ isolation will lock the entire read interval by verifying
+ # that a second transaction trying to update data within this interval will
+ # be blocked.
+
+ connect (con1,localhost,root,,);
+ connect (con2,localhost,root,,);
+
+ connection con1;
+
+ SET AUTOCOMMIT=0;
+
+ CREATE TABLE t1 (
+ dummy INT PRIMARY KEY,
+ a INT UNIQUE,
+ b INT
+ ) ENGINE=TokuDB;
+
+ INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+ COMMIT;
+
+ SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT @@tx_isolation;
+ START TRANSACTION;
+
+ EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+
+ connection con2;
+
+ SET AUTOCOMMIT=0;
+ SET TOKUDB_LOCK_TIMEOUT=2;
+ START TRANSACTION;
+
+ --error ER_LOCK_WAIT_TIMEOUT
+ INSERT INTO t1 VALUES (2,2,2);
+ ROLLBACK;
+
+ connection con1;
+
+ ROLLBACK;
+ DROP TABLE t1;
+
+ connection default;
+ disconnect con1;
+ disconnect con2;
+
+ --source include/wait_until_count_sessions.inc
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
index 00000000000,e2e695611b5..49c61790837
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
@@@ -1,0 -1,7 +1,8 @@@
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ SELECT * FROM t1 WHERE b = 2;
+ ERROR HY000: Incorrect key file for table 't1'; try to repair it
+ DROP TABLE t1;
++FOUND /ha_tokudb::read_full_row on table/ in tokudb.bugs.PS-3773.log
diff --cc storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
index 00000000000,f536f5163ef..1bd5aee087a
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/alter_table_comment_rebuild_data.result
@@@ -1,0 -1,186 +1,177 @@@
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ alter table t1 min_rows = 8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter min_rows]
+ alter table t1 max_rows = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter max_rows]
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter avg_row_length]
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=latin1 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter pack_keys]
+ alter table t1 character set = utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter character set]
+ alter table t1 data directory = '/tmp';
+ Warnings:
+ Warning 1618 <DATA DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter data directory]
+ alter table t1 index directory = '/tmp';
+ Warnings:
+ Warning 1618 <INDEX DIRECTORY> option ignored
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter index directory]
+ alter table t1 checksum = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter checksum]
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter delay_key_write]
+ alter table t1 comment = 'test table';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter comment]
+ alter table t1 password = '123456';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter password]
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter connection]
-alter table t1 key_block_size=32;
-show create table t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
- PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
-include/assert.inc [underlying ft file name not changed after alter key_block_size]
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_persistent]
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_auto_recalc]
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter stats_sample_pages]
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`='tokudb_zlib'
+ include/assert.inc [underlying ft file name not changed after alter auto_increment]
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name not changed after alter compression method]
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) CHARACTER SET latin1 DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter engine type]
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ Table Create Table
+ t1 CREATE TABLE `t1` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(30) DEFAULT NULL,
+ PRIMARY KEY (`id`)
-) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 ROW_FORMAT=TOKUDB_LZMA KEY_BLOCK_SIZE=32 COMMENT='test table' CONNECTION='127.0.0.1:3306'
++) ENGINE=TokuDB AUTO_INCREMENT=1000 DEFAULT CHARSET=utf8 MIN_ROWS=8 MAX_ROWS=100 AVG_ROW_LENGTH=100 PACK_KEYS=1 STATS_PERSISTENT=1 STATS_AUTO_RECALC=1 STATS_SAMPLE_PAGES=1 CHECKSUM=1 DELAY_KEY_WRITE=1 COMMENT='test table' CONNECTION='127.0.0.1:3306' `compression`=tokudb_lzma
+ include/assert.inc [underlying ft file name changed after alter convert character]
+ drop table t1;
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
index 00000000000,684f9cbf8d5..e9490e91c33
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-3773.test
@@@ -1,0 -1,26 +1,26 @@@
+ --source include/have_tokudb.inc
+ --source include/have_debug.inc
+
+ --let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/tokudb.bugs.PS-3773.log
---let $restart_parameters="restart: --log-error=$SEARCH_FILE"
++--let $restart_parameters="--log-error=$SEARCH_FILE"
+ --source include/restart_mysqld.inc
+
+ CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
+ SET tokudb_auto_analyze=0;
+ INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
+
-SET GLOBAL debug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
++SET GLOBAL debug_dbug = "+d,tokudb_fake_db_notfound_error_in_read_full_row";
+ --error ER_NOT_KEYFILE
+ SELECT * FROM t1 WHERE b = 2;
+
+ DROP TABLE t1;
+
+ --let SEARCH_PATTERN=ha_tokudb::read_full_row on table
+ --source include/search_pattern_in_file.inc
+
+ --let $restart_parameters=
+ --source include/restart_mysqld.inc
+
+ --remove_file $SEARCH_FILE
+ --let SEARCH_PATTERN=
+ --let SEARCH_FILE=
diff --cc storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
index 00000000000,fc4f3e0fd3d..e0e043f96ab
mode 000000,100644..100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/alter_table_comment_rebuild_data.test
@@@ -1,0 -1,184 +1,188 @@@
+ --source include/have_tokudb.inc
+
+ #
+ # Create a table and get the underlying main ft file name
+ #
+ create table t1(id int auto_increment, name varchar(30), primary key(id)) engine=TokuDB;
+ --let $ori_file= `select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+
+ #
+ # Case 1: alter create options that are ignored by TokuDB
+ #
+
+ # Alter table with min_rows
+ alter table t1 min_rows = 8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter min_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with max_rows
+ alter table t1 max_rows = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter max_rows
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with avg_row_length
+ alter table t1 avg_row_length = 100;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter avg_row_length
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with pack_keys
+ alter table t1 pack_keys = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter pack_keys
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with default character set
+ alter table t1 character set = utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter character set
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with data directory
+ alter table t1 data directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter data directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with index directory
+ alter table t1 index directory = '/tmp';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter index directory
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with checksum
+ alter table t1 checksum = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter checksum
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with delay_key_write
+ alter table t1 delay_key_write=1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter delay_key_write
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with comment
+ alter table t1 comment = 'test table';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter comment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with password
+ alter table t1 password = '123456';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter password
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with connection
+ alter table t1 connection = '127.0.0.1:3306';
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter connection
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
++
++#
++# In mariadb changing of key_block_size treated as index change
++#
+ # Alter table with key_block_size
-alter table t1 key_block_size=32;
-show create table t1;
---let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
---let $assert_text= underlying ft file name not changed after alter key_block_size
---let $assert_cond= "$ori_file" = "$new_file"
---source include/assert.inc
++#alter table t1 key_block_size=32;
++#show create table t1;
++#--let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
++#--let $assert_text= underlying ft file name not changed after alter key_block_size
++#--let $assert_cond= "$ori_file" = "$new_file"
++#--source include/assert.inc
+
+ # Alter table with stats_persistent
+ alter table t1 stats_persistent = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_persistent
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_auto_recalc
+ alter table t1 stats_auto_recalc = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_auto_recalc
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with stats_sample_pages
+ alter table t1 stats_sample_pages = 1;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter stats_sample_pages
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 2: alter create options that only update meta info, i.e inplace
+ #
+
+ # Alter table with auto_increment
+ alter table t1 auto_increment = 1000;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter auto_increment
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ # Alter table with compression method
-alter table t1 row_format=tokudb_lzma;
++alter table t1 compression=tokudb_lzma;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name not changed after alter compression method
+ --let $assert_cond= "$ori_file" = "$new_file"
+ --source include/assert.inc
+
+ #
+ # Case 3: alter create options that rebuild table using copy algorithm
+ #
+
+ # Alter table with engine type
+ alter table t1 engine=TokuDB;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter engine type
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ # Alter table with convert character
+ alter table t1 convert to character set utf8;
+ show create table t1;
+ --let $new_file=`select internal_file_name from information_schema.tokudb_file_map where table_schema='test' and table_name='t1' and table_dictionary_name='main'`
+ --let $assert_text= underlying ft file name changed after alter convert character
+ --let $assert_cond= "$ori_file" != "$new_file"
+ --source include/assert.inc
+
+ #
+ # clean up
+ #
+ drop table t1;
diff --cc storage/tokudb/tokudb_sysvars.cc
index bbc39dc550a,e8e9f908275..7771204dc11
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@@ -1006,9 -1075,12 +1002,9 @@@ st_mysql_sys_var* system_variables[] =
MYSQL_SYSVAR(support_xa),
#endif
- #if TOKUDB_DEBUG
+ #if defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(debug_pause_background_job_manager),
-#endif // defined(TOKUDB_DEBUG) && TOKUDB_DEBUG
- MYSQL_SYSVAR(dir_cmd_last_error),
- MYSQL_SYSVAR(dir_cmd_last_error_string),
- MYSQL_SYSVAR(dir_cmd),
+ MYSQL_SYSVAR(debug_pause_background_job_manager),
+#endif // TOKUDB_DEBUG
NULL
};
@@@ -1055,14 -1127,12 +1051,14 @@@ my_bool disable_prefetching(THD* thd)
my_bool disable_slow_alter(THD* thd) {
return (THDVAR(thd, disable_slow_alter) != 0);
}
- #if TOKU_INCLUDE_UPSERT
- my_bool disable_slow_update(THD* thd) {
- return (THDVAR(thd, disable_slow_update) != 0);
++#if defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
+ my_bool enable_fast_update(THD* thd) {
+ return (THDVAR(thd, enable_fast_update) != 0);
}
- my_bool disable_slow_upsert(THD* thd) {
- return (THDVAR(thd, disable_slow_upsert) != 0);
+ my_bool enable_fast_upsert(THD* thd) {
+ return (THDVAR(thd, enable_fast_upsert) != 0);
}
- #endif
++#endif // defined(TOKU_INCLUDE_UPSERT) && TOKU_INCLUDE_UPSERT
empty_scan_mode_t empty_scan(THD* thd) {
return (empty_scan_mode_t)THDVAR(thd, empty_scan);
}
@@@ -1139,17 -1211,5 +1137,17 @@@ my_bool support_xa(THD* thd)
return (THDVAR(thd, support_xa) != 0);
}
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+ha_create_table_option tokudb_table_options[] = {
+ HA_TOPTION_SYSVAR("compression", row_format, row_format),
+ HA_TOPTION_END
+};
+
+ha_create_table_option tokudb_index_options[] = {
+ HA_IOPTION_BOOL("clustering", clustering, 0),
+ HA_IOPTION_END
+};
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
} // namespace sysvars
} // namespace tokudb
diff --cc storage/tokudb/tokudb_sysvars.h
index 7701f211729,d81d5fd7999..2454f8fefd2
--- a/storage/tokudb/tokudb_sysvars.h
+++ b/storage/tokudb/tokudb_sysvars.h
@@@ -26,26 -26,6 +26,26 @@@ Copyright (c) 2006, 2015, Percona and/o
#ifndef _TOKUDB_SYSVARS_H
#define _TOKUDB_SYSVARS_H
- #if TOKU_INCLUDE_OPTION_STRUCTS
++#if defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+struct ha_table_option_struct {
+ uint row_format;
+};
+
+struct ha_index_option_struct {
+ bool clustering;
+};
+
+static inline bool key_is_clustering(const KEY *key) {
+ return (key->flags & HA_CLUSTERING) || (key->option_struct && key->option_struct->clustering);
+}
+
+#else
+
+static inline bool key_is_clustering(const KEY *key) {
+ return key->flags & HA_CLUSTERING;
+}
- #endif
++#endif // defined(TOKU_INCLUDE_OPTION_STRUCTS) && TOKU_INCLUDE_OPTION_STRUCTS
+
namespace tokudb {
namespace sysvars {
1
0