commits

commits@lists.mariadb.org

  • 14605 discussions
[Commits] 33a3184a56e: Range Locking: previous cset cont'd: fix assertions with reverse-order CFs
by Sergei Petrunia 05 Jan '20

revision-id: 33a3184a56e03cec2a910ed3b7c67624e08ab993 (fb-prod201903-273-g33a3184a56e) parent(s): f9cc0d345aeccabcb84ac98ce5d1130b637e024d author: Sergei Petrunia committer: Sergei Petrunia timestamp: 2020-01-05 18:04:51 +0300 message: Range Locking: previous cset cont'd: fix assertions with reverse-order CFs - Endpoint flags must be flipped whenever we are using reverse-ordered CFs. - As for range endpoints themselves = they must be flipped when doing a reverse-ordered scan = they must NOT be flipped when using reverse-ordered CF. (but the code in index_first/index_last already "flips" them by calling index_(first|last)_intern. Take all of the above into account. --- mysql-test/suite/rocksdb/r/range_locking.result | 2 +- .../suite/rocksdb/r/range_locking_rev_cf.result | 2 +- storage/rocksdb/ha_rocksdb.cc | 23 +++++++++++++++------- storage/rocksdb/ha_rocksdb.h | 1 + 4 files changed, 19 insertions(+), 9 deletions(-) diff --git a/mysql-test/suite/rocksdb/r/range_locking.result b/mysql-test/suite/rocksdb/r/range_locking.result index 85264fbf2fa..3205045c3b3 100644 --- a/mysql-test/suite/rocksdb/r/range_locking.result +++ b/mysql-test/suite/rocksdb/r/range_locking.result @@ -510,7 +510,7 @@ pk a 1992 1992 # select * from information_schema.rocksdb_locks; # With replacements by select_from_is_rowlocks.inc COLUMN_FAMILY_ID TRANSACTION_ID KEY mode -$cf_id $trx_id 00${indexnr}800007c6 - 01${indexnr+1} X +$cf_id $trx_id 00${indexnr}800007c6 - 00${indexnr+1} X rollback; connection con1; rollback; diff --git a/mysql-test/suite/rocksdb/r/range_locking_rev_cf.result b/mysql-test/suite/rocksdb/r/range_locking_rev_cf.result index 87522d5ae37..c10ebf6bf87 100644 --- a/mysql-test/suite/rocksdb/r/range_locking_rev_cf.result +++ b/mysql-test/suite/rocksdb/r/range_locking_rev_cf.result @@ -470,7 +470,7 @@ pk a 1992 1992 # select * from information_schema.rocksdb_locks; # With replacements by select_from_is_rowlocks.inc COLUMN_FAMILY_ID TRANSACTION_ID KEY mode -$cf_id $trx_id 00${indexnr}800007c6 - 01${indexnr+1} X +$cf_id $trx_id 00${indexnr}800007c6 - 00${indexnr+1} X rollback; connection con1; rollback; diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index f77da1bc8da..b00801c62ab 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -8393,6 +8393,7 @@ int ha_rocksdb::read_range_first(const key_range *const start_key, if (!start_key) { // Read first record + // (range locking will set the lock inside the ha_index_first call) result = ha_index_first(table->record[0]); } else { if (is_using_prohibited_gap_locks( @@ -8443,6 +8444,7 @@ int ha_rocksdb::set_range_lock(Rdb_transaction *tx, const enum ha_rkey_function &find_flag, const rocksdb::Slice &slice_arg, const key_range *const end_key, + bool flip_rev_cf, bool *use_locking_iterator ) { @@ -8591,12 +8593,19 @@ int ha_rocksdb::set_range_lock(Rdb_transaction *tx, no_end_endpoint= true; } + if (kd.m_is_reverse_cf) { + // Flip the endpoint flags + end_has_inf_suffix = !end_has_inf_suffix; + start_has_inf_suffix = !start_has_inf_suffix; + } + rocksdb::Endpoint start_endp; rocksdb::Endpoint end_endp; - if (kd.m_is_reverse_cf) { + + if (flip_rev_cf && kd.m_is_reverse_cf) { // Flip the endpoints - start_endp =rocksdb::Endpoint(end_slice, !end_has_inf_suffix); - end_endp = rocksdb::Endpoint(slice, !start_has_inf_suffix); + start_endp =rocksdb::Endpoint(end_slice, end_has_inf_suffix); + end_endp = rocksdb::Endpoint(slice, start_has_inf_suffix); } else { start_endp= rocksdb::Endpoint(slice, start_has_inf_suffix); 
end_endp= rocksdb::Endpoint(end_slice, end_has_inf_suffix); @@ -8747,7 +8756,7 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key, bool use_locking_iterator; rocksdb::Slice lock_slice(reinterpret_cast<const char *>(m_sk_packed_tuple), packed_size); - if ((rc = set_range_lock(tx, kd, find_flag, lock_slice, end_key, + if ((rc = set_range_lock(tx, kd, find_flag, lock_slice, end_key, true, &use_locking_iterator))) DBUG_RETURN(rc); @@ -9432,7 +9441,7 @@ int ha_rocksdb::index_first_intern(uchar *const buf) { bool use_locking_iter; if ((rc = set_range_lock(tx, kd, HA_READ_KEY_OR_NEXT, index_key, - end_range, &use_locking_iter))) + end_range, false, &use_locking_iter))) DBUG_RETURN(rc); const bool is_new_snapshot = !tx->has_snapshot(); @@ -9527,8 +9536,8 @@ int ha_rocksdb::index_last_intern(uchar *const buf) { DBUG_ASSERT(tx != nullptr); bool use_locking_iter; - if ((rc = set_range_lock(tx, kd, HA_READ_PREFIX_LAST_OR_PREV, index_key, - end_range, &use_locking_iter))) + if ((rc = set_range_lock(tx, kd, HA_READ_BEFORE_KEY, index_key, + end_range, false, &use_locking_iter))) DBUG_RETURN(rc); bool is_new_snapshot = !tx->has_snapshot(); diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 272dc9973d4..cbe894df8f5 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -329,6 +329,7 @@ class ha_rocksdb : public my_core::handler { const enum ha_rkey_function &find_flag, const rocksdb::Slice &slice, const key_range *const end_key, + bool flip_rev_cf, bool *use_locking_iterator); void release_scan_iterator(void);
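A minimal standalone sketch of the endpoint rule described in the message above: for a reverse-ordered column family the "infinity suffix" flags are always flipped, while the endpoints themselves are swapped only when the caller has not already done so (the new flip_rev_cf parameter; index_first_intern/index_last_intern pass false because they already arrange the endpoints). The types and helper below are hypothetical stand-ins, not the MyRocks/RocksDB API.

#include <iostream>
#include <string>
#include <utility>

struct Endpoint {
  std::string key;
  bool inf_suffix;  // true: endpoint lies just past all keys with this prefix
};

// Hypothetical helper mirroring the logic added to ha_rocksdb::set_range_lock().
std::pair<Endpoint, Endpoint> make_lock_range(std::string start, bool start_inf,
                                              std::string end, bool end_inf,
                                              bool is_reverse_cf, bool flip_rev_cf) {
  if (is_reverse_cf) {
    // Endpoint flags are flipped whenever the CF is reverse-ordered.
    start_inf = !start_inf;
    end_inf = !end_inf;
  }
  if (flip_rev_cf && is_reverse_cf) {
    // Swap the endpoints only when the caller has not flipped them already.
    return {{end, end_inf}, {start, start_inf}};
  }
  return {{start, start_inf}, {end, end_inf}};
}

int main() {
  auto [lo, hi] = make_lock_range("0001A", false, "0001Z", true,
                                  /*is_reverse_cf=*/true, /*flip_rev_cf=*/true);
  std::cout << lo.key << (lo.inf_suffix ? "+inf" : "") << " .. "
            << hi.key << (hi.inf_suffix ? "+inf" : "") << "\n";
  return 0;
}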
[Commits] d13f3a4: MDEV-21184 Assertion `used_tables_cache == 0' failed in Item_func::fix_fields
by IgorBabaev 03 Jan '20

revision-id: d13f3a43c0fffd0daffbaf564760b73d6228b171 (mariadb-10.4.10-33-gd13f3a4)
parent(s): ed355f59dd7e0065ebde15223c2f39f8b71b2958
author: Igor Babaev
committer: Igor Babaev
timestamp: 2020-01-03 11:15:00 -0800
message:

MDEV-21184 Assertion `used_tables_cache == 0' failed in Item_func::fix_fields with condition_pushdown_from_having

This bug could manifest itself for queries with GROUP BY and HAVING clauses where the HAVING clause was a conjunctive condition that depended exclusively on grouping fields and at least one conjunct contained an equality of the form fld=sq, where fld is a grouping field and sq is a constant subquery. In this case the optimizer tries to push the HAVING condition down into WHERE. To construct the pushable condition the optimizer first transforms all multiple equalities in HAVING into simple equalities; this is needed for proper processing of the pushed conditions in WHERE. The multiple equalities at all AND/OR levels must be converted to simple equalities because any multiple equality may refer to a multiple equality at an upper level.

Before this patch the conversion was performed like this:
  multiple_equality(x,f1,...,fn) => x=f1 and ... and x=fn
When an equality item for x=fi was constructed, both the items for x and fi were cloned. If x happened to be a constant subquery that could not be cloned, the conversion failed. If conversions of other multiple equalities had already succeeded, the whole condition was left in an inconsistent state that could cause various failures.

The solution provided by the patch is:
  1. to use a different conversion rule when x is a constant:
     multiple_equality(x,f1,...,fn) => f1=x and f2=f1 and ... and fn=f1
  2. not to clone x if it is a constant.
Such conversions cannot fail, and the result of the conversion preserves the equivalence of f1,...,fn, which can be used for other optimizations.

This patch also makes sure that expensive predicates are not pushed from HAVING into WHERE.
--- mysql-test/main/derived_cond_pushdown.result | 8 +- mysql-test/main/having_cond_pushdown.result | 148 +++++++++++++++++++++++++++ mysql-test/main/having_cond_pushdown.test | 39 +++++++ sql/item.cc | 14 ++- sql/item.h | 7 +- sql/item_cmpfunc.cc | 105 +++++++++++-------- sql/item_cmpfunc.h | 3 +- sql/sql_lex.cc | 14 ++- 8 files changed, 280 insertions(+), 58 deletions(-) diff --git a/mysql-test/main/derived_cond_pushdown.result b/mysql-test/main/derived_cond_pushdown.result index c044b79..125de26 100644 --- a/mysql-test/main/derived_cond_pushdown.result +++ b/mysql-test/main/derived_cond_pushdown.result @@ -8937,13 +8937,13 @@ EXPLAIN "materialized": { "query_block": { "select_id": 2, - "having_condition": "t1.b = 1 and max_c > 37 and max_c > 30", + "having_condition": "max_c > 37 and max_c > 30", "table": { "table_name": "t1", "access_type": "ALL", "rows": 3, "filtered": 100, - "attached_condition": "t1.a = 1" + "attached_condition": "t1.a = 1 and t1.b = 1" } } } @@ -9012,13 +9012,13 @@ EXPLAIN "materialized": { "query_block": { "select_id": 2, - "having_condition": "t1.b = 1 and max_c > 37 and max_c > 30", + "having_condition": "max_c > 37 and max_c > 30", "table": { "table_name": "t1", "access_type": "ALL", "rows": 3, "filtered": 100, - "attached_condition": "t1.a = 1 and t1.d = 1" + "attached_condition": "t1.a = 1 and t1.b = 1 and t1.d = 1" } } } diff --git a/mysql-test/main/having_cond_pushdown.result b/mysql-test/main/having_cond_pushdown.result index 82a4813..9b12429 100644 --- a/mysql-test/main/having_cond_pushdown.result +++ b/mysql-test/main/having_cond_pushdown.result @@ -4776,3 +4776,151 @@ WHERE t1.a = 3 AND (t1.a < 2 AND t1.b > 3) GROUP BY t1.a; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE DROP TABLE t1; +# +# MDEV-21184: Constant subquery in condition movable to WHERE +# +CREATE TABLE t1(a int, b int); +INSERT INTO t1 VALUES +(1,10), (2,20), (1,11), (1,15), (2,20), (1,10), (2,21); +CREATE TABLE t2 (c INT); +INSERT INTO t2 VALUES (2),(3); +EXPLAIN FORMAT=JSON SELECT a FROM t1 GROUP BY a HAVING a = 8 OR a = ( SELECT MIN(c) FROM t2 ); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "filesort": { + "sort_key": "t1.a", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 7, + "filtered": 100, + "attached_condition": "t1.a = 8 or t1.a = (subquery#2)" + }, + "subqueries": [ + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 2, + "filtered": 100 + } + } + } + ] + } + } + } +} +SELECT a FROM t1 GROUP BY a HAVING a = 8 OR a = ( SELECT MIN(c) FROM t2 ); +a +2 +EXPLAIN FORMAT=JSON SELECT a FROM t1 GROUP BY a,b +HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and b < 20; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 7, + "filtered": 100, + "attached_condition": "(t1.a = 8 or t1.a = (subquery#2)) and t1.b < 20" + }, + "subqueries": [ + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 2, + "filtered": 100 + } + } + } + ] + } + } + } +} +SELECT a FROM t1 GROUP BY a,b +HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and b < 20; +a +EXPLAIN FORMAT=JSON SELECT a FROM t1 GROUP BY a +HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and SUM(b) > 20; +EXPLAIN +{ + "query_block": { + "select_id": 1, + 
"having_condition": "sum(t1.b) > 20", + "filesort": { + "sort_key": "t1.a", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 7, + "filtered": 100, + "attached_condition": "t1.a = 8 or t1.a = (subquery#2)" + }, + "subqueries": [ + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 2, + "filtered": 100 + } + } + } + ] + } + } + } +} +SELECT a FROM t1 GROUP BY a +HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and SUM(b) > 20; +a +2 +EXPLAIN FORMAT=JSON SELECT a FROM t1 GROUP BY a HAVING a = ( SELECT MIN(c) FROM t2 ); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 7, + "filtered": 100, + "attached_condition": "t1.a = (subquery#2)" + }, + "subqueries": [ + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 2, + "filtered": 100 + } + } + } + ] + } +} +SELECT a FROM t1 GROUP BY a HAVING a = ( SELECT MIN(c) FROM t2 ); +a +2 +DROP TABLE t1,t2; diff --git a/mysql-test/main/having_cond_pushdown.test b/mysql-test/main/having_cond_pushdown.test index f1bf706..fc75122 100644 --- a/mysql-test/main/having_cond_pushdown.test +++ b/mysql-test/main/having_cond_pushdown.test @@ -1401,3 +1401,42 @@ EXPLAIN SELECT t1.a,MAX(t1.b),t1.c FROM t1 WHERE t1.a = 3 AND (t1.a < 2 AND t1.b > 3) GROUP BY t1.a; DROP TABLE t1; + +--echo # +--echo # MDEV-21184: Constant subquery in condition movable to WHERE +--echo # + +CREATE TABLE t1(a int, b int); +INSERT INTO t1 VALUES + (1,10), (2,20), (1,11), (1,15), (2,20), (1,10), (2,21); + +CREATE TABLE t2 (c INT); +INSERT INTO t2 VALUES (2),(3); + +let $q= +SELECT a FROM t1 GROUP BY a HAVING a = 8 OR a = ( SELECT MIN(c) FROM t2 ); + +eval EXPLAIN FORMAT=JSON $q; +eval $q; + +let $q= +SELECT a FROM t1 GROUP BY a,b + HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and b < 20; + +eval EXPLAIN FORMAT=JSON $q; +eval $q; + +let $q= +SELECT a FROM t1 GROUP BY a + HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and SUM(b) > 20; + +eval EXPLAIN FORMAT=JSON $q; +eval $q; + +let $q= +SELECT a FROM t1 GROUP BY a HAVING a = ( SELECT MIN(c) FROM t2 ); + +eval EXPLAIN FORMAT=JSON $q; +eval $q; + +DROP TABLE t1,t2; diff --git a/sql/item.cc b/sql/item.cc index 900a973..7b4571e 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -7352,7 +7352,7 @@ Item *Item::build_pushable_cond(THD *thd, List<Item> equalities; Item *new_cond= NULL; if (((Item_equal *)this)->create_pushable_equalities(thd, &equalities, - checker, arg) || + checker, arg, true) || (equalities.elements == 0)) return 0; @@ -10512,3 +10512,15 @@ void Item::register_in(THD *thd) next= thd->free_list; thd->free_list= this; } + + +bool Item::cleanup_excluding_immutables_processor (void *arg) +{ + if (!(get_extraction_flag() == IMMUTABLE_FL)) + return cleanup_processor(arg); + else + { + clear_extraction_flag(); + return false; + } +} diff --git a/sql/item.h b/sql/item.h index 2ac0964..205c070 100644 --- a/sql/item.h +++ b/sql/item.h @@ -152,8 +152,10 @@ bool mark_unsupported_function(const char *w1, const char *w2, #define NO_EXTRACTION_FL (1 << 6) #define FULL_EXTRACTION_FL (1 << 7) #define DELETION_FL (1 << 8) -#define SUBSTITUTION_FL (1 << 9) -#define EXTRACTION_MASK (NO_EXTRACTION_FL | FULL_EXTRACTION_FL | DELETION_FL) +#define IMMUTABLE_FL (1 << 9) +#define SUBSTITUTION_FL (1 << 10) +#define EXTRACTION_MASK \ + (NO_EXTRACTION_FL | FULL_EXTRACTION_FL | DELETION_FL | IMMUTABLE_FL) extern const char 
*item_empty_name; @@ -1867,6 +1869,7 @@ class Item: public Value_source, virtual bool cleanup_processor(void *arg); virtual bool cleanup_excluding_fields_processor (void *arg) { return cleanup_processor(arg); } + bool cleanup_excluding_immutables_processor (void *arg); virtual bool cleanup_excluding_const_fields_processor (void *arg) { return cleanup_processor(arg); } virtual bool collect_item_field_processor(void *arg) { return 0; } diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 9110f34..5ae5931 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -7410,6 +7410,7 @@ Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel) of the tree of the object to check if multiple equality elements can be used to create equalities @param arg parameter to be passed to the checker + @param clone_const true <=> clone the constant member if there is any @details How the method works on examples: @@ -7420,36 +7421,31 @@ Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel) Example 2: It takes MULT_EQ(1,a,b) and tries to create from its elements a set of - equalities {(1=a),(1=b)}. + equalities {(a=1),(a=b)}. How it is done: - 1. The method finds the left part of the equalities to be built. It will - be the same for all equalities. It is either: - a. A constant if there is any - b. A first element in the multiple equality that satisfies - checker function + 1. If there is a constant member c the first non-constant member x for + which the function checker returns true is taken and an item for + the equality x=c is created. When constructing the equality item + the left part of the equality is always taken as a clone of x while + the right part is taken as a clone of c only if clone_const == true. - For the example 1 the left element is field 'x'. - For the example 2 it is constant '1'. + 2. After this all equalities of the form x=a (where x designates the first + non-constant member for which checker returns true and a is some other + such member of the multiplle equality) are created. When constructing + an equality item both its parts are taken as clones of x and a. - 2. If the left element is found the rest elements of the multiple equality - are checked with the checker function if they can be right parts - of equalities. - If the element can be a right part of the equality, equality is built. - It is built with the left part element found at the step 1 and - the right part element found at this step (step 2). - - Suppose for the example above that both 'a' and 'b' fields can be used - to build equalities: + Suppose in the examples above that for 'x', 'a', and 'b' the function + checker returns true. Example 1: - for 'a' field (x=a) is built - for 'b' field (x=b) is built + the equality (x=a) is built + the equality (x=b) is built Example 2: - for 'a' field (1=a) is built - for 'b' field (1=b) is built + the equality (a=1) is built + the equality (a=b) is built 3. As a result we get a set of equalities built with the elements of this multiple equality. They are saved in the equality list. @@ -7458,15 +7454,17 @@ Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel) {(x=a),(x=b)} Example 2: - {(1=a),(1=b)} + {(a=1),(a=b)} @note This method is called for condition pushdown into materialized derived table/view, and IN subquery, and pushdown from HAVING into WHERE. When it is called for pushdown from HAVING the empty checker is passed. - It happens because elements of this multiple equality don't need to be - checked if they can be used to build equalities. 
There are no elements - that can't be used to build equalities. + This is because in this case the elements of the multiple equality don't + need to be checked if they can be used to build equalities: either all + equalities can be pushed or none of them can be pushed. + When the function is called for pushdown from HAVING the value of the + parameter clone_const is always false. In other cases it's always true. @retval true if an error occurs @retval false otherwise @@ -7475,24 +7473,42 @@ Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel) bool Item_equal::create_pushable_equalities(THD *thd, List<Item> *equalities, Pushdown_checker checker, - uchar *arg) + uchar *arg, + bool clone_const) { Item *item; + Item *left_item= NULL; + Item *right_item = get_const(); Item_equal_fields_iterator it(*this); - Item *left_item = get_const(); - if (!left_item) + + while ((item=it++)) { - while ((item=it++)) - { - left_item= item; - if (checker && !((item->*checker) (arg))) - continue; - break; - } + left_item= item; + if (checker && !((item->*checker) (arg))) + continue; + break; } + if (!left_item) return false; + if (right_item) + { + Item_func_eq *eq= 0; + Item *left_item_clone= left_item->build_clone(thd); + Item *right_item_clone= !clone_const ? + right_item : right_item->build_clone(thd); + if (!left_item_clone || !right_item_clone) + return true; + eq= new (thd->mem_root) Item_func_eq(thd, + left_item_clone, + right_item_clone); + if (!eq || equalities->push_back(eq, thd->mem_root)) + return true; + if (!clone_const) + right_item->set_extraction_flag(IMMUTABLE_FL); + } + while ((item=it++)) { if (checker && !((item->*checker) (arg))) @@ -7500,15 +7516,14 @@ bool Item_equal::create_pushable_equalities(THD *thd, Item_func_eq *eq= 0; Item *left_item_clone= left_item->build_clone(thd); Item *right_item_clone= item->build_clone(thd); - if (left_item_clone && right_item_clone) - { - left_item_clone->set_item_equal(NULL); - right_item_clone->set_item_equal(NULL); - eq= new (thd->mem_root) Item_func_eq(thd, - right_item_clone, - left_item_clone); - } - if (eq && equalities->push_back(eq, thd->mem_root)) + if (!(left_item_clone && right_item_clone)) + return true; + left_item_clone->set_item_equal(NULL); + right_item_clone->set_item_equal(NULL); + eq= new (thd->mem_root) Item_func_eq(thd, + right_item_clone, + left_item_clone); + if (!eq || equalities->push_back(eq, thd->mem_root)) return true; } return false; @@ -7533,7 +7548,7 @@ bool Item_equal::create_pushable_equalities(THD *thd, Item *Item_equal::multiple_equality_transformer(THD *thd, uchar *arg) { List<Item> equalities; - if (create_pushable_equalities(thd, &equalities, 0, 0)) + if (create_pushable_equalities(thd, &equalities, 0, 0, false)) return 0; switch (equalities.elements) diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 0a91d45..1d84ee6 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -3208,7 +3208,8 @@ class Item_equal: public Item_bool_func bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred); bool excl_dep_on_grouping_fields(st_select_lex *sel); bool create_pushable_equalities(THD *thd, List<Item> *equalities, - Pushdown_checker checker, uchar *arg); + Pushdown_checker checker, uchar *arg, + bool clone_const); /* Return the number of elements in this multiple equality */ uint elements_count() { return equal_items.elements; } friend class Item_equal_fields_iterator; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 16bb53c..ea34679 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -7988,7 
+7988,7 @@ st_select_lex::check_cond_extraction_for_grouping_fields(THD *thd, Item *cond) } else { - int fl= cond->excl_dep_on_grouping_fields(this) ? + int fl= cond->excl_dep_on_grouping_fields(this) && !cond->is_expensive() ? FULL_EXTRACTION_FL : NO_EXTRACTION_FL; cond->set_extraction_flag(fl); } @@ -9819,7 +9819,7 @@ st_select_lex::build_pushable_cond_for_having_pushdown(THD *thd, Item *cond) { List_iterator<Item> li(*((Item_cond*) result)->argument_list()); Item *item; - while ((item=li++)) + while ((item= li++)) { if (attach_to_conds.push_back(item, thd->mem_root)) return true; @@ -9839,8 +9839,13 @@ st_select_lex::build_pushable_cond_for_having_pushdown(THD *thd, Item *cond) */ if (cond->type() != Item::COND_ITEM) return false; + if (((Item_cond *)cond)->functype() != Item_cond::COND_AND_FUNC) { + /* + cond is not a conjunctive formula and it cannot be pushed into WHERE. + Try to extract a formula that can be pushed. + */ Item *fix= cond->build_pushable_cond(thd, 0, 0); if (!fix) return false; @@ -9860,7 +9865,6 @@ st_select_lex::build_pushable_cond_for_having_pushdown(THD *thd, Item *cond) Item *result= item->transform(thd, &Item::multiple_equality_transformer, (uchar *)item); - if (!result) return true; if (result->type() == Item::COND_ITEM && @@ -10188,8 +10192,8 @@ Item *st_select_lex::pushdown_from_having_into_where(THD *thd, Item *having) &Item::field_transformer_for_having_pushdown, (uchar *)this); - if (item->walk(&Item:: cleanup_processor, 0, STOP_PTR) || - item->fix_fields(thd, NULL)) + if (item->walk(&Item::cleanup_excluding_immutables_processor, 0, STOP_PTR) + || item->fix_fields(thd, NULL)) { attach_to_conds.empty(); goto exit;
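The new conversion rule from the commit message (multiple_equality(x,f1,...,fn) => f1=x and f2=f1 and ... and fn=f1 when x is a constant) can be sketched outside the server as follows. This is a simplified illustration with its own types, not the Item_equal::create_pushable_equalities() API; the point is that the constant appears in exactly one equality and is never cloned.

#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Build the list of simple equalities for one multiple equality.
std::vector<std::string>
convert_mult_equal(const std::optional<std::string> &constant,
                   const std::vector<std::string> &fields) {
  std::vector<std::string> equalities;
  if (fields.empty())
    return equalities;
  const std::string &first = fields[0];
  if (constant)                                   // f1 = c: the only place c is used
    equalities.push_back(first + " = " + *constant);
  for (size_t i = 1; i < fields.size(); i++)      // fi = f1 for all remaining members
    equalities.push_back(fields[i] + " = " + first);
  return equalities;
}

int main() {
  // MULT_EQ((SELECT MIN(c) FROM t2), a, b)  =>  { a = (SELECT MIN(c) FROM t2), b = a }
  for (const auto &eq :
       convert_mult_equal(std::string("(SELECT MIN(c) FROM t2)"), {"a", "b"}))
    std::cout << eq << "\n";
  return 0;
}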
[Commits] 547495b: MDEV-21184 Assertion `used_tables_cache == 0' failed in Item_func::fix_fields
by IgorBabaev 03 Jan '20

revision-id: 547495bacdeb7de5bfa135159879d3c3ac894d1d (mariadb-10.4.10-33-g547495b)
parent(s): ed355f59dd7e0065ebde15223c2f39f8b71b2958
author: Igor Babaev
committer: Igor Babaev
timestamp: 2020-01-03 11:12:51 -0800
message:

MDEV-21184 Assertion `used_tables_cache == 0' failed in Item_func::fix_fields with condition_pushdown_from_having

This bug could manifest itself for queries with GROUP BY and HAVING clauses where the HAVING clause was a conjunctive condition that depended exclusively on grouping fields and at least one conjunct contained an equality of the form fld=sq, where fld is a grouping field and sq is a constant subquery. In this case the optimizer tries to push the HAVING condition down into WHERE. To construct the pushable condition the optimizer first transforms all multiple equalities in HAVING into simple equalities; this is needed for proper processing of the pushed conditions in WHERE. The multiple equalities at all AND/OR levels must be converted to simple equalities because any multiple equality may refer to a multiple equality at an upper level.

Before this patch the conversion was performed like this:
  multiple_equality(x,f1,...,fn) => x=f1 and ... and x=fn
When an equality item for x=fi was constructed, both the items for x and fi were cloned. If x happened to be a constant subquery that could not be cloned, the conversion failed. If conversions of other multiple equalities had already succeeded, the whole condition was left in an inconsistent state that could cause various failures.

The solution provided by the patch is:
  1. to use a different conversion rule when x is a constant:
     multiple_equality(x,f1,...,fn) => f1=x and f2=f1 and ... and fn=f1
  2. not to clone x if it is a constant.
Such conversions cannot fail, and the result of the conversion preserves the equivalence of f1,...,fn, which can be used for other optimizations.

This patch also makes sure that expensive predicates are not pushed from HAVING into WHERE.
--- mysql-test/main/derived_cond_pushdown.result | 8 +- mysql-test/main/having_cond_pushdown.result | 148 +++++++++++++++++++++++++++ mysql-test/main/having_cond_pushdown.test | 39 +++++++ sql/item.cc | 14 ++- sql/item.h | 7 +- sql/item_cmpfunc.cc | 105 +++++++++++-------- sql/sql_lex.cc | 14 ++- 7 files changed, 278 insertions(+), 57 deletions(-) diff --git a/mysql-test/main/derived_cond_pushdown.result b/mysql-test/main/derived_cond_pushdown.result index c044b79..125de26 100644 --- a/mysql-test/main/derived_cond_pushdown.result +++ b/mysql-test/main/derived_cond_pushdown.result @@ -8937,13 +8937,13 @@ EXPLAIN "materialized": { "query_block": { "select_id": 2, - "having_condition": "t1.b = 1 and max_c > 37 and max_c > 30", + "having_condition": "max_c > 37 and max_c > 30", "table": { "table_name": "t1", "access_type": "ALL", "rows": 3, "filtered": 100, - "attached_condition": "t1.a = 1" + "attached_condition": "t1.a = 1 and t1.b = 1" } } } @@ -9012,13 +9012,13 @@ EXPLAIN "materialized": { "query_block": { "select_id": 2, - "having_condition": "t1.b = 1 and max_c > 37 and max_c > 30", + "having_condition": "max_c > 37 and max_c > 30", "table": { "table_name": "t1", "access_type": "ALL", "rows": 3, "filtered": 100, - "attached_condition": "t1.a = 1 and t1.d = 1" + "attached_condition": "t1.a = 1 and t1.b = 1 and t1.d = 1" } } } diff --git a/mysql-test/main/having_cond_pushdown.result b/mysql-test/main/having_cond_pushdown.result index 82a4813..9b12429 100644 --- a/mysql-test/main/having_cond_pushdown.result +++ b/mysql-test/main/having_cond_pushdown.result @@ -4776,3 +4776,151 @@ WHERE t1.a = 3 AND (t1.a < 2 AND t1.b > 3) GROUP BY t1.a; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE DROP TABLE t1; +# +# MDEV-21184: Constant subquery in condition movable to WHERE +# +CREATE TABLE t1(a int, b int); +INSERT INTO t1 VALUES +(1,10), (2,20), (1,11), (1,15), (2,20), (1,10), (2,21); +CREATE TABLE t2 (c INT); +INSERT INTO t2 VALUES (2),(3); +EXPLAIN FORMAT=JSON SELECT a FROM t1 GROUP BY a HAVING a = 8 OR a = ( SELECT MIN(c) FROM t2 ); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "filesort": { + "sort_key": "t1.a", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 7, + "filtered": 100, + "attached_condition": "t1.a = 8 or t1.a = (subquery#2)" + }, + "subqueries": [ + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 2, + "filtered": 100 + } + } + } + ] + } + } + } +} +SELECT a FROM t1 GROUP BY a HAVING a = 8 OR a = ( SELECT MIN(c) FROM t2 ); +a +2 +EXPLAIN FORMAT=JSON SELECT a FROM t1 GROUP BY a,b +HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and b < 20; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 7, + "filtered": 100, + "attached_condition": "(t1.a = 8 or t1.a = (subquery#2)) and t1.b < 20" + }, + "subqueries": [ + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 2, + "filtered": 100 + } + } + } + ] + } + } + } +} +SELECT a FROM t1 GROUP BY a,b +HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and b < 20; +a +EXPLAIN FORMAT=JSON SELECT a FROM t1 GROUP BY a +HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and SUM(b) > 20; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "having_condition": "sum(t1.b) > 20", + 
"filesort": { + "sort_key": "t1.a", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 7, + "filtered": 100, + "attached_condition": "t1.a = 8 or t1.a = (subquery#2)" + }, + "subqueries": [ + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 2, + "filtered": 100 + } + } + } + ] + } + } + } +} +SELECT a FROM t1 GROUP BY a +HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and SUM(b) > 20; +a +2 +EXPLAIN FORMAT=JSON SELECT a FROM t1 GROUP BY a HAVING a = ( SELECT MIN(c) FROM t2 ); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 7, + "filtered": 100, + "attached_condition": "t1.a = (subquery#2)" + }, + "subqueries": [ + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 2, + "filtered": 100 + } + } + } + ] + } +} +SELECT a FROM t1 GROUP BY a HAVING a = ( SELECT MIN(c) FROM t2 ); +a +2 +DROP TABLE t1,t2; diff --git a/mysql-test/main/having_cond_pushdown.test b/mysql-test/main/having_cond_pushdown.test index f1bf706..fc75122 100644 --- a/mysql-test/main/having_cond_pushdown.test +++ b/mysql-test/main/having_cond_pushdown.test @@ -1401,3 +1401,42 @@ EXPLAIN SELECT t1.a,MAX(t1.b),t1.c FROM t1 WHERE t1.a = 3 AND (t1.a < 2 AND t1.b > 3) GROUP BY t1.a; DROP TABLE t1; + +--echo # +--echo # MDEV-21184: Constant subquery in condition movable to WHERE +--echo # + +CREATE TABLE t1(a int, b int); +INSERT INTO t1 VALUES + (1,10), (2,20), (1,11), (1,15), (2,20), (1,10), (2,21); + +CREATE TABLE t2 (c INT); +INSERT INTO t2 VALUES (2),(3); + +let $q= +SELECT a FROM t1 GROUP BY a HAVING a = 8 OR a = ( SELECT MIN(c) FROM t2 ); + +eval EXPLAIN FORMAT=JSON $q; +eval $q; + +let $q= +SELECT a FROM t1 GROUP BY a,b + HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and b < 20; + +eval EXPLAIN FORMAT=JSON $q; +eval $q; + +let $q= +SELECT a FROM t1 GROUP BY a + HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and SUM(b) > 20; + +eval EXPLAIN FORMAT=JSON $q; +eval $q; + +let $q= +SELECT a FROM t1 GROUP BY a HAVING a = ( SELECT MIN(c) FROM t2 ); + +eval EXPLAIN FORMAT=JSON $q; +eval $q; + +DROP TABLE t1,t2; diff --git a/sql/item.cc b/sql/item.cc index 900a973..7b4571e 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -7352,7 +7352,7 @@ Item *Item::build_pushable_cond(THD *thd, List<Item> equalities; Item *new_cond= NULL; if (((Item_equal *)this)->create_pushable_equalities(thd, &equalities, - checker, arg) || + checker, arg, true) || (equalities.elements == 0)) return 0; @@ -10512,3 +10512,15 @@ void Item::register_in(THD *thd) next= thd->free_list; thd->free_list= this; } + + +bool Item::cleanup_excluding_immutables_processor (void *arg) +{ + if (!(get_extraction_flag() == IMMUTABLE_FL)) + return cleanup_processor(arg); + else + { + clear_extraction_flag(); + return false; + } +} diff --git a/sql/item.h b/sql/item.h index 2ac0964..205c070 100644 --- a/sql/item.h +++ b/sql/item.h @@ -152,8 +152,10 @@ bool mark_unsupported_function(const char *w1, const char *w2, #define NO_EXTRACTION_FL (1 << 6) #define FULL_EXTRACTION_FL (1 << 7) #define DELETION_FL (1 << 8) -#define SUBSTITUTION_FL (1 << 9) -#define EXTRACTION_MASK (NO_EXTRACTION_FL | FULL_EXTRACTION_FL | DELETION_FL) +#define IMMUTABLE_FL (1 << 9) +#define SUBSTITUTION_FL (1 << 10) +#define EXTRACTION_MASK \ + (NO_EXTRACTION_FL | FULL_EXTRACTION_FL | DELETION_FL | IMMUTABLE_FL) extern const char *item_empty_name; @@ -1867,6 +1869,7 @@ class Item: 
public Value_source, virtual bool cleanup_processor(void *arg); virtual bool cleanup_excluding_fields_processor (void *arg) { return cleanup_processor(arg); } + bool cleanup_excluding_immutables_processor (void *arg); virtual bool cleanup_excluding_const_fields_processor (void *arg) { return cleanup_processor(arg); } virtual bool collect_item_field_processor(void *arg) { return 0; } diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 9110f34..5ae5931 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -7410,6 +7410,7 @@ Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel) of the tree of the object to check if multiple equality elements can be used to create equalities @param arg parameter to be passed to the checker + @param clone_const true <=> clone the constant member if there is any @details How the method works on examples: @@ -7420,36 +7421,31 @@ Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel) Example 2: It takes MULT_EQ(1,a,b) and tries to create from its elements a set of - equalities {(1=a),(1=b)}. + equalities {(a=1),(a=b)}. How it is done: - 1. The method finds the left part of the equalities to be built. It will - be the same for all equalities. It is either: - a. A constant if there is any - b. A first element in the multiple equality that satisfies - checker function + 1. If there is a constant member c the first non-constant member x for + which the function checker returns true is taken and an item for + the equality x=c is created. When constructing the equality item + the left part of the equality is always taken as a clone of x while + the right part is taken as a clone of c only if clone_const == true. - For the example 1 the left element is field 'x'. - For the example 2 it is constant '1'. + 2. After this all equalities of the form x=a (where x designates the first + non-constant member for which checker returns true and a is some other + such member of the multiplle equality) are created. When constructing + an equality item both its parts are taken as clones of x and a. - 2. If the left element is found the rest elements of the multiple equality - are checked with the checker function if they can be right parts - of equalities. - If the element can be a right part of the equality, equality is built. - It is built with the left part element found at the step 1 and - the right part element found at this step (step 2). - - Suppose for the example above that both 'a' and 'b' fields can be used - to build equalities: + Suppose in the examples above that for 'x', 'a', and 'b' the function + checker returns true. Example 1: - for 'a' field (x=a) is built - for 'b' field (x=b) is built + the equality (x=a) is built + the equality (x=b) is built Example 2: - for 'a' field (1=a) is built - for 'b' field (1=b) is built + the equality (a=1) is built + the equality (a=b) is built 3. As a result we get a set of equalities built with the elements of this multiple equality. They are saved in the equality list. @@ -7458,15 +7454,17 @@ Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel) {(x=a),(x=b)} Example 2: - {(1=a),(1=b)} + {(a=1),(a=b)} @note This method is called for condition pushdown into materialized derived table/view, and IN subquery, and pushdown from HAVING into WHERE. When it is called for pushdown from HAVING the empty checker is passed. - It happens because elements of this multiple equality don't need to be - checked if they can be used to build equalities. There are no elements - that can't be used to build equalities. 
+ This is because in this case the elements of the multiple equality don't + need to be checked if they can be used to build equalities: either all + equalities can be pushed or none of them can be pushed. + When the function is called for pushdown from HAVING the value of the + parameter clone_const is always false. In other cases it's always true. @retval true if an error occurs @retval false otherwise @@ -7475,24 +7473,42 @@ Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel) bool Item_equal::create_pushable_equalities(THD *thd, List<Item> *equalities, Pushdown_checker checker, - uchar *arg) + uchar *arg, + bool clone_const) { Item *item; + Item *left_item= NULL; + Item *right_item = get_const(); Item_equal_fields_iterator it(*this); - Item *left_item = get_const(); - if (!left_item) + + while ((item=it++)) { - while ((item=it++)) - { - left_item= item; - if (checker && !((item->*checker) (arg))) - continue; - break; - } + left_item= item; + if (checker && !((item->*checker) (arg))) + continue; + break; } + if (!left_item) return false; + if (right_item) + { + Item_func_eq *eq= 0; + Item *left_item_clone= left_item->build_clone(thd); + Item *right_item_clone= !clone_const ? + right_item : right_item->build_clone(thd); + if (!left_item_clone || !right_item_clone) + return true; + eq= new (thd->mem_root) Item_func_eq(thd, + left_item_clone, + right_item_clone); + if (!eq || equalities->push_back(eq, thd->mem_root)) + return true; + if (!clone_const) + right_item->set_extraction_flag(IMMUTABLE_FL); + } + while ((item=it++)) { if (checker && !((item->*checker) (arg))) @@ -7500,15 +7516,14 @@ bool Item_equal::create_pushable_equalities(THD *thd, Item_func_eq *eq= 0; Item *left_item_clone= left_item->build_clone(thd); Item *right_item_clone= item->build_clone(thd); - if (left_item_clone && right_item_clone) - { - left_item_clone->set_item_equal(NULL); - right_item_clone->set_item_equal(NULL); - eq= new (thd->mem_root) Item_func_eq(thd, - right_item_clone, - left_item_clone); - } - if (eq && equalities->push_back(eq, thd->mem_root)) + if (!(left_item_clone && right_item_clone)) + return true; + left_item_clone->set_item_equal(NULL); + right_item_clone->set_item_equal(NULL); + eq= new (thd->mem_root) Item_func_eq(thd, + right_item_clone, + left_item_clone); + if (!eq || equalities->push_back(eq, thd->mem_root)) return true; } return false; @@ -7533,7 +7548,7 @@ bool Item_equal::create_pushable_equalities(THD *thd, Item *Item_equal::multiple_equality_transformer(THD *thd, uchar *arg) { List<Item> equalities; - if (create_pushable_equalities(thd, &equalities, 0, 0)) + if (create_pushable_equalities(thd, &equalities, 0, 0, false)) return 0; switch (equalities.elements) diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 16bb53c..ea34679 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -7988,7 +7988,7 @@ st_select_lex::check_cond_extraction_for_grouping_fields(THD *thd, Item *cond) } else { - int fl= cond->excl_dep_on_grouping_fields(this) ? + int fl= cond->excl_dep_on_grouping_fields(this) && !cond->is_expensive() ? 
FULL_EXTRACTION_FL : NO_EXTRACTION_FL; cond->set_extraction_flag(fl); } @@ -9819,7 +9819,7 @@ st_select_lex::build_pushable_cond_for_having_pushdown(THD *thd, Item *cond) { List_iterator<Item> li(*((Item_cond*) result)->argument_list()); Item *item; - while ((item=li++)) + while ((item= li++)) { if (attach_to_conds.push_back(item, thd->mem_root)) return true; @@ -9839,8 +9839,13 @@ st_select_lex::build_pushable_cond_for_having_pushdown(THD *thd, Item *cond) */ if (cond->type() != Item::COND_ITEM) return false; + if (((Item_cond *)cond)->functype() != Item_cond::COND_AND_FUNC) { + /* + cond is not a conjunctive formula and it cannot be pushed into WHERE. + Try to extract a formula that can be pushed. + */ Item *fix= cond->build_pushable_cond(thd, 0, 0); if (!fix) return false; @@ -9860,7 +9865,6 @@ st_select_lex::build_pushable_cond_for_having_pushdown(THD *thd, Item *cond) Item *result= item->transform(thd, &Item::multiple_equality_transformer, (uchar *)item); - if (!result) return true; if (result->type() == Item::COND_ITEM && @@ -10188,8 +10192,8 @@ Item *st_select_lex::pushdown_from_having_into_where(THD *thd, Item *having) &Item::field_transformer_for_having_pushdown, (uchar *)this); - if (item->walk(&Item:: cleanup_processor, 0, STOP_PTR) || - item->fix_fields(thd, NULL)) + if (item->walk(&Item::cleanup_excluding_immutables_processor, 0, STOP_PTR) + || item->fix_fields(thd, NULL)) { attach_to_conds.empty(); goto exit;
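The second half of the fix relies on the new IMMUTABLE_FL flag: the constant that was not cloned is marked immutable, and the cleanup walk done before re-running fix_fields() skips such items so the shared subquery item is not torn down and re-prepared. A rough sketch of that idea with hypothetical types (the real code is Item::cleanup_excluding_immutables_processor() walking the item tree):

#include <cstdint>
#include <iostream>

constexpr uint32_t IMMUTABLE_FL = 1u << 9;   // same bit position as in sql/item.h

struct Item {
  uint32_t extraction_flag = 0;
  bool cleaned_up = false;

  void cleanup() { cleaned_up = true; }      // stand-in for Item::cleanup()

  // Mirrors cleanup_excluding_immutables_processor(): an immutable item keeps
  // its fixed state; only the marker flag is cleared.
  bool cleanup_excluding_immutables() {
    if (extraction_flag != IMMUTABLE_FL) {
      cleanup();
      return false;
    }
    extraction_flag = 0;
    return false;
  }
};

int main() {
  Item field_ref, const_subquery;
  const_subquery.extraction_flag = IMMUTABLE_FL;
  field_ref.cleanup_excluding_immutables();
  const_subquery.cleanup_excluding_immutables();
  std::cout << "field cleaned: " << field_ref.cleaned_up
            << ", subquery cleaned: " << const_subquery.cleaned_up << "\n";  // 1, 0
  return 0;
}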
[Commits] e8d3583fc88: MDEV-18514: Assertion `!writer.checksum_len || writer.remains == 0' failed
by sujatha 31 Dec '19

revision-id: e8d3583fc88ea8c08a94aceba5a21bd5e367c454 (mariadb-10.2.30-39-ge8d3583fc88) parent(s): 16bce0f6fe6bcad0091dc45a97a8ac7b33fe9d44 author: Sujatha committer: Sujatha timestamp: 2019-12-31 11:49:42 +0530 message: MDEV-18514: Assertion `!writer.checksum_len || writer.remains == 0' failed Analysis: ======== 'max_binlog_cache_size' is configured and a huge transaction is executed. When the transaction specific events size exceeds 'max_binlog_cache_size' the event cannot be written to the binary log cache and cache write error is raised. Upon cache write error the statement is rolled back and the transaction cache should be truncated to a previous statement specific position. The truncate operation should reset the cache to earlier valid positions and flush the new changes. Even though the flush is successful the cache write error is still in marked state. The truncate code interprets the cache write error as cache flush failure and returns abruptly without modifying the write cache parameters. Hence cache is in a invalid state. When a COMMIT statement is executed in this session it tries to flush the contents of transaction cache to binary log. Since cache has partial events the cache write operation will report 'writer.remains' assert. Fix: === During the rollback operation truncate the cache and flush the cache contents. If truncation is successful clear the cache write error. --- .../suite/rpl/r/rpl_binlog_rollback_cleanup.result | 9 +++++ .../suite/rpl/t/rpl_binlog_rollback_cleanup.test | 46 ++++++++++++++++++++++ mysys/mf_iocache.c | 2 + 3 files changed, 57 insertions(+) diff --git a/mysql-test/suite/rpl/r/rpl_binlog_rollback_cleanup.result b/mysql-test/suite/rpl/r/rpl_binlog_rollback_cleanup.result new file mode 100644 index 00000000000..a677cbfecf6 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_binlog_rollback_cleanup.result @@ -0,0 +1,9 @@ +include/master-slave.inc +[connection master] +connection master; +SET GLOBAL max_binlog_cache_size = 65536; +CREATE TABLE t1(a INT PRIMARY KEY, data VARCHAR(30000)) ENGINE=INNODB; +ERROR HY000: Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again +SET GLOBAL max_binlog_cache_size= ORIGINAL_VALUE; +DROP TABLE t1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_binlog_rollback_cleanup.test b/mysql-test/suite/rpl/t/rpl_binlog_rollback_cleanup.test new file mode 100644 index 00000000000..ed4d713f626 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_binlog_rollback_cleanup.test @@ -0,0 +1,46 @@ +# ==== Purpose ==== +# +# Test verifies that when flushing an event to binary log fails the transaction +# is successfully rolled back and following COMMIT command doesn't report any +# assert. +# +# ==== Implementation ==== +# +# Steps: +# 0 - SET max_binlog_cache_size=64K +# 1 - Create an Innodb table and insert required amount of data. Execute an +# UPDATE operation which generates a big update event whose size exceeds +# max_binlog_cache_size. +# 2 - Wait for error 1197. Execute COMMIT command. +# 3 - COMMIT should be successful. 
+# +# ==== References ==== +# +# MDEV-18514: Assertion `!writer.checksum_len || writer.remains == 0' failed +# +--source include/have_innodb.inc +--source include/have_binlog_format_row.inc +--source include/master-slave.inc +--connection master +let $old_max_binlog_cache_size= query_get_value(SHOW VARIABLES LIKE "max_binlog_cache_size", Value, 1); +SET GLOBAL max_binlog_cache_size = 65536; +CREATE TABLE t1(a INT PRIMARY KEY, data VARCHAR(30000)) ENGINE=INNODB; +let $data = `select concat('"', repeat('a',6000), '"')`; +let $data1 = `select concat('"', repeat('b',6000), '"')`; +--disable_query_log +eval INSERT INTO t1 (a, data) VALUES (1, CONCAT($data, $data)); +eval INSERT INTO t1 (a, data) VALUES (2, CONCAT($data, $data)); +eval INSERT INTO t1 (a, data) VALUES (3, CONCAT($data, $data)); +eval INSERT INTO t1 (a, data) VALUES (4, CONCAT($data, $data)); +eval INSERT INTO t1 (a, data) VALUES (5, CONCAT($data, $data)); +START TRANSACTION; +--error ER_TRANS_CACHE_FULL +eval UPDATE t1 SET data=$data1; +COMMIT; +--enable_query_log + +--replace_result $old_max_binlog_cache_size ORIGINAL_VALUE +--eval SET GLOBAL max_binlog_cache_size= $old_max_binlog_cache_size +DROP TABLE t1; + +--source include/rpl_end.inc diff --git a/mysys/mf_iocache.c b/mysys/mf_iocache.c index d7689e204b6..6196e65b9de 100644 --- a/mysys/mf_iocache.c +++ b/mysys/mf_iocache.c @@ -1949,6 +1949,8 @@ int my_b_flush_io_cache(IO_CACHE *info, int need_append_buffer_lock) int res= info->write_function(info, info->write_buffer, length); if (res) DBUG_RETURN(res); + else + info->error= 0; set_if_bigger(info->end_of_file, info->pos_in_file); }
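The fix boils down to one rule in my_b_flush_io_cache(): if the cache's write function succeeds, any error left over from the earlier failed (and rolled-back) statement must be cleared, otherwise the later COMMIT treats the transaction cache as still broken. A small sketch with a hypothetical cache type (not the real IO_CACHE structure):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct BinlogCache {                     // hypothetical stand-in for IO_CACHE
  std::vector<uint8_t> buf;
  int error = 0;                         // sticky error from a failed cache write
  int (*write_fn)(const uint8_t *, size_t) = nullptr;

  int flush() {
    int res = write_fn(buf.data(), buf.size());
    if (res)
      return res;                        // flush failed: keep the error
    error = 0;                           // the fix: a successful flush clears the stale error
    buf.clear();
    return 0;
  }
};

int main() {
  BinlogCache cache;
  cache.write_fn = [](const uint8_t *, size_t) { return 0; };  // pretend the write succeeds
  cache.error = 1;                       // left behind by the oversized UPDATE (ER_TRANS_CACHE_FULL)
  std::cout << "before flush: error=" << cache.error << "\n";
  cache.flush();
  std::cout << "after flush:  error=" << cache.error << "\n";
  return 0;
}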
[Commits] d3a2c173b63: Big Test added for sorting
by Varun 30 Dec '19

revision-id: d3a2c173b63e784513e67502eb91345f7620bd7d (mariadb-10.4.11-18-gd3a2c173b63) parent(s): 59d4f2a373a7960a533e653877ab69a97e91444a author: Varun Gupta committer: Varun Gupta timestamp: 2019-12-31 03:18:09 +0530 message: Big Test added for sorting --- mysql-test/main/order_by_pack_big.result | 170 +++++++++++++++++++++++++++++++ mysql-test/main/order_by_pack_big.test | 81 +++++++++++++++ 2 files changed, 251 insertions(+) diff --git a/mysql-test/main/order_by_pack_big.result b/mysql-test/main/order_by_pack_big.result new file mode 100644 index 00000000000..b1b7b7e5940 --- /dev/null +++ b/mysql-test/main/order_by_pack_big.result @@ -0,0 +1,170 @@ +set @save_rand_seed1= @@RAND_SEED1; +set @save_rand_seed2= @@RAND_SEED2; +set @@RAND_SEED1=810763568, @@RAND_SEED2=600681772; +create table t1(a int); +insert into t1 select seq from seq_1_to_10000 order by rand(); +# +# function f1 has parameters mean(peak) and std_dev (standard deviation) +# for a normal distribution +# +CREATE FUNCTION f1(mean DOUBLE, std_dev DOUBLE) RETURNS DOUBLE +BEGIN +set @z= (rand() + rand() + rand() + rand() + rand() + rand() + +rand() + rand() + rand() + rand() + rand() + rand() - 6); +set @z= std_dev*@z + mean; +return @z; +END| +CREATE function f2(len INT) RETURNS varchar(256) +BEGIN +DECLARE str VARCHAR(256) DEFAULT ''; +DECLARE x INT DEFAULT 0; +WHILE (len > 0 AND len < 256) DO +SET x =round(rand()*25); +SET str= CONCAT(str, CHAR(65 + x)); +SET len= len-1; +END WHILE; +RETURN str; +END| +CREATE function f3(mean DOUBLE, std_dev DOUBLE, min_val INT) RETURNS INT +BEGIN +DECLARE r DOUBLE DEFAULT 0; +WHILE 1=1 DO +set r= f1(mean, std_dev); +IF (r >= min_val) THEN +RETURN round(r); +end if; +END WHILE; +RETURN 0; +END| +create table t2 (id INT NOT NULL, a INT, b int); +insert into t2 select a, f3(12, 8.667, 0), f3(32, 16, 0) from t1; +CREATE TABLE t3( +id INT NOT NULL, +names VARCHAR(64), +address VARCHAR(132), +PRIMARY KEY (id) +); +insert into t3 select id, f2(a) , f2(b) from t2; +set sort_buffer_size=262144*10; +flush status; +select id, +MD5(group_concat(substring(names,1,3), substring(address,1,3))) +FROM t3 +GROUP BY id DIV 100 +ORDER BY id; +id MD5(group_concat(substring(names,1,3), substring(address,1,3))) +10 4e2909ba6af73a6f2331c332272ebe6f +149 2591aa134a03c0baedb1f2e09d37fc01 +232 779fe09e8677c88c17dab1c2c5c54a9e +311 6f66733f6f2fee55853a4081dac362bf +430 b7431e4e30f7ce214350649a9c1877fa +502 5a39b69003d083e81ffc5ff7d1840ae0 +665 796aafa79e47c3f99a2f83c2fe87e9af +719 1adaa36769a3ec1aaf61cc67ee035bf2 +883 558686327dba2531104dc32497f13806 +942 68c2c6be36ad81da00f875e05678ac1a +1007 c9fe7a2a3b1e77f3512afa96edcdbd0f +1134 f51a946d3bcefb2ef0bb9a816654993f +1226 968710fb0ee891bf5efedac64daa7f59 +1367 e84b491129560c4a31176ec17c7e4651 +1486 f13314fa07ab1786256544206f552126 +1502 723fa7da3db0cfc61bce43af13bd7733 +1674 9296d3b0d959e7c7bfb0536c0a680003 +1781 dd9dc3ee5356e90a7279d2a7e09ab067 +1803 a0787eb53c2ddfe4dc501cbd62cd6ef5 +1969 d0d2d4c9a33b5e7c7ba517b21bbb225c +2087 0491191a92ecef7813b491c651c3017a +2111 be7737ae27e517dee4d2c2490dd08fe5 +2268 13387d22efac2835387acb5fdf466847 +2328 0da4eaaa6d40790e83426adc88c6b867 +2416 c6376f9b85a6247e4bb4b7fed7019654 +2599 07c0494dc58e02c9e4d476ed4e828999 +2602 3f2b2cd46fdbcb9f4de5d22af1ddf332 +2777 cf080c656cf2c72c6c40df9522c4a444 +2858 dd6f033f165c3764e5eee748b6a4b306 +2922 d69118e72d77dc35d437a23bc620600a +3027 d7b6c3a6cd712c99fa428d40d7d14e8f +3131 8765144e43704034b8bde8195c3bd566 +3220 4da111ce557f3c27ed0413f5ced46e66 +3331 c4b80698fd39a7124e5bbac98d949980 +3452 
db6e2ab7247a49a5a03ae9eb5d197c50 +3519 271a797d3251ea00339f72d18ccb6d2f +3680 7c04e35481d3d64878330db84158bb67 +3799 15febc7ee00f06dbf96a961d90840571 +3876 936394440bf45ba95fcb237c106f0ad4 +3937 1468b953f59d3d9124646cb9a62a066c +4029 a2d060975fe98313cc4d6d0967134862 +4158 3ef575890dc439f165a6c0e7cecef3a0 +4291 ad24580ca5589c22e66ee9b2507bffe2 +4355 469e098eb29d367bf52e8dcb626af560 +4461 985a0571305b940cf9cdddc0bd992868 +4544 b596b41cced9a72635ebb621990a9884 +4646 530f8884df8f69e89a422dccf15dde4e +4714 e4c39cbe81b717e5dc55c3d8a5f958bd +4848 0441809790e48eae3e3451982d6e9685 +4935 5bf7c598b37c14c1c667c7a3e765ef4b +5014 52af0e4bcdcaa9798efa5208fc14160f +5184 fb35e60e92eb7d77d24d74fca72f1d74 +5219 3b7efa9a63a7c2abc6242706bd4e178b +5349 e34bcfd71d15658df22d985d7cfd4f46 +5400 8567ac3ba9b56908b1df46b72b44e91d +5507 b437451c7e0d0b0be72f7bea8cbad4ff +5672 071a6b393989a88074bf31f780d7d6ee +5725 eced78afcf11ac492c2e14682ca097dd +5849 376cd2d795fb1cc25ea9832e37b193fd +5941 c25cbc66d609b9beee3c948cb0339f74 +6064 4b0eaec74cb4c0234b30373493749360 +6135 a5108f050da716c3f2b030587c1ce615 +6289 b19351d2e4e58a77f1f33f15c90c6181 +6329 b213caf63af61795ac76e7c56d1d1314 +6404 a6128a481c3c23141b200f0693bba3a7 +6580 06dca934fccedb081f18d418fc7a3909 +6653 20dd771bd5239beb009b81853f5adc72 +6773 cb107d95104e7ed5f9796a3fc8103251 +6822 ed49d107938a7fb5058d8e6f719b97fb +6938 a638345003ca5ee6ea677446193c4af2 +7085 1a5ed3ccde9927cb1f4dfc8b2b71f25f +7180 7275a16515755a6b789da7c2fdb2f096 +7290 aa916ee2a7b4f6aff0592309e890d6c0 +7390 040861544798bccfaebec94c719e2fa9 +7449 58ddd09159a92cd3fee0c241b8629a8c +7589 3244004b56a66402cde3c60a41d99dfc +7686 960d350f442e03aa745d4740b1dee8f1 +7713 e43df84b0b6a40d26a4e36bb371af6c1 +7844 b3ac24da8fdba43ba00ad2772cfeedd8 +7935 a7d07f684c0c18946695a5fa369139f0 +8059 81523df1da525bb0e5490aec44809552 +8153 a34469e694de9c3e3d7d2cb171df691a +8245 3bdcc5556a474a7a67be2134804987a8 +8310 10ed2c9f70315df533c94d5a79200452 +8432 3f7273962e617832452163b98058791a +8596 2eec8a128e031b31e890fd545aef9c20 +8647 1ae5260b3db571fcfdf36e17b9286d73 +8742 da8e83a01b7e17d4b28de7275b83ccbc +8856 426253c28af8a7364a0c4795355590f9 +8967 1cad1040530e52c64e4f5ba5bf640fdd +9057 7c265622653013e1bfa47dc4b1f3289f +9148 dd25afe1e73889c5810e2fa91c31ca6c +9275 5e8b0a1ecfde14c6fa8d4cba12aa8ac1 +9311 b1f7571deb95309671533b657b5392ab +9488 324bbfc66eaf7254c7b1d99b23cd33f0 +9518 5f514525711e5c42e067776a3139d011 +9696 10f135781fe4bfffc5b1feb18818e592 +9752 87c432d39312dcc756ea49ae3a63bfe1 +9887 2e8c5ee85ee68afdf25985e9b1c01bd6 +9903 4e4c6529cb69a85c095381d0991dedcf +10000 a849cf46ea623ee01daa24e77a05d671 +show status like '%sort%'; +Variable_name Value +Sort_merge_passes 0 +Sort_priority_queue_sorts 0 +Sort_range 0 +Sort_rows 10101 +Sort_scan 2 +set sort_buffer_size=default; +set @@RAND_SEED1= @save_rand_seed1; +set @@RAND_SEED2= @save_rand_seed2; +drop function f1; +drop function f2; +drop function f3; +drop table t1, t2, t3; diff --git a/mysql-test/main/order_by_pack_big.test b/mysql-test/main/order_by_pack_big.test new file mode 100644 index 00000000000..3840e023e2c --- /dev/null +++ b/mysql-test/main/order_by_pack_big.test @@ -0,0 +1,81 @@ +--source include/big_test.inc +--source include/have_sequence.inc +--source include/have_64bit.inc + +set @save_rand_seed1= @@RAND_SEED1; +set @save_rand_seed2= @@RAND_SEED2; +set @@RAND_SEED1=810763568, @@RAND_SEED2=600681772; + +create table t1(a int); +insert into t1 select seq from seq_1_to_10000 order by rand(); +delimiter |; + +--echo # +--echo # function f1 has parameters mean(peak) 
and std_dev (standard deviation) +--echo # for a normal distribution +--echo # + +CREATE FUNCTION f1(mean DOUBLE, std_dev DOUBLE) RETURNS DOUBLE +BEGIN + set @z= (rand() + rand() + rand() + rand() + rand() + rand() + + rand() + rand() + rand() + rand() + rand() + rand() - 6); + set @z= std_dev*@z + mean; + return @z; +END| + +CREATE function f2(len INT) RETURNS varchar(256) +BEGIN + DECLARE str VARCHAR(256) DEFAULT ''; + DECLARE x INT DEFAULT 0; + WHILE (len > 0 AND len < 256) DO + SET x =round(rand()*25); + SET str= CONCAT(str, CHAR(65 + x)); + SET len= len-1; + END WHILE; +RETURN str; +END| + +CREATE function f3(mean DOUBLE, std_dev DOUBLE, min_val INT) RETURNS INT +BEGIN + DECLARE r DOUBLE DEFAULT 0; + WHILE 1=1 DO + set r= f1(mean, std_dev); + IF (r >= min_val) THEN + RETURN round(r); + end if; + END WHILE; + RETURN 0; +END| + +delimiter ;| + + +create table t2 (id INT NOT NULL, a INT, b int); +insert into t2 select a, f3(12, 8.667, 0), f3(32, 16, 0) from t1; + +CREATE TABLE t3( + id INT NOT NULL, + names VARCHAR(64), + address VARCHAR(132), + PRIMARY KEY (id) +); + +insert into t3 select id, f2(a) , f2(b) from t2; + +set sort_buffer_size=262144*10; +flush status; +select id, + MD5(group_concat(substring(names,1,3), substring(address,1,3))) +FROM t3 +GROUP BY id DIV 100 +ORDER BY id; +show status like '%sort%'; +set sort_buffer_size=default; + +set @@RAND_SEED1= @save_rand_seed1; +set @@RAND_SEED2= @save_rand_seed2; + +drop function f1; +drop function f2; +drop function f3; +drop table t1, t2, t3;
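The f1() routine in the test above relies on the classic sum-of-uniforms approximation: twelve independent uniform(0,1) draws summed and shifted by -6 have mean 0 and variance 1, so std_dev*z + mean approximates a normal distribution with the requested parameters. The same generator written as a small C++ program for illustration (seed and parameters are just examples):

#include <iostream>
#include <random>

// Approximate N(mean, std_dev^2) by summing 12 uniform(0,1) draws (Irwin-Hall).
double approx_normal(double mean, double std_dev, std::mt19937 &rng) {
  std::uniform_real_distribution<double> u(0.0, 1.0);
  double z = -6.0;                 // sum of 12 uniforms has mean 6, variance 1
  for (int i = 0; i < 12; i++)
    z += u(rng);
  return std_dev * z + mean;       // same scaling as f1(mean, std_dev)
}

int main() {
  std::mt19937 rng(810763568);     // arbitrary fixed seed, mirroring the pinned RAND_SEED1
  for (int i = 0; i < 5; i++)
    std::cout << approx_normal(12, 8.667, rng) << "\n";
  return 0;
}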
[Commits] d855595deab: MDEV-21263: Allow packed values of non-sorted fields in the sort buffer
by Varun 26 Dec '19

revision-id: d855595deab0be296b6e88059de2e8a2eda27d07 (mariadb-10.5.0-68-gd855595deab) parent(s): 89633995e4962a7ad4a241cdf62ee637990d6787 author: Varun Gupta committer: Varun Gupta timestamp: 2019-12-26 11:58:55 +0530 message: MDEV-21263: Allow packed values of non-sorted fields in the sort buffer This task deals with packing the non-sorted fields (or addon fields). This would lead to efficient usage of the memory allocated for the sort buffer. The changes brought by this feature are 1) Sort buffers would have records of variable length 2) Each record in the sort buffer would be stored like <sort_key1><sort_key2>....<addon_length><null_bytes><field1><field2>.... addon_length is the extra bytes that are required to store the variable length of addon field across different records. 3) Changes in rr_unpack_from_buffer and rr_from_tempfile to take into account the variable length of records. Ported WL#1509 Pack values of non-sorted fields in the sort buffer from MySQL by Tor Didriksen --- mysql-test/main/order_by_pack_big.result | 153 +++++ mysql-test/main/order_by_pack_big.test | 62 ++ sql/bounded_queue.h | 4 +- sql/field.h | 2 + sql/filesort.cc | 700 +++++++++++++-------- sql/filesort.h | 83 ++- sql/filesort_utils.cc | 64 +- sql/filesort_utils.h | 214 ++++++- sql/records.cc | 141 ++++- sql/records.h | 14 +- sql/sql_array.h | 4 + sql/sql_select.cc | 4 +- sql/sql_sort.h | 291 ++++++++- sql/uniques.cc | 80 ++- sql/uniques.h | 2 +- .../mysql-test/connect/r/mysql_index.result | 4 +- .../connect/mysql-test/connect/t/mysql_index.test | 2 +- 17 files changed, 1390 insertions(+), 434 deletions(-) diff --git a/mysql-test/main/order_by_pack_big.result b/mysql-test/main/order_by_pack_big.result new file mode 100644 index 00000000000..a65e8f61579 --- /dev/null +++ b/mysql-test/main/order_by_pack_big.result @@ -0,0 +1,153 @@ +set @save_rand_seed1= @@RAND_SEED1; +set @save_rand_seed2= @@RAND_SEED2; +set @@RAND_SEED1=810763568, @@RAND_SEED2=600681772; +create table t1(a int); +insert into t1 select seq from seq_1_to_10000 order by rand(); +CREATE FUNCTION f1(median INT) RETURNS DOUBLE +BEGIN +set @z = (rand() + rand() + rand() + rand() + rand() + rand())/6; +set @z = @z*median*2+1; +return round(@z); +END| +CREATE function f2(len INT) RETURNS varchar(256) +BEGIN +DECLARE str VARCHAR(256) DEFAULT ''; +DECLARE x INT DEFAULT 0; +WHILE (len > 0 AND len < 256) DO +SET x =round(rand()*25); +SET str= CONCAT(str, CHAR(65 + x)); +SET len= len-1; +END WHILE; +RETURN str; +END| +create table t3 (id INT NOT NULL, a INT, b int); +insert into t3 select a, f1(12), f1(32) from t1; +CREATE TABLE t2( +id INT NOT NULL, +names VARCHAR(64), +address VARCHAR(132), +PRIMARY KEY (id) +); +insert into t2 select a, f2(f1(12)) , f2(f1(32)) from t1; +set sort_buffer_size=262144*10; +flush status; +select id, +MD5(group_concat(substring(names,1,3), substring(address,1,3))) +FROM t2 +GROUP BY id DIV 100 +ORDER BY id; +id MD5(group_concat(substring(names,1,3), substring(address,1,3))) +10 b253ed13e64cf733323d27afc92d11b9 +149 43ea485e548da5dd3fcd8c4906dccd7e +232 6ccc7fc342e42192ccdfbb77e85fa433 +311 90f94659c39641a213d3744bbffc41e0 +430 a53bead2e13ee4bddd5c2fb66c04c917 +502 419c219a887215fe89226ed9226bfa1e +665 1e7133708a3e1bc43f37d7173996743b +719 9a6863e2c59ea8b03395f8ff8df49412 +883 5ebe88422a3a7a4df6048471b1371392 +942 51747d9e4b077ed1426cc23e76865cf0 +1007 ba48b7a8d9ce563b2ee39a52bd1283e5 +1134 800292609f2c2a3a794e3afe20d17e20 +1226 c72aaf8bbd16c026d0ba70c15e2ba31d +1367 4b6b782916e30ac4f87353faa63e5784 +1486 
cd4ba93263a8f1b90b16017c2a12581f +1502 08be1a9c9d0fe26afa885dece8dff600 +1674 5ee5624374baf6a2fdb69a760a9c1e3c +1781 a569b76cce1a35bcebd4629700fae8da +1803 0f882c91efad7865d9a808a8905bdcd2 +1969 3bb29bf3c1ffa7e33825e426ba3a67fe +2087 4c8ded95fd37400de735ebd6613f28d1 +2111 92f1941edd43dd254471c758dcec0125 +2268 e577a7e2de58d4097693fec26179564d +2328 c74d953831057abf57019a847af38982 +2416 185c279928fe6860d70635be36e59f81 +2599 d0e84deb33d2b299c914b3d0ffbf56c8 +2602 bba2e5c462eb682aeaf4d423fe088f88 +2777 8acf4418303155237e2e45973a95e03b +2858 4e503f79070142fcc39161cfcee3d5cc +2922 54c4420bd27edd931db9de43dc78e304 +3027 8c7edeec29069b0369024f771a208ccf +3131 b72b4098fa4cc8b7ca9e44ebf49bfdcd +3220 1447ca382f80c823aa769692af4c79a4 +3331 37c48ec7c7f6d6e402431d4ce9162ca3 +3452 54396ef321b22670fdf76372c102278f +3519 331a7e1f8f758db2f83c35a7dd6169de +3680 909128ecfc539d004852a1b085d97b0a +3799 4f36d7d8844cd0ef2d09c5ecb4a0e147 +3876 42038816d752bf58f15fdaab41326c85 +3937 b79796f21d78da6908b06a393deae228 +4029 4225c1b7b0242f9f52b373ff08bdaa35 +4158 3ded3bb37f1d6beaeff4b144c44f6283 +4291 09c7086bada0918fb7b49cb5d18cbc63 +4355 9bc74fee3974ac0a37d5aeb47b3e8fe7 +4461 a208fdfd27499fb25e1e4fb17474ad2d +4544 985d5096cb3e7721bf2bda5b3652aa6e +4646 4111a94e90540d2869d1a4443048ff5b +4714 a50de2a7c76405fd7e9ee4a245b7443e +4848 4f3d9f6c2214d1dbfcbeb52d24db4803 +4935 196434d066598e31480970cef6858992 +5014 64a725bcf6bf00ea93a974cc5e67faed +5184 3b13c7d69d2cf79295a99517c37917e5 +5219 9ea5ef3ad046b49a696bcbb1f6b46073 +5349 cb541d0cc1deec0309330d23384e6b73 +5400 d13a8f18ff97c555f94016765084ffa1 +5507 f22f063f95f49d447fe668612c6df270 +5672 8d4d95cb5e75e377b0316fd0ebda84f0 +5725 f93deb9bf142a233c487441989500ad2 +5849 1a3f23db800626853f20df9912eda5bb +5941 202f31698078ca21ba5ea37fa20c0a12 +6064 1b3a59f5cb65c1710253ea455aeb6e65 +6135 d13e27505091f5a2e1b645ce77daa2a1 +6289 c92136d3d26acd47d132ddfc78adc729 +6329 f3ea174f4de598bc6d7d2d2c464acb14 +6404 54110ca947fa2d23c9735b7e5a5a50cf +6580 9478efd805dc2f2e3f1e770f3097d1d1 +6653 d5267e02f46dc3d2f169ea3f578371e8 +6773 f18e4f7005b81d1a950d4a1bba8b1534 +6822 9f8860eaf35ba68cfeb0ad6a78a2cc56 +6938 78e2b08e77fb6d969550b7eb1967da28 +7085 bbbd509f22273997f5461c10b17075d4 +7180 9e18b5a59366df9ad7a62a4f424153ed +7290 0a60c3d6661499de48d2ced87409e9bb +7390 93e875e3c9d690e1d01198c0c44c3cb2 +7449 c98c9cb4d00db6c707e284c0f318c9fc +7589 e2058c36596b0d23b4b1f2ca8591e6be +7686 0f182c5acb930902f62cdcf428f4745f +7713 eebff08db371b43703acf8aba8e483b1 +7844 3036d395f077b96f89e1b7bc5986ceb6 +7935 3b67bd296f31c17da7074dda34590d7b +8059 d7ff3396e0a4739f867438d60e93bac8 +8153 6e8a00cd8f006010c295195a3c7eef01 +8245 e65335846dde70d2e22687026b03a35f +8310 d1959cf02010be41a5294c86acd1fb07 +8432 db80164eee1baf3c95c2d34796ca8d4b +8596 f4774e3f3aa83c5ff8d4053df36cb3a6 +8647 00a98557e738bb097ab4de083507479e +8742 407e13848129bacb38edd4a12ce696cf +8856 18fef961b5144c669a43eb010d02e972 +8967 741b7ff21a3e73cb47092f43d85e8c6c +9057 ace84e526e26f0497f3621192d0e4a84 +9148 a2268d21fcdfafc04e22422cfff90bc8 +9275 811cc7fc2a15d64fe3c3b6caf84b92c7 +9311 15d529de7dc6536040dc2a1d8fd25013 +9488 1c9331758eb2a3675891b8af982d79c1 +9518 1cbde8577db1d61225037045d9706666 +9696 7a6fcd67a46a3a5a5ed2efd0848c9e92 +9752 b0d433f38ca8f67f079d87789ad40618 +9887 b125b717470a8260dfdaf0b90a1b9d70 +9903 602cdc924ea991d73014a15b5404e1d8 +10000 d450f3b4dc99fa4f0ed6046e770ba3ee +show status like '%sort%'; +Variable_name Value +Sort_merge_passes 0 +Sort_priority_queue_sorts 0 +Sort_range 0 +Sort_rows 10101 +Sort_scan 2 +set 
sort_buffer_size=default; +set @@RAND_SEED1= @save_rand_seed1; +set @@RAND_SEED2= @save_rand_seed2; +drop function f1; +drop function f2; +drop table t1,t2,t3; diff --git a/mysql-test/main/order_by_pack_big.test b/mysql-test/main/order_by_pack_big.test new file mode 100644 index 00000000000..d3072800217 --- /dev/null +++ b/mysql-test/main/order_by_pack_big.test @@ -0,0 +1,62 @@ +--source include/big_test.inc +--source include/have_sequence.inc +--source include/have_64bit.inc + +set @save_rand_seed1= @@RAND_SEED1; +set @save_rand_seed2= @@RAND_SEED2; +set @@RAND_SEED1=810763568, @@RAND_SEED2=600681772; + +create table t1(a int); +insert into t1 select seq from seq_1_to_10000 order by rand(); + +delimiter |; + +CREATE FUNCTION f1(median INT) RETURNS DOUBLE +BEGIN + set @z = (rand() + rand() + rand() + rand() + rand() + rand())/6; + set @z = @z*median*2+1; + return round(@z); +END| + +CREATE function f2(len INT) RETURNS varchar(256) +BEGIN + DECLARE str VARCHAR(256) DEFAULT ''; + DECLARE x INT DEFAULT 0; + WHILE (len > 0 AND len < 256) DO + SET x =round(rand()*25); + SET str= CONCAT(str, CHAR(65 + x)); + SET len= len-1; + END WHILE; +RETURN str; +END| + +delimiter ;| + +create table t3 (id INT NOT NULL, a INT, b int); +insert into t3 select a, f1(12), f1(32) from t1; + +CREATE TABLE t2( + id INT NOT NULL, + names VARCHAR(64), + address VARCHAR(132), + PRIMARY KEY (id) +); + +insert into t2 select a, f2(f1(12)) , f2(f1(32)) from t1; + +set sort_buffer_size=262144*10; +flush status; +select id, + MD5(group_concat(substring(names,1,3), substring(address,1,3))) +FROM t2 +GROUP BY id DIV 100 +ORDER BY id; +show status like '%sort%'; +set sort_buffer_size=default; + +set @@RAND_SEED1= @save_rand_seed1; +set @@RAND_SEED2= @save_rand_seed2; + +drop function f1; +drop function f2; +drop table t1,t2,t3; diff --git a/sql/bounded_queue.h b/sql/bounded_queue.h index fd733caa019..cd710d835aa 100644 --- a/sql/bounded_queue.h +++ b/sql/bounded_queue.h @@ -57,7 +57,7 @@ class Bounded_queue @param to Where to put the key. @param from The input data. */ - typedef void (*keymaker_function)(Sort_param *param, + typedef uint (*keymaker_function)(Sort_param *param, Key_type *to, Element_type *from); @@ -181,7 +181,7 @@ void Bounded_queue<Element_type, Key_type>::push(Element_type *element) { // Replace top element with new key, and re-order the queue. Key_type **pq_top= reinterpret_cast<Key_type **>(queue_top(&m_queue)); - (*m_keymaker)(m_sort_param, *pq_top, element); + (void)(*m_keymaker)(m_sort_param, *pq_top, element); queue_replace_top(&m_queue); } else { // Insert new key into the queue. 
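The bounded_queue.h change above makes the key-maker report how many bytes it actually wrote, which is what allows variable-length (packed) records in the sort buffer. As a rough illustration of the record layout described in the commit message (<sort_key><addon_length><null_bytes><field1><field2>...), here is a minimal, self-contained sketch; the names (AddonValue, make_packed_record), the 4-byte addon-length field, and host byte order are assumptions for illustration only, not the server's actual Sort_param/Addon_fields API:

#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical addon field: a nullable VARCHAR-like value,
// packed as <2-byte length><bytes> when it is not NULL.
struct AddonValue {
  bool is_null;
  std::string data;
};

// Pack one record as <sort_key><addon_length><null_bytes><field1><field2>...
// and return the number of bytes actually written (the variable record size),
// mirroring the new "keymaker returns uint" contract.
static uint32_t make_packed_record(unsigned char *to,
                                   const std::string &sort_key,
                                   const std::vector<AddonValue> &addons)
{
  unsigned char *orig_to= to;

  // 1. Fixed-size sort key, already in sortable form.
  std::memcpy(to, sort_key.data(), sort_key.size());
  to+= sort_key.size();

  // 2. Reserve 4 bytes for the total addon length; filled in at the end.
  //    (The real patch uses Addon_fields::size_of_length_field.)
  unsigned char *p_len= to;
  to+= 4;

  // 3. One null bit per addon field, rounded up to whole bytes.
  size_t null_bytes= (addons.size() + 7) / 8;
  std::memset(to, 0, null_bytes);
  unsigned char *nulls= to;
  to+= null_bytes;

  // 4. Pack only non-NULL values; NULL fields occupy no payload bytes.
  for (size_t i= 0; i < addons.size(); i++)
  {
    if (addons[i].is_null)
    {
      nulls[i / 8]|= (unsigned char) (1 << (i % 8));
      continue;
    }
    uint16_t len= (uint16_t) addons[i].data.size();
    std::memcpy(to, &len, sizeof(len));                      // host byte order
    std::memcpy(to + sizeof(len), addons[i].data.data(), len);
    to+= sizeof(len) + len;
  }

  // 5. Store the addon length = everything after the sort key,
  //    including the length field itself and the null bytes.
  uint32_t addon_length= (uint32_t) (to - p_len);
  std::memcpy(p_len, &addon_length, sizeof(addon_length));

  return (uint32_t) (to - orig_to);   // total (variable) record size
}

int main()
{
  unsigned char buf[256];
  uint32_t sz= make_packed_record(buf, "KEY1",
                                  {{false, "short"},
                                   {true,  ""},
                                   {false, "a longer value"}});
  std::cout << "packed record uses " << sz << " bytes\n";
  return 0;
}

Returning the real size is what lets the caller reclaim the unused tail of the pessimistically sized slot (the adjust_next_record_pointer() step in find_all_keys() further down) instead of always advancing by the fixed rec_length.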
diff --git a/sql/field.h b/sql/field.h index 911fe430371..3e45fe774b3 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1527,6 +1527,7 @@ class Field: public Value_source { return length;} virtual uint max_packed_col_length(uint max_length) { return max_length;} + virtual bool is_packable() { return false; } uint offset(const uchar *record) const { @@ -2139,6 +2140,7 @@ class Field_longstr :public Field_str bool can_optimize_range(const Item_bool_func *cond, const Item *item, bool is_eq_func) const; + bool is_packable() { return true; } }; /* base class for float and double and decimal (old one) */ diff --git a/sql/filesort.cc b/sql/filesort.cc index df6e1eb9104..d93ae3b595a 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -48,17 +48,17 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, ha_rows *found_rows); static bool write_keys(Sort_param *param, SORT_INFO *fs_info, uint count, IO_CACHE *buffer_file, IO_CACHE *tempfile); -static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos); +static uint make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos); static void register_used_fields(Sort_param *param); static bool save_index(Sort_param *param, uint count, SORT_INFO *table_sort); static uint suffix_length(ulong string_length); static uint sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length, - bool *multi_byte_charset); -static SORT_ADDON_FIELD *get_addon_fields(TABLE *table, uint sortlength, - LEX_STRING *addon_buf); -static void unpack_addon_fields(struct st_sort_addon_field *addon_field, - uchar *buff, uchar *buff_end); + bool *multi_byte_charset); +static Addon_fields *get_addon_fields(TABLE *table, uint sortlength, + uint *addon_length, + uint *m_packable_length); + static bool check_if_pq_applicable(Sort_param *param, SORT_INFO *info, TABLE *table, ha_rows records, size_t memory_available); @@ -66,7 +66,7 @@ static bool check_if_pq_applicable(Sort_param *param, SORT_INFO *info, void Sort_param::init_for_filesort(uint sortlen, TABLE *table, ha_rows maxrows, bool sort_positions) { - DBUG_ASSERT(addon_field == 0 && addon_buf.length == 0); + DBUG_ASSERT(addon_fields == NULL); sort_length= sortlen; ref_length= table->file->ref_length; @@ -77,12 +77,13 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table, Get the descriptors of all fields whose values are appended to sorted fields and get its total length in addon_buf.length */ - addon_field= get_addon_fields(table, sort_length, &addon_buf); + addon_fields= get_addon_fields(table, sort_length, &addon_length, + &m_packable_length); } - if (addon_field) + if (using_addon_fields()) { - DBUG_ASSERT(addon_buf.length < UINT_MAX32); - res_length= (uint)addon_buf.length; + DBUG_ASSERT(addon_length < UINT_MAX32); + res_length= addon_length; } else { @@ -93,11 +94,43 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table, */ sort_length+= ref_length; } - rec_length= sort_length + (uint)addon_buf.length; + rec_length= sort_length + addon_length; max_rows= maxrows; } +void Sort_param::try_to_pack_addons(ulong max_length_for_sort_data) +{ + if (!using_addon_fields() || // no addons, or + using_packed_addons()) // already packed + return; + + if (!Addon_fields::can_pack_addon_fields(res_length)) + return; + + const uint sz= Addon_fields::size_of_length_field;; + if (rec_length + sz > max_length_for_sort_data) + return; + + // Heuristic: skip packing if potential savings are less than 10 bytes. 
+ if (m_packable_length < (10 + sz)) + return; + + SORT_ADDON_FIELD *addonf= addon_fields->begin(); + for (;addonf != addon_fields->end(); ++addonf) + { + addonf->offset+= sz; + addonf->null_offset+= sz; + } + + addon_fields->set_using_packed_addons(true); + m_using_packed_addons= true; + + addon_length+= sz; + res_length+= sz; + rec_length+= sz; +} + /** Sort a table. Creates a set of pointers that can be used to read the rows @@ -134,7 +167,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, DBUG_ASSERT(thd->variables.sortbuff_size <= SIZE_T_MAX); size_t memory_available= (size_t)thd->variables.sortbuff_size; uint maxbuffer; - BUFFPEK *buffpek; + Merge_chunk *buffpek; ha_rows num_rows= HA_POS_ERROR; IO_CACHE tempfile, buffpek_pointers, *outfile; Sort_param param; @@ -164,13 +197,16 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, if (subselect && subselect->filesort_buffer.is_allocated()) { - /* Reuse cache from last call */ + // Reuse cache from last call sort->filesort_buffer= subselect->filesort_buffer; sort->buffpek= subselect->sortbuffer; subselect->filesort_buffer.reset(); subselect->sortbuffer.str=0; } + DBUG_ASSERT(sort->sorted_result_in_fsbuf == FALSE || + sort->record_pointers == NULL); + outfile= &sort->io_cache; my_b_clear(&tempfile); @@ -183,9 +219,8 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, &multi_byte_charset), table, max_rows, filesort->sort_positions); - sort->addon_buf= param.addon_buf; - sort->addon_field= param.addon_field; - sort->unpack= unpack_addon_fields; + sort->addon_fields= param.addon_fields; + if (multi_byte_charset && !(param.tmp_buffer= (char*) my_malloc(param.sort_length, MYF(MY_WME | MY_THREAD_SPECIFIC)))) @@ -208,7 +243,15 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, thd->query_plan_flags|= QPLAN_FILESORT_PRIORITY_QUEUE; status_var_increment(thd->status_var.filesort_pq_sorts_); tracker->incr_pq_used(); + param.using_pq= true; const size_t compare_length= param.sort_length; + /* + For PQ queries (with limit) we know exactly how many pointers/records + we have in the buffer, so to simplify things, we initialize + all pointers here. (We cannot pack fields anyways, so there is no + point in doing lazy initialization). + */ + sort->init_record_pointers(); if (pq.init(param.max_rows, true, // max_at_top NULL, // compare_function @@ -223,21 +266,23 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, DBUG_ASSERT(thd->is_error()); goto err; } - // For PQ queries (with limit) we initialize all pointers. 
- sort->init_record_pointers(); } else { DBUG_PRINT("info", ("filesort PQ is not applicable")); + param.try_to_pack_addons(thd->variables.max_length_for_sort_data); + param.using_pq= false; + size_t min_sort_memory= MY_MAX(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2); - set_if_bigger(min_sort_memory, sizeof(BUFFPEK*)*MERGEBUFF2); + set_if_bigger(min_sort_memory, sizeof(Merge_chunk*)*MERGEBUFF2); while (memory_available >= min_sort_memory) { ulonglong keys= memory_available / (param.rec_length + sizeof(char*)); param.max_keys_per_buffer= (uint) MY_MIN(num_rows, keys); - if (sort->alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length)) + sort->alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length); + if (sort->sort_buffer_size() > 0) break; size_t old_memory_available= memory_available; memory_available= memory_available/4*3; @@ -258,7 +303,9 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, goto err; param.sort_form= table; - param.end=(param.local_sortorder=filesort->sortorder)+s_length; + param.local_sortorder= + Bounds_checked_array<SORT_FIELD>(filesort->sortorder, s_length); + num_rows= find_all_keys(thd, &param, select, sort, &buffpek_pointers, @@ -287,12 +334,20 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, my_free(sort->buffpek.str); sort->buffpek.str= 0; } + + if (param.using_addon_fields()) + { + DBUG_ASSERT(sort->addon_fields); + if (!sort->addon_fields->allocate_addon_buf(param.addon_length)) + goto err; + } + if (!(sort->buffpek.str= (char *) read_buffpek_from_file(&buffpek_pointers, maxbuffer, (uchar*) sort->buffpek.str))) goto err; sort->buffpek.length= maxbuffer; - buffpek= (BUFFPEK *) sort->buffpek.str; + buffpek= (Merge_chunk *) sort->buffpek.str; close_cached_file(&buffpek_pointers); /* Open cached file if it isn't open */ if (! my_b_inited(outfile) && @@ -306,25 +361,25 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, Use also the space previously used by string pointers in sort_buffer for temporary key storage. 
*/ - param.max_keys_per_buffer=((param.max_keys_per_buffer * - (param.rec_length + sizeof(char*))) / - param.rec_length - 1); + + param.max_keys_per_buffer= static_cast<uint>(sort->sort_buffer_size()) / + param.rec_length; set_if_bigger(param.max_keys_per_buffer, 1); maxbuffer--; // Offset from 0 - if (merge_many_buff(&param, - (uchar*) sort->get_sort_keys(), + + if (merge_many_buff(&param, sort->get_raw_buf(), buffpek,&maxbuffer, - &tempfile)) + &tempfile)) goto err; if (flush_io_cache(&tempfile) || reinit_io_cache(&tempfile,READ_CACHE,0L,0,0)) goto err; if (merge_index(&param, - (uchar*) sort->get_sort_keys(), + sort->get_raw_buf(), buffpek, maxbuffer, &tempfile, - outfile)) + outfile)) goto err; } @@ -339,7 +394,8 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, my_free(param.tmp_buffer); if (!subselect || !subselect->is_uncacheable()) { - sort->free_sort_buffer(); + if (!param.using_addon_fields()) + sort->free_sort_buffer(); my_free(sort->buffpek.str); } else @@ -347,7 +403,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, /* Remember sort buffers for next subquery call */ subselect->filesort_buffer= sort->filesort_buffer; subselect->sortbuffer= sort->buffpek; - sort->filesort_buffer.reset(); // Don't free this + sort->filesort_buffer.reset(); // Don't free this*/ } sort->buffpek.str= 0; @@ -361,7 +417,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, my_off_t save_pos=outfile->pos_in_file; /* For following reads */ if (reinit_io_cache(outfile,READ_CACHE,0L,0,0)) - error=1; + error=1; outfile->end_of_file=save_pos; } } @@ -490,10 +546,10 @@ uint Filesort::make_sortorder(THD *thd, JOIN *join, table_map first_table_bit) static uchar *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count, uchar *buf) { - size_t length= sizeof(BUFFPEK)*count; + size_t length= sizeof(Merge_chunk)*count; uchar *tmp= buf; DBUG_ENTER("read_buffpek_from_file"); - if (count > UINT_MAX/sizeof(BUFFPEK)) + if (count > UINT_MAX/sizeof(Merge_chunk)) return 0; /* sizeof(BUFFPEK)*count will overflow */ if (!tmp) tmp= (uchar *)my_malloc(length, MYF(MY_WME | MY_THREAD_SPECIFIC)); @@ -702,7 +758,8 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, handler *file; MY_BITMAP *save_read_set, *save_write_set; Item *sort_cond; - ha_rows retval; + ha_rows num_records= 0; + const bool packed_addon_fields= param->using_packed_addons(); DBUG_ENTER("find_all_keys"); DBUG_PRINT("info",("using: %s", (select ? select->quick ? 
"ranges" : "where": @@ -810,23 +867,27 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, if (write_record) { - ++(*found_rows); if (pq) - { pq->push(ref_pos); - idx= pq->num_elements(); - } else { - if (idx == param->max_keys_per_buffer) + if (fs_info->isfull()) { if (write_keys(param, fs_info, idx, buffpek_pointers, tempfile)) goto err; - idx= 0; - indexpos++; + idx= 0; + indexpos++; } - make_sortkey(param, fs_info->get_record_buffer(idx++), ref_pos); + if (idx == 0) + fs_info->init_next_record_pointer(); + uchar *start_of_rec= fs_info->get_next_record_pointer(); + + const uint rec_sz= make_sortkey(param, start_of_rec, ref_pos); + if (packed_addon_fields && rec_sz != param->rec_length) + fs_info->adjust_next_record_pointer(rec_sz); + idx++; } + num_records++; } /* It does not make sense to read more keys in case of a fatal error */ @@ -862,11 +923,14 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select, if (indexpos && idx && write_keys(param, fs_info, idx, buffpek_pointers, tempfile)) DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */ - retval= (my_b_inited(tempfile) ? - (ha_rows) (my_b_tell(tempfile)/param->rec_length) : - idx); - DBUG_PRINT("info", ("find_all_keys return %llu", (ulonglong) retval)); - DBUG_RETURN(retval); + + (*found_rows)= num_records; + if (pq) + num_records= pq->num_elements(); + + + DBUG_PRINT("info", ("find_all_keys return %llu", (ulonglong) num_records)); + DBUG_RETURN(num_records); err: sort_form->column_bitmaps_set(save_read_set, save_write_set); @@ -901,36 +965,48 @@ write_keys(Sort_param *param, SORT_INFO *fs_info, uint count, IO_CACHE *buffpek_pointers, IO_CACHE *tempfile) { size_t rec_length; - uchar **end; - BUFFPEK buffpek; + Merge_chunk buffpek; DBUG_ENTER("write_keys"); rec_length= param->rec_length; - uchar **sort_keys= fs_info->get_sort_keys(); fs_info->sort_buffer(param, count); if (!my_b_inited(tempfile) && open_cached_file(tempfile, mysql_tmpdir, TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME))) - goto err; /* purecov: inspected */ + DBUG_RETURN(1); /* purecov: inspected */ /* check we won't have more buffpeks than we can possibly keep in memory */ - if (my_b_tell(buffpek_pointers) + sizeof(BUFFPEK) > (ulonglong)UINT_MAX) - goto err; + if (my_b_tell(buffpek_pointers) + sizeof(Merge_chunk) > (ulonglong)UINT_MAX) + DBUG_RETURN(1); + bzero(&buffpek, sizeof(buffpek)); - buffpek.file_pos= my_b_tell(tempfile); + buffpek.set_file_position(my_b_tell(tempfile)); if ((ha_rows) count > param->max_rows) count=(uint) param->max_rows; /* purecov: inspected */ - buffpek.count=(ha_rows) count; - for (end=sort_keys+count ; sort_keys != end ; sort_keys++) - if (my_b_write(tempfile, (uchar*) *sort_keys, (uint) rec_length)) - goto err; + buffpek.set_rowcount(static_cast<ha_rows>(count)); + + const bool packed_addon_fields= param->using_packed_addons(); + for (uint ix= 0; ix < count; ++ix) + { + uchar *record= fs_info->get_sorted_record(ix); + if (packed_addon_fields) + { + rec_length= param->sort_length + + Addon_fields::read_addon_length(record + param->sort_length); + } + else + rec_length= param->rec_length; + + if (my_b_write(tempfile, record, rec_length)) + DBUG_RETURN(1); /* purecov: inspected */ + } + if (my_b_write(buffpek_pointers, (uchar*) &buffpek, sizeof(buffpek))) - goto err; + DBUG_RETURN(1); + DBUG_RETURN(0); -err: - DBUG_RETURN(1); } /* write_keys */ @@ -1168,14 +1244,15 @@ Type_handler_real_result::make_sort_key(uchar *to, Item *item, /** Make a sort-key from record. 
*/ -static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos) +static uint make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos) { Field *field; SORT_FIELD *sort_field; uint length; + uchar *orig_to= to; - for (sort_field=param->local_sortorder ; - sort_field != param->end ; + for (sort_field=param->local_sortorder.begin() ; + sort_field != param->local_sortorder.end() ; sort_field++) { bool maybe_null=0; @@ -1202,15 +1279,15 @@ static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos) length=sort_field->length; while (length--) { - *to = (uchar) (~ *to); - to++; + *to = (uchar) (~ *to); + to++; } } else to+= sort_field->length; } - if (param->addon_field) + if (param->using_addon_fields()) { /* Save field values appended to sorted fields. @@ -1218,41 +1295,44 @@ static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos) In this implementation we use fixed layout for field values - the same for all records. */ - SORT_ADDON_FIELD *addonf= param->addon_field; + SORT_ADDON_FIELD *addonf= param->addon_fields->begin(); uchar *nulls= to; + uchar *p_len= to; DBUG_ASSERT(addonf != 0); + const bool packed_addon_fields= param->addon_fields->using_packed_addons(); + uint32 res_len= addonf->offset; memset(nulls, 0, addonf->offset); to+= addonf->offset; - for ( ; (field= addonf->field) ; addonf++) + for ( ; addonf != param->addon_fields->end() ; addonf++) { + Field *field= addonf->field; if (addonf->null_bit && field->is_null()) { nulls[addonf->null_offset]|= addonf->null_bit; -#ifdef HAVE_valgrind - bzero(to, addonf->length); -#endif + if (!packed_addon_fields) + to+= addonf->length; } else { -#ifdef HAVE_valgrind uchar *end= field->pack(to, field->ptr); - uint length= (uint) ((to + addonf->length) - end); - DBUG_ASSERT((int) length >= 0); - if (length) - bzero(end, length); -#else - (void) field->pack(to, field->ptr); -#endif + int sz= static_cast<int>(end - to); + res_len += sz; + if (packed_addon_fields) + to+= sz; + else + to+= addonf->length; } - to+= addonf->length; } + if (packed_addon_fields) + Addon_fields::store_addon_length(p_len, res_len); } else { /* Save filepos last */ memcpy((uchar*) to, ref_pos, (size_t) param->ref_length); + to+= param->ref_length; } - return; + return static_cast<uint>(to - orig_to); } @@ -1265,8 +1345,8 @@ static void register_used_fields(Sort_param *param) SORT_FIELD *sort_field; TABLE *table=param->sort_form; - for (sort_field= param->local_sortorder ; - sort_field != param->end ; + for (sort_field= param->local_sortorder.begin() ; + sort_field != param->local_sortorder.end() ; sort_field++) { Field *field; @@ -1281,12 +1361,14 @@ static void register_used_fields(Sort_param *param) } } - if (param->addon_field) + if (param->using_addon_fields()) { - SORT_ADDON_FIELD *addonf= param->addon_field; - Field *field; - for ( ; (field= addonf->field) ; addonf++) + SORT_ADDON_FIELD *addonf= param->addon_fields->begin(); + for ( ; (addonf != param->addon_fields->end()) ; addonf++) + { + Field *field= addonf->field; field->register_field_in_read_map(); + } } else { @@ -1305,16 +1387,24 @@ static bool save_index(Sort_param *param, uint count, DBUG_ASSERT(table_sort->record_pointers == 0); table_sort->sort_buffer(param, count); + + if (param->using_addon_fields()) + { + table_sort->sorted_result_in_fsbuf= TRUE; + table_sort->set_sort_length(param->sort_length); + DBUG_RETURN(0); + } + res_length= param->res_length; offset= param->rec_length-res_length; if (!(to= table_sort->record_pointers= (uchar*) my_malloc(res_length*count, 
MYF(MY_WME | MY_THREAD_SPECIFIC)))) DBUG_RETURN(1); /* purecov: inspected */ - uchar **sort_keys= table_sort->get_sort_keys(); - for (uchar **end= sort_keys+count ; sort_keys != end ; sort_keys++) + for (uint ix= 0; ix < count; ++ix) { - memcpy(to, *sort_keys+offset, res_length); + uchar *record= table_sort->get_sorted_record(ix); + memcpy(to, record + offset, res_length); to+= res_length; } DBUG_RETURN(0); @@ -1385,8 +1475,9 @@ static bool check_if_pq_applicable(Sort_param *param, // The whole source set fits into memory. if (param->max_rows < num_rows/PQ_slowness ) { - DBUG_RETURN(filesort_info->alloc_sort_buffer(param->max_keys_per_buffer, - param->rec_length) != NULL); + filesort_info->alloc_sort_buffer(param->max_keys_per_buffer, + param->rec_length); + DBUG_RETURN(filesort_info->sort_buffer_size() != 0); } else { @@ -1398,12 +1489,13 @@ static bool check_if_pq_applicable(Sort_param *param, // Do we have space for LIMIT rows in memory? if (param->max_keys_per_buffer < num_available_keys) { - DBUG_RETURN(filesort_info->alloc_sort_buffer(param->max_keys_per_buffer, - param->rec_length) != NULL); + filesort_info->alloc_sort_buffer(param->max_keys_per_buffer, + param->rec_length); + DBUG_RETURN(filesort_info->sort_buffer_size() != 0); } // Try to strip off addon fields. - if (param->addon_field) + if (param->addon_fields) { const size_t row_length= param->sort_length + param->ref_length + sizeof(char*); @@ -1435,14 +1527,15 @@ static bool check_if_pq_applicable(Sort_param *param, if (sort_merge_cost < pq_cost) DBUG_RETURN(false); - if (filesort_info->alloc_sort_buffer(param->max_keys_per_buffer, - param->sort_length + - param->ref_length)) + filesort_info->alloc_sort_buffer(param->max_keys_per_buffer, + param->sort_length + param->ref_length); + + if (filesort_info->sort_buffer_size() > 0) { /* Make attached data to be references instead of fields. */ - my_free(filesort_info->addon_field); - filesort_info->addon_field= NULL; - param->addon_field= NULL; + my_free(filesort_info->addon_fields); + filesort_info->addon_fields= NULL; + param->addon_fields= NULL; param->res_length= param->ref_length; param->sort_length+= param->ref_length; @@ -1458,12 +1551,12 @@ static bool check_if_pq_applicable(Sort_param *param, /** Merge buffers to make < MERGEBUFF2 buffers. 
*/ -int merge_many_buff(Sort_param *param, uchar *sort_buffer, - BUFFPEK *buffpek, uint *maxbuffer, IO_CACHE *t_file) +int merge_many_buff(Sort_param *param, Sort_buffer sort_buffer, + Merge_chunk *buffpek, uint *maxbuffer, IO_CACHE *t_file) { uint i; IO_CACHE t_file2,*from_file,*to_file,*temp; - BUFFPEK *lastbuff; + Merge_chunk *lastbuff; DBUG_ENTER("merge_many_buff"); if (*maxbuffer < MERGEBUFF2) @@ -1483,11 +1576,11 @@ int merge_many_buff(Sort_param *param, uchar *sort_buffer, lastbuff=buffpek; for (i=0 ; i <= *maxbuffer-MERGEBUFF*3/2 ; i+=MERGEBUFF) { - if (merge_buffers(param,from_file,to_file,sort_buffer,lastbuff++, + if (merge_buffers(param,from_file,to_file,sort_buffer, lastbuff++, buffpek+i,buffpek+i+MERGEBUFF-1,0)) goto cleanup; } - if (merge_buffers(param,from_file,to_file,sort_buffer,lastbuff++, + if (merge_buffers(param,from_file,to_file,sort_buffer, lastbuff++, buffpek+i,buffpek+ *maxbuffer,0)) break; /* purecov: inspected */ if (flush_io_cache(to_file)) @@ -1513,24 +1606,68 @@ int merge_many_buff(Sort_param *param, uchar *sort_buffer, (ulong)-1 if something goes wrong */ -ulong read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, - uint rec_length) +ulong read_to_buffer(IO_CACHE *fromfile, Merge_chunk *buffpek, + Sort_param *param) { - ulong count; - ulong length= 0; + ha_rows count; + uint rec_length= param->rec_length; - if ((count= (ulong) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count))) + if ((count= MY_MIN(buffpek->max_keys(),buffpek->rowcount()))) { - length= rec_length*count; - if (unlikely(my_b_pread(fromfile, (uchar*) buffpek->base, length, - buffpek->file_pos))) + size_t bytes_to_read; + if (param->using_packed_addons()) + { + count= buffpek->rowcount(); + bytes_to_read= MY_MIN(buffpek->buffer_size(), + static_cast<size_t>(fromfile->end_of_file - + buffpek->file_position())); + } + else + bytes_to_read= rec_length * static_cast<size_t>(count); + + if (unlikely(my_b_pread(fromfile, buffpek->buffer_start(), + bytes_to_read, buffpek->file_position()))) return ((ulong) -1); - buffpek->key=buffpek->base; - buffpek->file_pos+= length; /* New filepos */ - buffpek->count-= count; - buffpek->mem_count= count; + + size_t num_bytes_read; + if (param->using_packed_addons()) + { + /* + The last record read is most likely not complete here. + We need to loop through all the records, reading the length fields, + and then "chop off" the final incomplete record. + */ + uchar *record= buffpek->buffer_start(); + uint ix= 0; + for (; ix < count; ++ix) + { + if (record + param->sort_length + Addon_fields::size_of_length_field > + buffpek->buffer_end()) + break; // Incomplete record. + uchar *plen= record + param->sort_length; + uint res_length= Addon_fields::read_addon_length(plen); + if (plen + res_length > buffpek->buffer_end()) + break; // Incomplete record. 
+ DBUG_ASSERT(res_length > 0); + record+= param->sort_length; + record+= res_length; + } + DBUG_ASSERT(ix > 0); + count= ix; + num_bytes_read= record - buffpek->buffer_start(); + DBUG_PRINT("info", ("read %llu bytes of complete records", + static_cast<ulonglong>(bytes_to_read))); + } + else + num_bytes_read= bytes_to_read; + + buffpek->init_current_key(); + buffpek->advance_file_position(num_bytes_read); /* New filepos */ + buffpek->decrement_rowcount(count); + buffpek->set_mem_count(count); + return (ulong) num_bytes_read; } - return (length); + return 0; } /* read_to_buffer */ @@ -1545,25 +1682,15 @@ ulong read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, @param[in] key_length key length */ -void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length) +void reuse_freed_buff(QUEUE *queue, Merge_chunk *reuse, uint key_length) { - uchar *reuse_end= reuse->base + reuse->max_keys * key_length; for (uint i= queue_first_element(queue); i <= queue_last_element(queue); i++) { - BUFFPEK *bp= (BUFFPEK *) queue_element(queue, i); - if (bp->base + bp->max_keys * key_length == reuse->base) - { - bp->max_keys+= reuse->max_keys; + Merge_chunk *bp= (Merge_chunk *) queue_element(queue, i); + if (reuse->merge_freed_buff(bp)) return; - } - else if (bp->base == reuse_end) - { - bp->base= reuse->base; - bp->max_keys+= reuse->max_keys; - return; - } } DBUG_ASSERT(0); } @@ -1588,8 +1715,8 @@ void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length) */ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, - IO_CACHE *to_file, uchar *sort_buffer, - BUFFPEK *lastbuff, BUFFPEK *Fb, BUFFPEK *Tb, + IO_CACHE *to_file, Sort_buffer sort_buffer, + Merge_chunk *lastbuff, Merge_chunk *Fb, Merge_chunk *Tb, int flag) { bool error= 0; @@ -1599,7 +1726,7 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, ha_rows max_rows,org_max_rows; my_off_t to_start_filepos; uchar *strpos; - BUFFPEK *buffpek; + Merge_chunk *buffpek; QUEUE queue; qsort2_cmp cmp; void *first_cmp_arg; @@ -1625,7 +1752,7 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, uint wr_offset= flag ? 
offset : 0; maxcount= (ulong) (param->max_keys_per_buffer/((uint) (Tb-Fb) +1)); to_start_filepos= my_b_tell(to_file); - strpos= sort_buffer; + strpos= sort_buffer.array(); org_max_rows=max_rows= param->max_rows; set_if_bigger(maxcount, 1); @@ -1640,19 +1767,23 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, cmp= get_ptr_compare(sort_length); first_cmp_arg= (void*) &sort_length; } - if (unlikely(init_queue(&queue, (uint) (Tb-Fb)+1, offsetof(BUFFPEK,key), 0, + if (unlikely(init_queue(&queue, (uint) (Tb-Fb)+1, + offsetof(Merge_chunk,m_current_key), 0, (queue_compare) cmp, first_cmp_arg, 0, 0))) DBUG_RETURN(1); /* purecov: inspected */ for (buffpek= Fb ; buffpek <= Tb ; buffpek++) { - buffpek->base= strpos; - buffpek->max_keys= maxcount; - bytes_read= read_to_buffer(from_file, buffpek, rec_length); + buffpek->set_buffer(strpos, + strpos + (sort_buffer.size()/((uint) (Tb-Fb) +1))); + + buffpek->set_max_keys(maxcount); + bytes_read= read_to_buffer(from_file, buffpek, param); if (unlikely(bytes_read == (ulong) -1)) goto err; /* purecov: inspected */ - strpos+= bytes_read; - buffpek->max_keys= buffpek->mem_count; // If less data in buffers than expected + buffpek->set_buffer_end(strpos); + // If less data in buffers than expected + buffpek->set_max_keys(buffpek->mem_count()); queue_insert(&queue, (uchar*) buffpek); } @@ -1663,16 +1794,17 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, Copy the first argument to unique_buff for unique removal. Store it also in 'to_file'. */ - buffpek= (BUFFPEK*) queue_top(&queue); - memcpy(unique_buff, buffpek->key, rec_length); + buffpek= (Merge_chunk*) queue_top(&queue); + memcpy(unique_buff, buffpek->current_key(), rec_length); if (min_dupl_count) memcpy(&dupl_count, unique_buff+dupl_count_ofs, sizeof(dupl_count)); - buffpek->key+= rec_length; - if (! --buffpek->mem_count) + buffpek->advance_current_key(rec_length); + buffpek->decrement_mem_count(); + if (buffpek->mem_count() == 0) { if (unlikely(!(bytes_read= read_to_buffer(from_file, buffpek, - rec_length)))) + param)))) { (void) queue_remove_top(&queue); reuse_freed_buff(&queue, buffpek, rec_length); @@ -1692,61 +1824,68 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, for (;;) { - buffpek= (BUFFPEK*) queue_top(&queue); - src= buffpek->key; + buffpek= (Merge_chunk*) queue_top(&queue); + src= buffpek->current_key(); if (cmp) // Remove duplicates { - if (!(*cmp)(first_cmp_arg, &unique_buff, - (uchar**) &buffpek->key)) - { + uchar *current_key= buffpek->current_key(); + if (!(*cmp)(first_cmp_arg, &unique_buff, &current_key)) + { if (min_dupl_count) - { + { element_count cnt; - memcpy(&cnt, (uchar *) buffpek->key+dupl_count_ofs, sizeof(cnt)); + memcpy(&cnt, buffpek->current_key() + dupl_count_ofs, sizeof(cnt)); dupl_count+= cnt; } goto skip_duplicate; } if (min_dupl_count) - { + { memcpy(unique_buff+dupl_count_ofs, &dupl_count, sizeof(dupl_count)); } - src= unique_buff; - } - - /* - Do not write into the output file if this is the final merge called - for a Unique object used for intersection and dupl_count is less - than min_dupl_count. - If the Unique object is used to intersect N sets of unique elements - then for any element: - dupl_count >= N <=> the element is occurred in each of these N sets. 
- */ - if (!check_dupl_count || dupl_count >= min_dupl_count) - { - if (my_b_write(to_file, src+wr_offset, wr_len)) - goto err; /* purecov: inspected */ + src= unique_buff; } - if (cmp) - { - memcpy(unique_buff, (uchar*) buffpek->key, rec_length); - if (min_dupl_count) - memcpy(&dupl_count, unique_buff+dupl_count_ofs, - sizeof(dupl_count)); - } - if (!--max_rows) + { - /* Nothing more to do */ - goto end; /* purecov: inspected */ - } + param->get_rec_and_res_len(buffpek->current_key(), + &rec_length, &res_length); + const uint bytes_to_write= (flag == 0) ? rec_length : res_length; + /* + Do not write into the output file if this is the final merge called + for a Unique object used for intersection and dupl_count is less + than min_dupl_count. + If the Unique object is used to intersect N sets of unique elements + then for any element: + dupl_count >= N <=> the element is occurred in each of these N sets. + */ + if (!check_dupl_count || dupl_count >= min_dupl_count) + { + if (my_b_write(to_file, src + wr_offset, bytes_to_write)) + goto err; /* purecov: inspected */ + } + if (cmp) + { + memcpy(unique_buff, buffpek->current_key(), rec_length); + if (min_dupl_count) + memcpy(&dupl_count, unique_buff+dupl_count_ofs, + sizeof(dupl_count)); + } + if (!--max_rows) + { + /* Nothing more to do */ + goto end; /* purecov: inspected */ + } + } skip_duplicate: - buffpek->key+= rec_length; - if (! --buffpek->mem_count) + buffpek->advance_current_key(rec_length); + buffpek->decrement_mem_count(); + + if (buffpek->mem_count() == 0) { if (unlikely(!(bytes_read= read_to_buffer(from_file, buffpek, - rec_length)))) + param)))) { (void) queue_remove_top(&queue); reuse_freed_buff(&queue, buffpek, rec_length); @@ -1758,9 +1897,10 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, queue_replace_top(&queue); /* Top element has been replaced */ } } - buffpek= (BUFFPEK*) queue_top(&queue); - buffpek->base= (uchar*) sort_buffer; - buffpek->max_keys= param->max_keys_per_buffer; + buffpek= (Merge_chunk*) queue_top(&queue); + buffpek->set_buffer(sort_buffer.array(), + sort_buffer.array() + sort_buffer.size()); + buffpek->set_max_keys(param->max_keys_per_buffer); /* As we know all entries in the buffer are unique, we only have to @@ -1768,16 +1908,17 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, */ if (cmp) { - if (!(*cmp)(first_cmp_arg, &unique_buff, (uchar**) &buffpek->key)) + uchar *current_key= buffpek->current_key(); + if (!(*cmp)(first_cmp_arg, &unique_buff, &current_key)) { if (min_dupl_count) { element_count cnt; - memcpy(&cnt, (uchar *) buffpek->key+dupl_count_ofs, sizeof(cnt)); + memcpy(&cnt, buffpek->current_key() + dupl_count_ofs, sizeof(cnt)); dupl_count+= cnt; } - buffpek->key+= rec_length; - --buffpek->mem_count; + buffpek->advance_current_key(rec_length); + buffpek->decrement_mem_count(); } if (min_dupl_count) @@ -1796,45 +1937,40 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, do { - if ((ha_rows) buffpek->mem_count > max_rows) + if (buffpek->mem_count() > max_rows) { /* Don't write too many records */ - buffpek->mem_count= (uint) max_rows; - buffpek->count= 0; /* Don't read more */ + buffpek->set_mem_count(max_rows); + buffpek->set_rowcount(0); /* Don't read more */ } - max_rows-= buffpek->mem_count; - if (flag == 0) + max_rows-= buffpek->mem_count(); + for (uint ix= 0; ix < buffpek->mem_count(); ++ix) { - if (my_b_write(to_file, (uchar*) buffpek->key, - (size_t)(rec_length*buffpek->mem_count))) - goto err; /* purecov: inspected */ - } - else - { - uchar *end; - 
src= buffpek->key+offset; - for (end= src+buffpek->mem_count*rec_length ; - src != end ; - src+= rec_length) + param->get_rec_and_res_len(buffpek->current_key(), + &rec_length, &res_length); + const uint bytes_to_write= (flag == 0) ? rec_length : res_length; + if (check_dupl_count) { - if (check_dupl_count) - { - memcpy((uchar *) &dupl_count, src+dupl_count_ofs, sizeof(dupl_count)); - if (dupl_count < min_dupl_count) - continue; - } - if (my_b_write(to_file, src, wr_len)) - goto err; + memcpy((uchar *) &dupl_count, + buffpek->current_key() + offset + dupl_count_ofs, + sizeof(dupl_count)); + if (dupl_count < min_dupl_count) + continue; } + if (my_b_write(to_file, buffpek->current_key() + wr_offset, + bytes_to_write)) + goto err; + buffpek->advance_current_key(rec_length); } } while (likely(!(error= (bytes_read= read_to_buffer(from_file, buffpek, - rec_length)) == (ulong) -1)) && + param)) == (ulong) -1)) && bytes_read != 0); end: - lastbuff->count= MY_MIN(org_max_rows-max_rows, param->max_rows); - lastbuff->file_pos= to_start_filepos; + lastbuff->set_rowcount(MY_MIN(org_max_rows-max_rows, param->max_rows)); + lastbuff->set_file_position(to_start_filepos); + cleanup: delete_queue(&queue); DBUG_RETURN(error); @@ -1848,13 +1984,13 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, /* Do a merge to output-file (save only positions) */ -int merge_index(Sort_param *param, uchar *sort_buffer, - BUFFPEK *buffpek, uint maxbuffer, - IO_CACHE *tempfile, IO_CACHE *outfile) +int merge_index(Sort_param *param, Sort_buffer sort_buffer, + Merge_chunk *buffpek, uint maxbuffer, + IO_CACHE *tempfile, IO_CACHE *outfile) { DBUG_ENTER("merge_index"); - if (merge_buffers(param,tempfile,outfile,sort_buffer,buffpek,buffpek, - buffpek+maxbuffer,1)) + if (merge_buffers(param, tempfile, outfile, sort_buffer, buffpek, buffpek, + buffpek + maxbuffer, 1)) DBUG_RETURN(1); /* purecov: inspected */ DBUG_RETURN(0); } /* merge_index */ @@ -1977,7 +2113,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length, sortorder->length= (uint)cs->coll->strnxfrmlen(cs, sortorder->length); } if (sortorder->field->maybe_null()) - length++; // Place for NULL marker + length++; // Place for NULL marker } else { @@ -1988,21 +2124,40 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length, *multi_byte_charset= true; } if (sortorder->item->maybe_null) - length++; // Place for NULL marker + length++; // Place for NULL marker } set_if_smaller(sortorder->length, thd->variables.max_sort_length); length+=sortorder->length; } - sortorder->field= (Field*) 0; // end marker + sortorder->field= NULL; // end marker DBUG_PRINT("info",("sort_length: %d",length)); return length; } + +/* + Check whether addon fields can be used or not. 
+ + @param table Table structure + @param sortlength Length of sort key + @param length [OUT] Max length of addon fields + @param fields [OUT] Number of addon fields + @param null_fields [OUT] Number of nullable addon fields + @param packable_length [OUT] Max length of addon fields that can be + packed + + @retval + TRUE Addon fields can be used + FALSE Otherwise +*/ + bool filesort_use_addons(TABLE *table, uint sortlength, - uint *length, uint *fields, uint *null_fields) + uint *length, uint *fields, uint *null_fields, + uint *packable_length) { Field **pfield, *field; - *length= *fields= *null_fields= 0; + *length= *fields= *null_fields= *packable_length= 0; + uint field_length=0; for (pfield= table->field; (field= *pfield) ; pfield++) { @@ -2010,7 +2165,12 @@ bool filesort_use_addons(TABLE *table, uint sortlength, continue; if (field->flags & BLOB_FLAG) return false; - (*length)+= field->max_packed_col_length(field->pack_length()); + field_length= field->max_packed_col_length(field->pack_length()); + (*length)+= field_length; + + if (field->maybe_null() || field->is_packable()) + (*packable_length)+= field_length; + if (field->maybe_null()) (*null_fields)++; (*fields)++; @@ -2035,11 +2195,11 @@ bool filesort_use_addons(TABLE *table, uint sortlength, layouts for the values of the non-sorted fields in the buffer and fills them. - @param thd Current thread - @param ptabfield Array of references to the table fields - @param sortlength Total length of sorted fields - @param [out] addon_buf Buffer to us for appended fields - + @param table Table structure + @param sortlength Total length of sorted fields + @param addon_length [OUT] Length of addon fields + @param m_packable_length [OUT] Length of the addon fields that can be + packed @note The null bits for the appended values are supposed to be put together and stored the buffer just ahead of the value of the first field. @@ -2050,13 +2210,13 @@ bool filesort_use_addons(TABLE *table, uint sortlength, NULL if we do not store field values with sort data. */ -static SORT_ADDON_FIELD * -get_addon_fields(TABLE *table, uint sortlength, LEX_STRING *addon_buf) +static Addon_fields* +get_addon_fields(TABLE *table, uint sortlength, + uint *addon_length, uint *m_packable_length) { Field **pfield; Field *field; - SORT_ADDON_FIELD *addonf; - uint length, fields, null_fields; + uint length, fields, null_fields, packable_length; MY_BITMAP *read_set= table->read_set; DBUG_ENTER("get_addon_fields"); @@ -2070,23 +2230,34 @@ get_addon_fields(TABLE *table, uint sortlength, LEX_STRING *addon_buf) the values directly from sorted fields. 
But beware the case when item->cmp_type() != item->result_type() */ - addon_buf->str= 0; - addon_buf->length= 0; // see remove_const() for HA_SLOW_RND_POS explanation if (table->file->ha_table_flags() & HA_SLOW_RND_POS) sortlength= 0; - if (!filesort_use_addons(table, sortlength, &length, &fields, &null_fields) || - !my_multi_malloc(MYF(MY_WME | MY_THREAD_SPECIFIC), &addonf, - sizeof(SORT_ADDON_FIELD) * (fields+1), - &addon_buf->str, length, NullS)) + void *raw_mem_addon_field, *raw_mem; + if (!filesort_use_addons(table, sortlength, &length, &fields, &null_fields, + &packable_length) || + !(my_multi_malloc(MYF(MY_WME | MY_THREAD_SPECIFIC), + &raw_mem, sizeof(Addon_fields), + &raw_mem_addon_field, + sizeof(SORT_ADDON_FIELD) * fields, + NullS))) DBUG_RETURN(0); - addon_buf->length= length; + Addon_fields_array + addon_array(static_cast<SORT_ADDON_FIELD*>(raw_mem_addon_field), fields); + Addon_fields *addon_fields= new (raw_mem) Addon_fields(addon_array); + + DBUG_ASSERT(addon_fields); + + (*addon_length)= length; + (*m_packable_length)= packable_length; + length= (null_fields+7)/8; null_fields= 0; + SORT_ADDON_FIELD* addonf= addon_fields->begin(); for (pfield= table->field; (field= *pfield) ; pfield++) { if (!bitmap_is_set(read_set, field->field_index)) @@ -2108,10 +2279,9 @@ get_addon_fields(TABLE *table, uint sortlength, LEX_STRING *addon_buf) length+= addonf->length; addonf++; } - addonf->field= 0; // Put end marker DBUG_PRINT("info",("addon_length: %d",length)); - DBUG_RETURN(addonf-fields); + DBUG_RETURN(addon_fields); } @@ -2130,24 +2300,7 @@ get_addon_fields(TABLE *table, uint sortlength, LEX_STRING *addon_buf) void. */ -static void -unpack_addon_fields(struct st_sort_addon_field *addon_field, uchar *buff, - uchar *buff_end) -{ - Field *field; - SORT_ADDON_FIELD *addonf= addon_field; - for ( ; (field= addonf->field) ; addonf++) - { - if (addonf->null_bit && (addonf->null_bit & buff[addonf->null_offset])) - { - field->set_null(); - continue; - } - field->set_notnull(); - field->unpack(field->ptr, buff + addonf->offset, buff_end, 0); - } -} /* ** functions to change a double or float to a sortable string @@ -2197,6 +2350,17 @@ void change_double_for_sort(double nr,uchar *to) } } +bool SORT_INFO::using_packed_addons() +{ + return addon_fields != NULL && addon_fields->using_packed_addons(); +} + +void SORT_INFO::free_addon_buff() +{ + if (addon_fields) + addon_fields->free_addon_buff(); +} + /** Free SORT_INFO */ diff --git a/sql/filesort.h b/sql/filesort.h index 5f79a5095cc..5102ee2326f 100644 --- a/sql/filesort.h +++ b/sql/filesort.h @@ -27,7 +27,7 @@ class Filesort_tracker; struct SORT_FIELD; typedef struct st_order ORDER; class JOIN; - +class Addon_fields; /** Sorting related info. 
@@ -87,7 +87,8 @@ class SORT_INFO public: SORT_INFO() - :addon_field(0), record_pointers(0) + :addon_fields(NULL), record_pointers(0), + sorted_result_in_fsbuf(FALSE) { buffpek.str= 0; my_b_clear(&io_cache); @@ -98,9 +99,11 @@ class SORT_INFO void free_data() { close_cached_file(&io_cache); + free_addon_buff(); my_free(record_pointers); my_free(buffpek.str); - my_free(addon_field); + my_free(addon_fields); + free_sort_buffer(); } void reset() @@ -108,17 +111,26 @@ class SORT_INFO free_data(); record_pointers= 0; buffpek.str= 0; - addon_field= 0; + addon_fields= 0; + sorted_result_in_fsbuf= false; } + void free_addon_buff(); IO_CACHE io_cache; /* If sorted through filesort */ LEX_STRING buffpek; /* Buffer for buffpek structures */ - LEX_STRING addon_buf; /* Pointer to a buffer if sorted with fields */ - struct st_sort_addon_field *addon_field; /* Pointer to the fields info */ - /* To unpack back */ - void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *); + Addon_fields *addon_fields; /* Addon field descriptors */ uchar *record_pointers; /* If sorted in memory */ + + /** + If the entire result of filesort fits in memory, we skip the merge phase. + We may leave the result in filesort_buffer + (indicated by sorted_result_in_fsbuf), or we may strip away + the sort keys, and copy the sorted result into a new buffer. + @see save_index() + */ + bool sorted_result_in_fsbuf; + /* How many rows in final result. Also how many rows in record_pointers, if used @@ -131,27 +143,65 @@ class SORT_INFO void sort_buffer(Sort_param *param, uint count) { filesort_buffer.sort_buffer(param, count); } - /** - Accessors for Filesort_buffer (which @c). - */ - uchar *get_record_buffer(uint idx) - { return filesort_buffer.get_record_buffer(idx); } - uchar **get_sort_keys() { return filesort_buffer.get_sort_keys(); } - uchar **alloc_sort_buffer(uint num_records, uint record_length) + uchar *get_sorted_record(uint ix) + { return filesort_buffer.get_sorted_record(ix); } + + uchar *alloc_sort_buffer(uint num_records, uint record_length) { return filesort_buffer.alloc_sort_buffer(num_records, record_length); } void free_sort_buffer() { filesort_buffer.free_sort_buffer(); } + bool isfull() const + { return filesort_buffer.isfull(); } void init_record_pointers() { filesort_buffer.init_record_pointers(); } + void init_next_record_pointer() + { filesort_buffer.init_next_record_pointer(); } + uchar *get_next_record_pointer() + { return filesort_buffer.get_next_record_pointer(); } + void adjust_next_record_pointer(uint val) + { filesort_buffer.adjust_next_record_pointer(val); } + + Bounds_checked_array<uchar> get_raw_buf() + { return filesort_buffer.get_raw_buf(); } size_t sort_buffer_size() const { return filesort_buffer.sort_buffer_size(); } + bool is_allocated() const + { return filesort_buffer.is_allocated(); } + void set_sort_length(uint val) + { filesort_buffer.set_sort_length(val); } + uint get_sort_length() const + { return filesort_buffer.get_sort_length(); } + + bool has_filesort_result_in_memory() const + { + return record_pointers || sorted_result_in_fsbuf; + } + + /// Are we using "addon fields"? + bool using_addon_fields() const + { + return addon_fields != NULL; + } + + /// Are we using "packed addon fields"? + bool using_packed_addons(); + + /** + Copies (unpacks) values appended to sorted fields from a buffer back to + their regular positions specified by the Field::ptr pointers. 
+ @param buff Buffer which to unpack the value from + */ + template<bool Packed_addon_fields> + inline void unpack_addon_fields(uchar *buff); + + friend SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, Filesort_tracker* tracker, JOIN *join, table_map first_table_bit); @@ -162,7 +212,8 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, table_map first_table_bit=0); bool filesort_use_addons(TABLE *table, uint sortlength, - uint *length, uint *fields, uint *null_fields); + uint *length, uint *fields, uint *null_fields, + uint *m_packable_length); void change_double_for_sort(double nr,uchar *to); diff --git a/sql/filesort_utils.cc b/sql/filesort_utils.cc index 703db84495f..06e3f477993 100644 --- a/sql/filesort_utils.cc +++ b/sql/filesort_utils.cc @@ -96,82 +96,92 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows, # Pointer to allocated buffer */ -uchar **Filesort_buffer::alloc_sort_buffer(uint num_records, - uint record_length) +uchar *Filesort_buffer::alloc_sort_buffer(uint num_records, + uint record_length) { size_t buff_size; - uchar **sort_keys, **start_of_data; DBUG_ENTER("alloc_sort_buffer"); DBUG_EXECUTE_IF("alloc_sort_buffer_fail", DBUG_SET("+d,simulate_out_of_memory");); - buff_size= ((size_t)num_records) * (record_length + sizeof(uchar*)); - set_if_bigger(buff_size, record_length * MERGEBUFF2); + buff_size= ALIGN_SIZE(num_records * (record_length + sizeof(uchar*))); - if (!m_idx_array.is_null()) + /* + The minimum memory required should be each merge buffer can hold atmost + one key. + TODO varun: move this to the place where min_sort_memory is used. + */ + set_if_bigger(buff_size, (record_length +sizeof(uchar*)) * MERGEBUFF2); + + if (m_rawmem) { /* Reuse old buffer if exists and is large enough Note that we don't make the buffer smaller, as we want to be prepared for next subquery iteration. 
*/ - - sort_keys= m_idx_array.array(); - if (buff_size > allocated_size) + if (buff_size > m_size_in_bytes) { /* Better to free and alloc than realloc as we don't have to remember the old values */ - my_free(sort_keys); - if (!(sort_keys= (uchar**) my_malloc(buff_size, - MYF(MY_THREAD_SPECIFIC)))) + my_free(m_rawmem); + if (!(m_rawmem= (uchar*) my_malloc(buff_size, MYF(MY_THREAD_SPECIFIC)))) { - reset(); + m_size_in_bytes= 0; DBUG_RETURN(0); } - allocated_size= buff_size; } } else { - if (!(sort_keys= (uchar**) my_malloc(buff_size, MYF(MY_THREAD_SPECIFIC)))) + if (!(m_rawmem= (uchar*) my_malloc(buff_size, MYF(MY_THREAD_SPECIFIC)))) + { + m_size_in_bytes= 0; DBUG_RETURN(0); - allocated_size= buff_size; + } + } - m_idx_array= Idx_array(sort_keys, num_records); + m_size_in_bytes= buff_size; + m_record_pointers= reinterpret_cast<uchar**>(m_rawmem) + + ((m_size_in_bytes / sizeof(uchar*)) - 1); + m_num_records= num_records; m_record_length= record_length; - start_of_data= m_idx_array.array() + m_idx_array.size(); - m_start_of_data= reinterpret_cast<uchar*>(start_of_data); - - DBUG_RETURN(m_idx_array.array()); + m_idx= 0; + DBUG_RETURN(m_rawmem); } void Filesort_buffer::free_sort_buffer() { - my_free(m_idx_array.array()); - m_idx_array.reset(); - m_start_of_data= NULL; + my_free(m_rawmem); + *this= Filesort_buffer(); } void Filesort_buffer::sort_buffer(const Sort_param *param, uint count) { size_t size= param->sort_length; + m_sort_keys= get_sort_keys(); + if (count <= 1 || size == 0) return; - uchar **keys= get_sort_keys(); + + // dont reverse for PQ, it is already done + if (!param->using_pq) + reverse_record_pointers(); + uchar **buffer= NULL; if (radixsort_is_appliccable(count, param->sort_length) && (buffer= (uchar**) my_malloc(count*sizeof(char*), MYF(MY_THREAD_SPECIFIC)))) { - radixsort_for_str_ptr(keys, count, param->sort_length, buffer); + radixsort_for_str_ptr(m_sort_keys, count, param->sort_length, buffer); my_free(buffer); return; } - my_qsort2(keys, count, sizeof(uchar*), get_ptr_compare(size), &size); + my_qsort2(m_sort_keys, count, sizeof(uchar*), get_ptr_compare(size), &size); } diff --git a/sql/filesort_utils.h b/sql/filesort_utils.h index 1ab1ba2daa8..e8b93940abf 100644 --- a/sql/filesort_utils.h +++ b/sql/filesort_utils.h @@ -46,68 +46,194 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows, /** A wrapper class around the buffer used by filesort(). - The buffer is a contiguous chunk of memory, - where the first part is <num_records> pointers to the actual data. + The sort buffer is a contiguous chunk of memory, + containing both records to be sorted, and pointers to said records: + + <start of buffer | still unused | end of buffer> + |rec 0|record 1 |rec 2| ............ |ptr to rec2|ptr to rec1|ptr to rec0| + + Records will be inserted "left-to-right". Records are not necessarily + fixed-size, they can be packed and stored without any "gaps". + + Record pointers will be inserted "right-to-left", as a side-effect + of inserting the actual records. We wrap the buffer in order to be able to do lazy initialization of the pointers: the buffer is often much larger than what we actually need. + With this allocation scheme, and lazy initialization of the pointers, + we are able to pack variable-sized records in the buffer, + and thus possibly have space for more records than we initially estimated. + The buffer must be kept available for multiple executions of the same sort operation, so we have explicit allocate and free functions, rather than doing alloc/free in CTOR/DTOR. 
*/ + class Filesort_buffer { public: - Filesort_buffer() - : m_idx_array(), m_start_of_data(NULL), allocated_size(0) + Filesort_buffer() : + m_next_rec_ptr(NULL), m_rawmem(NULL), m_record_pointers(NULL), + m_sort_keys(NULL), + m_num_records(0), m_record_length(0), + m_sort_length(0), + m_size_in_bytes(0), m_idx(0) {} - - ~Filesort_buffer() + + /** Sort me... */ + void sort_buffer(const Sort_param *param, uint count); + + /** + Reverses the record pointer array, to avoid recording new results for + non-deterministic mtr tests. + */ + void reverse_record_pointers() { - my_free(m_idx_array.array()); + if (m_idx < 2) // There is nothing to swap. + return; + uchar **keys= get_sort_keys(); + const longlong count= m_idx - 1; + for (longlong ix= 0; ix <= count/2; ++ix) + { + uchar *tmp= keys[count - ix]; + keys[count - ix] = keys[ix]; + keys[ix]= tmp; + } } - bool is_allocated() + /** + Initializes all the record pointers. + */ + void init_record_pointers() { - return m_idx_array.array() != 0; + init_next_record_pointer(); + while (m_idx < m_num_records) + (void) get_next_record_pointer(); + reverse_record_pointers(); } - void reset() + + /** + Prepares the buffer for the next batch of records to process. + */ + void init_next_record_pointer() { - m_idx_array.reset(); + m_idx= 0; + m_next_rec_ptr= m_rawmem; + m_sort_keys= NULL; } - /** Sort me... */ - void sort_buffer(const Sort_param *param, uint count); + /** + @returns the number of bytes currently in use for data. + */ + size_t space_used_for_data() const + { + return m_next_rec_ptr ? m_next_rec_ptr - m_rawmem : 0; + } - /// Initializes a record pointer. - uchar *get_record_buffer(uint idx) + /** + @returns the number of bytes left in the buffer. + */ + size_t spaceleft() const { - m_idx_array[idx]= m_start_of_data + (idx * m_record_length); - return m_idx_array[idx]; + DBUG_ASSERT(m_next_rec_ptr >= m_rawmem); + const size_t spaceused= + (m_next_rec_ptr - m_rawmem) + + (static_cast<size_t>(m_idx) * sizeof(uchar*)); + return m_size_in_bytes - spaceused; } - /// Initializes all the record pointers. - void init_record_pointers() + /** + Is the buffer full? + */ + bool isfull() const + { + if (m_idx < m_num_records) + return false; + return spaceleft() < (m_record_length + sizeof(uchar*)); + } + + /** + Where should the next record be stored? + */ + uchar *get_next_record_pointer() + { + uchar *retval= m_next_rec_ptr; + // Save the return value in the record pointer array. + m_record_pointers[-m_idx]= m_next_rec_ptr; + // Prepare for the subsequent request. + m_idx++; + m_next_rec_ptr+= m_record_length; + return retval; + } + + /** + Adjusts for actual record length. get_next_record_pointer() above was + pessimistic, and assumed that the record could not be packed. + */ + void adjust_next_record_pointer(uint val) { - for (uint ix= 0; ix < m_idx_array.size(); ++ix) - (void) get_record_buffer(ix); + m_next_rec_ptr-= (m_record_length - val); } /// Returns total size: pointer array + record buffers. size_t sort_buffer_size() const { - return allocated_size; + return m_size_in_bytes; } - /// Allocates the buffer, but does *not* initialize pointers. - uchar **alloc_sort_buffer(uint num_records, uint record_length); + bool is_allocated() const + { + return m_rawmem; + } + + /** + Allocates the buffer, but does *not* initialize pointers. + Total size = (num_records * record_length) + (num_records * sizeof(pointer)) + space for records space for pointer to records + Caller is responsible for raising an error if allocation fails. 
+ + @param num_records Number of records. + @param record_length (maximum) size of each record. + @returns Pointer to allocated area, or NULL in case of out-of-memory. + */ + uchar *alloc_sort_buffer(uint num_records, uint record_length); /// Frees the buffer. void free_sort_buffer(); - /// Getter, for calling routines which still use the uchar** interface. - uchar **get_sort_keys() { return m_idx_array.array(); } + void reset() + { + m_rawmem= NULL; + } + /** + Used to access the "right-to-left" array of record pointers as an ordinary + "left-to-right" array, so that we can pass it directly on to std::sort(). + */ + uchar **get_sort_keys() + { + if (m_idx == 0) + return NULL; + return &m_record_pointers[1 - m_idx]; + } + + /** + Gets sorted record number ix. @see get_sort_keys() + Only valid after buffer has been sorted! + */ + uchar *get_sorted_record(uint ix) + { + return m_sort_keys[ix]; + } + + /** + @returns The entire buffer, as a character array. + This is for reusing the memory for merge buffers. + */ + Bounds_checked_array<uchar> get_raw_buf() + { + return Bounds_checked_array<uchar>(m_rawmem, m_size_in_bytes); + } /** We need an assignment operator, see filesort(). @@ -117,20 +243,40 @@ class Filesort_buffer */ Filesort_buffer &operator=(const Filesort_buffer &rhs) { - m_idx_array= rhs.m_idx_array; + m_next_rec_ptr= rhs.m_next_rec_ptr; + m_rawmem= rhs.m_rawmem; + m_record_pointers= rhs.m_record_pointers; + m_sort_keys= rhs.m_sort_keys; + m_num_records= rhs.m_num_records; m_record_length= rhs.m_record_length; - m_start_of_data= rhs.m_start_of_data; - allocated_size= rhs.allocated_size; + m_sort_length= rhs.m_sort_length; + m_size_in_bytes= rhs.m_size_in_bytes; + m_idx= rhs.m_idx; return *this; } + uint get_sort_length() const { return m_sort_length; } + void set_sort_length(uint val) { m_sort_length= val; } + private: - typedef Bounds_checked_array<uchar*> Idx_array; + uchar *m_next_rec_ptr; /// The next record will be inserted here. + uchar *m_rawmem; /// The raw memory buffer. + uchar **m_record_pointers; /// The "right-to-left" array of record pointers. + uchar **m_sort_keys; /// Caches the value of get_sort_keys() + uint m_num_records; /// Saved value from alloc_sort_buffer() + uint m_record_length; /// Saved value from alloc_sort_buffer() + uint m_sort_length; /// The length of the sort key. + size_t m_size_in_bytes; /// Size of raw buffer, in bytes. - Idx_array m_idx_array; /* Pointers to key data */ - uint m_record_length; - uchar *m_start_of_data; /* Start of key data */ - size_t allocated_size; + /** + This is the index in the "right-to-left" array of the next record to + be inserted into the buffer. It is signed, because we use it in signed + expressions like: + m_record_pointers[-m_idx]; + It is longlong rather than int, to ensure that it covers UINT_MAX32 + without any casting/warning. 
+ */ + longlong m_idx; }; #endif // FILESORT_UTILS_INCLUDED diff --git a/sql/records.cc b/sql/records.cc index 3d709182a4e..2b146abb005 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -38,8 +38,8 @@ static int rr_quick(READ_RECORD *info); int rr_sequential(READ_RECORD *info); static int rr_from_tempfile(READ_RECORD *info); -static int rr_unpack_from_tempfile(READ_RECORD *info); -static int rr_unpack_from_buffer(READ_RECORD *info); +template<bool> static int rr_unpack_from_tempfile(READ_RECORD *info); +template<bool> static int rr_unpack_from_buffer(READ_RECORD *info); int rr_from_pointers(READ_RECORD *info); static int rr_from_cache(READ_RECORD *info); static int init_rr_cache(THD *thd, READ_RECORD *info); @@ -187,23 +187,23 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, bool disable_rr_cache) { IO_CACHE *tempfile; - SORT_ADDON_FIELD *addon_field= filesort ? filesort->addon_field : 0; DBUG_ENTER("init_read_record"); + const bool using_addon_fields= filesort && filesort->using_addon_fields(); + bzero((char*) info,sizeof(*info)); info->thd=thd; info->table=table; - info->addon_field= addon_field; + info->sort_info= filesort; if ((table->s->tmp_table == INTERNAL_TMP_TABLE) && - !addon_field) + !using_addon_fields) (void) table->file->extra(HA_EXTRA_MMAP); - if (addon_field) + if (using_addon_fields) { - info->rec_buf= (uchar*) filesort->addon_buf.str; - info->ref_length= (uint)filesort->addon_buf.length; - info->unpack= filesort->unpack; + info->rec_buf= filesort->addon_fields->get_addon_buf(); + info->ref_length= filesort->addon_fields->get_addon_buf_length(); } else { @@ -223,9 +223,20 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, if (tempfile && !(select && select->quick)) { - DBUG_PRINT("info",("using rr_from_tempfile")); - info->read_record_func= - addon_field ? rr_unpack_from_tempfile : rr_from_tempfile; + if (using_addon_fields) + { + DBUG_PRINT("info",("using rr_from_tempfile")); + if (filesort->addon_fields->using_packed_addons()) + info->read_record_func= rr_unpack_from_tempfile<true>; + else + info->read_record_func= rr_unpack_from_tempfile<false>; + } + else + { + DBUG_PRINT("info",("using rr_from_tempfile")); + info->read_record_func= rr_from_tempfile; + } + info->io_cache= tempfile; reinit_io_cache(info->io_cache,READ_CACHE,0L,0,0); info->ref_pos=table->file->ref; @@ -239,7 +250,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, and filesort->io_cache is read sequentially */ if (!disable_rr_cache && - !addon_field && + !using_addon_fields && thd->variables.read_rnd_buff_size && !(table->file->ha_table_flags() & HA_FAST_KEY_READ) && (table->db_stat & HA_READ_ONLY || @@ -264,16 +275,29 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, DBUG_PRINT("info",("using rr_quick")); info->read_record_func= rr_quick; } - else if (filesort && filesort->record_pointers) + else if (filesort && filesort->has_filesort_result_in_memory()) { DBUG_PRINT("info",("using record_pointers")); if (unlikely(table->file->ha_rnd_init_with_error(0))) DBUG_RETURN(1); + info->cache_pos= filesort->record_pointers; - info->cache_end= (info->cache_pos+ - filesort->return_rows * info->ref_length); - info->read_record_func= - addon_field ? 
rr_unpack_from_buffer : rr_from_pointers; + if (using_addon_fields) + { + DBUG_PRINT("info",("using rr_unpack_from_buffer")); + DBUG_ASSERT(filesort->sorted_result_in_fsbuf); + info->unpack_counter= 0; + if (filesort->using_packed_addons()) + info->read_record_func= rr_unpack_from_buffer<true>; + else + info->read_record_func= rr_unpack_from_buffer<false>; + } + else + { + info->cache_end= (info->cache_pos+ + filesort->return_rows * info->ref_length); + info->read_record_func= rr_from_pointers; + } } else if (table->file->keyread_enabled()) { @@ -510,7 +534,11 @@ static int rr_from_tempfile(READ_RECORD *info) the fields values use in the result set from this buffer into their positions in the regular record buffer. - @param info Reference to the context including record descriptors + @param info Reference to the context including record + descriptors + @param Packed_addon_fields Are the addon fields packed? + This is a compile-time constant, to + avoid if (....) tests during execution. @retval 0 Record successfully read. @@ -518,12 +546,38 @@ static int rr_from_tempfile(READ_RECORD *info) -1 There is no record to be read anymore. */ +template<bool Packed_addon_fields> static int rr_unpack_from_tempfile(READ_RECORD *info) { - if (my_b_read(info->io_cache, info->rec_buf, info->ref_length)) - return -1; - (*info->unpack)(info->addon_field, info->rec_buf, - info->rec_buf + info->ref_length); + uchar *destination= info->rec_buf; +#ifndef DBUG_OFF + my_off_t where= my_b_tell(info->io_cache); +#endif + if (Packed_addon_fields) + { + const uint len_sz= Addon_fields::size_of_length_field; + + // First read length of the record. + if (my_b_read(info->io_cache, destination, len_sz)) + return -1; + uint res_length= Addon_fields::read_addon_length(destination); + DBUG_PRINT("info", ("rr_unpack from %llu to %p sz %u", + static_cast<ulonglong>(where), + destination, res_length)); + DBUG_ASSERT(res_length > len_sz); + DBUG_ASSERT(info->sort_info->using_addon_fields()); + + // Then read the rest of the record. + if (my_b_read(info->io_cache, destination + len_sz, res_length - len_sz)) + return -1; /* purecov: inspected */ + } + else + { + if (my_b_read(info->io_cache, destination, info->ref_length)) + return -1; + } + + info->sort_info->unpack_addon_fields<Packed_addon_fields>(destination); return 0; } @@ -560,7 +614,11 @@ int rr_from_pointers(READ_RECORD *info) the fields values use in the result set from this buffer into their positions in the regular record buffer. - @param info Reference to the context including record descriptors + @param info Reference to the context including record + descriptors + @param Packed_addon_fields Are the addon fields packed? + This is a compile-time constant, to + avoid if (....) tests during execution. @retval 0 Record successfully read. @@ -568,13 +626,17 @@ int rr_from_pointers(READ_RECORD *info) -1 There is no record to be read anymore. 
*/ +template<bool Packed_addon_fields> static int rr_unpack_from_buffer(READ_RECORD *info) { - if (info->cache_pos == info->cache_end) + if (info->unpack_counter == info->sort_info->return_rows) return -1; /* End of buffer */ - (*info->unpack)(info->addon_field, info->cache_pos, - info->cache_end); - info->cache_pos+= info->ref_length; + + uchar *record= info->sort_info->get_sorted_record( + static_cast<uint>(info->unpack_counter)); + uchar *plen= record + info->sort_info->get_sort_length(); + info->sort_info->unpack_addon_fields<Packed_addon_fields>(plen); + info->unpack_counter++; return 0; } /* cacheing of records from a database */ @@ -709,3 +771,26 @@ static int rr_cmp(uchar *a,uchar *b) return (int) a[7] - (int) b[7]; #endif } + +template<bool Packed_addon_fields> +inline void SORT_INFO::unpack_addon_fields(uchar *buff) +{ + SORT_ADDON_FIELD *addonf= addon_fields->begin(); + uchar *buff_end= buff + sort_buffer_size(); + const uchar *start_of_record= buff + addonf->offset; + + for ( ; addonf != addon_fields->end() ; addonf++) + { + Field *field= addonf->field; + if (addonf->null_bit && (addonf->null_bit & buff[addonf->null_offset])) + { + field->set_null(); + continue; + } + field->set_notnull(); + if (Packed_addon_fields) + start_of_record= field->unpack(field->ptr, start_of_record, buff_end, 0); + else + field->unpack(field->ptr, buff + addonf->offset, buff_end, 0); + } +} diff --git a/sql/records.h b/sql/records.h index faf0d13c9a9..04dc06b3c74 100644 --- a/sql/records.h +++ b/sql/records.h @@ -58,13 +58,23 @@ struct READ_RECORD THD *thd; SQL_SELECT *select; uint ref_length, reclength, rec_cache_size, error_offset; + + /** + Counting records when reading result from filesort(). + Used when filesort leaves the result in the filesort buffer. + */ + ha_rows unpack_counter; + uchar *ref_pos; /* pointer to form->refpos */ uchar *rec_buf; /* to read field values after filesort */ uchar *cache,*cache_pos,*cache_end,*read_positions; - struct st_sort_addon_field *addon_field; /* Pointer to the fields info */ + + /* + Structure storing information about sorting + */ + SORT_INFO *sort_info; struct st_io_cache *io_cache; bool print_error; - void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *); int read_record() { return read_record_func(this); } uchar *record() const { return table->record[0]; } diff --git a/sql/sql_array.h b/sql/sql_array.h index bcfbb98ef19..b05e8f779bd 100644 --- a/sql/sql_array.h +++ b/sql/sql_array.h @@ -85,6 +85,10 @@ template <typename Element_type> class Bounds_checked_array Element_type *array() const { return m_array; } + Element_type *begin() const { return array(); } + Element_type *end() const { return array() + m_size; } + + bool operator==(const Bounds_checked_array<Element_type>&rhs) const { return m_array == rhs.m_array && m_size == rhs.m_size; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 3619c603697..4da2eb3a4d4 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -13981,7 +13981,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, *simple_order= head->on_expr_ref[0] == NULL; if (*simple_order && head->table->file->ha_table_flags() & HA_SLOW_RND_POS) { - uint u1, u2, u3; + uint u1, u2, u3, u4; /* normally the condition is (see filesort_use_addons()) @@ -13992,7 +13992,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, TODO proper cost estimations */ - *simple_order= filesort_use_addons(head->table, 0, &u1, &u2, &u3); + *simple_order= filesort_use_addons(head->table, 0, &u1, &u2, &u3, &u4); } } else diff --git 
a/sql/sql_sort.h b/sql/sql_sort.h index 7abbc808632..5aa8f4545a4 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -20,8 +20,6 @@ #include <my_sys.h> /* qsort2_cmp */ #include "queues.h" -typedef struct st_buffpek BUFFPEK; - struct SORT_FIELD; class Field; struct TABLE; @@ -64,21 +62,236 @@ struct BUFFPEK_COMPARE_CONTEXT }; +/** + Descriptor for a merge chunk to be sort-merged. + A merge chunk is a sequence of pre-sorted records, written to a + temporary file. A Merge_chunk instance describes where this chunk is stored + in the file, and where it is located when it is in memory. + + It is a POD because + - we read/write them from/to files. + + We have accessors (getters/setters) for all struct members. + */ + +struct Merge_chunk { +public: + Merge_chunk(): m_current_key(NULL), + m_file_position(0), + m_buffer_start(NULL), + m_buffer_end(NULL), + m_rowcount(0), + m_mem_count(0), + m_max_keys(0) + {} + + my_off_t file_position() const { return m_file_position; } + void set_file_position(my_off_t val) { m_file_position= val; } + void advance_file_position(my_off_t val) { m_file_position+= val; } + + uchar *buffer_start() { return m_buffer_start; } + const uchar *buffer_end() const { return m_buffer_end; } + + void set_buffer(uchar *start, uchar *end) + { + m_buffer_start= start; + m_buffer_end= end; + } + void set_buffer_start(uchar *start) + { + m_buffer_start= start; + } + void set_buffer_end(uchar *end) + { + DBUG_ASSERT(m_buffer_end == NULL || end <= m_buffer_end); + m_buffer_end= end; + } + + void init_current_key() { m_current_key= m_buffer_start; } + uchar *current_key() { return m_current_key; } + void advance_current_key(uint val) { m_current_key+= val; } + + void decrement_rowcount(ha_rows val) { m_rowcount-= val; } + void set_rowcount(ha_rows val) { m_rowcount= val; } + ha_rows rowcount() const { return m_rowcount; } + + ha_rows mem_count() const { return m_mem_count; } + void set_mem_count(ha_rows val) { m_mem_count= val; } + ha_rows decrement_mem_count() { return --m_mem_count; } + + ha_rows max_keys() const { return m_max_keys; } + void set_max_keys(ha_rows val) { m_max_keys= val; } + + size_t buffer_size() const { return m_buffer_end - m_buffer_start; } + + /** + Tries to merge *this with *mc, returns true if successful. + The assumption is that *this is no longer in use, + and the space it has been allocated can be handed over to a + buffer which is adjacent to it. + */ + bool merge_freed_buff(Merge_chunk *mc) const + { + if (mc->m_buffer_end == m_buffer_start) + { + mc->m_buffer_end= m_buffer_end; + mc->m_max_keys+= m_max_keys; + return true; + } + else if (mc->m_buffer_start == m_buffer_end) + { + mc->m_buffer_start= m_buffer_start; + mc->m_max_keys+= m_max_keys; + return true; + } + return false; + } + + uchar *m_current_key; /// The current key for this chunk. + my_off_t m_file_position;/// Current position in the file to be sorted. + uchar *m_buffer_start; /// Start of main-memory buffer for this chunk. + uchar *m_buffer_end; /// End of main-memory buffer for this chunk. + ha_rows m_rowcount; /// Number of unread rows in this chunk. + ha_rows m_mem_count; /// Number of rows in the main-memory buffer. + ha_rows m_max_keys; /// If we have fixed-size rows: + /// max number of rows in buffer. +}; + +typedef Bounds_checked_array<SORT_ADDON_FIELD> Addon_fields_array; + +/** + This class wraps information about usage of addon fields. 
+ An Addon_fields object is used both during packing of data in the filesort + buffer, and later during unpacking in 'Filesort_info::unpack_addon_fields'. + + @see documentation for the Sort_addon_field struct. + @see documentation for get_addon_fields() + */ +class Addon_fields { +public: + Addon_fields(Addon_fields_array arr) + : m_field_descriptors(arr), + m_addon_buf(), + m_addon_buf_length(), + m_using_packed_addons(false) + { + DBUG_ASSERT(!arr.is_null()); + } + + SORT_ADDON_FIELD *begin() { return m_field_descriptors.begin(); } + SORT_ADDON_FIELD *end() { return m_field_descriptors.end(); } + + /// rr_unpack_from_tempfile needs an extra buffer when unpacking. + uchar *allocate_addon_buf(uint sz) + { + m_addon_buf= (uchar *)my_malloc(sz, MYF(MY_WME | MY_THREAD_SPECIFIC)); + if (m_addon_buf) + m_addon_buf_length= sz; + return m_addon_buf; + } + + void free_addon_buff() + { + my_free(m_addon_buf); + m_addon_buf= NULL; + m_addon_buf_length= 0; + } + + uchar *get_addon_buf() { return m_addon_buf; } + uint get_addon_buf_length() const { return m_addon_buf_length; } + + void set_using_packed_addons(bool val) + { + m_using_packed_addons= val; + } + + bool using_packed_addons() const + { + return m_using_packed_addons; + } + + static bool can_pack_addon_fields(uint record_length) + { + return (record_length <= (0xFFFF)); + } + + /** + @returns Total number of bytes used for packed addon fields. + the size of the length field + size of null bits + sum of field sizes. + */ + static uint read_addon_length(uchar *p) + { + return size_of_length_field + uint2korr(p); + } + + /** + Stores the number of bytes used for packed addon fields. + */ + static void store_addon_length(uchar *p, uint sz) + { + // We actually store the length of everything *after* the length field. + int2store(p, sz - size_of_length_field); + } + + static const uint size_of_length_field= 2; + +private: + Addon_fields_array m_field_descriptors; + + uchar *m_addon_buf; ///< Buffer for unpacking addon fields. + uint m_addon_buf_length; ///< Length of the buffer. + bool m_using_packed_addons; ///< Are we packing the addon fields? +}; + + +/** + There are two record formats for sorting: + |<key a><key b>...|<rowid>| + / sort_length / ref_l / + + or with "addon fields" + |<key a><key b>...|<null bits>|<field a><field b>...| + / sort_length / addon_length / + + The packed format for "addon fields" + |<key a><key b>...|<length>|<null bits>|<field a><field b>...| + / sort_length / addon_length / + + <key> Fields are fixed-size, specially encoded with + Field::make_sort_key() so we can do byte-by-byte compare. + <length> Contains the *actual* packed length (after packing) of + everything after the sort keys. + The size of the length field is 2 bytes, + which should cover most use cases: addon data <= 65535 bytes. + This is the same as max record size in MySQL. + <null bits> One bit for each nullable field, indicating whether the field + is null or not. May have size zero if no fields are nullable. + <field xx> Are stored with field->pack(), and retrieved with + field->unpack(). Addon fields within a record are stored + consecutively, with no "holes" or padding. They will have zero + size for NULL values. + +*/ + class Sort_param { public: uint rec_length; // Length of sorted records. uint sort_length; // Length of sorted columns. uint ref_length; // Length of record ref. + uint addon_length; // Length of addon_fields uint res_length; // Length of records in final sorted file/buffer. uint max_keys_per_buffer; // Max keys / buffer. 
uint min_dupl_count; ha_rows max_rows; // Select limit, or HA_POS_ERROR if unlimited. ha_rows examined_rows; // Number of examined rows. TABLE *sort_form; // For quicker make_sortkey. - SORT_FIELD *local_sortorder; - SORT_FIELD *end; - SORT_ADDON_FIELD *addon_field; // Descriptors for companion fields. - LEX_STRING addon_buf; // Buffer & length of added packed fields. + /** + ORDER BY list with some precalculated info for filesort. + Array is created and owned by a Filesort instance. + */ + Bounds_checked_array<SORT_FIELD> local_sortorder; + Addon_fields *addon_fields; // Descriptors for companion fields. + bool using_pq; uchar *unique_buff; bool not_killable; @@ -93,21 +306,63 @@ class Sort_param { } void init_for_filesort(uint sortlen, TABLE *table, ha_rows maxrows, bool sort_positions); + /// Enables the packing of addons if possible. + void try_to_pack_addons(ulong max_length_for_sort_data); + + /// Are we packing the "addon fields"? + bool using_packed_addons() const + { + DBUG_ASSERT(m_using_packed_addons == + (addon_fields != NULL && + addon_fields->using_packed_addons())); + return m_using_packed_addons; + } + + /// Are we using "addon fields"? + bool using_addon_fields() const + { + return addon_fields != NULL; + } + + /** + Getter for record length and result length. + @param record_start Pointer to record. + @param [out] recl Store record length here. + @param [out] resl Store result length here. + */ + void get_rec_and_res_len(uchar *record_start, uint *recl, uint *resl) + { + if (!using_packed_addons()) + { + *recl= rec_length; + *resl= res_length; + return; + } + uchar *plen= record_start + sort_length; + *resl= Addon_fields::read_addon_length(plen); + DBUG_ASSERT(*resl <= res_length); + const uchar *record_end= plen + *resl; + *recl= static_cast<uint>(record_end - record_start); + } + +private: + uint m_packable_length; + bool m_using_packed_addons; ///< caches the value of using_packed_addons() }; +typedef Bounds_checked_array<uchar> Sort_buffer; -int merge_many_buff(Sort_param *param, uchar *sort_buffer, - BUFFPEK *buffpek, - uint *maxbuffer, IO_CACHE *t_file); -ulong read_to_buffer(IO_CACHE *fromfile,BUFFPEK *buffpek, - uint sort_length); +int merge_many_buff(Sort_param *param, Sort_buffer sort_buffer, + Merge_chunk *buffpek, uint *maxbuffer, IO_CACHE *t_file); +ulong read_to_buffer(IO_CACHE *fromfile, Merge_chunk *buffpek, + Sort_param *param); bool merge_buffers(Sort_param *param,IO_CACHE *from_file, - IO_CACHE *to_file, uchar *sort_buffer, - BUFFPEK *lastbuff,BUFFPEK *Fb, - BUFFPEK *Tb,int flag); -int merge_index(Sort_param *param, uchar *sort_buffer, - BUFFPEK *buffpek, uint maxbuffer, - IO_CACHE *tempfile, IO_CACHE *outfile); -void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length); + IO_CACHE *to_file, Sort_buffer sort_buffer, + Merge_chunk *lastbuff, Merge_chunk *Fb, + Merge_chunk *Tb, int flag); +int merge_index(Sort_param *param, Sort_buffer sort_buffer, + Merge_chunk *buffpek, uint maxbuffer, + IO_CACHE *tempfile, IO_CACHE *outfile); +void reuse_freed_buff(QUEUE *queue, Merge_chunk *reuse, uint key_length); #endif /* SQL_SORT_INCLUDED */ diff --git a/sql/uniques.cc b/sql/uniques.cc index fafb44b56a0..a8170951e88 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -39,7 +39,6 @@ #include "my_tree.h" // element_count #include "uniques.h" // Unique #include "sql_sort.h" -#include "myisamchk.h" // BUFFPEK int unique_write_to_file(uchar* key, element_count count, Unique *unique) { @@ -94,7 +93,7 @@ Unique::Unique(qsort_cmp2 comp_func, void * 
comp_func_fixed_arg, init_tree(&tree, (max_in_memory_size / 16), 0, size, comp_func, NULL, comp_func_fixed_arg, MYF(MY_THREAD_SPECIFIC)); /* If the following fail's the next add will also fail */ - my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16, + my_init_dynamic_array(&file_ptrs, sizeof(Merge_chunk), 16, 16, MYF(MY_THREAD_SPECIFIC)); /* If you change the following, change it in get_max_elements function, too. @@ -375,10 +374,10 @@ Unique::~Unique() /* Write tree to disk; clear tree */ bool Unique::flush() { - BUFFPEK file_ptr; + Merge_chunk file_ptr; elements+= tree.elements_in_tree; - file_ptr.count=tree.elements_in_tree; - file_ptr.file_pos=my_b_tell(&file); + file_ptr.set_rowcount(tree.elements_in_tree); + file_ptr.set_file_position(my_b_tell(&file)); tree_walk_action action= min_dupl_count ? (tree_walk_action) unique_write_to_file_with_count : @@ -490,7 +489,7 @@ void put_counter_into_merged_element(void *ptr, uint ofs, element_count cnt) */ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, - uint key_length, BUFFPEK *begin, BUFFPEK *end, + uint key_length, Merge_chunk *begin, Merge_chunk *end, tree_walk_action walk_action, void *walk_action_arg, qsort_cmp2 compare, void *compare_arg, IO_CACHE *file, bool with_counters) @@ -499,7 +498,8 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, QUEUE queue; if (end <= begin || merge_buffer_size < (size_t) (key_length * (end - begin + 1)) || - init_queue(&queue, (uint) (end - begin), offsetof(BUFFPEK, key), 0, + init_queue(&queue, (uint) (end - begin), + offsetof(Merge_chunk, m_current_key), 0, buffpek_compare, &compare_context, 0, 0)) return 1; /* we need space for one key when a piece of merge buffer is re-read */ @@ -510,10 +510,16 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, /* if piece_size is aligned reuse_freed_buffer will always hit */ uint piece_size= max_key_count_per_piece * key_length; ulong bytes_read; /* to hold return value of read_to_buffer */ - BUFFPEK *top; + Merge_chunk *top; int res= 1; uint cnt_ofs= key_length - (with_counters ? sizeof(element_count) : 0); element_count cnt; + + // read_to_buffer() needs only rec_length. + Sort_param sort_param; + sort_param.rec_length= key_length; + DBUG_ASSERT(!sort_param.using_addon_fields()); + /* Invariant: queue must contain top element from each tree, until a tree is not completely walked through. @@ -522,15 +528,16 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, */ for (top= begin; top != end; ++top) { - top->base= merge_buffer + (top - begin) * piece_size; - top->max_keys= max_key_count_per_piece; - bytes_read= read_to_buffer(file, top, key_length); + top->set_buffer_start(merge_buffer + (top - begin) * piece_size); + top->set_buffer_end(top->buffer_start() + piece_size); + top->set_max_keys(max_key_count_per_piece); + bytes_read= read_to_buffer(file, top, &sort_param); if (unlikely(bytes_read == (ulong) -1)) goto end; DBUG_ASSERT(bytes_read); queue_insert(&queue, (uchar *) top); } - top= (BUFFPEK *) queue_top(&queue); + top= (Merge_chunk *) queue_top(&queue); while (queue.elements > 1) { /* @@ -540,20 +547,21 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, elements in each tree are unique. Action is applied only to unique elements. */ - void *old_key= top->key; + void *old_key= top->current_key(); /* read next key from the cache or from the file and push it to the queue; this gives new top. 
*/ - top->key+= key_length; - if (--top->mem_count) + top->advance_current_key(key_length); + top->decrement_mem_count(); + if (top->mem_count()) queue_replace_top(&queue); else /* next piece should be read */ { /* save old_key not to overwrite it in read_to_buffer */ memcpy(save_key_buff, old_key, key_length); old_key= save_key_buff; - bytes_read= read_to_buffer(file, top, key_length); + bytes_read= read_to_buffer(file, top, &sort_param); if (unlikely(bytes_read == (ulong) -1)) goto end; else if (bytes_read) /* top->key, top->mem_count are reset */ @@ -568,9 +576,9 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, reuse_freed_buff(&queue, top, key_length); } } - top= (BUFFPEK *) queue_top(&queue); + top= (Merge_chunk *) queue_top(&queue); /* new top has been obtained; if old top is unique, apply the action */ - if (compare(compare_arg, old_key, top->key)) + if (compare(compare_arg, old_key, top->current_key())) { cnt= with_counters ? get_counter_from_merged_element(old_key, cnt_ofs) : 1; @@ -579,9 +587,9 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, } else if (with_counters) { - cnt= get_counter_from_merged_element(top->key, cnt_ofs); + cnt= get_counter_from_merged_element(top->current_key(), cnt_ofs); cnt+= get_counter_from_merged_element(old_key, cnt_ofs); - put_counter_into_merged_element(top->key, cnt_ofs, cnt); + put_counter_into_merged_element(top->current_key(), cnt_ofs, cnt); } } /* @@ -595,13 +603,13 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size, { cnt= with_counters ? - get_counter_from_merged_element(top->key, cnt_ofs) : 1; - if (walk_action(top->key, cnt, walk_action_arg)) + get_counter_from_merged_element(top->current_key(), cnt_ofs) : 1; + if (walk_action(top->current_key(), cnt, walk_action_arg)) goto end; - top->key+= key_length; + top->advance_current_key(key_length); } - while (--top->mem_count); - bytes_read= read_to_buffer(file, top, key_length); + while (top->decrement_mem_count()); + bytes_read= read_to_buffer(file, top, &sort_param); if (unlikely(bytes_read == (ulong) -1)) goto end; } @@ -657,13 +665,14 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg) if (!(merge_buffer = (uchar *)my_malloc(buff_sz, MYF(MY_WME)))) return 1; if (buff_sz < full_size * (file_ptrs.elements + 1UL)) - res= merge(table, merge_buffer, buff_sz >= full_size * MERGEBUFF2) ; + res= merge(table, merge_buffer, buff_sz, + buff_sz >= full_size * MERGEBUFF2) ; if (!res) { res= merge_walk(merge_buffer, buff_sz, full_size, - (BUFFPEK *) file_ptrs.buffer, - (BUFFPEK *) file_ptrs.buffer + file_ptrs.elements, + (Merge_chunk *) file_ptrs.buffer, + (Merge_chunk *) file_ptrs.buffer + file_ptrs.elements, action, walk_action_arg, tree.compare, tree.custom_arg, &file, with_counters); } @@ -684,16 +693,18 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg) All params are 'IN': table the parameter to access sort context buff merge buffer + buff_size size of merge buffer without_last_merge TRUE <=> do not perform the last merge RETURN VALUE 0 OK <> 0 error */ -bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge) +bool Unique::merge(TABLE *table, uchar *buff, size_t buff_size, + bool without_last_merge) { IO_CACHE *outfile= &sort.io_cache; - BUFFPEK *file_ptr= (BUFFPEK*) file_ptrs.buffer; + Merge_chunk *file_ptr= (Merge_chunk*) file_ptrs.buffer; uint maxbuffer= file_ptrs.elements - 1; my_off_t save_pos; bool error= 1; @@ -724,7 +735,9 @@ bool 
Unique::merge(TABLE *table, uchar *buff, bool without_last_merge) sort_param.cmp_context.key_compare_arg= tree.custom_arg; /* Merge the buffers to one file, removing duplicates */ - if (merge_many_buff(&sort_param,buff,file_ptr,&maxbuffer,&file)) + if (merge_many_buff(&sort_param, + Bounds_checked_array<uchar>(buff, buff_size), + file_ptr,&maxbuffer,&file)) goto err; if (flush_io_cache(&file) || reinit_io_cache(&file,READ_CACHE,0L,0,0)) @@ -736,7 +749,8 @@ bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge) file_ptrs.elements= maxbuffer+1; return 0; } - if (merge_index(&sort_param, buff, file_ptr, maxbuffer, &file, outfile)) + if (merge_index(&sort_param, Bounds_checked_array<uchar>(buff, buff_size), + file_ptr, maxbuffer, &file, outfile)) goto err; error= 0; err: @@ -791,7 +805,7 @@ bool Unique::get(TABLE *table) MYF(MY_THREAD_SPECIFIC|MY_WME)))) DBUG_RETURN(1); - if (merge(table, sort_buffer, FALSE)) + if (merge(table, sort_buffer, buff_sz, FALSE)) goto err; rc= 0; diff --git a/sql/uniques.h b/sql/uniques.h index 654b3692aaa..f83eac36855 100644 --- a/sql/uniques.h +++ b/sql/uniques.h @@ -39,7 +39,7 @@ class Unique :public Sql_alloc uint min_dupl_count; /* always 0 for unions, > 0 for intersections */ bool with_counters; - bool merge(TABLE *table, uchar *buff, bool without_last_merge); + bool merge(TABLE *table, uchar *buff, size_t size, bool without_last_merge); bool flush(); public: diff --git a/storage/connect/mysql-test/connect/r/mysql_index.result b/storage/connect/mysql-test/connect/r/mysql_index.result index b0c88b16fef..5f8f41f6218 100644 --- a/storage/connect/mysql-test/connect/r/mysql_index.result +++ b/storage/connect/mysql-test/connect/r/mysql_index.result @@ -299,11 +299,11 @@ matricule nom prenom 7626 HENIN PHILIPPE 403 HERMITTE PHILIPPE 9096 HELENA PHILIPPE -SELECT matricule, nom, prenom FROM t2 ORDER BY nom LIMIT 10; +SELECT matricule, nom, prenom FROM t2 ORDER BY nom,prenom LIMIT 10; matricule nom prenom 4552 ABBADIE MONIQUE -6627 ABBAYE GERALD 307 ABBAYE ANNICK +6627 ABBAYE GERALD 7961 ABBE KATIA 1340 ABBE MICHELE 9270 ABBE SOPHIE diff --git a/storage/connect/mysql-test/connect/t/mysql_index.test b/storage/connect/mysql-test/connect/t/mysql_index.test index 74dc48f42c8..e36a827ac3c 100644 --- a/storage/connect/mysql-test/connect/t/mysql_index.test +++ b/storage/connect/mysql-test/connect/t/mysql_index.test @@ -120,7 +120,7 @@ SELECT matricule, nom, prenom FROM t2 WHERE nom <= 'ABEL' OR nom > 'YVON'; SELECT matricule, nom, prenom FROM t2 WHERE nom > 'HELEN' AND nom < 'HEROS'; SELECT matricule, nom, prenom FROM t2 WHERE nom BETWEEN 'HELEN' AND 'HEROS'; SELECT matricule, nom, prenom FROM t2 WHERE nom BETWEEN 'HELEN' AND 'HEROS' AND prenom = 'PHILIPPE'; -SELECT matricule, nom, prenom FROM t2 ORDER BY nom LIMIT 10; +SELECT matricule, nom, prenom FROM t2 ORDER BY nom,prenom LIMIT 10; SELECT a.nom, a.prenom, b.nom FROM t1 a STRAIGHT_JOIN t2 b ON a.prenom = b.prenom WHERE a.nom = 'FOCH' AND a.nom != b.nom; DROP TABLE t2;
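The Filesort_buffer comment in the patch above describes a single contiguous allocation in which packed, variable-length records grow from the start of the buffer while pointers to those records grow backwards from the end; the buffer is full when the two regions would meet. A minimal standalone C++ sketch of that allocation scheme follows. It is not part of the patch, and the names in it (DualEndedBuffer, append, record_offset) are illustrative only:

// Minimal standalone sketch (not the MariaDB implementation) of the
// dual-ended buffer layout described in the Filesort_buffer comment:
// packed records grow from the front of the raw buffer, while offsets to
// those records grow backwards from the end, "right-to-left".
#include <cstdio>
#include <cstring>
#include <vector>

struct DualEndedBuffer {
  std::vector<unsigned char> raw;
  size_t next_rec= 0;   // offset where the next record will be stored
  size_t num_ptrs= 0;   // how many record offsets are stored at the end

  explicit DualEndedBuffer(size_t sz) : raw(sz) {}

  size_t space_left() const {
    return raw.size() - next_rec - num_ptrs * sizeof(size_t);
  }

  // Append one (possibly packed, variable-length) record; false when full.
  bool append(const unsigned char *rec, size_t len) {
    if (space_left() < len + sizeof(size_t))
      return false;
    std::memcpy(&raw[next_rec], rec, len);
    // Store the record's offset in the "right-to-left" pointer array.
    size_t slot= raw.size() - (num_ptrs + 1) * sizeof(size_t);
    std::memcpy(&raw[slot], &next_rec, sizeof(size_t));
    next_rec+= len;
    num_ptrs++;
    return true;
  }

  // Offset of record number ix, counted in insertion order.
  size_t record_offset(size_t ix) const {
    size_t slot= raw.size() - (ix + 1) * sizeof(size_t);
    size_t off;
    std::memcpy(&off, &raw[slot], sizeof(size_t));
    return off;
  }
};

int main() {
  DualEndedBuffer buf(64);
  const char *rows[]= {"aa", "bbbb", "c"};
  for (const char *r : rows)
    buf.append(reinterpret_cast<const unsigned char*>(r), std::strlen(r) + 1);
  for (size_t i= 0; i < 3; i++)
    std::printf("record %zu at offset %zu: %s\n", i, buf.record_offset(i),
                reinterpret_cast<const char*>(&buf.raw[buf.record_offset(i)]));
  return 0;
}

In the real class the same idea is what lets more packed records fit than the pessimistic fixed-size estimate made when the buffer was allocated, as the patch's comment notes.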
[Commits] 359d91aaeec: MDEV-19680:: Assertion `!table || (!table->read_set || bitmap_is_set(table->read_set, field_index) || (!(ptr >= table->record[0] && ptr < table->record[0] + table->s->reclength)))' or alike failed upon SELECT with mix of functions from simple view
by Varun 26 Dec '19

26 Dec '19
revision-id: 359d91aaeec25825b51b0a00f52f272edad7d6cc (mariadb-10.1.39-254-g359d91aaeec) parent(s): 9f7fcb9f25238945e4fb8cc1a1f98e56457b714f author: Varun Gupta committer: Varun Gupta timestamp: 2019-12-26 17:36:32 +0530 message: MDEV-19680:: Assertion `!table || (!table->read_set || bitmap_is_set(table->read_set, field_index) || (!(ptr >= table->record[0] && ptr < table->record[0] + table->s->reclength)))' or alike failed upon SELECT with mix of functions from simple view Set read_set bitmap for view from the JOIN::all_fields list instead of JOIN::fields_list as split_sum_func would have added items to the all_fields list. --- mysql-test/r/func_misc.result | 13 +++++++++++++ mysql-test/t/func_misc.test | 15 +++++++++++++++ sql/sql_lex.cc | 2 +- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/func_misc.result b/mysql-test/r/func_misc.result index 287a70f1f73..89f6102ef83 100644 --- a/mysql-test/r/func_misc.result +++ b/mysql-test/r/func_misc.result @@ -1479,3 +1479,16 @@ EXECUTE stmt; x x DEALLOCATE PREPARE stmt; +# +# MDEV-19680: Assertion `!table || (!table->read_set || bitmap_is_set(table->read_set, field_index) || +# (!(ptr >= table->record[0] && ptr < table->record[0] + table->s->reclength)))' +# or alike failed upon SELECT with mix of functions from simple view +# +CREATE TABLE t1 (a INT) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1),(2); +CREATE VIEW v1 AS SELECT * FROM t1; +SELECT ISNULL( BENCHMARK(1, MIN(a))) FROM v1; +ISNULL( BENCHMARK(1, MIN(a))) +0 +DROP VIEW v1; +DROP TABLE t1; diff --git a/mysql-test/t/func_misc.test b/mysql-test/t/func_misc.test index a8da9068ab8..6412980a5fa 100644 --- a/mysql-test/t/func_misc.test +++ b/mysql-test/t/func_misc.test @@ -1154,3 +1154,18 @@ DROP PROCEDURE p1; PREPARE stmt FROM "SELECT 'x' ORDER BY NAME_CONST( 'f', 'foo' )"; EXECUTE stmt; DEALLOCATE PREPARE stmt; + +--echo # +--echo # MDEV-19680: Assertion `!table || (!table->read_set || bitmap_is_set(table->read_set, field_index) || +--echo # (!(ptr >= table->record[0] && ptr < table->record[0] + table->s->reclength)))' +--echo # or alike failed upon SELECT with mix of functions from simple view +--echo # + +CREATE TABLE t1 (a INT) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1),(2); +CREATE VIEW v1 AS SELECT * FROM t1; + +SELECT ISNULL( BENCHMARK(1, MIN(a))) FROM v1; + +DROP VIEW v1; +DROP TABLE t1; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index a36a19357eb..1cd2a369d7a 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -4177,7 +4177,7 @@ void SELECT_LEX::update_used_tables() } Item *item; - List_iterator_fast<Item> it(join->fields_list); + List_iterator_fast<Item> it(join->all_fields); while ((item= it++)) { item->update_used_tables();
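The fix above works because split_sum_func appends hidden items (here the MIN(a) pulled out of ISNULL(BENCHMARK(1, MIN(a)))) to JOIN::all_fields but not to JOIN::fields_list, so walking only fields_list never marks column a in the view's read_set. The toy C++ sketch below models that superset relationship; it is illustrative only, not MariaDB source, and every name in it is made up:

// Toy model: the visible select list vs. the superset that also holds
// items added by a split step. Only the split-out aggregate reads the
// base column directly, so walking the visible list alone misses it.
#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct Item { std::string name; bool reads_column_a; };

int main() {
  Item outer= {"ISNULL(BENCHMARK(1,MIN(a)))", false};
  Item hidden_min= {"MIN(a)", true};

  std::vector<Item> fields_list= {outer};             // what the old code walked
  std::vector<Item> all_fields= {hidden_min, outer};  // superset incl. hidden items

  auto build_read_set= [](const std::vector<Item> &items) {
    std::set<std::string> read_set;
    for (const Item &it : items)
      if (it.reads_column_a)
        read_set.insert("a");
    return read_set;
  };

  std::printf("fields_list marks 'a': %s\n",
              build_read_set(fields_list).count("a") ? "yes" : "no");  // no
  std::printf("all_fields  marks 'a': %s\n",
              build_read_set(all_fields).count("a") ? "yes" : "no");   // yes
  return 0;
}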
[Commits] 714762ddb7b: MDEV-18648: slave_parallel_mode= optimistic default in 10.5
by sujatha 23 Dec '19

23 Dec '19
revision-id: 714762ddb7ba0233ed584ed896e8f688fb085aeb (mariadb-10.5.0-79-g714762ddb7b) parent(s): 7e10e80b8faab51139588a985a684df960ab81b9 author: Sujatha committer: Sujatha timestamp: 2019-12-23 17:48:01 +0530 message: MDEV-18648: slave_parallel_mode= optimistic default in 10.5 Description: ============ To change 'CONSERVATIVE' @@global.slave_parallel_mode default to 'OPTIMISTIC' in 10.5. @sql/sys_vars.cc Changed default parallel_mode to 'OPTIMISTIC' @sql/rpl_filter.cc Changed default parallel_mode to 'OPTIMISTIC' @sql/mysqld.cc Removed the initialization of 'SLAVE_PARALLEL_CONSERVATIVE' to 'opt_slave_parallel_mode' variable. @mysql-test/suite/rpl/t/rpl_parallel_mdev6589.test @mysql-test/suite/rpl/t/rpl_mdev6386.test Added 'mtr' suppression to ignore 'ER_PRIOR_COMMIT_FAILED'. In case of 'OPTIMISTIC' mode if a transaction gets killed during "wait_for_prior_commit" it results in above error "1964". Hence suppression needs to be added for this error. @mysql-test/suite/rpl/t/rpl_parallel_conflicts.test Test has a 'slave.opt' which explicitly sets slave_parallel_mode to 'conservative'. When the test ends this mode conflicts with new default mode. Hence check test case reports an error. The 'slave.opt' is removed and options are set and reset within test. @mysql-test/suite/multi_source/info_logs.result @mysql-test/suite/multi_source/reset_slave.result @mysql-test/suite/multi_source/simple.result Result content mismatch in "show slave status" output. This is expected as new slave_parallel_mode='OPTIMISTIC'. @mysql-test/include/check-testcase.test Updated default 'slave_parallel_mode' to 'optimistic'. Refactored rpl_parallel.test into following test cases. Test case 1: @mysql-test/suite/rpl/t/rpl_parallel_domain.test Test case 2: @mysql-test/suite/rpl/t/rpl_parallel_domain_slave_single_grp.test Test case 3: @mysql-test/suite/rpl/t/rpl_parallel_single_grpcmt.test Test case 4: @mysql-test/suite/rpl/t/rpl_parallel_stop_slave.test Test case 5: @mysql-test/suite/rpl/t/rpl_parallel_slave_bgc_kill.test Test case 6: @mysql-test/suite/rpl/t/rpl_parallel_gco_wait_kill.test Test case 7: @mysql-test/suite/rpl/t/rpl_parallel_free_deferred_event.test Test case 8: @mysql-test/suite/rpl/t/rpl_parallel_missed_error_handling.test Test case 9: @mysql-test/suite/rpl/t/rpl_parallel_innodb_lock_conflict.test Test case 10: @mysql-test/suite/rpl/t/rpl_parallel_gtid_slave_pos_update_fail.test Test case 11: @mysql-test/suite/rpl/t/rpl_parallel_wrong_exec_master_pos.test Test case 12: @mysql-test/suite/rpl/t/rpl_parallel_partial_binlog_trans.test Test case 13: @mysql-test/suite/rpl/t/rpl_parallel_ignore_error_on_rotate.test Test case 14: @mysql-test/suite/rpl/t/rpl_parallel_wrong_binlog_order.test Test case 15: @mysql-test/suite/rpl/t/rpl_parallel_incorrect_relay_pos.test Test case 16: @mysql-test/suite/rpl/t/rpl_parallel_retry_deadlock.test Test case 17: @mysql-test/suite/rpl/t/rpl_parallel_deadlock_corrupt_binlog.test Test case 18: @mysql-test/suite/rpl/t/rpl_parallel_mode.test Test case 19: @mysql-test/suite/rpl/t/rpl_parallel_analyze_table_hang.test Test case 20: @mysql-test/suite/rpl/t/rpl_parallel_record_gtid_wakeup.test Test case 21: @mysql-test/suite/rpl/t/rpl_parallel_stop_on_con_kill.test Test case 22: @mysql-test/suite/rpl/t/rpl_parallel_rollback_assert.test --- mysql-test/include/check-testcase.test | 2 +- .../suite/binlog_encryption/rpl_parallel.result | 1691 --------------- .../suite/binlog_encryption/rpl_parallel.test | 1 - .../rpl_parallel_analyze_table_hang.result | 51 + 
.../rpl_parallel_analyze_table_hang.test | 1 + .../rpl_parallel_deadlock_corrupt_binlog.result | 93 + .../rpl_parallel_deadlock_corrupt_binlog.test | 1 + .../binlog_encryption/rpl_parallel_domain.result | 71 + .../binlog_encryption/rpl_parallel_domain.test | 1 + .../rpl_parallel_domain_slave_single_grp.result | 101 + .../rpl_parallel_domain_slave_single_grp.test | 1 + .../rpl_parallel_free_deferred_event.result | 44 + .../rpl_parallel_free_deferred_event.test | 1 + .../rpl_parallel_gco_wait_kill.result | 257 +++ .../rpl_parallel_gco_wait_kill.test | 1 + .../rpl_parallel_gtid_slave_pos_update_fail.result | 65 + .../rpl_parallel_gtid_slave_pos_update_fail.test | 1 + .../rpl_parallel_ignore_error_on_rotate.result | 74 + .../rpl_parallel_ignore_error_on_rotate.test | 1 + .../rpl_parallel_incorrect_relay_pos.result | 75 + .../rpl_parallel_incorrect_relay_pos.test | 1 + .../rpl_parallel_innodb_lock_conflict.result | 79 + .../rpl_parallel_innodb_lock_conflict.test | 1 + .../rpl_parallel_missed_error_handling.result | 65 + .../rpl_parallel_missed_error_handling.test | 1 + .../binlog_encryption/rpl_parallel_mode.result | 75 + .../suite/binlog_encryption/rpl_parallel_mode.test | 1 + .../rpl_parallel_partial_binlog_trans.result | 51 + .../rpl_parallel_partial_binlog_trans.test | 1 + .../rpl_parallel_record_gtid_wakeup.result | 48 + .../rpl_parallel_record_gtid_wakeup.test | 1 + .../rpl_parallel_retry_deadlock.result | 192 ++ .../rpl_parallel_retry_deadlock.test | 1 + .../rpl_parallel_rollback_assert.result | 45 + .../rpl_parallel_rollback_assert.test | 1 + .../rpl_parallel_single_grpcmt.result | 161 ++ .../rpl_parallel_single_grpcmt.test | 1 + .../rpl_parallel_slave_bgc_kill.result | 323 +++ .../rpl_parallel_slave_bgc_kill.test | 1 + .../rpl_parallel_stop_on_con_kill.result | 102 + .../rpl_parallel_stop_on_con_kill.test | 1 + .../rpl_parallel_stop_slave.result | 85 + .../binlog_encryption/rpl_parallel_stop_slave.test | 1 + .../rpl_parallel_wrong_binlog_order.result | 75 + .../rpl_parallel_wrong_binlog_order.test | 1 + .../rpl_parallel_wrong_exec_master_pos.result | 34 + .../rpl_parallel_wrong_exec_master_pos.test | 1 + mysql-test/suite/multi_source/info_logs.result | 8 +- mysql-test/suite/multi_source/reset_slave.result | 4 +- mysql-test/suite/multi_source/simple.result | 14 +- mysql-test/suite/rpl/include/rpl_parallel.inc | 2219 -------------------- .../include/rpl_parallel_analyze_table_hang.inc | 73 + .../rpl_parallel_deadlock_corrupt_binlog.inc | 79 + .../suite/rpl/include/rpl_parallel_domain.inc | 87 + .../rpl_parallel_domain_slave_single_grp.inc | 128 ++ .../include/rpl_parallel_free_deferred_event.inc | 67 + .../rpl/include/rpl_parallel_gco_wait_kill.inc | 366 ++++ .../rpl_parallel_gtid_slave_pos_update_fail.inc | 98 + .../rpl_parallel_ignore_error_on_rotate.inc | 96 + .../include/rpl_parallel_incorrect_relay_pos.inc | 128 ++ .../include/rpl_parallel_innodb_lock_conflict.inc | 107 + .../include/rpl_parallel_missed_error_handling.inc | 87 + mysql-test/suite/rpl/include/rpl_parallel_mode.inc | 87 + .../include/rpl_parallel_partial_binlog_trans.inc | 71 + .../include/rpl_parallel_record_gtid_wakeup.inc | 72 + .../rpl/include/rpl_parallel_retry_deadlock.inc | 281 +++ .../rpl/include/rpl_parallel_rollback_assert.inc | 62 + .../rpl/include/rpl_parallel_single_grpcmt.inc | 170 ++ .../rpl/include/rpl_parallel_slave_bgc_kill.inc | 454 ++++ .../rpl/include/rpl_parallel_stop_on_con_kill.inc | 129 ++ .../suite/rpl/include/rpl_parallel_stop_slave.inc | 114 + .../include/rpl_parallel_wrong_binlog_order.inc 
| 91 + .../include/rpl_parallel_wrong_exec_master_pos.inc | 56 + mysql-test/suite/rpl/r/rpl_delayed_slave.result | 2 +- mysql-test/suite/rpl/r/rpl_mdev6386.result | 1 + mysql-test/suite/rpl/r/rpl_parallel.result | 1690 --------------- .../rpl/r/rpl_parallel_analyze_table_hang.result | 51 + .../suite/rpl/r/rpl_parallel_conflicts.result | 9 + .../r/rpl_parallel_deadlock_corrupt_binlog.result | 93 + mysql-test/suite/rpl/r/rpl_parallel_domain.result | 71 + .../r/rpl_parallel_domain_slave_single_grp.result | 101 + .../rpl/r/rpl_parallel_free_deferred_event.result | 44 + .../suite/rpl/r/rpl_parallel_gco_wait_kill.result | 257 +++ .../rpl_parallel_gtid_slave_pos_update_fail.result | 65 + .../r/rpl_parallel_ignore_error_on_rotate.result | 74 + .../rpl/r/rpl_parallel_incorrect_relay_pos.result | 75 + .../rpl/r/rpl_parallel_innodb_lock_conflict.result | 79 + .../suite/rpl/r/rpl_parallel_mdev6589.result | 1 + .../r/rpl_parallel_missed_error_handling.result | 65 + mysql-test/suite/rpl/r/rpl_parallel_mode.result | 75 + .../rpl/r/rpl_parallel_partial_binlog_trans.result | 51 + .../rpl/r/rpl_parallel_record_gtid_wakeup.result | 48 + .../suite/rpl/r/rpl_parallel_retry_deadlock.result | 192 ++ .../rpl/r/rpl_parallel_rollback_assert.result | 45 + .../suite/rpl/r/rpl_parallel_single_grpcmt.result | 160 ++ .../suite/rpl/r/rpl_parallel_slave_bgc_kill.result | 323 +++ .../rpl/r/rpl_parallel_stop_on_con_kill.result | 102 + .../suite/rpl/r/rpl_parallel_stop_slave.result | 85 + .../rpl/r/rpl_parallel_wrong_binlog_order.result | 75 + .../r/rpl_parallel_wrong_exec_master_pos.result | 34 + .../suite/rpl/t/rpl_delayed_slave.combinations | 1 - mysql-test/suite/rpl/t/rpl_mdev6386.test | 1 + mysql-test/suite/rpl/t/rpl_parallel.test | 1 - .../rpl/t/rpl_parallel_analyze_table_hang.test | 1 + .../suite/rpl/t/rpl_parallel_conflicts-slave.opt | 1 - mysql-test/suite/rpl/t/rpl_parallel_conflicts.test | 10 +- .../t/rpl_parallel_deadlock_corrupt_binlog.test | 1 + mysql-test/suite/rpl/t/rpl_parallel_domain.test | 1 + .../t/rpl_parallel_domain_slave_single_grp.test | 1 + .../rpl/t/rpl_parallel_free_deferred_event.test | 1 + .../suite/rpl/t/rpl_parallel_gco_wait_kill.test | 1 + .../t/rpl_parallel_gtid_slave_pos_update_fail.test | 1 + .../rpl/t/rpl_parallel_ignore_error_on_rotate.test | 1 + .../rpl/t/rpl_parallel_incorrect_relay_pos.test | 1 + .../rpl/t/rpl_parallel_innodb_lock_conflict.test | 1 + mysql-test/suite/rpl/t/rpl_parallel_mdev6589.test | 1 + .../rpl/t/rpl_parallel_missed_error_handling.test | 1 + mysql-test/suite/rpl/t/rpl_parallel_mode.test | 1 + .../rpl/t/rpl_parallel_partial_binlog_trans.test | 1 + .../rpl/t/rpl_parallel_record_gtid_wakeup.test | 1 + .../suite/rpl/t/rpl_parallel_retry_deadlock.test | 1 + .../suite/rpl/t/rpl_parallel_rollback_assert.test | 1 + .../suite/rpl/t/rpl_parallel_single_grpcmt.test | 1 + .../suite/rpl/t/rpl_parallel_slave_bgc_kill.test | 1 + .../suite/rpl/t/rpl_parallel_stop_on_con_kill.test | 1 + .../suite/rpl/t/rpl_parallel_stop_slave.test | 1 + .../rpl/t/rpl_parallel_wrong_binlog_order.test | 1 + .../rpl/t/rpl_parallel_wrong_exec_master_pos.test | 1 + sql/mysqld.cc | 2 +- sql/rpl_filter.cc | 2 +- sql/sys_vars.cc | 2 +- 131 files changed, 7318 insertions(+), 5623 deletions(-) diff --git a/mysql-test/include/check-testcase.test b/mysql-test/include/check-testcase.test index 514c28b0e00..e984c4dc497 100644 --- a/mysql-test/include/check-testcase.test +++ b/mysql-test/include/check-testcase.test @@ -66,7 +66,7 @@ if ($tmp) --echo Gtid_IO_Pos # --echo Replicate_Do_Domain_Ids --echo 
Replicate_Ignore_Domain_Ids - --echo Parallel_Mode conservative + --echo Parallel_Mode optimistic --echo SQL_Delay 0 --echo SQL_Remaining_Delay NULL --echo Slave_SQL_Running_State diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel.result b/mysql-test/suite/binlog_encryption/rpl_parallel.result deleted file mode 100644 index 5f78a378829..00000000000 --- a/mysql-test/suite/binlog_encryption/rpl_parallel.result +++ /dev/null @@ -1,1691 +0,0 @@ -include/master-slave.inc -[connection master] -connection server_2; -SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; -SET GLOBAL slave_parallel_threads=10; -ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=10; -SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; -IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) -OK -CHANGE MASTER TO master_use_gtid=slave_pos; -include/start_slave.inc -SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; -IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) -OK -include/stop_slave.inc -SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; -IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) -OK -include/start_slave.inc -SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; -IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) -OK -*** Test long-running query in domain 1 can run in parallel with short queries in domain 0 *** -connection server_1; -ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; -CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; -CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; -INSERT INTO t1 VALUES (1); -INSERT INTO t2 VALUES (1); -connection server_2; -connect con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,; -LOCK TABLE t1 WRITE; -connection server_1; -SET gtid_domain_id=1; -INSERT INTO t1 VALUES (2); -SET gtid_domain_id=0; -INSERT INTO t2 VALUES (2); -INSERT INTO t2 VALUES (3); -BEGIN; -INSERT INTO t2 VALUES (4); -INSERT INTO t2 VALUES (5); -COMMIT; -INSERT INTO t2 VALUES (6); -connection server_2; -SELECT * FROM t2 ORDER by a; -a -1 -2 -3 -4 -5 -6 -connection con_temp1; -SELECT * FROM t1; -a -1 -UNLOCK TABLES; -connection server_2; -SELECT * FROM t1 ORDER BY a; -a -1 -2 -*** Test two transactions in different domains committed in opposite order on slave but in a single group commit. 
*** -connection server_2; -include/stop_slave.inc -connection server_1; -SET sql_log_bin=0; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -RETURN x; -END -|| -SET sql_log_bin=1; -SET @old_format= @@SESSION.binlog_format; -SET binlog_format='statement'; -SET gtid_domain_id=1; -INSERT INTO t2 VALUES (foo(10, -'commit_before_enqueue SIGNAL ready1 WAIT_FOR cont1', -'commit_after_release_LOCK_prepare_ordered SIGNAL ready2')); -connection server_2; -FLUSH LOGS; -SET sql_log_bin=0; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -IF d1 != '' THEN -SET debug_sync = d1; -END IF; -IF d2 != '' THEN -SET debug_sync = d2; -END IF; -RETURN x; -END -|| -SET sql_log_bin=1; -SET @old_format=@@GLOBAL.binlog_format; -SET GLOBAL binlog_format=statement; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -SET debug_sync='now WAIT_FOR ready1'; -connection server_1; -SET gtid_domain_id=2; -INSERT INTO t2 VALUES (foo(11, -'commit_before_enqueue SIGNAL ready3 WAIT_FOR cont3', -'commit_after_release_LOCK_prepare_ordered SIGNAL ready4 WAIT_FOR cont4')); -SET gtid_domain_id=0; -SELECT * FROM t2 WHERE a >= 10 ORDER BY a; -a -10 -11 -connection server_2; -SET debug_sync='now WAIT_FOR ready3'; -SET debug_sync='now SIGNAL cont3'; -SET debug_sync='now WAIT_FOR ready4'; -SET debug_sync='now SIGNAL cont1'; -SET debug_sync='now WAIT_FOR ready2'; -SET debug_sync='now SIGNAL cont4'; -SELECT * FROM t2 WHERE a >= 10 ORDER BY a; -a -10 -11 -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002 -slave-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# -slave-bin.000002 # Query # # use `test`; INSERT INTO t2 VALUES (foo(11, -'commit_before_enqueue SIGNAL ready3 WAIT_FOR cont3', -'commit_after_release_LOCK_prepare_ordered SIGNAL ready4 WAIT_FOR cont4')) -slave-bin.000002 # Xid # # COMMIT /* XID */ -slave-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# -slave-bin.000002 # Query # # use `test`; INSERT INTO t2 VALUES (foo(10, -'commit_before_enqueue SIGNAL ready1 WAIT_FOR cont1', -'commit_after_release_LOCK_prepare_ordered SIGNAL ready2')) -slave-bin.000002 # Xid # # COMMIT /* XID */ -FLUSH LOGS; -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET debug_sync='RESET'; -include/start_slave.inc -*** Test that group-committed transactions on the master can replicate in parallel on the slave. 
*** -connection server_1; -SET debug_sync='RESET'; -FLUSH LOGS; -CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; -INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7); -connection server_2; -connection con_temp1; -BEGIN; -INSERT INTO t3 VALUES (2,102); -connect con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,; -BEGIN; -INSERT INTO t3 VALUES (4,104); -connect con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (2, foo(12, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connect con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (4, foo(14, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -connect con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (6, foo(16, -'group_commit_waiting_for_prior SIGNAL slave_queued3', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; -connection con_temp3; -connection con_temp4; -connection con_temp5; -SET debug_sync='RESET'; -connection server_1; -SELECT * FROM t3 ORDER BY a; -a b -1 1 -2 12 -3 3 -4 14 -5 5 -6 16 -7 7 -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000002 # Binlog_checkpoint # # master-bin.000001 -master-bin.000002 # Binlog_checkpoint # # master-bin.000002 -master-bin.000002 # Gtid # # GTID #-#-# -master-bin.000002 # Query # # use `test`; CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB -master-bin.000002 # Gtid # # BEGIN GTID #-#-# -master-bin.000002 # Query # # use `test`; INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7) -master-bin.000002 # Xid # # COMMIT /* XID */ -master-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# -master-bin.000002 # Query # # use `test`; INSERT INTO t3 VALUES (2, foo(12, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', -'')) -master-bin.000002 # Xid # # COMMIT /* XID */ -master-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# -master-bin.000002 # Query # # use `test`; INSERT INTO t3 VALUES (4, foo(14, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', -'')) -master-bin.000002 # Xid # # COMMIT /* XID */ -master-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# -master-bin.000002 # Query # # use `test`; INSERT INTO t3 VALUES (6, foo(16, -'group_commit_waiting_for_prior SIGNAL slave_queued3', -'')) -master-bin.000002 # Xid # # COMMIT /* XID */ -connection server_2; -SET debug_sync='now WAIT_FOR slave_queued3'; -connection con_temp1; -ROLLBACK; -connection server_2; -SET debug_sync='now WAIT_FOR slave_queued1'; -connection con_temp2; -ROLLBACK; -connection server_2; -SET debug_sync='now WAIT_FOR slave_queued2'; -SET debug_sync='now SIGNAL slave_cont1'; -SELECT * FROM t3 ORDER BY a; -a b -1 1 -2 12 -3 3 -4 14 -5 5 -6 16 -7 7 -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -slave-bin.000003 # Binlog_checkpoint # # slave-bin.000003 -slave-bin.000003 # Gtid # # GTID #-#-# -slave-bin.000003 # 
Query # # use `test`; CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB -slave-bin.000003 # Gtid # # BEGIN GTID #-#-# -slave-bin.000003 # Query # # use `test`; INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7) -slave-bin.000003 # Xid # # COMMIT /* XID */ -slave-bin.000003 # Gtid # # BEGIN GTID #-#-# cid=# -slave-bin.000003 # Query # # use `test`; INSERT INTO t3 VALUES (2, foo(12, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', -'')) -slave-bin.000003 # Xid # # COMMIT /* XID */ -slave-bin.000003 # Gtid # # BEGIN GTID #-#-# cid=# -slave-bin.000003 # Query # # use `test`; INSERT INTO t3 VALUES (4, foo(14, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', -'')) -slave-bin.000003 # Xid # # COMMIT /* XID */ -slave-bin.000003 # Gtid # # BEGIN GTID #-#-# cid=# -slave-bin.000003 # Query # # use `test`; INSERT INTO t3 VALUES (6, foo(16, -'group_commit_waiting_for_prior SIGNAL slave_queued3', -'')) -slave-bin.000003 # Xid # # COMMIT /* XID */ -*** Test STOP SLAVE in parallel mode *** -connection server_2; -include/stop_slave.inc -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -connection server_1; -SET binlog_direct_non_transactional_updates=0; -SET sql_log_bin=0; -CALL mtr.add_suppression("Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction"); -SET sql_log_bin=1; -BEGIN; -INSERT INTO t2 VALUES (20); -INSERT INTO t1 VALUES (20); -INSERT INTO t2 VALUES (21); -INSERT INTO t3 VALUES (20, 20); -COMMIT; -INSERT INTO t3 VALUES(21, 21); -INSERT INTO t3 VALUES(22, 22); -SET binlog_format=@old_format; -connection con_temp1; -BEGIN; -INSERT INTO t2 VALUES (21); -connection server_2; -START SLAVE; -connection con_temp2; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; -STOP SLAVE; -connection con_temp1; -SET debug_sync='now WAIT_FOR wait_for_done_waiting'; -ROLLBACK; -connection con_temp2; -SET GLOBAL debug_dbug=@old_dbug; -SET debug_sync='RESET'; -connection server_2; -include/wait_for_slave_to_stop.inc -SELECT * FROM t1 WHERE a >= 20 ORDER BY a; -a -20 -SELECT * FROM t2 WHERE a >= 20 ORDER BY a; -a -20 -21 -SELECT * FROM t3 WHERE a >= 20 ORDER BY a; -a b -20 20 -include/start_slave.inc -SELECT * FROM t1 WHERE a >= 20 ORDER BY a; -a -20 -SELECT * FROM t2 WHERE a >= 20 ORDER BY a; -a -20 -21 -SELECT * FROM t3 WHERE a >= 20 ORDER BY a; -a b -20 20 -21 21 -22 22 -connection server_2; -include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** Test killing slave threads at various wait points *** -*** 1. 
Test killing transaction waiting in commit for previous transaction to commit *** -connection con_temp3; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (31, foo(31, -'commit_before_prepare_ordered WAIT_FOR t2_waiting', -'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con_temp4; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -BEGIN; -INSERT INTO t3 VALUES (32, foo(32, -'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', -'')); -INSERT INTO t3 VALUES (33, foo(33, -'group_commit_waiting_for_prior SIGNAL t2_waiting', -'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); -COMMIT; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -connection con_temp5; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (34, foo(34, -'', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; -connection con_temp3; -connection con_temp4; -connection con_temp5; -connection server_1; -SELECT * FROM t3 WHERE a >= 30 ORDER BY a; -a b -31 31 -32 32 -33 33 -34 34 -SET debug_sync='RESET'; -connection server_2; -SET sql_log_bin=0; -CALL mtr.add_suppression("Query execution was interrupted"); -CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); -CALL mtr.add_suppression("Slave: Connection was killed"); -SET sql_log_bin=1; -SET debug_sync='now WAIT_FOR t2_query'; -SET debug_sync='now SIGNAL t2_cont'; -SET debug_sync='now WAIT_FOR t1_ready'; -KILL THD_ID; -SET debug_sync='now WAIT_FOR t2_killed'; -SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] -STOP SLAVE IO_THREAD; -SELECT * FROM t3 WHERE a >= 30 ORDER BY a; -a b -31 31 -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_1; -INSERT INTO t3 VALUES (39,0); -connection server_2; -include/start_slave.inc -SELECT * FROM t3 WHERE a >= 30 ORDER BY a; -a b -31 31 -32 32 -33 33 -34 34 -39 0 -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -IF d1 != '' THEN -SET debug_sync = d1; -END IF; -IF d2 != '' THEN -SET debug_sync = d2; -END IF; -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_2; -include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** 2. 
Same as (1), but without restarting IO thread after kill of SQL threads *** -connection con_temp3; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (41, foo(41, -'commit_before_prepare_ordered WAIT_FOR t2_waiting', -'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con_temp4; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -BEGIN; -INSERT INTO t3 VALUES (42, foo(42, -'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', -'')); -INSERT INTO t3 VALUES (43, foo(43, -'group_commit_waiting_for_prior SIGNAL t2_waiting', -'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); -COMMIT; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -connection con_temp5; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (44, foo(44, -'', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; -connection con_temp3; -connection con_temp4; -connection con_temp5; -connection server_1; -SELECT * FROM t3 WHERE a >= 40 ORDER BY a; -a b -41 41 -42 42 -43 43 -44 44 -SET debug_sync='RESET'; -connection server_2; -SET debug_sync='now WAIT_FOR t2_query'; -SET debug_sync='now SIGNAL t2_cont'; -SET debug_sync='now WAIT_FOR t1_ready'; -KILL THD_ID; -SET debug_sync='now WAIT_FOR t2_killed'; -SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_1; -INSERT INTO t3 VALUES (49,0); -connection server_2; -START SLAVE SQL_THREAD; -SELECT * FROM t3 WHERE a >= 40 ORDER BY a; -a b -41 41 -42 42 -43 43 -44 44 -49 0 -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -IF d1 != '' THEN -SET debug_sync = d1; -END IF; -IF d2 != '' THEN -SET debug_sync = d2; -END IF; -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_2; -include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** 3. 
Same as (2), but not using gtid mode *** -connection server_2; -include/stop_slave.inc -CHANGE MASTER TO master_use_gtid=no; -include/start_slave.inc -connection server_1; -connection con_temp3; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (51, foo(51, -'commit_before_prepare_ordered WAIT_FOR t2_waiting', -'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con_temp4; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -BEGIN; -INSERT INTO t3 VALUES (52, foo(52, -'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', -'')); -INSERT INTO t3 VALUES (53, foo(53, -'group_commit_waiting_for_prior SIGNAL t2_waiting', -'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); -COMMIT; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -connection con_temp5; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (54, foo(54, -'', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; -connection con_temp3; -connection con_temp4; -connection con_temp5; -connection server_1; -SELECT * FROM t3 WHERE a >= 50 ORDER BY a; -a b -51 51 -52 52 -53 53 -54 54 -SET debug_sync='RESET'; -connection server_2; -SET debug_sync='now WAIT_FOR t2_query'; -SET debug_sync='now SIGNAL t2_cont'; -SET debug_sync='now WAIT_FOR t1_ready'; -KILL THD_ID; -SET debug_sync='now WAIT_FOR t2_killed'; -SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] -SELECT * FROM t3 WHERE a >= 50 ORDER BY a; -a b -51 51 -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_1; -INSERT INTO t3 VALUES (59,0); -connection server_2; -START SLAVE SQL_THREAD; -SELECT * FROM t3 WHERE a >= 50 ORDER BY a; -a b -51 51 -52 52 -53 53 -54 54 -59 0 -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -IF d1 != '' THEN -SET debug_sync = d1; -END IF; -IF d2 != '' THEN -SET debug_sync = d2; -END IF; -RETURN x; -END -|| -SET sql_log_bin=1; -include/stop_slave.inc -CHANGE MASTER TO master_use_gtid=slave_pos; -include/start_slave.inc -connection server_2; -include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=4; -include/start_slave.inc -*** 4. 
Test killing thread that is waiting to start transaction until previous transaction commits *** -connection server_1; -SET binlog_format=statement; -SET gtid_domain_id=2; -BEGIN; -INSERT INTO t3 VALUES (70, foo(70, -'rpl_parallel_start_waiting_for_prior SIGNAL t4_waiting', '')); -INSERT INTO t3 VALUES (60, foo(60, -'ha_write_row_end SIGNAL d2_query WAIT_FOR d2_cont2', -'rpl_parallel_end_of_group SIGNAL d2_done WAIT_FOR d2_cont')); -COMMIT; -SET gtid_domain_id=0; -connection server_2; -SET debug_sync='now WAIT_FOR d2_query'; -connection server_1; -SET gtid_domain_id=1; -BEGIN; -INSERT INTO t3 VALUES (61, foo(61, -'rpl_parallel_start_waiting_for_prior SIGNAL t3_waiting', -'rpl_parallel_start_waiting_for_prior_killed SIGNAL t3_killed')); -INSERT INTO t3 VALUES (62, foo(62, -'ha_write_row_end SIGNAL d1_query WAIT_FOR d1_cont2', -'rpl_parallel_end_of_group SIGNAL d1_done WAIT_FOR d1_cont')); -COMMIT; -SET gtid_domain_id=0; -connection server_2; -SET debug_sync='now WAIT_FOR d1_query'; -connection server_1; -SET gtid_domain_id=0; -INSERT INTO t3 VALUES (63, foo(63, -'ha_write_row_end SIGNAL d0_query WAIT_FOR d0_cont2', -'rpl_parallel_end_of_group SIGNAL d0_done WAIT_FOR d0_cont')); -connection server_2; -SET debug_sync='now WAIT_FOR d0_query'; -connection server_1; -SET gtid_domain_id=3; -BEGIN; -INSERT INTO t3 VALUES (68, foo(68, -'rpl_parallel_start_waiting_for_prior SIGNAL t2_waiting', '')); -INSERT INTO t3 VALUES (69, foo(69, -'ha_write_row_end SIGNAL d3_query WAIT_FOR d3_cont2', -'rpl_parallel_end_of_group SIGNAL d3_done WAIT_FOR d3_cont')); -COMMIT; -SET gtid_domain_id=0; -connection server_2; -SET debug_sync='now WAIT_FOR d3_query'; -SET debug_sync='now SIGNAL d2_cont2'; -SET debug_sync='now WAIT_FOR d2_done'; -SET debug_sync='now SIGNAL d1_cont2'; -SET debug_sync='now WAIT_FOR d1_done'; -SET debug_sync='now SIGNAL d0_cont2'; -SET debug_sync='now WAIT_FOR d0_done'; -SET debug_sync='now SIGNAL d3_cont2'; -SET debug_sync='now WAIT_FOR d3_done'; -connection con_temp3; -SET binlog_format=statement; -INSERT INTO t3 VALUES (64, foo(64, -'rpl_parallel_before_mark_start_commit SIGNAL t1_waiting WAIT_FOR t1_cont', '')); -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2 WAIT_FOR master_cont2'; -INSERT INTO t3 VALUES (65, foo(65, '', '')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -connection con_temp4; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -INSERT INTO t3 VALUES (66, foo(66, '', '')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued3'; -connection con_temp5; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued4'; -INSERT INTO t3 VALUES (67, foo(67, '', '')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued4'; -SET debug_sync='now SIGNAL master_cont2'; -connection con_temp3; -connection con_temp4; -connection con_temp5; -connection server_1; -SELECT * FROM t3 WHERE a >= 60 ORDER BY a; -a b -60 60 -61 61 -62 62 -63 63 -64 64 -65 65 -66 66 -67 67 -68 68 -69 69 -70 70 -SET debug_sync='RESET'; -connection server_2; -SET debug_sync='now SIGNAL d0_cont'; -SET debug_sync='now WAIT_FOR t1_waiting'; -SET debug_sync='now SIGNAL d3_cont'; -SET debug_sync='now WAIT_FOR t2_waiting'; -SET debug_sync='now SIGNAL d1_cont'; -SET debug_sync='now WAIT_FOR t3_waiting'; -SET debug_sync='now SIGNAL d2_cont'; -SET debug_sync='now WAIT_FOR t4_waiting'; -KILL THD_ID; -SET debug_sync='now WAIT_FOR t3_killed'; -SET debug_sync='now SIGNAL t1_cont'; 
-include/wait_for_slave_sql_error.inc [errno=1317,1927,1964]
-STOP SLAVE IO_THREAD;
-SELECT * FROM t3 WHERE a >= 60 AND a != 65 ORDER BY a;
-a b
-60 60
-61 61
-62 62
-63 63
-64 64
-68 68
-69 69
-70 70
-SET debug_sync='RESET';
-SET GLOBAL slave_parallel_threads=0;
-SET GLOBAL slave_parallel_threads=10;
-SET sql_log_bin=0;
-DROP FUNCTION foo;
-CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
-RETURNS INT DETERMINISTIC
-BEGIN
-RETURN x;
-END
-||
-SET sql_log_bin=1;
-connection server_1;
-UPDATE t3 SET b=b+1 WHERE a=60;
-connection server_2;
-include/start_slave.inc
-SELECT * FROM t3 WHERE a >= 60 ORDER BY a;
-a b
-60 61
-61 61
-62 62
-63 63
-64 64
-65 65
-66 66
-67 67
-68 68
-69 69
-70 70
-SET sql_log_bin=0;
-DROP FUNCTION foo;
-CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
-RETURNS INT DETERMINISTIC
-BEGIN
-IF d1 != '' THEN
-SET debug_sync = d1;
-END IF;
-IF d2 != '' THEN
-SET debug_sync = d2;
-END IF;
-RETURN x;
-END
-||
-SET sql_log_bin=1;
-connection server_2;
-include/stop_slave.inc
-SET GLOBAL binlog_format=@old_format;
-SET GLOBAL slave_parallel_threads=0;
-SET GLOBAL slave_parallel_threads=10;
-include/start_slave.inc
-*** 5. Test killing thread that is waiting for queue of max length to shorten ***
-SET @old_max_queued= @@GLOBAL.slave_parallel_max_queued;
-SET GLOBAL slave_parallel_max_queued=9000;
-connection server_1;
-SET binlog_format=statement;
-INSERT INTO t3 VALUES (80, foo(0,
-'ha_write_row_end SIGNAL query_waiting WAIT_FOR query_cont', ''));
-connection server_2;
-SET debug_sync='now WAIT_FOR query_waiting';
-SET @old_dbug= @@GLOBAL.debug_dbug;
-SET GLOBAL debug_dbug="+d,rpl_parallel_wait_queue_max";
-connection server_1;
-SELECT * FROM t3 WHERE a >= 80 ORDER BY a;
-a b
-80 0
-81 10000
-connection server_2;
-SET debug_sync='now WAIT_FOR wait_queue_ready';
-KILL THD_ID;
-SET debug_sync='now WAIT_FOR wait_queue_killed';
-SET debug_sync='now SIGNAL query_cont';
-include/wait_for_slave_sql_error.inc [errno=1317,1927,1964]
-STOP SLAVE IO_THREAD;
-SET GLOBAL debug_dbug=@old_dbug;
-SET GLOBAL slave_parallel_max_queued= @old_max_queued;
-connection server_1;
-INSERT INTO t3 VALUES (82,0);
-SET binlog_format=@old_format;
-connection server_2;
-SET debug_sync='RESET';
-include/start_slave.inc
-SELECT * FROM t3 WHERE a >= 80 ORDER BY a;
-a b
-80 0
-81 10000
-82 0
-connection server_2;
-include/stop_slave.inc
-SET GLOBAL binlog_format=@old_format;
-SET GLOBAL slave_parallel_threads=0;
-SET GLOBAL slave_parallel_threads=10;
-include/start_slave.inc
-*** MDEV-5788 Incorrect free of rgi->deferred_events in parallel replication ***
-connection server_2;
-include/stop_slave.inc
-SET GLOBAL replicate_ignore_table="test.t3";
-SET GLOBAL slave_parallel_threads=2;
-include/start_slave.inc
-connection server_1;
-INSERT INTO t3 VALUES (100, rand());
-INSERT INTO t3 VALUES (101, rand());
-connection server_2;
-connection server_1;
-INSERT INTO t3 VALUES (102, rand());
-INSERT INTO t3 VALUES (103, rand());
-INSERT INTO t3 VALUES (104, rand());
-INSERT INTO t3 VALUES (105, rand());
-connection server_2;
-include/stop_slave.inc
-SET GLOBAL replicate_ignore_table="";
-include/start_slave.inc
-connection server_1;
-INSERT INTO t3 VALUES (106, rand());
-INSERT INTO t3 VALUES (107, rand());
-connection server_2;
-SELECT * FROM t3 WHERE a >= 100 ORDER BY a;
-a b
-106 #
-107 #
-*** MDEV-5921: In parallel replication, an error is not correctly signalled to the next transaction ***
-connection server_2;
-include/stop_slave.inc
-SET GLOBAL slave_parallel_threads=10;
-include/start_slave.inc -connection server_1; -INSERT INTO t3 VALUES (110, 1); -connection server_2; -SELECT * FROM t3 WHERE a >= 110 ORDER BY a; -a b -110 1 -SET sql_log_bin=0; -INSERT INTO t3 VALUES (111, 666); -SET sql_log_bin=1; -connection server_1; -connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -INSERT INTO t3 VALUES (111, 2); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connect con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -INSERT INTO t3 VALUES (112, 3); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con1; -connection con2; -SET debug_sync='RESET'; -connection server_2; -include/wait_for_slave_sql_error.inc [errno=1062] -include/wait_for_slave_sql_to_stop.inc -SELECT * FROM t3 WHERE a >= 110 ORDER BY a; -a b -110 1 -111 666 -SET sql_log_bin=0; -DELETE FROM t3 WHERE a=111 AND b=666; -SET sql_log_bin=1; -START SLAVE SQL_THREAD; -SELECT * FROM t3 WHERE a >= 110 ORDER BY a; -a b -110 1 -111 2 -112 3 -***MDEV-5914: Parallel replication deadlock due to InnoDB lock conflicts *** -connection server_2; -include/stop_slave.inc -connection server_1; -CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; -INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); -connection con1; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -UPDATE t4 SET b=NULL WHERE a=6; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con2; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -DELETE FROM t4 WHERE b <= 3; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con1; -connection con2; -SET debug_sync='RESET'; -connection server_2; -include/start_slave.inc -include/stop_slave.inc -SELECT * FROM t4 ORDER BY a; -a b -1 NULL -3 NULL -4 4 -5 NULL -6 NULL -connection server_1; -DELETE FROM t4; -INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); -connection con1; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -INSERT INTO t4 VALUES (7, NULL); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con2; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -DELETE FROM t4 WHERE b <= 3; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con1; -connection con2; -SET debug_sync='RESET'; -connection server_2; -include/start_slave.inc -include/stop_slave.inc -SELECT * FROM t4 ORDER BY a; -a b -1 NULL -3 NULL -4 4 -5 NULL -6 6 -7 NULL -connection server_1; -DELETE FROM t4; -INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); -connection con1; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -UPDATE t4 SET b=NULL WHERE a=6; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con2; -SET @old_format= @@SESSION.binlog_format; -SET binlog_format='statement'; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -DELETE FROM t4 WHERE b <= 1; -connection server_1; -SET 
debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con1; -connection con2; -SET @old_format=@@GLOBAL.binlog_format; -SET debug_sync='RESET'; -connection server_2; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,disable_thd_need_ordering_with"; -include/start_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SELECT * FROM t4 ORDER BY a; -a b -1 NULL -2 2 -3 NULL -4 4 -5 NULL -6 NULL -SET @last_gtid= 'GTID'; -SELECT IF(@@gtid_slave_pos LIKE CONCAT('%',@last_gtid,'%'), "GTID found ok", -CONCAT("GTID ", @last_gtid, " not found in gtid_slave_pos=", @@gtid_slave_pos)) -AS result; -result -GTID found ok -SELECT "ROW FOUND" AS `Is the row found?` - FROM mysql.gtid_slave_pos -WHERE CONCAT(domain_id, "-", server_id, "-", seq_no) = @last_gtid; -Is the row found? -ROW FOUND -*** MDEV-5938: Exec_master_log_pos not updated at log rotate in parallel replication *** -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=1; -SET DEBUG_SYNC= 'RESET'; -include/start_slave.inc -connection server_1; -CREATE TABLE t5 (a INT PRIMARY KEY, b INT); -INSERT INTO t5 VALUES (1,1); -INSERT INTO t5 VALUES (2,2), (3,8); -INSERT INTO t5 VALUES (4,16); -connection server_2; -test_check -OK -test_check -OK -connection server_1; -FLUSH LOGS; -connection server_2; -test_check -OK -test_check -OK -*** MDEV_6435: Incorrect error handling when query binlogged partially on master with "killed" error *** -connection server_1; -CREATE TABLE t6 (a INT) ENGINE=MyISAM; -CREATE TRIGGER tr AFTER INSERT ON t6 FOR EACH ROW SET @a = 1; -connection con1; -SET @old_format= @@binlog_format; -SET binlog_format= statement; -SET debug_sync='sp_head_execute_before_loop SIGNAL ready WAIT_FOR cont'; -INSERT INTO t6 VALUES (1), (2), (3); -connection server_1; -SET debug_sync='now WAIT_FOR ready'; -KILL QUERY CONID; -SET debug_sync='now SIGNAL cont'; -connection con1; -ERROR 70100: Query execution was interrupted -SET binlog_format= @old_format; -SET debug_sync='RESET'; -connection server_1; -SET debug_sync='RESET'; -connection server_2; -include/wait_for_slave_sql_error.inc [errno=1317] -STOP SLAVE IO_THREAD; -SET GLOBAL gtid_slave_pos= 'AFTER_ERROR_GTID_POS'; -include/start_slave.inc -connection server_1; -INSERT INTO t6 VALUES (4); -SELECT * FROM t6 ORDER BY a; -a -1 -4 -connection server_2; -SELECT * FROM t6 ORDER BY a; -a -4 -*** MDEV-6551: Some replication errors are ignored if slave_parallel_threads > 0 *** -connection server_1; -INSERT INTO t2 VALUES (31); -include/save_master_gtid.inc -connection server_2; -include/sync_with_master_gtid.inc -include/stop_slave.inc -SET GLOBAL slave_parallel_threads= 0; -include/start_slave.inc -SET sql_log_bin= 0; -INSERT INTO t2 VALUES (32); -SET sql_log_bin= 1; -connection server_1; -INSERT INTO t2 VALUES (32); -FLUSH LOGS; -INSERT INTO t2 VALUES (33); -INSERT INTO t2 VALUES (34); -SELECT * FROM t2 WHERE a >= 30 ORDER BY a; -a -31 -32 -33 -34 -include/save_master_gtid.inc -connection server_2; -include/wait_for_slave_sql_error.inc [errno=1062] -connection server_2; -include/stop_slave_io.inc -SET GLOBAL slave_parallel_threads=10; -START SLAVE; -include/wait_for_slave_sql_error.inc [errno=1062] -START SLAVE SQL_THREAD; -include/wait_for_slave_sql_error.inc [errno=1062] -SELECT * FROM t2 WHERE a >= 30 ORDER BY a; -a -31 -32 -SET sql_slave_skip_counter= 1; -ERROR HY000: When using parallel replication and GTID with multiple replication domains, @@sql_slave_skip_counter can not be used. 
Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position -include/stop_slave_io.inc -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t2 WHERE a >= 30 ORDER BY a; -a -31 -32 -33 -34 -*** MDEV-6775: Wrong binlog order in parallel replication *** -connection server_1; -DELETE FROM t4; -INSERT INTO t4 VALUES (1,NULL), (3,NULL), (4,4), (5, NULL), (6, 6); -include/save_master_gtid.inc -connection server_2; -include/sync_with_master_gtid.inc -include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,inject_binlog_commit_before_get_LOCK_log"; -SET @old_format=@@GLOBAL.binlog_format; -SET GLOBAL binlog_format=ROW; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -connection con1; -SET @old_format= @@binlog_format; -SET binlog_format= statement; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -UPDATE t4 SET b=NULL WHERE a=6; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con2; -SET @old_format= @@binlog_format; -SET binlog_format= statement; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -DELETE FROM t4 WHERE b <= 3; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con1; -SET binlog_format= @old_format; -connection con2; -SET binlog_format= @old_format; -SET debug_sync='RESET'; -SELECT * FROM t4 ORDER BY a; -a b -1 NULL -3 NULL -4 4 -5 NULL -6 NULL -connection server_2; -include/start_slave.inc -SET debug_sync= 'now WAIT_FOR waiting'; -SELECT * FROM t4 ORDER BY a; -a b -1 NULL -3 NULL -4 4 -5 NULL -6 NULL -SET debug_sync= 'now SIGNAL cont'; -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL binlog_format= @old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** MDEV-7237: Parallel replication: incorrect relaylog position after stop/start the slave *** -connection server_1; -INSERT INTO t2 VALUES (40); -connection server_2; -include/stop_slave.inc -CHANGE MASTER TO master_use_gtid=no; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,rpl_parallel_scheduled_gtid_0_x_100"; -SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -connection server_1; -INSERT INTO t2 VALUES (41); -INSERT INTO t2 VALUES (42); -SET @old_format= @@binlog_format; -SET binlog_format= statement; -DELETE FROM t2 WHERE a=40; -SET binlog_format= @old_format; -INSERT INTO t2 VALUES (43); -INSERT INTO t2 VALUES (44); -FLUSH LOGS; -INSERT INTO t2 VALUES (45); -SET gtid_seq_no=100; -INSERT INTO t2 VALUES (46); -connection con_temp2; -BEGIN; -SELECT * FROM t2 WHERE a=40 FOR UPDATE; -a -40 -connection server_2; -include/start_slave.inc -SET debug_sync= 'now WAIT_FOR scheduled_gtid_0_x_100'; -STOP SLAVE; -connection con_temp2; -SET debug_sync= 'now WAIT_FOR wait_for_done_waiting'; -ROLLBACK; -connection server_2; -include/wait_for_slave_sql_to_stop.inc -SELECT * FROM t2 WHERE a >= 40 ORDER BY a; -a -41 -42 -include/start_slave.inc -SELECT * FROM t2 WHERE a >= 40 ORDER BY a; -a -41 -42 -43 -44 -45 -46 -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET DEBUG_SYNC= 'RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -CHANGE MASTER TO master_use_gtid=slave_pos; -include/start_slave.inc -*** MDEV-7326 
Server deadlock in connection with parallel replication *** -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=3; -SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; -include/start_slave.inc -connection server_1; -SET @old_format= @@SESSION.binlog_format; -SET binlog_format= STATEMENT; -INSERT INTO t1 VALUES (foo(50, -"rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", -"rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; -connection server_1; -INSERT INTO t2 VALUES (foo(50, -"rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", -"rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2")); -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; -connection server_1; -INSERT INTO t1 VALUES (foo(51, -"rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", -"rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; -SET DEBUG_SYNC= "now SIGNAL t1_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; -connection server_1; -INSERT INTO t1 VALUES (52); -SET BINLOG_FORMAT= @old_format; -SELECT * FROM t2 WHERE a>=50 ORDER BY a; -a -50 -SELECT * FROM t1 WHERE a>=50 ORDER BY a; -a -50 -51 -52 -connection server_2; -SET DEBUG_SYNC= "now SIGNAL prep_cont"; -SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; -SET DEBUG_SYNC= "now SIGNAL t2_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; -SET DEBUG_SYNC= "now SIGNAL t1_cont2"; -connection server_1; -connection server_2; -SELECT * FROM t2 WHERE a>=50 ORDER BY a; -a -50 -SELECT * FROM t1 WHERE a>=50 ORDER BY a; -a -50 -51 -52 -SET DEBUG_SYNC="reset"; -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** MDEV-7326 Server deadlock in connection with parallel replication *** -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=3; -SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; -include/start_slave.inc -connection server_1; -SET @old_format= @@SESSION.binlog_format; -SET binlog_format= STATEMENT; -INSERT INTO t1 VALUES (foo(60, -"rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", -"rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; -connection server_1; -INSERT INTO t2 VALUES (foo(60, -"rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", -"rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2")); -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; -connection con_temp3; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -INSERT INTO t1 VALUES (foo(61, -"rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", -"rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con_temp4; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -INSERT INTO t6 VALUES (62); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con_temp3; -connection con_temp4; -connection server_1; -SET 
debug_sync='RESET'; -SET BINLOG_FORMAT= @old_format; -SELECT * FROM t2 WHERE a>=60 ORDER BY a; -a -60 -SELECT * FROM t1 WHERE a>=60 ORDER BY a; -a -60 -61 -SELECT * FROM t6 WHERE a>=60 ORDER BY a; -a -62 -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; -SET DEBUG_SYNC= "now SIGNAL t1_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; -connection server_2; -SET DEBUG_SYNC= "now SIGNAL prep_cont"; -SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; -SET DEBUG_SYNC= "now SIGNAL t2_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; -SET DEBUG_SYNC= "now SIGNAL t1_cont2"; -connection server_1; -connection server_2; -SELECT * FROM t2 WHERE a>=60 ORDER BY a; -a -60 -SELECT * FROM t1 WHERE a>=60 ORDER BY a; -a -60 -61 -SELECT * FROM t6 WHERE a>=60 ORDER BY a; -a -62 -SET DEBUG_SYNC="reset"; -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** MDEV-7335: Potential parallel slave deadlock with specific binlog corruption *** -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=1; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,slave_discard_xid_for_gtid_0_x_1000"; -connection server_1; -INSERT INTO t2 VALUES (101); -INSERT INTO t2 VALUES (102); -INSERT INTO t2 VALUES (103); -INSERT INTO t2 VALUES (104); -INSERT INTO t2 VALUES (105); -SET gtid_seq_no=1000; -INSERT INTO t2 VALUES (106); -INSERT INTO t2 VALUES (107); -INSERT INTO t2 VALUES (108); -INSERT INTO t2 VALUES (109); -INSERT INTO t2 VALUES (110); -INSERT INTO t2 VALUES (111); -INSERT INTO t2 VALUES (112); -INSERT INTO t2 VALUES (113); -INSERT INTO t2 VALUES (114); -INSERT INTO t2 VALUES (115); -INSERT INTO t2 VALUES (116); -INSERT INTO t2 VALUES (117); -INSERT INTO t2 VALUES (118); -INSERT INTO t2 VALUES (119); -INSERT INTO t2 VALUES (120); -INSERT INTO t2 VALUES (121); -INSERT INTO t2 VALUES (122); -INSERT INTO t2 VALUES (123); -INSERT INTO t2 VALUES (124); -INSERT INTO t2 VALUES (125); -INSERT INTO t2 VALUES (126); -INSERT INTO t2 VALUES (127); -INSERT INTO t2 VALUES (128); -INSERT INTO t2 VALUES (129); -INSERT INTO t2 VALUES (130); -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t2 WHERE a >= 100 ORDER BY a; -a -101 -102 -103 -104 -105 -107 -108 -109 -110 -111 -112 -113 -114 -115 -116 -117 -118 -119 -120 -121 -122 -123 -124 -125 -126 -127 -128 -129 -130 -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** MDEV-6676 - test syntax of @@slave_parallel_mode *** -connection server_2; -Parallel_Mode = 'conservative' -include/stop_slave.inc -SET GLOBAL slave_parallel_mode='aggressive'; -Parallel_Mode = 'aggressive' -SET GLOBAL slave_parallel_mode='conservative'; -Parallel_Mode = 'conservative' -*** MDEV-6676 - test that empty parallel_mode does not replicate in parallel *** -connection server_1; -INSERT INTO t2 VALUES (1040); -include/save_master_gtid.inc -connection server_2; -SET GLOBAL slave_parallel_mode='none'; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,slave_crash_if_parallel_apply"; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; -a -1040 -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -*** MDEV-6676 - test disabling domain-based parallel replication *** -connection server_1; -SET gtid_domain_id = 1; -INSERT INTO t2 VALUES (1041); -INSERT INTO 
t2 VALUES (1042); -INSERT INTO t2 VALUES (1043); -INSERT INTO t2 VALUES (1044); -INSERT INTO t2 VALUES (1045); -INSERT INTO t2 VALUES (1046); -DELETE FROM t2 WHERE a >= 1041; -SET gtid_domain_id = 2; -INSERT INTO t2 VALUES (1041); -INSERT INTO t2 VALUES (1042); -INSERT INTO t2 VALUES (1043); -INSERT INTO t2 VALUES (1044); -INSERT INTO t2 VALUES (1045); -INSERT INTO t2 VALUES (1046); -SET gtid_domain_id = 0; -include/save_master_gtid.inc -connection server_2; -SET GLOBAL slave_parallel_mode=minimal; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; -a -1040 -1041 -1042 -1043 -1044 -1045 -1046 -*** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang *** -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_mode='conservative'; -SET GLOBAL slave_parallel_threads=10; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep'; -connection server_1; -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; -SET @commit_id= 10000; -ANALYZE TABLE t2; -Table Op Msg_type Msg_text -test.t2 analyze status Engine-independent statistics collected -test.t2 analyze status OK -INSERT INTO t3 VALUES (120, 0); -SET @commit_id= 10001; -INSERT INTO t3 VALUES (121, 0); -SET SESSION debug_dbug=@old_dbug; -SELECT * FROM t3 WHERE a >= 120 ORDER BY a; -a b -120 0 -121 0 -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t3 WHERE a >= 120 ORDER BY a; -a b -120 0 -121 0 -include/stop_slave.inc -SET GLOBAL debug_dbug= @old_dbug; -include/start_slave.inc -*** MDEV-7929: record_gtid() for non-transactional event group calls wakeup_subsequent_commits() too early, causing slave hang. 
*** -connection server_2; -include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_record_gtid_serverid_100_sleep'; -connection server_1; -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; -SET @old_server_id= @@SESSION.server_id; -SET SESSION server_id= 100; -SET @commit_id= 10010; -ALTER TABLE t1 COMMENT "Hulubulu!"; -SET SESSION server_id= @old_server_id; -INSERT INTO t3 VALUES (130, 0); -SET @commit_id= 10011; -INSERT INTO t3 VALUES (131, 0); -SET SESSION debug_dbug=@old_dbug; -SELECT * FROM t3 WHERE a >= 130 ORDER BY a; -a b -130 0 -131 0 -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t3 WHERE a >= 130 ORDER BY a; -a b -130 0 -131 0 -include/stop_slave.inc -SET GLOBAL debug_dbug= @old_dbug; -include/start_slave.inc -*** MDEV-8031: Parallel replication stops on "connection killed" error (probably incorrectly handled deadlock kill) *** -connection server_1; -INSERT INTO t3 VALUES (201,0), (202,0); -include/save_master_gtid.inc -connection server_2; -include/sync_with_master_gtid.inc -include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_mdev8031'; -connection server_1; -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; -SET @commit_id= 10200; -INSERT INTO t3 VALUES (203, 1); -INSERT INTO t3 VALUES (204, 1); -INSERT INTO t3 VALUES (205, 1); -UPDATE t3 SET b=b+1 WHERE a=201; -UPDATE t3 SET b=b+1 WHERE a=201; -UPDATE t3 SET b=b+1 WHERE a=201; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=203; -UPDATE t3 SET b=b+1 WHERE a=203; -UPDATE t3 SET b=b+1 WHERE a=204; -UPDATE t3 SET b=b+1 WHERE a=204; -UPDATE t3 SET b=b+1 WHERE a=204; -UPDATE t3 SET b=b+1 WHERE a=203; -UPDATE t3 SET b=b+1 WHERE a=205; -UPDATE t3 SET b=b+1 WHERE a=205; -SET SESSION debug_dbug=@old_dbug; -SELECT * FROM t3 WHERE a>=200 ORDER BY a; -a b -201 3 -202 4 -203 4 -204 4 -205 3 -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t3 WHERE a>=200 ORDER BY a; -a b -201 3 -202 4 -203 4 -204 4 -205 3 -include/stop_slave.inc -SET GLOBAL debug_dbug= @old_dbug; -include/start_slave.inc -*** Check getting deadlock killed inside open_binlog() during retry. *** -connection server_2; -include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_retry_event_group_open_binlog_kill'; -SET @old_max= @@GLOBAL.max_relay_log_size; -SET GLOBAL max_relay_log_size= 4096; -connection server_1; -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; -SET @commit_id= 10210; -Omit long queries that cause relaylog rotations and transaction retries... 
-SET SESSION debug_dbug=@old_dbug; -SELECT * FROM t3 WHERE a>=200 ORDER BY a; -a b -201 6 -202 8 -203 7 -204 7 -205 5 -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t3 WHERE a>=200 ORDER BY a; -a b -201 6 -202 8 -203 7 -204 7 -205 5 -include/stop_slave.inc -SET GLOBAL debug_dbug= @old_debg; -SET GLOBAL max_relay_log_size= @old_max; -include/start_slave.inc -*** MDEV-8725: Assertion on ROLLBACK statement in the binary log *** -connection server_1; -BEGIN; -INSERT INTO t2 VALUES (2000); -INSERT INTO t1 VALUES (2000); -INSERT INTO t2 VALUES (2001); -ROLLBACK; -SELECT * FROM t1 WHERE a>=2000 ORDER BY a; -a -2000 -SELECT * FROM t2 WHERE a>=2000 ORDER BY a; -a -include/save_master_gtid.inc -connection server_2; -include/sync_with_master_gtid.inc -SELECT * FROM t1 WHERE a>=2000 ORDER BY a; -a -2000 -SELECT * FROM t2 WHERE a>=2000 ORDER BY a; -a -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=@old_parallel_threads; -include/start_slave.inc -SET DEBUG_SYNC= 'RESET'; -connection server_1; -DROP function foo; -DROP TABLE t1,t2,t3,t4,t5,t6; -SET DEBUG_SYNC= 'RESET'; -include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel.test b/mysql-test/suite/binlog_encryption/rpl_parallel.test deleted file mode 100644 index dba54e4fd7a..00000000000 --- a/mysql-test/suite/binlog_encryption/rpl_parallel.test +++ /dev/null @@ -1 +0,0 @@ ---source suite/rpl/include/rpl_parallel.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_analyze_table_hang.result b/mysql-test/suite/binlog_encryption/rpl_parallel_analyze_table_hang.result new file mode 100644 index 00000000000..3c3cd2601e8 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_analyze_table_hang.result @@ -0,0 +1,51 @@ +*** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +include/stop_slave.inc +SET GLOBAL slave_parallel_mode='conservative'; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep'; +connection server_1; +SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +SET @commit_id= 10000; +ANALYZE TABLE t2; +Table Op Msg_type Msg_text +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +INSERT INTO t3 VALUES (120, 0); +SET @commit_id= 10001; +INSERT INTO t3 VALUES (121, 0); +SET SESSION debug_dbug=@old_dbug; +SELECT * FROM t3 WHERE a >= 120 ORDER BY a; +a b +120 0 +121 0 +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t3 WHERE a >= 120 ORDER BY a; +a b +120 0 +121 0 +include/stop_slave.inc +SET GLOBAL debug_dbug= @old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +include/start_slave.inc +connection server_1; +DROP TABLE t2,t3; +include/rpl_end.inc diff 
--git a/mysql-test/suite/binlog_encryption/rpl_parallel_analyze_table_hang.test b/mysql-test/suite/binlog_encryption/rpl_parallel_analyze_table_hang.test new file mode 100644 index 00000000000..69e76692ce1 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_analyze_table_hang.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_analyze_table_hang.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_deadlock_corrupt_binlog.result b/mysql-test/suite/binlog_encryption/rpl_parallel_deadlock_corrupt_binlog.result new file mode 100644 index 00000000000..74d1d53b67c --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_deadlock_corrupt_binlog.result @@ -0,0 +1,93 @@ +*** MDEV-7335: Potential parallel slave deadlock with specific binlog corruption *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=1; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,slave_discard_xid_for_gtid_0_x_1000"; +connection server_1; +INSERT INTO t2 VALUES (101); +INSERT INTO t2 VALUES (102); +INSERT INTO t2 VALUES (103); +INSERT INTO t2 VALUES (104); +INSERT INTO t2 VALUES (105); +SET gtid_seq_no=1000; +INSERT INTO t2 VALUES (106); +INSERT INTO t2 VALUES (107); +INSERT INTO t2 VALUES (108); +INSERT INTO t2 VALUES (109); +INSERT INTO t2 VALUES (110); +INSERT INTO t2 VALUES (111); +INSERT INTO t2 VALUES (112); +INSERT INTO t2 VALUES (113); +INSERT INTO t2 VALUES (114); +INSERT INTO t2 VALUES (115); +INSERT INTO t2 VALUES (116); +INSERT INTO t2 VALUES (117); +INSERT INTO t2 VALUES (118); +INSERT INTO t2 VALUES (119); +INSERT INTO t2 VALUES (120); +INSERT INTO t2 VALUES (121); +INSERT INTO t2 VALUES (122); +INSERT INTO t2 VALUES (123); +INSERT INTO t2 VALUES (124); +INSERT INTO t2 VALUES (125); +INSERT INTO t2 VALUES (126); +INSERT INTO t2 VALUES (127); +INSERT INTO t2 VALUES (128); +INSERT INTO t2 VALUES (129); +INSERT INTO t2 VALUES (130); +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a >= 100 ORDER BY a; +a +101 +102 +103 +104 +105 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP TABLE t2; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_deadlock_corrupt_binlog.test b/mysql-test/suite/binlog_encryption/rpl_parallel_deadlock_corrupt_binlog.test new file mode 100644 index 00000000000..71c589dcd88 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_deadlock_corrupt_binlog.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_deadlock_corrupt_binlog.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_domain.result b/mysql-test/suite/binlog_encryption/rpl_parallel_domain.result new file mode 100644 index 00000000000..69b9678d149 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_domain.result @@ 
-0,0 +1,71 @@ +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=10; +ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; +IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) +OK +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; +IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) +OK +include/stop_slave.inc +SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; +IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) +OK +include/start_slave.inc +SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; +IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) +OK +*** Test long-running query in domain 1 can run in parallel with short queries in domain 0 *** +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); +INSERT INTO t2 VALUES (1); +connection server_2; +connect con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +LOCK TABLE t1 WRITE; +connection server_1; +SET gtid_domain_id=1; +INSERT INTO t1 VALUES (2); +SET gtid_domain_id=0; +INSERT INTO t2 VALUES (2); +INSERT INTO t2 VALUES (3); +BEGIN; +INSERT INTO t2 VALUES (4); +INSERT INTO t2 VALUES (5); +COMMIT; +INSERT INTO t2 VALUES (6); +connection server_2; +SELECT * FROM t2 ORDER by a; +a +1 +2 +3 +4 +5 +6 +connection con_temp1; +SELECT * FROM t1; +a +1 +UNLOCK TABLES; +connection server_2; +SELECT * FROM t1 ORDER BY a; +a +1 +2 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t1,t2; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_domain.test b/mysql-test/suite/binlog_encryption/rpl_parallel_domain.test new file mode 100644 index 00000000000..b498b8616c8 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_domain.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_domain.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_domain_slave_single_grp.result b/mysql-test/suite/binlog_encryption/rpl_parallel_domain_slave_single_grp.result new file mode 100644 index 00000000000..613aac64487 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_domain_slave_single_grp.result @@ -0,0 +1,101 @@ +*** Test two transactions in different domains committed in opposite order on slave but in a single group commit. 
*** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); +INSERT INTO t2 VALUES (1); +connection server_2; +include/stop_slave.inc +connection server_1; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +SET @old_format= @@SESSION.binlog_format; +SET binlog_format='statement'; +SET gtid_domain_id=1; +INSERT INTO t2 VALUES (foo(10, +'commit_before_enqueue SIGNAL ready1 WAIT_FOR cont1', +'commit_after_release_LOCK_prepare_ordered SIGNAL ready2')); +connection server_2; +FLUSH LOGS; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +SET @old_format=@@GLOBAL.binlog_format; +SET GLOBAL binlog_format=statement; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +include/start_slave.inc +SET debug_sync='now WAIT_FOR ready1'; +connection server_1; +SET gtid_domain_id=2; +INSERT INTO t2 VALUES (foo(11, +'commit_before_enqueue SIGNAL ready3 WAIT_FOR cont3', +'commit_after_release_LOCK_prepare_ordered SIGNAL ready4 WAIT_FOR cont4')); +SET gtid_domain_id=0; +SELECT * FROM t2 WHERE a >= 10 ORDER BY a; +a +10 +11 +connection server_2; +SET debug_sync='now WAIT_FOR ready3'; +SET debug_sync='now SIGNAL cont3'; +SET debug_sync='now WAIT_FOR ready4'; +SET debug_sync='now SIGNAL cont1'; +SET debug_sync='now WAIT_FOR ready2'; +SET debug_sync='now SIGNAL cont4'; +SELECT * FROM t2 WHERE a >= 10 ORDER BY a; +a +10 +11 +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002 +slave-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# +slave-bin.000002 # Query # # use `test`; INSERT INTO t2 VALUES (foo(11, +'commit_before_enqueue SIGNAL ready3 WAIT_FOR cont3', +'commit_after_release_LOCK_prepare_ordered SIGNAL ready4 WAIT_FOR cont4')) +slave-bin.000002 # Xid # # COMMIT /* XID */ +slave-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# +slave-bin.000002 # Query # # use `test`; INSERT INTO t2 VALUES (foo(10, +'commit_before_enqueue SIGNAL ready1 WAIT_FOR cont1', +'commit_after_release_LOCK_prepare_ordered SIGNAL ready2')) +slave-bin.000002 # Xid # # COMMIT /* XID */ +FLUSH LOGS; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +SET GLOBAL binlog_format=@old_format; +connection server_1; +DROP function foo; +DROP TABLE t1,t2; +SET DEBUG_SYNC= 'RESET'; +SET GLOBAL binlog_format=@old_format; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_domain_slave_single_grp.test b/mysql-test/suite/binlog_encryption/rpl_parallel_domain_slave_single_grp.test new file mode 100644 index 00000000000..ce9239f189e --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_domain_slave_single_grp.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_domain_slave_single_grp.inc diff 
--git a/mysql-test/suite/binlog_encryption/rpl_parallel_free_deferred_event.result b/mysql-test/suite/binlog_encryption/rpl_parallel_free_deferred_event.result new file mode 100644 index 00000000000..6718561a321 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_free_deferred_event.result @@ -0,0 +1,44 @@ +*** MDEV-5788 Incorrect free of rgi->deferred_events in parallel replication *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL replicate_ignore_table="test.t3"; +SET GLOBAL slave_parallel_threads=2; +include/start_slave.inc +connection server_1; +INSERT INTO t3 VALUES (100, rand()); +INSERT INTO t3 VALUES (101, rand()); +connection server_2; +connection server_1; +INSERT INTO t3 VALUES (102, rand()); +INSERT INTO t3 VALUES (103, rand()); +INSERT INTO t3 VALUES (104, rand()); +INSERT INTO t3 VALUES (105, rand()); +connection server_2; +include/stop_slave.inc +SET GLOBAL replicate_ignore_table=""; +include/start_slave.inc +connection server_1; +INSERT INTO t3 VALUES (106, rand()); +INSERT INTO t3 VALUES (107, rand()); +connection server_2; +SELECT * FROM t3 WHERE a >= 100 ORDER BY a; +a b +106 # +107 # +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t3; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_free_deferred_event.test b/mysql-test/suite/binlog_encryption/rpl_parallel_free_deferred_event.test new file mode 100644 index 00000000000..d0b6c970210 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_free_deferred_event.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_free_deferred_event.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_gco_wait_kill.result b/mysql-test/suite/binlog_encryption/rpl_parallel_gco_wait_kill.result new file mode 100644 index 00000000000..4472550c4f2 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_gco_wait_kill.result @@ -0,0 +1,257 @@ +*** Test killing thread that is waiting to start transaction until previous transaction commits *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode= @@GLOBAL.slave_parallel_mode; +include/stop_slave.inc +SET sql_log_bin=0; +CALL mtr.add_suppression("Query execution was interrupted"); +CALL mtr.add_suppression("Slave: Connection was killed"); +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +SET sql_log_bin=1; +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode= 'conservative'; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +connect con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +connection server_1; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) 
+RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=4; +include/start_slave.inc +connection server_1; +SET gtid_domain_id=2; +BEGIN; +INSERT INTO t3 VALUES (70, foo(70, +'rpl_parallel_start_waiting_for_prior SIGNAL t4_waiting', '')); +INSERT INTO t3 VALUES (60, foo(60, +'ha_write_row_end SIGNAL d2_query WAIT_FOR d2_cont2', +'rpl_parallel_end_of_group SIGNAL d2_done WAIT_FOR d2_cont')); +COMMIT; +SET gtid_domain_id=0; +connection server_2; +SET debug_sync='now WAIT_FOR d2_query'; +connection server_1; +SET gtid_domain_id=1; +BEGIN; +INSERT INTO t3 VALUES (61, foo(61, +'rpl_parallel_start_waiting_for_prior SIGNAL t3_waiting', +'rpl_parallel_start_waiting_for_prior_killed SIGNAL t3_killed')); +INSERT INTO t3 VALUES (62, foo(62, +'ha_write_row_end SIGNAL d1_query WAIT_FOR d1_cont2', +'rpl_parallel_end_of_group SIGNAL d1_done WAIT_FOR d1_cont')); +COMMIT; +SET gtid_domain_id=0; +connection server_2; +SET debug_sync='now WAIT_FOR d1_query'; +connection server_1; +SET gtid_domain_id=0; +INSERT INTO t3 VALUES (63, foo(63, +'ha_write_row_end SIGNAL d0_query WAIT_FOR d0_cont2', +'rpl_parallel_end_of_group SIGNAL d0_done WAIT_FOR d0_cont')); +connection server_2; +SET debug_sync='now WAIT_FOR d0_query'; +connection server_1; +SET gtid_domain_id=3; +BEGIN; +INSERT INTO t3 VALUES (68, foo(68, +'rpl_parallel_start_waiting_for_prior SIGNAL t2_waiting', '')); +INSERT INTO t3 VALUES (69, foo(69, +'ha_write_row_end SIGNAL d3_query WAIT_FOR d3_cont2', +'rpl_parallel_end_of_group SIGNAL d3_done WAIT_FOR d3_cont')); +COMMIT; +SET gtid_domain_id=0; +connection server_2; +SET debug_sync='now WAIT_FOR d3_query'; +SET debug_sync='now SIGNAL d2_cont2'; +SET debug_sync='now WAIT_FOR d2_done'; +SET debug_sync='now SIGNAL d1_cont2'; +SET debug_sync='now WAIT_FOR d1_done'; +SET debug_sync='now SIGNAL d0_cont2'; +SET debug_sync='now WAIT_FOR d0_done'; +SET debug_sync='now SIGNAL d3_cont2'; +SET debug_sync='now WAIT_FOR d3_done'; +connection con_temp3; +INSERT INTO t3 VALUES (64, foo(64, +'rpl_parallel_before_mark_start_commit SIGNAL t1_waiting WAIT_FOR t1_cont', '')); +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2 WAIT_FOR master_cont2'; +INSERT INTO t3 VALUES (65, foo(65, '', '')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +connection con_temp4; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +INSERT INTO t3 VALUES (66, foo(66, '', '')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued3'; +connection con_temp5; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued4'; +INSERT INTO t3 VALUES (67, foo(67, '', '')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued4'; +SET debug_sync='now SIGNAL master_cont2'; +connection con_temp3; +connection con_temp4; +connection con_temp5; +connection server_1; +SELECT * FROM t3 WHERE a >= 60 ORDER BY a; +a b +60 60 +61 61 +62 62 +63 63 +64 64 +65 65 +66 66 +67 67 +68 68 +69 69 +70 70 +SET debug_sync='RESET'; +connection server_2; +SET debug_sync='now SIGNAL d0_cont'; +SET debug_sync='now WAIT_FOR t1_waiting'; +SET debug_sync='now SIGNAL 
d3_cont'; +SET debug_sync='now WAIT_FOR t2_waiting'; +SET debug_sync='now SIGNAL d1_cont'; +SET debug_sync='now WAIT_FOR t3_waiting'; +SET debug_sync='now SIGNAL d2_cont'; +SET debug_sync='now WAIT_FOR t4_waiting'; +KILL THD_ID; +SET debug_sync='now WAIT_FOR t3_killed'; +SET debug_sync='now SIGNAL t1_cont'; +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] +STOP SLAVE IO_THREAD; +SELECT * FROM t3 WHERE a >= 60 AND a != 65 ORDER BY a; +a b +60 60 +61 61 +62 62 +63 63 +64 64 +68 68 +69 69 +70 70 +SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_1; +UPDATE t3 SET b=b+1 WHERE a=60; +connection server_2; +include/start_slave.inc +SELECT * FROM t3 WHERE a >= 60 ORDER BY a; +a b +60 61 +61 61 +62 62 +63 63 +64 64 +65 65 +66 66 +67 67 +68 68 +69 69 +70 70 +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +include/start_slave.inc +*** 5. Test killing thread that is waiting for queue of max length to shorten *** +SET @old_max_queued= @@GLOBAL.slave_parallel_max_queued; +SET GLOBAL slave_parallel_max_queued=9000; +connection server_1; +INSERT INTO t3 VALUES (80, foo(0, +'ha_write_row_end SIGNAL query_waiting WAIT_FOR query_cont', '')); +connection server_2; +SET debug_sync='now WAIT_FOR query_waiting'; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_wait_queue_max"; +connection server_1; +SELECT * FROM t3 WHERE a >= 80 ORDER BY a; +a b +80 0 +81 10000 +connection server_2; +SET debug_sync='now WAIT_FOR wait_queue_ready'; +KILL THD_ID; +SET debug_sync='now WAIT_FOR wait_queue_killed'; +SET debug_sync='now SIGNAL query_cont'; +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] +STOP SLAVE IO_THREAD; +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_max_queued= @old_max_queued; +connection server_1; +INSERT INTO t3 VALUES (82,0); +connection server_2; +SET debug_sync='RESET'; +include/start_slave.inc +SELECT * FROM t3 WHERE a >= 80 ORDER BY a; +a b +80 0 +81 10000 +82 0 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP function foo; +DROP TABLE t3; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_gco_wait_kill.test b/mysql-test/suite/binlog_encryption/rpl_parallel_gco_wait_kill.test new file mode 100644 index 00000000000..853465f1760 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_gco_wait_kill.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_gco_wait_kill.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_gtid_slave_pos_update_fail.result b/mysql-test/suite/binlog_encryption/rpl_parallel_gtid_slave_pos_update_fail.result new file mode 100644 index 00000000000..2e7e7f547af --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_gtid_slave_pos_update_fail.result @@ 
-0,0 +1,65 @@ +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; +INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); +connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connection server_2; +include/stop_slave.inc +connection con1; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +UPDATE t4 SET b=NULL WHERE a=6; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con2; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +DELETE FROM t4 WHERE b <= 1; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con1; +connection con2; +SET debug_sync='RESET'; +connection server_2; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,disable_thd_need_ordering_with"; +include/start_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SELECT * FROM t4 ORDER BY a; +a b +1 NULL +2 2 +3 NULL +4 4 +5 NULL +6 NULL +SET @last_gtid= 'GTID'; +SELECT IF(@@gtid_slave_pos LIKE CONCAT('%',@last_gtid,'%'), "GTID found ok", +CONCAT("GTID ", @last_gtid, " not found in gtid_slave_pos=", @@gtid_slave_pos)) +AS result; +result +GTID found ok +SELECT "ROW FOUND" AS `Is the row found?` + FROM mysql.gtid_slave_pos +WHERE CONCAT(domain_id, "-", server_id, "-", seq_no) = @last_gtid; +Is the row found? 
+ROW FOUND +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP TABLE t4; +SET DEBUG_SYNC= 'RESET'; +disconnect con1; +disconnect con2; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_gtid_slave_pos_update_fail.test b/mysql-test/suite/binlog_encryption/rpl_parallel_gtid_slave_pos_update_fail.test new file mode 100644 index 00000000000..67105ccafee --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_gtid_slave_pos_update_fail.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_gtid_slave_pos_update_fail.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_ignore_error_on_rotate.result b/mysql-test/suite/binlog_encryption/rpl_parallel_ignore_error_on_rotate.result new file mode 100644 index 00000000000..d00740dba3d --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_ignore_error_on_rotate.result @@ -0,0 +1,74 @@ +*** MDEV-6551: Some replication errors are ignored if slave_parallel_threads > 0 *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_threads=1; +CHANGE MASTER TO master_use_gtid=slave_pos; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +SET gtid_domain_id=1; +INSERT INTO t2 VALUES (1); +SET gtid_domain_id=0; +SET gtid_domain_id=2; +INSERT INTO t2 VALUES (2); +SET gtid_domain_id=0; +INSERT INTO t2 VALUES (31); +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +include/stop_slave.inc +SET GLOBAL slave_parallel_threads= 0; +include/start_slave.inc +SET sql_log_bin= 0; +INSERT INTO t2 VALUES (32); +SET sql_log_bin= 1; +connection server_1; +INSERT INTO t2 VALUES (32); +FLUSH LOGS; +INSERT INTO t2 VALUES (33); +INSERT INTO t2 VALUES (34); +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; +a +31 +32 +33 +34 +include/save_master_gtid.inc +connection server_2; +include/wait_for_slave_sql_error.inc [errno=1062] +connection server_2; +include/stop_slave_io.inc +SET GLOBAL slave_parallel_threads=10; +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1062] +START SLAVE SQL_THREAD; +include/wait_for_slave_sql_error.inc [errno=1062] +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; +a +31 +32 +SET sql_slave_skip_counter= 1; +ERROR HY000: When using parallel replication and GTID with multiple replication domains, @@sql_slave_skip_counter can not be used. 
Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position +include/stop_slave_io.inc +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; +a +31 +32 +33 +34 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +include/start_slave.inc +connection server_1; +DROP TABLE t2; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_ignore_error_on_rotate.test b/mysql-test/suite/binlog_encryption/rpl_parallel_ignore_error_on_rotate.test new file mode 100644 index 00000000000..ce57184d812 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_ignore_error_on_rotate.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_ignore_error_on_rotate.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_incorrect_relay_pos.result b/mysql-test/suite/binlog_encryption/rpl_parallel_incorrect_relay_pos.result new file mode 100644 index 00000000000..6ca7f2b68e8 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_incorrect_relay_pos.result @@ -0,0 +1,75 @@ +*** MDEV-7237: Parallel replication: incorrect relaylog position after stop/start the slave *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode='conservative'; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t2 VALUES (40); +connection server_2; +connect con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=no; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_scheduled_gtid_0_x_100"; +SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +connection server_1; +INSERT INTO t2 VALUES (41); +INSERT INTO t2 VALUES (42); +SET @old_format= @@binlog_format; +SET binlog_format= statement; +DELETE FROM t2 WHERE a=40; +SET binlog_format= @old_format; +INSERT INTO t2 VALUES (43); +INSERT INTO t2 VALUES (44); +FLUSH LOGS; +INSERT INTO t2 VALUES (45); +SET gtid_seq_no=100; +INSERT INTO t2 VALUES (46); +connection con_temp2; +BEGIN; +SELECT * FROM t2 WHERE a=40 FOR UPDATE; +a +40 +connection server_2; +include/start_slave.inc +SET debug_sync= 'now WAIT_FOR scheduled_gtid_0_x_100'; +STOP SLAVE; +connection con_temp2; +SET debug_sync= 'now WAIT_FOR wait_for_done_waiting'; +ROLLBACK; +connection server_2; +include/wait_for_slave_sql_to_stop.inc +SELECT * FROM t2 WHERE a >= 40 ORDER BY a; +a +41 +42 +include/start_slave.inc +SELECT * FROM t2 WHERE a >= 40 ORDER BY a; +a +41 +42 +43 +44 +45 +46 +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET DEBUG_SYNC= 'RESET'; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +DROP TABLE t2; +include/rpl_end.inc diff --git 
a/mysql-test/suite/binlog_encryption/rpl_parallel_incorrect_relay_pos.test b/mysql-test/suite/binlog_encryption/rpl_parallel_incorrect_relay_pos.test new file mode 100644 index 00000000000..ddec96e8792 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_incorrect_relay_pos.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_incorrect_relay_pos.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_innodb_lock_conflict.result b/mysql-test/suite/binlog_encryption/rpl_parallel_innodb_lock_conflict.result new file mode 100644 index 00000000000..1411db16af6 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_innodb_lock_conflict.result @@ -0,0 +1,79 @@ +***MDEV-5914: Parallel replication deadlock due to InnoDB lock conflicts *** +include/master-slave.inc +[connection master] +connection server_2; +SET sql_log_bin=0; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +SET sql_log_bin=1; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; +INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); +connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connection con1; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +UPDATE t4 SET b=NULL WHERE a=6; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con2; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +DELETE FROM t4 WHERE b <= 3; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con1; +connection con2; +SET debug_sync='RESET'; +connection server_2; +include/start_slave.inc +include/stop_slave.inc +SELECT * FROM t4 ORDER BY a; +a b +1 NULL +3 NULL +4 4 +5 NULL +6 NULL +connection server_1; +DELETE FROM t4; +INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); +connection con1; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +INSERT INTO t4 VALUES (7, NULL); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con2; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +DELETE FROM t4 WHERE b <= 3; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con1; +connection con2; +SET debug_sync='RESET'; +connection server_2; +include/start_slave.inc +include/stop_slave.inc +SELECT * FROM t4 ORDER BY a; +a b +1 NULL +3 NULL +4 4 +5 NULL +6 6 +7 NULL +connection server_2; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +disconnect con1; +disconnect con2; +DROP TABLE t4; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_innodb_lock_conflict.test b/mysql-test/suite/binlog_encryption/rpl_parallel_innodb_lock_conflict.test new file mode 100644 index 00000000000..624667d5408 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_innodb_lock_conflict.test @@ -0,0 +1 @@ +--source 
suite/rpl/include/rpl_parallel_innodb_lock_conflict.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_missed_error_handling.result b/mysql-test/suite/binlog_encryption/rpl_parallel_missed_error_handling.result new file mode 100644 index 00000000000..e9d04c02d7a --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_missed_error_handling.result @@ -0,0 +1,65 @@ +*** MDEV-5921: In parallel replication, an error is not correctly signalled to the next transaction *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +connection server_1; +INSERT INTO t3 VALUES (110, 1); +connection server_2; +SELECT * FROM t3 WHERE a >= 110 ORDER BY a; +a b +110 1 +SET sql_log_bin=0; +INSERT INTO t3 VALUES (111, 666); +SET sql_log_bin=1; +connection server_1; +connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +INSERT INTO t3 VALUES (111, 2); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connect con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +INSERT INTO t3 VALUES (112, 3); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con1; +connection con2; +SET debug_sync='RESET'; +connection server_2; +include/wait_for_slave_sql_error.inc [errno=1062] +include/wait_for_slave_sql_to_stop.inc +SELECT * FROM t3 WHERE a >= 110 ORDER BY a; +a b +110 1 +111 666 +SET sql_log_bin=0; +DELETE FROM t3 WHERE a=111 AND b=666; +SET sql_log_bin=1; +START SLAVE SQL_THREAD; +SELECT * FROM t3 WHERE a >= 110 ORDER BY a; +a b +110 1 +111 2 +112 3 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +disconnect con1; +disconnect con2; +DROP TABLE t3; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_missed_error_handling.test b/mysql-test/suite/binlog_encryption/rpl_parallel_missed_error_handling.test new file mode 100644 index 00000000000..c6d09f2196b --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_missed_error_handling.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_missed_error_handling.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_mode.result b/mysql-test/suite/binlog_encryption/rpl_parallel_mode.result new file mode 100644 index 00000000000..313290b1fd2 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_mode.result @@ -0,0 +1,75 @@ +*** MDEV-6676 - test syntax of @@slave_parallel_mode *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; 
+ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +connection server_2; +Parallel_Mode = 'optimistic' +include/stop_slave.inc +SET GLOBAL slave_parallel_mode='aggressive'; +Parallel_Mode = 'aggressive' +SET GLOBAL slave_parallel_mode='conservative'; +Parallel_Mode = 'conservative' +*** MDEV-6676 - test that empty parallel_mode does not replicate in parallel *** +connection server_1; +INSERT INTO t2 VALUES (1040); +include/save_master_gtid.inc +connection server_2; +SET GLOBAL slave_parallel_mode='none'; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,slave_crash_if_parallel_apply"; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; +a +1040 +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +*** MDEV-6676 - test disabling domain-based parallel replication *** +connection server_1; +SET gtid_domain_id = 1; +INSERT INTO t2 VALUES (1041); +INSERT INTO t2 VALUES (1042); +INSERT INTO t2 VALUES (1043); +INSERT INTO t2 VALUES (1044); +INSERT INTO t2 VALUES (1045); +INSERT INTO t2 VALUES (1046); +DELETE FROM t2 WHERE a >= 1041; +SET gtid_domain_id = 2; +INSERT INTO t2 VALUES (1041); +INSERT INTO t2 VALUES (1042); +INSERT INTO t2 VALUES (1043); +INSERT INTO t2 VALUES (1044); +INSERT INTO t2 VALUES (1045); +INSERT INTO t2 VALUES (1046); +SET gtid_domain_id = 0; +include/save_master_gtid.inc +connection server_2; +SET GLOBAL slave_parallel_mode=minimal; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; +a +1040 +1041 +1042 +1043 +1044 +1045 +1046 +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +include/start_slave.inc +connection server_1; +DROP TABLE t2; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_mode.test b/mysql-test/suite/binlog_encryption/rpl_parallel_mode.test new file mode 100644 index 00000000000..93170f61c95 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_mode.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_mode.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_partial_binlog_trans.result b/mysql-test/suite/binlog_encryption/rpl_parallel_partial_binlog_trans.result new file mode 100644 index 00000000000..ab1cac692a0 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_partial_binlog_trans.result @@ -0,0 +1,51 @@ +*** MDEV_6435: Incorrect error handling when query binlogged partially on master with "killed" error *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=1; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t6 (a INT) ENGINE=MyISAM; +CREATE TRIGGER tr AFTER INSERT ON t6 FOR EACH ROW SET @a = 1; +connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connection con1; +SET debug_sync='sp_head_execute_before_loop SIGNAL ready WAIT_FOR cont'; +INSERT INTO t6 VALUES (1), (2), (3); +connection server_1; +SET debug_sync='now WAIT_FOR ready'; +KILL QUERY CONID; +SET debug_sync='now SIGNAL cont'; +connection con1; +ERROR 70100: Query execution was interrupted +SET debug_sync='RESET'; +connection server_1; +SET debug_sync='RESET'; +connection server_2; 
+include/wait_for_slave_sql_error.inc [errno=1317] +STOP SLAVE IO_THREAD; +SET GLOBAL gtid_slave_pos= 'AFTER_ERROR_GTID_POS'; +include/start_slave.inc +connection server_1; +INSERT INTO t6 VALUES (4); +SELECT * FROM t6 ORDER BY a; +a +1 +4 +connection server_2; +SELECT * FROM t6 ORDER BY a; +a +4 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP TABLE t6; +SET DEBUG_SYNC= 'RESET'; +disconnect con1; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_partial_binlog_trans.test b/mysql-test/suite/binlog_encryption/rpl_parallel_partial_binlog_trans.test new file mode 100644 index 00000000000..4f90cf4808e --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_partial_binlog_trans.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_partial_binlog_trans.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_record_gtid_wakeup.result b/mysql-test/suite/binlog_encryption/rpl_parallel_record_gtid_wakeup.result new file mode 100644 index 00000000000..cbe53e4f623 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_record_gtid_wakeup.result @@ -0,0 +1,48 @@ +*** MDEV-7929: record_gtid() for non-transactional event group calls wakeup_subsequent_commits() too early, causing slave hang. *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_record_gtid_serverid_100_sleep'; +connection server_1; +SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +SET @old_server_id= @@SESSION.server_id; +SET SESSION server_id= 100; +SET @commit_id= 10010; +ALTER TABLE t1 COMMENT "Hulubulu!"; +SET SESSION server_id= @old_server_id; +INSERT INTO t3 VALUES (130, 0); +SET @commit_id= 10011; +INSERT INTO t3 VALUES (131, 0); +SET SESSION debug_dbug=@old_dbug; +SELECT * FROM t3 WHERE a >= 130 ORDER BY a; +a b +130 0 +131 0 +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t3 WHERE a >= 130 ORDER BY a; +a b +130 0 +131 0 +include/stop_slave.inc +SET GLOBAL debug_dbug= @old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t1,t3; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_record_gtid_wakeup.test b/mysql-test/suite/binlog_encryption/rpl_parallel_record_gtid_wakeup.test new file mode 100644 index 00000000000..cb3b0dfa119 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_record_gtid_wakeup.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_record_gtid_wakeup.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_retry_deadlock.result b/mysql-test/suite/binlog_encryption/rpl_parallel_retry_deadlock.result new file mode 100644 index 00000000000..1f5a23db848 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_retry_deadlock.result @@ -0,0 +1,192 @@ +*** 
MDEV-7326 Server deadlock in connection with parallel replication *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +include/stop_slave.inc +SET GLOBAL slave_parallel_mode='conservative'; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t6 (a INT) ENGINE=MyISAM; +connection server_2; +connection server_1; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=3; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; +include/start_slave.inc +connection server_1; +SET @old_format= @@SESSION.binlog_format; +SET binlog_format= STATEMENT; +INSERT INTO t1 VALUES (foo(50, +"rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", +"rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; +connection server_1; +INSERT INTO t2 VALUES (foo(50, +"rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", +"rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2")); +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; +connection server_1; +INSERT INTO t1 VALUES (foo(51, +"rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", +"rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; +SET DEBUG_SYNC= "now SIGNAL t1_cont1"; +SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; +connection server_1; +INSERT INTO t1 VALUES (52); +SET BINLOG_FORMAT= @old_format; +SELECT * FROM t2 WHERE a>=50 ORDER BY a; +a +50 +SELECT * FROM t1 WHERE a>=50 ORDER BY a; +a +50 +51 +52 +connection server_2; +SET DEBUG_SYNC= "now SIGNAL prep_cont"; +SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; +SET DEBUG_SYNC= "now SIGNAL t2_cont1"; +SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; +SET DEBUG_SYNC= "now SIGNAL t1_cont2"; +connection server_1; +connection server_2; +SELECT * FROM t2 WHERE a>=50 ORDER BY a; +a +50 +SELECT * FROM t1 WHERE a>=50 ORDER BY a; +a +50 +51 +52 +SET DEBUG_SYNC="reset"; +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +include/start_slave.inc +*** MDEV-7326 Server deadlock in connection with parallel replication *** +connect con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connection server_2; +include/stop_slave.inc +SET @old_parallel_mode= @@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_mode='conservative'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=3; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; 
+include/start_slave.inc +connection server_1; +SET @old_format= @@SESSION.binlog_format; +SET binlog_format= STATEMENT; +INSERT INTO t1 VALUES (foo(60, +"rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", +"rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; +connection server_1; +INSERT INTO t2 VALUES (foo(60, +"rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", +"rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2")); +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; +connection con_temp3; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +SET binlog_format=statement; +INSERT INTO t1 VALUES (foo(61, +"rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", +"rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con_temp4; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +INSERT INTO t6 VALUES (62); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con_temp3; +connection con_temp4; +connection server_1; +SET debug_sync='RESET'; +SET BINLOG_FORMAT= @old_format; +SELECT * FROM t2 WHERE a>=60 ORDER BY a; +a +60 +SELECT * FROM t1 WHERE a>=60 ORDER BY a; +a +60 +61 +SELECT * FROM t6 WHERE a>=60 ORDER BY a; +a +62 +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; +SET DEBUG_SYNC= "now SIGNAL t1_cont1"; +SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; +connection server_2; +SET DEBUG_SYNC= "now SIGNAL prep_cont"; +SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; +SET DEBUG_SYNC= "now SIGNAL t2_cont1"; +SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; +SET DEBUG_SYNC= "now SIGNAL t1_cont2"; +connection server_1; +connection server_2; +SELECT * FROM t2 WHERE a>=60 ORDER BY a; +a +60 +SELECT * FROM t1 WHERE a>=60 ORDER BY a; +a +60 +61 +SELECT * FROM t6 WHERE a>=60 ORDER BY a; +a +62 +SET DEBUG_SYNC="reset"; +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP function foo; +DROP TABLE t1,t2,t6; +disconnect con_temp3; +disconnect con_temp4; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_retry_deadlock.test b/mysql-test/suite/binlog_encryption/rpl_parallel_retry_deadlock.test new file mode 100644 index 00000000000..61c2cb22a75 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_retry_deadlock.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_retry_deadlock.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_rollback_assert.result b/mysql-test/suite/binlog_encryption/rpl_parallel_rollback_assert.result new file mode 100644 index 00000000000..af9c5f14687 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_rollback_assert.result @@ -0,0 +1,45 @@ +*** MDEV-8725: Assertion on ROLLBACK statement in the binary log *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos 
ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +connection server_2; +connection server_1; +BEGIN; +INSERT INTO t2 VALUES (2000); +INSERT INTO t1 VALUES (2000); +INSERT INTO t2 VALUES (2001); +ROLLBACK; +SELECT * FROM t1 WHERE a>=2000 ORDER BY a; +a +2000 +SELECT * FROM t2 WHERE a>=2000 ORDER BY a; +a +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +connection server_1; +INSERT INTO t2 VALUES (2020); +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a>=2000 ORDER BY a; +a +2020 +SELECT * FROM t1 WHERE a>=2000 ORDER BY a; +a +2000 +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t1,t2; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_rollback_assert.test b/mysql-test/suite/binlog_encryption/rpl_parallel_rollback_assert.test new file mode 100644 index 00000000000..181305219be --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_rollback_assert.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_rollback_assert.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_single_grpcmt.result b/mysql-test/suite/binlog_encryption/rpl_parallel_single_grpcmt.result new file mode 100644 index 00000000000..25fc9a189ac --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_single_grpcmt.result @@ -0,0 +1,161 @@ +*** Test that group-committed transactions on the master can replicate in parallel on the slave. *** +include/master-slave.inc +[connection master] +connection server_1; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +include/stop_slave.inc +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7); +connection server_2; +connect con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +BEGIN; +INSERT INTO t3 VALUES (2,102); +connect con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +BEGIN; +INSERT INTO t3 VALUES (4,104); +connect con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +SET binlog_format=statement; +INSERT INTO t3 VALUES (2, foo(12, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connect con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +SET binlog_format=statement; +INSERT INTO t3 VALUES (4, foo(14, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +connect 
con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +SET binlog_format=statement; +INSERT INTO t3 VALUES (6, foo(16, +'group_commit_waiting_for_prior SIGNAL slave_queued3', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; +connection con_temp3; +connection con_temp4; +connection con_temp5; +SET debug_sync='RESET'; +connection server_1; +SELECT * FROM t3 ORDER BY a; +a b +1 1 +2 12 +3 3 +4 14 +5 5 +6 16 +7 7 +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Binlog_checkpoint # # master-bin.000001 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +master-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (2, foo(12, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', +'')) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +master-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (4, foo(14, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', +'')) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +master-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (6, foo(16, +'group_commit_waiting_for_prior SIGNAL slave_queued3', +'')) +master-bin.000001 # Xid # # COMMIT /* XID */ +connection server_2; +SET debug_sync='now WAIT_FOR slave_queued3'; +connection con_temp1; +ROLLBACK; +connection server_2; +SET debug_sync='now WAIT_FOR slave_queued1'; +connection con_temp2; +ROLLBACK; +connection server_2; +SET debug_sync='now WAIT_FOR slave_queued2'; +SET debug_sync='now SIGNAL slave_cont1'; +SELECT * FROM t3 ORDER BY a; +a b +1 1 +2 12 +3 3 +4 14 +5 5 +6 16 +7 7 +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +slave-bin.000001 # Gtid # # GTID #-#-# +slave-bin.000001 # Query # # use `test`; ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB +slave-bin.000001 # Gtid # # GTID #-#-# +slave-bin.000001 # Query # # use `test`; CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB +slave-bin.000001 # Gtid # # BEGIN GTID #-#-# +slave-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7) +slave-bin.000001 # Xid # # COMMIT /* XID */ +slave-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +slave-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (2, foo(12, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', +'')) +slave-bin.000001 # Xid # # COMMIT /* XID */ +slave-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +slave-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (4, foo(14, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', +'')) +slave-bin.000001 # Xid # # COMMIT /* XID */ +slave-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +slave-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (6, foo(16, +'group_commit_waiting_for_prior SIGNAL slave_queued3', +'')) 
+slave-bin.000001 # Xid # # COMMIT /* XID */ +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP function foo; +DROP TABLE t3; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_single_grpcmt.test b/mysql-test/suite/binlog_encryption/rpl_parallel_single_grpcmt.test new file mode 100644 index 00000000000..6a13735c29c --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_single_grpcmt.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_single_grpcmt.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_slave_bgc_kill.result b/mysql-test/suite/binlog_encryption/rpl_parallel_slave_bgc_kill.result new file mode 100644 index 00000000000..320bf0e49f8 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_slave_bgc_kill.result @@ -0,0 +1,323 @@ +*** Test killing slave threads at various wait points *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode='conservative'; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +connect con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +connection server_1; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +*** 1. 
Test killing transaction waiting in commit for previous transaction to commit *** +connection con_temp3; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +INSERT INTO t3 VALUES (31, foo(31, +'commit_before_prepare_ordered WAIT_FOR t2_waiting', +'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con_temp4; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +BEGIN; +INSERT INTO t3 VALUES (32, foo(32, +'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', +'')); +INSERT INTO t3 VALUES (33, foo(33, +'group_commit_waiting_for_prior SIGNAL t2_waiting', +'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); +COMMIT; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +connection con_temp5; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +INSERT INTO t3 VALUES (34, foo(34, +'', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; +connection con_temp3; +connection con_temp4; +connection con_temp5; +connection server_1; +SELECT * FROM t3 WHERE a >= 30 ORDER BY a; +a b +31 31 +32 32 +33 33 +34 34 +SET debug_sync='RESET'; +connection server_2; +SET sql_log_bin=0; +CALL mtr.add_suppression("Query execution was interrupted"); +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +CALL mtr.add_suppression("Slave: Connection was killed"); +SET sql_log_bin=1; +SET debug_sync='now WAIT_FOR t2_query'; +SET debug_sync='now SIGNAL t2_cont'; +SET debug_sync='now WAIT_FOR t1_ready'; +KILL THD_ID; +SET debug_sync='now WAIT_FOR t2_killed'; +SET debug_sync='now SIGNAL t1_cont'; +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] +STOP SLAVE IO_THREAD; +SELECT * FROM t3 WHERE a >= 30 ORDER BY a; +a b +31 31 +SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_1; +INSERT INTO t3 VALUES (39,0); +connection server_2; +include/start_slave.inc +SELECT * FROM t3 WHERE a >= 30 ORDER BY a; +a b +31 31 +32 32 +33 33 +34 34 +39 0 +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +include/start_slave.inc +*** 2. 
Same as (1), but without restarting IO thread after kill of SQL threads *** +connection con_temp3; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +INSERT INTO t3 VALUES (41, foo(41, +'commit_before_prepare_ordered WAIT_FOR t2_waiting', +'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con_temp4; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +BEGIN; +INSERT INTO t3 VALUES (42, foo(42, +'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', +'')); +INSERT INTO t3 VALUES (43, foo(43, +'group_commit_waiting_for_prior SIGNAL t2_waiting', +'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); +COMMIT; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +connection con_temp5; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +INSERT INTO t3 VALUES (44, foo(44, +'', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; +connection con_temp3; +connection con_temp4; +connection con_temp5; +connection server_1; +SELECT * FROM t3 WHERE a >= 40 ORDER BY a; +a b +41 41 +42 42 +43 43 +44 44 +SET debug_sync='RESET'; +connection server_2; +SET debug_sync='now WAIT_FOR t2_query'; +SET debug_sync='now SIGNAL t2_cont'; +SET debug_sync='now WAIT_FOR t1_ready'; +KILL THD_ID; +SET debug_sync='now WAIT_FOR t2_killed'; +SET debug_sync='now SIGNAL t1_cont'; +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] +SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_1; +INSERT INTO t3 VALUES (49,0); +connection server_2; +START SLAVE SQL_THREAD; +SELECT * FROM t3 WHERE a >= 40 ORDER BY a; +a b +41 41 +42 42 +43 43 +44 44 +49 0 +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +include/start_slave.inc +*** 3. 
Same as (2), but not using gtid mode *** +connection server_2; +include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=no; +include/start_slave.inc +connection server_1; +connection con_temp3; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +INSERT INTO t3 VALUES (51, foo(51, +'commit_before_prepare_ordered WAIT_FOR t2_waiting', +'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con_temp4; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +BEGIN; +INSERT INTO t3 VALUES (52, foo(52, +'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', +'')); +INSERT INTO t3 VALUES (53, foo(53, +'group_commit_waiting_for_prior SIGNAL t2_waiting', +'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); +COMMIT; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +connection con_temp5; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +INSERT INTO t3 VALUES (54, foo(54, +'', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; +connection con_temp3; +connection con_temp4; +connection con_temp5; +connection server_1; +SELECT * FROM t3 WHERE a >= 50 ORDER BY a; +a b +51 51 +52 52 +53 53 +54 54 +SET debug_sync='RESET'; +connection server_2; +SET debug_sync='now WAIT_FOR t2_query'; +SET debug_sync='now SIGNAL t2_cont'; +SET debug_sync='now WAIT_FOR t1_ready'; +KILL THD_ID; +SET debug_sync='now WAIT_FOR t2_killed'; +SET debug_sync='now SIGNAL t1_cont'; +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] +SELECT * FROM t3 WHERE a >= 50 ORDER BY a; +a b +51 51 +SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_1; +INSERT INTO t3 VALUES (59,0); +connection server_2; +START SLAVE SQL_THREAD; +SELECT * FROM t3 WHERE a >= 50 ORDER BY a; +a b +51 51 +52 52 +53 53 +54 54 +59 0 +connection server_2; +include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=slave_pos; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP function foo; +DROP TABLE t1,t2,t3; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_slave_bgc_kill.test b/mysql-test/suite/binlog_encryption/rpl_parallel_slave_bgc_kill.test new file mode 100644 index 00000000000..72597f32685 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_slave_bgc_kill.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_slave_bgc_kill.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_stop_on_con_kill.result b/mysql-test/suite/binlog_encryption/rpl_parallel_stop_on_con_kill.result new file mode 100644 index 00000000000..bf0ed9e4374 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_stop_on_con_kill.result @@ -0,0 +1,102 @@ +*** MDEV-8031: Parallel replication stops on "connection killed" error (probably incorrectly handled deadlock kill) *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; 
+include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t3 VALUES (201,0), (202,0); +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_mdev8031'; +connection server_1; +SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +SET @commit_id= 10200; +INSERT INTO t3 VALUES (203, 1); +INSERT INTO t3 VALUES (204, 1); +INSERT INTO t3 VALUES (205, 1); +UPDATE t3 SET b=b+1 WHERE a=201; +UPDATE t3 SET b=b+1 WHERE a=201; +UPDATE t3 SET b=b+1 WHERE a=201; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=203; +UPDATE t3 SET b=b+1 WHERE a=203; +UPDATE t3 SET b=b+1 WHERE a=204; +UPDATE t3 SET b=b+1 WHERE a=204; +UPDATE t3 SET b=b+1 WHERE a=204; +UPDATE t3 SET b=b+1 WHERE a=203; +UPDATE t3 SET b=b+1 WHERE a=205; +UPDATE t3 SET b=b+1 WHERE a=205; +SET SESSION debug_dbug=@old_dbug; +SELECT * FROM t3 WHERE a>=200 ORDER BY a; +a b +201 3 +202 4 +203 4 +204 4 +205 3 +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t3 WHERE a>=200 ORDER BY a; +a b +201 3 +202 4 +203 4 +204 4 +205 3 +include/stop_slave.inc +SET GLOBAL debug_dbug= @old_dbug; +include/start_slave.inc +*** Check getting deadlock killed inside open_binlog() during retry. *** +connection server_2; +include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_retry_event_group_open_binlog_kill'; +SET @old_max= @@GLOBAL.max_relay_log_size; +SET GLOBAL max_relay_log_size= 4096; +connection server_1; +SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +SET @commit_id= 10210; +Omit long queries that cause relaylog rotations and transaction retries... 
+SET SESSION debug_dbug=@old_dbug; +SELECT * FROM t3 WHERE a>=200 ORDER BY a; +a b +201 6 +202 8 +203 7 +204 7 +205 5 +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t3 WHERE a>=200 ORDER BY a; +a b +201 6 +202 8 +203 7 +204 7 +205 5 +include/stop_slave.inc +SET GLOBAL debug_dbug= @old_debg; +SET GLOBAL max_relay_log_size= @old_max; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t3; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_stop_on_con_kill.test b/mysql-test/suite/binlog_encryption/rpl_parallel_stop_on_con_kill.test new file mode 100644 index 00000000000..adec2dc631c --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_stop_on_con_kill.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_stop_on_con_kill.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_stop_slave.result b/mysql-test/suite/binlog_encryption/rpl_parallel_stop_slave.result new file mode 100644 index 00000000000..6c9fd168e73 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_stop_slave.result @@ -0,0 +1,85 @@ +*** Test STOP SLAVE in parallel mode *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode='conservative'; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connect con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +connect con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +include/stop_slave.inc +connection server_1; +SET binlog_direct_non_transactional_updates=0; +SET sql_log_bin=0; +CALL mtr.add_suppression("Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction"); +SET sql_log_bin=1; +BEGIN; +INSERT INTO t2 VALUES (20); +INSERT INTO t1 VALUES (20); +INSERT INTO t2 VALUES (21); +INSERT INTO t3 VALUES (20, 20); +COMMIT; +INSERT INTO t3 VALUES(21, 21); +INSERT INTO t3 VALUES(22, 22); +connection con_temp1; +BEGIN; +INSERT INTO t2 VALUES (21); +connection server_2; +START SLAVE; +connection con_temp2; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; +STOP SLAVE; +connection con_temp1; +SET debug_sync='now WAIT_FOR wait_for_done_waiting'; +ROLLBACK; +connection con_temp2; +SET GLOBAL debug_dbug=@old_dbug; +SET debug_sync='RESET'; +connection server_2; +include/wait_for_slave_to_stop.inc +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +a +20 +SELECT * FROM t2 WHERE a >= 20 ORDER BY a; +a +20 +21 +SELECT * FROM t3 WHERE a >= 20 ORDER BY a; +a b +20 20 +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +a +20 +SELECT * FROM t2 WHERE a >= 20 ORDER BY a; +a +20 +21 +SELECT * FROM t3 WHERE a >= 20 ORDER BY a; +a b +20 20 +21 21 +22 22 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +disconnect 
con_temp1; +disconnect con_temp2; +connection server_1; +DROP TABLE t1,t2,t3; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_stop_slave.test b/mysql-test/suite/binlog_encryption/rpl_parallel_stop_slave.test new file mode 100644 index 00000000000..c59b2805569 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_stop_slave.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_stop_slave.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_binlog_order.result b/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_binlog_order.result new file mode 100644 index 00000000000..f6781f64d30 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_binlog_order.result @@ -0,0 +1,75 @@ +*** MDEV-6775: Wrong binlog order in parallel replication *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; +INSERT INTO t4 VALUES (1,NULL), (3,NULL), (4,4), (5, NULL), (6, 6); +connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,inject_binlog_commit_before_get_LOCK_log"; +SET @old_format=@@GLOBAL.binlog_format; +SET GLOBAL binlog_format=ROW; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +connection con1; +SET @old_format= @@binlog_format; +SET binlog_format= statement; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +UPDATE t4 SET b=NULL WHERE a=6; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con2; +SET @old_format= @@binlog_format; +SET binlog_format= statement; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +DELETE FROM t4 WHERE b <= 3; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con1; +SET binlog_format= @old_format; +connection con2; +SET binlog_format= @old_format; +SET debug_sync='RESET'; +SELECT * FROM t4 ORDER BY a; +a b +1 NULL +3 NULL +4 4 +5 NULL +6 NULL +connection server_2; +include/start_slave.inc +SET debug_sync= 'now WAIT_FOR waiting'; +SELECT * FROM t4 ORDER BY a; +a b +1 NULL +3 NULL +4 4 +5 NULL +6 NULL +SET debug_sync= 'now SIGNAL cont'; +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL binlog_format= @old_format; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP TABLE t4; +SET DEBUG_SYNC= 'RESET'; +disconnect con1; +disconnect con2; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_binlog_order.test b/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_binlog_order.test new file mode 100644 index 00000000000..4141dfce6d6 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_binlog_order.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_wrong_binlog_order.inc diff --git 
a/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_exec_master_pos.result b/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_exec_master_pos.result new file mode 100644 index 00000000000..47cfa5e08e2 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_exec_master_pos.result @@ -0,0 +1,34 @@ +*** MDEV-5938: Exec_master_log_pos not updated at log rotate in parallel replication *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=1; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t5 (a INT PRIMARY KEY, b INT); +INSERT INTO t5 VALUES (1,1); +INSERT INTO t5 VALUES (2,2), (3,8); +INSERT INTO t5 VALUES (4,16); +connection server_2; +test_check +OK +test_check +OK +connection server_1; +FLUSH LOGS; +connection server_2; +test_check +OK +test_check +OK +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t5; +include/rpl_end.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_exec_master_pos.test b/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_exec_master_pos.test new file mode 100644 index 00000000000..34268eb6622 --- /dev/null +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_wrong_exec_master_pos.test @@ -0,0 +1 @@ +--source suite/rpl/include/rpl_parallel_wrong_exec_master_pos.inc diff --git a/mysql-test/suite/multi_source/info_logs.result b/mysql-test/suite/multi_source/info_logs.result index 531a6178cdb..f8519a99964 100644 --- a/mysql-test/suite/multi_source/info_logs.result +++ b/mysql-test/suite/multi_source/info_logs.result @@ -90,16 +90,16 @@ MASTER 2.2 # show all slaves status; Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_tra nsaction s Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos - Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 <read_master_log_pos> relay.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 1 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 -MASTER 2.2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting 
for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> relay-master@00202@002e2.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space2> None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 + Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 <read_master_log_pos> relay.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 1 No optimistic 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 +MASTER 2.2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> relay-master@00202@002e2.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space2> None 0 No 0 No 0 0 2 No optimistic 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 include/wait_for_slave_to_start.inc set default_master_connection = 'MASTER 2.2'; include/wait_for_slave_to_start.inc set default_master_connection = ''; show all slaves status; Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_tra nsaction s Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos - Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 <read_master_log_pos> relay.000004 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 1 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 6 0 60.000 -MASTER 2.2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> relay-master@00202@002e2.000004 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space2> None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 6 0 60.000 + Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 
master-bin.000001 <read_master_log_pos> relay.000004 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 1 No optimistic 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 6 0 60.000 +MASTER 2.2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> relay-master@00202@002e2.000004 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space2> None 0 No 0 No 0 0 2 No optimistic 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 6 0 60.000 # # List of files matching '*info*' pattern # after slave server restart diff --git a/mysql-test/suite/multi_source/reset_slave.result b/mysql-test/suite/multi_source/reset_slave.result index c1d74ab9f3f..c048784e28d 100644 --- a/mysql-test/suite/multi_source/reset_slave.result +++ b/mysql-test/suite/multi_source/reset_slave.result @@ -14,14 +14,14 @@ connection slave; stop slave 'master1'; show slave 'master1' status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups - 127.0.0.1 root MYPORT_1 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-master1.000002 <relay_log_pos> master-bin.000001 No No 0 0 <read_master_log_pos> <relay_log_space> None 0 No NULL No 0 0 1 No conservative 0 NULL 2 1 0 + 127.0.0.1 root MYPORT_1 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-master1.000002 <relay_log_pos> master-bin.000001 No No 0 0 <read_master_log_pos> <relay_log_space> None 0 No NULL No 0 0 1 No optimistic 0 NULL 2 1 0 mysqld-relay-bin-master1.000001 mysqld-relay-bin-master1.000002 mysqld-relay-bin-master1.index reset slave 'master1'; show slave 'master1' status; Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode 
SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups - 127.0.0.1 root MYPORT_1 60 4 <relay_log_pos> No No 0 0 0 <relay_log_space> None 0 No NULL No 0 0 1 No conservative 0 NULL 2 1 0 + 127.0.0.1 root MYPORT_1 60 4 <relay_log_pos> No No 0 0 0 <relay_log_space> None 0 No NULL No 0 0 1 No optimistic 0 NULL 2 1 0 reset slave 'master1' all; show slave 'master1' status; ERROR HY000: There is no master connection 'master1' diff --git a/mysql-test/suite/multi_source/simple.result b/mysql-test/suite/multi_source/simple.result index 93ea1c023bc..922c7555875 100644 --- a/mysql-test/suite/multi_source/simple.result +++ b/mysql-test/suite/multi_source/simple.result @@ -19,8 +19,8 @@ connection master2; connection slave; show all slaves status; Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_tra nsaction s Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos -slave1 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-slave1.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 1 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 -slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-slave2.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 +slave1 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-slave1.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 1 No optimistic 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 +slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-slave2.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 
<read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 2 No optimistic 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 start all slaves; stop slave 'slave1'; show slave 'slave1' status; @@ -70,7 +70,7 @@ Using_Gtid No Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids -Parallel_Mode conservative +Parallel_Mode optimistic SQL_Delay 0 SQL_Remaining_Delay NULL Slave_SQL_Running_State @@ -80,18 +80,18 @@ Slave_Transactional_Groups 0 reset slave 'slave1'; show all slaves status; Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_tra nsaction s Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos -slave1 127.0.0.1 root MYPORT_1 60 4 <relay_log_pos> No No 0 0 0 <relay_log_space1> None 0 No NULL No 0 0 1 No conservative 0 NULL 0 0 0 0 1073741824 7 0 60.000 -slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-slave2.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 +slave1 127.0.0.1 root MYPORT_1 60 4 <relay_log_pos> No No 0 0 0 <relay_log_space1> None 0 No NULL No 0 0 1 No optimistic 0 NULL 0 0 0 0 1073741824 7 0 60.000 +slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-slave2.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 2 No optimistic 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 reset slave 'slave1' all; show all slaves status; Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master 
Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_tra nsaction s Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos -slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-slave2.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 2 No conservative 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 +slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-slave2.000002 <relay_log_pos> master-bin.000001 Yes Yes 0 0 <read_master_log_pos> <relay_log_space1> None 0 No 0 No 0 0 2 No optimistic 0 NULL Slave has read all relay log; waiting for the slave I/O thread to update it 0 0 0 0 1073741824 7 0 60.000 stop all slaves; Warnings: Note 1938 SLAVE 'slave2' stopped show all slaves status; Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_tra nsaction s Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos -slave2 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-slave2.000002 <relay_log_pos> master-bin.000001 No No 0 0 <read_master_log_pos> <relay_log_space1> None 0 No NULL No 0 0 2 No conservative 0 NULL 0 0 0 0 1073741824 7 0 60.000 +slave2 127.0.0.1 root MYPORT_2 60 master-bin.000001 <read_master_log_pos> mysqld-relay-bin-slave2.000002 <relay_log_pos> master-bin.000001 No No 0 0 <read_master_log_pos> <relay_log_space1> None 0 No NULL No 0 0 2 No optimistic 0 NULL 0 0 0 0 1073741824 7 0 60.000 stop all slaves; include/reset_master_slave.inc disconnect slave; diff --git a/mysql-test/suite/rpl/include/rpl_parallel.inc b/mysql-test/suite/rpl/include/rpl_parallel.inc deleted file mode 100644 index 42354343084..00000000000 --- a/mysql-test/suite/rpl/include/rpl_parallel.inc +++ /dev/null @@ -1,2219 +0,0 @@ -# -# This include file is used by more than one test suite -# (currently rpl and binlog_encryption). 
-# Please check all dependent tests after modifying it -# - ---source include/have_innodb.inc ---source include/have_debug.inc ---source include/have_debug_sync.inc ---source include/master-slave.inc - -# Test various aspects of parallel replication. - ---connection server_2 -SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; ---error ER_SLAVE_MUST_STOP -SET GLOBAL slave_parallel_threads=10; ---source include/stop_slave.inc -SET GLOBAL slave_parallel_threads=10; - -# Check that we do not spawn any worker threads when no slave is running. -SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; - -CHANGE MASTER TO master_use_gtid=slave_pos; ---source include/start_slave.inc - -# Check that worker threads get spawned when slave starts. -SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; -# ... and that worker threads get removed when slave stops. ---source include/stop_slave.inc -SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; ---source include/start_slave.inc -SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; - ---echo *** Test long-running query in domain 1 can run in parallel with short queries in domain 0 *** - ---connection server_1 -ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; -CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; -CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; -INSERT INTO t1 VALUES (1); -INSERT INTO t2 VALUES (1); ---save_master_pos - ---connection server_2 ---sync_with_master - -# Block the table t1 to simulate a replicated query taking a long time. ---connect (con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -LOCK TABLE t1 WRITE; - ---connection server_1 -SET gtid_domain_id=1; -# This query will be blocked on the slave until UNLOCK TABLES. -INSERT INTO t1 VALUES (2); -SET gtid_domain_id=0; -# These t2 queries can be replicated in parallel with the prior t1 query, as -# they are in a separate replication domain. -INSERT INTO t2 VALUES (2); -INSERT INTO t2 VALUES (3); -BEGIN; -INSERT INTO t2 VALUES (4); -INSERT INTO t2 VALUES (5); -COMMIT; -INSERT INTO t2 VALUES (6); - ---connection server_2 ---let $wait_condition= SELECT COUNT(*) = 6 FROM t2 ---source include/wait_condition.inc - -SELECT * FROM t2 ORDER by a; - ---connection con_temp1 -SELECT * FROM t1; -UNLOCK TABLES; - ---connection server_2 ---let $wait_condition= SELECT COUNT(*) = 2 FROM t1 ---source include/wait_condition.inc - -SELECT * FROM t1 ORDER BY a; - - ---echo *** Test two transactions in different domains committed in opposite order on slave but in a single group commit. *** ---connection server_2 ---source include/stop_slave.inc - ---connection server_1 -# Use a stored function to inject a debug_sync into the appropriate THD. -# The function does nothing on the master, and on the slave it injects the -# desired debug_sync action(s). 
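A minimal illustrative sketch of the DEBUG_SYNC rendezvous that this function wraps (hypothetical sync-point and signal names; assumes a debug build with the DEBUG_SYNC facility enabled):

# Session A arms a sync point: when it is reached, announce it and pause.
SET debug_sync= 'some_sync_point SIGNAL a_reached WAIT_FOR a_go';
# Session B waits for A to get there, inspects state, then lets A continue.
SET debug_sync= 'now WAIT_FOR a_reached';
SET debug_sync= 'now SIGNAL a_go';

The master-side and slave-side definitions of foo() that the test actually installs follow below.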
-SET sql_log_bin=0; ---delimiter || -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) - RETURNS INT DETERMINISTIC - BEGIN - RETURN x; - END -|| ---delimiter ; -SET sql_log_bin=1; - -SET @old_format= @@SESSION.binlog_format; -SET binlog_format='statement'; -SET gtid_domain_id=1; -INSERT INTO t2 VALUES (foo(10, - 'commit_before_enqueue SIGNAL ready1 WAIT_FOR cont1', - 'commit_after_release_LOCK_prepare_ordered SIGNAL ready2')); - ---connection server_2 -FLUSH LOGS; ---source include/wait_for_binlog_checkpoint.inc -SET sql_log_bin=0; ---delimiter || -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) - RETURNS INT DETERMINISTIC - BEGIN - IF d1 != '' THEN - SET debug_sync = d1; - END IF; - IF d2 != '' THEN - SET debug_sync = d2; - END IF; - RETURN x; - END -|| ---delimiter ; -SET sql_log_bin=1; -SET @old_format=@@GLOBAL.binlog_format; -SET GLOBAL binlog_format=statement; -# We need to restart all parallel threads for the new global setting to -# be copied to the session-level values. -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - -# First make sure the first insert is ready to commit, but not queued yet. -SET debug_sync='now WAIT_FOR ready1'; - ---connection server_1 -SET gtid_domain_id=2; -INSERT INTO t2 VALUES (foo(11, - 'commit_before_enqueue SIGNAL ready3 WAIT_FOR cont3', - 'commit_after_release_LOCK_prepare_ordered SIGNAL ready4 WAIT_FOR cont4')); -SET gtid_domain_id=0; -SELECT * FROM t2 WHERE a >= 10 ORDER BY a; - ---connection server_2 -# Now wait for the second insert to queue itself as the leader, and then -# wait for more commits to queue up. -SET debug_sync='now WAIT_FOR ready3'; -SET debug_sync='now SIGNAL cont3'; -SET debug_sync='now WAIT_FOR ready4'; -# Now allow the first insert to queue up to participate in group commit. -SET debug_sync='now SIGNAL cont1'; -SET debug_sync='now WAIT_FOR ready2'; -# Finally allow the second insert to proceed and do the group commit. -SET debug_sync='now SIGNAL cont4'; - ---let $wait_condition= SELECT COUNT(*) = 2 FROM t2 WHERE a >= 10 ---source include/wait_condition.inc -SELECT * FROM t2 WHERE a >= 10 ORDER BY a; -# The two INSERT transactions should have been committed in opposite order, -# but in the same group commit (seen by precense of cid=# in the SHOW -# BINLOG output). ---let $binlog_file= slave-bin.000002 ---source include/show_binlog_events.inc -FLUSH LOGS; ---source include/wait_for_binlog_checkpoint.inc - -# Restart all the slave parallel worker threads, to clear all debug_sync actions. ---connection server_2 ---source include/stop_slave.inc -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET debug_sync='RESET'; ---source include/start_slave.inc - - ---echo *** Test that group-committed transactions on the master can replicate in parallel on the slave. *** ---connection server_1 -SET debug_sync='RESET'; -FLUSH LOGS; ---source include/wait_for_binlog_checkpoint.inc -CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; -# Create some sentinel rows so that the rows inserted in parallel fall into -# separate gaps and do not cause gap lock conflicts. -INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7); ---save_master_pos ---connection server_2 ---sync_with_master - -# We want to test that the transactions can execute out-of-order on -# the slave, but still end up committing in-order, and in a single -# group commit. -# -# The idea is to group-commit three transactions together on the master: -# A, B, and C. 
On the slave, C will execute the insert first, then A, -# and then B. But B manages to complete before A has time to commit, so -# all three end up committing together. -# -# So we start by setting up some row locks that will block transactions -# A and B from executing, allowing C to run first. - ---connection con_temp1 -BEGIN; -INSERT INTO t3 VALUES (2,102); ---connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,) -BEGIN; -INSERT INTO t3 VALUES (4,104); - -# On the master, queue three INSERT transactions as a single group commit. ---connect (con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,) -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -send INSERT INTO t3 VALUES (2, foo(12, - 'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', - '')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued1'; - ---connect (con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,) -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -send INSERT INTO t3 VALUES (4, foo(14, - 'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', - '')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued2'; - ---connect (con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,) -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -send INSERT INTO t3 VALUES (6, foo(16, - 'group_commit_waiting_for_prior SIGNAL slave_queued3', - '')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; - ---connection con_temp3 -REAP; ---connection con_temp4 -REAP; ---connection con_temp5 -REAP; -SET debug_sync='RESET'; - ---connection server_1 -SELECT * FROM t3 ORDER BY a; ---let $binlog_file= master-bin.000002 ---source include/show_binlog_events.inc - -# First, wait until insert 3 is ready to queue up for group commit, but is -# waiting for insert 2 to commit before it can do so itself. ---connection server_2 -SET debug_sync='now WAIT_FOR slave_queued3'; - -# Next, let insert 1 proceed, and allow it to queue up as the group commit -# leader, but let it wait for insert 2 to also queue up before proceeding. ---connection con_temp1 -ROLLBACK; ---connection server_2 -SET debug_sync='now WAIT_FOR slave_queued1'; - -# Now let insert 2 proceed and queue up. ---connection con_temp2 -ROLLBACK; ---connection server_2 -SET debug_sync='now WAIT_FOR slave_queued2'; -# And finally, we can let insert 1 proceed and do the group commit with all -# three insert transactions together. -SET debug_sync='now SIGNAL slave_cont1'; - -# Wait for the commit to complete and check that all three transactions -# group-committed together (will be seen in the binlog as all three having -# cid=# on their GTID event). ---let $wait_condition= SELECT COUNT(*) = 3 FROM t3 WHERE a IN (2,4,6) ---source include/wait_condition.inc -SELECT * FROM t3 ORDER BY a; ---let $binlog_file= slave-bin.000003 ---source include/show_binlog_events.inc - - ---echo *** Test STOP SLAVE in parallel mode *** ---connection server_2 ---source include/stop_slave.inc -# Respawn all worker threads to clear any left-over debug_sync or other stuff. -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; - ---connection server_1 -# Set up a couple of transactions. 
The first will be blocked halfway -# through on a lock, and while it is blocked we initiate STOP SLAVE. -# We then test that the halfway-initiated transaction is allowed to -# complete, but no subsequent ones. -# We have to use statement-based mode and set -# binlog_direct_non_transactional_updates=0; otherwise the binlog will -# be split into two event groups, one for the MyISAM part and one for the -# InnoDB part. -SET binlog_direct_non_transactional_updates=0; -SET sql_log_bin=0; -CALL mtr.add_suppression("Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction"); -SET sql_log_bin=1; -BEGIN; -INSERT INTO t2 VALUES (20); ---disable_warnings -INSERT INTO t1 VALUES (20); ---enable_warnings -INSERT INTO t2 VALUES (21); -INSERT INTO t3 VALUES (20, 20); -COMMIT; -INSERT INTO t3 VALUES(21, 21); -INSERT INTO t3 VALUES(22, 22); -SET binlog_format=@old_format; ---save_master_pos - -# Start a connection that will block the replicated transaction halfway. ---connection con_temp1 -BEGIN; -INSERT INTO t2 VALUES (21); - ---connection server_2 -START SLAVE; -# Wait for the MyISAM change to be visible, after which replication will wait -# for con_temp1 to roll back. ---let $wait_condition= SELECT COUNT(*) = 1 FROM t1 WHERE a=20 ---source include/wait_condition.inc - ---connection con_temp2 -# Initiate slave stop. It will have to wait for the current event group -# to complete. -# The dbug injection causes debug_sync to signal 'wait_for_done_waiting' -# when the SQL driver thread is ready. -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; -send STOP SLAVE; - ---connection con_temp1 -SET debug_sync='now WAIT_FOR wait_for_done_waiting'; -ROLLBACK; - ---connection con_temp2 -reap; -SET GLOBAL debug_dbug=@old_dbug; -SET debug_sync='RESET'; - ---connection server_2 ---source include/wait_for_slave_to_stop.inc -# We should see the first transaction applied, but not the two others. -SELECT * FROM t1 WHERE a >= 20 ORDER BY a; -SELECT * FROM t2 WHERE a >= 20 ORDER BY a; -SELECT * FROM t3 WHERE a >= 20 ORDER BY a; - ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t1 WHERE a >= 20 ORDER BY a; -SELECT * FROM t2 WHERE a >= 20 ORDER BY a; -SELECT * FROM t3 WHERE a >= 20 ORDER BY a; - - ---connection server_2 -# Respawn all worker threads to clear any left-over debug_sync or other stuff. ---source include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - - ---echo *** Test killing slave threads at various wait points *** ---echo *** 1. Test killing transaction waiting in commit for previous transaction to commit *** - -# Set up three transactions on the master that will be group-committed -# together so they can be replicated in parallel on the slave. 
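The new .result files earlier in this patch force the same kind of grouping more directly on a debug build through the binlog_force_commit_id dbug injection, which (as used in those files) takes the commit id to record from the @commit_id user variable; a minimal sketch along those lines (the id and row values are arbitrary):

SET @old_dbug= @@SESSION.debug_dbug;
SET SESSION debug_dbug= "+d,binlog_force_commit_id";
SET @commit_id= 10010;
INSERT INTO t3 VALUES (101,0);
INSERT INTO t3 VALUES (102,0);
INSERT INTO t3 VALUES (103,0);
SET SESSION debug_dbug= @old_dbug;

The debug_sync choreography below is used instead where the test needs to control exactly when each participant queues up for the group commit.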
---connection con_temp3 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -send INSERT INTO t3 VALUES (31, foo(31, - 'commit_before_prepare_ordered WAIT_FOR t2_waiting', - 'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued1'; - ---connection con_temp4 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -BEGIN; -# This insert is just so we can get T2 to wait while a query is running that we -# can see in SHOW PROCESSLIST so we can get its thread_id to kill later. -INSERT INTO t3 VALUES (32, foo(32, - 'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', - '')); -# This insert sets up debug_sync points so that T2 will tell when it is at its -# wait point where we want to kill it - and when it has been killed. -INSERT INTO t3 VALUES (33, foo(33, - 'group_commit_waiting_for_prior SIGNAL t2_waiting', - 'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); -send COMMIT; - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued2'; - ---connection con_temp5 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -send INSERT INTO t3 VALUES (34, foo(34, - '', - '')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; - ---connection con_temp3 -REAP; ---connection con_temp4 -REAP; ---connection con_temp5 -REAP; - ---connection server_1 -SELECT * FROM t3 WHERE a >= 30 ORDER BY a; -SET debug_sync='RESET'; - ---connection server_2 -SET sql_log_bin=0; -CALL mtr.add_suppression("Query execution was interrupted"); -CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); -CALL mtr.add_suppression("Slave: Connection was killed"); -SET sql_log_bin=1; -# Wait until T2 is inside executing its insert of 32, then find it in SHOW -# PROCESSLIST to know its thread id for KILL later. -SET debug_sync='now WAIT_FOR t2_query'; ---let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(32%' AND INFO NOT LIKE '%LIKE%'` -SET debug_sync='now SIGNAL t2_cont'; - -# Wait until T2 has entered its wait for T1 to commit, and T1 has -# progressed into its commit phase. -SET debug_sync='now WAIT_FOR t1_ready'; - -# Now kill the transaction T2. ---replace_result $thd_id THD_ID -eval KILL $thd_id; - -# Wait until T2 has reacted on the kill. -SET debug_sync='now WAIT_FOR t2_killed'; - -# Now we can allow T1 to proceed. -SET debug_sync='now SIGNAL t1_cont'; - ---let $slave_sql_errno= 1317,1927,1964 ---source include/wait_for_slave_sql_error.inc -STOP SLAVE IO_THREAD; -SELECT * FROM t3 WHERE a >= 30 ORDER BY a; - -# Now we have to disable the debug_sync statements, so they do not trigger -# when the events are retried. -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET sql_log_bin=0; -DROP FUNCTION foo; ---delimiter || -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) - RETURNS INT DETERMINISTIC - BEGIN - RETURN x; - END -|| ---delimiter ; -SET sql_log_bin=1; - ---connection server_1 -INSERT INTO t3 VALUES (39,0); ---save_master_pos - ---connection server_2 ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t3 WHERE a >= 30 ORDER BY a; -# Restore the foo() function. 
-SET sql_log_bin=0; -DROP FUNCTION foo; ---delimiter || -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) - RETURNS INT DETERMINISTIC - BEGIN - IF d1 != '' THEN - SET debug_sync = d1; - END IF; - IF d2 != '' THEN - SET debug_sync = d2; - END IF; - RETURN x; - END -|| ---delimiter ; -SET sql_log_bin=1; - - ---connection server_2 -# Respawn all worker threads to clear any left-over debug_sync or other stuff. ---source include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - - ---echo *** 2. Same as (1), but without restarting IO thread after kill of SQL threads *** - -# Set up three transactions on the master that will be group-committed -# together so they can be replicated in parallel on the slave. ---connection con_temp3 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -send INSERT INTO t3 VALUES (41, foo(41, - 'commit_before_prepare_ordered WAIT_FOR t2_waiting', - 'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued1'; - ---connection con_temp4 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -BEGIN; -# This insert is just so we can get T2 to wait while a query is running that we -# can see in SHOW PROCESSLIST so we can get its thread_id to kill later. -INSERT INTO t3 VALUES (42, foo(42, - 'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', - '')); -# This insert sets up debug_sync points so that T2 will tell when it is at its -# wait point where we want to kill it - and when it has been killed. -INSERT INTO t3 VALUES (43, foo(43, - 'group_commit_waiting_for_prior SIGNAL t2_waiting', - 'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); -send COMMIT; - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued2'; - ---connection con_temp5 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -send INSERT INTO t3 VALUES (44, foo(44, - '', - '')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; - ---connection con_temp3 -REAP; ---connection con_temp4 -REAP; ---connection con_temp5 -REAP; - ---connection server_1 -SELECT * FROM t3 WHERE a >= 40 ORDER BY a; -SET debug_sync='RESET'; - ---connection server_2 -# Wait until T2 is inside executing its insert of 42, then find it in SHOW -# PROCESSLIST to know its thread id for KILL later. -SET debug_sync='now WAIT_FOR t2_query'; ---let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(42%' AND INFO NOT LIKE '%LIKE%'` -SET debug_sync='now SIGNAL t2_cont'; - -# Wait until T2 has entered its wait for T1 to commit, and T1 has -# progressed into its commit phase. -SET debug_sync='now WAIT_FOR t1_ready'; - -# Now kill the transaction T2. ---replace_result $thd_id THD_ID -eval KILL $thd_id; - -# Wait until T2 has reacted on the kill. -SET debug_sync='now WAIT_FOR t2_killed'; - -# Now we can allow T1 to proceed. -SET debug_sync='now SIGNAL t1_cont'; - ---let $slave_sql_errno= 1317,1927,1964 ---source include/wait_for_slave_sql_error.inc - -# Now we have to disable the debug_sync statements, so they do not trigger -# when the events are retried. 
-SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET sql_log_bin=0; -DROP FUNCTION foo; ---delimiter || -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) - RETURNS INT DETERMINISTIC - BEGIN - RETURN x; - END -|| ---delimiter ; -SET sql_log_bin=1; - ---connection server_1 -INSERT INTO t3 VALUES (49,0); ---save_master_pos - ---connection server_2 -START SLAVE SQL_THREAD; ---sync_with_master -SELECT * FROM t3 WHERE a >= 40 ORDER BY a; -# Restore the foo() function. -SET sql_log_bin=0; -DROP FUNCTION foo; ---delimiter || -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) - RETURNS INT DETERMINISTIC - BEGIN - IF d1 != '' THEN - SET debug_sync = d1; - END IF; - IF d2 != '' THEN - SET debug_sync = d2; - END IF; - RETURN x; - END -|| ---delimiter ; -SET sql_log_bin=1; - - ---connection server_2 -# Respawn all worker threads to clear any left-over debug_sync or other stuff. ---source include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - - ---echo *** 3. Same as (2), but not using gtid mode *** - ---connection server_2 ---source include/stop_slave.inc -CHANGE MASTER TO master_use_gtid=no; ---source include/start_slave.inc - ---connection server_1 -# Set up three transactions on the master that will be group-committed -# together so they can be replicated in parallel on the slave. ---connection con_temp3 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -send INSERT INTO t3 VALUES (51, foo(51, - 'commit_before_prepare_ordered WAIT_FOR t2_waiting', - 'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued1'; - ---connection con_temp4 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -BEGIN; -# This insert is just so we can get T2 to wait while a query is running that we -# can see in SHOW PROCESSLIST so we can get its thread_id to kill later. -INSERT INTO t3 VALUES (52, foo(52, - 'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', - '')); -# This insert sets up debug_sync points so that T2 will tell when it is at its -# wait point where we want to kill it - and when it has been killed. -INSERT INTO t3 VALUES (53, foo(53, - 'group_commit_waiting_for_prior SIGNAL t2_waiting', - 'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); -send COMMIT; - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued2'; - ---connection con_temp5 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -send INSERT INTO t3 VALUES (54, foo(54, - '', - '')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; - ---connection con_temp3 -REAP; ---connection con_temp4 -REAP; ---connection con_temp5 -REAP; - ---connection server_1 -SELECT * FROM t3 WHERE a >= 50 ORDER BY a; -SET debug_sync='RESET'; - ---connection server_2 -# Wait until T2 is inside executing its insert of 52, then find it in SHOW -# PROCESSLIST to know its thread id for KILL later. 
-SET debug_sync='now WAIT_FOR t2_query'; ---let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(52%' AND INFO NOT LIKE '%LIKE%'` -SET debug_sync='now SIGNAL t2_cont'; - -# Wait until T2 has entered its wait for T1 to commit, and T1 has -# progressed into its commit phase. -SET debug_sync='now WAIT_FOR t1_ready'; - -# Now kill the transaction T2. ---replace_result $thd_id THD_ID -eval KILL $thd_id; - -# Wait until T2 has reacted on the kill. -SET debug_sync='now WAIT_FOR t2_killed'; - -# Now we can allow T1 to proceed. -SET debug_sync='now SIGNAL t1_cont'; - ---let $slave_sql_errno= 1317,1927,1964 ---source include/wait_for_slave_sql_error.inc -SELECT * FROM t3 WHERE a >= 50 ORDER BY a; - -# Now we have to disable the debug_sync statements, so they do not trigger -# when the events are retried. -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET sql_log_bin=0; -DROP FUNCTION foo; ---delimiter || -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) - RETURNS INT DETERMINISTIC - BEGIN - RETURN x; - END -|| ---delimiter ; -SET sql_log_bin=1; - ---connection server_1 -INSERT INTO t3 VALUES (59,0); ---save_master_pos - ---connection server_2 -START SLAVE SQL_THREAD; ---sync_with_master -SELECT * FROM t3 WHERE a >= 50 ORDER BY a; -# Restore the foo() function. -SET sql_log_bin=0; -DROP FUNCTION foo; ---delimiter || -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) - RETURNS INT DETERMINISTIC - BEGIN - IF d1 != '' THEN - SET debug_sync = d1; - END IF; - IF d2 != '' THEN - SET debug_sync = d2; - END IF; - RETURN x; - END -|| ---delimiter ; -SET sql_log_bin=1; - - ---source include/stop_slave.inc -CHANGE MASTER TO master_use_gtid=slave_pos; ---source include/start_slave.inc - ---connection server_2 -# Respawn all worker threads to clear any left-over debug_sync or other stuff. ---source include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=4; ---source include/start_slave.inc - - ---echo *** 4. Test killing thread that is waiting to start transaction until previous transaction commits *** - -# We set up four transactions T1, T2, T3, and T4 on the master. T2, T3, and T4 -# can run in parallel with each other (same group commit and commit id), -# but not in parallel with T1. -# -# We use four worker threads, each Ti will be queued on each their own -# worker thread. We will delay T1 commit, T3 will wait for T1 to begin -# commit before it can start. We will kill T3 during this wait, and -# check that everything works correctly. -# -# It is rather tricky to get the correct thread id of the worker to kill. -# We start by injecting four dummy transactions in a debug_sync-controlled -# manner to be able to get known thread ids for the workers in a pool with -# just 4 worker threads. Then we let in each of the real test transactions -# T1-T4 one at a time in a way which allows us to know which transaction -# ends up with which thread id. - ---connection server_1 -SET binlog_format=statement; -SET gtid_domain_id=2; -BEGIN; -# This debug_sync will linger on and be used to control T4 later. 
-INSERT INTO t3 VALUES (70, foo(70, - 'rpl_parallel_start_waiting_for_prior SIGNAL t4_waiting', '')); -INSERT INTO t3 VALUES (60, foo(60, - 'ha_write_row_end SIGNAL d2_query WAIT_FOR d2_cont2', - 'rpl_parallel_end_of_group SIGNAL d2_done WAIT_FOR d2_cont')); -COMMIT; -SET gtid_domain_id=0; - ---connection server_2 -SET debug_sync='now WAIT_FOR d2_query'; ---let $d2_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(60%' AND INFO NOT LIKE '%LIKE%'` - ---connection server_1 -SET gtid_domain_id=1; -BEGIN; -# These debug_sync's will linger on and be used to control T3 later. -INSERT INTO t3 VALUES (61, foo(61, - 'rpl_parallel_start_waiting_for_prior SIGNAL t3_waiting', - 'rpl_parallel_start_waiting_for_prior_killed SIGNAL t3_killed')); -INSERT INTO t3 VALUES (62, foo(62, - 'ha_write_row_end SIGNAL d1_query WAIT_FOR d1_cont2', - 'rpl_parallel_end_of_group SIGNAL d1_done WAIT_FOR d1_cont')); -COMMIT; -SET gtid_domain_id=0; - ---connection server_2 -SET debug_sync='now WAIT_FOR d1_query'; ---let $d1_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(62%' AND INFO NOT LIKE '%LIKE%'` - ---connection server_1 -SET gtid_domain_id=0; -INSERT INTO t3 VALUES (63, foo(63, - 'ha_write_row_end SIGNAL d0_query WAIT_FOR d0_cont2', - 'rpl_parallel_end_of_group SIGNAL d0_done WAIT_FOR d0_cont')); - ---connection server_2 -SET debug_sync='now WAIT_FOR d0_query'; ---let $d0_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(63%' AND INFO NOT LIKE '%LIKE%'` - ---connection server_1 -SET gtid_domain_id=3; -BEGIN; -# These debug_sync's will linger on and be used to control T2 later. -INSERT INTO t3 VALUES (68, foo(68, - 'rpl_parallel_start_waiting_for_prior SIGNAL t2_waiting', '')); -INSERT INTO t3 VALUES (69, foo(69, - 'ha_write_row_end SIGNAL d3_query WAIT_FOR d3_cont2', - 'rpl_parallel_end_of_group SIGNAL d3_done WAIT_FOR d3_cont')); -COMMIT; -SET gtid_domain_id=0; - ---connection server_2 -SET debug_sync='now WAIT_FOR d3_query'; ---let $d3_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(69%' AND INFO NOT LIKE '%LIKE%'` - -SET debug_sync='now SIGNAL d2_cont2'; -SET debug_sync='now WAIT_FOR d2_done'; -SET debug_sync='now SIGNAL d1_cont2'; -SET debug_sync='now WAIT_FOR d1_done'; -SET debug_sync='now SIGNAL d0_cont2'; -SET debug_sync='now WAIT_FOR d0_done'; -SET debug_sync='now SIGNAL d3_cont2'; -SET debug_sync='now WAIT_FOR d3_done'; - -# Now prepare the real transactions T1, T2, T3, T4 on the master. - ---connection con_temp3 -# Create transaction T1. -SET binlog_format=statement; -INSERT INTO t3 VALUES (64, foo(64, - 'rpl_parallel_before_mark_start_commit SIGNAL t1_waiting WAIT_FOR t1_cont', '')); - -# Create transaction T2, as a group commit leader on the master. -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2 WAIT_FOR master_cont2'; -send INSERT INTO t3 VALUES (65, foo(65, '', '')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued2'; - ---connection con_temp4 -# Create transaction T3, participating in T2's group commit. -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -send INSERT INTO t3 VALUES (66, foo(66, '', '')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued3'; - ---connection con_temp5 -# Create transaction T4, participating in group commit with T2 and T3. 
-SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued4'; -send INSERT INTO t3 VALUES (67, foo(67, '', '')); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued4'; -SET debug_sync='now SIGNAL master_cont2'; - ---connection con_temp3 -REAP; ---connection con_temp4 -REAP; ---connection con_temp5 -REAP; - ---connection server_1 -SELECT * FROM t3 WHERE a >= 60 ORDER BY a; -SET debug_sync='RESET'; - ---connection server_2 -# Now we have the four transactions pending for replication on the slave. -# Let them be queued for our three worker threads in a controlled fashion. -# We put them at a stage where T1 is delayed and T3 is waiting for T1 to -# commit before T3 can start. Then we kill T3. - -# Make the worker D0 free, and wait for T1 to be queued in it. -SET debug_sync='now SIGNAL d0_cont'; -SET debug_sync='now WAIT_FOR t1_waiting'; - -# Make the worker D3 free, and wait for T2 to be queued in it. -SET debug_sync='now SIGNAL d3_cont'; -SET debug_sync='now WAIT_FOR t2_waiting'; - -# Now release worker D1, and wait for T3 to be queued in it. -# T3 will wait for T1 to commit before it can start. -SET debug_sync='now SIGNAL d1_cont'; -SET debug_sync='now WAIT_FOR t3_waiting'; - -# Release worker D2. Wait for T4 to be queued, so we are sure it has -# received the debug_sync signal (else we might overwrite it with the -# next debug_sync). -SET debug_sync='now SIGNAL d2_cont'; -SET debug_sync='now WAIT_FOR t4_waiting'; - -# Now we kill the waiting transaction T3 in worker D1. ---replace_result $d1_thd_id THD_ID -eval KILL $d1_thd_id; - -# Wait until T3 has reacted on the kill. -SET debug_sync='now WAIT_FOR t3_killed'; - -# Now we can allow T1 to proceed. -SET debug_sync='now SIGNAL t1_cont'; - ---let $slave_sql_errno= 1317,1927,1964 ---source include/wait_for_slave_sql_error.inc -STOP SLAVE IO_THREAD; -# Since T2, T3, and T4 run in parallel, we can not be sure if T2 will have time -# to commit or not before the stop. However, T1 should commit, and T3/T4 may -# not have committed. (After slave restart we check that all become committed -# eventually). -SELECT * FROM t3 WHERE a >= 60 AND a != 65 ORDER BY a; - -# Now we have to disable the debug_sync statements, so they do not trigger -# when the events are retried. -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET sql_log_bin=0; -DROP FUNCTION foo; ---delimiter || -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) - RETURNS INT DETERMINISTIC - BEGIN - RETURN x; - END -|| ---delimiter ; -SET sql_log_bin=1; - ---connection server_1 -UPDATE t3 SET b=b+1 WHERE a=60; ---save_master_pos - ---connection server_2 ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t3 WHERE a >= 60 ORDER BY a; -# Restore the foo() function. -SET sql_log_bin=0; -DROP FUNCTION foo; ---delimiter || -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) - RETURNS INT DETERMINISTIC - BEGIN - IF d1 != '' THEN - SET debug_sync = d1; - END IF; - IF d2 != '' THEN - SET debug_sync = d2; - END IF; - RETURN x; - END -|| ---delimiter ; -SET sql_log_bin=1; - ---connection server_2 -# Respawn all worker threads to clear any left-over debug_sync or other stuff. ---source include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - - ---echo *** 5. 
Test killing thread that is waiting for queue of max length to shorten *** - -# Find the thread id of the driver SQL thread that we want to kill. ---let $wait_condition= SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%Slave has read all relay log%' ---source include/wait_condition.inc ---let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%Slave has read all relay log%'` -SET @old_max_queued= @@GLOBAL.slave_parallel_max_queued; -SET GLOBAL slave_parallel_max_queued=9000; - ---connection server_1 ---let bigstring= `SELECT REPEAT('x', 10000)` -SET binlog_format=statement; -# Create an event that will wait to be signalled. -INSERT INTO t3 VALUES (80, foo(0, - 'ha_write_row_end SIGNAL query_waiting WAIT_FOR query_cont', '')); - ---connection server_2 -SET debug_sync='now WAIT_FOR query_waiting'; -# Inject that the SQL driver thread will signal `wait_queue_ready' to debug_sync -# as it goes to wait for the event queue to become smaller than the value of -# @@slave_parallel_max_queued. -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,rpl_parallel_wait_queue_max"; - ---connection server_1 ---disable_query_log -# Create an event that will fill up the queue. -# The Xid event at the end of the event group will have to wait for the Query -# event with the INSERT to drain so the queue becomes shorter. However that in -# turn waits for the prior event group to continue. -eval INSERT INTO t3 VALUES (81, LENGTH('$bigstring')); ---enable_query_log -SELECT * FROM t3 WHERE a >= 80 ORDER BY a; - ---connection server_2 -SET debug_sync='now WAIT_FOR wait_queue_ready'; - ---replace_result $thd_id THD_ID -eval KILL $thd_id; - -SET debug_sync='now WAIT_FOR wait_queue_killed'; -SET debug_sync='now SIGNAL query_cont'; - ---let $slave_sql_errno= 1317,1927,1964 ---source include/wait_for_slave_sql_error.inc -STOP SLAVE IO_THREAD; - -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL slave_parallel_max_queued= @old_max_queued; - ---connection server_1 -INSERT INTO t3 VALUES (82,0); -SET binlog_format=@old_format; ---save_master_pos - ---connection server_2 -SET debug_sync='RESET'; ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t3 WHERE a >= 80 ORDER BY a; - - ---connection server_2 ---source include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - ---echo *** MDEV-5788 Incorrect free of rgi->deferred_events in parallel replication *** - ---connection server_2 -# Use just two worker threads, so we are sure to get the rpl_group_info added -# to the free list, which is what triggered the bug. 
---source include/stop_slave.inc -SET GLOBAL replicate_ignore_table="test.t3"; -SET GLOBAL slave_parallel_threads=2; ---source include/start_slave.inc - ---connection server_1 -INSERT INTO t3 VALUES (100, rand()); -INSERT INTO t3 VALUES (101, rand()); - ---save_master_pos - ---connection server_2 ---sync_with_master - ---connection server_1 -INSERT INTO t3 VALUES (102, rand()); -INSERT INTO t3 VALUES (103, rand()); -INSERT INTO t3 VALUES (104, rand()); -INSERT INTO t3 VALUES (105, rand()); - ---save_master_pos - ---connection server_2 ---sync_with_master ---source include/stop_slave.inc -SET GLOBAL replicate_ignore_table=""; ---source include/start_slave.inc - ---connection server_1 -INSERT INTO t3 VALUES (106, rand()); -INSERT INTO t3 VALUES (107, rand()); ---save_master_pos - ---connection server_2 ---sync_with_master ---replace_column 2 # -SELECT * FROM t3 WHERE a >= 100 ORDER BY a; - - ---echo *** MDEV-5921: In parallel replication, an error is not correctly signalled to the next transaction *** - ---connection server_2 ---source include/stop_slave.inc -SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - ---connection server_1 -INSERT INTO t3 VALUES (110, 1); ---save_master_pos - ---connection server_2 ---sync_with_master -SELECT * FROM t3 WHERE a >= 110 ORDER BY a; -# Inject a duplicate key error. -SET sql_log_bin=0; -INSERT INTO t3 VALUES (111, 666); -SET sql_log_bin=1; - ---connection server_1 - -# Create a group commit with two inserts, the first one conflicts with a row on the slave ---connect (con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,) -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -send INSERT INTO t3 VALUES (111, 2); ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued1'; - ---connect (con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,) -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -send INSERT INTO t3 VALUES (112, 3); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; - ---connection con1 -REAP; ---connection con2 -REAP; -SET debug_sync='RESET'; ---save_master_pos - ---connection server_2 ---let $slave_sql_errno= 1062 ---source include/wait_for_slave_sql_error.inc ---source include/wait_for_slave_sql_to_stop.inc -# We should not see the row (112,3) here, it should be rolled back due to -# error signal from the prior transaction. -SELECT * FROM t3 WHERE a >= 110 ORDER BY a; -SET sql_log_bin=0; -DELETE FROM t3 WHERE a=111 AND b=666; -SET sql_log_bin=1; -START SLAVE SQL_THREAD; ---sync_with_master -SELECT * FROM t3 WHERE a >= 110 ORDER BY a; - - ---echo ***MDEV-5914: Parallel replication deadlock due to InnoDB lock conflicts *** ---connection server_2 ---source include/stop_slave.inc - ---connection server_1 -CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; -INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); - -# Create a group commit with UPDATE and DELETE, in that order. -# The bug was that while the UPDATE's row lock does not block the DELETE, the -# DELETE's gap lock _does_ block the UPDATE. This could cause a deadlock -# on the slave. 
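The lock asymmetry described above can also be seen outside replication, with two ordinary sessions on a single server. A hedged sketch against the same t4/b_idx definitions (an illustration of the claim, not a guaranteed reproduction; the exact gap-lock behaviour depends on isolation level and data):

  # session 1
  BEGIN;
  DELETE FROM t4 WHERE b <= 3;      # takes gap locks on the secondary index b_idx
  # session 2 -- blocks until session 1 commits
  BEGIN;
  UPDATE t4 SET b=NULL WHERE a=6;   # must modify b_idx and runs into the gap lock
  # session 1
  COMMIT;                           # session 2 can now proceed

Run in the opposite order (UPDATE first, DELETE second) the DELETE is not blocked, which is exactly the ordering difference the slave has to cope with.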
---connection con1 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -send UPDATE t4 SET b=NULL WHERE a=6; ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued1'; - ---connection con2 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -send DELETE FROM t4 WHERE b <= 3; - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; - ---connection con1 -REAP; ---connection con2 -REAP; -SET debug_sync='RESET'; ---save_master_pos - ---connection server_2 ---source include/start_slave.inc ---sync_with_master ---source include/stop_slave.inc - -SELECT * FROM t4 ORDER BY a; - - -# Another example, this one with INSERT vs. DELETE ---connection server_1 -DELETE FROM t4; -INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); - -# Create a group commit with INSERT and DELETE, in that order. -# The bug was that while the INSERT's insert intention lock does not block -# the DELETE, the DELETE's gap lock _does_ block the INSERT. This could cause -# a deadlock on the slave. ---connection con1 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -send INSERT INTO t4 VALUES (7, NULL); ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued1'; - ---connection con2 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -send DELETE FROM t4 WHERE b <= 3; - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; - ---connection con1 -REAP; ---connection con2 -REAP; -SET debug_sync='RESET'; ---save_master_pos - ---connection server_2 ---source include/start_slave.inc ---sync_with_master ---source include/stop_slave.inc - -SELECT * FROM t4 ORDER BY a; - - -# MDEV-6549, failing to update gtid_slave_pos for a transaction that was retried. -# The problem was that when a transaction updates the mysql.gtid_slave_pos -# table, it clears the flag that marks that there is a GTID position that -# needs to be updated. Then, if the transaction got killed after that due -# to a deadlock, the subsequent retry would fail to notice that the GTID needs -# to be recorded in gtid_slave_pos. -# -# (In the original bug report, the symptom was an assertion; this was however -# just a side effect of the missing update of gtid_slave_pos, which also -# happened to cause a missing clear of OPTION_GTID_BEGIN). ---connection server_1 -DELETE FROM t4; -INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); - -# Create two transactions that can run in parallel on the slave but cause -# a deadlock if the second runs before the first. ---connection con1 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -send UPDATE t4 SET b=NULL WHERE a=6; ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued1'; - ---connection con2 -# Must use statement-based binlogging. Otherwise the transaction will not be -# binlogged at all, as it modifies no rows. 
-SET @old_format= @@SESSION.binlog_format; -SET binlog_format='statement'; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -send DELETE FROM t4 WHERE b <= 1; - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; - ---connection con1 -REAP; ---connection con2 -REAP; -SET @old_format=@@GLOBAL.binlog_format; -SET debug_sync='RESET'; ---save_master_pos ---let $last_gtid= `SELECT @@last_gtid` - ---connection server_2 -# Disable the usual skip of gap locks for transactions that are run in -# parallel, using DBUG. This allows the deadlock to occur, and this in turn -# triggers a retry of the second transaction, and the code that was buggy and -# caused the gtid_slave_pos update to be skipped in the retry. -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,disable_thd_need_ordering_with"; ---source include/start_slave.inc ---sync_with_master -SET GLOBAL debug_dbug=@old_dbug; - -SELECT * FROM t4 ORDER BY a; -# Check that the GTID of the second transaction was correctly recorded in -# gtid_slave_pos, in the variable as well as in the table. ---replace_result $last_gtid GTID -eval SET @last_gtid= '$last_gtid'; -SELECT IF(@@gtid_slave_pos LIKE CONCAT('%',@last_gtid,'%'), "GTID found ok", - CONCAT("GTID ", @last_gtid, " not found in gtid_slave_pos=", @@gtid_slave_pos)) - AS result; -SELECT "ROW FOUND" AS `Is the row found?` - FROM mysql.gtid_slave_pos - WHERE CONCAT(domain_id, "-", server_id, "-", seq_no) = @last_gtid; - - ---echo *** MDEV-5938: Exec_master_log_pos not updated at log rotate in parallel replication *** ---connection server_2 ---source include/stop_slave.inc -SET GLOBAL slave_parallel_threads=1; -SET DEBUG_SYNC= 'RESET'; ---source include/start_slave.inc - ---connection server_1 -CREATE TABLE t5 (a INT PRIMARY KEY, b INT); -INSERT INTO t5 VALUES (1,1); -INSERT INTO t5 VALUES (2,2), (3,8); -INSERT INTO t5 VALUES (4,16); ---save_master_pos - ---connection server_2 ---sync_with_master -let $io_file= query_get_value(SHOW SLAVE STATUS, Master_Log_File, 1); -let $io_pos= query_get_value(SHOW SLAVE STATUS, Read_Master_Log_Pos, 1); -let $sql_file= query_get_value(SHOW SLAVE STATUS, Relay_Master_Log_File, 1); -let $sql_pos= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1); ---disable_query_log -eval SELECT IF('$io_file' = '$sql_file', "OK", "Not ok, $io_file <> $sql_file") AS test_check; -eval SELECT IF('$io_pos' = '$sql_pos', "OK", "Not ok, $io_pos <> $sql_pos") AS test_check; ---enable_query_log - ---connection server_1 -FLUSH LOGS; ---source include/wait_for_binlog_checkpoint.inc ---save_master_pos - ---connection server_2 ---sync_with_master -let $io_file= query_get_value(SHOW SLAVE STATUS, Master_Log_File, 1); -let $io_pos= query_get_value(SHOW SLAVE STATUS, Read_Master_Log_Pos, 1); -let $sql_file= query_get_value(SHOW SLAVE STATUS, Relay_Master_Log_File, 1); -let $sql_pos= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1); ---disable_query_log -eval SELECT IF('$io_file' = '$sql_file', "OK", "Not ok, $io_file <> $sql_file") AS test_check; -eval SELECT IF('$io_pos' = '$sql_pos', "OK", "Not ok, $io_pos <> $sql_pos") AS test_check; ---enable_query_log - - ---echo *** MDEV_6435: Incorrect error handling when query binlogged partially on master with "killed" error *** - ---connection server_1 -CREATE TABLE t6 (a INT) ENGINE=MyISAM; -CREATE TRIGGER tr AFTER INSERT ON t6 FOR EACH ROW SET @a = 1; - ---connection con1 -SET @old_format= @@binlog_format; -SET binlog_format= 
statement; ---let $conid = `SELECT CONNECTION_ID()` -SET debug_sync='sp_head_execute_before_loop SIGNAL ready WAIT_FOR cont'; -send INSERT INTO t6 VALUES (1), (2), (3); - ---connection server_1 -SET debug_sync='now WAIT_FOR ready'; ---replace_result $conid CONID -eval KILL QUERY $conid; -SET debug_sync='now SIGNAL cont'; - ---connection con1 ---error ER_QUERY_INTERRUPTED ---reap -SET binlog_format= @old_format; -SET debug_sync='RESET'; ---let $after_error_gtid_pos= `SELECT @@gtid_binlog_pos` - ---connection server_1 -SET debug_sync='RESET'; - - ---connection server_2 ---let $slave_sql_errno= 1317 ---source include/wait_for_slave_sql_error.inc -STOP SLAVE IO_THREAD; ---replace_result $after_error_gtid_pos AFTER_ERROR_GTID_POS -eval SET GLOBAL gtid_slave_pos= '$after_error_gtid_pos'; ---source include/start_slave.inc - ---connection server_1 -INSERT INTO t6 VALUES (4); -SELECT * FROM t6 ORDER BY a; ---save_master_pos - ---connection server_2 ---sync_with_master -SELECT * FROM t6 ORDER BY a; - - ---echo *** MDEV-6551: Some replication errors are ignored if slave_parallel_threads > 0 *** - ---connection server_1 -INSERT INTO t2 VALUES (31); ---let $gtid1= `SELECT @@LAST_GTID` ---source include/save_master_gtid.inc - ---connection server_2 ---source include/sync_with_master_gtid.inc ---source include/stop_slave.inc -SET GLOBAL slave_parallel_threads= 0; ---source include/start_slave.inc - -# Force a duplicate key error on the slave. -SET sql_log_bin= 0; -INSERT INTO t2 VALUES (32); -SET sql_log_bin= 1; - ---connection server_1 -INSERT INTO t2 VALUES (32); ---let $gtid2= `SELECT @@LAST_GTID` -# Rotate the binlog; the bug is triggered when the master binlog file changes -# after the event group that causes the duplicate key error. -FLUSH LOGS; -INSERT INTO t2 VALUES (33); -INSERT INTO t2 VALUES (34); -SELECT * FROM t2 WHERE a >= 30 ORDER BY a; ---source include/save_master_gtid.inc - ---connection server_2 ---let $slave_sql_errno= 1062 ---source include/wait_for_slave_sql_error.inc - ---connection server_2 ---source include/stop_slave_io.inc -SET GLOBAL slave_parallel_threads=10; -START SLAVE; - ---let $slave_sql_errno= 1062 ---source include/wait_for_slave_sql_error.inc - -# Note: IO thread is still running at this point. -# The bug seems to have been that restarting the SQL thread after an error with -# the IO thread still running, somehow picks up a later relay log position and -# thus ends up skipping the failing event, rather than re-executing. - -START SLAVE SQL_THREAD; ---let $slave_sql_errno= 1062 ---source include/wait_for_slave_sql_error.inc - -SELECT * FROM t2 WHERE a >= 30 ORDER BY a; - -# Skip the duplicate error, so we can proceed. ---error ER_SLAVE_SKIP_NOT_IN_GTID -SET sql_slave_skip_counter= 1; ---source include/stop_slave_io.inc ---disable_query_log -eval SET GLOBAL gtid_slave_pos = REPLACE(@@gtid_slave_pos, "$gtid1", "$gtid2"); ---enable_query_log ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc - -SELECT * FROM t2 WHERE a >= 30 ORDER BY a; - - ---echo *** MDEV-6775: Wrong binlog order in parallel replication *** ---connection server_1 -# A bit tricky bug to reproduce. On the master, we binlog in statement-mode -# two transactions, an UPDATE followed by a DELETE. On the slave, we replicate -# with binlog-mode set to ROW, which means the DELETE, which modifies no rows, -# is not binlogged. Then we inject a wait in the group commit code on the -# slave, shortly before the actual commit of the UPDATE. 
The bug was that the -# DELETE could wake up from wait_for_prior_commit() before the commit of the -# UPDATE. So the test could see the slave position updated to after DELETE, -# while the UPDATE was still not visible. -DELETE FROM t4; -INSERT INTO t4 VALUES (1,NULL), (3,NULL), (4,4), (5, NULL), (6, 6); ---source include/save_master_gtid.inc - ---connection server_2 ---source include/sync_with_master_gtid.inc ---source include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,inject_binlog_commit_before_get_LOCK_log"; -SET @old_format=@@GLOBAL.binlog_format; -SET GLOBAL binlog_format=ROW; -# Re-spawn the worker threads to be sure they pick up the new binlog format -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; - ---connection con1 -SET @old_format= @@binlog_format; -SET binlog_format= statement; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -send UPDATE t4 SET b=NULL WHERE a=6; ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued1'; - ---connection con2 -SET @old_format= @@binlog_format; -SET binlog_format= statement; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -send DELETE FROM t4 WHERE b <= 3; - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; - ---connection con1 -REAP; -SET binlog_format= @old_format; ---connection con2 -REAP; -SET binlog_format= @old_format; -SET debug_sync='RESET'; ---save_master_pos -SELECT * FROM t4 ORDER BY a; - ---connection server_2 ---source include/start_slave.inc -SET debug_sync= 'now WAIT_FOR waiting'; ---sync_with_master -SELECT * FROM t4 ORDER BY a; -SET debug_sync= 'now SIGNAL cont'; - -# Re-spawn the worker threads to remove any DBUG injections or DEBUG_SYNC. ---source include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL binlog_format= @old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - - ---echo *** MDEV-7237: Parallel replication: incorrect relaylog position after stop/start the slave *** ---connection server_1 -INSERT INTO t2 VALUES (40); ---save_master_pos - ---connection server_2 ---sync_with_master ---source include/stop_slave.inc -CHANGE MASTER TO master_use_gtid=no; -SET @old_dbug= @@GLOBAL.debug_dbug; -# This DBUG injection causes a DEBUG_SYNC signal "scheduled_gtid_0_x_100" when -# GTID 0-1-100 has been scheduled for and fetched by a worker thread. -SET GLOBAL debug_dbug="+d,rpl_parallel_scheduled_gtid_0_x_100"; -# This DBUG injection causes a DEBUG_SYNC signal "wait_for_done_waiting" when -# STOP SLAVE has signalled all worker threads to stop. -SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; -# Reset worker threads to make DBUG setting catch on. -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; - - ---connection server_1 -# Setup some transaction for the slave to replicate. -INSERT INTO t2 VALUES (41); -INSERT INTO t2 VALUES (42); -# Need to log the DELETE in statement format, so we can see it in processlist. -SET @old_format= @@binlog_format; -SET binlog_format= statement; -DELETE FROM t2 WHERE a=40; -SET binlog_format= @old_format; -INSERT INTO t2 VALUES (43); -INSERT INTO t2 VALUES (44); -# Force the slave to switch to a new relay log file. 
-FLUSH LOGS; -INSERT INTO t2 VALUES (45); -# Inject a GTID 0-1-100, which will trigger a DEBUG_SYNC signal when this -# transaction has been fetched by a worker thread. -SET gtid_seq_no=100; -INSERT INTO t2 VALUES (46); ---save_master_pos - ---connection con_temp2 -# Temporarily block the DELETE on a=40 from completing. -BEGIN; -SELECT * FROM t2 WHERE a=40 FOR UPDATE; - - ---connection server_2 ---source include/start_slave.inc - -# Wait for a worker thread to start on the DELETE that will be blocked -# temporarily by the SELECT FOR UPDATE. ---let $wait_condition= SELECT count(*) > 0 FROM information_schema.processlist WHERE state='updating' and info LIKE '%DELETE FROM t2 WHERE a=40%' ---source include/wait_condition.inc - -# The DBUG injection set above will make the worker thread signal the following -# debug_sync when the GTID 0-1-100 has been reached by a worker thread. -# Thus, at this point, the SQL driver thread has reached the next -# relay log file name, while a worker thread is still processing a -# transaction in the previous relay log file, blocked on the SELECT FOR -# UPDATE. -SET debug_sync= 'now WAIT_FOR scheduled_gtid_0_x_100'; -# At this point, the SQL driver thread is in the new relay log file, while -# the DELETE from the old relay log file is not yet complete. We will stop -# the slave at this point. The bug was that the DELETE statement would -# update the slave position to the _new_ relay log file name instead of -# its own old file name. Thus, by stoping and restarting the slave at this -# point, we would get an error at restart due to incorrect position. (If -# we would let the slave catch up before stopping, the incorrect position -# would be corrected by a later transaction). - -send STOP SLAVE; - ---connection con_temp2 -# Wait for STOP SLAVE to have proceeded sufficiently that it has signalled -# all worker threads to stop; this ensures that we will stop after the DELETE -# transaction (and not after a later transaction that might have been able -# to set a fixed position). -SET debug_sync= 'now WAIT_FOR wait_for_done_waiting'; -# Now release the row lock that was blocking the replication of DELETE. -ROLLBACK; - ---connection server_2 -reap; ---source include/wait_for_slave_sql_to_stop.inc -SELECT * FROM t2 WHERE a >= 40 ORDER BY a; -# Now restart the slave. With the bug present, this would start at an -# incorrect relay log position, causing relay log read error (or if unlucky, -# silently skip a number of events). ---source include/start_slave.inc ---sync_with_master -SELECT * FROM t2 WHERE a >= 40 ORDER BY a; ---source include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET DEBUG_SYNC= 'RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -CHANGE MASTER TO master_use_gtid=slave_pos; ---source include/start_slave.inc - - ---echo *** MDEV-7326 Server deadlock in connection with parallel replication *** -# We use three transactions, each in a separate group commit. -# T1 does mark_start_commit(), then gets a deadlock error. -# T2 wakes up and starts running -# T1 does unmark_start_commit() -# T3 goes to wait for T2 to start its commit -# T2 does mark_start_commit() -# The bug was that at this point, T3 got deadlocked. Because T1 has unmarked(), -# T3 did not yet see the count_committing_event_groups reach its target value -# yet. But when T1 later re-did mark_start_commit(), it failed to send a wakeup -# to T3. 
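When a scenario like this does hang, it helps to look at what the worker threads are blocked on before restarting anything. A minimal sketch, run on the slave (the workers run as user 'system user', the same filter the tests themselves use to count worker threads; the substring matched in STATE is an assumption, since the exact state strings vary between server versions):

  SELECT ID, STATE, INFO
    FROM INFORMATION_SCHEMA.PROCESSLIST
   WHERE USER = 'system user'               # parallel replication worker threads
     AND STATE LIKE '%prior transaction%';  # workers stuck on commit ordering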
- ---connection server_2 ---source include/stop_slave.inc -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=3; -SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; ---source include/start_slave.inc - ---connection server_1 -SET @old_format= @@SESSION.binlog_format; -SET binlog_format= STATEMENT; -# This debug_sync will linger on and be used to control T3 later. -INSERT INTO t1 VALUES (foo(50, - "rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", - "rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); ---save_master_pos ---connection server_2 -# Wait for the debug_sync point for T3 to be set. But let the preparation -# transaction remain hanging, so that T1 and T2 will be scheduled for the -# remaining two worker threads. -SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; - ---connection server_1 -INSERT INTO t2 VALUES (foo(50, - "rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", - "rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2")); ---save_master_pos - ---connection server_2 -SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; -# T1 has now done mark_start_commit(). It will later do a rollback and retry. - ---connection server_1 -# Use a MyISAM table for T2 and T3, so they do not trigger the -# rpl_parallel_simulate_temp_err_xid DBUG insertion on XID event. -INSERT INTO t1 VALUES (foo(51, - "rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", - "rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); - ---connection server_2 -SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; -# T2 has now started running, but has not yet done mark_start_commit() -SET DEBUG_SYNC= "now SIGNAL t1_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; -# T1 has now done unmark_start_commit() in preparation for its retry. - ---connection server_1 -INSERT INTO t1 VALUES (52); -SET BINLOG_FORMAT= @old_format; -SELECT * FROM t2 WHERE a>=50 ORDER BY a; -SELECT * FROM t1 WHERE a>=50 ORDER BY a; - ---connection server_2 -# Let the preparation transaction complete, so that the same worker thread -# can continue with the transaction T3. -SET DEBUG_SYNC= "now SIGNAL prep_cont"; -SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; -# T3 has now gone to wait for T2 to start committing -SET DEBUG_SYNC= "now SIGNAL t2_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; -# T2 has now done mark_start_commit(). -# Let things run, and check that T3 does not get deadlocked. -SET DEBUG_SYNC= "now SIGNAL t1_cont2"; ---sync_with_master - ---connection server_1 ---save_master_pos ---connection server_2 ---sync_with_master -SELECT * FROM t2 WHERE a>=50 ORDER BY a; -SELECT * FROM t1 WHERE a>=50 ORDER BY a; -SET DEBUG_SYNC="reset"; - -# Re-spawn the worker threads to remove any DBUG injections or DEBUG_SYNC. ---source include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - - ---echo *** MDEV-7326 Server deadlock in connection with parallel replication *** -# Similar to the previous test, but with T2 and T3 in the same GCO. -# We use three transactions, T1 in one group commit and T2/T3 in another. -# T1 does mark_start_commit(), then gets a deadlock error. -# T2 wakes up and starts running -# T1 does unmark_start_commit() -# T3 goes to wait for T1 to start its commit -# T2 does mark_start_commit() -# The bug was that at this point, T3 got deadlocked. 
T2 increments the -# count_committing_event_groups but does not signal T3, as they are in -# the same GCO. Then later when T1 increments, it would also not signal -# T3, because now the count_committing_event_groups is not equal to the -# wait_count of T3 (it is one larger). - ---connection server_2 ---source include/stop_slave.inc -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=3; -SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; ---source include/start_slave.inc - ---connection server_1 -SET @old_format= @@SESSION.binlog_format; -SET binlog_format= STATEMENT; -# This debug_sync will linger on and be used to control T3 later. -INSERT INTO t1 VALUES (foo(60, - "rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", - "rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); ---save_master_pos ---connection server_2 -# Wait for the debug_sync point for T3 to be set. But let the preparation -# transaction remain hanging, so that T1 and T2 will be scheduled for the -# remaining two worker threads. -SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; - ---connection server_1 -INSERT INTO t2 VALUES (foo(60, - "rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", - "rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2")); ---save_master_pos - ---connection server_2 -SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; -# T1 has now done mark_start_commit(). It will later do a rollback and retry. - -# Do T2 and T3 in a single group commit. -# Use a MyISAM table for T2 and T3, so they do not trigger the -# rpl_parallel_simulate_temp_err_xid DBUG insertion on XID event. ---connection con_temp3 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -send INSERT INTO t1 VALUES (foo(61, - "rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", - "rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued1'; - ---connection con_temp4 -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -send INSERT INTO t6 VALUES (62); - ---connection server_1 -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; - ---connection con_temp3 -REAP; ---connection con_temp4 -REAP; - ---connection server_1 -SET debug_sync='RESET'; -SET BINLOG_FORMAT= @old_format; -SELECT * FROM t2 WHERE a>=60 ORDER BY a; -SELECT * FROM t1 WHERE a>=60 ORDER BY a; -SELECT * FROM t6 WHERE a>=60 ORDER BY a; - ---connection server_2 -SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; -# T2 has now started running, but has not yet done mark_start_commit() -SET DEBUG_SYNC= "now SIGNAL t1_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; -# T1 has now done unmark_start_commit() in preparation for its retry. - ---connection server_2 -# Let the preparation transaction complete, so that the same worker thread -# can continue with the transaction T3. -SET DEBUG_SYNC= "now SIGNAL prep_cont"; -SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; -# T3 has now gone to wait for T2 to start committing -SET DEBUG_SYNC= "now SIGNAL t2_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; -# T2 has now done mark_start_commit(). -# Let things run, and check that T3 does not get deadlocked. 
-SET DEBUG_SYNC= "now SIGNAL t1_cont2"; ---sync_with_master - ---connection server_1 ---save_master_pos ---connection server_2 ---sync_with_master -SELECT * FROM t2 WHERE a>=60 ORDER BY a; -SELECT * FROM t1 WHERE a>=60 ORDER BY a; -SELECT * FROM t6 WHERE a>=60 ORDER BY a; -SET DEBUG_SYNC="reset"; - -# Re-spawn the worker threads to remove any DBUG injections or DEBUG_SYNC. ---source include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - ---echo *** MDEV-7335: Potential parallel slave deadlock with specific binlog corruption *** - ---connection server_2 ---source include/stop_slave.inc -SET GLOBAL slave_parallel_threads=1; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,slave_discard_xid_for_gtid_0_x_1000"; - ---connection server_1 -INSERT INTO t2 VALUES (101); -INSERT INTO t2 VALUES (102); -INSERT INTO t2 VALUES (103); -INSERT INTO t2 VALUES (104); -INSERT INTO t2 VALUES (105); -# Inject a partial event group (missing XID at the end). The bug was that such -# partial group was not handled appropriately, leading to server deadlock. -SET gtid_seq_no=1000; -INSERT INTO t2 VALUES (106); -INSERT INTO t2 VALUES (107); -INSERT INTO t2 VALUES (108); -INSERT INTO t2 VALUES (109); -INSERT INTO t2 VALUES (110); -INSERT INTO t2 VALUES (111); -INSERT INTO t2 VALUES (112); -INSERT INTO t2 VALUES (113); -INSERT INTO t2 VALUES (114); -INSERT INTO t2 VALUES (115); -INSERT INTO t2 VALUES (116); -INSERT INTO t2 VALUES (117); -INSERT INTO t2 VALUES (118); -INSERT INTO t2 VALUES (119); -INSERT INTO t2 VALUES (120); -INSERT INTO t2 VALUES (121); -INSERT INTO t2 VALUES (122); -INSERT INTO t2 VALUES (123); -INSERT INTO t2 VALUES (124); -INSERT INTO t2 VALUES (125); -INSERT INTO t2 VALUES (126); -INSERT INTO t2 VALUES (127); -INSERT INTO t2 VALUES (128); -INSERT INTO t2 VALUES (129); -INSERT INTO t2 VALUES (130); ---source include/save_master_gtid.inc - ---connection server_2 ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc -# The partial event group (a=106) should be rolled back and thus missing. -SELECT * FROM t2 WHERE a >= 100 ORDER BY a; - ---source include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - ---echo *** MDEV-6676 - test syntax of @@slave_parallel_mode *** ---connection server_2 - ---let $status_items= Parallel_Mode ---source include/show_slave_status.inc ---source include/stop_slave.inc -SET GLOBAL slave_parallel_mode='aggressive'; ---let $status_items= Parallel_Mode ---source include/show_slave_status.inc -SET GLOBAL slave_parallel_mode='conservative'; ---let $status_items= Parallel_Mode ---source include/show_slave_status.inc - - ---echo *** MDEV-6676 - test that empty parallel_mode does not replicate in parallel *** ---connection server_1 -INSERT INTO t2 VALUES (1040); ---source include/save_master_gtid.inc - ---connection server_2 -SET GLOBAL slave_parallel_mode='none'; -# Test that we do not use parallel apply, by injecting an unconditional -# crash in the parallel apply code. 
-SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,slave_crash_if_parallel_apply"; ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc -SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; ---source include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; - - ---echo *** MDEV-6676 - test disabling domain-based parallel replication *** ---connection server_1 -# Let's do a bunch of transactions that will conflict if run out-of-order in -# domain-based parallel replication mode. -SET gtid_domain_id = 1; -INSERT INTO t2 VALUES (1041); -INSERT INTO t2 VALUES (1042); -INSERT INTO t2 VALUES (1043); -INSERT INTO t2 VALUES (1044); -INSERT INTO t2 VALUES (1045); -INSERT INTO t2 VALUES (1046); -DELETE FROM t2 WHERE a >= 1041; -SET gtid_domain_id = 2; -INSERT INTO t2 VALUES (1041); -INSERT INTO t2 VALUES (1042); -INSERT INTO t2 VALUES (1043); -INSERT INTO t2 VALUES (1044); -INSERT INTO t2 VALUES (1045); -INSERT INTO t2 VALUES (1046); -SET gtid_domain_id = 0; ---source include/save_master_gtid.inc ---connection server_2 -SET GLOBAL slave_parallel_mode=minimal; ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc -SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; - ---echo *** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang *** - ---connection server_2 ---source include/stop_slave.inc -SET GLOBAL slave_parallel_mode='conservative'; -SET GLOBAL slave_parallel_threads=10; - -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep'; - ---connection server_1 -# Inject two group commits. The bug was that ANALYZE TABLE would call -# wakeup_subsequent_commits() too early, allowing the following transaction -# in the same group to run ahead and binlog and free the GCO. Then we get -# wrong binlog order and later access freed GCO, which causes lost wakeup -# of following GCO and thus replication hang. -# We injected a small sleep in ANALYZE to make the race easier to hit (this -# can only cause false negatives in versions with the bug, not false positives, -# so sleep is ok here. And it's in general not possible to trigger reliably -# the race with debug_sync, since the bugfix makes the race impossible). - -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; - -# Group commit with cid=10000, two event groups. -SET @commit_id= 10000; -ANALYZE TABLE t2; -INSERT INTO t3 VALUES (120, 0); - -# Group commit with cid=10001, one event group. -SET @commit_id= 10001; -INSERT INTO t3 VALUES (121, 0); - -SET SESSION debug_dbug=@old_dbug; - -SELECT * FROM t3 WHERE a >= 120 ORDER BY a; ---source include/save_master_gtid.inc - ---connection server_2 ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc - -SELECT * FROM t3 WHERE a >= 120 ORDER BY a; - ---source include/stop_slave.inc -SET GLOBAL debug_dbug= @old_dbug; ---source include/start_slave.inc - - ---echo *** MDEV-7929: record_gtid() for non-transactional event group calls wakeup_subsequent_commits() too early, causing slave hang. *** - ---connection server_2 ---source include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_record_gtid_serverid_100_sleep'; - ---connection server_1 -# Inject two group commits. The bug was that record_gtid for a -# non-transactional event group would commit its own transaction, which would -# cause ha_commit_trans() to call wakeup_subsequent_commits() too early. 
This -# in turn lead to access to freed group_commit_orderer object, losing a wakeup -# and causing slave threads to hang. -# We inject a small sleep in the corresponding record_gtid() to make the race -# easier to hit. - -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; - -# Group commit with cid=10010, two event groups. -SET @old_server_id= @@SESSION.server_id; -SET SESSION server_id= 100; -SET @commit_id= 10010; -ALTER TABLE t1 COMMENT "Hulubulu!"; -SET SESSION server_id= @old_server_id; -INSERT INTO t3 VALUES (130, 0); - -# Group commit with cid=10011, one event group. -SET @commit_id= 10011; -INSERT INTO t3 VALUES (131, 0); - -SET SESSION debug_dbug=@old_dbug; - -SELECT * FROM t3 WHERE a >= 130 ORDER BY a; ---source include/save_master_gtid.inc - ---connection server_2 ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc - -SELECT * FROM t3 WHERE a >= 130 ORDER BY a; - ---source include/stop_slave.inc -SET GLOBAL debug_dbug= @old_dbug; ---source include/start_slave.inc - - ---echo *** MDEV-8031: Parallel replication stops on "connection killed" error (probably incorrectly handled deadlock kill) *** - ---connection server_1 -INSERT INTO t3 VALUES (201,0), (202,0); ---source include/save_master_gtid.inc - ---connection server_2 ---source include/sync_with_master_gtid.inc ---source include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_mdev8031'; - ---connection server_1 -# We artificially create a situation that hopefully resembles the original -# bug which was only seen "in the wild", and only once. -# Setup a fake group commit with lots of conflicts that will lead to deadloc -# kill. The slave DBUG injection causes the slave to be deadlock killed at -# a particular point during the retry, and then later do a small sleep at -# another critical point where the prior transaction then has a chance to -# complete. Finally an extra KILL check catches an unhandled, lingering -# deadlock kill. So rather artificial, but at least it exercises the -# relevant code paths. -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; - -SET @commit_id= 10200; -INSERT INTO t3 VALUES (203, 1); -INSERT INTO t3 VALUES (204, 1); -INSERT INTO t3 VALUES (205, 1); -UPDATE t3 SET b=b+1 WHERE a=201; -UPDATE t3 SET b=b+1 WHERE a=201; -UPDATE t3 SET b=b+1 WHERE a=201; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=203; -UPDATE t3 SET b=b+1 WHERE a=203; -UPDATE t3 SET b=b+1 WHERE a=204; -UPDATE t3 SET b=b+1 WHERE a=204; -UPDATE t3 SET b=b+1 WHERE a=204; -UPDATE t3 SET b=b+1 WHERE a=203; -UPDATE t3 SET b=b+1 WHERE a=205; -UPDATE t3 SET b=b+1 WHERE a=205; -SET SESSION debug_dbug=@old_dbug; - -SELECT * FROM t3 WHERE a>=200 ORDER BY a; ---source include/save_master_gtid.inc - ---connection server_2 ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc - -SELECT * FROM t3 WHERE a>=200 ORDER BY a; ---source include/stop_slave.inc -SET GLOBAL debug_dbug= @old_dbug; ---source include/start_slave.inc - - ---echo *** Check getting deadlock killed inside open_binlog() during retry. 
*** - ---connection server_2 ---source include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_retry_event_group_open_binlog_kill'; -SET @old_max= @@GLOBAL.max_relay_log_size; -SET GLOBAL max_relay_log_size= 4096; - ---connection server_1 -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; - ---let $large= `SELECT REPEAT("*", 8192)` -SET @commit_id= 10210; ---echo Omit long queries that cause relaylog rotations and transaction retries... ---disable_query_log -eval UPDATE t3 SET b=b+1 WHERE a=201 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=201 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=201 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=203 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=203 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=204 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=204 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=204 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=203 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=205 /* $large */; -eval UPDATE t3 SET b=b+1 WHERE a=205 /* $large */; ---enable_query_log -SET SESSION debug_dbug=@old_dbug; - -SELECT * FROM t3 WHERE a>=200 ORDER BY a; ---source include/save_master_gtid.inc - ---connection server_2 ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc - -SELECT * FROM t3 WHERE a>=200 ORDER BY a; ---source include/stop_slave.inc -SET GLOBAL debug_dbug= @old_debg; -SET GLOBAL max_relay_log_size= @old_max; ---source include/start_slave.inc - ---echo *** MDEV-8725: Assertion on ROLLBACK statement in the binary log *** ---connection server_1 -# Inject an event group terminated by ROLLBACK, by mixing MyISAM and InnoDB -# in a transaction. The bug was an assertion on the ROLLBACK due to -# mark_start_commit() being already called. ---disable_warnings -BEGIN; -INSERT INTO t2 VALUES (2000); -INSERT INTO t1 VALUES (2000); -INSERT INTO t2 VALUES (2001); -ROLLBACK; ---enable_warnings -SELECT * FROM t1 WHERE a>=2000 ORDER BY a; -SELECT * FROM t2 WHERE a>=2000 ORDER BY a; ---source include/save_master_gtid.inc - ---connection server_2 ---source include/sync_with_master_gtid.inc -SELECT * FROM t1 WHERE a>=2000 ORDER BY a; -SELECT * FROM t2 WHERE a>=2000 ORDER BY a; - - -# Clean up. 
---connection server_2 ---source include/stop_slave.inc -SET GLOBAL slave_parallel_threads=@old_parallel_threads; ---source include/start_slave.inc -SET DEBUG_SYNC= 'RESET'; - ---connection server_1 -DROP function foo; -DROP TABLE t1,t2,t3,t4,t5,t6; -SET DEBUG_SYNC= 'RESET'; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_analyze_table_hang.inc b/mysql-test/suite/rpl/include/rpl_parallel_analyze_table_hang.inc new file mode 100644 index 00000000000..62a7501c36b --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_analyze_table_hang.inc @@ -0,0 +1,73 @@ +--echo *** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang *** +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_mode='conservative'; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep'; + +--connection server_1 +# Inject two group commits. The bug was that ANALYZE TABLE would call +# wakeup_subsequent_commits() too early, allowing the following transaction +# in the same group to run ahead and binlog and free the GCO. Then we get +# wrong binlog order and later access freed GCO, which causes lost wakeup +# of following GCO and thus replication hang. +# We injected a small sleep in ANALYZE to make the race easier to hit (this +# can only cause false negatives in versions with the bug, not false positives, +# so sleep is ok here. And it's in general not possible to trigger reliably +# the race with debug_sync, since the bugfix makes the race impossible). + +SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; + +# Group commit with cid=10000, two event groups. +SET @commit_id= 10000; +ANALYZE TABLE t2; +INSERT INTO t3 VALUES (120, 0); + +# Group commit with cid=10001, one event group. +SET @commit_id= 10001; +INSERT INTO t3 VALUES (121, 0); + +SET SESSION debug_dbug=@old_dbug; + +SELECT * FROM t3 WHERE a >= 120 ORDER BY a; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc + +SELECT * FROM t3 WHERE a >= 120 ORDER BY a; + +# Clean up. 
+--source include/stop_slave.inc +SET GLOBAL debug_dbug= @old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t2,t3; + +--source include/rpl_end.inc + diff --git a/mysql-test/suite/rpl/include/rpl_parallel_deadlock_corrupt_binlog.inc b/mysql-test/suite/rpl/include/rpl_parallel_deadlock_corrupt_binlog.inc new file mode 100644 index 00000000000..3a135ef5cc4 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_deadlock_corrupt_binlog.inc @@ -0,0 +1,79 @@ +--echo *** MDEV-7335: Potential parallel slave deadlock with specific binlog corruption *** +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--connection server_2 +--source include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=1; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,slave_discard_xid_for_gtid_0_x_1000"; + +--connection server_1 +INSERT INTO t2 VALUES (101); +INSERT INTO t2 VALUES (102); +INSERT INTO t2 VALUES (103); +INSERT INTO t2 VALUES (104); +INSERT INTO t2 VALUES (105); +# Inject a partial event group (missing XID at the end). The bug was that such +# partial group was not handled appropriately, leading to server deadlock. +SET gtid_seq_no=1000; +INSERT INTO t2 VALUES (106); +INSERT INTO t2 VALUES (107); +INSERT INTO t2 VALUES (108); +INSERT INTO t2 VALUES (109); +INSERT INTO t2 VALUES (110); +INSERT INTO t2 VALUES (111); +INSERT INTO t2 VALUES (112); +INSERT INTO t2 VALUES (113); +INSERT INTO t2 VALUES (114); +INSERT INTO t2 VALUES (115); +INSERT INTO t2 VALUES (116); +INSERT INTO t2 VALUES (117); +INSERT INTO t2 VALUES (118); +INSERT INTO t2 VALUES (119); +INSERT INTO t2 VALUES (120); +INSERT INTO t2 VALUES (121); +INSERT INTO t2 VALUES (122); +INSERT INTO t2 VALUES (123); +INSERT INTO t2 VALUES (124); +INSERT INTO t2 VALUES (125); +INSERT INTO t2 VALUES (126); +INSERT INTO t2 VALUES (127); +INSERT INTO t2 VALUES (128); +INSERT INTO t2 VALUES (129); +INSERT INTO t2 VALUES (130); +--source include/save_master_gtid.inc + +--connection server_2 +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +# The partial event group (a=106) should be rolled back and thus missing. 
+SELECT * FROM t2 WHERE a >= 100 ORDER BY a; + +# Cleanup +--source include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; + +--connection server_1 +DROP TABLE t2; +SET DEBUG_SYNC= 'RESET'; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_domain.inc b/mysql-test/suite/rpl/include/rpl_parallel_domain.inc new file mode 100644 index 00000000000..eda08cc2916 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_domain.inc @@ -0,0 +1,87 @@ +# Test should work with both conservative and optimistic modes + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +# Test various aspects of parallel replication. + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--error ER_SLAVE_MUST_STOP +SET GLOBAL slave_parallel_threads=10; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; + +# Check that we do not spawn any worker threads when no slave is running. +SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; + +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +# Check that worker threads get spawned when slave starts. +SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; +# ... and that worker threads get removed when slave stops. +--source include/stop_slave.inc +SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; +--source include/start_slave.inc +SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; + +--echo *** Test long-running query in domain 1 can run in parallel with short queries in domain 0 *** + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); +INSERT INTO t2 VALUES (1); +--save_master_pos + +--connection server_2 +--sync_with_master + +# Block the table t1 to simulate a replicated query taking a long time. +--connect (con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +LOCK TABLE t1 WRITE; + +--connection server_1 +SET gtid_domain_id=1; +# This query will be blocked on the slave until UNLOCK TABLES. +INSERT INTO t1 VALUES (2); +SET gtid_domain_id=0; +# These t2 queries can be replicated in parallel with the prior t1 query, as +# they are in a separate replication domain. +INSERT INTO t2 VALUES (2); +INSERT INTO t2 VALUES (3); +BEGIN; +INSERT INTO t2 VALUES (4); +INSERT INTO t2 VALUES (5); +COMMIT; +INSERT INTO t2 VALUES (6); + +--connection server_2 +--let $wait_condition= SELECT COUNT(*) = 6 FROM t2 +--source include/wait_condition.inc + +SELECT * FROM t2 ORDER by a; + +--connection con_temp1 +SELECT * FROM t1; +UNLOCK TABLES; + +--connection server_2 +--let $wait_condition= SELECT COUNT(*) = 2 FROM t1 +--source include/wait_condition.inc + +SELECT * FROM t1 ORDER BY a; + +# Clean up. 
+--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1,t2; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_domain_slave_single_grp.inc b/mysql-test/suite/rpl/include/rpl_parallel_domain_slave_single_grp.inc new file mode 100644 index 00000000000..856efd065df --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_domain_slave_single_grp.inc @@ -0,0 +1,128 @@ +# Test is independent of slave_parallel_mode +--echo *** Test two transactions in different domains committed in opposite order on slave but in a single group commit. *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +# Test various aspects of parallel replication. + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); +INSERT INTO t2 VALUES (1); +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc + +--connection server_1 +# Use a stored function to inject a debug_sync into the appropriate THD. +# The function does nothing on the master, and on the slave it injects the +# desired debug_sync action(s). +SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +SET @old_format= @@SESSION.binlog_format; +SET binlog_format='statement'; +SET gtid_domain_id=1; +INSERT INTO t2 VALUES (foo(10, + 'commit_before_enqueue SIGNAL ready1 WAIT_FOR cont1', + 'commit_after_release_LOCK_prepare_ordered SIGNAL ready2')); + +--connection server_2 +FLUSH LOGS; +--source include/wait_for_binlog_checkpoint.inc +SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + IF d1 != '' THEN + SET debug_sync = d1; + END IF; + IF d2 != '' THEN + SET debug_sync = d2; + END IF; + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; +SET @old_format=@@GLOBAL.binlog_format; +SET GLOBAL binlog_format=statement; +# We need to restart all parallel threads for the new global setting to +# be copied to the session-level values. +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +--source include/start_slave.inc + +# First make sure the first insert is ready to commit, but not queued yet. +SET debug_sync='now WAIT_FOR ready1'; + +--connection server_1 +SET gtid_domain_id=2; +INSERT INTO t2 VALUES (foo(11, + 'commit_before_enqueue SIGNAL ready3 WAIT_FOR cont3', + 'commit_after_release_LOCK_prepare_ordered SIGNAL ready4 WAIT_FOR cont4')); +SET gtid_domain_id=0; +SELECT * FROM t2 WHERE a >= 10 ORDER BY a; + +--connection server_2 +# Now wait for the second insert to queue itself as the leader, and then +# wait for more commits to queue up. +SET debug_sync='now WAIT_FOR ready3'; +SET debug_sync='now SIGNAL cont3'; +SET debug_sync='now WAIT_FOR ready4'; +# Now allow the first insert to queue up to participate in group commit. 
+SET debug_sync='now SIGNAL cont1';
+SET debug_sync='now WAIT_FOR ready2';
+# Finally allow the second insert to proceed and do the group commit.
+SET debug_sync='now SIGNAL cont4';
+
+--let $wait_condition= SELECT COUNT(*) = 2 FROM t2 WHERE a >= 10
+--source include/wait_condition.inc
+SELECT * FROM t2 WHERE a >= 10 ORDER BY a;
+# The two INSERT transactions should have been committed in opposite order,
+# but in the same group commit (seen by the presence of cid=# in the SHOW
+# BINLOG output).
+--let $binlog_file= slave-bin.000002
+--source include/show_binlog_events.inc
+FLUSH LOGS;
+--source include/wait_for_binlog_checkpoint.inc
+
+
+# Clean up.
+--connection server_2
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads=@old_parallel_threads;
+--source include/start_slave.inc
+SET DEBUG_SYNC= 'RESET';
+SET GLOBAL binlog_format=@old_format;
+
+--connection server_1
+DROP function foo;
+DROP TABLE t1,t2;
+SET DEBUG_SYNC= 'RESET';
+SET GLOBAL binlog_format=@old_format;
+
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/include/rpl_parallel_free_deferred_event.inc b/mysql-test/suite/rpl/include/rpl_parallel_free_deferred_event.inc
new file mode 100644
index 00000000000..41f372eddf8
--- /dev/null
+++ b/mysql-test/suite/rpl/include/rpl_parallel_free_deferred_event.inc
@@ -0,0 +1,67 @@
+--echo *** MDEV-5788 Incorrect free of rgi->deferred_events in parallel replication ***
+
+--source include/have_innodb.inc
+--source include/master-slave.inc
+
+--connection server_2
+--source include/stop_slave.inc
+CHANGE MASTER TO master_use_gtid=slave_pos;
+--source include/start_slave.inc
+
+--connection server_1
+ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
+CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
+# Use just two worker threads, so we are sure to get the rpl_group_info added
+# to the free list, which is what triggered the bug.
+--source include/stop_slave.inc
+SET GLOBAL replicate_ignore_table="test.t3";
+SET GLOBAL slave_parallel_threads=2;
+--source include/start_slave.inc
+
+--connection server_1
+INSERT INTO t3 VALUES (100, rand());
+INSERT INTO t3 VALUES (101, rand());
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+
+--connection server_1
+INSERT INTO t3 VALUES (102, rand());
+INSERT INTO t3 VALUES (103, rand());
+INSERT INTO t3 VALUES (104, rand());
+INSERT INTO t3 VALUES (105, rand());
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+--source include/stop_slave.inc
+SET GLOBAL replicate_ignore_table="";
+--source include/start_slave.inc
+
+--connection server_1
+INSERT INTO t3 VALUES (106, rand());
+INSERT INTO t3 VALUES (107, rand());
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+--replace_column 2 #
+SELECT * FROM t3 WHERE a >= 100 ORDER BY a;
+
+
+# Clean up.
+--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t3; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_gco_wait_kill.inc b/mysql-test/suite/rpl/include/rpl_parallel_gco_wait_kill.inc new file mode 100644 index 00000000000..d918b2ea692 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_gco_wait_kill.inc @@ -0,0 +1,366 @@ +--echo *** Test killing thread that is waiting to start transaction until previous transaction commits *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/have_binlog_format_statement.inc +--source include/master-slave.inc + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode= @@GLOBAL.slave_parallel_mode; +--source include/stop_slave.inc +SET sql_log_bin=0; +CALL mtr.add_suppression("Query execution was interrupted"); +CALL mtr.add_suppression("Slave: Connection was killed"); +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +SET sql_log_bin=1; +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode= 'conservative'; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +--connect (con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +--connect (con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +--connect (con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master + +--connection server_1 +# Use a stored function to inject a debug_sync into the appropriate THD. +# The function does nothing on the master, and on the slave it injects the +# desired debug_sync action(s). +SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--connection server_2 +SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + IF d1 != '' THEN + SET debug_sync = d1; + END IF; + IF d2 != '' THEN + SET debug_sync = d2; + END IF; + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; +# We need to restart all parallel threads for the new global setting to +# be copied to the session-level values. +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=4; +--source include/start_slave.inc + + +# We set up four transactions T1, T2, T3, and T4 on the master. T2, T3, and T4 +# can run in parallel with each other (same group commit and commit id), +# but not in parallel with T1. +# +# We use four worker threads, each Ti will be queued on each their own +# worker thread. We will delay T1 commit, T3 will wait for T1 to begin +# commit before it can start. We will kill T3 during this wait, and +# check that everything works correctly. +# +# It is rather tricky to get the correct thread id of the worker to kill. +# We start by injecting four dummy transactions in a debug_sync-controlled +# manner to be able to get known thread ids for the workers in a pool with +# just 4 worker threads. 
Then we let in each of the real test transactions +# T1-T4 one at a time in a way which allows us to know which transaction +# ends up with which thread id. + +--connection server_1 +SET gtid_domain_id=2; +BEGIN; +# This debug_sync will linger on and be used to control T4 later. +INSERT INTO t3 VALUES (70, foo(70, + 'rpl_parallel_start_waiting_for_prior SIGNAL t4_waiting', '')); +INSERT INTO t3 VALUES (60, foo(60, + 'ha_write_row_end SIGNAL d2_query WAIT_FOR d2_cont2', + 'rpl_parallel_end_of_group SIGNAL d2_done WAIT_FOR d2_cont')); +COMMIT; +SET gtid_domain_id=0; + +--connection server_2 +SET debug_sync='now WAIT_FOR d2_query'; +--let $d2_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(60%' AND INFO NOT LIKE '%LIKE%'` + +--connection server_1 +SET gtid_domain_id=1; +BEGIN; +# These debug_sync's will linger on and be used to control T3 later. +INSERT INTO t3 VALUES (61, foo(61, + 'rpl_parallel_start_waiting_for_prior SIGNAL t3_waiting', + 'rpl_parallel_start_waiting_for_prior_killed SIGNAL t3_killed')); +INSERT INTO t3 VALUES (62, foo(62, + 'ha_write_row_end SIGNAL d1_query WAIT_FOR d1_cont2', + 'rpl_parallel_end_of_group SIGNAL d1_done WAIT_FOR d1_cont')); +COMMIT; +SET gtid_domain_id=0; + +--connection server_2 +SET debug_sync='now WAIT_FOR d1_query'; +--let $d1_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(62%' AND INFO NOT LIKE '%LIKE%'` + +--connection server_1 +SET gtid_domain_id=0; +INSERT INTO t3 VALUES (63, foo(63, + 'ha_write_row_end SIGNAL d0_query WAIT_FOR d0_cont2', + 'rpl_parallel_end_of_group SIGNAL d0_done WAIT_FOR d0_cont')); + +--connection server_2 +SET debug_sync='now WAIT_FOR d0_query'; +--let $d0_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(63%' AND INFO NOT LIKE '%LIKE%'` + +--connection server_1 +SET gtid_domain_id=3; +BEGIN; +# These debug_sync's will linger on and be used to control T2 later. +INSERT INTO t3 VALUES (68, foo(68, + 'rpl_parallel_start_waiting_for_prior SIGNAL t2_waiting', '')); +INSERT INTO t3 VALUES (69, foo(69, + 'ha_write_row_end SIGNAL d3_query WAIT_FOR d3_cont2', + 'rpl_parallel_end_of_group SIGNAL d3_done WAIT_FOR d3_cont')); +COMMIT; +SET gtid_domain_id=0; + +--connection server_2 +SET debug_sync='now WAIT_FOR d3_query'; +--let $d3_thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(69%' AND INFO NOT LIKE '%LIKE%'` + +SET debug_sync='now SIGNAL d2_cont2'; +SET debug_sync='now WAIT_FOR d2_done'; +SET debug_sync='now SIGNAL d1_cont2'; +SET debug_sync='now WAIT_FOR d1_done'; +SET debug_sync='now SIGNAL d0_cont2'; +SET debug_sync='now WAIT_FOR d0_done'; +SET debug_sync='now SIGNAL d3_cont2'; +SET debug_sync='now WAIT_FOR d3_done'; + +# Now prepare the real transactions T1, T2, T3, T4 on the master. + +--connection con_temp3 +# Create transaction T1. +INSERT INTO t3 VALUES (64, foo(64, + 'rpl_parallel_before_mark_start_commit SIGNAL t1_waiting WAIT_FOR t1_cont', '')); + +# Create transaction T2, as a group commit leader on the master. +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2 WAIT_FOR master_cont2'; +send INSERT INTO t3 VALUES (65, foo(65, '', '')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; + +--connection con_temp4 +# Create transaction T3, participating in T2's group commit. 
+SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +send INSERT INTO t3 VALUES (66, foo(66, '', '')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued3'; + +--connection con_temp5 +# Create transaction T4, participating in group commit with T2 and T3. +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued4'; +send INSERT INTO t3 VALUES (67, foo(67, '', '')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued4'; +SET debug_sync='now SIGNAL master_cont2'; + +--connection con_temp3 +REAP; +--connection con_temp4 +REAP; +--connection con_temp5 +REAP; + +--connection server_1 +SELECT * FROM t3 WHERE a >= 60 ORDER BY a; +SET debug_sync='RESET'; + +--connection server_2 +# Now we have the four transactions pending for replication on the slave. +# Let them be queued for our three worker threads in a controlled fashion. +# We put them at a stage where T1 is delayed and T3 is waiting for T1 to +# commit before T3 can start. Then we kill T3. + +# Make the worker D0 free, and wait for T1 to be queued in it. +SET debug_sync='now SIGNAL d0_cont'; +SET debug_sync='now WAIT_FOR t1_waiting'; + +# Make the worker D3 free, and wait for T2 to be queued in it. +SET debug_sync='now SIGNAL d3_cont'; +SET debug_sync='now WAIT_FOR t2_waiting'; + +# Now release worker D1, and wait for T3 to be queued in it. +# T3 will wait for T1 to commit before it can start. +SET debug_sync='now SIGNAL d1_cont'; +SET debug_sync='now WAIT_FOR t3_waiting'; + +# Release worker D2. Wait for T4 to be queued, so we are sure it has +# received the debug_sync signal (else we might overwrite it with the +# next debug_sync). +SET debug_sync='now SIGNAL d2_cont'; +SET debug_sync='now WAIT_FOR t4_waiting'; + +# Now we kill the waiting transaction T3 in worker D1. +--replace_result $d1_thd_id THD_ID +eval KILL $d1_thd_id; + +# Wait until T3 has reacted on the kill. +SET debug_sync='now WAIT_FOR t3_killed'; + +# Now we can allow T1 to proceed. +SET debug_sync='now SIGNAL t1_cont'; + +--let $slave_sql_errno= 1317,1927,1964 +--source include/wait_for_slave_sql_error.inc +STOP SLAVE IO_THREAD; +# Since T2, T3, and T4 run in parallel, we can not be sure if T2 will have time +# to commit or not before the stop. However, T1 should commit, and T3/T4 may +# not have committed. (After slave restart we check that all become committed +# eventually). +SELECT * FROM t3 WHERE a >= 60 AND a != 65 ORDER BY a; + +# Now we have to disable the debug_sync statements, so they do not trigger +# when the events are retried. +SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--connection server_1 +UPDATE t3 SET b=b+1 WHERE a=60; +--save_master_pos + +--connection server_2 +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t3 WHERE a >= 60 ORDER BY a; +# Restore the foo() function. +SET sql_log_bin=0; +DROP FUNCTION foo; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + IF d1 != '' THEN + SET debug_sync = d1; + END IF; + IF d2 != '' THEN + SET debug_sync = d2; + END IF; + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--connection server_2 +# Respawn all worker threads to clear any left-over debug_sync or other stuff. 
+--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +--source include/start_slave.inc + +--echo *** 5. Test killing thread that is waiting for queue of max length to shorten *** + +# Find the thread id of the driver SQL thread that we want to kill. +--let $wait_condition= SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%Slave has read all relay log%' +--source include/wait_condition.inc +--let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%Slave has read all relay log%'` +SET @old_max_queued= @@GLOBAL.slave_parallel_max_queued; +SET GLOBAL slave_parallel_max_queued=9000; + +--connection server_1 +--let bigstring= `SELECT REPEAT('x', 10000)` +# Create an event that will wait to be signalled. +INSERT INTO t3 VALUES (80, foo(0, + 'ha_write_row_end SIGNAL query_waiting WAIT_FOR query_cont', '')); + +--connection server_2 +SET debug_sync='now WAIT_FOR query_waiting'; +# Inject that the SQL driver thread will signal `wait_queue_ready' to debug_sync +# as it goes to wait for the event queue to become smaller than the value of +# @@slave_parallel_max_queued. +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_wait_queue_max"; + +--connection server_1 +--disable_query_log +# Create an event that will fill up the queue. +# The Xid event at the end of the event group will have to wait for the Query +# event with the INSERT to drain so the queue becomes shorter. However that in +# turn waits for the prior event group to continue. +eval INSERT INTO t3 VALUES (81, LENGTH('$bigstring')); +--enable_query_log +SELECT * FROM t3 WHERE a >= 80 ORDER BY a; + +--connection server_2 +SET debug_sync='now WAIT_FOR wait_queue_ready'; + +--replace_result $thd_id THD_ID +eval KILL $thd_id; + +SET debug_sync='now WAIT_FOR wait_queue_killed'; +SET debug_sync='now SIGNAL query_cont'; + +--let $slave_sql_errno= 1317,1927,1964 +--source include/wait_for_slave_sql_error.inc +STOP SLAVE IO_THREAD; + +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_max_queued= @old_max_queued; + +--connection server_1 +INSERT INTO t3 VALUES (82,0); +--save_master_pos + +--connection server_2 +SET debug_sync='RESET'; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t3 WHERE a >= 80 ORDER BY a; + +--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; + +--connection server_1 +DROP function foo; +DROP TABLE t3; +SET DEBUG_SYNC= 'RESET'; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_gtid_slave_pos_update_fail.inc b/mysql-test/suite/rpl/include/rpl_parallel_gtid_slave_pos_update_fail.inc new file mode 100644 index 00000000000..da1a07d3b87 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_gtid_slave_pos_update_fail.inc @@ -0,0 +1,98 @@ +# MDEV-6549, failing to update gtid_slave_pos for a transaction that was retried. + +# The problem was that when a transaction updates the mysql.gtid_slave_pos +# table, it clears the flag that marks that there is a GTID position that +# needs to be updated. Then, if the transaction got killed after that due +# to a deadlock, the subsequent retry would fail to notice that the GTID needs +# to be recorded in gtid_slave_pos. 
+# +# (In the original bug report, the symptom was an assertion; this was however +# just a side effect of the missing update of gtid_slave_pos, which also +# happened to cause a missing clear of OPTION_GTID_BEGIN). +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +# Must use statement-based binlogging. Otherwise the transaction will not be +# binlogged at all, as it modifies no rows. +--source include/have_binlog_format_statement.inc +--source include/master-slave.inc + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; +INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); +--connect (con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +--connect (con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc + +# Create two transactions that can run in parallel on the slave but cause +# a deadlock if the second runs before the first. +--connection con1 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +send UPDATE t4 SET b=NULL WHERE a=6; +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued1'; + +--connection con2 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +send DELETE FROM t4 WHERE b <= 1; + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; + +--connection con1 +REAP; +--connection con2 +REAP; +SET debug_sync='RESET'; +--save_master_pos +--let $last_gtid= `SELECT @@last_gtid` + +--connection server_2 +# Disable the usual skip of gap locks for transactions that are run in +# parallel, using DBUG. This allows the deadlock to occur, and this in turn +# triggers a retry of the second transaction, and the code that was buggy and +# caused the gtid_slave_pos update to be skipped in the retry. +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,disable_thd_need_ordering_with"; +--source include/start_slave.inc +--sync_with_master +SET GLOBAL debug_dbug=@old_dbug; + +SELECT * FROM t4 ORDER BY a; +# Check that the GTID of the second transaction was correctly recorded in +# gtid_slave_pos, in the variable as well as in the table. +--replace_result $last_gtid GTID +eval SET @last_gtid= '$last_gtid'; +SELECT IF(@@gtid_slave_pos LIKE CONCAT('%',@last_gtid,'%'), "GTID found ok", + CONCAT("GTID ", @last_gtid, " not found in gtid_slave_pos=", @@gtid_slave_pos)) + AS result; +SELECT "ROW FOUND" AS `Is the row found?` + FROM mysql.gtid_slave_pos + WHERE CONCAT(domain_id, "-", server_id, "-", seq_no) = @last_gtid; + +# Clean up. 
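For reference, the position being verified here can also be inspected by hand: @@gtid_slave_pos is, roughly, the newest row per replication domain in mysql.gtid_slave_pos, so the two views in this sketch should agree (values depend on the run):

SELECT domain_id, sub_id, server_id, seq_no,
       CONCAT(domain_id, '-', server_id, '-', seq_no) AS gtid
  FROM mysql.gtid_slave_pos
 ORDER BY domain_id, sub_id;
SELECT @@GLOBAL.gtid_slave_pos;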
+--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; + +--connection server_1 +DROP TABLE t4; +SET DEBUG_SYNC= 'RESET'; +--disconnect con1 +--disconnect con2 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_ignore_error_on_rotate.inc b/mysql-test/suite/rpl/include/rpl_parallel_ignore_error_on_rotate.inc new file mode 100644 index 00000000000..2fab9f8032b --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_ignore_error_on_rotate.inc @@ -0,0 +1,96 @@ +--echo *** MDEV-6551: Some replication errors are ignored if slave_parallel_threads > 0 *** + +--source include/have_innodb.inc +--source include/have_binlog_format_statement.inc +--source include/master-slave.inc + +--connection server_2 +--source include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_threads=1; +CHANGE MASTER TO master_use_gtid=slave_pos; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +SET gtid_domain_id=1; +INSERT INTO t2 VALUES (1); +SET gtid_domain_id=0; +SET gtid_domain_id=2; +INSERT INTO t2 VALUES (2); +SET gtid_domain_id=0; +INSERT INTO t2 VALUES (31); +--let $gtid1= `SELECT @@LAST_GTID` +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads= 0; +--source include/start_slave.inc + +# Force a duplicate key error on the slave. +SET sql_log_bin= 0; +INSERT INTO t2 VALUES (32); +SET sql_log_bin= 1; + +--connection server_1 +INSERT INTO t2 VALUES (32); +--let $gtid2= `SELECT @@LAST_GTID` +# Rotate the binlog; the bug is triggered when the master binlog file changes +# after the event group that causes the duplicate key error. +FLUSH LOGS; +INSERT INTO t2 VALUES (33); +INSERT INTO t2 VALUES (34); +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; +--source include/save_master_gtid.inc + +--connection server_2 +--let $slave_sql_errno= 1062 +--source include/wait_for_slave_sql_error.inc + +--connection server_2 +--source include/stop_slave_io.inc +SET GLOBAL slave_parallel_threads=10; +START SLAVE; + +--let $slave_sql_errno= 1062 +--source include/wait_for_slave_sql_error.inc + +# Note: IO thread is still running at this point. +# The bug seems to have been that restarting the SQL thread after an error with +# the IO thread still running, somehow picks up a later relay log position and +# thus ends up skipping the failing event, rather than re-executing. + +START SLAVE SQL_THREAD; +--let $slave_sql_errno= 1062 +--source include/wait_for_slave_sql_error.inc + +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; + +# Skip the duplicate error, so we can proceed. +--error ER_SLAVE_SKIP_NOT_IN_GTID +SET sql_slave_skip_counter= 1; +--source include/stop_slave_io.inc +--disable_query_log +eval SET GLOBAL gtid_slave_pos = REPLACE(@@gtid_slave_pos, "$gtid1", "$gtid2"); +--enable_query_log +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc + +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; + +# Clean up. 
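The REPLACE trick above is an instance of the general GTID-based skip pattern (plain sql_slave_skip_counter is rejected with ER_SLAVE_SKIP_NOT_IN_GTID, as checked above); a sketch with placeholder GTIDs:

STOP SLAVE;
# Mark the failing transaction's GTID as applied in its domain, leaving the
# positions of all other domains untouched, then resume replication.
# ('0-1-41' stands for the last applied GTID in the domain, '0-1-42' for the
# failing transaction's GTID; both are placeholders.)
SET GLOBAL gtid_slave_pos = REPLACE(@@gtid_slave_pos, '0-1-41', '0-1-42');
START SLAVE;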
+--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t2; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_incorrect_relay_pos.inc b/mysql-test/suite/rpl/include/rpl_parallel_incorrect_relay_pos.inc new file mode 100644 index 00000000000..3cf0afd63ca --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_incorrect_relay_pos.inc @@ -0,0 +1,128 @@ +--echo *** MDEV-7237: Parallel replication: incorrect relaylog position after stop/start the slave *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--connection server_2 +--source include/stop_slave.inc +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_threads=10; +# Test assumes that 'conservative' mode is in effect. i.e +# Do not start parallel execution of this event group until all prior groups +# have reached the commit phase. Upon execution of STOP SLAVE there can be one +# group which is executing and the rest are doing group commit order wait. +SET GLOBAL slave_parallel_mode='conservative'; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t2 VALUES (40); +--save_master_pos + +--connection server_2 +--sync_with_master +--connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +--source include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=no; +SET @old_dbug= @@GLOBAL.debug_dbug; +# This DBUG injection causes a DEBUG_SYNC signal "scheduled_gtid_0_x_100" when +# GTID 0-1-100 has been scheduled for and fetched by a worker thread. +SET GLOBAL debug_dbug="+d,rpl_parallel_scheduled_gtid_0_x_100"; +# This DBUG injection causes a DEBUG_SYNC signal "wait_for_done_waiting" when +# STOP SLAVE has signalled all worker threads to stop. +SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; +# Reset worker threads to make DBUG setting catch on. +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; + + +--connection server_1 +# Setup some transaction for the slave to replicate. +INSERT INTO t2 VALUES (41); +INSERT INTO t2 VALUES (42); +# Need to log the DELETE in statement format, so we can see it in processlist. +SET @old_format= @@binlog_format; +SET binlog_format= statement; +DELETE FROM t2 WHERE a=40; +SET binlog_format= @old_format; +INSERT INTO t2 VALUES (43); +INSERT INTO t2 VALUES (44); +# Force the slave to switch to a new relay log file. +FLUSH LOGS; +INSERT INTO t2 VALUES (45); +# Inject a GTID 0-1-100, which will trigger a DEBUG_SYNC signal when this +# transaction has been fetched by a worker thread. +SET gtid_seq_no=100; +INSERT INTO t2 VALUES (46); +--save_master_pos + +--connection con_temp2 +# Temporarily block the DELETE on a=40 from completing. +BEGIN; +SELECT * FROM t2 WHERE a=40 FOR UPDATE; + +--connection server_2 +--source include/start_slave.inc + +# Wait for a worker thread to start on the DELETE that will be blocked +# temporarily by the SELECT FOR UPDATE. 
+--let $wait_condition= SELECT count(*) > 0 FROM information_schema.processlist WHERE state='updating' and info LIKE '%DELETE FROM t2 WHERE a=40%'
+--source include/wait_condition.inc
+
+# The DBUG injection set above will make the worker thread signal the following
+# debug_sync when the GTID 0-1-100 has been reached by a worker thread.
+# Thus, at this point, the SQL driver thread has reached the next
+# relay log file name, while a worker thread is still processing a
+# transaction in the previous relay log file, blocked on the SELECT FOR
+# UPDATE.
+SET debug_sync= 'now WAIT_FOR scheduled_gtid_0_x_100';
+# At this point, the SQL driver thread is in the new relay log file, while
+# the DELETE from the old relay log file is not yet complete. We will stop
+# the slave at this point. The bug was that the DELETE statement would
+# update the slave position to the _new_ relay log file name instead of
+# its own old file name. Thus, by stopping and restarting the slave at this
+# point, we would get an error at restart due to incorrect position. (If we
+# let the slave catch up before stopping, the incorrect position would be
+# corrected by a later transaction.)
+
+send STOP SLAVE;
+
+--connection con_temp2
+# Wait for STOP SLAVE to have proceeded sufficiently that it has signalled
+# all worker threads to stop; this ensures that we will stop after the DELETE
+# transaction (and not after a later transaction that might have been able
+# to set a fixed position).
+SET debug_sync= 'now WAIT_FOR wait_for_done_waiting';
+# Now release the row lock that was blocking the replication of DELETE.
+ROLLBACK;
+
+--connection server_2
+reap;
+--source include/wait_for_slave_sql_to_stop.inc
+SELECT * FROM t2 WHERE a >= 40 ORDER BY a;
+# Now restart the slave. With the bug present, this would start at an
+# incorrect relay log position, causing relay log read error (or if unlucky,
+# silently skip a number of events).
+--source include/start_slave.inc
+--sync_with_master
+SELECT * FROM t2 WHERE a >= 40 ORDER BY a;
+
+# Clean up.
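The coordinates involved in this bug are the ones reported by SHOW SLAVE STATUS; with the bug, the position saved at stop named the new relay log file rather than the one the blocked DELETE came from (field names only, values depend on the run):

SHOW SLAVE STATUS\G
# Relay_Log_File / Relay_Log_Pos               -- where the SQL thread resumes in the relay log
# Relay_Master_Log_File / Exec_Master_Log_Pos  -- the corresponding master binlog position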
+--source include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET DEBUG_SYNC= 'RESET'; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t2; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_innodb_lock_conflict.inc b/mysql-test/suite/rpl/include/rpl_parallel_innodb_lock_conflict.inc new file mode 100644 index 00000000000..90304937445 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_innodb_lock_conflict.inc @@ -0,0 +1,107 @@ +--echo ***MDEV-5914: Parallel replication deadlock due to InnoDB lock conflicts *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--connection server_2 +SET sql_log_bin=0; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +SET sql_log_bin=1; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; +INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); +--connect (con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +--connect (con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,) + +# Create a group commit with UPDATE and DELETE, in that order. +# The bug was that while the UPDATE's row lock does not block the DELETE, the +# DELETE's gap lock _does_ block the UPDATE. This could cause a deadlock +# on the slave. +--connection con1 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +send UPDATE t4 SET b=NULL WHERE a=6; +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued1'; + +--connection con2 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +send DELETE FROM t4 WHERE b <= 3; + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; + +--connection con1 +REAP; +--connection con2 +REAP; +SET debug_sync='RESET'; +--save_master_pos + +--connection server_2 +--source include/start_slave.inc +--sync_with_master +--source include/stop_slave.inc + +SELECT * FROM t4 ORDER BY a; + + +# Another example, this one with INSERT vs. DELETE +--connection server_1 +DELETE FROM t4; +INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); + +# Create a group commit with INSERT and DELETE, in that order. +# The bug was that while the INSERT's insert intention lock does not block +# the DELETE, the DELETE's gap lock _does_ block the INSERT. This could cause +# a deadlock on the slave. 
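The locking claim in the comment above can be reproduced outside the replication harness with two plain client sessions; a sketch, assuming the default REPEATABLE READ isolation level:

# session 1: the range DELETE scans the secondary index b_idx and takes
# next-key/gap locks on the scanned range.
BEGIN;
DELETE FROM t4 WHERE b <= 3;

# session 2: the INSERT requests an insert intention lock inside one of those
# gaps and blocks until session 1 commits or rolls back.
BEGIN;
INSERT INTO t4 VALUES (7, NULL);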
+--connection con1 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +send INSERT INTO t4 VALUES (7, NULL); +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued1'; + +--connection con2 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +send DELETE FROM t4 WHERE b <= 3; + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; + +--connection con1 +REAP; +--connection con2 +REAP; +SET debug_sync='RESET'; +--save_master_pos + +--connection server_2 +--source include/start_slave.inc +--sync_with_master +--source include/stop_slave.inc + +SELECT * FROM t4 ORDER BY a; + + +# Clean up. +--connection server_2 +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; + +--connection server_1 +--disconnect con1 +--disconnect con2 +DROP TABLE t4; +SET DEBUG_SYNC= 'RESET'; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_missed_error_handling.inc b/mysql-test/suite/rpl/include/rpl_parallel_missed_error_handling.inc new file mode 100644 index 00000000000..33b1bcb11d9 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_missed_error_handling.inc @@ -0,0 +1,87 @@ +--echo *** MDEV-5921: In parallel replication, an error is not correctly signalled to the next transaction *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--connection server_2 +--source include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master + +--connection server_1 +INSERT INTO t3 VALUES (110, 1); +--save_master_pos + +--connection server_2 +--sync_with_master +SELECT * FROM t3 WHERE a >= 110 ORDER BY a; +# Inject a duplicate key error. +SET sql_log_bin=0; +INSERT INTO t3 VALUES (111, 666); +SET sql_log_bin=1; + +--connection server_1 + +# Create a group commit with two inserts, the first one conflicts with a row on the slave +--connect (con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +send INSERT INTO t3 VALUES (111, 2); +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued1'; + +--connect (con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +send INSERT INTO t3 VALUES (112, 3); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; + +--connection con1 +REAP; +--connection con2 +REAP; +SET debug_sync='RESET'; +--save_master_pos + +--connection server_2 +--let $slave_sql_errno= 1062 +--source include/wait_for_slave_sql_error.inc +--source include/wait_for_slave_sql_to_stop.inc +# We should not see the row (112,3) here, it should be rolled back due to +# error signal from the prior transaction. 
+SELECT * FROM t3 WHERE a >= 110 ORDER BY a; +SET sql_log_bin=0; +DELETE FROM t3 WHERE a=111 AND b=666; +SET sql_log_bin=1; +START SLAVE SQL_THREAD; +--sync_with_master +SELECT * FROM t3 WHERE a >= 110 ORDER BY a; + +# Clean up. +--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; + +--connection server_1 +--disconnect con1 +--disconnect con2 +DROP TABLE t3; +SET DEBUG_SYNC= 'RESET'; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_mode.inc b/mysql-test/suite/rpl/include/rpl_parallel_mode.inc new file mode 100644 index 00000000000..67104069e9a --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_mode.inc @@ -0,0 +1,87 @@ +--echo *** MDEV-6676 - test syntax of @@slave_parallel_mode *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/master-slave.inc + +--connection server_2 +--source include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master + +--let $status_items= Parallel_Mode +--source include/show_slave_status.inc +--source include/stop_slave.inc +SET GLOBAL slave_parallel_mode='aggressive'; +--let $status_items= Parallel_Mode +--source include/show_slave_status.inc +SET GLOBAL slave_parallel_mode='conservative'; +--let $status_items= Parallel_Mode +--source include/show_slave_status.inc + +--echo *** MDEV-6676 - test that empty parallel_mode does not replicate in parallel *** +--connection server_1 +INSERT INTO t2 VALUES (1040); +--source include/save_master_gtid.inc + +--connection server_2 +SET GLOBAL slave_parallel_mode='none'; +# Test that we do not use parallel apply, by injecting an unconditional +# crash in the parallel apply code. +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,slave_crash_if_parallel_apply"; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; +--source include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; + + +--echo *** MDEV-6676 - test disabling domain-based parallel replication *** +--connection server_1 +# Let's do a bunch of transactions that will conflict if run out-of-order in +# domain-based parallel replication mode. 
+SET gtid_domain_id = 1; +INSERT INTO t2 VALUES (1041); +INSERT INTO t2 VALUES (1042); +INSERT INTO t2 VALUES (1043); +INSERT INTO t2 VALUES (1044); +INSERT INTO t2 VALUES (1045); +INSERT INTO t2 VALUES (1046); +DELETE FROM t2 WHERE a >= 1041; +SET gtid_domain_id = 2; +INSERT INTO t2 VALUES (1041); +INSERT INTO t2 VALUES (1042); +INSERT INTO t2 VALUES (1043); +INSERT INTO t2 VALUES (1044); +INSERT INTO t2 VALUES (1045); +INSERT INTO t2 VALUES (1046); +SET gtid_domain_id = 0; +--source include/save_master_gtid.inc +--connection server_2 +SET GLOBAL slave_parallel_mode=minimal; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; + +# Cleanup +--source include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t2; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_partial_binlog_trans.inc b/mysql-test/suite/rpl/include/rpl_parallel_partial_binlog_trans.inc new file mode 100644 index 00000000000..7247925285f --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_partial_binlog_trans.inc @@ -0,0 +1,71 @@ +--echo *** MDEV_6435: Incorrect error handling when query binlogged partially on master with "killed" error *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/have_binlog_format_statement.inc +--source include/master-slave.inc + +--connection server_2 +--source include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=1; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t6 (a INT) ENGINE=MyISAM; +CREATE TRIGGER tr AFTER INSERT ON t6 FOR EACH ROW SET @a = 1; +--connect (con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,) + +--connection con1 +--let $conid = `SELECT CONNECTION_ID()` +SET debug_sync='sp_head_execute_before_loop SIGNAL ready WAIT_FOR cont'; +send INSERT INTO t6 VALUES (1), (2), (3); + +--connection server_1 +SET debug_sync='now WAIT_FOR ready'; +--replace_result $conid CONID +eval KILL QUERY $conid; +SET debug_sync='now SIGNAL cont'; + +--connection con1 +--error ER_QUERY_INTERRUPTED +--reap +SET debug_sync='RESET'; +--let $after_error_gtid_pos= `SELECT @@gtid_binlog_pos` + +--connection server_1 +SET debug_sync='RESET'; + +--connection server_2 +--let $slave_sql_errno= 1317 +--source include/wait_for_slave_sql_error.inc +STOP SLAVE IO_THREAD; +--replace_result $after_error_gtid_pos AFTER_ERROR_GTID_POS +eval SET GLOBAL gtid_slave_pos= '$after_error_gtid_pos'; +--source include/start_slave.inc + +--connection server_1 +INSERT INTO t6 VALUES (4); +SELECT * FROM t6 ORDER BY a; +--save_master_pos + +--connection server_2 +--sync_with_master +SELECT * FROM t6 ORDER BY a; + +# Clean up. 
+--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; + +--connection server_1 +DROP TABLE t6; +SET DEBUG_SYNC= 'RESET'; +--disconnect con1 + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_record_gtid_wakeup.inc b/mysql-test/suite/rpl/include/rpl_parallel_record_gtid_wakeup.inc new file mode 100644 index 00000000000..0f94d8f9943 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_record_gtid_wakeup.inc @@ -0,0 +1,72 @@ +--echo *** MDEV-7929: record_gtid() for non-transactional event group calls wakeup_subsequent_commits() too early, causing slave hang. *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_record_gtid_serverid_100_sleep'; + +--connection server_1 +# Inject two group commits. The bug was that record_gtid for a +# non-transactional event group would commit its own transaction, which would +# cause ha_commit_trans() to call wakeup_subsequent_commits() too early. This +# in turn lead to access to freed group_commit_orderer object, losing a wakeup +# and causing slave threads to hang. +# We inject a small sleep in the corresponding record_gtid() to make the race +# easier to hit. + +SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; + +# Group commit with cid=10010, two event groups. +SET @old_server_id= @@SESSION.server_id; +SET SESSION server_id= 100; +SET @commit_id= 10010; +ALTER TABLE t1 COMMENT "Hulubulu!"; +SET SESSION server_id= @old_server_id; +INSERT INTO t3 VALUES (130, 0); + +# Group commit with cid=10011, one event group. +SET @commit_id= 10011; +INSERT INTO t3 VALUES (131, 0); + +SET SESSION debug_dbug=@old_dbug; + +SELECT * FROM t3 WHERE a >= 130 ORDER BY a; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc + +SELECT * FROM t3 WHERE a >= 130 ORDER BY a; + +# Clean up. 
+--source include/stop_slave.inc +SET GLOBAL debug_dbug= @old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1,t3; + +--source include/rpl_end.inc + diff --git a/mysql-test/suite/rpl/include/rpl_parallel_retry_deadlock.inc b/mysql-test/suite/rpl/include/rpl_parallel_retry_deadlock.inc new file mode 100644 index 00000000000..54ac859bb33 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_retry_deadlock.inc @@ -0,0 +1,281 @@ +--echo *** MDEV-7326 Server deadlock in connection with parallel replication *** +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +--source include/stop_slave.inc +# Test assumes that 'conservative' mode is in effect. i.e +# Do not start parallel execution of this event group until all prior groups +# have reached the commit phase. Refer 'rpl_parallel_start_waiting_for_prior' +# debug simumation. +SET GLOBAL slave_parallel_mode='conservative'; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t6 (a INT) ENGINE=MyISAM; +--save_master_pos + +--connection server_2 +--sync_with_master + +--connection server_1 +# Use a stored function to inject a debug_sync into the appropriate THD. +# The function does nothing on the master, and on the slave it injects the +# desired debug_sync action(s). +SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--connection server_2 +SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + IF d1 != '' THEN + SET debug_sync = d1; + END IF; + IF d2 != '' THEN + SET debug_sync = d2; + END IF; + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +# We use three transactions, each in a separate group commit. +# T1 does mark_start_commit(), then gets a deadlock error. +# T2 wakes up and starts running +# T1 does unmark_start_commit() +# T3 goes to wait for T2 to start its commit +# T2 does mark_start_commit() +# The bug was that at this point, T3 got deadlocked. Because T1 has unmarked(), +# T3 did not yet see the count_committing_event_groups reach its target value +# yet. But when T1 later re-did mark_start_commit(), it failed to send a wakeup +# to T3. + +--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=3; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; +--source include/start_slave.inc + +--connection server_1 +SET @old_format= @@SESSION.binlog_format; +SET binlog_format= STATEMENT; +# This debug_sync will linger on and be used to control T3 later. +INSERT INTO t1 VALUES (foo(50, + "rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", + "rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); +--save_master_pos +--connection server_2 +# Wait for the debug_sync point for T3 to be set. 
But let the preparation +# transaction remain hanging, so that T1 and T2 will be scheduled for the +# remaining two worker threads. +SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; + +--connection server_1 +INSERT INTO t2 VALUES (foo(50, + "rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", + "rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2")); +--save_master_pos + +--connection server_2 +SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; +# T1 has now done mark_start_commit(). It will later do a rollback and retry. + +--connection server_1 +# Use a MyISAM table for T2 and T3, so they do not trigger the +# rpl_parallel_simulate_temp_err_xid DBUG insertion on XID event. +INSERT INTO t1 VALUES (foo(51, + "rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", + "rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); + +--connection server_2 +SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; +# T2 has now started running, but has not yet done mark_start_commit() +SET DEBUG_SYNC= "now SIGNAL t1_cont1"; +SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; +# T1 has now done unmark_start_commit() in preparation for its retry. + +--connection server_1 +INSERT INTO t1 VALUES (52); +SET BINLOG_FORMAT= @old_format; +SELECT * FROM t2 WHERE a>=50 ORDER BY a; +SELECT * FROM t1 WHERE a>=50 ORDER BY a; + +--connection server_2 +# Let the preparation transaction complete, so that the same worker thread +# can continue with the transaction T3. +SET DEBUG_SYNC= "now SIGNAL prep_cont"; +SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; +# T3 has now gone to wait for T2 to start committing +SET DEBUG_SYNC= "now SIGNAL t2_cont1"; +SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; +# T2 has now done mark_start_commit(). +# Let things run, and check that T3 does not get deadlocked. +SET DEBUG_SYNC= "now SIGNAL t1_cont2"; +--sync_with_master + +--connection server_1 +--save_master_pos +--connection server_2 +--sync_with_master +SELECT * FROM t2 WHERE a>=50 ORDER BY a; +SELECT * FROM t1 WHERE a>=50 ORDER BY a; +SET DEBUG_SYNC="reset"; + +# Re-spawn the worker threads to remove any DBUG injections or DEBUG_SYNC. +--source include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +--source include/start_slave.inc + + +--echo *** MDEV-7326 Server deadlock in connection with parallel replication *** +# Similar to the previous test, but with T2 and T3 in the same GCO. +# We use three transactions, T1 in one group commit and T2/T3 in another. +# T1 does mark_start_commit(), then gets a deadlock error. +# T2 wakes up and starts running +# T1 does unmark_start_commit() +# T3 goes to wait for T1 to start its commit +# T2 does mark_start_commit() +# The bug was that at this point, T3 got deadlocked. T2 increments the +# count_committing_event_groups but does not signal T3, as they are in +# the same GCO. Then later when T1 increments, it would also not signal +# T3, because now the count_committing_event_groups is not equal to the +# wait_count of T3 (it is one larger). 
+ +--connect (con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +--connect (con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,) + +--connection server_2 +--source include/stop_slave.inc +SET @old_parallel_mode= @@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_mode='conservative'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=3; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; +--source include/start_slave.inc + +--connection server_1 +SET @old_format= @@SESSION.binlog_format; +SET binlog_format= STATEMENT; +# This debug_sync will linger on and be used to control T3 later. +INSERT INTO t1 VALUES (foo(60, + "rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", + "rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); +--save_master_pos +--connection server_2 +# Wait for the debug_sync point for T3 to be set. But let the preparation +# transaction remain hanging, so that T1 and T2 will be scheduled for the +# remaining two worker threads. +SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; + +--connection server_1 +INSERT INTO t2 VALUES (foo(60, + "rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", + "rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2")); +--save_master_pos + +--connection server_2 +SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; +# T1 has now done mark_start_commit(). It will later do a rollback and retry. + +# Do T2 and T3 in a single group commit. +# Use a MyISAM table for T2 and T3, so they do not trigger the +# rpl_parallel_simulate_temp_err_xid DBUG insertion on XID event. +--connection con_temp3 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +SET binlog_format=statement; +send INSERT INTO t1 VALUES (foo(61, + "rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", + "rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued1'; + +--connection con_temp4 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +send INSERT INTO t6 VALUES (62); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; + +--connection con_temp3 +REAP; +--connection con_temp4 +REAP; + +--connection server_1 +SET debug_sync='RESET'; +SET BINLOG_FORMAT= @old_format; +SELECT * FROM t2 WHERE a>=60 ORDER BY a; +SELECT * FROM t1 WHERE a>=60 ORDER BY a; +SELECT * FROM t6 WHERE a>=60 ORDER BY a; + +--connection server_2 +SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; +# T2 has now started running, but has not yet done mark_start_commit() +SET DEBUG_SYNC= "now SIGNAL t1_cont1"; +SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; +# T1 has now done unmark_start_commit() in preparation for its retry. + +--connection server_2 +# Let the preparation transaction complete, so that the same worker thread +# can continue with the transaction T3. +SET DEBUG_SYNC= "now SIGNAL prep_cont"; +SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; +# T3 has now gone to wait for T2 to start committing +SET DEBUG_SYNC= "now SIGNAL t2_cont1"; +SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; +# T2 has now done mark_start_commit(). +# Let things run, and check that T3 does not get deadlocked. 
+SET DEBUG_SYNC= "now SIGNAL t1_cont2"; +--sync_with_master + +--connection server_1 +--save_master_pos +--connection server_2 +--sync_with_master +SELECT * FROM t2 WHERE a>=60 ORDER BY a; +SELECT * FROM t1 WHERE a>=60 ORDER BY a; +SELECT * FROM t6 WHERE a>=60 ORDER BY a; +SET DEBUG_SYNC="reset"; + +# Clean up. +--source include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc + +--connection server_1 +DROP function foo; +DROP TABLE t1,t2,t6; +--disconnect con_temp3 +--disconnect con_temp4 +SET DEBUG_SYNC= 'RESET'; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_rollback_assert.inc b/mysql-test/suite/rpl/include/rpl_parallel_rollback_assert.inc new file mode 100644 index 00000000000..eec331b3d64 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_rollback_assert.inc @@ -0,0 +1,62 @@ +--echo *** MDEV-8725: Assertion on ROLLBACK statement in the binary log *** +--source include/have_innodb.inc +--source include/master-slave.inc + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master + +--connection server_1 +# Inject an event group terminated by ROLLBACK, by mixing MyISAM and InnoDB +# in a transaction. The bug was an assertion on the ROLLBACK due to +# mark_start_commit() being already called. +--disable_warnings +BEGIN; +INSERT INTO t2 VALUES (2000); +INSERT INTO t1 VALUES (2000); +INSERT INTO t2 VALUES (2001); +ROLLBACK; +--enable_warnings +SELECT * FROM t1 WHERE a>=2000 ORDER BY a; +SELECT * FROM t2 WHERE a>=2000 ORDER BY a; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc + +--connection server_1 +INSERT INTO t2 VALUES (2020); +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a>=2000 ORDER BY a; +let $rows_in_t1= `SELECT COUNT(*) FROM t1 WHERE a>=2000 ORDER BY a`; +if ($rows_in_t1 == 0) +{ +--query_vertical SHOW SLAVE STATUS +} +SELECT * FROM t1 WHERE a>=2000 ORDER BY a; + +# Clean up. +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1,t2; + +--source include/rpl_end.inc + diff --git a/mysql-test/suite/rpl/include/rpl_parallel_single_grpcmt.inc b/mysql-test/suite/rpl/include/rpl_parallel_single_grpcmt.inc new file mode 100644 index 00000000000..cf4c547b73b --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_single_grpcmt.inc @@ -0,0 +1,170 @@ +--echo *** Test that group-committed transactions on the master can replicate in parallel on the slave. *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/have_binlog_format_statement.inc +--source include/master-slave.inc + +# Test various aspects of parallel replication. + +--connection server_1 +# The function does nothing on the master, and on the slave it injects the +# desired debug_sync action(s). 
+SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--connection server_2 +--source include/stop_slave.inc +SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + IF d1 != '' THEN + SET debug_sync = d1; + END IF; + IF d2 != '' THEN + SET debug_sync = d2; + END IF; + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +# We need to restart all parallel threads for the new global setting to +# be copied to the session-level values. +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +# Create some sentinel rows so that the rows inserted in parallel fall into +# separate gaps and do not cause gap lock conflicts. +INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7); +--save_master_pos +--connection server_2 +--sync_with_master + + +# We want to test that the transactions can execute out-of-order on +# the slave, but still end up committing in-order, and in a single +# group commit. +# +# The idea is to group-commit three transactions together on the master: +# A, B, and C. On the slave, C will execute the insert first, then A, +# and then B. But B manages to complete before A has time to commit, so +# all three end up committing together. +# +# So we start by setting up some row locks that will block transactions +# A and B from executing, allowing C to run first. + +--connect (con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +BEGIN; +INSERT INTO t3 VALUES (2,102); +--connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +BEGIN; +INSERT INTO t3 VALUES (4,104); + +# On the master, queue three INSERT transactions as a single group commit. 
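
The "single group commit" is arranged with the same DEBUG_SYNC trick rather than timing: the first transaction to queue parks at commit_after_release_LOCK_prepare_ordered until the later ones have reported in, so everything queued behind it is committed in one group. A stripped-down two-transaction sketch of the scheme, not part of the patch (connection names c1/c2, signal names and row values are illustrative only):

    --connect (c1,127.0.0.1,root,,test,$SERVER_MYPORT_1,)
    SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL queued1 WAIT_FOR go';
    send INSERT INTO t3 VALUES (98, 0);

    --connection server_1
    SET debug_sync='now WAIT_FOR queued1';

    --connect (c2,127.0.0.1,root,,test,$SERVER_MYPORT_1,)
    SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL queued2';
    send INSERT INTO t3 VALUES (99, 0);

    --connection server_1
    SET debug_sync='now WAIT_FOR queued2';
    SET debug_sync='now SIGNAL go';    # release the leader; the pair commits as one group

    --connection c1
    reap;
    --connection c2
    reap;

Membership in the shared group is what later shows up as the same cid=# on the transactions' GTID events in the show_binlog_events.inc output. The three-way version used by this test follows:
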
+--connect (con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +SET binlog_format=statement; +send INSERT INTO t3 VALUES (2, foo(12, + 'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', + '')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued1'; + +--connect (con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +SET binlog_format=statement; +send INSERT INTO t3 VALUES (4, foo(14, + 'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', + '')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; + +--connect (con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +SET binlog_format=statement; +send INSERT INTO t3 VALUES (6, foo(16, + 'group_commit_waiting_for_prior SIGNAL slave_queued3', + '')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; + +--connection con_temp3 +REAP; +--connection con_temp4 +REAP; +--connection con_temp5 +REAP; +SET debug_sync='RESET'; + +--connection server_1 +SELECT * FROM t3 ORDER BY a; +--let $binlog_file= master-bin.000001 +--source include/show_binlog_events.inc + +# First, wait until insert 3 is ready to queue up for group commit, but is +# waiting for insert 2 to commit before it can do so itself. +--connection server_2 +SET debug_sync='now WAIT_FOR slave_queued3'; + +# Next, let insert 1 proceed, and allow it to queue up as the group commit +# leader, but let it wait for insert 2 to also queue up before proceeding. +--connection con_temp1 +ROLLBACK; +--connection server_2 +SET debug_sync='now WAIT_FOR slave_queued1'; + +# Now let insert 2 proceed and queue up. +--connection con_temp2 +ROLLBACK; +--connection server_2 +SET debug_sync='now WAIT_FOR slave_queued2'; +# And finally, we can let insert 1 proceed and do the group commit with all +# three insert transactions together. +SET debug_sync='now SIGNAL slave_cont1'; + +# Wait for the commit to complete and check that all three transactions +# group-committed together (will be seen in the binlog as all three having +# cid=# on their GTID event). +--let $wait_condition= SELECT COUNT(*) = 3 FROM t3 WHERE a IN (2,4,6) +--source include/wait_condition.inc +SELECT * FROM t3 ORDER BY a; +--let $binlog_file= slave-bin.000001 +--source include/show_binlog_events.inc + + +# Clean up. +--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; + +--connection server_1 +DROP function foo; +DROP TABLE t3; +SET DEBUG_SYNC= 'RESET'; + +--disable_connect_log +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_slave_bgc_kill.inc b/mysql-test/suite/rpl/include/rpl_parallel_slave_bgc_kill.inc new file mode 100644 index 00000000000..a78dbad052f --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_slave_bgc_kill.inc @@ -0,0 +1,454 @@ +--echo *** Test killing slave threads at various wait points *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/have_binlog_format_statement.inc +--source include/master-slave.inc + +# Test various aspects of parallel replication. 
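
Each kill scenario in this file expects the SQL thread to stop with one of three errors. The numeric list passed to wait_for_slave_sql_error.inc is not spelled out in the patch; my reading of it, matching the mtr.add_suppression() calls added below, is:

    --let $slave_sql_errno= 1317,1927,1964
    # 1317 = ER_QUERY_INTERRUPTED     ("Query execution was interrupted")
    # 1927 = ER_CONNECTION_KILLED     ("Connection was killed")
    # 1964 = ER_PRIOR_COMMIT_FAILED   ("Commit failed due to failure of an earlier
    #                                   commit on which this one depends")
    --source include/wait_for_slave_sql_error.inc
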
+ +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode='conservative'; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +--connect (con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +--connect (con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +--connect (con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master + +--connection server_1 +# Use a stored function to inject a debug_sync into the appropriate THD. +# The function does nothing on the master, and on the slave it injects the +# desired debug_sync action(s). +SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--connection server_2 +SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + IF d1 != '' THEN + SET debug_sync = d1; + END IF; + IF d2 != '' THEN + SET debug_sync = d2; + END IF; + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--echo *** 1. Test killing transaction waiting in commit for previous transaction to commit *** + +# Set up three transactions on the master that will be group-committed +# together so they can be replicated in parallel on the slave. +--connection con_temp3 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +send INSERT INTO t3 VALUES (31, foo(31, + 'commit_before_prepare_ordered WAIT_FOR t2_waiting', + 'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued1'; + +--connection con_temp4 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +BEGIN; +# This insert is just so we can get T2 to wait while a query is running that we +# can see in SHOW PROCESSLIST so we can get its thread_id to kill later. +INSERT INTO t3 VALUES (32, foo(32, + 'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', + '')); +# This insert sets up debug_sync points so that T2 will tell when it is at its +# wait point where we want to kill it - and when it has been killed. 
+INSERT INTO t3 VALUES (33, foo(33, + 'group_commit_waiting_for_prior SIGNAL t2_waiting', + 'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); +send COMMIT; + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; + +--connection con_temp5 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +send INSERT INTO t3 VALUES (34, foo(34, + '', + '')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; + +--connection con_temp3 +REAP; +--connection con_temp4 +REAP; +--connection con_temp5 +REAP; + +--connection server_1 +SELECT * FROM t3 WHERE a >= 30 ORDER BY a; +SET debug_sync='RESET'; + +--connection server_2 +SET sql_log_bin=0; +CALL mtr.add_suppression("Query execution was interrupted"); +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +CALL mtr.add_suppression("Slave: Connection was killed"); +SET sql_log_bin=1; +# Wait until T2 is inside executing its insert of 32, then find it in SHOW +# PROCESSLIST to know its thread id for KILL later. +SET debug_sync='now WAIT_FOR t2_query'; +--let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(32%' AND INFO NOT LIKE '%LIKE%'` +SET debug_sync='now SIGNAL t2_cont'; + +# Wait until T2 has entered its wait for T1 to commit, and T1 has +# progressed into its commit phase. +SET debug_sync='now WAIT_FOR t1_ready'; + +# Now kill the transaction T2. +--replace_result $thd_id THD_ID +eval KILL $thd_id; + +# Wait until T2 has reacted on the kill. +SET debug_sync='now WAIT_FOR t2_killed'; + +# Now we can allow T1 to proceed. +SET debug_sync='now SIGNAL t1_cont'; + +--let $slave_sql_errno= 1317,1927,1964 +--source include/wait_for_slave_sql_error.inc +STOP SLAVE IO_THREAD; +SELECT * FROM t3 WHERE a >= 30 ORDER BY a; + +# Now we have to disable the debug_sync statements, so they do not trigger +# when the events are retried. +SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--connection server_1 +INSERT INTO t3 VALUES (39,0); +--save_master_pos + +--connection server_2 +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t3 WHERE a >= 30 ORDER BY a; +# Restore the foo() function. +SET sql_log_bin=0; +DROP FUNCTION foo; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + IF d1 != '' THEN + SET debug_sync = d1; + END IF; + IF d2 != '' THEN + SET debug_sync = d2; + END IF; + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + + +--connection server_2 +# Respawn all worker threads to clear any left-over debug_sync or other stuff. +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +--source include/start_slave.inc + + +--echo *** 2. Same as (1), but without restarting IO thread after kill of SQL threads *** + +# Set up three transactions on the master that will be group-committed +# together so they can be replicated in parallel on the slave. 
+--connection con_temp3 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +send INSERT INTO t3 VALUES (41, foo(41, + 'commit_before_prepare_ordered WAIT_FOR t2_waiting', + 'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued1'; + +--connection con_temp4 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +BEGIN; +# This insert is just so we can get T2 to wait while a query is running that we +# can see in SHOW PROCESSLIST so we can get its thread_id to kill later. +INSERT INTO t3 VALUES (42, foo(42, + 'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', + '')); +# This insert sets up debug_sync points so that T2 will tell when it is at its +# wait point where we want to kill it - and when it has been killed. +INSERT INTO t3 VALUES (43, foo(43, + 'group_commit_waiting_for_prior SIGNAL t2_waiting', + 'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); +send COMMIT; + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; + +--connection con_temp5 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +send INSERT INTO t3 VALUES (44, foo(44, + '', + '')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; + +--connection con_temp3 +REAP; +--connection con_temp4 +REAP; +--connection con_temp5 +REAP; + +--connection server_1 +SELECT * FROM t3 WHERE a >= 40 ORDER BY a; +SET debug_sync='RESET'; + +--connection server_2 +# Wait until T2 is inside executing its insert of 42, then find it in SHOW +# PROCESSLIST to know its thread id for KILL later. +SET debug_sync='now WAIT_FOR t2_query'; +--let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(42%' AND INFO NOT LIKE '%LIKE%'` +SET debug_sync='now SIGNAL t2_cont'; + +# Wait until T2 has entered its wait for T1 to commit, and T1 has +# progressed into its commit phase. +SET debug_sync='now WAIT_FOR t1_ready'; + +# Now kill the transaction T2. +--replace_result $thd_id THD_ID +eval KILL $thd_id; + +# Wait until T2 has reacted on the kill. +SET debug_sync='now WAIT_FOR t2_killed'; + +# Now we can allow T1 to proceed. +SET debug_sync='now SIGNAL t1_cont'; + +--let $slave_sql_errno= 1317,1927,1964 +--source include/wait_for_slave_sql_error.inc + +# Now we have to disable the debug_sync statements, so they do not trigger +# when the events are retried. +SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--connection server_1 +INSERT INTO t3 VALUES (49,0); +--save_master_pos + +--connection server_2 +START SLAVE SQL_THREAD; +--sync_with_master +SELECT * FROM t3 WHERE a >= 40 ORDER BY a; +# Restore the foo() function. +SET sql_log_bin=0; +DROP FUNCTION foo; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + IF d1 != '' THEN + SET debug_sync = d1; + END IF; + IF d2 != '' THEN + SET debug_sync = d2; + END IF; + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + + +--connection server_2 +# Respawn all worker threads to clear any left-over debug_sync or other stuff. 
+--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +--source include/start_slave.inc + + +--echo *** 3. Same as (2), but not using gtid mode *** + +--connection server_2 +--source include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=no; +--source include/start_slave.inc + +--connection server_1 +# Set up three transactions on the master that will be group-committed +# together so they can be replicated in parallel on the slave. +--connection con_temp3 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +send INSERT INTO t3 VALUES (51, foo(51, + 'commit_before_prepare_ordered WAIT_FOR t2_waiting', + 'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued1'; + +--connection con_temp4 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +BEGIN; +# This insert is just so we can get T2 to wait while a query is running that we +# can see in SHOW PROCESSLIST so we can get its thread_id to kill later. +INSERT INTO t3 VALUES (52, foo(52, + 'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', + '')); +# This insert sets up debug_sync points so that T2 will tell when it is at its +# wait point where we want to kill it - and when it has been killed. +INSERT INTO t3 VALUES (53, foo(53, + 'group_commit_waiting_for_prior SIGNAL t2_waiting', + 'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); +send COMMIT; + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; + +--connection con_temp5 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +send INSERT INTO t3 VALUES (54, foo(54, + '', + '')); + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; + +--connection con_temp3 +REAP; +--connection con_temp4 +REAP; +--connection con_temp5 +REAP; + +--connection server_1 +SELECT * FROM t3 WHERE a >= 50 ORDER BY a; +SET debug_sync='RESET'; + +--connection server_2 +# Wait until T2 is inside executing its insert of 52, then find it in SHOW +# PROCESSLIST to know its thread id for KILL later. +SET debug_sync='now WAIT_FOR t2_query'; +--let $thd_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE '%foo(52%' AND INFO NOT LIKE '%LIKE%'` +SET debug_sync='now SIGNAL t2_cont'; + +# Wait until T2 has entered its wait for T1 to commit, and T1 has +# progressed into its commit phase. +SET debug_sync='now WAIT_FOR t1_ready'; + +# Now kill the transaction T2. +--replace_result $thd_id THD_ID +eval KILL $thd_id; + +# Wait until T2 has reacted on the kill. +SET debug_sync='now WAIT_FOR t2_killed'; + +# Now we can allow T1 to proceed. +SET debug_sync='now SIGNAL t1_cont'; + +--let $slave_sql_errno= 1317,1927,1964 +--source include/wait_for_slave_sql_error.inc +SELECT * FROM t3 WHERE a >= 50 ORDER BY a; + +# Now we have to disable the debug_sync statements, so they do not trigger +# when the events are retried. 
+SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--connection server_1 +INSERT INTO t3 VALUES (59,0); +--save_master_pos + +--connection server_2 +START SLAVE SQL_THREAD; +--sync_with_master +SELECT * FROM t3 WHERE a >= 50 ORDER BY a; + +# Clean up. +--connection server_2 +--source include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=slave_pos; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +--source include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; + +--connection server_1 +DROP function foo; +DROP TABLE t1,t2,t3; +SET DEBUG_SYNC= 'RESET'; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_stop_on_con_kill.inc b/mysql-test/suite/rpl/include/rpl_parallel_stop_on_con_kill.inc new file mode 100644 index 00000000000..63c483ea6ad --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_stop_on_con_kill.inc @@ -0,0 +1,129 @@ +--echo *** MDEV-8031: Parallel replication stops on "connection killed" error (probably incorrectly handled deadlock kill) *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t3 VALUES (201,0), (202,0); +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +--source include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_mdev8031'; + +--connection server_1 +# We artificially create a situation that hopefully resembles the original +# bug which was only seen "in the wild", and only once. +# Setup a fake group commit with lots of conflicts that will lead to deadloc +# kill. The slave DBUG injection causes the slave to be deadlock killed at +# a particular point during the retry, and then later do a small sleep at +# another critical point where the prior transaction then has a chance to +# complete. Finally an extra KILL check catches an unhandled, lingering +# deadlock kill. So rather artificial, but at least it exercises the +# relevant code paths. 
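
The "fake group commit" is produced with the binlog_force_commit_id DBUG injection: while it is active, every transaction the session binlogs is stamped with the commit id taken from the @commit_id user variable, so on the slave the whole batch looks like one group commit and is scheduled in parallel despite the row conflicts, which is what provokes the deadlock kills. A minimal sketch of the idiom, not part of the patch (statements illustrative, values taken from the test):

    SET @old_dbug= @@SESSION.debug_dbug;
    SET SESSION debug_dbug= "+d,binlog_force_commit_id";
    SET @commit_id= 10200;              # every transaction binlogged from here on
    INSERT INTO t3 VALUES (203, 1);     # ... carries this id on its GTID event
    UPDATE t3 SET b=b+1 WHERE a=203;    # conflicting follow-up, same cid
    SET SESSION debug_dbug= @old_dbug;
    # SHOW BINLOG EVENTS would list both as:  BEGIN GTID #-#-# cid=10200

The conflict-heavy batch used by the test follows:
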
+SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; + +SET @commit_id= 10200; +INSERT INTO t3 VALUES (203, 1); +INSERT INTO t3 VALUES (204, 1); +INSERT INTO t3 VALUES (205, 1); +UPDATE t3 SET b=b+1 WHERE a=201; +UPDATE t3 SET b=b+1 WHERE a=201; +UPDATE t3 SET b=b+1 WHERE a=201; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=203; +UPDATE t3 SET b=b+1 WHERE a=203; +UPDATE t3 SET b=b+1 WHERE a=204; +UPDATE t3 SET b=b+1 WHERE a=204; +UPDATE t3 SET b=b+1 WHERE a=204; +UPDATE t3 SET b=b+1 WHERE a=203; +UPDATE t3 SET b=b+1 WHERE a=205; +UPDATE t3 SET b=b+1 WHERE a=205; +SET SESSION debug_dbug=@old_dbug; + +SELECT * FROM t3 WHERE a>=200 ORDER BY a; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc + +SELECT * FROM t3 WHERE a>=200 ORDER BY a; +--source include/stop_slave.inc +SET GLOBAL debug_dbug= @old_dbug; +--source include/start_slave.inc + + +--echo *** Check getting deadlock killed inside open_binlog() during retry. *** + +--connection server_2 +--source include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_retry_event_group_open_binlog_kill'; +SET @old_max= @@GLOBAL.max_relay_log_size; +SET GLOBAL max_relay_log_size= 4096; + +--connection server_1 +SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; + +--let $large= `SELECT REPEAT("*", 8192)` +SET @commit_id= 10210; +--echo Omit long queries that cause relaylog rotations and transaction retries... +--disable_query_log +eval UPDATE t3 SET b=b+1 WHERE a=201 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=201 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=201 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=202 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=203 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=203 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=204 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=204 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=204 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=203 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=205 /* $large */; +eval UPDATE t3 SET b=b+1 WHERE a=205 /* $large */; +--enable_query_log +SET SESSION debug_dbug=@old_dbug; + +SELECT * FROM t3 WHERE a>=200 ORDER BY a; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc + +SELECT * FROM t3 WHERE a>=200 ORDER BY a; + +# Cleanup +--source include/stop_slave.inc +SET GLOBAL debug_dbug= @old_debg; +SET GLOBAL max_relay_log_size= @old_max; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t3; + +--source include/rpl_end.inc + diff --git a/mysql-test/suite/rpl/include/rpl_parallel_stop_slave.inc b/mysql-test/suite/rpl/include/rpl_parallel_stop_slave.inc new file mode 100644 index 00000000000..4eeddc927e0 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_stop_slave.inc @@ -0,0 +1,114 @@ +--echo *** Test STOP SLAVE in parallel mode *** + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source 
include/have_binlog_format_statement.inc +--source include/master-slave.inc + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode='conservative'; + +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc +--connect (con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,) +--connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,) + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc + +--connection server_1 +# Set up a couple of transactions. The first will be blocked halfway +# through on a lock, and while it is blocked we initiate STOP SLAVE. +# We then test that the halfway-initiated transaction is allowed to +# complete, but no subsequent ones. +# We have to use statement-based mode and set +# binlog_direct_non_transactional_updates=0; otherwise the binlog will +# be split into two event groups, one for the MyISAM part and one for the +# InnoDB part. +SET binlog_direct_non_transactional_updates=0; +SET sql_log_bin=0; +CALL mtr.add_suppression("Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction"); +SET sql_log_bin=1; +BEGIN; +INSERT INTO t2 VALUES (20); +--disable_warnings +INSERT INTO t1 VALUES (20); +--enable_warnings +INSERT INTO t2 VALUES (21); +INSERT INTO t3 VALUES (20, 20); +COMMIT; +INSERT INTO t3 VALUES(21, 21); +INSERT INTO t3 VALUES(22, 22); +--save_master_pos + +# Start a connection that will block the replicated transaction halfway. +--connection con_temp1 +BEGIN; +INSERT INTO t2 VALUES (21); + +--connection server_2 +START SLAVE; +# Wait for the MyISAM change to be visible, after which replication will wait +# for con_temp1 to roll back. +--let $wait_condition= SELECT COUNT(*) = 1 FROM t1 WHERE a=20 +--source include/wait_condition.inc + +--connection con_temp2 +# Initiate slave stop. It will have to wait for the current event group +# to complete. +# The dbug injection causes debug_sync to signal 'wait_for_done_waiting' +# when the SQL driver thread is ready. +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; +send STOP SLAVE; + +--connection con_temp1 +SET debug_sync='now WAIT_FOR wait_for_done_waiting'; +ROLLBACK; + +--connection con_temp2 +reap; +SET GLOBAL debug_dbug=@old_dbug; +SET debug_sync='RESET'; + +--connection server_2 +--source include/wait_for_slave_to_stop.inc +# We should see the first transaction applied, but not the two others. +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +SELECT * FROM t2 WHERE a >= 20 ORDER BY a; +SELECT * FROM t3 WHERE a >= 20 ORDER BY a; + +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +SELECT * FROM t2 WHERE a >= 20 ORDER BY a; +SELECT * FROM t3 WHERE a >= 20 ORDER BY a; + +--connection server_2 +# Respawn all worker threads to clear any left-over debug_sync or other stuff. 
+--source include/stop_slave.inc +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +--disconnect con_temp1 +--disconnect con_temp2 + +--connection server_1 +DROP TABLE t1,t2,t3; +SET DEBUG_SYNC= 'RESET'; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_wrong_binlog_order.inc b/mysql-test/suite/rpl/include/rpl_parallel_wrong_binlog_order.inc new file mode 100644 index 00000000000..093693d453e --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_wrong_binlog_order.inc @@ -0,0 +1,91 @@ +--echo *** MDEV-6775: Wrong binlog order in parallel replication *** + +# A bit tricky bug to reproduce. On the master, we binlog in statement-mode +# two transactions, an UPDATE followed by a DELETE. On the slave, we replicate +# with binlog-mode set to ROW, which means the DELETE, which modifies no rows, +# is not binlogged. Then we inject a wait in the group commit code on the +# slave, shortly before the actual commit of the UPDATE. The bug was that the +# DELETE could wake up from wait_for_prior_commit() before the commit of the +# UPDATE. So the test could see the slave position updated to after DELETE, +# while the UPDATE was still not visible. + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; +INSERT INTO t4 VALUES (1,NULL), (3,NULL), (4,4), (5, NULL), (6, 6); +--connect (con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +--connect (con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,) +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +--source include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,inject_binlog_commit_before_get_LOCK_log"; +SET @old_format=@@GLOBAL.binlog_format; +SET GLOBAL binlog_format=ROW; +# Re-spawn the worker threads to be sure they pick up the new binlog format +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; + +--connection con1 +SET @old_format= @@binlog_format; +SET binlog_format= statement; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +send UPDATE t4 SET b=NULL WHERE a=6; +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued1'; + +--connection con2 +SET @old_format= @@binlog_format; +SET binlog_format= statement; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +send DELETE FROM t4 WHERE b <= 3; + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; + +--connection con1 +REAP; +SET binlog_format= @old_format; +--connection con2 +REAP; +SET binlog_format= @old_format; +SET debug_sync='RESET'; +--save_master_pos +SELECT * FROM t4 ORDER BY a; + +--connection server_2 +--source include/start_slave.inc +SET debug_sync= 'now WAIT_FOR waiting'; +--sync_with_master +SELECT * FROM t4 ORDER BY a; +SET debug_sync= 'now SIGNAL cont'; + +# Re-spawn the worker threads to remove any DBUG 
injections or DEBUG_SYNC. +--source include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL binlog_format= @old_format; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; + +--connection server_1 +DROP TABLE t4; +SET DEBUG_SYNC= 'RESET'; +--disconnect con1 +--disconnect con2 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/include/rpl_parallel_wrong_exec_master_pos.inc b/mysql-test/suite/rpl/include/rpl_parallel_wrong_exec_master_pos.inc new file mode 100644 index 00000000000..672ade57ca3 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_parallel_wrong_exec_master_pos.inc @@ -0,0 +1,56 @@ +--echo *** MDEV-5938: Exec_master_log_pos not updated at log rotate in parallel replication *** +--source include/have_innodb.inc +--source include/master-slave.inc + +--connection server_2 +--source include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=1; +CHANGE MASTER TO master_use_gtid=slave_pos; +--source include/start_slave.inc + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t5 (a INT PRIMARY KEY, b INT); +INSERT INTO t5 VALUES (1,1); +INSERT INTO t5 VALUES (2,2), (3,8); +INSERT INTO t5 VALUES (4,16); +--save_master_pos + +--connection server_2 +--sync_with_master +let $io_file= query_get_value(SHOW SLAVE STATUS, Master_Log_File, 1); +let $io_pos= query_get_value(SHOW SLAVE STATUS, Read_Master_Log_Pos, 1); +let $sql_file= query_get_value(SHOW SLAVE STATUS, Relay_Master_Log_File, 1); +let $sql_pos= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1); +--disable_query_log +eval SELECT IF('$io_file' = '$sql_file', "OK", "Not ok, $io_file <> $sql_file") AS test_check; +eval SELECT IF('$io_pos' = '$sql_pos', "OK", "Not ok, $io_pos <> $sql_pos") AS test_check; +--enable_query_log + +--connection server_1 +FLUSH LOGS; +--source include/wait_for_binlog_checkpoint.inc +--save_master_pos + +--connection server_2 +--sync_with_master +let $io_file= query_get_value(SHOW SLAVE STATUS, Master_Log_File, 1); +let $io_pos= query_get_value(SHOW SLAVE STATUS, Read_Master_Log_Pos, 1); +let $sql_file= query_get_value(SHOW SLAVE STATUS, Relay_Master_Log_File, 1); +let $sql_pos= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1); +--disable_query_log +eval SELECT IF('$io_file' = '$sql_file', "OK", "Not ok, $io_file <> $sql_file") AS test_check; +eval SELECT IF('$io_pos' = '$sql_pos', "OK", "Not ok, $io_pos <> $sql_pos") AS test_check; +--enable_query_log + +# Clean up. 
+--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t5; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_delayed_slave.result b/mysql-test/suite/rpl/r/rpl_delayed_slave.result index bcfd49934b4..e7daa3328ce 100644 --- a/mysql-test/suite/rpl/r/rpl_delayed_slave.result +++ b/mysql-test/suite/rpl/r/rpl_delayed_slave.result @@ -185,7 +185,7 @@ DROP FUNCTION delay_on_slave; connection slave; SELECT @@GLOBAL.slave_parallel_mode; @@GLOBAL.slave_parallel_mode -conservative +optimistic SELECT @@GLOBAL.slave_parallel_threads; @@GLOBAL.slave_parallel_threads 0 diff --git a/mysql-test/suite/rpl/r/rpl_mdev6386.result b/mysql-test/suite/rpl/r/rpl_mdev6386.result index 91ba9569343..d0e5144857a 100644 --- a/mysql-test/suite/rpl/r/rpl_mdev6386.result +++ b/mysql-test/suite/rpl/r/rpl_mdev6386.result @@ -7,6 +7,7 @@ connection slave; connection slave; include/stop_slave.inc SET sql_log_bin= 0; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); INSERT INTO t1 VALUES (1, 2); SET sql_log_bin= 1; CHANGE MASTER TO master_use_gtid= current_pos; diff --git a/mysql-test/suite/rpl/r/rpl_parallel.result b/mysql-test/suite/rpl/r/rpl_parallel.result deleted file mode 100644 index 9258deadaca..00000000000 --- a/mysql-test/suite/rpl/r/rpl_parallel.result +++ /dev/null @@ -1,1690 +0,0 @@ -include/master-slave.inc -[connection master] -connection server_2; -SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; -SET GLOBAL slave_parallel_threads=10; -ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=10; -SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; -IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) -OK -CHANGE MASTER TO master_use_gtid=slave_pos; -include/start_slave.inc -SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; -IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) -OK -include/stop_slave.inc -SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; -IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) -OK -include/start_slave.inc -SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; -IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) -OK -*** Test long-running query in domain 1 can run in parallel with short queries in domain 0 *** -connection server_1; -ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; -CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; -CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; -INSERT INTO t1 VALUES (1); -INSERT INTO t2 VALUES (1); -connection server_2; -connect con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,; -LOCK TABLE t1 WRITE; -connection server_1; -SET gtid_domain_id=1; -INSERT INTO t1 VALUES (2); -SET gtid_domain_id=0; -INSERT INTO t2 VALUES (2); -INSERT INTO t2 VALUES (3); -BEGIN; -INSERT INTO t2 VALUES (4); -INSERT INTO t2 
VALUES (5); -COMMIT; -INSERT INTO t2 VALUES (6); -connection server_2; -SELECT * FROM t2 ORDER by a; -a -1 -2 -3 -4 -5 -6 -connection con_temp1; -SELECT * FROM t1; -a -1 -UNLOCK TABLES; -connection server_2; -SELECT * FROM t1 ORDER BY a; -a -1 -2 -*** Test two transactions in different domains committed in opposite order on slave but in a single group commit. *** -connection server_2; -include/stop_slave.inc -connection server_1; -SET sql_log_bin=0; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -RETURN x; -END -|| -SET sql_log_bin=1; -SET @old_format= @@SESSION.binlog_format; -SET binlog_format='statement'; -SET gtid_domain_id=1; -INSERT INTO t2 VALUES (foo(10, -'commit_before_enqueue SIGNAL ready1 WAIT_FOR cont1', -'commit_after_release_LOCK_prepare_ordered SIGNAL ready2')); -connection server_2; -FLUSH LOGS; -SET sql_log_bin=0; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -IF d1 != '' THEN -SET debug_sync = d1; -END IF; -IF d2 != '' THEN -SET debug_sync = d2; -END IF; -RETURN x; -END -|| -SET sql_log_bin=1; -SET @old_format=@@GLOBAL.binlog_format; -SET GLOBAL binlog_format=statement; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -SET debug_sync='now WAIT_FOR ready1'; -connection server_1; -SET gtid_domain_id=2; -INSERT INTO t2 VALUES (foo(11, -'commit_before_enqueue SIGNAL ready3 WAIT_FOR cont3', -'commit_after_release_LOCK_prepare_ordered SIGNAL ready4 WAIT_FOR cont4')); -SET gtid_domain_id=0; -SELECT * FROM t2 WHERE a >= 10 ORDER BY a; -a -10 -11 -connection server_2; -SET debug_sync='now WAIT_FOR ready3'; -SET debug_sync='now SIGNAL cont3'; -SET debug_sync='now WAIT_FOR ready4'; -SET debug_sync='now SIGNAL cont1'; -SET debug_sync='now WAIT_FOR ready2'; -SET debug_sync='now SIGNAL cont4'; -SELECT * FROM t2 WHERE a >= 10 ORDER BY a; -a -10 -11 -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002 -slave-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# -slave-bin.000002 # Query # # use `test`; INSERT INTO t2 VALUES (foo(11, -'commit_before_enqueue SIGNAL ready3 WAIT_FOR cont3', -'commit_after_release_LOCK_prepare_ordered SIGNAL ready4 WAIT_FOR cont4')) -slave-bin.000002 # Xid # # COMMIT /* XID */ -slave-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# -slave-bin.000002 # Query # # use `test`; INSERT INTO t2 VALUES (foo(10, -'commit_before_enqueue SIGNAL ready1 WAIT_FOR cont1', -'commit_after_release_LOCK_prepare_ordered SIGNAL ready2')) -slave-bin.000002 # Xid # # COMMIT /* XID */ -FLUSH LOGS; -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET debug_sync='RESET'; -include/start_slave.inc -*** Test that group-committed transactions on the master can replicate in parallel on the slave. 
*** -connection server_1; -SET debug_sync='RESET'; -FLUSH LOGS; -CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; -INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7); -connection server_2; -connection con_temp1; -BEGIN; -INSERT INTO t3 VALUES (2,102); -connect con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,; -BEGIN; -INSERT INTO t3 VALUES (4,104); -connect con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (2, foo(12, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connect con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (4, foo(14, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -connect con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (6, foo(16, -'group_commit_waiting_for_prior SIGNAL slave_queued3', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; -connection con_temp3; -connection con_temp4; -connection con_temp5; -SET debug_sync='RESET'; -connection server_1; -SELECT * FROM t3 ORDER BY a; -a b -1 1 -2 12 -3 3 -4 14 -5 5 -6 16 -7 7 -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000002 # Binlog_checkpoint # # master-bin.000002 -master-bin.000002 # Gtid # # GTID #-#-# -master-bin.000002 # Query # # use `test`; CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB -master-bin.000002 # Gtid # # BEGIN GTID #-#-# -master-bin.000002 # Query # # use `test`; INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7) -master-bin.000002 # Xid # # COMMIT /* XID */ -master-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# -master-bin.000002 # Query # # use `test`; INSERT INTO t3 VALUES (2, foo(12, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', -'')) -master-bin.000002 # Xid # # COMMIT /* XID */ -master-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# -master-bin.000002 # Query # # use `test`; INSERT INTO t3 VALUES (4, foo(14, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', -'')) -master-bin.000002 # Xid # # COMMIT /* XID */ -master-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# -master-bin.000002 # Query # # use `test`; INSERT INTO t3 VALUES (6, foo(16, -'group_commit_waiting_for_prior SIGNAL slave_queued3', -'')) -master-bin.000002 # Xid # # COMMIT /* XID */ -connection server_2; -SET debug_sync='now WAIT_FOR slave_queued3'; -connection con_temp1; -ROLLBACK; -connection server_2; -SET debug_sync='now WAIT_FOR slave_queued1'; -connection con_temp2; -ROLLBACK; -connection server_2; -SET debug_sync='now WAIT_FOR slave_queued2'; -SET debug_sync='now SIGNAL slave_cont1'; -SELECT * FROM t3 ORDER BY a; -a b -1 1 -2 12 -3 3 -4 14 -5 5 -6 16 -7 7 -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -slave-bin.000003 # Binlog_checkpoint # # slave-bin.000003 -slave-bin.000003 # Gtid # # GTID #-#-# -slave-bin.000003 # Query # # use `test`; CREATE TABLE t3 (a INT PRIMARY KEY, b INT) 
ENGINE=InnoDB -slave-bin.000003 # Gtid # # BEGIN GTID #-#-# -slave-bin.000003 # Query # # use `test`; INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7) -slave-bin.000003 # Xid # # COMMIT /* XID */ -slave-bin.000003 # Gtid # # BEGIN GTID #-#-# cid=# -slave-bin.000003 # Query # # use `test`; INSERT INTO t3 VALUES (2, foo(12, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', -'')) -slave-bin.000003 # Xid # # COMMIT /* XID */ -slave-bin.000003 # Gtid # # BEGIN GTID #-#-# cid=# -slave-bin.000003 # Query # # use `test`; INSERT INTO t3 VALUES (4, foo(14, -'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', -'')) -slave-bin.000003 # Xid # # COMMIT /* XID */ -slave-bin.000003 # Gtid # # BEGIN GTID #-#-# cid=# -slave-bin.000003 # Query # # use `test`; INSERT INTO t3 VALUES (6, foo(16, -'group_commit_waiting_for_prior SIGNAL slave_queued3', -'')) -slave-bin.000003 # Xid # # COMMIT /* XID */ -*** Test STOP SLAVE in parallel mode *** -connection server_2; -include/stop_slave.inc -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -connection server_1; -SET binlog_direct_non_transactional_updates=0; -SET sql_log_bin=0; -CALL mtr.add_suppression("Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction"); -SET sql_log_bin=1; -BEGIN; -INSERT INTO t2 VALUES (20); -INSERT INTO t1 VALUES (20); -INSERT INTO t2 VALUES (21); -INSERT INTO t3 VALUES (20, 20); -COMMIT; -INSERT INTO t3 VALUES(21, 21); -INSERT INTO t3 VALUES(22, 22); -SET binlog_format=@old_format; -connection con_temp1; -BEGIN; -INSERT INTO t2 VALUES (21); -connection server_2; -START SLAVE; -connection con_temp2; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; -STOP SLAVE; -connection con_temp1; -SET debug_sync='now WAIT_FOR wait_for_done_waiting'; -ROLLBACK; -connection con_temp2; -SET GLOBAL debug_dbug=@old_dbug; -SET debug_sync='RESET'; -connection server_2; -include/wait_for_slave_to_stop.inc -SELECT * FROM t1 WHERE a >= 20 ORDER BY a; -a -20 -SELECT * FROM t2 WHERE a >= 20 ORDER BY a; -a -20 -21 -SELECT * FROM t3 WHERE a >= 20 ORDER BY a; -a b -20 20 -include/start_slave.inc -SELECT * FROM t1 WHERE a >= 20 ORDER BY a; -a -20 -SELECT * FROM t2 WHERE a >= 20 ORDER BY a; -a -20 -21 -SELECT * FROM t3 WHERE a >= 20 ORDER BY a; -a b -20 20 -21 21 -22 22 -connection server_2; -include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** Test killing slave threads at various wait points *** -*** 1. 
Test killing transaction waiting in commit for previous transaction to commit *** -connection con_temp3; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (31, foo(31, -'commit_before_prepare_ordered WAIT_FOR t2_waiting', -'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con_temp4; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -BEGIN; -INSERT INTO t3 VALUES (32, foo(32, -'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', -'')); -INSERT INTO t3 VALUES (33, foo(33, -'group_commit_waiting_for_prior SIGNAL t2_waiting', -'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); -COMMIT; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -connection con_temp5; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (34, foo(34, -'', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; -connection con_temp3; -connection con_temp4; -connection con_temp5; -connection server_1; -SELECT * FROM t3 WHERE a >= 30 ORDER BY a; -a b -31 31 -32 32 -33 33 -34 34 -SET debug_sync='RESET'; -connection server_2; -SET sql_log_bin=0; -CALL mtr.add_suppression("Query execution was interrupted"); -CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); -CALL mtr.add_suppression("Slave: Connection was killed"); -SET sql_log_bin=1; -SET debug_sync='now WAIT_FOR t2_query'; -SET debug_sync='now SIGNAL t2_cont'; -SET debug_sync='now WAIT_FOR t1_ready'; -KILL THD_ID; -SET debug_sync='now WAIT_FOR t2_killed'; -SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] -STOP SLAVE IO_THREAD; -SELECT * FROM t3 WHERE a >= 30 ORDER BY a; -a b -31 31 -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_1; -INSERT INTO t3 VALUES (39,0); -connection server_2; -include/start_slave.inc -SELECT * FROM t3 WHERE a >= 30 ORDER BY a; -a b -31 31 -32 32 -33 33 -34 34 -39 0 -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -IF d1 != '' THEN -SET debug_sync = d1; -END IF; -IF d2 != '' THEN -SET debug_sync = d2; -END IF; -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_2; -include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** 2. 
Same as (1), but without restarting IO thread after kill of SQL threads *** -connection con_temp3; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (41, foo(41, -'commit_before_prepare_ordered WAIT_FOR t2_waiting', -'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con_temp4; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -BEGIN; -INSERT INTO t3 VALUES (42, foo(42, -'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', -'')); -INSERT INTO t3 VALUES (43, foo(43, -'group_commit_waiting_for_prior SIGNAL t2_waiting', -'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); -COMMIT; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -connection con_temp5; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (44, foo(44, -'', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; -connection con_temp3; -connection con_temp4; -connection con_temp5; -connection server_1; -SELECT * FROM t3 WHERE a >= 40 ORDER BY a; -a b -41 41 -42 42 -43 43 -44 44 -SET debug_sync='RESET'; -connection server_2; -SET debug_sync='now WAIT_FOR t2_query'; -SET debug_sync='now SIGNAL t2_cont'; -SET debug_sync='now WAIT_FOR t1_ready'; -KILL THD_ID; -SET debug_sync='now WAIT_FOR t2_killed'; -SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_1; -INSERT INTO t3 VALUES (49,0); -connection server_2; -START SLAVE SQL_THREAD; -SELECT * FROM t3 WHERE a >= 40 ORDER BY a; -a b -41 41 -42 42 -43 43 -44 44 -49 0 -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -IF d1 != '' THEN -SET debug_sync = d1; -END IF; -IF d2 != '' THEN -SET debug_sync = d2; -END IF; -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_2; -include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** 3. 
Same as (2), but not using gtid mode *** -connection server_2; -include/stop_slave.inc -CHANGE MASTER TO master_use_gtid=no; -include/start_slave.inc -connection server_1; -connection con_temp3; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (51, foo(51, -'commit_before_prepare_ordered WAIT_FOR t2_waiting', -'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con_temp4; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -SET binlog_format=statement; -BEGIN; -INSERT INTO t3 VALUES (52, foo(52, -'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', -'')); -INSERT INTO t3 VALUES (53, foo(53, -'group_commit_waiting_for_prior SIGNAL t2_waiting', -'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); -COMMIT; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -connection con_temp5; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -SET binlog_format=statement; -INSERT INTO t3 VALUES (54, foo(54, -'', -'')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued3'; -SET debug_sync='now SIGNAL master_cont1'; -connection con_temp3; -connection con_temp4; -connection con_temp5; -connection server_1; -SELECT * FROM t3 WHERE a >= 50 ORDER BY a; -a b -51 51 -52 52 -53 53 -54 54 -SET debug_sync='RESET'; -connection server_2; -SET debug_sync='now WAIT_FOR t2_query'; -SET debug_sync='now SIGNAL t2_cont'; -SET debug_sync='now WAIT_FOR t1_ready'; -KILL THD_ID; -SET debug_sync='now WAIT_FOR t2_killed'; -SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] -SELECT * FROM t3 WHERE a >= 50 ORDER BY a; -a b -51 51 -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_1; -INSERT INTO t3 VALUES (59,0); -connection server_2; -START SLAVE SQL_THREAD; -SELECT * FROM t3 WHERE a >= 50 ORDER BY a; -a b -51 51 -52 52 -53 53 -54 54 -59 0 -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -IF d1 != '' THEN -SET debug_sync = d1; -END IF; -IF d2 != '' THEN -SET debug_sync = d2; -END IF; -RETURN x; -END -|| -SET sql_log_bin=1; -include/stop_slave.inc -CHANGE MASTER TO master_use_gtid=slave_pos; -include/start_slave.inc -connection server_2; -include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=4; -include/start_slave.inc -*** 4. 
Test killing thread that is waiting to start transaction until previous transaction commits *** -connection server_1; -SET binlog_format=statement; -SET gtid_domain_id=2; -BEGIN; -INSERT INTO t3 VALUES (70, foo(70, -'rpl_parallel_start_waiting_for_prior SIGNAL t4_waiting', '')); -INSERT INTO t3 VALUES (60, foo(60, -'ha_write_row_end SIGNAL d2_query WAIT_FOR d2_cont2', -'rpl_parallel_end_of_group SIGNAL d2_done WAIT_FOR d2_cont')); -COMMIT; -SET gtid_domain_id=0; -connection server_2; -SET debug_sync='now WAIT_FOR d2_query'; -connection server_1; -SET gtid_domain_id=1; -BEGIN; -INSERT INTO t3 VALUES (61, foo(61, -'rpl_parallel_start_waiting_for_prior SIGNAL t3_waiting', -'rpl_parallel_start_waiting_for_prior_killed SIGNAL t3_killed')); -INSERT INTO t3 VALUES (62, foo(62, -'ha_write_row_end SIGNAL d1_query WAIT_FOR d1_cont2', -'rpl_parallel_end_of_group SIGNAL d1_done WAIT_FOR d1_cont')); -COMMIT; -SET gtid_domain_id=0; -connection server_2; -SET debug_sync='now WAIT_FOR d1_query'; -connection server_1; -SET gtid_domain_id=0; -INSERT INTO t3 VALUES (63, foo(63, -'ha_write_row_end SIGNAL d0_query WAIT_FOR d0_cont2', -'rpl_parallel_end_of_group SIGNAL d0_done WAIT_FOR d0_cont')); -connection server_2; -SET debug_sync='now WAIT_FOR d0_query'; -connection server_1; -SET gtid_domain_id=3; -BEGIN; -INSERT INTO t3 VALUES (68, foo(68, -'rpl_parallel_start_waiting_for_prior SIGNAL t2_waiting', '')); -INSERT INTO t3 VALUES (69, foo(69, -'ha_write_row_end SIGNAL d3_query WAIT_FOR d3_cont2', -'rpl_parallel_end_of_group SIGNAL d3_done WAIT_FOR d3_cont')); -COMMIT; -SET gtid_domain_id=0; -connection server_2; -SET debug_sync='now WAIT_FOR d3_query'; -SET debug_sync='now SIGNAL d2_cont2'; -SET debug_sync='now WAIT_FOR d2_done'; -SET debug_sync='now SIGNAL d1_cont2'; -SET debug_sync='now WAIT_FOR d1_done'; -SET debug_sync='now SIGNAL d0_cont2'; -SET debug_sync='now WAIT_FOR d0_done'; -SET debug_sync='now SIGNAL d3_cont2'; -SET debug_sync='now WAIT_FOR d3_done'; -connection con_temp3; -SET binlog_format=statement; -INSERT INTO t3 VALUES (64, foo(64, -'rpl_parallel_before_mark_start_commit SIGNAL t1_waiting WAIT_FOR t1_cont', '')); -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2 WAIT_FOR master_cont2'; -INSERT INTO t3 VALUES (65, foo(65, '', '')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -connection con_temp4; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; -INSERT INTO t3 VALUES (66, foo(66, '', '')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued3'; -connection con_temp5; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued4'; -INSERT INTO t3 VALUES (67, foo(67, '', '')); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued4'; -SET debug_sync='now SIGNAL master_cont2'; -connection con_temp3; -connection con_temp4; -connection con_temp5; -connection server_1; -SELECT * FROM t3 WHERE a >= 60 ORDER BY a; -a b -60 60 -61 61 -62 62 -63 63 -64 64 -65 65 -66 66 -67 67 -68 68 -69 69 -70 70 -SET debug_sync='RESET'; -connection server_2; -SET debug_sync='now SIGNAL d0_cont'; -SET debug_sync='now WAIT_FOR t1_waiting'; -SET debug_sync='now SIGNAL d3_cont'; -SET debug_sync='now WAIT_FOR t2_waiting'; -SET debug_sync='now SIGNAL d1_cont'; -SET debug_sync='now WAIT_FOR t3_waiting'; -SET debug_sync='now SIGNAL d2_cont'; -SET debug_sync='now WAIT_FOR t4_waiting'; -KILL THD_ID; -SET debug_sync='now WAIT_FOR t3_killed'; -SET debug_sync='now SIGNAL t1_cont'; 
-include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] -STOP SLAVE IO_THREAD; -SELECT * FROM t3 WHERE a >= 60 AND a != 65 ORDER BY a; -a b -60 60 -61 61 -62 62 -63 63 -64 64 -68 68 -69 69 -70 70 -SET debug_sync='RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_1; -UPDATE t3 SET b=b+1 WHERE a=60; -connection server_2; -include/start_slave.inc -SELECT * FROM t3 WHERE a >= 60 ORDER BY a; -a b -60 61 -61 61 -62 62 -63 63 -64 64 -65 65 -66 66 -67 67 -68 68 -69 69 -70 70 -SET sql_log_bin=0; -DROP FUNCTION foo; -CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) -RETURNS INT DETERMINISTIC -BEGIN -IF d1 != '' THEN -SET debug_sync = d1; -END IF; -IF d2 != '' THEN -SET debug_sync = d2; -END IF; -RETURN x; -END -|| -SET sql_log_bin=1; -connection server_2; -include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** 5. Test killing thread that is waiting for queue of max length to shorten *** -SET @old_max_queued= @@GLOBAL.slave_parallel_max_queued; -SET GLOBAL slave_parallel_max_queued=9000; -connection server_1; -SET binlog_format=statement; -INSERT INTO t3 VALUES (80, foo(0, -'ha_write_row_end SIGNAL query_waiting WAIT_FOR query_cont', '')); -connection server_2; -SET debug_sync='now WAIT_FOR query_waiting'; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,rpl_parallel_wait_queue_max"; -connection server_1; -SELECT * FROM t3 WHERE a >= 80 ORDER BY a; -a b -80 0 -81 10000 -connection server_2; -SET debug_sync='now WAIT_FOR wait_queue_ready'; -KILL THD_ID; -SET debug_sync='now WAIT_FOR wait_queue_killed'; -SET debug_sync='now SIGNAL query_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] -STOP SLAVE IO_THREAD; -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL slave_parallel_max_queued= @old_max_queued; -connection server_1; -INSERT INTO t3 VALUES (82,0); -SET binlog_format=@old_format; -connection server_2; -SET debug_sync='RESET'; -include/start_slave.inc -SELECT * FROM t3 WHERE a >= 80 ORDER BY a; -a b -80 0 -81 10000 -82 0 -connection server_2; -include/stop_slave.inc -SET GLOBAL binlog_format=@old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** MDEV-5788 Incorrect free of rgi->deferred_events in parallel replication *** -connection server_2; -include/stop_slave.inc -SET GLOBAL replicate_ignore_table="test.t3"; -SET GLOBAL slave_parallel_threads=2; -include/start_slave.inc -connection server_1; -INSERT INTO t3 VALUES (100, rand()); -INSERT INTO t3 VALUES (101, rand()); -connection server_2; -connection server_1; -INSERT INTO t3 VALUES (102, rand()); -INSERT INTO t3 VALUES (103, rand()); -INSERT INTO t3 VALUES (104, rand()); -INSERT INTO t3 VALUES (105, rand()); -connection server_2; -include/stop_slave.inc -SET GLOBAL replicate_ignore_table=""; -include/start_slave.inc -connection server_1; -INSERT INTO t3 VALUES (106, rand()); -INSERT INTO t3 VALUES (107, rand()); -connection server_2; -SELECT * FROM t3 WHERE a >= 100 ORDER BY a; -a b -106 # -107 # -*** MDEV-5921: In parallel replication, an error is not correctly signalled to the next transaction *** -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=10; 
-include/start_slave.inc -connection server_1; -INSERT INTO t3 VALUES (110, 1); -connection server_2; -SELECT * FROM t3 WHERE a >= 110 ORDER BY a; -a b -110 1 -SET sql_log_bin=0; -INSERT INTO t3 VALUES (111, 666); -SET sql_log_bin=1; -connection server_1; -connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -INSERT INTO t3 VALUES (111, 2); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connect con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -INSERT INTO t3 VALUES (112, 3); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con1; -connection con2; -SET debug_sync='RESET'; -connection server_2; -include/wait_for_slave_sql_error.inc [errno=1062] -include/wait_for_slave_sql_to_stop.inc -SELECT * FROM t3 WHERE a >= 110 ORDER BY a; -a b -110 1 -111 666 -SET sql_log_bin=0; -DELETE FROM t3 WHERE a=111 AND b=666; -SET sql_log_bin=1; -START SLAVE SQL_THREAD; -SELECT * FROM t3 WHERE a >= 110 ORDER BY a; -a b -110 1 -111 2 -112 3 -***MDEV-5914: Parallel replication deadlock due to InnoDB lock conflicts *** -connection server_2; -include/stop_slave.inc -connection server_1; -CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; -INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); -connection con1; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -UPDATE t4 SET b=NULL WHERE a=6; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con2; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -DELETE FROM t4 WHERE b <= 3; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con1; -connection con2; -SET debug_sync='RESET'; -connection server_2; -include/start_slave.inc -include/stop_slave.inc -SELECT * FROM t4 ORDER BY a; -a b -1 NULL -3 NULL -4 4 -5 NULL -6 NULL -connection server_1; -DELETE FROM t4; -INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); -connection con1; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -INSERT INTO t4 VALUES (7, NULL); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con2; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -DELETE FROM t4 WHERE b <= 3; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con1; -connection con2; -SET debug_sync='RESET'; -connection server_2; -include/start_slave.inc -include/stop_slave.inc -SELECT * FROM t4 ORDER BY a; -a b -1 NULL -3 NULL -4 4 -5 NULL -6 6 -7 NULL -connection server_1; -DELETE FROM t4; -INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); -connection con1; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -UPDATE t4 SET b=NULL WHERE a=6; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con2; -SET @old_format= @@SESSION.binlog_format; -SET binlog_format='statement'; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -DELETE FROM t4 WHERE b <= 1; -connection server_1; -SET 
debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con1; -connection con2; -SET @old_format=@@GLOBAL.binlog_format; -SET debug_sync='RESET'; -connection server_2; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,disable_thd_need_ordering_with"; -include/start_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SELECT * FROM t4 ORDER BY a; -a b -1 NULL -2 2 -3 NULL -4 4 -5 NULL -6 NULL -SET @last_gtid= 'GTID'; -SELECT IF(@@gtid_slave_pos LIKE CONCAT('%',@last_gtid,'%'), "GTID found ok", -CONCAT("GTID ", @last_gtid, " not found in gtid_slave_pos=", @@gtid_slave_pos)) -AS result; -result -GTID found ok -SELECT "ROW FOUND" AS `Is the row found?` - FROM mysql.gtid_slave_pos -WHERE CONCAT(domain_id, "-", server_id, "-", seq_no) = @last_gtid; -Is the row found? -ROW FOUND -*** MDEV-5938: Exec_master_log_pos not updated at log rotate in parallel replication *** -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=1; -SET DEBUG_SYNC= 'RESET'; -include/start_slave.inc -connection server_1; -CREATE TABLE t5 (a INT PRIMARY KEY, b INT); -INSERT INTO t5 VALUES (1,1); -INSERT INTO t5 VALUES (2,2), (3,8); -INSERT INTO t5 VALUES (4,16); -connection server_2; -test_check -OK -test_check -OK -connection server_1; -FLUSH LOGS; -connection server_2; -test_check -OK -test_check -OK -*** MDEV_6435: Incorrect error handling when query binlogged partially on master with "killed" error *** -connection server_1; -CREATE TABLE t6 (a INT) ENGINE=MyISAM; -CREATE TRIGGER tr AFTER INSERT ON t6 FOR EACH ROW SET @a = 1; -connection con1; -SET @old_format= @@binlog_format; -SET binlog_format= statement; -SET debug_sync='sp_head_execute_before_loop SIGNAL ready WAIT_FOR cont'; -INSERT INTO t6 VALUES (1), (2), (3); -connection server_1; -SET debug_sync='now WAIT_FOR ready'; -KILL QUERY CONID; -SET debug_sync='now SIGNAL cont'; -connection con1; -ERROR 70100: Query execution was interrupted -SET binlog_format= @old_format; -SET debug_sync='RESET'; -connection server_1; -SET debug_sync='RESET'; -connection server_2; -include/wait_for_slave_sql_error.inc [errno=1317] -STOP SLAVE IO_THREAD; -SET GLOBAL gtid_slave_pos= 'AFTER_ERROR_GTID_POS'; -include/start_slave.inc -connection server_1; -INSERT INTO t6 VALUES (4); -SELECT * FROM t6 ORDER BY a; -a -1 -4 -connection server_2; -SELECT * FROM t6 ORDER BY a; -a -4 -*** MDEV-6551: Some replication errors are ignored if slave_parallel_threads > 0 *** -connection server_1; -INSERT INTO t2 VALUES (31); -include/save_master_gtid.inc -connection server_2; -include/sync_with_master_gtid.inc -include/stop_slave.inc -SET GLOBAL slave_parallel_threads= 0; -include/start_slave.inc -SET sql_log_bin= 0; -INSERT INTO t2 VALUES (32); -SET sql_log_bin= 1; -connection server_1; -INSERT INTO t2 VALUES (32); -FLUSH LOGS; -INSERT INTO t2 VALUES (33); -INSERT INTO t2 VALUES (34); -SELECT * FROM t2 WHERE a >= 30 ORDER BY a; -a -31 -32 -33 -34 -include/save_master_gtid.inc -connection server_2; -include/wait_for_slave_sql_error.inc [errno=1062] -connection server_2; -include/stop_slave_io.inc -SET GLOBAL slave_parallel_threads=10; -START SLAVE; -include/wait_for_slave_sql_error.inc [errno=1062] -START SLAVE SQL_THREAD; -include/wait_for_slave_sql_error.inc [errno=1062] -SELECT * FROM t2 WHERE a >= 30 ORDER BY a; -a -31 -32 -SET sql_slave_skip_counter= 1; -ERROR HY000: When using parallel replication and GTID with multiple replication domains, @@sql_slave_skip_counter can not be used. 
Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position -include/stop_slave_io.inc -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t2 WHERE a >= 30 ORDER BY a; -a -31 -32 -33 -34 -*** MDEV-6775: Wrong binlog order in parallel replication *** -connection server_1; -DELETE FROM t4; -INSERT INTO t4 VALUES (1,NULL), (3,NULL), (4,4), (5, NULL), (6, 6); -include/save_master_gtid.inc -connection server_2; -include/sync_with_master_gtid.inc -include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,inject_binlog_commit_before_get_LOCK_log"; -SET @old_format=@@GLOBAL.binlog_format; -SET GLOBAL binlog_format=ROW; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -connection con1; -SET @old_format= @@binlog_format; -SET binlog_format= statement; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -UPDATE t4 SET b=NULL WHERE a=6; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con2; -SET @old_format= @@binlog_format; -SET binlog_format= statement; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -DELETE FROM t4 WHERE b <= 3; -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con1; -SET binlog_format= @old_format; -connection con2; -SET binlog_format= @old_format; -SET debug_sync='RESET'; -SELECT * FROM t4 ORDER BY a; -a b -1 NULL -3 NULL -4 4 -5 NULL -6 NULL -connection server_2; -include/start_slave.inc -SET debug_sync= 'now WAIT_FOR waiting'; -SELECT * FROM t4 ORDER BY a; -a b -1 NULL -3 NULL -4 4 -5 NULL -6 NULL -SET debug_sync= 'now SIGNAL cont'; -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL binlog_format= @old_format; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** MDEV-7237: Parallel replication: incorrect relaylog position after stop/start the slave *** -connection server_1; -INSERT INTO t2 VALUES (40); -connection server_2; -include/stop_slave.inc -CHANGE MASTER TO master_use_gtid=no; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,rpl_parallel_scheduled_gtid_0_x_100"; -SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -connection server_1; -INSERT INTO t2 VALUES (41); -INSERT INTO t2 VALUES (42); -SET @old_format= @@binlog_format; -SET binlog_format= statement; -DELETE FROM t2 WHERE a=40; -SET binlog_format= @old_format; -INSERT INTO t2 VALUES (43); -INSERT INTO t2 VALUES (44); -FLUSH LOGS; -INSERT INTO t2 VALUES (45); -SET gtid_seq_no=100; -INSERT INTO t2 VALUES (46); -connection con_temp2; -BEGIN; -SELECT * FROM t2 WHERE a=40 FOR UPDATE; -a -40 -connection server_2; -include/start_slave.inc -SET debug_sync= 'now WAIT_FOR scheduled_gtid_0_x_100'; -STOP SLAVE; -connection con_temp2; -SET debug_sync= 'now WAIT_FOR wait_for_done_waiting'; -ROLLBACK; -connection server_2; -include/wait_for_slave_sql_to_stop.inc -SELECT * FROM t2 WHERE a >= 40 ORDER BY a; -a -41 -42 -include/start_slave.inc -SELECT * FROM t2 WHERE a >= 40 ORDER BY a; -a -41 -42 -43 -44 -45 -46 -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET DEBUG_SYNC= 'RESET'; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -CHANGE MASTER TO master_use_gtid=slave_pos; -include/start_slave.inc -*** MDEV-7326 
Server deadlock in connection with parallel replication *** -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=3; -SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; -include/start_slave.inc -connection server_1; -SET @old_format= @@SESSION.binlog_format; -SET binlog_format= STATEMENT; -INSERT INTO t1 VALUES (foo(50, -"rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", -"rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; -connection server_1; -INSERT INTO t2 VALUES (foo(50, -"rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", -"rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2")); -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; -connection server_1; -INSERT INTO t1 VALUES (foo(51, -"rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", -"rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; -SET DEBUG_SYNC= "now SIGNAL t1_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; -connection server_1; -INSERT INTO t1 VALUES (52); -SET BINLOG_FORMAT= @old_format; -SELECT * FROM t2 WHERE a>=50 ORDER BY a; -a -50 -SELECT * FROM t1 WHERE a>=50 ORDER BY a; -a -50 -51 -52 -connection server_2; -SET DEBUG_SYNC= "now SIGNAL prep_cont"; -SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; -SET DEBUG_SYNC= "now SIGNAL t2_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; -SET DEBUG_SYNC= "now SIGNAL t1_cont2"; -connection server_1; -connection server_2; -SELECT * FROM t2 WHERE a>=50 ORDER BY a; -a -50 -SELECT * FROM t1 WHERE a>=50 ORDER BY a; -a -50 -51 -52 -SET DEBUG_SYNC="reset"; -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** MDEV-7326 Server deadlock in connection with parallel replication *** -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=3; -SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; -include/start_slave.inc -connection server_1; -SET @old_format= @@SESSION.binlog_format; -SET binlog_format= STATEMENT; -INSERT INTO t1 VALUES (foo(60, -"rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", -"rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; -connection server_1; -INSERT INTO t2 VALUES (foo(60, -"rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", -"rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2")); -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; -connection con_temp3; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; -SET binlog_format=statement; -INSERT INTO t1 VALUES (foo(61, -"rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", -"rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued1'; -connection con_temp4; -SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; -INSERT INTO t6 VALUES (62); -connection server_1; -SET debug_sync='now WAIT_FOR master_queued2'; -SET debug_sync='now SIGNAL master_cont1'; -connection con_temp3; -connection con_temp4; -connection server_1; -SET 
debug_sync='RESET'; -SET BINLOG_FORMAT= @old_format; -SELECT * FROM t2 WHERE a>=60 ORDER BY a; -a -60 -SELECT * FROM t1 WHERE a>=60 ORDER BY a; -a -60 -61 -SELECT * FROM t6 WHERE a>=60 ORDER BY a; -a -62 -connection server_2; -SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; -SET DEBUG_SYNC= "now SIGNAL t1_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; -connection server_2; -SET DEBUG_SYNC= "now SIGNAL prep_cont"; -SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; -SET DEBUG_SYNC= "now SIGNAL t2_cont1"; -SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; -SET DEBUG_SYNC= "now SIGNAL t1_cont2"; -connection server_1; -connection server_2; -SELECT * FROM t2 WHERE a>=60 ORDER BY a; -a -60 -SELECT * FROM t1 WHERE a>=60 ORDER BY a; -a -60 -61 -SELECT * FROM t6 WHERE a>=60 ORDER BY a; -a -62 -SET DEBUG_SYNC="reset"; -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL slave_parallel_threads=0; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** MDEV-7335: Potential parallel slave deadlock with specific binlog corruption *** -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=1; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,slave_discard_xid_for_gtid_0_x_1000"; -connection server_1; -INSERT INTO t2 VALUES (101); -INSERT INTO t2 VALUES (102); -INSERT INTO t2 VALUES (103); -INSERT INTO t2 VALUES (104); -INSERT INTO t2 VALUES (105); -SET gtid_seq_no=1000; -INSERT INTO t2 VALUES (106); -INSERT INTO t2 VALUES (107); -INSERT INTO t2 VALUES (108); -INSERT INTO t2 VALUES (109); -INSERT INTO t2 VALUES (110); -INSERT INTO t2 VALUES (111); -INSERT INTO t2 VALUES (112); -INSERT INTO t2 VALUES (113); -INSERT INTO t2 VALUES (114); -INSERT INTO t2 VALUES (115); -INSERT INTO t2 VALUES (116); -INSERT INTO t2 VALUES (117); -INSERT INTO t2 VALUES (118); -INSERT INTO t2 VALUES (119); -INSERT INTO t2 VALUES (120); -INSERT INTO t2 VALUES (121); -INSERT INTO t2 VALUES (122); -INSERT INTO t2 VALUES (123); -INSERT INTO t2 VALUES (124); -INSERT INTO t2 VALUES (125); -INSERT INTO t2 VALUES (126); -INSERT INTO t2 VALUES (127); -INSERT INTO t2 VALUES (128); -INSERT INTO t2 VALUES (129); -INSERT INTO t2 VALUES (130); -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t2 WHERE a >= 100 ORDER BY a; -a -101 -102 -103 -104 -105 -107 -108 -109 -110 -111 -112 -113 -114 -115 -116 -117 -118 -119 -120 -121 -122 -123 -124 -125 -126 -127 -128 -129 -130 -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc -*** MDEV-6676 - test syntax of @@slave_parallel_mode *** -connection server_2; -Parallel_Mode = 'conservative' -include/stop_slave.inc -SET GLOBAL slave_parallel_mode='aggressive'; -Parallel_Mode = 'aggressive' -SET GLOBAL slave_parallel_mode='conservative'; -Parallel_Mode = 'conservative' -*** MDEV-6676 - test that empty parallel_mode does not replicate in parallel *** -connection server_1; -INSERT INTO t2 VALUES (1040); -include/save_master_gtid.inc -connection server_2; -SET GLOBAL slave_parallel_mode='none'; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,slave_crash_if_parallel_apply"; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; -a -1040 -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -*** MDEV-6676 - test disabling domain-based parallel replication *** -connection server_1; -SET gtid_domain_id = 1; -INSERT INTO t2 VALUES (1041); -INSERT INTO 
t2 VALUES (1042); -INSERT INTO t2 VALUES (1043); -INSERT INTO t2 VALUES (1044); -INSERT INTO t2 VALUES (1045); -INSERT INTO t2 VALUES (1046); -DELETE FROM t2 WHERE a >= 1041; -SET gtid_domain_id = 2; -INSERT INTO t2 VALUES (1041); -INSERT INTO t2 VALUES (1042); -INSERT INTO t2 VALUES (1043); -INSERT INTO t2 VALUES (1044); -INSERT INTO t2 VALUES (1045); -INSERT INTO t2 VALUES (1046); -SET gtid_domain_id = 0; -include/save_master_gtid.inc -connection server_2; -SET GLOBAL slave_parallel_mode=minimal; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; -a -1040 -1041 -1042 -1043 -1044 -1045 -1046 -*** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang *** -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_mode='conservative'; -SET GLOBAL slave_parallel_threads=10; -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep'; -connection server_1; -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; -SET @commit_id= 10000; -ANALYZE TABLE t2; -Table Op Msg_type Msg_text -test.t2 analyze status Engine-independent statistics collected -test.t2 analyze status OK -INSERT INTO t3 VALUES (120, 0); -SET @commit_id= 10001; -INSERT INTO t3 VALUES (121, 0); -SET SESSION debug_dbug=@old_dbug; -SELECT * FROM t3 WHERE a >= 120 ORDER BY a; -a b -120 0 -121 0 -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t3 WHERE a >= 120 ORDER BY a; -a b -120 0 -121 0 -include/stop_slave.inc -SET GLOBAL debug_dbug= @old_dbug; -include/start_slave.inc -*** MDEV-7929: record_gtid() for non-transactional event group calls wakeup_subsequent_commits() too early, causing slave hang. 
*** -connection server_2; -include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_record_gtid_serverid_100_sleep'; -connection server_1; -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; -SET @old_server_id= @@SESSION.server_id; -SET SESSION server_id= 100; -SET @commit_id= 10010; -ALTER TABLE t1 COMMENT "Hulubulu!"; -SET SESSION server_id= @old_server_id; -INSERT INTO t3 VALUES (130, 0); -SET @commit_id= 10011; -INSERT INTO t3 VALUES (131, 0); -SET SESSION debug_dbug=@old_dbug; -SELECT * FROM t3 WHERE a >= 130 ORDER BY a; -a b -130 0 -131 0 -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t3 WHERE a >= 130 ORDER BY a; -a b -130 0 -131 0 -include/stop_slave.inc -SET GLOBAL debug_dbug= @old_dbug; -include/start_slave.inc -*** MDEV-8031: Parallel replication stops on "connection killed" error (probably incorrectly handled deadlock kill) *** -connection server_1; -INSERT INTO t3 VALUES (201,0), (202,0); -include/save_master_gtid.inc -connection server_2; -include/sync_with_master_gtid.inc -include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_mdev8031'; -connection server_1; -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; -SET @commit_id= 10200; -INSERT INTO t3 VALUES (203, 1); -INSERT INTO t3 VALUES (204, 1); -INSERT INTO t3 VALUES (205, 1); -UPDATE t3 SET b=b+1 WHERE a=201; -UPDATE t3 SET b=b+1 WHERE a=201; -UPDATE t3 SET b=b+1 WHERE a=201; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=202; -UPDATE t3 SET b=b+1 WHERE a=203; -UPDATE t3 SET b=b+1 WHERE a=203; -UPDATE t3 SET b=b+1 WHERE a=204; -UPDATE t3 SET b=b+1 WHERE a=204; -UPDATE t3 SET b=b+1 WHERE a=204; -UPDATE t3 SET b=b+1 WHERE a=203; -UPDATE t3 SET b=b+1 WHERE a=205; -UPDATE t3 SET b=b+1 WHERE a=205; -SET SESSION debug_dbug=@old_dbug; -SELECT * FROM t3 WHERE a>=200 ORDER BY a; -a b -201 3 -202 4 -203 4 -204 4 -205 3 -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t3 WHERE a>=200 ORDER BY a; -a b -201 3 -202 4 -203 4 -204 4 -205 3 -include/stop_slave.inc -SET GLOBAL debug_dbug= @old_dbug; -include/start_slave.inc -*** Check getting deadlock killed inside open_binlog() during retry. *** -connection server_2; -include/stop_slave.inc -SET @old_dbug= @@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug= '+d,inject_retry_event_group_open_binlog_kill'; -SET @old_max= @@GLOBAL.max_relay_log_size; -SET GLOBAL max_relay_log_size= 4096; -connection server_1; -SET @old_dbug= @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; -SET @commit_id= 10210; -Omit long queries that cause relaylog rotations and transaction retries... 
-SET SESSION debug_dbug=@old_dbug; -SELECT * FROM t3 WHERE a>=200 ORDER BY a; -a b -201 6 -202 8 -203 7 -204 7 -205 5 -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t3 WHERE a>=200 ORDER BY a; -a b -201 6 -202 8 -203 7 -204 7 -205 5 -include/stop_slave.inc -SET GLOBAL debug_dbug= @old_debg; -SET GLOBAL max_relay_log_size= @old_max; -include/start_slave.inc -*** MDEV-8725: Assertion on ROLLBACK statement in the binary log *** -connection server_1; -BEGIN; -INSERT INTO t2 VALUES (2000); -INSERT INTO t1 VALUES (2000); -INSERT INTO t2 VALUES (2001); -ROLLBACK; -SELECT * FROM t1 WHERE a>=2000 ORDER BY a; -a -2000 -SELECT * FROM t2 WHERE a>=2000 ORDER BY a; -a -include/save_master_gtid.inc -connection server_2; -include/sync_with_master_gtid.inc -SELECT * FROM t1 WHERE a>=2000 ORDER BY a; -a -2000 -SELECT * FROM t2 WHERE a>=2000 ORDER BY a; -a -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=@old_parallel_threads; -include/start_slave.inc -SET DEBUG_SYNC= 'RESET'; -connection server_1; -DROP function foo; -DROP TABLE t1,t2,t3,t4,t5,t6; -SET DEBUG_SYNC= 'RESET'; -include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_analyze_table_hang.result b/mysql-test/suite/rpl/r/rpl_parallel_analyze_table_hang.result new file mode 100644 index 00000000000..3c3cd2601e8 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_analyze_table_hang.result @@ -0,0 +1,51 @@ +*** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +include/stop_slave.inc +SET GLOBAL slave_parallel_mode='conservative'; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep'; +connection server_1; +SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +SET @commit_id= 10000; +ANALYZE TABLE t2; +Table Op Msg_type Msg_text +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +INSERT INTO t3 VALUES (120, 0); +SET @commit_id= 10001; +INSERT INTO t3 VALUES (121, 0); +SET SESSION debug_dbug=@old_dbug; +SELECT * FROM t3 WHERE a >= 120 ORDER BY a; +a b +120 0 +121 0 +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t3 WHERE a >= 120 ORDER BY a; +a b +120 0 +121 0 +include/stop_slave.inc +SET GLOBAL debug_dbug= @old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +include/start_slave.inc +connection server_1; +DROP TABLE t2,t3; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_conflicts.result b/mysql-test/suite/rpl/r/rpl_parallel_conflicts.result index b15de6fc215..09597ba81c1 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_conflicts.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_conflicts.result @@ -8,6 +8,12 @@ CREATE TABLE t7 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; 
CREATE TABLE t8 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; connection server_2; include/stop_slave.inc +SET @old_mode= @@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_mode='conservative'; +SET @old_threads= @@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=40; +SET @old_transaction_retries= @@GLOBAL.slave_transaction_retries; +SET GLOBAL slave_transaction_retries=5; connection server_1; INSERT INTO t7 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); SET @old_dbug= @@SESSION.debug_dbug; @@ -325,6 +331,9 @@ a b 104 4 include/stop_slave.inc SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_mode=@old_mode; +SET GLOBAL slave_parallel_threads=@old_threads; +SET GLOBAL slave_transaction_retries=@old_transaction_retries; include/start_slave.inc SET DEBUG_SYNC= 'RESET'; connection server_1; diff --git a/mysql-test/suite/rpl/r/rpl_parallel_deadlock_corrupt_binlog.result b/mysql-test/suite/rpl/r/rpl_parallel_deadlock_corrupt_binlog.result new file mode 100644 index 00000000000..74d1d53b67c --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_deadlock_corrupt_binlog.result @@ -0,0 +1,93 @@ +*** MDEV-7335: Potential parallel slave deadlock with specific binlog corruption *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=1; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,slave_discard_xid_for_gtid_0_x_1000"; +connection server_1; +INSERT INTO t2 VALUES (101); +INSERT INTO t2 VALUES (102); +INSERT INTO t2 VALUES (103); +INSERT INTO t2 VALUES (104); +INSERT INTO t2 VALUES (105); +SET gtid_seq_no=1000; +INSERT INTO t2 VALUES (106); +INSERT INTO t2 VALUES (107); +INSERT INTO t2 VALUES (108); +INSERT INTO t2 VALUES (109); +INSERT INTO t2 VALUES (110); +INSERT INTO t2 VALUES (111); +INSERT INTO t2 VALUES (112); +INSERT INTO t2 VALUES (113); +INSERT INTO t2 VALUES (114); +INSERT INTO t2 VALUES (115); +INSERT INTO t2 VALUES (116); +INSERT INTO t2 VALUES (117); +INSERT INTO t2 VALUES (118); +INSERT INTO t2 VALUES (119); +INSERT INTO t2 VALUES (120); +INSERT INTO t2 VALUES (121); +INSERT INTO t2 VALUES (122); +INSERT INTO t2 VALUES (123); +INSERT INTO t2 VALUES (124); +INSERT INTO t2 VALUES (125); +INSERT INTO t2 VALUES (126); +INSERT INTO t2 VALUES (127); +INSERT INTO t2 VALUES (128); +INSERT INTO t2 VALUES (129); +INSERT INTO t2 VALUES (130); +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a >= 100 ORDER BY a; +a +101 +102 +103 +104 +105 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP TABLE t2; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_domain.result b/mysql-test/suite/rpl/r/rpl_parallel_domain.result new file mode 100644 index 00000000000..69b9678d149 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_domain.result @@ -0,0 +1,71 @@ 
+include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=10; +ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; +IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) +OK +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; +IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) +OK +include/stop_slave.inc +SELECT IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; +IF(COUNT(*) < 10, "OK", CONCAT("Found too many system user processes: ", COUNT(*))) +OK +include/start_slave.inc +SELECT IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) FROM information_schema.processlist WHERE user = "system user"; +IF(COUNT(*) >= 10, "OK", CONCAT("Found too few system user processes: ", COUNT(*))) +OK +*** Test long-running query in domain 1 can run in parallel with short queries in domain 0 *** +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); +INSERT INTO t2 VALUES (1); +connection server_2; +connect con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +LOCK TABLE t1 WRITE; +connection server_1; +SET gtid_domain_id=1; +INSERT INTO t1 VALUES (2); +SET gtid_domain_id=0; +INSERT INTO t2 VALUES (2); +INSERT INTO t2 VALUES (3); +BEGIN; +INSERT INTO t2 VALUES (4); +INSERT INTO t2 VALUES (5); +COMMIT; +INSERT INTO t2 VALUES (6); +connection server_2; +SELECT * FROM t2 ORDER by a; +a +1 +2 +3 +4 +5 +6 +connection con_temp1; +SELECT * FROM t1; +a +1 +UNLOCK TABLES; +connection server_2; +SELECT * FROM t1 ORDER BY a; +a +1 +2 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t1,t2; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_domain_slave_single_grp.result b/mysql-test/suite/rpl/r/rpl_parallel_domain_slave_single_grp.result new file mode 100644 index 00000000000..613aac64487 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_domain_slave_single_grp.result @@ -0,0 +1,101 @@ +*** Test two transactions in different domains committed in opposite order on slave but in a single group commit. 
*** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); +INSERT INTO t2 VALUES (1); +connection server_2; +include/stop_slave.inc +connection server_1; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +SET @old_format= @@SESSION.binlog_format; +SET binlog_format='statement'; +SET gtid_domain_id=1; +INSERT INTO t2 VALUES (foo(10, +'commit_before_enqueue SIGNAL ready1 WAIT_FOR cont1', +'commit_after_release_LOCK_prepare_ordered SIGNAL ready2')); +connection server_2; +FLUSH LOGS; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +SET @old_format=@@GLOBAL.binlog_format; +SET GLOBAL binlog_format=statement; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +include/start_slave.inc +SET debug_sync='now WAIT_FOR ready1'; +connection server_1; +SET gtid_domain_id=2; +INSERT INTO t2 VALUES (foo(11, +'commit_before_enqueue SIGNAL ready3 WAIT_FOR cont3', +'commit_after_release_LOCK_prepare_ordered SIGNAL ready4 WAIT_FOR cont4')); +SET gtid_domain_id=0; +SELECT * FROM t2 WHERE a >= 10 ORDER BY a; +a +10 +11 +connection server_2; +SET debug_sync='now WAIT_FOR ready3'; +SET debug_sync='now SIGNAL cont3'; +SET debug_sync='now WAIT_FOR ready4'; +SET debug_sync='now SIGNAL cont1'; +SET debug_sync='now WAIT_FOR ready2'; +SET debug_sync='now SIGNAL cont4'; +SELECT * FROM t2 WHERE a >= 10 ORDER BY a; +a +10 +11 +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002 +slave-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# +slave-bin.000002 # Query # # use `test`; INSERT INTO t2 VALUES (foo(11, +'commit_before_enqueue SIGNAL ready3 WAIT_FOR cont3', +'commit_after_release_LOCK_prepare_ordered SIGNAL ready4 WAIT_FOR cont4')) +slave-bin.000002 # Xid # # COMMIT /* XID */ +slave-bin.000002 # Gtid # # BEGIN GTID #-#-# cid=# +slave-bin.000002 # Query # # use `test`; INSERT INTO t2 VALUES (foo(10, +'commit_before_enqueue SIGNAL ready1 WAIT_FOR cont1', +'commit_after_release_LOCK_prepare_ordered SIGNAL ready2')) +slave-bin.000002 # Xid # # COMMIT /* XID */ +FLUSH LOGS; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +SET GLOBAL binlog_format=@old_format; +connection server_1; +DROP function foo; +DROP TABLE t1,t2; +SET DEBUG_SYNC= 'RESET'; +SET GLOBAL binlog_format=@old_format; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_free_deferred_event.result b/mysql-test/suite/rpl/r/rpl_parallel_free_deferred_event.result new file mode 100644 index 00000000000..6718561a321 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_free_deferred_event.result @@ -0,0 +1,44 @@ +*** MDEV-5788 Incorrect free of rgi->deferred_events in parallel replication *** +include/master-slave.inc 
+[connection master] +connection server_2; +include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL replicate_ignore_table="test.t3"; +SET GLOBAL slave_parallel_threads=2; +include/start_slave.inc +connection server_1; +INSERT INTO t3 VALUES (100, rand()); +INSERT INTO t3 VALUES (101, rand()); +connection server_2; +connection server_1; +INSERT INTO t3 VALUES (102, rand()); +INSERT INTO t3 VALUES (103, rand()); +INSERT INTO t3 VALUES (104, rand()); +INSERT INTO t3 VALUES (105, rand()); +connection server_2; +include/stop_slave.inc +SET GLOBAL replicate_ignore_table=""; +include/start_slave.inc +connection server_1; +INSERT INTO t3 VALUES (106, rand()); +INSERT INTO t3 VALUES (107, rand()); +connection server_2; +SELECT * FROM t3 WHERE a >= 100 ORDER BY a; +a b +106 # +107 # +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t3; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_gco_wait_kill.result b/mysql-test/suite/rpl/r/rpl_parallel_gco_wait_kill.result new file mode 100644 index 00000000000..4472550c4f2 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_gco_wait_kill.result @@ -0,0 +1,257 @@ +*** Test killing thread that is waiting to start transaction until previous transaction commits *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode= @@GLOBAL.slave_parallel_mode; +include/stop_slave.inc +SET sql_log_bin=0; +CALL mtr.add_suppression("Query execution was interrupted"); +CALL mtr.add_suppression("Slave: Connection was killed"); +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +SET sql_log_bin=1; +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode= 'conservative'; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +connect con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +connection server_1; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=4; +include/start_slave.inc +connection server_1; +SET gtid_domain_id=2; +BEGIN; +INSERT INTO t3 VALUES (70, foo(70, +'rpl_parallel_start_waiting_for_prior SIGNAL t4_waiting', '')); +INSERT INTO t3 VALUES (60, foo(60, +'ha_write_row_end SIGNAL d2_query WAIT_FOR d2_cont2', +'rpl_parallel_end_of_group SIGNAL d2_done WAIT_FOR d2_cont')); +COMMIT; +SET gtid_domain_id=0; +connection server_2; +SET 
debug_sync='now WAIT_FOR d2_query'; +connection server_1; +SET gtid_domain_id=1; +BEGIN; +INSERT INTO t3 VALUES (61, foo(61, +'rpl_parallel_start_waiting_for_prior SIGNAL t3_waiting', +'rpl_parallel_start_waiting_for_prior_killed SIGNAL t3_killed')); +INSERT INTO t3 VALUES (62, foo(62, +'ha_write_row_end SIGNAL d1_query WAIT_FOR d1_cont2', +'rpl_parallel_end_of_group SIGNAL d1_done WAIT_FOR d1_cont')); +COMMIT; +SET gtid_domain_id=0; +connection server_2; +SET debug_sync='now WAIT_FOR d1_query'; +connection server_1; +SET gtid_domain_id=0; +INSERT INTO t3 VALUES (63, foo(63, +'ha_write_row_end SIGNAL d0_query WAIT_FOR d0_cont2', +'rpl_parallel_end_of_group SIGNAL d0_done WAIT_FOR d0_cont')); +connection server_2; +SET debug_sync='now WAIT_FOR d0_query'; +connection server_1; +SET gtid_domain_id=3; +BEGIN; +INSERT INTO t3 VALUES (68, foo(68, +'rpl_parallel_start_waiting_for_prior SIGNAL t2_waiting', '')); +INSERT INTO t3 VALUES (69, foo(69, +'ha_write_row_end SIGNAL d3_query WAIT_FOR d3_cont2', +'rpl_parallel_end_of_group SIGNAL d3_done WAIT_FOR d3_cont')); +COMMIT; +SET gtid_domain_id=0; +connection server_2; +SET debug_sync='now WAIT_FOR d3_query'; +SET debug_sync='now SIGNAL d2_cont2'; +SET debug_sync='now WAIT_FOR d2_done'; +SET debug_sync='now SIGNAL d1_cont2'; +SET debug_sync='now WAIT_FOR d1_done'; +SET debug_sync='now SIGNAL d0_cont2'; +SET debug_sync='now WAIT_FOR d0_done'; +SET debug_sync='now SIGNAL d3_cont2'; +SET debug_sync='now WAIT_FOR d3_done'; +connection con_temp3; +INSERT INTO t3 VALUES (64, foo(64, +'rpl_parallel_before_mark_start_commit SIGNAL t1_waiting WAIT_FOR t1_cont', '')); +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2 WAIT_FOR master_cont2'; +INSERT INTO t3 VALUES (65, foo(65, '', '')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +connection con_temp4; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +INSERT INTO t3 VALUES (66, foo(66, '', '')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued3'; +connection con_temp5; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued4'; +INSERT INTO t3 VALUES (67, foo(67, '', '')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued4'; +SET debug_sync='now SIGNAL master_cont2'; +connection con_temp3; +connection con_temp4; +connection con_temp5; +connection server_1; +SELECT * FROM t3 WHERE a >= 60 ORDER BY a; +a b +60 60 +61 61 +62 62 +63 63 +64 64 +65 65 +66 66 +67 67 +68 68 +69 69 +70 70 +SET debug_sync='RESET'; +connection server_2; +SET debug_sync='now SIGNAL d0_cont'; +SET debug_sync='now WAIT_FOR t1_waiting'; +SET debug_sync='now SIGNAL d3_cont'; +SET debug_sync='now WAIT_FOR t2_waiting'; +SET debug_sync='now SIGNAL d1_cont'; +SET debug_sync='now WAIT_FOR t3_waiting'; +SET debug_sync='now SIGNAL d2_cont'; +SET debug_sync='now WAIT_FOR t4_waiting'; +KILL THD_ID; +SET debug_sync='now WAIT_FOR t3_killed'; +SET debug_sync='now SIGNAL t1_cont'; +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] +STOP SLAVE IO_THREAD; +SELECT * FROM t3 WHERE a >= 60 AND a != 65 ORDER BY a; +a b +60 60 +61 61 +62 62 +63 63 +64 64 +68 68 +69 69 +70 70 +SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_1; +UPDATE t3 SET b=b+1 WHERE a=60; 
+connection server_2; +include/start_slave.inc +SELECT * FROM t3 WHERE a >= 60 ORDER BY a; +a b +60 61 +61 61 +62 62 +63 63 +64 64 +65 65 +66 66 +67 67 +68 68 +69 69 +70 70 +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +include/start_slave.inc +*** 5. Test killing thread that is waiting for queue of max length to shorten *** +SET @old_max_queued= @@GLOBAL.slave_parallel_max_queued; +SET GLOBAL slave_parallel_max_queued=9000; +connection server_1; +INSERT INTO t3 VALUES (80, foo(0, +'ha_write_row_end SIGNAL query_waiting WAIT_FOR query_cont', '')); +connection server_2; +SET debug_sync='now WAIT_FOR query_waiting'; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_wait_queue_max"; +connection server_1; +SELECT * FROM t3 WHERE a >= 80 ORDER BY a; +a b +80 0 +81 10000 +connection server_2; +SET debug_sync='now WAIT_FOR wait_queue_ready'; +KILL THD_ID; +SET debug_sync='now WAIT_FOR wait_queue_killed'; +SET debug_sync='now SIGNAL query_cont'; +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] +STOP SLAVE IO_THREAD; +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_max_queued= @old_max_queued; +connection server_1; +INSERT INTO t3 VALUES (82,0); +connection server_2; +SET debug_sync='RESET'; +include/start_slave.inc +SELECT * FROM t3 WHERE a >= 80 ORDER BY a; +a b +80 0 +81 10000 +82 0 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP function foo; +DROP TABLE t3; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_gtid_slave_pos_update_fail.result b/mysql-test/suite/rpl/r/rpl_parallel_gtid_slave_pos_update_fail.result new file mode 100644 index 00000000000..2e7e7f547af --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_gtid_slave_pos_update_fail.result @@ -0,0 +1,65 @@ +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; +INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); +connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connection server_2; +include/stop_slave.inc +connection con1; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +UPDATE t4 SET b=NULL WHERE a=6; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con2; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +DELETE FROM t4 WHERE b <= 1; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con1; +connection con2; +SET debug_sync='RESET'; +connection server_2; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET 
GLOBAL debug_dbug="+d,disable_thd_need_ordering_with"; +include/start_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SELECT * FROM t4 ORDER BY a; +a b +1 NULL +2 2 +3 NULL +4 4 +5 NULL +6 NULL +SET @last_gtid= 'GTID'; +SELECT IF(@@gtid_slave_pos LIKE CONCAT('%',@last_gtid,'%'), "GTID found ok", +CONCAT("GTID ", @last_gtid, " not found in gtid_slave_pos=", @@gtid_slave_pos)) +AS result; +result +GTID found ok +SELECT "ROW FOUND" AS `Is the row found?` + FROM mysql.gtid_slave_pos +WHERE CONCAT(domain_id, "-", server_id, "-", seq_no) = @last_gtid; +Is the row found? +ROW FOUND +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP TABLE t4; +SET DEBUG_SYNC= 'RESET'; +disconnect con1; +disconnect con2; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_ignore_error_on_rotate.result b/mysql-test/suite/rpl/r/rpl_parallel_ignore_error_on_rotate.result new file mode 100644 index 00000000000..d00740dba3d --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_ignore_error_on_rotate.result @@ -0,0 +1,74 @@ +*** MDEV-6551: Some replication errors are ignored if slave_parallel_threads > 0 *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_threads=1; +CHANGE MASTER TO master_use_gtid=slave_pos; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +SET gtid_domain_id=1; +INSERT INTO t2 VALUES (1); +SET gtid_domain_id=0; +SET gtid_domain_id=2; +INSERT INTO t2 VALUES (2); +SET gtid_domain_id=0; +INSERT INTO t2 VALUES (31); +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +include/stop_slave.inc +SET GLOBAL slave_parallel_threads= 0; +include/start_slave.inc +SET sql_log_bin= 0; +INSERT INTO t2 VALUES (32); +SET sql_log_bin= 1; +connection server_1; +INSERT INTO t2 VALUES (32); +FLUSH LOGS; +INSERT INTO t2 VALUES (33); +INSERT INTO t2 VALUES (34); +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; +a +31 +32 +33 +34 +include/save_master_gtid.inc +connection server_2; +include/wait_for_slave_sql_error.inc [errno=1062] +connection server_2; +include/stop_slave_io.inc +SET GLOBAL slave_parallel_threads=10; +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1062] +START SLAVE SQL_THREAD; +include/wait_for_slave_sql_error.inc [errno=1062] +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; +a +31 +32 +SET sql_slave_skip_counter= 1; +ERROR HY000: When using parallel replication and GTID with multiple replication domains, @@sql_slave_skip_counter can not be used. 
Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position +include/stop_slave_io.inc +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; +a +31 +32 +33 +34 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +include/start_slave.inc +connection server_1; +DROP TABLE t2; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_incorrect_relay_pos.result b/mysql-test/suite/rpl/r/rpl_parallel_incorrect_relay_pos.result new file mode 100644 index 00000000000..6ca7f2b68e8 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_incorrect_relay_pos.result @@ -0,0 +1,75 @@ +*** MDEV-7237: Parallel replication: incorrect relaylog position after stop/start the slave *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode='conservative'; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t2 VALUES (40); +connection server_2; +connect con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=no; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_scheduled_gtid_0_x_100"; +SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +connection server_1; +INSERT INTO t2 VALUES (41); +INSERT INTO t2 VALUES (42); +SET @old_format= @@binlog_format; +SET binlog_format= statement; +DELETE FROM t2 WHERE a=40; +SET binlog_format= @old_format; +INSERT INTO t2 VALUES (43); +INSERT INTO t2 VALUES (44); +FLUSH LOGS; +INSERT INTO t2 VALUES (45); +SET gtid_seq_no=100; +INSERT INTO t2 VALUES (46); +connection con_temp2; +BEGIN; +SELECT * FROM t2 WHERE a=40 FOR UPDATE; +a +40 +connection server_2; +include/start_slave.inc +SET debug_sync= 'now WAIT_FOR scheduled_gtid_0_x_100'; +STOP SLAVE; +connection con_temp2; +SET debug_sync= 'now WAIT_FOR wait_for_done_waiting'; +ROLLBACK; +connection server_2; +include/wait_for_slave_sql_to_stop.inc +SELECT * FROM t2 WHERE a >= 40 ORDER BY a; +a +41 +42 +include/start_slave.inc +SELECT * FROM t2 WHERE a >= 40 ORDER BY a; +a +41 +42 +43 +44 +45 +46 +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET DEBUG_SYNC= 'RESET'; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +DROP TABLE t2; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_innodb_lock_conflict.result b/mysql-test/suite/rpl/r/rpl_parallel_innodb_lock_conflict.result new file mode 100644 index 00000000000..1411db16af6 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_innodb_lock_conflict.result @@ -0,0 +1,79 @@ +***MDEV-5914: Parallel replication deadlock due to InnoDB lock conflicts *** +include/master-slave.inc +[connection master] +connection server_2; +SET sql_log_bin=0; +CALL 
mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +SET sql_log_bin=1; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; +INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); +connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connection con1; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +UPDATE t4 SET b=NULL WHERE a=6; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con2; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +DELETE FROM t4 WHERE b <= 3; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con1; +connection con2; +SET debug_sync='RESET'; +connection server_2; +include/start_slave.inc +include/stop_slave.inc +SELECT * FROM t4 ORDER BY a; +a b +1 NULL +3 NULL +4 4 +5 NULL +6 NULL +connection server_1; +DELETE FROM t4; +INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); +connection con1; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +INSERT INTO t4 VALUES (7, NULL); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con2; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +DELETE FROM t4 WHERE b <= 3; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con1; +connection con2; +SET debug_sync='RESET'; +connection server_2; +include/start_slave.inc +include/stop_slave.inc +SELECT * FROM t4 ORDER BY a; +a b +1 NULL +3 NULL +4 4 +5 NULL +6 6 +7 NULL +connection server_2; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +disconnect con1; +disconnect con2; +DROP TABLE t4; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_mdev6589.result b/mysql-test/suite/rpl/r/rpl_parallel_mdev6589.result index e05e824eed3..47cdb3496da 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_mdev6589.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_mdev6589.result @@ -92,6 +92,7 @@ MASTER_GTID_WAIT('WAIT_POS') 0 connection con_temp1; COMMIT; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); SET sql_log_bin=1; connection server_2; include/wait_for_slave_sql_error.inc [errno=1062] diff --git a/mysql-test/suite/rpl/r/rpl_parallel_missed_error_handling.result b/mysql-test/suite/rpl/r/rpl_parallel_missed_error_handling.result new file mode 100644 index 00000000000..e9d04c02d7a --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_missed_error_handling.result @@ -0,0 +1,65 @@ +*** MDEV-5921: In parallel replication, an error is not correctly signalled to the next transaction *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +CALL 
mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +connection server_1; +INSERT INTO t3 VALUES (110, 1); +connection server_2; +SELECT * FROM t3 WHERE a >= 110 ORDER BY a; +a b +110 1 +SET sql_log_bin=0; +INSERT INTO t3 VALUES (111, 666); +SET sql_log_bin=1; +connection server_1; +connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +INSERT INTO t3 VALUES (111, 2); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connect con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +INSERT INTO t3 VALUES (112, 3); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con1; +connection con2; +SET debug_sync='RESET'; +connection server_2; +include/wait_for_slave_sql_error.inc [errno=1062] +include/wait_for_slave_sql_to_stop.inc +SELECT * FROM t3 WHERE a >= 110 ORDER BY a; +a b +110 1 +111 666 +SET sql_log_bin=0; +DELETE FROM t3 WHERE a=111 AND b=666; +SET sql_log_bin=1; +START SLAVE SQL_THREAD; +SELECT * FROM t3 WHERE a >= 110 ORDER BY a; +a b +110 1 +111 2 +112 3 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +disconnect con1; +disconnect con2; +DROP TABLE t3; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_mode.result b/mysql-test/suite/rpl/r/rpl_parallel_mode.result new file mode 100644 index 00000000000..313290b1fd2 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_mode.result @@ -0,0 +1,75 @@ +*** MDEV-6676 - test syntax of @@slave_parallel_mode *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +connection server_2; +Parallel_Mode = 'optimistic' +include/stop_slave.inc +SET GLOBAL slave_parallel_mode='aggressive'; +Parallel_Mode = 'aggressive' +SET GLOBAL slave_parallel_mode='conservative'; +Parallel_Mode = 'conservative' +*** MDEV-6676 - test that empty parallel_mode does not replicate in parallel *** +connection server_1; +INSERT INTO t2 VALUES (1040); +include/save_master_gtid.inc +connection server_2; +SET GLOBAL slave_parallel_mode='none'; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,slave_crash_if_parallel_apply"; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; +a +1040 +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +*** MDEV-6676 - test disabling domain-based parallel replication *** +connection server_1; +SET gtid_domain_id = 1; +INSERT INTO t2 VALUES (1041); +INSERT INTO t2 VALUES (1042); +INSERT INTO t2 VALUES (1043); +INSERT INTO t2 VALUES (1044); +INSERT INTO t2 VALUES (1045); +INSERT INTO t2 VALUES (1046); +DELETE FROM t2 WHERE a >= 1041; 
+SET gtid_domain_id = 2; +INSERT INTO t2 VALUES (1041); +INSERT INTO t2 VALUES (1042); +INSERT INTO t2 VALUES (1043); +INSERT INTO t2 VALUES (1044); +INSERT INTO t2 VALUES (1045); +INSERT INTO t2 VALUES (1046); +SET gtid_domain_id = 0; +include/save_master_gtid.inc +connection server_2; +SET GLOBAL slave_parallel_mode=minimal; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; +a +1040 +1041 +1042 +1043 +1044 +1045 +1046 +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +include/start_slave.inc +connection server_1; +DROP TABLE t2; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_partial_binlog_trans.result b/mysql-test/suite/rpl/r/rpl_parallel_partial_binlog_trans.result new file mode 100644 index 00000000000..ab1cac692a0 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_partial_binlog_trans.result @@ -0,0 +1,51 @@ +*** MDEV_6435: Incorrect error handling when query binlogged partially on master with "killed" error *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=1; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t6 (a INT) ENGINE=MyISAM; +CREATE TRIGGER tr AFTER INSERT ON t6 FOR EACH ROW SET @a = 1; +connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connection con1; +SET debug_sync='sp_head_execute_before_loop SIGNAL ready WAIT_FOR cont'; +INSERT INTO t6 VALUES (1), (2), (3); +connection server_1; +SET debug_sync='now WAIT_FOR ready'; +KILL QUERY CONID; +SET debug_sync='now SIGNAL cont'; +connection con1; +ERROR 70100: Query execution was interrupted +SET debug_sync='RESET'; +connection server_1; +SET debug_sync='RESET'; +connection server_2; +include/wait_for_slave_sql_error.inc [errno=1317] +STOP SLAVE IO_THREAD; +SET GLOBAL gtid_slave_pos= 'AFTER_ERROR_GTID_POS'; +include/start_slave.inc +connection server_1; +INSERT INTO t6 VALUES (4); +SELECT * FROM t6 ORDER BY a; +a +1 +4 +connection server_2; +SELECT * FROM t6 ORDER BY a; +a +4 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP TABLE t6; +SET DEBUG_SYNC= 'RESET'; +disconnect con1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_record_gtid_wakeup.result b/mysql-test/suite/rpl/r/rpl_parallel_record_gtid_wakeup.result new file mode 100644 index 00000000000..cbe53e4f623 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_record_gtid_wakeup.result @@ -0,0 +1,48 @@ +*** MDEV-7929: record_gtid() for non-transactional event group calls wakeup_subsequent_commits() too early, causing slave hang. 
*** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_record_gtid_serverid_100_sleep'; +connection server_1; +SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +SET @old_server_id= @@SESSION.server_id; +SET SESSION server_id= 100; +SET @commit_id= 10010; +ALTER TABLE t1 COMMENT "Hulubulu!"; +SET SESSION server_id= @old_server_id; +INSERT INTO t3 VALUES (130, 0); +SET @commit_id= 10011; +INSERT INTO t3 VALUES (131, 0); +SET SESSION debug_dbug=@old_dbug; +SELECT * FROM t3 WHERE a >= 130 ORDER BY a; +a b +130 0 +131 0 +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t3 WHERE a >= 130 ORDER BY a; +a b +130 0 +131 0 +include/stop_slave.inc +SET GLOBAL debug_dbug= @old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t1,t3; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_retry_deadlock.result b/mysql-test/suite/rpl/r/rpl_parallel_retry_deadlock.result new file mode 100644 index 00000000000..1f5a23db848 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_retry_deadlock.result @@ -0,0 +1,192 @@ +*** MDEV-7326 Server deadlock in connection with parallel replication *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +include/stop_slave.inc +SET GLOBAL slave_parallel_mode='conservative'; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t6 (a INT) ENGINE=MyISAM; +connection server_2; +connection server_1; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=3; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; +include/start_slave.inc +connection server_1; +SET @old_format= @@SESSION.binlog_format; +SET binlog_format= STATEMENT; +INSERT INTO t1 VALUES (foo(50, +"rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", +"rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; +connection server_1; +INSERT INTO t2 VALUES (foo(50, +"rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", +"rpl_parallel_retry_after_unmark 
SIGNAL t1_ready2 WAIT_FOR t1_cont2")); +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; +connection server_1; +INSERT INTO t1 VALUES (foo(51, +"rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", +"rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; +SET DEBUG_SYNC= "now SIGNAL t1_cont1"; +SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; +connection server_1; +INSERT INTO t1 VALUES (52); +SET BINLOG_FORMAT= @old_format; +SELECT * FROM t2 WHERE a>=50 ORDER BY a; +a +50 +SELECT * FROM t1 WHERE a>=50 ORDER BY a; +a +50 +51 +52 +connection server_2; +SET DEBUG_SYNC= "now SIGNAL prep_cont"; +SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; +SET DEBUG_SYNC= "now SIGNAL t2_cont1"; +SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; +SET DEBUG_SYNC= "now SIGNAL t1_cont2"; +connection server_1; +connection server_2; +SELECT * FROM t2 WHERE a>=50 ORDER BY a; +a +50 +SELECT * FROM t1 WHERE a>=50 ORDER BY a; +a +50 +51 +52 +SET DEBUG_SYNC="reset"; +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +include/start_slave.inc +*** MDEV-7326 Server deadlock in connection with parallel replication *** +connect con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connection server_2; +include/stop_slave.inc +SET @old_parallel_mode= @@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_mode='conservative'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=3; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_xid"; +include/start_slave.inc +connection server_1; +SET @old_format= @@SESSION.binlog_format; +SET binlog_format= STATEMENT; +INSERT INTO t1 VALUES (foo(60, +"rpl_parallel_start_waiting_for_prior SIGNAL t3_ready", +"rpl_parallel_end_of_group SIGNAL prep_ready WAIT_FOR prep_cont")); +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR prep_ready"; +connection server_1; +INSERT INTO t2 VALUES (foo(60, +"rpl_parallel_simulate_temp_err_xid SIGNAL t1_ready1 WAIT_FOR t1_cont1", +"rpl_parallel_retry_after_unmark SIGNAL t1_ready2 WAIT_FOR t1_cont2")); +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR t1_ready1"; +connection con_temp3; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +SET binlog_format=statement; +INSERT INTO t1 VALUES (foo(61, +"rpl_parallel_before_mark_start_commit SIGNAL t2_ready1 WAIT_FOR t2_cont1", +"rpl_parallel_after_mark_start_commit SIGNAL t2_ready2")); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con_temp4; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +INSERT INTO t6 VALUES (62); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con_temp3; +connection con_temp4; +connection server_1; +SET debug_sync='RESET'; +SET BINLOG_FORMAT= @old_format; +SELECT * FROM t2 WHERE a>=60 ORDER BY a; +a +60 +SELECT * FROM t1 WHERE a>=60 ORDER BY a; +a +60 +61 +SELECT * FROM t6 WHERE a>=60 ORDER BY a; +a +62 +connection server_2; +SET DEBUG_SYNC= "now WAIT_FOR t2_ready1"; +SET DEBUG_SYNC= "now SIGNAL t1_cont1"; +SET DEBUG_SYNC= "now WAIT_FOR t1_ready2"; +connection server_2; +SET DEBUG_SYNC= "now SIGNAL prep_cont"; +SET DEBUG_SYNC= "now WAIT_FOR t3_ready"; +SET DEBUG_SYNC= "now SIGNAL t2_cont1"; 
+SET DEBUG_SYNC= "now WAIT_FOR t2_ready2"; +SET DEBUG_SYNC= "now SIGNAL t1_cont2"; +connection server_1; +connection server_2; +SELECT * FROM t2 WHERE a>=60 ORDER BY a; +a +60 +SELECT * FROM t1 WHERE a>=60 ORDER BY a; +a +60 +61 +SELECT * FROM t6 WHERE a>=60 ORDER BY a; +a +62 +SET DEBUG_SYNC="reset"; +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP function foo; +DROP TABLE t1,t2,t6; +disconnect con_temp3; +disconnect con_temp4; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_rollback_assert.result b/mysql-test/suite/rpl/r/rpl_parallel_rollback_assert.result new file mode 100644 index 00000000000..af9c5f14687 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_rollback_assert.result @@ -0,0 +1,45 @@ +*** MDEV-8725: Assertion on ROLLBACK statement in the binary log *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +connection server_2; +connection server_1; +BEGIN; +INSERT INTO t2 VALUES (2000); +INSERT INTO t1 VALUES (2000); +INSERT INTO t2 VALUES (2001); +ROLLBACK; +SELECT * FROM t1 WHERE a>=2000 ORDER BY a; +a +2000 +SELECT * FROM t2 WHERE a>=2000 ORDER BY a; +a +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +connection server_1; +INSERT INTO t2 VALUES (2020); +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a>=2000 ORDER BY a; +a +2020 +SELECT * FROM t1 WHERE a>=2000 ORDER BY a; +a +2000 +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t1,t2; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_single_grpcmt.result b/mysql-test/suite/rpl/r/rpl_parallel_single_grpcmt.result new file mode 100644 index 00000000000..18ffac4fa5d --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_single_grpcmt.result @@ -0,0 +1,160 @@ +*** Test that group-committed transactions on the master can replicate in parallel on the slave. 
*** +include/master-slave.inc +[connection master] +connection server_1; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +include/stop_slave.inc +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7); +connection server_2; +connect con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +BEGIN; +INSERT INTO t3 VALUES (2,102); +connect con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +BEGIN; +INSERT INTO t3 VALUES (4,104); +connect con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +SET binlog_format=statement; +INSERT INTO t3 VALUES (2, foo(12, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connect con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +SET binlog_format=statement; +INSERT INTO t3 VALUES (4, foo(14, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +connect con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +SET binlog_format=statement; +INSERT INTO t3 VALUES (6, foo(16, +'group_commit_waiting_for_prior SIGNAL slave_queued3', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; +connection con_temp3; +connection con_temp4; +connection con_temp5; +SET debug_sync='RESET'; +connection server_1; +SELECT * FROM t3 ORDER BY a; +a b +1 1 +2 12 +3 3 +4 14 +5 5 +6 16 +7 7 +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +master-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (2, foo(12, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', +'')) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +master-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (4, foo(14, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', +'')) +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +master-bin.000001 # Query # 
# use `test`; INSERT INTO t3 VALUES (6, foo(16, +'group_commit_waiting_for_prior SIGNAL slave_queued3', +'')) +master-bin.000001 # Xid # # COMMIT /* XID */ +connection server_2; +SET debug_sync='now WAIT_FOR slave_queued3'; +connection con_temp1; +ROLLBACK; +connection server_2; +SET debug_sync='now WAIT_FOR slave_queued1'; +connection con_temp2; +ROLLBACK; +connection server_2; +SET debug_sync='now WAIT_FOR slave_queued2'; +SET debug_sync='now SIGNAL slave_cont1'; +SELECT * FROM t3 ORDER BY a; +a b +1 1 +2 12 +3 3 +4 14 +5 5 +6 16 +7 7 +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +slave-bin.000001 # Gtid # # GTID #-#-# +slave-bin.000001 # Query # # use `test`; ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB +slave-bin.000001 # Gtid # # GTID #-#-# +slave-bin.000001 # Query # # use `test`; CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB +slave-bin.000001 # Gtid # # BEGIN GTID #-#-# +slave-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (1,1), (3,3), (5,5), (7,7) +slave-bin.000001 # Xid # # COMMIT /* XID */ +slave-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +slave-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (2, foo(12, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued1 WAIT_FOR slave_cont1', +'')) +slave-bin.000001 # Xid # # COMMIT /* XID */ +slave-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +slave-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (4, foo(14, +'commit_after_release_LOCK_prepare_ordered SIGNAL slave_queued2', +'')) +slave-bin.000001 # Xid # # COMMIT /* XID */ +slave-bin.000001 # Gtid # # BEGIN GTID #-#-# cid=# +slave-bin.000001 # Query # # use `test`; INSERT INTO t3 VALUES (6, foo(16, +'group_commit_waiting_for_prior SIGNAL slave_queued3', +'')) +slave-bin.000001 # Xid # # COMMIT /* XID */ +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP function foo; +DROP TABLE t3; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_slave_bgc_kill.result b/mysql-test/suite/rpl/r/rpl_parallel_slave_bgc_kill.result new file mode 100644 index 00000000000..320bf0e49f8 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_slave_bgc_kill.result @@ -0,0 +1,323 @@ +*** Test killing slave threads at various wait points *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode='conservative'; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +connect con_temp3,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con_temp4,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con_temp5,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +connection server_1; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN 
+SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +*** 1. Test killing transaction waiting in commit for previous transaction to commit *** +connection con_temp3; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +INSERT INTO t3 VALUES (31, foo(31, +'commit_before_prepare_ordered WAIT_FOR t2_waiting', +'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con_temp4; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +BEGIN; +INSERT INTO t3 VALUES (32, foo(32, +'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', +'')); +INSERT INTO t3 VALUES (33, foo(33, +'group_commit_waiting_for_prior SIGNAL t2_waiting', +'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); +COMMIT; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +connection con_temp5; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +INSERT INTO t3 VALUES (34, foo(34, +'', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; +connection con_temp3; +connection con_temp4; +connection con_temp5; +connection server_1; +SELECT * FROM t3 WHERE a >= 30 ORDER BY a; +a b +31 31 +32 32 +33 33 +34 34 +SET debug_sync='RESET'; +connection server_2; +SET sql_log_bin=0; +CALL mtr.add_suppression("Query execution was interrupted"); +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); +CALL mtr.add_suppression("Slave: Connection was killed"); +SET sql_log_bin=1; +SET debug_sync='now WAIT_FOR t2_query'; +SET debug_sync='now SIGNAL t2_cont'; +SET debug_sync='now WAIT_FOR t1_ready'; +KILL THD_ID; +SET debug_sync='now WAIT_FOR t2_killed'; +SET debug_sync='now SIGNAL t1_cont'; +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] +STOP SLAVE IO_THREAD; +SELECT * FROM t3 WHERE a >= 30 ORDER BY a; +a b +31 31 +SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_1; +INSERT INTO t3 VALUES (39,0); +connection server_2; +include/start_slave.inc +SELECT * FROM t3 WHERE a >= 30 ORDER BY a; +a b +31 31 +32 32 +33 33 +34 34 +39 0 +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +include/start_slave.inc +*** 2. 
Same as (1), but without restarting IO thread after kill of SQL threads *** +connection con_temp3; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +INSERT INTO t3 VALUES (41, foo(41, +'commit_before_prepare_ordered WAIT_FOR t2_waiting', +'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con_temp4; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +BEGIN; +INSERT INTO t3 VALUES (42, foo(42, +'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', +'')); +INSERT INTO t3 VALUES (43, foo(43, +'group_commit_waiting_for_prior SIGNAL t2_waiting', +'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); +COMMIT; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +connection con_temp5; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +INSERT INTO t3 VALUES (44, foo(44, +'', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; +connection con_temp3; +connection con_temp4; +connection con_temp5; +connection server_1; +SELECT * FROM t3 WHERE a >= 40 ORDER BY a; +a b +41 41 +42 42 +43 43 +44 44 +SET debug_sync='RESET'; +connection server_2; +SET debug_sync='now WAIT_FOR t2_query'; +SET debug_sync='now SIGNAL t2_cont'; +SET debug_sync='now WAIT_FOR t1_ready'; +KILL THD_ID; +SET debug_sync='now WAIT_FOR t2_killed'; +SET debug_sync='now SIGNAL t1_cont'; +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] +SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_1; +INSERT INTO t3 VALUES (49,0); +connection server_2; +START SLAVE SQL_THREAD; +SELECT * FROM t3 WHERE a >= 40 ORDER BY a; +a b +41 41 +42 42 +43 43 +44 44 +49 0 +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +include/start_slave.inc +*** 3. 
Same as (2), but not using gtid mode *** +connection server_2; +include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=no; +include/start_slave.inc +connection server_1; +connection con_temp3; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +INSERT INTO t3 VALUES (51, foo(51, +'commit_before_prepare_ordered WAIT_FOR t2_waiting', +'commit_after_prepare_ordered SIGNAL t1_ready WAIT_FOR t1_cont')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con_temp4; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +BEGIN; +INSERT INTO t3 VALUES (52, foo(52, +'ha_write_row_end SIGNAL t2_query WAIT_FOR t2_cont', +'')); +INSERT INTO t3 VALUES (53, foo(53, +'group_commit_waiting_for_prior SIGNAL t2_waiting', +'group_commit_waiting_for_prior_killed SIGNAL t2_killed')); +COMMIT; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +connection con_temp5; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued3'; +INSERT INTO t3 VALUES (54, foo(54, +'', +'')); +connection server_1; +SET debug_sync='now WAIT_FOR master_queued3'; +SET debug_sync='now SIGNAL master_cont1'; +connection con_temp3; +connection con_temp4; +connection con_temp5; +connection server_1; +SELECT * FROM t3 WHERE a >= 50 ORDER BY a; +a b +51 51 +52 52 +53 53 +54 54 +SET debug_sync='RESET'; +connection server_2; +SET debug_sync='now WAIT_FOR t2_query'; +SET debug_sync='now SIGNAL t2_cont'; +SET debug_sync='now WAIT_FOR t1_ready'; +KILL THD_ID; +SET debug_sync='now WAIT_FOR t2_killed'; +SET debug_sync='now SIGNAL t1_cont'; +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] +SELECT * FROM t3 WHERE a >= 50 ORDER BY a; +a b +51 51 +SET debug_sync='RESET'; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +SET sql_log_bin=0; +DROP FUNCTION foo; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +connection server_1; +INSERT INTO t3 VALUES (59,0); +connection server_2; +START SLAVE SQL_THREAD; +SELECT * FROM t3 WHERE a >= 50 ORDER BY a; +a b +51 51 +52 52 +53 53 +54 54 +59 0 +connection server_2; +include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=slave_pos; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP function foo; +DROP TABLE t1,t2,t3; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_stop_on_con_kill.result b/mysql-test/suite/rpl/r/rpl_parallel_stop_on_con_kill.result new file mode 100644 index 00000000000..bf0ed9e4374 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_stop_on_con_kill.result @@ -0,0 +1,102 @@ +*** MDEV-8031: Parallel replication stops on "connection killed" error (probably incorrectly handled deadlock kill) *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t3 VALUES (201,0), (202,0); +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +include/stop_slave.inc 
+SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_mdev8031'; +connection server_1; +SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +SET @commit_id= 10200; +INSERT INTO t3 VALUES (203, 1); +INSERT INTO t3 VALUES (204, 1); +INSERT INTO t3 VALUES (205, 1); +UPDATE t3 SET b=b+1 WHERE a=201; +UPDATE t3 SET b=b+1 WHERE a=201; +UPDATE t3 SET b=b+1 WHERE a=201; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=202; +UPDATE t3 SET b=b+1 WHERE a=203; +UPDATE t3 SET b=b+1 WHERE a=203; +UPDATE t3 SET b=b+1 WHERE a=204; +UPDATE t3 SET b=b+1 WHERE a=204; +UPDATE t3 SET b=b+1 WHERE a=204; +UPDATE t3 SET b=b+1 WHERE a=203; +UPDATE t3 SET b=b+1 WHERE a=205; +UPDATE t3 SET b=b+1 WHERE a=205; +SET SESSION debug_dbug=@old_dbug; +SELECT * FROM t3 WHERE a>=200 ORDER BY a; +a b +201 3 +202 4 +203 4 +204 4 +205 3 +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t3 WHERE a>=200 ORDER BY a; +a b +201 3 +202 4 +203 4 +204 4 +205 3 +include/stop_slave.inc +SET GLOBAL debug_dbug= @old_dbug; +include/start_slave.inc +*** Check getting deadlock killed inside open_binlog() during retry. *** +connection server_2; +include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug= '+d,inject_retry_event_group_open_binlog_kill'; +SET @old_max= @@GLOBAL.max_relay_log_size; +SET GLOBAL max_relay_log_size= 4096; +connection server_1; +SET @old_dbug= @@SESSION.debug_dbug; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +SET @commit_id= 10210; +Omit long queries that cause relaylog rotations and transaction retries... +SET SESSION debug_dbug=@old_dbug; +SELECT * FROM t3 WHERE a>=200 ORDER BY a; +a b +201 6 +202 8 +203 7 +204 7 +205 5 +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t3 WHERE a>=200 ORDER BY a; +a b +201 6 +202 8 +203 7 +204 7 +205 5 +include/stop_slave.inc +SET GLOBAL debug_dbug= @old_debg; +SET GLOBAL max_relay_log_size= @old_max; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t3; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_stop_slave.result b/mysql-test/suite/rpl/r/rpl_parallel_stop_slave.result new file mode 100644 index 00000000000..6c9fd168e73 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_stop_slave.result @@ -0,0 +1,85 @@ +*** Test STOP SLAVE in parallel mode *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode='conservative'; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connect con_temp1,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +connect con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,; +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE t2 (a int PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t3 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +include/stop_slave.inc +connection server_1; +SET binlog_direct_non_transactional_updates=0; +SET sql_log_bin=0; +CALL mtr.add_suppression("Statement is unsafe because it accesses a non-transactional 
table after accessing a transactional table within the same transaction"); +SET sql_log_bin=1; +BEGIN; +INSERT INTO t2 VALUES (20); +INSERT INTO t1 VALUES (20); +INSERT INTO t2 VALUES (21); +INSERT INTO t3 VALUES (20, 20); +COMMIT; +INSERT INTO t3 VALUES(21, 21); +INSERT INTO t3 VALUES(22, 22); +connection con_temp1; +BEGIN; +INSERT INTO t2 VALUES (21); +connection server_2; +START SLAVE; +connection con_temp2; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; +STOP SLAVE; +connection con_temp1; +SET debug_sync='now WAIT_FOR wait_for_done_waiting'; +ROLLBACK; +connection con_temp2; +SET GLOBAL debug_dbug=@old_dbug; +SET debug_sync='RESET'; +connection server_2; +include/wait_for_slave_to_stop.inc +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +a +20 +SELECT * FROM t2 WHERE a >= 20 ORDER BY a; +a +20 +21 +SELECT * FROM t3 WHERE a >= 20 ORDER BY a; +a b +20 20 +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +a +20 +SELECT * FROM t2 WHERE a >= 20 ORDER BY a; +a +20 +21 +SELECT * FROM t3 WHERE a >= 20 ORDER BY a; +a b +20 20 +21 21 +22 22 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_mode=@old_parallel_mode; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +disconnect con_temp1; +disconnect con_temp2; +connection server_1; +DROP TABLE t1,t2,t3; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_wrong_binlog_order.result b/mysql-test/suite/rpl/r/rpl_parallel_wrong_binlog_order.result new file mode 100644 index 00000000000..f6781f64d30 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_wrong_binlog_order.result @@ -0,0 +1,75 @@ +*** MDEV-6775: Wrong binlog order in parallel replication *** +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t4 (a INT PRIMARY KEY, b INT, KEY b_idx(b)) ENGINE=InnoDB; +INSERT INTO t4 VALUES (1,NULL), (3,NULL), (4,4), (5, NULL), (6, 6); +connect con1,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +connect con2,127.0.0.1,root,,test,$SERVER_MYPORT_1,; +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +include/stop_slave.inc +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,inject_binlog_commit_before_get_LOCK_log"; +SET @old_format=@@GLOBAL.binlog_format; +SET GLOBAL binlog_format=ROW; +SET GLOBAL slave_parallel_threads=0; +SET GLOBAL slave_parallel_threads=10; +connection con1; +SET @old_format= @@binlog_format; +SET binlog_format= statement; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +UPDATE t4 SET b=NULL WHERE a=6; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued1'; +connection con2; +SET @old_format= @@binlog_format; +SET binlog_format= statement; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +DELETE FROM t4 WHERE b <= 3; +connection server_1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +connection con1; +SET binlog_format= @old_format; +connection con2; +SET binlog_format= @old_format; +SET debug_sync='RESET'; +SELECT * FROM t4 ORDER BY a; +a b +1 NULL +3 NULL +4 4 +5 
NULL +6 NULL +connection server_2; +include/start_slave.inc +SET debug_sync= 'now WAIT_FOR waiting'; +SELECT * FROM t4 ORDER BY a; +a b +1 NULL +3 NULL +4 4 +5 NULL +6 NULL +SET debug_sync= 'now SIGNAL cont'; +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL binlog_format= @old_format; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP TABLE t4; +SET DEBUG_SYNC= 'RESET'; +disconnect con1; +disconnect con2; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_wrong_exec_master_pos.result b/mysql-test/suite/rpl/r/rpl_parallel_wrong_exec_master_pos.result new file mode 100644 index 00000000000..47cfa5e08e2 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_wrong_exec_master_pos.result @@ -0,0 +1,34 @@ +*** MDEV-5938: Exec_master_log_pos not updated at log rotate in parallel replication *** +include/master-slave.inc +[connection master] +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=1; +CHANGE MASTER TO master_use_gtid=slave_pos; +include/start_slave.inc +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t5 (a INT PRIMARY KEY, b INT); +INSERT INTO t5 VALUES (1,1); +INSERT INTO t5 VALUES (2,2), (3,8); +INSERT INTO t5 VALUES (4,16); +connection server_2; +test_check +OK +test_check +OK +connection server_1; +FLUSH LOGS; +connection server_2; +test_check +OK +test_check +OK +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +connection server_1; +DROP TABLE t5; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_delayed_slave.combinations b/mysql-test/suite/rpl/t/rpl_delayed_slave.combinations index 8adc75e834f..bac7cb337c9 100644 --- a/mysql-test/suite/rpl/t/rpl_delayed_slave.combinations +++ b/mysql-test/suite/rpl/t/rpl_delayed_slave.combinations @@ -1,5 +1,4 @@ [nonparallel] [parallel] ---slave-parallel-mode=conservative --slave-parallel-threads=10 diff --git a/mysql-test/suite/rpl/t/rpl_mdev6386.test b/mysql-test/suite/rpl/t/rpl_mdev6386.test index e85b1ae0132..e6fb72dc788 100644 --- a/mysql-test/suite/rpl/t/rpl_mdev6386.test +++ b/mysql-test/suite/rpl/t/rpl_mdev6386.test @@ -10,6 +10,7 @@ CREATE TABLE t1 (a INT PRIMARY KEY, b INT) Engine=InnoDB; --source include/stop_slave.inc # Provoke a duplicate key error on replication. 
SET sql_log_bin= 0; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); INSERT INTO t1 VALUES (1, 2); SET sql_log_bin= 1; CHANGE MASTER TO master_use_gtid= current_pos; diff --git a/mysql-test/suite/rpl/t/rpl_parallel.test b/mysql-test/suite/rpl/t/rpl_parallel.test deleted file mode 100644 index ee39bfa7a39..00000000000 --- a/mysql-test/suite/rpl/t/rpl_parallel.test +++ /dev/null @@ -1 +0,0 @@ ---source include/rpl_parallel.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_analyze_table_hang.test b/mysql-test/suite/rpl/t/rpl_parallel_analyze_table_hang.test new file mode 100644 index 00000000000..c1f1b9bac43 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_analyze_table_hang.test @@ -0,0 +1 @@ +--source include/rpl_parallel_analyze_table_hang.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_conflicts-slave.opt b/mysql-test/suite/rpl/t/rpl_parallel_conflicts-slave.opt deleted file mode 100644 index af7bd138793..00000000000 --- a/mysql-test/suite/rpl/t/rpl_parallel_conflicts-slave.opt +++ /dev/null @@ -1 +0,0 @@ ---slave_parallel_threads=40 --slave_parallel_mode=conservative --slave_transaction_retries=5 diff --git a/mysql-test/suite/rpl/t/rpl_parallel_conflicts.test b/mysql-test/suite/rpl/t/rpl_parallel_conflicts.test index fc294f68197..0ba6a2b2dc1 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_conflicts.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_conflicts.test @@ -15,7 +15,12 @@ CREATE TABLE t8 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; --connection server_2 --sync_with_master --source include/stop_slave.inc - +SET @old_mode= @@GLOBAL.slave_parallel_mode; +SET GLOBAL slave_parallel_mode='conservative'; +SET @old_threads= @@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=40; +SET @old_transaction_retries= @@GLOBAL.slave_transaction_retries; +SET GLOBAL slave_transaction_retries=5; # Using dbug error injection, we artificially create event groups with a lot of # conflicting transactions in each event group. The bugs were originally seen @@ -249,6 +254,9 @@ SELECT * FROM t7 ORDER BY a; --source include/stop_slave.inc SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_mode=@old_mode; +SET GLOBAL slave_parallel_threads=@old_threads; +SET GLOBAL slave_transaction_retries=@old_transaction_retries; # Clean up. 
--source include/start_slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_deadlock_corrupt_binlog.test b/mysql-test/suite/rpl/t/rpl_parallel_deadlock_corrupt_binlog.test new file mode 100644 index 00000000000..9ee06f59858 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_deadlock_corrupt_binlog.test @@ -0,0 +1 @@ +--source include/rpl_parallel_deadlock_corrupt_binlog.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_domain.test b/mysql-test/suite/rpl/t/rpl_parallel_domain.test new file mode 100644 index 00000000000..f5864380f02 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_domain.test @@ -0,0 +1 @@ +--source include/rpl_parallel_domain.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_domain_slave_single_grp.test b/mysql-test/suite/rpl/t/rpl_parallel_domain_slave_single_grp.test new file mode 100644 index 00000000000..5ddd2af323c --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_domain_slave_single_grp.test @@ -0,0 +1 @@ +--source include/rpl_parallel_domain_slave_single_grp.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_free_deferred_event.test b/mysql-test/suite/rpl/t/rpl_parallel_free_deferred_event.test new file mode 100644 index 00000000000..e2a41d0db7e --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_free_deferred_event.test @@ -0,0 +1 @@ +--source include/rpl_parallel_free_deferred_event.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_gco_wait_kill.test b/mysql-test/suite/rpl/t/rpl_parallel_gco_wait_kill.test new file mode 100644 index 00000000000..d9dc4dfd293 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_gco_wait_kill.test @@ -0,0 +1 @@ +--source include/rpl_parallel_gco_wait_kill.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_gtid_slave_pos_update_fail.test b/mysql-test/suite/rpl/t/rpl_parallel_gtid_slave_pos_update_fail.test new file mode 100644 index 00000000000..0756caca084 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_gtid_slave_pos_update_fail.test @@ -0,0 +1 @@ +--source include/rpl_parallel_gtid_slave_pos_update_fail.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_ignore_error_on_rotate.test b/mysql-test/suite/rpl/t/rpl_parallel_ignore_error_on_rotate.test new file mode 100644 index 00000000000..92c84d497a7 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_ignore_error_on_rotate.test @@ -0,0 +1 @@ +--source include/rpl_parallel_ignore_error_on_rotate.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_incorrect_relay_pos.test b/mysql-test/suite/rpl/t/rpl_parallel_incorrect_relay_pos.test new file mode 100644 index 00000000000..e0226d9453e --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_incorrect_relay_pos.test @@ -0,0 +1 @@ +--source include/rpl_parallel_incorrect_relay_pos.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_innodb_lock_conflict.test b/mysql-test/suite/rpl/t/rpl_parallel_innodb_lock_conflict.test new file mode 100644 index 00000000000..3838973c201 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_innodb_lock_conflict.test @@ -0,0 +1 @@ +--source include/rpl_parallel_innodb_lock_conflict.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_mdev6589.test b/mysql-test/suite/rpl/t/rpl_parallel_mdev6589.test index 5929fad71df..981c6216376 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_mdev6589.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_mdev6589.test @@ -99,6 +99,7 @@ eval SELECT MASTER_GTID_WAIT('$wait_pos'); # duplicate key error. 
--connection con_temp1 COMMIT; +CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends"); SET sql_log_bin=1; --connection server_2 diff --git a/mysql-test/suite/rpl/t/rpl_parallel_missed_error_handling.test b/mysql-test/suite/rpl/t/rpl_parallel_missed_error_handling.test new file mode 100644 index 00000000000..de9dc7f782a --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_missed_error_handling.test @@ -0,0 +1 @@ +--source include/rpl_parallel_missed_error_handling.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_mode.test b/mysql-test/suite/rpl/t/rpl_parallel_mode.test new file mode 100644 index 00000000000..afd9e038912 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_mode.test @@ -0,0 +1 @@ +--source include/rpl_parallel_mode.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_partial_binlog_trans.test b/mysql-test/suite/rpl/t/rpl_parallel_partial_binlog_trans.test new file mode 100644 index 00000000000..7be26edabb8 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_partial_binlog_trans.test @@ -0,0 +1 @@ +--source include/rpl_parallel_partial_binlog_trans.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_record_gtid_wakeup.test b/mysql-test/suite/rpl/t/rpl_parallel_record_gtid_wakeup.test new file mode 100644 index 00000000000..4529a268103 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_record_gtid_wakeup.test @@ -0,0 +1 @@ +--source include/rpl_parallel_record_gtid_wakeup.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_retry_deadlock.test b/mysql-test/suite/rpl/t/rpl_parallel_retry_deadlock.test new file mode 100644 index 00000000000..50b216b6f58 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_retry_deadlock.test @@ -0,0 +1 @@ +--source include/rpl_parallel_retry_deadlock.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_rollback_assert.test b/mysql-test/suite/rpl/t/rpl_parallel_rollback_assert.test new file mode 100644 index 00000000000..f66375b7fb4 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_rollback_assert.test @@ -0,0 +1 @@ +--source include/rpl_parallel_rollback_assert.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_single_grpcmt.test b/mysql-test/suite/rpl/t/rpl_parallel_single_grpcmt.test new file mode 100644 index 00000000000..20919823945 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_single_grpcmt.test @@ -0,0 +1 @@ +--source include/rpl_parallel_single_grpcmt.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_slave_bgc_kill.test b/mysql-test/suite/rpl/t/rpl_parallel_slave_bgc_kill.test new file mode 100644 index 00000000000..7b0f9485e5e --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_slave_bgc_kill.test @@ -0,0 +1 @@ +--source include/rpl_parallel_slave_bgc_kill.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_stop_on_con_kill.test b/mysql-test/suite/rpl/t/rpl_parallel_stop_on_con_kill.test new file mode 100644 index 00000000000..64b4cb77dd4 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_stop_on_con_kill.test @@ -0,0 +1 @@ +--source include/rpl_parallel_stop_on_con_kill.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_stop_slave.test b/mysql-test/suite/rpl/t/rpl_parallel_stop_slave.test new file mode 100644 index 00000000000..131ddc2efd9 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_stop_slave.test @@ -0,0 +1 @@ +--source include/rpl_parallel_stop_slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_wrong_binlog_order.test b/mysql-test/suite/rpl/t/rpl_parallel_wrong_binlog_order.test new file mode 100644 index 
00000000000..3c920e1539a --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_wrong_binlog_order.test @@ -0,0 +1 @@ +--source include/rpl_parallel_wrong_binlog_order.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_wrong_exec_master_pos.test b/mysql-test/suite/rpl/t/rpl_parallel_wrong_exec_master_pos.test new file mode 100644 index 00000000000..9cd700f57fa --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_wrong_exec_master_pos.test @@ -0,0 +1 @@ +--source include/rpl_parallel_wrong_exec_master_pos.inc diff --git a/sql/mysqld.cc b/sql/mysqld.cc index b7f7615636b..09d8b4b7d1c 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -536,7 +536,7 @@ ulong stored_program_cache_size= 0; ulong opt_slave_parallel_threads= 0; ulong opt_slave_domain_parallel_threads= 0; -ulong opt_slave_parallel_mode= SLAVE_PARALLEL_CONSERVATIVE; +ulong opt_slave_parallel_mode; ulong opt_binlog_commit_wait_count= 0; ulong opt_binlog_commit_wait_usec= 0; ulong opt_slave_parallel_max_queued= 131072; diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc index b167b849923..5d10cc661ca 100644 --- a/sql/rpl_filter.cc +++ b/sql/rpl_filter.cc @@ -24,7 +24,7 @@ #define TABLE_RULE_ARR_SIZE 16 Rpl_filter::Rpl_filter() : - parallel_mode(SLAVE_PARALLEL_CONSERVATIVE), + parallel_mode(SLAVE_PARALLEL_OPTIMISTIC), table_rules_on(0), do_table_inited(0), ignore_table_inited(0), wild_do_table_inited(0), wild_ignore_table_inited(0) diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 3441a1ae4b1..0d035d002c4 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -2224,7 +2224,7 @@ static Sys_var_slave_parallel_mode Sys_slave_parallel_mode( "\"minimal\" only parallelizes the commit steps of transactions. " "\"none\" disables parallel apply completely.", GLOBAL_VAR(opt_slave_parallel_mode), NO_CMD_LINE, - slave_parallel_mode_names, DEFAULT(SLAVE_PARALLEL_CONSERVATIVE)); + slave_parallel_mode_names, DEFAULT(SLAVE_PARALLEL_OPTIMISTIC)); static Sys_var_bit Sys_skip_parallel_replication(
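
The .test changes above replace the per-test -slave.opt files (which make mysql-test-run restart the server with those options) with an explicit save/restore of the global settings inside each test, and the server-side hunks switch the built-in default of slave_parallel_mode from conservative to optimistic. A minimal sketch of the save/restore pattern, assembled from the same fragments that appear in the diffs above (the actual test body is omitted):

--source include/master-slave.inc

--connection server_2
--source include/stop_slave.inc
# Save the current global settings so the test can put them back at the end
SET @old_parallel_threads= @@GLOBAL.slave_parallel_threads;
SET @old_parallel_mode= @@GLOBAL.slave_parallel_mode;
SET GLOBAL slave_parallel_threads= 10;
SET GLOBAL slave_parallel_mode= 'conservative';
--source include/start_slave.inc

# ... test body ...

--connection server_2
--source include/stop_slave.inc
# Restore the saved values instead of relying on a -slave.opt file
SET GLOBAL slave_parallel_threads= @old_parallel_threads;
SET GLOBAL slave_parallel_mode= @old_parallel_mode;
--source include/start_slave.inc
--source include/rpl_end.inc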
[Commits] a49376056f9: MDEV-21383: Possible range plan is not used under certain conditions
by psergey 22 Dec '19

revision-id: a49376056f9eeeb76f1818c0d8b9f9e006d4dfe3 (mariadb-10.3.10-2-ga49376056f9) parent(s): ea9a7ef6ec0e3e24927fb1fc39e00cb526eec89c author: Sergei Petrunia committer: Sergei Petrunia timestamp: 2019-12-23 01:13:38 +0300 message: MDEV-21383: Possible range plan is not used under certain conditions (10.3's variant of the fix) make_join_select() has a section of code which starts with "We plan to scan all rows. Check again if we should use an index." the code in that section will [unnecessarily] re-run the range optimizer using this condition: condition_attached_to_current_table AND current_table's_ON_expr Note that the original invocation of range optimizer in make_join_statistics was done using the whole select's WHERE condition. taking the whole select's WHERE condition and using multiple-equalities allowed the range optimizer to do more inferences. This fix uses a very conservative approach: we still re-run the range optimizer, but we save the quick select we already have, and take the best of the two quick selects. --- mysql-test/main/distinct.result | 2 +- mysql-test/main/group_min_max.result | 2 +- sql/sql_select.cc | 15 +++++++++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/mysql-test/main/distinct.result b/mysql-test/main/distinct.result index 70bce519ad2..9091d78d9fb 100644 --- a/mysql-test/main/distinct.result +++ b/mysql-test/main/distinct.result @@ -212,7 +212,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 index NULL PRIMARY 4 NULL 1 Using index explain SELECT distinct a from t3 order by a desc limit 2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t3 index NULL a 5 NULL 40 Using index +1 SIMPLE t3 range NULL a 5 NULL 10 Using index for group-by; Using temporary; Using filesort explain SELECT distinct a,b from t3 order by a+1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t3 ALL NULL NULL NULL NULL 204 Using temporary; Using filesort diff --git a/mysql-test/main/group_min_max.result b/mysql-test/main/group_min_max.result index cfdf9ef9865..80de24e350c 100644 --- a/mysql-test/main/group_min_max.result +++ b/mysql-test/main/group_min_max.result @@ -2429,7 +2429,7 @@ EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE EXISTS (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1_outer index NULL a 10 NULL 15 Using index -2 SUBQUERY t1 index NULL a 10 NULL 15 Using index +2 SUBQUERY t1 range NULL a 5 NULL 8 Using index for group-by EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) > 12; id select_type table type possible_keys key key_len ref rows Extra diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 491e684f7a0..be0f85e9aa2 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -10670,6 +10670,11 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) if (sel->cond && !sel->cond->fixed) sel->cond->quick_fix_field(); + // Move away the quick select (the first thing test_quick_select() + // will do is delete it anyway) + SQL_SELECT save_sel= *sel; + tab->select->quick=NULL; + if (sel->test_quick_select(thd, tab->keys, ((used_tables & ~ current_map) | OUTER_REF_TABLE_BIT), @@ -10697,6 +10702,16 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) else sel->cond=orig_cond; + // If save_sel_copy has a better quick select than sel, put it + // back. 
+ if (save_sel.quick && + (!sel->quick || sel->quick->read_time > save_sel.read_time)) + { + delete sel->quick; + *sel= save_sel; + save_sel.quick= NULL; + } + /* Fix for EXPLAIN */ if (sel->quick) join->best_positions[i].records_read= (double)sel->quick->records;
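
The fix above keeps the quick select that was already computed from the whole WHERE clause, lets test_quick_select() build a new one from the narrower condition, and then puts the saved plan back if the new one is missing or more expensive. A compressed, self-contained C++ sketch of that idea follows; QuickSelect, SqlSelect and requick_keep_best are illustrative stand-ins for this sketch only, not the server's actual SQL_SELECT/QUICK_SELECT_I classes:

#include <memory>

// Stand-in for a range access plan with an estimated cost.
struct QuickSelect {
  double read_time;
};

// Stand-in for the structure that owns the current range plan.
struct SqlSelect {
  std::unique_ptr<QuickSelect> quick;   // may be null: "no range plan"
};

// Re-run the range optimizer, but never end up with a worse plan than
// the one we already had.
void requick_keep_best(SqlSelect &sel,
                       std::unique_ptr<QuickSelect> (*run_range_optimizer)(SqlSelect &)) {
  // Save the existing plan before the optimizer overwrites it.
  std::unique_ptr<QuickSelect> saved = std::move(sel.quick);

  // Build a new plan from the narrower condition.
  sel.quick = run_range_optimizer(sel);

  // Keep whichever plan is cheaper (or the only one that exists).
  if (saved && (!sel.quick || sel.quick->read_time > saved->read_time))
    sel.quick = std::move(saved);
}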