commit 52f3829b95cea6e97a1eedfd8e5a73ddaf611809
Author: Sachin <sachin.setiya@mariadb.com>
Date:   Wed Oct 9 21:16:31 2019 +0530

    MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
   
    Problem:-
   
    The issue occurs when we do a bulk insert with more than
    MI_MIN_ROWS_TO_DISABLE_INDEXES (100) rows: we try to disable the indexes
    to speed up the insert, but the current logic also disables the long
    unique indexes.
   
    Solution:- In ha_myisam::start_bulk_insert if we find long hash index
    (HA_KEY_ALG_LONG_HASH) we will not disable the index.
   
    This commit also refactors the mi_disable_indexes_for_rebuild function;
    since this function is called in only one place, it is inlined into
    start_bulk_insert.
   
    mi_clear_key_active is added to myisamdef.h because it is now also used
    in the ha_myisam.cc file.

diff --git a/include/myisam.h b/include/myisam.h
index 216f041c8a9..f2e31bb9f60 100644
--- a/include/myisam.h
+++ b/include/myisam.h
@@ -430,6 +430,7 @@ int sort_ft_buf_flush(MI_SORT_PARAM *sort_param);
 int thr_write_keys(MI_SORT_PARAM *sort_param);
 int sort_write_record(MI_SORT_PARAM *sort_param);
 int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, ulonglong);
+my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows);
 
 #ifdef __cplusplus
 }
diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result
index d4b71e2bc46..c0ba4d0b87d 100644
--- a/mysql-test/main/long_unique_bugs.result
+++ b/mysql-test/main/long_unique_bugs.result
@@ -267,3 +267,6 @@ connection default;
 DROP TABLE t1, t2;
 CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria;
 ERROR 42000: Specified key was too long; max key length is 1000 bytes
+create table t1(a int, unique(a) using hash);
+#BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
+drop table t1;
diff --git a/mysql-test/main/long_unique_bugs.test b/mysql-test/main/long_unique_bugs.test
index 62c4076cee0..13a4e1367a0 100644
--- a/mysql-test/main/long_unique_bugs.test
+++ b/mysql-test/main/long_unique_bugs.test
@@ -323,3 +323,20 @@ DROP TABLE t1, t2;
 #
 --error ER_TOO_LONG_KEY
 CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria;
+
+#
+# MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
+#
+create table t1(a int, unique(a) using hash);
+--let $count=150
+--let insert_stmt= insert into t1 values(200)
+while ($count)
+{
+  --let $insert_stmt=$insert_stmt,($count)
+  --dec $count
+}
+--disable_query_log
+--echo #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
+--eval $insert_stmt
+--enable_query_log
+drop table t1;
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index bde9c99288f..54c38ae69b4 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -1749,7 +1749,35 @@ void ha_myisam::start_bulk_insert(ha_rows rows, uint flags)
     else
     {
       my_bool all_keys= MY_TEST(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT);
-      mi_disable_indexes_for_rebuild(file, rows, all_keys);
+      MYISAM_SHARE *share=file->s;
+      MI_KEYDEF    *key=share->keyinfo;
+      uint          i;
+     /*
+      Deactivate all indexes that can be recreated fast.
+      These include packed keys on which sorting will use more temporary
+      space than the max allowed file length or for which the unpacked keys
+      will take much more space than packed keys.
+      Note that 'rows' may be zero for the case when we don't know how many
+      rows we will put into the file.
+      Long Unique Index (HA_KEY_ALG_LONG_HASH) will not be disabled because
+      their unique property is enforced at the time of ha_write_row
+      (check_duplicate_long_entries). So we need an active index at the time
+      of insert.
+     */
+      DBUG_ASSERT(file->state->records == 0 &&
+                  (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES));
+      for (i=0 ; i < share->base.keys ; i++,key++)
+      {
+        if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY)) &&
+            ! mi_too_big_key_for_sort(key,rows) && file->s->base.auto_key != i+1 &&
+            (all_keys || !(key->flag & HA_NOSAME)) &&
+            table->key_info[i].algorithm != HA_KEY_ALG_LONG_HASH)
+        {
+          mi_clear_key_active(share->state.key_map, i);
+          file->update|= HA_STATE_CHANGED;
+          file->create_unique_index_by_sort= all_keys;
+        }
+      }
     }
   }
   else
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index 22eb97b24f3..2a4ef8851c9 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -4670,7 +4670,7 @@ static ha_checksum mi_byte_checksum(const uchar *buf, uint length)
   return crc;
 }
 
-static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
+my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
 {
   uint key_maxlength=key->maxlength;
   if (key->flag & HA_FULLTEXT)
@@ -4684,38 +4684,6 @@ static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
   ((ulonglong) rows * key_maxlength > myisam_max_temp_length));
 }
 
-/*
-  Deactivate all indexes that can be recreated fast.
-  These include packed keys on which sorting will use more temporary
-  space than the max allowed file length or for which the unpacked keys
-  will take much more space than packed keys.
-  Note that 'rows' may be zero for the case when we don't know how many
-  rows we will put into the file.
- */
-
-void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows,
-                                    my_bool all_keys)
-{
-  MYISAM_SHARE *share=info->s;
-  MI_KEYDEF    *key=share->keyinfo;
-  uint          i;
-
-  DBUG_ASSERT(info->state->records == 0 &&
-              (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES));
-  for (i=0 ; i < share->base.keys ; i++,key++)
-  {
-    if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY)) &&
-        ! mi_too_big_key_for_sort(key,rows) && info->s->base.auto_key != i+1 &&
-        (all_keys || !(key->flag & HA_NOSAME)))
-    {
-      mi_clear_key_active(share->state.key_map, i);
-      info->update|= HA_STATE_CHANGED;
-      info->create_unique_index_by_sort= all_keys;
-    }
-  }
-}
-
-
 /*
   Return TRUE if we can use repair by sorting
   One can set the force argument to force to use sorting
diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h
index c6fa777774a..f7b61ae638c 100644
--- a/storage/myisam/myisamdef.h
+++ b/storage/myisam/myisamdef.h
@@ -715,8 +715,6 @@ void mi_restore_status(void *param);
 void mi_copy_status(void *to, void *from);
 my_bool mi_check_status(void *param);
 void mi_fix_status(MI_INFO *org_table, MI_INFO *new_table);
-void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows,
-                                    my_bool all_keys);
 extern MI_INFO *test_if_reopen(char *filename);
 my_bool check_table_is_closed(const char *name, const char *where);
 int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share);

--
Regards
Sachin Setiya
Software Engineer at  MariaDB