
[Commits] facf250: MDEV-7974 backport fix for mysql bug#12161 (XA and binlog).
by holyfoot@askmonty.org 21 Feb '19
revision-id: facf2504fa8006b8ee577da1a235beef6db750bb (mariadb-10.4.1-103-gfacf250)
parent(s): 8aae31cf494678b6253031c627566e50bc666920
committer: Alexey Botchkov
timestamp: 2019-02-21 12:44:53 +0400
message:
MDEV-7974 backport fix for mysql bug#12161 (XA and binlog).
XA transactions are now kept persistent after prepare.
XA_prepare_log_event implemented, and XA transactions are logged
as XA transactions.
---
mysql-test/suite/rpl/r/rpl_xa.result | 31 +++
mysql-test/suite/rpl/t/rpl_xa.test | 46 ++++
sql/handler.cc | 9 +
sql/handler.h | 25 ++
sql/log.cc | 115 +++++++--
sql/log.h | 10 +
sql/log_event.cc | 449 +++++++++++++++++++++++++++-------
sql/log_event.h | 116 ++++++++-
sql/sql_class.cc | 18 +-
sql/sql_class.h | 45 +++-
sql/sql_connect.cc | 1 +
sql/transaction.cc | 201 ++++++++++++++-
sql/transaction.h | 2 +
storage/innobase/handler/ha_innodb.cc | 45 +++-
14 files changed, 987 insertions(+), 126 deletions(-)
diff --git a/mysql-test/suite/rpl/r/rpl_xa.result b/mysql-test/suite/rpl/r/rpl_xa.result
new file mode 100644
index 0000000..58c1b09
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_xa.result
@@ -0,0 +1,31 @@
+include/master-slave.inc
+[connection master]
+create table t1 (a int, b int) engine=InnoDB;
+xa start 't';
+insert into t1 values(1, 2);
+xa end 't';
+xa prepare 't';
+xa commit 't';
+select * from t1;
+a b
+1 2
+connection slave;
+select * from t1;
+a b
+1 2
+connection master;
+xa start 't';
+insert into t1 values(3, 4);
+xa end 't';
+xa prepare 't';
+xa rollback 't';
+select * from t1;
+a b
+1 2
+connection slave;
+select * from t1;
+a b
+1 2
+connection master;
+drop table t1;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_xa.test b/mysql-test/suite/rpl/t/rpl_xa.test
new file mode 100644
index 0000000..c28a205
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_xa.test
@@ -0,0 +1,46 @@
+source include/have_innodb.inc;
+source include/master-slave.inc;
+
+create table t1 (a int, b int) engine=InnoDB;
+xa start 't';
+insert into t1 values(1, 2);
+xa end 't';
+xa prepare 't';
+xa commit 't';
+select * from t1;
+sync_slave_with_master;
+select * from t1;
+connection master;
+
+xa start 't';
+insert into t1 values(3, 4);
+xa end 't';
+xa prepare 't';
+xa rollback 't';
+select * from t1;
+sync_slave_with_master;
+select * from t1;
+
+connection master;
+SET pseudo_slave_mode=1;
+create table t2 (a int) engine=InnoDB;
+xa start 't';
+insert into t1 values (5, 6);
+xa end 't';
+xa prepare 't';
+xa start 's';
+insert into t2 values (0);
+xa end 's';
+xa prepare 's';
+xa commit 't';
+xa commit 's';
+select * from t1;
+select * from t2;
+sync_slave_with_master;
+select * from t1;
+select * from t2;
+
+connection master;
+SET pseudo_slave_mode=0;
+drop table t1, t2;
+source include/rpl_end.inc;
diff --git a/sql/handler.cc b/sql/handler.cc
index 001055c..3b2a3e0 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1214,6 +1214,9 @@ static int prepare_or_error(handlerton *ht, THD *thd, bool all)
}
+/*static inline */int
+binlog_commit_flush_xid_caches(THD *thd, binlog_cache_mngr *cache_mngr,
+ bool all, my_xid xid);
/**
@retval
0 ok
@@ -1225,6 +1228,7 @@ int ha_prepare(THD *thd)
int error=0, all=1;
THD_TRANS *trans=all ? &thd->transaction.all : &thd->transaction.stmt;
Ha_trx_info *ha_info= trans->ha_list;
+
DBUG_ENTER("ha_prepare");
if (ha_info)
@@ -1250,6 +1254,11 @@ int ha_prepare(THD *thd)
}
}
+ if (unlikely(tc_log->log_prepare(thd, all)))
+ {
+ ha_rollback_trans(thd, all);
+ error=1;
+ }
}
DBUG_RETURN(error);
diff --git a/sql/handler.h b/sql/handler.h
index fc6246c..0c65842 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -810,6 +810,16 @@ struct xid_t {
long gtrid_length;
long bqual_length;
char data[XIDDATASIZE]; // not \0-terminated !
+ /*
+ The size of the string containing serialized Xid representation
+ is computed as a sum of
+ eight as the number of formatting symbols (X'',X'',)
+ plus 2 x XIDDATASIZE (2 due to hex format),
+ plus space for decimal digits of XID::formatID,
+ plus one for 0x0.
+ */
+ static const uint ser_buf_size=
+ 8 + 2 * XIDDATASIZE + 4 * sizeof(long) + 1;
xid_t() {} /* Remove gcc warning */
bool eq(struct xid_t *xid)
@@ -1626,6 +1636,21 @@ struct handlerton
/* backup */
void (*prepare_for_backup)(void);
void (*end_backup)(void);
+ /**
+ @param[in,out] thd pointer to THD
+ @param[in] new_trx_arg pointer to replacement transaction
+ @param[out] ptr_trx_arg double pointer to the transaction being replaced
+
+ The engine's native transaction associated with the THD is replaced
+ with @c new_trx_arg. The old value is returned through a buffer if a
+ non-null pointer is provided with @c ptr_trx_arg.
+ The method is used by the XA START and XA PREPARE handlers to
+ handle an XA transaction that is logged as two parts by the slave applier.
+
+ This interface concerns engines that are aware of XA transactions.
+ void (*replace_native_transaction_in_thd)(THD *thd, void *new_trx_arg,
+ void **ptr_trx_arg);
};
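A note on the ser_buf_size constant added above: the arithmetic can be checked standalone. The following sketch is illustrative only; it assumes XIDDATASIZE == 128 (the value from the XA specification) and an 8-byte long:

/* Standalone sketch of the xid_t::ser_buf_size arithmetic (not patch code). */
#include <cstdio>

static const unsigned XIDDATASIZE= 128;   /* assumption: XA spec value */
static const unsigned ser_buf_size=
    8                      /* formatting symbols: quotes and commas */
    + 2 * XIDDATASIZE      /* XID payload, doubled for hex encoding */
    + 4 * sizeof(long)     /* room for decimal digits of formatID */
    + 1;                   /* terminating 0 */

int main()
{
  printf("%u\n", ser_buf_size);   /* prints 297 on LP64 */
  return 0;
}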
diff --git a/sql/log.cc b/sql/log.cc
index a56117a..316b871 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -87,6 +87,9 @@ static bool binlog_savepoint_rollback_can_release_mdl(handlerton *hton,
static int binlog_commit(handlerton *hton, THD *thd, bool all);
static int binlog_rollback(handlerton *hton, THD *thd, bool all);
static int binlog_prepare(handlerton *hton, THD *thd, bool all);
+static int binlog_xa_recover(handlerton *hton, XID *xid_list, uint len);
+static int binlog_commit_by_xid(handlerton *hton, XID *xid);
+static int binlog_rollback_by_xid(handlerton *hton, XID *xid);
static int binlog_start_consistent_snapshot(handlerton *hton, THD *thd);
static const LEX_CSTRING write_error_msg=
@@ -1688,6 +1691,9 @@ int binlog_init(void *p)
binlog_hton->commit= binlog_commit;
binlog_hton->rollback= binlog_rollback;
binlog_hton->prepare= binlog_prepare;
+ binlog_hton->recover= binlog_xa_recover;
+ binlog_hton->commit_by_xid = binlog_commit_by_xid;
+ binlog_hton->rollback_by_xid = binlog_rollback_by_xid;
binlog_hton->start_consistent_snapshot= binlog_start_consistent_snapshot;
binlog_hton->flags= HTON_NOT_USER_SELECTABLE | HTON_HIDDEN;
return 0;
@@ -1883,23 +1889,16 @@ static inline int
binlog_commit_flush_xid_caches(THD *thd, binlog_cache_mngr *cache_mngr,
bool all, my_xid xid)
{
- if (xid)
- {
- Xid_log_event end_evt(thd, xid, TRUE);
- return (binlog_flush_cache(thd, cache_mngr, &end_evt, all, TRUE, TRUE));
- }
- else
+ /* Mask XA COMMIT ... ONE PHASE as plain BEGIN ... COMMIT */
+ if (!xid)
{
- /*
- Empty xid occurs in XA COMMIT ... ONE PHASE.
- In this case, we do not have a MySQL xid for the transaction, and the
- external XA transaction coordinator will have to handle recovery if
- needed. So we end the transaction with a plain COMMIT query event.
- */
- Query_log_event end_evt(thd, STRING_WITH_LEN("COMMIT"),
- TRUE, TRUE, TRUE, 0);
- return (binlog_flush_cache(thd, cache_mngr, &end_evt, all, TRUE, TRUE));
+ DBUG_ASSERT(thd->transaction.xid_state.xa_state == XA_IDLE &&
+ thd->lex->xa_opt == XA_ONE_PHASE);
+ xid= thd->query_id;
}
+
+ Xid_log_event end_evt(thd, xid, TRUE);
+ return (binlog_flush_cache(thd, cache_mngr, &end_evt, all, TRUE, TRUE));
}
/**
@@ -1961,11 +1960,77 @@ static int binlog_prepare(handlerton *hton, THD *thd, bool all)
do nothing.
just pretend we can do 2pc, so that MySQL won't
switch to 1pc.
- real work will be done in MYSQL_BIN_LOG::log_and_order()
+ real work is done in MYSQL_BIN_LOG::log_and_order()
*/
return 0;
}
+
+static int serialize_xid(XID *xid, char *buf)
+{
+ size_t size;
+ buf[0]= '\'';
+ memcpy(buf+1, xid->data, xid->gtrid_length);
+ size= xid->gtrid_length + 2;
+ buf[size-1]= '\'';
+ if (xid->bqual_length == 0 && xid->formatID == 1)
+ return size;
+
+ memcpy(buf+size, ", '", 3);
+ memcpy(buf+size+3, xid->data+xid->gtrid_length, xid->bqual_length);
+ size+= 3 + xid->bqual_length;
+ buf[size]= '\'';
+ size++;
+ if (xid->formatID != 1)
+ size+= sprintf(buf+size, ", %ld", xid->formatID);
+ return size;
+}
+
+
+static int binlog_xa_recover(handlerton *hton __attribute__((unused)),
+ XID *xid_list __attribute__((unused)),
+ uint len __attribute__((unused)))
+{
+ /* Does nothing. */
+ return 0;
+}
+
+
+static int binlog_commit_by_xid(handlerton *hton, XID *xid)
+{
+ THD *thd= current_thd;
+ const size_t xc_len= sizeof("XA COMMIT ") - 1; // do not count trailing 0
+ char buf[xc_len + xid_t::ser_buf_size];
+ size_t buflen;
+ binlog_cache_mngr *const cache_mngr= thd->binlog_setup_trx_data();
+
+ DBUG_ASSERT(thd->lex->sql_command == SQLCOM_XA_COMMIT);
+
+ if (!cache_mngr)
+ return 1;
+
+ memcpy(buf, "XA COMMIT ", xc_len);
+ buflen= xc_len + serialize_xid(xid, buf+xc_len);
+ Query_log_event qe(thd, buf, buflen, TRUE, FALSE, FALSE, 0);
+ return binlog_flush_cache(thd, cache_mngr, &qe, TRUE, TRUE, TRUE);
+}
+
+
+static int binlog_rollback_by_xid(handlerton *hton, XID *xid)
+{
+ THD *thd= current_thd;
+ const size_t xr_len= sizeof("XA ROLLBACK ") - 1; // do not count trailing 0
+ char buf[xr_len + xid_t::ser_buf_size];
+ size_t buflen;
+
+ DBUG_ASSERT(thd->lex->sql_command == SQLCOM_XA_ROLLBACK);
+ memcpy(buf, "XA ROLLBACK ", xr_len);
+ buflen= xr_len + serialize_xid(xid, buf+xr_len);
+ Query_log_event qe(thd, buf, buflen, FALSE, TRUE, TRUE, 0);
+ return mysql_bin_log.write_event(&qe);
+}
+
+
/*
We flush the cache wrapped in a begin/rollback if:
. aborting a single or multi-statement transaction and;
@@ -9809,6 +9874,24 @@ int TC_LOG_BINLOG::unlog(ulong cookie, my_xid xid)
DBUG_RETURN(BINLOG_COOKIE_GET_ERROR_FLAG(cookie));
}
+
+int TC_LOG_BINLOG::log_prepare(THD *thd, bool all)
+{
+ XID *xid= &thd->transaction.xid_state.xid;
+ XA_prepare_log_event end_evt(thd, xid, FALSE);
+ binlog_cache_mngr *cache_mngr= thd->binlog_setup_trx_data();
+
+ if (!cache_mngr)
+ {
+ WSREP_DEBUG("Skipping empty log_xid: %s", thd->query());
+ return 0;
+ }
+
+ cache_mngr->using_xa= FALSE;
+ return (binlog_flush_cache(thd, cache_mngr, &end_evt, all, TRUE, TRUE));
+}
+
+
void
TC_LOG_BINLOG::commit_checkpoint_notify(void *cookie)
{
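To make the strings produced by the new serialize_xid() concrete, here is a simplified, illustrative copy of its logic together with two sample outputs; the first XID mirrors the 't' used in rpl_xa.test, the second is an arbitrary example:

/* Simplified copy of serialize_xid() for illustration only. */
#include <cstdio>
#include <cstring>

struct xid_lite { long formatID, gtrid_length, bqual_length; char data[128]; };

static int serialize_xid_sketch(const xid_lite *xid, char *buf)
{
  size_t size;
  buf[0]= '\'';
  memcpy(buf+1, xid->data, xid->gtrid_length);
  size= xid->gtrid_length + 2;
  buf[size-1]= '\'';
  if (xid->bqual_length == 0 && xid->formatID == 1)
    return (int) size;                       /* short form: 'gtrid' */

  memcpy(buf+size, ", '", 3);
  memcpy(buf+size+3, xid->data + xid->gtrid_length, xid->bqual_length);
  size+= 3 + xid->bqual_length;
  buf[size++]= '\'';
  if (xid->formatID != 1)
    size+= sprintf(buf+size, ", %ld", xid->formatID);
  return (int) size;
}

int main()
{
  char buf[297];
  xid_lite simple= { 1, 1, 0, "t" };
  xid_lite full=   { 2, 3, 2, "abcde" };     /* gtrid "abc", bqual "de" */
  int len= serialize_xid_sketch(&simple, buf);
  printf("%.*s\n", len, buf);                /* 't' */
  len= serialize_xid_sketch(&full, buf);
  printf("%.*s\n", len, buf);                /* 'abc', 'de', 2 */
  return 0;
}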
diff --git a/sql/log.h b/sql/log.h
index 7dfdb36..92fdf95 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -61,6 +61,7 @@ class TC_LOG
bool need_prepare_ordered,
bool need_commit_ordered) = 0;
virtual int unlog(ulong cookie, my_xid xid)=0;
+ virtual int log_prepare(THD *thd, bool all)= 0;
virtual void commit_checkpoint_notify(void *cookie)= 0;
protected:
@@ -115,6 +116,10 @@ class TC_LOG_DUMMY: public TC_LOG // use it to disable the logging
return 1;
}
int unlog(ulong cookie, my_xid xid) { return 0; }
+ int log_prepare(THD *thd, bool all)
+ {
+ return 0;
+ }
void commit_checkpoint_notify(void *cookie) { DBUG_ASSERT(0); };
};
@@ -198,6 +203,10 @@ class TC_LOG_MMAP: public TC_LOG
int log_and_order(THD *thd, my_xid xid, bool all,
bool need_prepare_ordered, bool need_commit_ordered);
int unlog(ulong cookie, my_xid xid);
+ int log_prepare(THD *thd, bool all)
+ {
+ return 0;
+ }
void commit_checkpoint_notify(void *cookie);
int recover();
@@ -698,6 +707,7 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
int log_and_order(THD *thd, my_xid xid, bool all,
bool need_prepare_ordered, bool need_commit_ordered);
int unlog(ulong cookie, my_xid xid);
+ int log_prepare(THD *thd, bool all);
void commit_checkpoint_notify(void *cookie);
int recover(LOG_INFO *linfo, const char *last_log_name, IO_CACHE *first_log,
Format_description_log_event *fdle, bool do_xa);
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 7a0d0be..6465f0c 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -2139,6 +2139,9 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
case XID_EVENT:
ev = new Xid_log_event(buf, fdle);
break;
+ case XA_PREPARE_LOG_EVENT:
+ ev = new XA_prepare_log_event(buf, fdle);
+ break;
case RAND_EVENT:
ev = new Rand_log_event(buf, fdle);
break;
@@ -2190,7 +2193,6 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
case PREVIOUS_GTIDS_LOG_EVENT:
case TRANSACTION_CONTEXT_EVENT:
case VIEW_CHANGE_EVENT:
- case XA_PREPARE_LOG_EVENT:
ev= new Ignorable_log_event(buf, fdle,
get_type_str((Log_event_type) event_type));
break;
@@ -4418,6 +4420,7 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, size_t que
case SQLCOM_RELEASE_SAVEPOINT:
case SQLCOM_ROLLBACK_TO_SAVEPOINT:
case SQLCOM_SAVEPOINT:
+ case SQLCOM_XA_END:
use_cache= trx_cache= TRUE;
break;
default:
@@ -6222,6 +6225,7 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver)
post_header_len[USER_VAR_EVENT-1]= USER_VAR_HEADER_LEN;
post_header_len[FORMAT_DESCRIPTION_EVENT-1]= FORMAT_DESCRIPTION_HEADER_LEN;
post_header_len[XID_EVENT-1]= XID_HEADER_LEN;
+ post_header_len[XA_PREPARE_LOG_EVENT-1]= XA_PREPARE_HEADER_LEN;
post_header_len[BEGIN_LOAD_QUERY_EVENT-1]= BEGIN_LOAD_QUERY_HEADER_LEN;
post_header_len[EXECUTE_LOAD_QUERY_EVENT-1]= EXECUTE_LOAD_QUERY_HEADER_LEN;
/*
@@ -7874,7 +7878,7 @@ Gtid_log_event::Gtid_log_event(const char *buf, uint event_len,
buf+= 8;
domain_id= uint4korr(buf);
buf+= 4;
- flags2= *buf;
+ flags2= *(buf++);
if (flags2 & FL_GROUP_COMMIT_ID)
{
if (event_len < (uint)header_size + GTID_HEADER_LEN + 2)
@@ -7882,8 +7886,22 @@ Gtid_log_event::Gtid_log_event(const char *buf, uint event_len,
seq_no= 0; // So is_valid() returns false
return;
}
- ++buf;
commit_id= uint8korr(buf);
+ buf+= 8;
+ }
+ if (flags2 & FL_XA_TRANSACTION)
+ {
+ xid.formatID= (long) buf[0];
+ xid.gtrid_length= (long) buf[1];
+ xid.bqual_length= (long) buf[2];
+
+ buf+= 3;
+ if (xid.formatID >= 0)
+ {
+ long data_length= xid.bqual_length + xid.gtrid_length;
+ memcpy(xid.data, buf, data_length);
+ buf+= data_length;
+ }
}
}
@@ -7914,6 +7932,12 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
/* Preserve any DDL or WAITED flag in the slave's binlog. */
if (thd_arg->rgi_slave)
flags2|= (thd_arg->rgi_slave->gtid_ev_flags2 & (FL_DDL|FL_WAITED));
+ if (thd->transaction.xid_state.xa_state == XA_IDLE &&
+ thd->lex->xa_opt != XA_ONE_PHASE)
+ {
+ flags2|= FL_XA_TRANSACTION;
+ xid= thd->transaction.xid_state.xid;
+ }
}
@@ -7956,7 +7980,7 @@ Gtid_log_event::peek(const char *event_start, size_t event_len,
bool
Gtid_log_event::write()
{
- uchar buf[GTID_HEADER_LEN+2];
+ uchar buf[GTID_HEADER_LEN+2+sizeof(XID)];
size_t write_len;
int8store(buf, seq_no);
@@ -7968,8 +7992,25 @@ Gtid_log_event::write()
write_len= GTID_HEADER_LEN + 2;
}
else
+ write_len= 13;
+
+ if (flags2 & FL_XA_TRANSACTION)
{
- bzero(buf+13, GTID_HEADER_LEN-13);
+ buf[write_len]= (uchar) ((char) xid.formatID);
+ buf[write_len+1]= (uchar) xid.gtrid_length;
+ buf[write_len+2]= (uchar) xid.bqual_length;
+ write_len+= 3;
+ if (xid.formatID >= 0)
+ {
+ long data_length= xid.bqual_length + xid.gtrid_length;
+ memcpy(buf+write_len, xid.data, data_length);
+ write_len+= data_length;
+ }
+ }
+
+ if (write_len < GTID_HEADER_LEN)
+ {
+ bzero(buf+write_len, GTID_HEADER_LEN-write_len);
write_len= GTID_HEADER_LEN;
}
return write_header(write_len) ||
@@ -8012,7 +8053,7 @@ Gtid_log_event::make_compatible_event(String *packet, bool *need_dummy_event,
void
Gtid_log_event::pack_info(Protocol *protocol)
{
- char buf[6+5+10+1+10+1+20+1+4+20+1];
+ char buf[6+5+10+1+10+1+20+1+4+20+1+5+128];
char *p;
p = strmov(buf, (flags2 & FL_STANDALONE ? "GTID " : "BEGIN GTID "));
p= longlong10_to_str(domain_id, p, 10);
@@ -8026,6 +8067,12 @@ Gtid_log_event::pack_info(Protocol *protocol)
p= longlong10_to_str(commit_id, p, 10);
}
+ if (flags2 & FL_XA_TRANSACTION)
+ {
+ p= strmov(p, " XID :");
+ p= strnmov(p, xid.data, xid.bqual_length + xid.gtrid_length);
+ }
+
protocol->store(buf, p-buf, &my_charset_bin);
}
@@ -8079,11 +8126,25 @@ Gtid_log_event::do_apply_event(rpl_group_info *rgi)
thd->lex->sql_command= SQLCOM_BEGIN;
thd->is_slave_error= 0;
status_var_increment(thd->status_var.com_stat[thd->lex->sql_command]);
- if (trans_begin(thd, 0))
+ if (flags2 & FL_XA_TRANSACTION)
{
- DBUG_PRINT("error", ("trans_begin() failed"));
- thd->is_slave_error= 1;
+ thd->lex->xid= &xid;
+ thd->lex->xa_opt= XA_NONE;
+ if (trans_xa_start(thd))
+ {
+ DBUG_PRINT("error", ("trans_xa_start() failed"));
+ thd->is_slave_error= 1;
+ }
}
+ else
+ {
+ if (trans_begin(thd, 0))
+ {
+ DBUG_PRINT("error", ("trans_begin() failed"));
+ thd->is_slave_error= 1;
+ }
+ }
+
thd->update_stats();
if (likely(!thd->is_slave_error))
@@ -8202,9 +8263,29 @@ Gtid_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
buf, print_event_info->delimiter))
goto err;
}
- if (!(flags2 & FL_STANDALONE))
- if (my_b_printf(&cache, is_flashback ? "COMMIT\n%s\n" : "BEGIN\n%s\n", print_event_info->delimiter))
+ if ((flags2 & FL_XA_TRANSACTION) && !is_flashback)
+ {
+ my_b_write_string(&cache, "XA START '");
+ my_b_write(&cache, (uchar *) xid.data, xid.gtrid_length);
+ my_b_write_string(&cache, "'");
+ if (xid.bqual_length > 0 || xid.formatID != 1)
+ {
+ my_b_write_string(&cache, ", '");
+ my_b_write(&cache, (uchar *) xid.data+xid.gtrid_length, xid.bqual_length);
+ my_b_write_string(&cache, "'");
+ if (xid.formatID != 1)
+ if (my_b_printf(&cache, ", %d", xid.formatID))
+ goto err;
+ }
+ if (my_b_printf(&cache, "%s\n", print_event_info->delimiter))
goto err;
+ }
+ else if (!(flags2 & FL_STANDALONE))
+ {
+ if (my_b_printf(&cache, is_flashback ? "COMMIT\n%s\n" : "BEGIN\n%s\n",
+ print_event_info->delimiter))
+ goto err;
+ }
return cache.flush_data();
err:
@@ -8825,80 +8906,20 @@ bool slave_execute_deferred_events(THD *thd)
/**************************************************************************
- Xid_log_event methods
+ Xid_apply_log_event methods
**************************************************************************/
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
-void Xid_log_event::pack_info(Protocol *protocol)
-{
- char buf[128], *pos;
- pos= strmov(buf, "COMMIT /* xid=");
- pos= longlong10_to_str(xid, pos, 10);
- pos= strmov(pos, " */");
- protocol->store(buf, (uint) (pos-buf), &my_charset_bin);
-}
-#endif
-
-/**
- @note
- It's ok not to use int8store here,
- as long as xid_t::set(ulonglong) and
- xid_t::get_my_xid doesn't do it either.
- We don't care about actual values of xids as long as
- identical numbers compare identically
-*/
-Xid_log_event::
-Xid_log_event(const char* buf,
- const Format_description_log_event *description_event)
- :Log_event(buf, description_event)
+int Xid_apply_log_event::record_gtid(const rpl_gtid *gtid, uint64 sub_id,
+ void **out_hton)
{
- /* The Post-Header is empty. The Variable Data part begins immediately. */
- buf+= description_event->common_header_len +
- description_event->post_header_len[XID_EVENT-1];
- memcpy((char*) &xid, buf, sizeof(xid));
+ return rpl_global_gtid_slave_state->record_gtid(thd, gtid, sub_id, true,
+ false, out_hton);
}
-#ifndef MYSQL_CLIENT
-bool Xid_log_event::write()
-{
- DBUG_EXECUTE_IF("do_not_write_xid", return 0;);
- return write_header(sizeof(xid)) ||
- write_data((uchar*)&xid, sizeof(xid)) ||
- write_footer();
-}
-#endif
-
-
-#ifdef MYSQL_CLIENT
-bool Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
-{
- Write_on_release_cache cache(&print_event_info->head_cache, file,
- Write_on_release_cache::FLUSH_F, this);
-
- if (!print_event_info->short_form)
- {
- char buf[64];
- longlong10_to_str(xid, buf, 10);
-
- if (print_header(&cache, print_event_info, FALSE) ||
- my_b_printf(&cache, "\tXid = %s\n", buf))
- goto err;
- }
- if (my_b_printf(&cache, is_flashback ? "BEGIN%s\n" : "COMMIT%s\n",
- print_event_info->delimiter))
- goto err;
-
- return cache.flush_data();
-err:
- return 1;
-}
-#endif /* MYSQL_CLIENT */
-
-
-#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
-int Xid_log_event::do_apply_event(rpl_group_info *rgi)
+int Xid_apply_log_event::do_apply_event(rpl_group_info *rgi)
{
bool res;
int err;
@@ -8929,8 +8950,7 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
rgi->gtid_pending= false;
gtid= rgi->current_gtid;
- err= rpl_global_gtid_slave_state->record_gtid(thd, &gtid, sub_id, true,
- false, &hton);
+ err= record_gtid(&gtid, sub_id, &hton);
if (unlikely(err))
{
int ec= thd->get_stmt_da()->sql_errno();
@@ -8959,8 +8979,7 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
general_log_print(thd, COM_QUERY,
"COMMIT /* implicit, from Xid_log_event */");
thd->variables.option_bits&= ~OPTION_GTID_BEGIN;
- res= trans_commit(thd); /* Automatically rolls back on error. */
- thd->mdl_context.release_transactional_locks();
+ res= do_commit();
if (likely(!res) && sub_id)
rpl_global_gtid_slave_state->update_state_hash(sub_id, &gtid, hton, rgi);
@@ -8973,10 +8992,11 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
return res;
}
+
Log_event::enum_skip_reason
-Xid_log_event::do_shall_skip(rpl_group_info *rgi)
+Xid_apply_log_event::do_shall_skip(rpl_group_info *rgi)
{
- DBUG_ENTER("Xid_log_event::do_shall_skip");
+ DBUG_ENTER("Xid_apply_log_event::do_shall_skip");
if (rgi->rli->slave_skip_counter > 0)
{
DBUG_ASSERT(!rgi->rli->get_flag(Relay_log_info::IN_TRANSACTION));
@@ -9000,6 +9020,268 @@ Xid_log_event::do_shall_skip(rpl_group_info *rgi)
#endif
DBUG_RETURN(Log_event::do_shall_skip(rgi));
}
+
+#endif /*MYSQL_SERVER*/
+
+
+/**************************************************************************
+ Xid_log_event methods
+**************************************************************************/
+
+#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+void Xid_log_event::pack_info(Protocol *protocol)
+{
+ char buf[128], *pos;
+ pos= strmov(buf, "COMMIT /* xid=");
+ pos= longlong10_to_str(xid, pos, 10);
+ pos= strmov(pos, " */");
+ protocol->store(buf, (uint) (pos-buf), &my_charset_bin);
+}
+#endif
+
+/**
+ @note
+ It's ok not to use int8store here,
+ as long as xid_t::set(ulonglong) and
+ xid_t::get_my_xid doesn't do it either.
+ We don't care about actual values of xids as long as
+ identical numbers compare identically
+*/
+
+Xid_log_event::
+Xid_log_event(const char* buf,
+ const Format_description_log_event *description_event)
+ :Xid_apply_log_event(buf, description_event)
+{
+ /* The Post-Header is empty. The Variable Data part begins immediately. */
+ buf+= description_event->common_header_len +
+ description_event->post_header_len[XID_EVENT-1];
+ memcpy((char*) &xid, buf, sizeof(xid));
+}
+
+
+#ifndef MYSQL_CLIENT
+bool Xid_log_event::write()
+{
+ DBUG_EXECUTE_IF("do_not_write_xid", return 0;);
+ return write_header(sizeof(xid)) ||
+ write_data((uchar*)&xid, sizeof(xid)) ||
+ write_footer();
+}
+#endif
+
+
+#ifdef MYSQL_CLIENT
+bool Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
+{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F, this);
+
+ if (!print_event_info->short_form)
+ {
+ char buf[64];
+ longlong10_to_str(xid, buf, 10);
+
+ if (print_header(&cache, print_event_info, FALSE) ||
+ my_b_printf(&cache, "\tXid = %s\n", buf))
+ goto err;
+ }
+ if (my_b_printf(&cache, is_flashback ? "BEGIN%s\n" : "COMMIT%s\n",
+ print_event_info->delimiter))
+ goto err;
+
+ return cache.flush_data();
+err:
+ return 1;
+}
+#endif /* MYSQL_CLIENT */
+
+
+#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+int Xid_log_event::do_commit()
+{
+ bool res;
+ res= trans_commit(thd); /* Automatically rolls back on error. */
+ thd->mdl_context.release_transactional_locks();
+ return res;
+}
+#endif /* !MYSQL_CLIENT */
+
+
+char *XA_prepare_log_event::event_xid_t::serialize(char *buf) const
+{
+ char *c= buf;
+
+ c[0]= '\'';
+ memcpy(c+1, data, gtrid_length);
+ c[gtrid_length+1]= '\'';
+ c+= gtrid_length + 2;
+
+ if (bqual_length)
+ {
+ c[0]= ',';
+ c[1]= '\'';
+ memcpy(c+2, data+gtrid_length, bqual_length);
+ c[bqual_length+2]= '\'';
+ c+= bqual_length+3;
+ }
+
+ if (formatID != 1)
+ sprintf(c, ",%lu", formatID);
+ else
+ c[0]=0;
+
+ return buf;
+}
+
+
+/**************************************************************************
+ XA_prepare_log_event methods
+**************************************************************************/
+/**
+ @note
+ It's ok not to use int8store here,
+ as long as xid_t::set(ulonglong) and
+ xid_t::get_n_xid doesn't do it either.
+ We don't care about actual values of xids as long as
+ identical numbers compare identically
+*/
+
+XA_prepare_log_event::
+XA_prepare_log_event(const char* buf,
+ const Format_description_log_event *description_event)
+ :Xid_apply_log_event(buf, description_event)
+{
+ uint32 temp= 0;
+ uint8 temp_byte;
+
+ buf+= description_event->common_header_len +
+ description_event->post_header_len[XA_PREPARE_LOG_EVENT-1];
+ memcpy(&temp_byte, buf, 1);
+ one_phase= (bool) temp_byte;
+ buf += sizeof(temp_byte);
+ memcpy(&temp, buf, sizeof(temp));
+ m_xid.formatID= le32toh(temp);
+ buf += sizeof(temp);
+ memcpy(&temp, buf, sizeof(temp));
+ m_xid.gtrid_length= le32toh(temp);
+ buf += sizeof(temp);
+ memcpy(&temp, buf, sizeof(temp));
+ m_xid.bqual_length= le32toh(temp);
+ buf += sizeof(temp);
+ memcpy(m_xid.data, buf, m_xid.gtrid_length + m_xid.bqual_length);
+
+ xid= NULL;
+}
+
+
+#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+void XA_prepare_log_event::pack_info(Protocol *protocol)
+{
+ char buf[ser_buf_size];
+ char query[sizeof("XA COMMIT ONE PHASE") + 1 + sizeof(buf)];
+
+ /* RHS of the following assert is unknown to client sources */
+ compile_time_assert(ser_buf_size == XID::ser_buf_size);
+ m_xid.serialize(buf);
+ sprintf(query,
+ (one_phase ? "XA COMMIT %s ONE PHASE" : "XA PREPARE %s"),
+ buf);
+
+ protocol->store(query, strlen(query), &my_charset_bin);
+}
+#endif
+
+
+#ifndef MYSQL_CLIENT
+bool XA_prepare_log_event::write()
+{
+ uchar data[1 + 4 + 4 + 4];
+ uint8 one_phase_byte= one_phase;
+
+ data[0]= one_phase;
+ int4store(data+1, static_cast<XID*>(xid)->formatID);
+ int4store(data+(1+4), static_cast<XID*>(xid)->gtrid_length);
+ int4store(data+(1+4+4), static_cast<XID*>(xid)->bqual_length);
+
+ DBUG_ASSERT(xid_bufs_size == sizeof(data) - 1);
+
+ return write_header(sizeof(one_phase_byte) + xid_bufs_size +
+ static_cast<XID*>(xid)->gtrid_length +
+ static_cast<XID*>(xid)->bqual_length) ||
+ write_data(data, sizeof(data)) ||
+ write_data((uchar*) static_cast<XID*>(xid)->data,
+ static_cast<XID*>(xid)->gtrid_length +
+ static_cast<XID*>(xid)->bqual_length) ||
+ write_footer();
+}
+#endif
+
+
+#ifdef MYSQL_CLIENT
+bool XA_prepare_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
+{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F, this);
+ char buf[ser_buf_size];
+
+ m_xid.serialize(buf);
+
+ if (!print_event_info->short_form)
+ {
+ print_header(&cache, print_event_info, FALSE);
+ if (my_b_printf(&cache, "\tXID = %s\n", buf))
+ goto error;
+ }
+
+ if (my_b_printf(&cache, "XA PREPARE %s\n%s\n",
+ buf, print_event_info->delimiter))
+ goto error;
+
+ return cache.flush_data();
+error:
+ return TRUE;
+}
+#endif /* MYSQL_CLIENT */
+
+
+#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+int XA_prepare_log_event::record_gtid(const rpl_gtid *gtid, uint64 sub_id,
+ void **out_hton)
+{
+ int err;
+ xa_states c_state= thd->transaction.xid_state.xa_state;
+ thd->transaction.xid_state.xa_state= XA_ACTIVE;
+ err= rpl_global_gtid_slave_state->record_gtid(thd, gtid, sub_id, true,
+ false, out_hton);
+ thd->transaction.xid_state.xa_state= c_state;
+ return err;
+}
+
+
+int XA_prepare_log_event::do_commit()
+{
+ int res;
+ xid_t xid;
+ xid.set(m_xid.formatID,
+ m_xid.data, m_xid.gtrid_length,
+ m_xid.data + m_xid.gtrid_length, m_xid.bqual_length);
+
+ thd->lex->xid= &xid;
+ if (!one_phase)
+ {
+ thd->lex->sql_command= SQLCOM_XA_PREPARE;
+ if ((res= trans_xa_prepare(thd)) == 0)
+ res= applier_reset_xa_trans(thd);
+ }
+ else
+ {
+ res= trans_xa_commit(thd);
+ thd->mdl_context.release_transactional_locks();
+ }
+
+ return res;
+}
#endif /* !MYSQL_CLIENT */
@@ -14789,7 +15071,6 @@ bool event_that_should_be_ignored(const char *buf)
event_type == PREVIOUS_GTIDS_LOG_EVENT ||
event_type == TRANSACTION_CONTEXT_EVENT ||
event_type == VIEW_CHANGE_EVENT ||
- event_type == XA_PREPARE_LOG_EVENT ||
(uint2korr(buf + FLAGS_OFFSET) & LOG_EVENT_IGNORABLE_F))
return 1;
return 0;
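The reader/writer pair added above fixes the body of XA_PREPARE_LOG_EVENT as: one one_phase flag byte, three little-endian 32-bit length fields (xid_bufs_size == 12), then the gtrid and bqual bytes back to back. A small illustrative computation of the body size write() emits, assuming the one-byte gtrid 't' from the test:

/* Sketch of the XA_prepare_log_event body size (illustration only). */
#include <cstdio>

int main()
{
  const int xid_bufs_size= 12;   /* 3 x int4store'd length fields */
  long gtrid_length= 1;          /* XID 't' from rpl_xa.test */
  long bqual_length= 0;

  /* write() emits: 1 flag byte + length fields + xid data */
  long body= 1 + xid_bufs_size + gtrid_length + bqual_length;
  printf("event body: %ld bytes\n", body);   /* 14 */
  return 0;
}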
diff --git a/sql/log_event.h b/sql/log_event.h
index 38a40c9..f2f784b 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -217,6 +217,7 @@ class String;
#define GTID_HEADER_LEN 19
#define GTID_LIST_HEADER_LEN 4
#define START_ENCRYPTION_HEADER_LEN 0
+#define XA_PREPARE_HEADER_LEN 0
/*
Max number of possible extra bytes in a replication event compared to a
@@ -3016,6 +3017,30 @@ class Rand_log_event: public Log_event
#endif
};
+
+class Xid_apply_log_event: public Log_event
+{
+public:
+#ifdef MYSQL_SERVER
+ Xid_apply_log_event(THD* thd_arg):
+ Log_event(thd_arg, 0, TRUE) {}
+#endif
+ Xid_apply_log_event(const char* buf,
+ const Format_description_log_event *description_event):
+ Log_event(buf, description_event) {}
+
+ ~Xid_apply_log_event() {}
+ bool is_valid() const { return 1; }
+private:
+#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
+ virtual int do_commit()= 0;
+ virtual int record_gtid(const rpl_gtid *gtid, uint64 sub_id, void **out_hton);
+ virtual int do_apply_event(rpl_group_info *rgi);
+ enum_skip_reason do_shall_skip(rpl_group_info *rgi);
+#endif
+};
+
+
/**
@class Xid_log_event
@@ -3028,14 +3053,14 @@ class Rand_log_event: public Log_event
typedef ulonglong my_xid; // this line is the same as in handler.h
#endif
-class Xid_log_event: public Log_event
+class Xid_log_event: public Xid_apply_log_event
{
- public:
- my_xid xid;
+public:
+ my_xid xid;
#ifdef MYSQL_SERVER
Xid_log_event(THD* thd_arg, my_xid x, bool direct):
- Log_event(thd_arg, 0, TRUE), xid(x)
+ Xid_apply_log_event(thd_arg)
{
if (direct)
cache_type= Log_event::EVENT_NO_CACHE;
@@ -3055,15 +3080,85 @@ class Xid_log_event: public Log_event
#ifdef MYSQL_SERVER
bool write();
#endif
- bool is_valid() const { return 1; }
private:
#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
- virtual int do_apply_event(rpl_group_info *rgi);
- enum_skip_reason do_shall_skip(rpl_group_info *rgi);
+ int do_commit();
#endif
};
+
+/**
+ @class XA_prepare_log_event
+
+ Similar to Xid_log_event except that
+ - it is specific to XA transactions
+ - it carries out the prepare logic rather than the final commit
+ when the @c one_phase member is off.
+ From the grouping perspective the event finalizes the current "prepare"
+ group started with the XA START Query-log-event.
+ When @c one_phase is false, the Commit or Rollback for the XA transaction
+ is logged separately from the prepare-group events, forming a group of
+ its own.
+*/
+
+class XA_prepare_log_event: public Xid_apply_log_event
+{
+protected:
+ /* The event_xid_t members were copied from handler.h */
+ struct event_xid_t
+ {
+ long formatID;
+ long gtrid_length;
+ long bqual_length;
+ char data[MYSQL_XIDDATASIZE]; // not \0-terminated !
+ char *serialize(char *buf) const;
+ };
+
+ /* size of serialization buffer is explained in $MYSQL/sql/xa.h. */
+ static const uint ser_buf_size=
+ 8 + 2 * MYSQL_XIDDATASIZE + 4 * sizeof(long) + 1;
+
+ /* Total size of buffers to hold serialized members of XID struct */
+ static const int xid_bufs_size= 12;
+ event_xid_t m_xid;
+ void *xid;
+ bool one_phase;
+
+public:
+#ifdef MYSQL_SERVER
+ XA_prepare_log_event(THD* thd_arg, XID *xid_arg, bool one_phase_arg):
+ Xid_apply_log_event(thd_arg), xid(xid_arg), one_phase(one_phase_arg)
+ {
+ cache_type= Log_event::EVENT_NO_CACHE;
+ }
+#ifdef HAVE_REPLICATION
+ void pack_info(Protocol* protocol);
+#endif /* HAVE_REPLICATION */
+#else
+ bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
+#endif
+ XA_prepare_log_event(const char* buf,
+ const Format_description_log_event *description_event);
+ ~XA_prepare_log_event() {}
+ Log_event_type get_type_code() { return XA_PREPARE_LOG_EVENT; }
+ int get_data_size()
+ {
+ return xid_bufs_size + m_xid.gtrid_length + m_xid.bqual_length;
+ }
+
+#ifdef MYSQL_SERVER
+ bool write();
+#endif
+
+private:
+#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
+ int record_gtid(const rpl_gtid *gtid, uint64 sub_id, void **out_hton);
+ int do_commit();
+#endif
+};
+
+
/**
@class User_var_log_event
@@ -3376,6 +3471,11 @@ class Gtid_log_event: public Log_event
uint64 seq_no;
uint64 commit_id;
uint32 domain_id;
+#ifdef MYSQL_SERVER
+ XID xid;
+#else
+ struct st_mysql_xid xid;
+#endif
uchar flags2;
/* Flags2. */
@@ -3404,6 +3504,8 @@ class Gtid_log_event: public Log_event
static const uchar FL_WAITED= 16;
/* FL_DDL is set for event group containing DDL. */
static const uchar FL_DDL= 32;
+ /* FL_XA_TRANSACTION is set for XA transaction. */
+ static const uchar FL_XA_TRANSACTION= 64;
#ifdef MYSQL_SERVER
Gtid_log_event(THD *thd_arg, uint64 seq_no, uint32 domain_id, bool standalone,
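When FL_XA_TRANSACTION is set, a GTID event grows by three length bytes (formatID, gtrid_length, bqual_length) followed by the raw XID data. A minimal decoding sketch with local, illustrative names, mirroring the reader in log_event.cc above:

/* Decoding sketch for the optional XID tail of a GTID event (illustration). */
#include <cstring>

struct xid_tail { long formatID, gtrid_length, bqual_length; char data[128]; };

const char *read_xid_tail(const char *buf, xid_tail *x)
{
  x->formatID= (long) buf[0];      /* one byte each for the three fields */
  x->gtrid_length= (long) buf[1];
  x->bqual_length= (long) buf[2];
  buf+= 3;
  if (x->formatID >= 0)            /* negative formatID denotes a null XID */
  {
    long data_length= x->gtrid_length + x->bqual_length;
    memcpy(x->data, buf, data_length);
    buf+= data_length;
  }
  return buf;
}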
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index fa2f866..88bbc9a 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1461,12 +1461,19 @@ void THD::cleanup(void)
DBUG_ASSERT(cleanup_done == 0);
set_killed(KILL_CONNECTION);
-#ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE
if (transaction.xid_state.xa_state == XA_PREPARED)
{
-#error xid_state in the cache should be replaced by the allocated value
+ trans_xa_detach(this);
+ transaction.xid_state.xa_state= XA_NOTR;
+ transaction.xid_state.rm_error= 0;
+ }
+ else
+ {
+ transaction.xid_state.xa_state= XA_NOTR;
+ transaction.xid_state.rm_error= 0;
+ trans_rollback(this);
+ xid_cache_delete(this, &transaction.xid_state);
}
-#endif
mysql_ha_cleanup(this);
locked_tables_list.unlock_locked_tables(this);
@@ -1474,11 +1481,6 @@ void THD::cleanup(void)
delete_dynamic(&user_var_events);
close_temporary_tables();
- transaction.xid_state.xa_state= XA_NOTR;
- transaction.xid_state.rm_error= 0;
- trans_rollback(this);
- xid_cache_delete(this, &transaction.xid_state);
-
DBUG_ASSERT(open_tables == NULL);
/*
If the thread was in the middle of an ongoing transaction (rolled
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 69fabee..d13a3a4 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1255,6 +1255,18 @@ typedef struct st_xid_state {
/* Error reported by the Resource Manager (RM) to the Transaction Manager. */
uint rm_error;
XID_cache_element *xid_cache_element;
+ /*
+ Binary logging status.
+ It is set to TRUE at XA PREPARE if the transaction was written
+ to the binlog.
+ Naturally FALSE means the transaction was not written to
+ the binlog. That happens if the transaction did not modify anything
+ or binlogging was turned off. In that case we shouldn't binlog
+ the subsequent XA COMMIT/ROLLBACK.
+ A transaction recovered after server restart always sets it to TRUE.
+ That can cause inconsistencies (should be fixed?).
+ */
+ bool is_binlogged;
/**
Check that XA transaction has an uncommitted work. Report an error
@@ -1278,6 +1290,12 @@ typedef struct st_xid_state {
}
return false;
}
+
+ void reset()
+ {
+ xid.null();
+ is_binlogged= FALSE;
+ }
} XID_STATE;
void xid_cache_init(void);
@@ -1917,6 +1935,17 @@ struct Ha_data
*/
void *ha_ptr;
/**
+ A place to remember the engine-specific "native" transaction object,
+ providing a storage engine detach/re-attach facility.
+ The server-level transaction object can dissociate from storage engine
+ transactions. The released "native" transaction reference
+ can be held in this member until it is restored later.
+ Lifetime: depends on the caller of @c hton::replace_native_transaction_in_thd.
+ For instance, for a slave server applier handling an XA transaction
+ it is from XA START to XA PREPARE.
+ */
+ void *ha_ptr_backup;
+ /**
0: Life time: one statement within a transaction. If @@autocommit is
on, also represents the entire transaction.
@sa trans_register_ha()
@@ -1932,7 +1961,7 @@ struct Ha_data
non-NULL: engine is bound to this thread, engine shutdown forbidden
*/
plugin_ref lock;
- Ha_data() :ha_ptr(NULL) {}
+ Ha_data() :ha_ptr(NULL), ha_ptr_backup(NULL) {}
};
/**
@@ -2603,7 +2632,7 @@ class THD :public Statement,
then.
*/
if (!xid_state.rm_error)
- xid_state.xid.null();
+ xid_state.reset();
free_root(&mem_root,MYF(MY_KEEP_PREALLOC));
DBUG_VOID_RETURN;
}
@@ -6456,6 +6485,18 @@ inline bool add_group_to_list(THD *thd, Item *item, bool asc)
return thd->lex->current_select->add_group_to_list(thd, item, asc);
}
+/**
+ @param thd thread context
+ @param hton pointer to handlerton
+ @return address of the placeholder of handlerton's specific transaction
+ object (data)
+*/
+
+inline void **thd_ha_data_backup(const THD *thd, const struct handlerton *hton)
+{
+ return (void **) &thd->ha_data[hton->slot].ha_ptr_backup;
+}
+
inline Item *and_conds(THD *thd, Item *a, Item *b)
{
if (!b) return a;
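The new ha_ptr_backup slot gives each engine a place to park its detached native transaction. The sketch below shows the intended save/restore discipline; example_detach and example_attach are hypothetical names, while replace_native_transaction_in_thd and thd_ha_data_backup are the interfaces introduced by this patch:

/* Illustrative-only save/restore of an engine's native trx. */
void example_detach(handlerton *hton, THD *thd)
{
  /* XA START on an applier: park the engine's current native trx. */
  hton->replace_native_transaction_in_thd(thd, NULL,
                                          thd_ha_data_backup(thd, hton));
}

void example_attach(handlerton *hton, THD *thd)
{
  /* After XA PREPARE: restore the parked trx and clear the slot. */
  void **trx_backup= thd_ha_data_backup(thd, hton);
  hton->replace_native_transaction_in_thd(thd, *trx_backup, NULL);
  *trx_backup= NULL;
}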
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index b48070b..3e4a067 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -1414,6 +1414,7 @@ void do_handle_one_connection(CONNECT *connect)
#endif
end_thread:
close_connection(thd);
+ thd->get_stmt_da()->reset_diagnostics_area();
if (thd->userstat_running)
update_global_user_stats(thd, create_user, time(NULL));
diff --git a/sql/transaction.cc b/sql/transaction.cc
index 13614d3..c9b85d8 100644
--- a/sql/transaction.cc
+++ b/sql/transaction.cc
@@ -790,6 +790,135 @@ bool trans_release_savepoint(THD *thd, LEX_CSTRING name)
/**
+ Detach the current XA transaction.
+
+ @param thd Current thread
+
+ @retval FALSE Success
+ @retval TRUE Failure
+*/
+
+bool trans_xa_detach(THD *thd)
+{
+ XID_STATE *xid_s= &thd->transaction.xid_state;
+ Ha_trx_info *ha_info, *ha_info_next;
+
+ DBUG_ENTER("trans_xa_detach");
+
+// DBUG_ASSERT(xid_s->xa_state == XA_PREPARED &&
+// xid_cache_search(thd, &xid_s->xid));
+
+ xid_cache_delete(thd, xid_s);
+ if (xid_cache_insert(&xid_s->xid, XA_PREPARED))
+ DBUG_RETURN(TRUE);
+
+ for (ha_info= thd->transaction.all.ha_list;
+ ha_info;
+ ha_info= ha_info_next)
+ {
+ ha_info_next= ha_info->next();
+ ha_info->reset(); /* keep it conveniently zero-filled */
+ }
+
+ thd->transaction.all.ha_list= 0;
+ thd->transaction.all.no_2pc= 0;
+
+ DBUG_RETURN(FALSE);
+}
+
+
+void attach_native_trx(THD *thd);
+/**
+ This is a specific to "slave" applier collection of standard cleanup
+ actions to reset XA transaction states at the end of XA prepare rather than
+ to do it at the transaction commit, see @c ha_commit_one_phase.
+ THD of the slave applier is dissociated from a transaction object in engine
+ that continues to exist there.
+
+ @param THD current thread
+ @return the value of is_error()
+*/
+
+bool applier_reset_xa_trans(THD *thd)
+{
+ XID_STATE *xid_s= &thd->transaction.xid_state;
+
+ thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
+ thd->server_status&=
+ ~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY);
+ DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS"));
+ xid_cache_delete(thd, xid_s);
+ if (xid_cache_insert(&xid_s->xid, XA_PREPARED))
+ return true;
+
+ attach_native_trx(thd);
+ thd->transaction.cleanup();
+ thd->transaction.xid_state.xa_state= XA_NOTR;
+ thd->mdl_context.release_transactional_locks();
+
+ return thd->is_error();
+}
+
+
+/**
+ The function detaches the existing storage engine transaction
+ context from thd. A backup area to save it is provided to the low-level
+ storage engine function.
+
+ It is invoked by plugin_foreach() from
+ trans_xa_start() for each storage engine.
+
+ @param[in,out] thd Thread context
+ @param plugin Reference to handlerton
+
+ @return FALSE on success, TRUE otherwise.
+*/
+
+my_bool detach_native_trx(THD *thd, plugin_ref plugin, void *unused)
+{
+ handlerton *hton= plugin_hton(plugin);
+ if (hton->replace_native_transaction_in_thd)
+ hton->replace_native_transaction_in_thd(thd, NULL,
+ thd_ha_data_backup(thd, hton));
+
+ return FALSE;
+
+}
+
+/**
+ The function restores previously saved storage engine transaction context.
+
+ @param thd Thread context
+*/
+void attach_native_trx(THD *thd)
+{
+ Ha_trx_info *ha_info= thd->transaction.all.ha_list;
+ Ha_trx_info *ha_info_next;
+
+ if (ha_info)
+ {
+ for (; ha_info; ha_info= ha_info_next)
+ {
+ handlerton *hton= ha_info->ht();
+ if (hton->replace_native_transaction_in_thd)
+ {
+ /* restore the saved original engine transaction's link with thd */
+ void **trx_backup= thd_ha_data_backup(thd, hton);
+
+ hton->
+ replace_native_transaction_in_thd(thd, *trx_backup, NULL);
+ *trx_backup= NULL;
+ }
+ ha_info_next= ha_info->next();
+ ha_info->reset();
+ }
+ }
+ thd->transaction.all.ha_list= 0;
+ thd->transaction.all.no_2pc= 0;
+}
+
+
+/**
Starts an XA transaction with the given xid value.
@param thd Current thread
@@ -833,6 +962,17 @@ bool trans_xa_start(THD *thd)
trans_rollback(thd);
DBUG_RETURN(true);
}
+
+ if (thd->variables.pseudo_slave_mode || thd->slave_thread)
+ {
+ /*
+ In case of slave thread applier or processing binlog by client,
+ detach the "native" thd's trx in favor of dynamically created.
+ */
+ plugin_foreach(thd, detach_native_trx,
+ MYSQL_STORAGE_ENGINE_PLUGIN, NULL);
+ }
+
DBUG_RETURN(FALSE);
}
@@ -862,7 +1002,15 @@ bool trans_xa_end(THD *thd)
else if (!thd->transaction.xid_state.xid.eq(thd->lex->xid))
my_error(ER_XAER_NOTA, MYF(0));
else if (!xa_trans_rolled_back(&thd->transaction.xid_state))
- thd->transaction.xid_state.xa_state= XA_IDLE;
+ {
+ if ((WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()) &&
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ thd->query(), thd->query_length(),
+ TRUE, FALSE, FALSE, 0))
+ my_error(ER_XAER_RMERR, MYF(0));
+ else
+ thd->transaction.xid_state.xa_state= XA_IDLE;
+ }
DBUG_RETURN(thd->is_error() ||
thd->transaction.xid_state.xa_state != XA_IDLE);
@@ -880,6 +1028,8 @@ bool trans_xa_end(THD *thd)
bool trans_xa_prepare(THD *thd)
{
+ int res= 1;
+
DBUG_ENTER("trans_xa_prepare");
if (thd->transaction.xid_state.xa_state != XA_IDLE)
@@ -894,10 +1044,14 @@ bool trans_xa_prepare(THD *thd)
my_error(ER_XA_RBROLLBACK, MYF(0));
}
else
+ {
+ res= 0;
thd->transaction.xid_state.xa_state= XA_PREPARED;
+ if (thd->variables.pseudo_slave_mode)
+ res= applier_reset_xa_trans(thd);
+ }
- DBUG_RETURN(thd->is_error() ||
- thd->transaction.xid_state.xa_state != XA_PREPARED);
+ DBUG_RETURN(res);
}
@@ -928,6 +1082,12 @@ bool trans_xa_commit(THD *thd)
res= !xs;
if (res)
my_error(ER_XAER_NOTA, MYF(0));
+ else if (thd->in_multi_stmt_transaction_mode())
+ {
+ my_error(ER_XAER_RMFAIL, MYF(0),
+ xa_state_names[thd->transaction.xid_state.xa_state]);
+ res= TRUE;
+ }
else
{
res= xa_trans_rolled_back(xs);
@@ -978,8 +1138,16 @@ bool trans_xa_commit(THD *thd)
{
DEBUG_SYNC(thd, "trans_xa_commit_after_acquire_commit_lock");
- res= MY_TEST(ha_commit_one_phase(thd, 1));
- if (res)
+ if(WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
+ {
+ res= thd->binlog_query(THD::THD::STMT_QUERY_TYPE,
+ thd->query(), thd->query_length(),
+ FALSE, FALSE, FALSE, 0);
+ }
+ else
+ res= 0;
+
+ if (res || (res= MY_TEST(ha_commit_one_phase(thd, 1))))
my_error(ER_XAER_RMERR, MYF(0));
}
}
@@ -1032,19 +1200,36 @@ bool trans_xa_rollback(THD *thd)
else
{
xa_trans_rolled_back(xs);
- ha_commit_or_rollback_by_xid(thd->lex->xid, 0);
+ if (ha_commit_or_rollback_by_xid(thd->lex->xid, 0) == 0 &&
+ (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()))
+ thd->binlog_query(THD::THD::STMT_QUERY_TYPE,
+ thd->query(), thd->query_length(),
+ FALSE, FALSE, FALSE, 0);
xid_cache_delete(thd, xs);
}
DBUG_RETURN(thd->get_stmt_da()->is_error());
}
- if (xa_state != XA_IDLE && xa_state != XA_PREPARED && xa_state != XA_ROLLBACK_ONLY)
+ if (xa_state != XA_IDLE && xa_state != XA_PREPARED &&
+ xa_state != XA_ROLLBACK_ONLY)
{
my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]);
DBUG_RETURN(TRUE);
}
- res= xa_trans_force_rollback(thd);
+ if(xa_state == XA_PREPARED &&
+ (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()))
+ {
+ res= thd->binlog_query(THD::THD::STMT_QUERY_TYPE,
+ thd->query(), thd->query_length(),
+ FALSE, FALSE, FALSE, 0);
+ }
+ else
+ res= 0;
+
+ res= res || MY_TEST(xa_trans_force_rollback(thd));
+ if (res)
+ my_error(ER_XAER_RMERR, MYF(0));
thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
thd->transaction.all.reset();
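Taken together, the transaction.cc changes give the slave applier roughly this flow for one XA transaction (a comment-level sketch, not literal server code):

/* Comment-level sketch of the applier-side XA lifecycle (illustration). */
void applier_xa_flow(THD *thd)
{
  trans_xa_start(thd);    /* detach_native_trx() runs per engine, parking
                             each native trx in its ha_ptr_backup slot */
  /* ... the XA transaction's row events are applied here ... */
  trans_xa_end(thd);      /* the XA END statement is binlogged with the
                             transactional cache */
  trans_xa_prepare(thd);  /* engines prepare; applier_reset_xa_trans()
                             re-inserts the XID as XA_PREPARED and
                             attach_native_trx() restores the parked trx */
  /* later, possibly from another connection: */
  trans_xa_commit(thd);   /* binlogs "XA COMMIT <xid>" and commits */
}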
diff --git a/sql/transaction.h b/sql/transaction.h
index 7e34693..1109a61 100644
--- a/sql/transaction.h
+++ b/sql/transaction.h
@@ -42,6 +42,8 @@ bool trans_xa_end(THD *thd);
bool trans_xa_prepare(THD *thd);
bool trans_xa_commit(THD *thd);
bool trans_xa_rollback(THD *thd);
+bool trans_xa_detach(THD *thd);
+bool applier_reset_xa_trans(THD *thd);
void trans_reset_one_shot_chistics(THD *thd);
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 35c8b7f..b9e75f4 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -1827,7 +1827,7 @@ thd_innodb_tmpdir(
/** Obtain the InnoDB transaction of a MySQL thread.
@param[in,out] thd thread handle
@return reference to transaction pointer */
-static trx_t* thd_to_trx(THD* thd)
+static trx_t*& thd_to_trx(THD* thd)
{
return *reinterpret_cast<trx_t**>(thd_ha_data(thd, innodb_hton_ptr));
}
@@ -3625,6 +3625,46 @@ static ulonglong innodb_prepare_commit_versioned(THD* thd, ulonglong *trx_id)
return 0;
}
+/** InnoDB transaction object that is currently associated with THD is
+replaced with that of the 2nd argument. The previous value is
+returned through the 3rd argument's buffer, unless it's NULL. When
+the buffer is not provided (value NULL) that should mean the caller
+restores previously saved association so the current trx has to be
+additionally freed from all association with MYSQL.
+
+@param[in,out] thd MySQL thread handle
+@param[in] new_trx_arg replacement trx_t
+@param[in,out] ptr_trx_arg pointer to a buffer to store old trx_t */
+static
+void
+innodb_replace_trx_in_thd(
+ THD* thd,
+ void* new_trx_arg,
+ void** ptr_trx_arg)
+{
+ trx_t*& trx = thd_to_trx(thd);
+
+ ut_ad(new_trx_arg == NULL
+ || (((trx_t*) new_trx_arg)->mysql_thd == thd
+ && !((trx_t*) new_trx_arg)->is_recovered));
+
+ if (ptr_trx_arg) {
+ *ptr_trx_arg = trx;
+
+ ut_ad(trx == NULL
+ || (trx->mysql_thd == thd && !trx->is_recovered));
+
+ } else if (trx->state == TRX_STATE_NOT_STARTED) {
+ ut_ad(thd == trx->mysql_thd);
+ trx->read_view.close();
+ } else {
+ ut_ad(thd == trx->mysql_thd);
+ ut_ad(trx_state_eq(trx, TRX_STATE_PREPARED));
+ trx_disconnect_prepared(trx);
+ }
+ trx = static_cast<trx_t*>(new_trx_arg);
+}
+
/** Initialize and normalize innodb_buffer_pool_size. */
static void innodb_buffer_pool_size_init()
{
@@ -4177,6 +4217,9 @@ static int innodb_init(void* p)
innobase_hton->prepare_commit_versioned
= innodb_prepare_commit_versioned;
+ innobase_hton->replace_native_transaction_in_thd =
+ innodb_replace_trx_in_thd;
+
innodb_remember_check_sysvar_funcs();
compile_time_assert(DATA_MYSQL_TRUE_VARCHAR == MYSQL_TYPE_VARCHAR);
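On the InnoDB side the swap is plain pointer surgery on the thd_ha_data slot. A condensed, illustrative restatement of the three cases innodb_replace_trx_in_thd() distinguishes:

/* Condensed restatement of innodb_replace_trx_in_thd() (illustration). */
void replace_trx_sketch(trx_t *&trx, void *new_trx_arg, void **ptr_trx_arg)
{
  if (ptr_trx_arg) {
    *ptr_trx_arg = trx;                   /* detach: hand the old trx back */
  } else if (trx->state == TRX_STATE_NOT_STARTED) {
    trx->read_view.close();               /* nothing running; drop the view */
  } else {
    trx_disconnect_prepared(trx);         /* prepared trx lives on detached */
  }
  trx = static_cast<trx_t*>(new_trx_arg); /* install the replacement */
}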

[Commits] b88a803459c: MDEV-17428: Update wsrep_max_ws_rows and wsrep_max_ws_size values in wsrep.cnf.sh
by jan 21 Feb '19
revision-id: b88a803459c479f41bbff0a2bfe4c8d90cd1c446 (mariadb-10.1.38-11-gb88a803459c)
parent(s): d9f7b6be5aa8d0eea18d1f122338b6058c03fdc3
author: Jan Lindström
committer: Jan Lindström
timestamp: 2019-02-21 09:19:18 +0200
message:
MDEV-17428: Update wsrep_max_ws_rows and wsrep_max_ws_size values in wsrep.cnf.sh
Since MariaDB 10.1.17, new default values for wsrep_max_ws_rows and wsrep_max_ws_size have been in effect:
wsrep_max_ws_rows
Default Value:
0 (>= MariaDB Galera 10.0.27, MariaDB 10.1.17)
131072 (<= MariaDB Galera 10.0.26, MariaDB 10.1.16)
wsrep_max_ws_size
Default Value:
2147483647 (2GB, >= MariaDB Galera 10.0.27, MariaDB 10.1.17)
1073741824 (1GB, <= MariaDB Galera 10.0.26, MariaDB 10.1.16)
---
support-files/wsrep.cnf.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/support-files/wsrep.cnf.sh b/support-files/wsrep.cnf.sh
index 51ce3dca2dd..7242fff9f7a 100644
--- a/support-files/wsrep.cnf.sh
+++ b/support-files/wsrep.cnf.sh
@@ -67,10 +67,10 @@ wsrep_slave_threads=1
wsrep_certify_nonPK=1
# Maximum number of rows in write set
-wsrep_max_ws_rows=131072
+wsrep_max_ws_rows=0
# Maximum size of write set
-wsrep_max_ws_size=1073741824
+wsrep_max_ws_size=2147483647
# to enable debug level logging, set this to 1
wsrep_debug=0
revision-id: 80c17fb3a842fa68df0d69e10e16da853de3a872 (mariadb-10.3.6-142-g80c17fb3a84)
parent(s): 0e186837c6b67214174d8f3ed98ef2e2f2f86ddb a40de1bdeb218d66d5cc737758a4bab1b06f255d
author: Oleksandr Byelkin
committer: Oleksandr Byelkin
timestamp: 2019-02-20 17:25:11 +0100
message:
Merge branch '10.3' into 10.4
debian/autobake-deb.sh | 27 ----------------------
.../mariabackup/encrypted_page_compressed.test | 2 +-
.../mariabackup/encrypted_page_corruption.test | 2 +-
.../suite/mariabackup/log_checksum_mismatch.test | 2 +-
.../mariabackup/unencrypted_page_compressed.test | 2 +-
5 files changed, 4 insertions(+), 31 deletions(-)
diff --cc debian/autobake-deb.sh
index 3442afbf376,5ca59e0305d..6ef0cb33553
--- a/debian/autobake-deb.sh
+++ b/debian/autobake-deb.sh
@@@ -94,36 -94,9 +94,9 @@@ f
# x86 32 bit.
if [[ $GCCVERSION -lt 40800 ]] || [[ $(arch) =~ i[346]86 ]] || [[ $TRAVIS ]]
then
- sed '/Package: mariadb-plugin-rocksdb/,+14d' -i debian/control
+ sed '/Package: mariadb-plugin-rocksdb/,/^$/d' -i debian/control
fi
- # AWS SDK requires c++11 -capable compiler.
- # Minimal supported versions are g++ 4.8 and clang 3.3.
- # AWS SDK also requires the build machine to have network access and git, so
- # it cannot be part of the base version included in Linux distros, but a pure
- # custom built plugin.
- if [[ $GCCVERSION -gt 40800 ]] && [[ ! $TRAVIS ]] && [[ -x "$(command -v git)" ]] && timeout 3s bash -c 'sed -n q </dev/tcp/github.com/22'
- then
- cat <<EOF >> debian/control
-
- Package: mariadb-plugin-aws-key-management
- Architecture: any
- Breaks: mariadb-aws-key-management-10.1,
- mariadb-aws-key-management-10.2
- Replaces: mariadb-aws-key-management-10.1,
- mariadb-aws-key-management-10.2
- Depends: mariadb-server-10.4,
- \${misc:Depends},
- \${shlibs:Depends}
- Description: Amazon Web Service Key Management Service Plugin for MariaDB
- This encryption key management plugin gives an interface to the Amazon Web
- Services Key Management Service for managing encryption keys used for MariaDB
- data-at-rest encryption.
- EOF
-
- sed -i -e "/-DPLUGIN_AWS_KEY_MANAGEMENT=NO/d" debian/rules
- fi
-
# Don't build cassandra package if thrift is not installed
if [[ ! -f /usr/local/include/thrift/Thrift.h && ! -f /usr/include/thrift/Thrift.h ]]
then

[Commits] 5e7d0af0293: MDEV-18659: Fix gcc-8 compiler warnings produced by -Wstringop-truncation and -Wstringop-overflow for InnoDB
by jan 20 Feb '19
revision-id: 5e7d0af0293ec0e8713efba6cf311e595f96e310 (mariadb-10.1.38-7-g5e7d0af0293)
parent(s): 346e46089621e6951e076c82ed5690aa23dcb5fe
author: Jan Lindström
committer: Jan Lindström
timestamp: 2019-02-20 15:26:27 +0200
message:
MDEV-18659: Fix gcc-8 compiler warnings produced by -Wstringop-truncation and -Wstringop-overflow for InnoDB
dict_table_rename_in_cache
dict/dict0dict.cc:1870:11: warning: ‘char* strncpy(char*, const char*, size_t)’ output may be truncated copying 320 bytes from a string of length 654 [-Wstringop-truncation]
Use memcpy instead, as it is safe to read and copy MAX_TABLE_NAME_LEN
characters: both old_name_cs_filename and old_name
have more space allocated.
fts_write_node
Make sure that the inserted word is not NULL. Note that there
are similar assertions before we search stopwords or insert
data.
fts_fetch_index_words
fts/fts0opt.cc:681:16: warning: ‘int __builtin_memcmp_eq(const void*, const void*, long unsigned int)’ specified size 18446744073709551615 exceeds maximum object size 9223372036854775807 [-Wstringop-overflow=]
dfield_get_len returns a ulint, as it can return UNIV_SQL_NULL
(ULINT32_UNDEFINED), and storing the return value in a short can
cause overflow. However, NULL values are never actually stored in the
"word" column of the FTS index.
dict_create_add_foreign_id
include/dict0crea.ic:71:11: warning: ‘char* strncpy(char*, const char*, size_t)’ specified bound 340 equals destination size [-Wstringop-truncation]
Add extra space for string constant to avoid this.
row_rename_table_for_mysql
row/row0mysql.cc:5190:10: warning: ‘char* strncpy(char*, const char*, size_t)’ specified bound 320 equals destination size [-Wstringop-truncation]
Add extra space for string constant to avoid this.
log_online_setup_bitmap_file_range
log/log0online.cc:1455:11: warning: ‘char* strncpy(char*, const char*, size_t)’ output may be truncated copying 512 bytes from a string of length 3999 [-Wstringop-truncation]
Copy only FN_REFLEN - 1 characters, as we add the string end marker there anyway.
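For context, gcc-8 emits -Wstringop-truncation whenever strncpy's bound equals (or may exceed) the destination size, because the terminating NUL can be lost. A minimal, illustrative reproduction of the warning and of the buffer-plus-one shape of fix used in this patch (not MariaDB code):

/* Illustrative reproduction of the gcc-8 warning. */
#include <cstring>

enum { MAX_TABLE_NAME_LEN = 320 };

void warns(const char *src)
{
  char name[MAX_TABLE_NAME_LEN];
  strncpy(name, src, MAX_TABLE_NAME_LEN);  /* -Wstringop-truncation:
                                              the NUL may be lost */
  (void) name;
}

void quiet(const char *src)
{
  char name[MAX_TABLE_NAME_LEN + 1];       /* one byte of headroom ... */
  strncpy(name, src, MAX_TABLE_NAME_LEN);
  name[MAX_TABLE_NAME_LEN]= '\0';          /* ... plus explicit termination */
}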
---
storage/innobase/dict/dict0dict.cc | 4 ++--
storage/innobase/fts/fts0fts.cc | 1 +
storage/innobase/fts/fts0opt.cc | 10 +++++-----
storage/innobase/include/dict0crea.ic | 2 +-
storage/innobase/row/row0mysql.cc | 4 ++--
storage/xtradb/dict/dict0dict.cc | 6 +++---
storage/xtradb/fts/fts0fts.cc | 1 +
storage/xtradb/fts/fts0opt.cc | 10 +++++-----
storage/xtradb/include/dict0crea.ic | 2 +-
storage/xtradb/log/log0online.cc | 2 +-
storage/xtradb/row/row0mysql.cc | 4 ++--
11 files changed, 24 insertions(+), 22 deletions(-)
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 06c6c3effab..d14a2320795 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1861,7 +1861,7 @@ dict_table_rename_in_cache(
/* The old table name in my_charset_filename is stored
in old_name_cs_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
if (strstr(old_name, TEMP_TABLE_PATH_PREFIX) == NULL) {
@@ -1883,7 +1883,7 @@ dict_table_rename_in_cache(
} else {
/* Old name already in
my_charset_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
}
}
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 75258bb8610..14ef9960cee 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -3830,6 +3830,7 @@ fts_write_node(
info = pars_info_create();
}
+ ut_ad(word->f_len != UNIV_SQL_NULL);
pars_info_bind_varchar_literal(info, "token", word->f_str, word->f_len);
/* Convert to "storage" byte order. */
diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc
index 77293bc867a..2a97f0fa527 100644
--- a/storage/innobase/fts/fts0opt.cc
+++ b/storage/innobase/fts/fts0opt.cc
@@ -673,18 +673,18 @@ fts_fetch_index_words(
fts_zip_t* zip = static_cast<fts_zip_t*>(user_arg);
que_node_t* exp = sel_node->select_list;
dfield_t* dfield = que_node_get_val(exp);
- short len = static_cast<short>(dfield_get_len(dfield));
+ ulint len = dfield_get_len(dfield);
void* data = dfield_get_data(dfield);
+ ut_a(len <= FTS_MAX_WORD_LEN);
+
/* Skip the duplicate words. */
- if (zip->word.f_len == static_cast<ulint>(len)
+ if (zip->word.f_len == len
&& !memcmp(zip->word.f_str, data, len)) {
return(TRUE);
}
- ut_a(len <= FTS_MAX_WORD_LEN);
-
memcpy(zip->word.f_str, data, len);
zip->word.f_len = len;
@@ -693,7 +693,7 @@ fts_fetch_index_words(
/* The string is prefixed by len. */
zip->zp->next_in = reinterpret_cast<byte*>(&len);
- zip->zp->avail_in = sizeof(len);
+ zip->zp->avail_in = sizeof(static_cast<short>(len));
/* Compress the word, create output blocks as necessary. */
while (zip->zp->avail_in > 0) {
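The short-to-ulint change above matters because dfield_get_len() can return UNIV_SQL_NULL (ULINT32_UNDEFINED, 0xFFFFFFFF), which does not fit in a short. An illustrative two-liner showing the truncation:

/* Why a short cannot carry dfield_get_len()'s full range (illustration). */
#include <cstdio>

int main()
{
  const unsigned long univ_sql_null= 0xFFFFFFFFUL;  /* ULINT32_UNDEFINED */
  short len= static_cast<short>(univ_sql_null);
  printf("%d\n", len);  /* -1 on two's-complement targets: the NULL
                           marker is silently truncated */
  return 0;
}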
diff --git a/storage/innobase/include/dict0crea.ic b/storage/innobase/include/dict0crea.ic
index 1cbaa47032b..88919fbd24c 100644
--- a/storage/innobase/include/dict0crea.ic
+++ b/storage/innobase/include/dict0crea.ic
@@ -65,7 +65,7 @@ dict_create_add_foreign_id(
sprintf(id, "%s_ibfk_%lu", name,
(ulong) (*id_nr)++);
} else {
- char table_name[MAX_TABLE_NAME_LEN + 20] = "";
+ char table_name[MAX_TABLE_NAME_LEN + 21] = "";
uint errors = 0;
strncpy(table_name, name,
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 2a9ade1da2c..84f30e226dd 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -5173,8 +5173,8 @@ row_rename_table_for_mysql(
if (!new_is_tmp) {
/* Rename all constraints. */
- char new_table_name[MAX_TABLE_NAME_LEN] = "";
- char old_table_utf8[MAX_TABLE_NAME_LEN] = "";
+ char new_table_name[MAX_TABLE_NAME_LEN+1] = "";
+ char old_table_utf8[MAX_TABLE_NAME_LEN+1] = "";
uint errors = 0;
strncpy(old_table_utf8, old_name, MAX_TABLE_NAME_LEN);
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index 1c489d13f1a..3961e0c4639 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -1867,7 +1867,7 @@ dict_table_rename_in_cache(
/* The old table name in my_charset_filename is stored
in old_name_cs_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
if (strstr(old_name, TEMP_TABLE_PATH_PREFIX) == NULL) {
@@ -1889,7 +1889,7 @@ dict_table_rename_in_cache(
} else {
/* Old name already in
my_charset_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
}
}
@@ -1916,7 +1916,7 @@ dict_table_rename_in_cache(
/* This is a generated >= 4.0.18 format id */
- char table_name[MAX_TABLE_NAME_LEN] = "";
+ char table_name[MAX_TABLE_NAME_LEN+1] = "";
uint errors = 0;
if (strlen(table->name) > strlen(old_name)) {
diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc
index 02d81551e75..e6549e40232 100644
--- a/storage/xtradb/fts/fts0fts.cc
+++ b/storage/xtradb/fts/fts0fts.cc
@@ -3829,6 +3829,7 @@ fts_write_node(
info = pars_info_create();
}
+ ut_ad(word->f_len != UNIV_SQL_NULL);
pars_info_bind_varchar_literal(info, "token", word->f_str, word->f_len);
/* Convert to "storage" byte order. */
diff --git a/storage/xtradb/fts/fts0opt.cc b/storage/xtradb/fts/fts0opt.cc
index 77293bc867a..2a97f0fa527 100644
--- a/storage/xtradb/fts/fts0opt.cc
+++ b/storage/xtradb/fts/fts0opt.cc
@@ -673,18 +673,18 @@ fts_fetch_index_words(
fts_zip_t* zip = static_cast<fts_zip_t*>(user_arg);
que_node_t* exp = sel_node->select_list;
dfield_t* dfield = que_node_get_val(exp);
- short len = static_cast<short>(dfield_get_len(dfield));
+ ulint len = dfield_get_len(dfield);
void* data = dfield_get_data(dfield);
+ ut_a(len <= FTS_MAX_WORD_LEN);
+
/* Skip the duplicate words. */
- if (zip->word.f_len == static_cast<ulint>(len)
+ if (zip->word.f_len == len
&& !memcmp(zip->word.f_str, data, len)) {
return(TRUE);
}
- ut_a(len <= FTS_MAX_WORD_LEN);
-
memcpy(zip->word.f_str, data, len);
zip->word.f_len = len;
@@ -693,7 +693,7 @@ fts_fetch_index_words(
/* The string is prefixed by len. */
zip->zp->next_in = reinterpret_cast<byte*>(&len);
- zip->zp->avail_in = sizeof(len);
+ zip->zp->avail_in = sizeof(static_cast<short>(len));
/* Compress the word, create output blocks as necessary. */
while (zip->zp->avail_in > 0) {
diff --git a/storage/xtradb/include/dict0crea.ic b/storage/xtradb/include/dict0crea.ic
index 1cbaa47032b..88919fbd24c 100644
--- a/storage/xtradb/include/dict0crea.ic
+++ b/storage/xtradb/include/dict0crea.ic
@@ -65,7 +65,7 @@ dict_create_add_foreign_id(
sprintf(id, "%s_ibfk_%lu", name,
(ulong) (*id_nr)++);
} else {
- char table_name[MAX_TABLE_NAME_LEN + 20] = "";
+ char table_name[MAX_TABLE_NAME_LEN + 21] = "";
uint errors = 0;
strncpy(table_name, name,
diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc
index bc1667e1c20..43ed68cbd57 100644
--- a/storage/xtradb/log/log0online.cc
+++ b/storage/xtradb/log/log0online.cc
@@ -1453,7 +1453,7 @@ log_online_setup_bitmap_file_range(
bitmap_files->files[array_pos].seq_num = file_seq_num;
strncpy(bitmap_files->files[array_pos].name,
- bitmap_dir_file_info.name, FN_REFLEN);
+ bitmap_dir_file_info.name, FN_REFLEN - 1);
bitmap_files->files[array_pos].name[FN_REFLEN - 1]
= '\0';
bitmap_files->files[array_pos].start_lsn
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index 93a4db98e7b..0ea2ae1b6d4 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -5183,8 +5183,8 @@ row_rename_table_for_mysql(
if (!new_is_tmp) {
/* Rename all constraints. */
- char new_table_name[MAX_TABLE_NAME_LEN] = "";
- char old_table_utf8[MAX_TABLE_NAME_LEN] = "";
+ char new_table_name[MAX_TABLE_NAME_LEN+1] = "";
+ char old_table_utf8[MAX_TABLE_NAME_LEN+1] = "";
uint errors = 0;
strncpy(old_table_utf8, old_name, MAX_TABLE_NAME_LEN);

[Commits] 56a0da6b1d9: MDEV-18659: Fix gcc-8 compiler warnings produced by -Wstringop-truncation and -Wstringop-overflow for InnoDB
by jan 20 Feb '19
revision-id: 56a0da6b1d99e305572826ee82868b1ea373b8e2 (mariadb-10.1.38-7-g56a0da6b1d9)
parent(s): 346e46089621e6951e076c82ed5690aa23dcb5fe
author: Jan Lindström
committer: Jan Lindström
timestamp: 2019-02-20 14:52:11 +0200
message:
MDEV-18659: Fix gcc-8 compiler warnings produced by -Wstringop-truncation and -Wstringop-overflow for InnoDB
dict_table_rename_in_cache
dict/dict0dict.cc:1870:11: warning: ‘char* strncpy(char*, const char*, size_t)’ output may be truncated copying 320 bytes from a string of length 654 [-Wstringop-truncation]
Use memcpy() instead: it is safe to read and copy MAX_TABLE_NAME_LEN
bytes, as both old_name_cs_filename and old_name have more space
allocated (see the sketch after this message).
fts_write_node
Make sure that the inserted word is not NULL. Note that there
are similar assertions before we search stopwords or insert
data into the FTS index.
fts_fetch_index_words
fts/fts0opt.cc:681:16: warning: ‘int __builtin_memcmp_eq(const void*, const void*, long unsigned int)’ specified size 18446744073709551615 exceeds maximum object size 9223372036854775807 [-Wstringop-overflow=]
dfield_get_len() returns a ulint because it can return UNIV_SQL_NULL
(ULINT32_UNDEFINED), and storing the return value in a short can
cause overflow. However, NULL values are never actually stored in the
"word" column of the FTS index.
dict_create_add_foreign_id
include/dict0crea.ic:71:11: warning: ‘char* strncpy(char*, const char*, size_t)’ specified bound 340 equals destination size [-Wstringop-truncation]
Add extra space for the string constant to avoid this.
row_rename_table_for_mysql
row/row0mysql.cc:5190:10: warning: ‘char* strncpy(char*, const char*, size_t)’ specified bound 320 equals destination size [-Wstringop-truncation]
Add extra space for the string constant to avoid this.
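
For context, a minimal standalone sketch (not MariaDB code; MAX_NAME,
warns() and quiet() are illustrative stand-ins for MAX_TABLE_NAME_LEN
and the patched call sites) of the warning shape and why the bounded
memcpy() form is quiet:

/* Minimal sketch, assuming gcc >= 8 with -Wall. */
#include <cstdio>
#include <cstring>

enum { MAX_NAME = 320 };          /* stand-in for MAX_TABLE_NAME_LEN */

void warns(const char *src)
{
    char dst[MAX_NAME];
    /* gcc-8: "specified bound 320 equals destination size"
       [-Wstringop-truncation]: if strlen(src) >= MAX_NAME, dst is
       left without a NUL terminator. */
    strncpy(dst, src, MAX_NAME);
    printf("%.*s\n", MAX_NAME, dst);
}

void quiet(const char *src)
{
    /* The pattern used by the fix: give the buffer headroom, start it
       zero-filled, and copy a fixed byte count.  memcpy() makes no
       termination promise, so the warning disappears; safety comes
       from the "" initializer plus the extra bytes, as in the patched
       buffers. */
    char dst[MAX_NAME + 1] = "";
    memcpy(dst, src, MAX_NAME);
    printf("%s\n", dst);
}

int main()
{
    char src[MAX_NAME + 1] = "test/t1";
    warns(src);
    quiet(src);
    return 0;
}

Note the precondition: memcpy(dst, src, MAX_NAME) unconditionally reads
MAX_NAME bytes from src, so the trade is only valid when the source
buffer is also guaranteed to be at least that large, which is exactly
the argument made in the message above.
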
---
storage/innobase/dict/dict0dict.cc | 4 ++--
storage/innobase/fts/fts0fts.cc | 1 +
storage/innobase/fts/fts0opt.cc | 10 +++++-----
storage/innobase/include/dict0crea.ic | 2 +-
storage/innobase/row/row0mysql.cc | 4 ++--
storage/xtradb/dict/dict0dict.cc | 6 +++---
storage/xtradb/fts/fts0fts.cc | 1 +
storage/xtradb/fts/fts0opt.cc | 10 +++++-----
storage/xtradb/include/dict0crea.ic | 2 +-
storage/xtradb/row/row0mysql.cc | 4 ++--
10 files changed, 23 insertions(+), 21 deletions(-)
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 06c6c3effab..d14a2320795 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1861,7 +1861,7 @@ dict_table_rename_in_cache(
/* The old table name in my_charset_filename is stored
in old_name_cs_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
if (strstr(old_name, TEMP_TABLE_PATH_PREFIX) == NULL) {
@@ -1883,7 +1883,7 @@ dict_table_rename_in_cache(
} else {
/* Old name already in
my_charset_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
}
}
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 75258bb8610..14ef9960cee 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -3830,6 +3830,7 @@ fts_write_node(
info = pars_info_create();
}
+ ut_ad(word->f_len != UNIV_SQL_NULL);
pars_info_bind_varchar_literal(info, "token", word->f_str, word->f_len);
/* Convert to "storage" byte order. */
diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc
index 77293bc867a..2a97f0fa527 100644
--- a/storage/innobase/fts/fts0opt.cc
+++ b/storage/innobase/fts/fts0opt.cc
@@ -673,18 +673,18 @@ fts_fetch_index_words(
fts_zip_t* zip = static_cast<fts_zip_t*>(user_arg);
que_node_t* exp = sel_node->select_list;
dfield_t* dfield = que_node_get_val(exp);
- short len = static_cast<short>(dfield_get_len(dfield));
+ ulint len = dfield_get_len(dfield);
void* data = dfield_get_data(dfield);
+ ut_a(len <= FTS_MAX_WORD_LEN);
+
/* Skip the duplicate words. */
- if (zip->word.f_len == static_cast<ulint>(len)
+ if (zip->word.f_len == len
&& !memcmp(zip->word.f_str, data, len)) {
return(TRUE);
}
- ut_a(len <= FTS_MAX_WORD_LEN);
-
memcpy(zip->word.f_str, data, len);
zip->word.f_len = len;
@@ -693,7 +693,7 @@ fts_fetch_index_words(
/* The string is prefixed by len. */
zip->zp->next_in = reinterpret_cast<byte*>(&len);
- zip->zp->avail_in = sizeof(len);
+ zip->zp->avail_in = sizeof(static_cast<short>(len));
/* Compress the word, create output blocks as necessary. */
while (zip->zp->avail_in > 0) {
diff --git a/storage/innobase/include/dict0crea.ic b/storage/innobase/include/dict0crea.ic
index 1cbaa47032b..88919fbd24c 100644
--- a/storage/innobase/include/dict0crea.ic
+++ b/storage/innobase/include/dict0crea.ic
@@ -65,7 +65,7 @@ dict_create_add_foreign_id(
sprintf(id, "%s_ibfk_%lu", name,
(ulong) (*id_nr)++);
} else {
- char table_name[MAX_TABLE_NAME_LEN + 20] = "";
+ char table_name[MAX_TABLE_NAME_LEN + 21] = "";
uint errors = 0;
strncpy(table_name, name,
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 2a9ade1da2c..84f30e226dd 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -5173,8 +5173,8 @@ row_rename_table_for_mysql(
if (!new_is_tmp) {
/* Rename all constraints. */
- char new_table_name[MAX_TABLE_NAME_LEN] = "";
- char old_table_utf8[MAX_TABLE_NAME_LEN] = "";
+ char new_table_name[MAX_TABLE_NAME_LEN+1] = "";
+ char old_table_utf8[MAX_TABLE_NAME_LEN+1] = "";
uint errors = 0;
strncpy(old_table_utf8, old_name, MAX_TABLE_NAME_LEN);
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index 1c489d13f1a..3961e0c4639 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -1867,7 +1867,7 @@ dict_table_rename_in_cache(
/* The old table name in my_charset_filename is stored
in old_name_cs_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
if (strstr(old_name, TEMP_TABLE_PATH_PREFIX) == NULL) {
@@ -1889,7 +1889,7 @@ dict_table_rename_in_cache(
} else {
/* Old name already in
my_charset_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
}
}
@@ -1916,7 +1916,7 @@ dict_table_rename_in_cache(
/* This is a generated >= 4.0.18 format id */
- char table_name[MAX_TABLE_NAME_LEN] = "";
+ char table_name[MAX_TABLE_NAME_LEN+1] = "";
uint errors = 0;
if (strlen(table->name) > strlen(old_name)) {
diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc
index 02d81551e75..e6549e40232 100644
--- a/storage/xtradb/fts/fts0fts.cc
+++ b/storage/xtradb/fts/fts0fts.cc
@@ -3829,6 +3829,7 @@ fts_write_node(
info = pars_info_create();
}
+ ut_ad(word->f_len != UNIV_SQL_NULL);
pars_info_bind_varchar_literal(info, "token", word->f_str, word->f_len);
/* Convert to "storage" byte order. */
diff --git a/storage/xtradb/fts/fts0opt.cc b/storage/xtradb/fts/fts0opt.cc
index 77293bc867a..2a97f0fa527 100644
--- a/storage/xtradb/fts/fts0opt.cc
+++ b/storage/xtradb/fts/fts0opt.cc
@@ -673,18 +673,18 @@ fts_fetch_index_words(
fts_zip_t* zip = static_cast<fts_zip_t*>(user_arg);
que_node_t* exp = sel_node->select_list;
dfield_t* dfield = que_node_get_val(exp);
- short len = static_cast<short>(dfield_get_len(dfield));
+ ulint len = dfield_get_len(dfield);
void* data = dfield_get_data(dfield);
+ ut_a(len <= FTS_MAX_WORD_LEN);
+
/* Skip the duplicate words. */
- if (zip->word.f_len == static_cast<ulint>(len)
+ if (zip->word.f_len == len
&& !memcmp(zip->word.f_str, data, len)) {
return(TRUE);
}
- ut_a(len <= FTS_MAX_WORD_LEN);
-
memcpy(zip->word.f_str, data, len);
zip->word.f_len = len;
@@ -693,7 +693,7 @@ fts_fetch_index_words(
/* The string is prefixed by len. */
zip->zp->next_in = reinterpret_cast<byte*>(&len);
- zip->zp->avail_in = sizeof(len);
+ zip->zp->avail_in = sizeof(static_cast<short>(len));
/* Compress the word, create output blocks as necessary. */
while (zip->zp->avail_in > 0) {
diff --git a/storage/xtradb/include/dict0crea.ic b/storage/xtradb/include/dict0crea.ic
index 1cbaa47032b..88919fbd24c 100644
--- a/storage/xtradb/include/dict0crea.ic
+++ b/storage/xtradb/include/dict0crea.ic
@@ -65,7 +65,7 @@ dict_create_add_foreign_id(
sprintf(id, "%s_ibfk_%lu", name,
(ulong) (*id_nr)++);
} else {
- char table_name[MAX_TABLE_NAME_LEN + 20] = "";
+ char table_name[MAX_TABLE_NAME_LEN + 21] = "";
uint errors = 0;
strncpy(table_name, name,
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index 93a4db98e7b..0ea2ae1b6d4 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -5183,8 +5183,8 @@ row_rename_table_for_mysql(
if (!new_is_tmp) {
/* Rename all constraints. */
- char new_table_name[MAX_TABLE_NAME_LEN] = "";
- char old_table_utf8[MAX_TABLE_NAME_LEN] = "";
+ char new_table_name[MAX_TABLE_NAME_LEN+1] = "";
+ char old_table_utf8[MAX_TABLE_NAME_LEN+1] = "";
uint errors = 0;
strncpy(old_table_utf8, old_name, MAX_TABLE_NAME_LEN);
revision-id: e5a4f316f5af8e21e73bcdee6e2b1ad95ebb206d (mariadb-10.3.6-139-ge5a4f316f5a)
parent(s): 539a165b7a470baad1f3fcda7dde1ed6a81b1c2e 1005376e588dd99846c86f33d86ffdec9de6c213
author: Marko Mäkelä
committer: Oleksandr Byelkin
timestamp: 2019-02-20 12:02:52 +0100
message:
WIP merge 10.3 into 10.4
Unresolved conflicts:
both modified: mysql-test/suite/galera_3nodes/r/galera_garbd.result
both modified: mysql-test/suite/galera_3nodes/r/galera_var_dirty_reads2.result
both modified: mysql-test/suite/galera_3nodes/t/galera_garbd.test
both modified: scripts/mysql_install_db.sh
both modified: sql/item.cc
both modified: sql/sql_table.cc
both modified: sql/sql_time.cc
both modified: sql/sql_yacc.yy
both modified: sql/sql_yacc_ora.yy
both modified: sql/table.cc
CMakeLists.txt | 12 +-
client/mysqlbinlog.cc | 23 +-
client/mysqltest.cc | 66 +-
cmake/build_configurations/mysql_release.cmake | 1 -
cmake/readline.cmake | 2 +-
cmake/submodules.cmake | 12 +-
config.h.cmake | 6 +-
configure.cmake | 2 +-
extra/mariabackup/backup_copy.cc | 13 +
extra/mariabackup/xtrabackup.cc | 44 +-
include/my_sys.h | 5 +-
include/my_valgrind.h | 4 +-
include/mysql.h | 2 +-
include/mysql.h.pp | 2 +-
libmariadb | 2 +-
mysql-test/include/check-testcase.test | 1 -
mysql-test/include/mtr_check.sql | 1 -
mysql-test/main/alter_table.result | 49 +
mysql-test/main/alter_table.test | 17 +
mysql-test/main/alter_table_errors.result | 6 +
mysql-test/main/alter_table_errors.test | 10 +
mysql-test/main/check.result | 7 +
mysql-test/main/check.test | 9 +
mysql-test/main/check_constraint.result | 2 +-
mysql-test/main/check_constraint.test | 1 -
mysql-test/main/check_constraint_innodb.result | 9 +
mysql-test/main/check_constraint_innodb.test | 14 +
mysql-test/main/disabled.def | 1 +
mysql-test/main/error_simulation.result | 5 +-
mysql-test/main/error_simulation.test | 7 +-
mysql-test/main/func_misc.result | 8 +
mysql-test/main/func_misc.test | 8 +
mysql-test/main/gis.test | 1 -
mysql-test/main/gis2.result | 2 +-
mysql-test/main/mysql.result | 26 +
mysql-test/main/mysql.test | 22 +
mysql-test/main/mysqlbinlog_row_minimal.result | 122 +-
mysql-test/main/mysqlbinlog_row_minimal.test | 41 +
mysql-test/main/row-checksum-old.result | 16 +
mysql-test/main/row-checksum.result | 16 +
mysql-test/main/row-checksum.test | 17 +
mysql-test/main/subselect2.result | 22 +
mysql-test/main/subselect2.test | 20 +
mysql-test/main/subselect_mat.result | 16 +
mysql-test/main/subselect_mat.test | 13 +
mysql-test/main/subselect_sj2_mat.result | 4 +-
mysql-test/suite/archive/partition_archive.result | 4 +-
mysql-test/suite/archive/partition_archive.test | 3 +-
.../suite/binlog/r/binlog_base64_flag.result | 19 +
.../binlog/r/binlog_mysqlbinlog_row_frag.result | 24 +
mysql-test/suite/binlog/t/binlog_base64_flag.test | 22 +
.../binlog/t/binlog_mysqlbinlog_row_frag.test | 46 +
.../encryption/r/innodb-encryption-alter.result | 4 +-
.../encryption/t/innodb-encryption-alter.test | 2 -
.../suite/encryption/t/innodb-spatial-index.test | 2 -
mysql-test/suite/galera/r/galera_defaults.result | 1 +
mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf | 1 +
mysql-test/suite/galera_3nodes/galera_3nodes.cnf | 1 +
.../galera_3nodes/include/have_mariabackup.inc | 4 -
.../suite/galera_3nodes/r/galera_garbd.result | 8 +
.../galera_3nodes/r/galera_var_dirty_reads2.result | 8 +
mysql-test/suite/galera_3nodes/suite.pm | 9 +-
mysql-test/suite/galera_3nodes/t/galera_garbd.test | 26 +
.../galera_3nodes/t/galera_ipv6_mariabackup.test | 2 +-
.../galera_3nodes/t/galera_var_dirty_reads2.test | 14 +
.../suite/gcol/inc/gcol_column_def_options.inc | 1 -
mysql-test/suite/gcol/inc/gcol_keys.inc | 1 -
mysql-test/suite/gcol/r/gcol_keys_innodb.result | 2 +-
mysql-test/suite/gcol/r/innodb_virtual_fk.result | 4 +-
mysql-test/suite/gcol/t/innodb_virtual_fk.test | 2 -
mysql-test/suite/innodb/r/add_constraint.result | 2 +-
.../suite/innodb/r/alter_foreign_crash.result | 2 +-
.../suite/innodb/r/alter_varchar_change.result | 9 +
mysql-test/suite/innodb/r/foreign_key.result | 57 +-
.../suite/innodb/r/innodb-fk-warnings.result | 24 +-
mysql-test/suite/innodb/r/innodb-fk.result | 6 +-
.../suite/innodb/r/innodb-index-online.result | 2 +-
mysql-test/suite/innodb/r/innodb-index.result | 10 +-
.../suite/innodb/r/innodb_force_recovery.result | 8 +-
.../suite/innodb/r/instant_varchar_enlarge.result | 9 -
.../suite/innodb/r/undo_truncate_recover.result | 1 +
mysql-test/suite/innodb/t/add_constraint.test | 1 -
.../suite/innodb/t/alter_varchar_change.test | 7 +
mysql-test/suite/innodb/t/foreign_key.test | 52 +-
mysql-test/suite/innodb/t/innodb-fk-warnings.test | 18 -
mysql-test/suite/innodb/t/innodb-fk.test | 2 -
mysql-test/suite/innodb/t/innodb-index-online.test | 1 -
mysql-test/suite/innodb/t/innodb-index.test | 5 -
.../innodb/t/innodb-page_compression_tables.test | 4 -
.../suite/innodb/t/innodb_force_recovery.test | 4 -
.../suite/innodb/t/instant_varchar_enlarge.test | 8 -
.../suite/innodb/t/undo_truncate_recover.test | 6 +-
.../suite/innodb_gis/r/alter_spatial_index.result | 2 +-
mysql-test/suite/innodb_gis/r/point_basic.result | 14 +-
mysql-test/suite/innodb_gis/t/point_basic.test | 6 -
.../suite/mariabackup/incremental_rocksdb.opt | 1 +
.../suite/mariabackup/incremental_rocksdb.result | 19 +
.../suite/mariabackup/incremental_rocksdb.test | 38 +
.../perfschema/r/dml_setup_instruments.result | 4 +-
.../suite/perfschema/t/dml_setup_instruments.test | 5 +-
mysql-test/suite/plugins/t/audit_null.test | 2 +-
mysql-test/suite/rpl/r/rpl_set_statement.test | 0
mysql-test/suite/sys_vars/r/all_vars.result | 1 +
.../r/sysvars_innodb,32bit,xtradb.rdiff-disabled | 1236 +++++++++++
.../r/sysvars_innodb,xtradb.rdiff-disabled | 697 ++++++
mysql-test/suite/sys_vars/r/sysvars_wsrep.result | 14 +
.../sys_vars/r/tmp_disk_table_size_func.result | 2 +-
.../suite/sys_vars/t/tmp_disk_table_size_func.test | 1 -
mysql-test/suite/vcol/inc/vcol_keys.inc | 1 -
mysql-test/suite/vcol/r/vcol_keys_innodb.result | 2 +-
mysys/mf_iocache2.c | 61 +-
mysys/my_delete.c | 61 +
mysys/my_file.c | 7 +-
mysys/my_malloc.c | 1 +
plugin/aws_key_management/CMakeLists.txt | 6 +-
plugin/wsrep_info/mysql-test/wsrep_info/suite.pm | 2 +
scripts/mysql_install_db.sh | 26 +-
scripts/wsrep_sst_mariabackup.sh | 6 +-
scripts/wsrep_sst_rsync.sh | 2 +-
sql-common/client.c | 30 +-
sql/field.cc | 4 +-
sql/item.cc | 18 +-
sql/item_func.cc | 2 +-
sql/item_func.h | 4 +
sql/log.cc | 6 +-
sql/log_event.cc | 386 +++-
sql/log_event.h | 24 +-
sql/log_event_old.cc | 17 +-
sql/partition_info.cc | 2 +-
sql/rpl_tblmap.cc | 20 +-
sql/rpl_tblmap.h | 10 +-
sql/sql_binlog.cc | 91 +-
sql/sql_class.cc | 23 +-
sql/sql_class.h | 2 +-
sql/sql_lex.cc | 3 +-
sql/sql_lex.h | 4 +
sql/sql_repl.cc | 6 +-
sql/sql_select.cc | 21 +-
sql/sql_table.cc | 142 +-
sql/sql_yacc.yy | 19 +-
sql/sql_yacc_ora.yy | 19 +-
sql/sys_vars.cc | 13 +
sql/table.cc | 2 +-
sql/table.h | 12 +-
sql/wsrep_mysqld.cc | 1 +
sql/wsrep_mysqld.h | 1 +
sql/wsrep_mysqld_c.h | 30 +
storage/connect/ha_connect.cc | 45 +-
storage/connect/jsonudf.cpp | 48 +-
storage/connect/jsonudf.h | 5 +
.../mysql-test/connect/r/jdbc_postgresql.result | 2 +-
.../connect/mysql-test/connect/r/part_table.result | 4 +-
.../connect/mysql-test/connect/t/part_table.test | 2 +-
storage/connect/tabext.cpp | 43 +-
storage/connect/tabext.h | 5 +-
storage/connect/tabjdbc.cpp | 89 +-
storage/connect/tabjson.cpp | 46 +-
storage/connect/tabjson.h | 2 +-
storage/connect/tabxml.cpp | 279 +--
storage/connect/tabxml.h | 1 +
storage/innobase/dict/dict0dict.cc | 49 +-
storage/innobase/handler/handler0alter.cc | 107 +-
storage/innobase/handler/i_s.cc | 87 +-
storage/innobase/include/trx0rseg.h | 4 +-
storage/innobase/log/log0recv.cc | 2 +-
storage/innobase/row/row0mysql.cc | 3 -
storage/innobase/row/row0sel.cc | 43 +-
storage/innobase/srv/srv0start.cc | 6 +-
storage/maria/ma_delete.c | 2 -
storage/maria/ma_write.c | 3 +-
storage/maria/maria_def.h | 4 +-
storage/tokudb/PerconaFT/COPYING.APACHEv2 | 174 ++
storage/tokudb/PerconaFT/README.md | 5 +-
storage/tokudb/PerconaFT/ft/txn/txn_manager.h | 4 +-
.../tokudb/PerconaFT/locktree/concurrent_tree.cc | 14 +
.../tokudb/PerconaFT/locktree/concurrent_tree.h | 14 +
storage/tokudb/PerconaFT/locktree/keyrange.cc | 13 +
storage/tokudb/PerconaFT/locktree/keyrange.h | 13 +
storage/tokudb/PerconaFT/locktree/lock_request.cc | 13 +
storage/tokudb/PerconaFT/locktree/lock_request.h | 13 +
storage/tokudb/PerconaFT/locktree/locktree.cc | 13 +
storage/tokudb/PerconaFT/locktree/locktree.h | 13 +
storage/tokudb/PerconaFT/locktree/manager.cc | 13 +
storage/tokudb/PerconaFT/locktree/range_buffer.cc | 13 +
storage/tokudb/PerconaFT/locktree/range_buffer.h | 13 +
storage/tokudb/PerconaFT/locktree/treenode.cc | 13 +
storage/tokudb/PerconaFT/locktree/treenode.h | 13 +
storage/tokudb/PerconaFT/locktree/txnid_set.cc | 13 +
storage/tokudb/PerconaFT/locktree/txnid_set.h | 13 +
storage/tokudb/PerconaFT/locktree/wfg.cc | 13 +
storage/tokudb/PerconaFT/locktree/wfg.h | 13 +
.../PerconaFT/portability/toku_instr_mysql.cc | 12 +-
.../PerconaFT/portability/toku_instr_mysql.h | 11 +-
.../tokudb/PerconaFT/portability/toku_pthread.h | 78 +-
storage/tokudb/PerconaFT/util/growable_array.h | 13 +
storage/tokudb/PerconaFT/util/omt.cc | 2261 +++++++++++---------
storage/tokudb/PerconaFT/util/omt.h | 13 +
storage/tokudb/ha_tokudb.cc | 10 +
storage/tokudb/hatoku_hton.cc | 4 +-
storage/tokudb/hatoku_hton.h | 1 -
.../tokudb/mysql-test/tokudb_bugs/r/PS-4979.result | 2 +
.../tokudb/mysql-test/tokudb_bugs/t/PS-4979.test | 13 +
storage/tokudb/tokudb_background.cc | 4 +-
storage/tokudb/tokudb_sysvars.cc | 14 +-
storage/tokudb/tokudb_sysvars.h | 4 +-
support-files/mysql.server.sh | 6 +-
unittest/mysys/thr_template.c | 4 +-
unittest/sql/mf_iocache-t.cc | 70 +-
win/packaging/heidisql.cmake | 2 +-
209 files changed, 6128 insertions(+), 1947 deletions(-)
diff --cc cmake/submodules.cmake
index 9f04c26e8d6,34d1f37c956..c8f7b3cc400
--- a/cmake/submodules.cmake
+++ b/cmake/submodules.cmake
@@@ -31,7 -36,6 +36,6 @@@ ENDIF(
IF(update_result OR NOT EXISTS ${CMAKE_SOURCE_DIR}/libmariadb/CMakeLists.txt)
MESSAGE(FATAL_ERROR "No MariaDB Connector/C! Run
- git submodule update --init --recursive
- Then restart the build.
- ")
- ${GIT_EXECUTABLE} submodule update --init
++ ${GIT_EXECUTABLE} submodule update --init --recursive
+ Then restart the build.${SUBMODULE_UPDATE_CONFIG_MESSAGE}")
ENDIF()
diff --cc libmariadb
index beb9d5ea899,2c5aebb3bc7..4aad20db15d
--- a/libmariadb
+++ b/libmariadb
@@@ -1,1 -1,1 +1,1 @@@
- Subproject commit beb9d5ea8994bb90361c4b9f3d926eee24055178
-Subproject commit 2c5aebb3bc724c1663c481ba2fedde00ab494fa4
++Subproject commit 4aad20db15d137009422416d53b2e0de5b819808
diff --cc mysql-test/main/disabled.def
index eecee845a35,b6991cc1d37..c1cfd229a9b
--- a/mysql-test/main/disabled.def
+++ b/mysql-test/main/disabled.def
@@@ -18,6 -21,4 +18,7 @@@ innodb-wl5522-debug-zip : broken upstr
innodb_bug12902967 : broken upstream
file_contents : MDEV-6526 these files are not installed anymore
max_statement_time : cannot possibly work, depends on timing
+ partition_open_files_limit : open_files_limit check broken by MDEV-18360
+mysqlcheck : special tables like proxy , host specific to a system are shown
+flush_read_lock : special tables like proxy , host specific to a system are shown
+join_cache : enable after MDEV-17752 is fixed
diff --cc mysql-test/main/subselect2.result
index 21bf9aad122,cae0f2286c1..0e71f22e52e
--- a/mysql-test/main/subselect2.result
+++ b/mysql-test/main/subselect2.result
@@@ -394,3 -394,25 +394,25 @@@ select null in (select a from t1 where
(select a from t3) +1 < out3.a+1) from t3 out3;
ERROR 21000: Subquery returns more than 1 row
drop table t1, t2, t3;
+ CREATE TABLE t1(
+ q11 int, q12 int, q13 int, q14 int, q15 int, q16 int, q17 int, q18 int, q19 int,
+ q21 int, q22 int, q23 int, q24 int, q25 int, q26 int, q27 int, q28 int, q29 int,
+ f1 int
+ );
+ CREATE TABLE t2(f2 int, f21 int, f3 timestamp, f4 int, f5 int, f6 int);
+ INSERT INTO t1 (f1) VALUES (1),(1),(2),(2);
+ INSERT INTO t2 VALUES (1,1,"2004-02-29 11:11:11",0,0,0), (2,2,"2004-02-29 11:11:11",0,0,0);
+ SELECT f1,
+ (SELECT t.f21 from t2 t where max(
+ q11+q12+q13+q14+q15+q16+q17+q18+q19+
+ q21+q22+q23+q24+q25+q26+q27+q28+q29) = t.f2 UNION
+ SELECT t.f3 FROM t2 AS t WHERE t1.f1=t.f2 AND t.f3=MAX(t1.f1) UNION
+ SELECT 1 LIMIT 1) AS test
+ FROM t1 GROUP BY f1;
+ f1 test
+ 1 1
+ 2 1
+ Warnings:
-Warning 1292 Incorrect datetime value: '1'
-Warning 1292 Incorrect datetime value: '2'
++Warning 1292 Truncated incorrect datetime value: '1'
++Warning 1292 Truncated incorrect datetime value: '2'
+ DROP TABLE t1,t2;
diff --cc mysql-test/main/subselect_sj2_mat.result
index e4583cf6348,884451d7dff..f42b5ce1542
--- a/mysql-test/main/subselect_sj2_mat.result
+++ b/mysql-test/main/subselect_sj2_mat.result
@@@ -1743,10 -1701,10 +1743,10 @@@ O
(t.id IN (0,4,12,13,1,10,3,11))
);
id select_type table type possible_keys key key_len ref rows Extra
--1 PRIMARY t index PRIMARY PRIMARY 4 NULL 13 Using where; Using index
++1 PRIMARY t index PRIMARY PRIMARY 4 NULL 14 Using where; Using index
2 MATERIALIZED <subquery3> ALL distinct_key NULL NULL NULL 8
--2 MATERIALIZED A ALL PRIMARY NULL NULL NULL 13 Using where; Using join buffer (flat, BNL join)
-3 MATERIALIZED B ALL PRIMARY NULL NULL NULL 13 Using where
++2 MATERIALIZED A ALL PRIMARY NULL NULL NULL 14 Using where; Using join buffer (flat, BNL join)
+3 MATERIALIZED B range PRIMARY PRIMARY 4 NULL 8 Using where
SELECT SQL_NO_CACHE t.id
FROM t1 t
WHERE (
diff --cc mysql-test/suite/encryption/t/innodb-spatial-index.test
index a50ef306351,2bf56817740..0465225cd4f
--- a/mysql-test/suite/encryption/t/innodb-spatial-index.test
+++ b/mysql-test/suite/encryption/t/innodb-spatial-index.test
@@@ -7,21 -6,10 +7,20 @@@
#
#
-# (1) Do not allow creating table with ENCRYPTED=YES
#
#
---error ER_CANT_CREATE_TABLE
+let $checksum_algorithm = `SELECT @@innodb_checksum_algorithm`;
+let $error_code = ER_CANT_CREATE_TABLE, ER_ILLEGAL_HA_CREATE_OPTION;
+if ($checksum_algorithm == "full_crc32")
+{
+ let $error_code = 0;
+}
+if ($checksum_algorithm == "strict_full_crc32")
+{
+ let $error_code = 0;
+}
+
- --replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/
+--error $error_code
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
c VARCHAR(256), coordinate POINT NOT NULL, SPATIAL index(coordinate)) ENGINE=INNODB
ENCRYPTED=YES;
@@@ -49,15 -30,14 +48,14 @@@ CREATE TABLE t1 (pk INT PRIMARY KEY AUT
c VARCHAR(256), coordinate POINT NOT NULL) ENCRYPTED=YES ENGINE=INNODB;
# FIXME: MDEV-13851 Encrypted table refuses some form of ALGORITHM=COPY,
# but allows rebuild by FORCE
- --replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/
---error ER_CANT_CREATE_TABLE
-ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), ALGORITHM=COPY;
---error ER_ILLEGAL_HA_CREATE_OPTION
-ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), FORCE, ALGORITHM=INPLACE;
---error ER_ILLEGAL_HA_CREATE_OPTION
+--error $error_code
+ALTER TABLE t1 ADD SPATIAL INDEX b1(coordinate), ALGORITHM=COPY;
+--error $error_code
+ALTER TABLE t1 ADD SPATIAL INDEX b2(coordinate), FORCE, ALGORITHM=INPLACE;
+--error $error_code
ALTER TABLE t1 ADD SPATIAL INDEX(coordinate);
---error ER_ILLEGAL_HA_CREATE_OPTION
-CREATE SPATIAL INDEX b on t1(coordinate);
+--error $error_code
+CREATE SPATIAL INDEX b3 on t1(coordinate);
DROP TABLE t1;
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
diff --cc mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf
index 7aac1662edd,477789175fb..a0c7e64af91
--- a/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf
+++ b/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf
@@@ -9,7 -9,9 +9,8 @@@ innodb-autoinc-lock-mode=
default-storage-engine=innodb
wsrep_gtid_mode=1
gtid_ignore_duplicates
+ auto_increment_increment=3
-wsrep-on=1
wsrep-provider=@ENV.WSREP_PROVIDER
wsrep_node_address=127.0.0.1
# enforce read-committed characteristics across the cluster
diff --cc mysql-test/suite/galera_3nodes/galera_3nodes.cnf
index ab7493e313b,e5aa81b8742..d33ed0caddf
--- a/mysql-test/suite/galera_3nodes/galera_3nodes.cnf
+++ b/mysql-test/suite/galera_3nodes/galera_3nodes.cnf
@@@ -5,7 -5,9 +5,8 @@@
binlog-format=row
innodb-autoinc-lock-mode=2
default-storage-engine=innodb
+ auto_increment_increment=3
-wsrep-on=1
wsrep-provider=@ENV.WSREP_PROVIDER
wsrep_node_address=127.0.0.1
# enforce read-committed characteristics across the cluster
diff --cc mysql-test/suite/galera_3nodes/r/galera_garbd.result
index 88bb3ca2ff9,fb7e729dc77..1396179b871
--- a/mysql-test/suite/galera_3nodes/r/galera_garbd.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_garbd.result
@@@ -1,10 -1,8 +1,18 @@@
++<<<<<<< HEAD
+connection node_2;
+connection node_1;
+connection node_1;
+connection node_2;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_3;
++||||||| merged common ancestors
++=======
+ connection node_1;
+ connection node_2;
+ connection node_3;
++>>>>>>> 10.3
Killing node #3 to free ports for garbd ...
+ connection node_3;
connection node_1;
Starting garbd ...
CREATE TABLE t1 (f1 INTEGER);
diff --cc mysql-test/suite/galera_3nodes/r/galera_var_dirty_reads2.result
index 8e6d27823f6,77991a6d468..b0b5c8d26bd
--- a/mysql-test/suite/galera_3nodes/r/galera_var_dirty_reads2.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_var_dirty_reads2.result
@@@ -1,5 -1,7 +1,13 @@@
++<<<<<<< HEAD
+connection node_2;
+connection node_1;
++||||||| merged common ancestors
++=======
+ connection node_1;
+ connection node_2;
+ connection node_3;
+ connection node_1;
++>>>>>>> 10.3
CREATE TABLE t1 (f1 INTEGER);
INSERT INTO t1 VALUES (1);
connection node_2;
diff --cc mysql-test/suite/galera_3nodes/t/galera_garbd.test
index 68e288c06c7,2d03e8897b9..cd401a5376a
--- a/mysql-test/suite/galera_3nodes/t/galera_garbd.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_garbd.test
@@@ -7,21 -7,20 +7,45 @@@
--source include/have_innodb.inc
--source include/big_test.inc
++<<<<<<< HEAD
+# Save galera ports
+--connection node_1
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_1 = $_NODE_GALERAPORT
+
+--connection node_2
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_2 = $_NODE_GALERAPORT
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_3 = $_NODE_GALERAPORT
+
+--connection node_3
+--echo Killing node #3 to free ports for garbd ...
++||||||| merged common ancestors
++--echo Killing node #3 to free ports for garbd ...
++--let $galera_connection_name = node_3
++--let $galera_server_number = 3
++--source include/galera_connect.inc
++--let $gp3 = `SELECT SUBSTR(@@wsrep_provider_options, LOCATE('base_port =', @@wsrep_provider_options) + LENGTH('base_port = '))`
++--let $galera_port_3 = `SELECT SUBSTR('$gp3', 1, LOCATE(';', '$gp3') - 1)`
++=======
+ --let $galera_connection_name = node_3
+ --let $galera_server_number = 3
+ --source include/galera_connect.inc
+
+ # Save original auto_increment_offset values.
+ --let $node_1=node_1
+ --let $node_2=node_2
+ --let $node_3=node_3
+ --source ../galera/include/auto_increment_offset_save.inc
+
+ --echo Killing node #3 to free ports for garbd ...
+ --connection node_3
+ --let $gp3 = `SELECT SUBSTR(@@wsrep_provider_options, LOCATE('base_port =', @@wsrep_provider_options) + LENGTH('base_port = '))`
+ --let $galera_port_3 = `SELECT SUBSTR('$gp3', 1, LOCATE(';', '$gp3') - 1)`
++>>>>>>> 10.3
--source include/shutdown_mysqld.inc
--connection node_1
diff --cc mysql-test/suite/galera_3nodes/t/galera_var_dirty_reads2.test
index 2ceda1ed352,e3f94a012b8..9f9d6da17b9
--- a/mysql-test/suite/galera_3nodes/t/galera_var_dirty_reads2.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_var_dirty_reads2.test
@@@ -106,8 -117,10 +117,11 @@@ SET GLOBAL wsrep_provider_options='gmca
--source include/wait_condition.inc
--connection node_2
---let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
+--source include/galera_wait_ready.inc
DROP TABLE t1;
+
+ # Restore original auto_increment_offset values.
+ --source ../galera/include/auto_increment_offset_restore.inc
diff --cc mysql-test/suite/innodb/r/undo_truncate_recover.result
index 018ac65537a,bcfc136e8c0..8ab41331950
--- a/mysql-test/suite/innodb/r/undo_truncate_recover.result
+++ b/mysql-test/suite/innodb/r/undo_truncate_recover.result
@@@ -9,6 -9,8 +9,7 @@@ update t1 set c = 'MariaDB'
update t1 set c = 'InnoDB';
set global debug_dbug = '+d,ib_undo_trunc';
commit;
+ call mtr.add_suppression("InnoDB: innodb_undo_tablespaces=0 disables dedicated undo log tablespaces");
-call mtr.add_suppression("InnoDB: The redo log transaction size ");
SET GLOBAL innodb_fast_shutdown=0;
FOUND 1 /ib_undo_trunc/ in mysqld.1.err
drop table t1;
diff --cc mysql-test/suite/innodb/t/undo_truncate_recover.test
index 5d680525c5b,c3fa698ff13..1da64497550
--- a/mysql-test/suite/innodb/t/undo_truncate_recover.test
+++ b/mysql-test/suite/innodb/t/undo_truncate_recover.test
@@@ -39,18 -38,14 +39,20 @@@ update t1 set c = 'MariaDB'
update t1 set c = 'InnoDB';
eval set global debug_dbug = '+d,$SEARCH_PATTERN';
commit;
+ call mtr.add_suppression("InnoDB: innodb_undo_tablespaces=0 disables dedicated undo log tablespaces");
-# FIXME: remove this work-around, and generate less log!
-call mtr.add_suppression("InnoDB: The redo log transaction size ");
SET GLOBAL innodb_fast_shutdown=0;
--source include/shutdown_mysqld.inc
--source include/search_pattern_in_file.inc
-# FIXME: remove this work-around, and generate less log!
---let $restart_parameters= --innodb-buffer-pool-size=16m --innodb-undo-tablespaces=1
++--let $restart_parameters= --innodb-undo-tablespaces=1
+if ($checksum_algorithm == "strict_full_crc32")
+{
- let $restart_parameters= --innodb_checksum_algorithm=strict_crc32;
++ let $restart_parameters= $restart_parameters --innodb_checksum_algorithm=strict_crc32;
+}
+
+if ($checksum_algorithm == "strict_crc32")
+{
- let $restart_parameters= --innodb_checksum_algorithm=strict_full_crc32;
++ let $restart_parameters= $restart_parameters --innodb_checksum_algorithm=strict_full_crc32;
+}
--source include/start_mysqld.inc
drop table t1;
diff --cc mysql-test/suite/perfschema/r/dml_setup_instruments.result
index e43841c80ac,a5184782af8..a972cf02285
--- a/mysql-test/suite/perfschema/r/dml_setup_instruments.result
+++ b/mysql-test/suite/perfschema/r/dml_setup_instruments.result
@@@ -16,10 -16,11 +16,12 @@@ wait/synch/mutex/sql/LOCK_after_binlog_
wait/synch/mutex/sql/LOCK_audit_mask YES YES
select * from performance_schema.setup_instruments
where name like 'Wait/Synch/Rwlock/sql/%'
- and name not in ('wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock')
+ and name not in (
+ 'wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock',
+ 'wait/synch/rwlock/sql/LOCK_named_pipe_full_access_group')
order by name limit 10;
NAME ENABLED TIMED
+wait/synch/rwlock/sql/LOCK_all_status_vars YES YES
wait/synch/rwlock/sql/LOCK_dboptions YES YES
wait/synch/rwlock/sql/LOCK_grant YES YES
wait/synch/rwlock/sql/LOCK_SEQUENCE YES YES
diff --cc plugin/aws_key_management/CMakeLists.txt
index b6f48cd18a0,e9e1b49d5f2..90170746eb5
--- a/plugin/aws_key_management/CMakeLists.txt
+++ b/plugin/aws_key_management/CMakeLists.txt
@@@ -1,14 -1,174 +1,18 @@@
-# We build parts of AWS C++ SDK as CMake external project
-# The restrictions of the SDK (https://github.com/awslabs/aws-sdk-cpp/blob/master/README.md)
-# are
-
-# - OS : Windows,Linux or OSX
-# - C++11 compiler : VS2013+, gcc 4.8+, clang 3.3+
-# - libcurl development package needs to be present on Unixes
-#
-# If we build SDK outselves, we'll need require GIT to be present on the build machine
-
-
-# Give message why the building this plugin is skipped (only if -DVERBOSE is defined)
-# or if plugin is explicitly requested to build. Then bail out.
-MACRO(SKIP_AWS_PLUGIN msg)
- MESSAGE_ONCE(SKIP_AWS_PLUGIN "Skip aws_key_management - ${msg}")
+INCLUDE(aws_sdk)
+CHECK_AWS_SDK(HAVE_AWS_SDK REASON)
+IF(NOT HAVE_AWS_SDK)
+ MESSAGE_ONCE(AWS_KEY_MANAGEMENT_NO_AWS_SDK "Can't build aws_key_management - AWS SDK not available (${REASON})")
RETURN()
-ENDMACRO()
-SET(CMAKE_CXX_STANDARD 11)
-
+ENDIF()
+ IF(NOT NOT_FOR_DISTRIBUTION)
+ SKIP_AWS_PLUGIN("AWS SDK has Apache 2.0 License which is not complatible with GPLv2. Set -DNOT_FOR_DISTRIBUTION=ON if you need this plugin")
+ ENDIF()
-# This plugin needs recent C++ compilers (AWS C++ SDK header files are using C++11 features)
-SET(CXX11_FLAGS)
-SET(OLD_COMPILER_MSG "AWS SDK requires c++11 -capable compiler (minimal supported versions are g++ 4.8, clang 3.3, VS2103)")
-
-IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
- EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
- IF (GCC_VERSION VERSION_LESS 4.8)
- SKIP_AWS_PLUGIN("${OLD_COMPILER_MSG}")
- ENDIF()
- SET(CXX11_FLAGS "-std=c++11")
-ELSEIF (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
- IF ((CMAKE_CXX_COMPILER_VERSION AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.3) OR
- (CLANG_VERSION_STRING AND CLANG_VERSION_STRING VERSION_LESS 3.3))
- SKIP_AWS_PLUGIN("${OLD_COMPILER_MSG}")
- ENDIF()
- SET(CXX11_FLAGS "-stdlib=libc++")
-ELSEIF(MSVC)
- IF (MSVC_VERSION LESS 1800)
- SKIP_AWS_PLUGIN("${OLD_COMPILER_MSG}")
- ENDIF()
-ELSE()
- SKIP_AWS_PLUGIN("Compiler not supported by AWS C++ SDK")
-ENDIF()
-
-IF (NOT(WIN32 OR APPLE OR (CMAKE_SYSTEM_NAME MATCHES "Linux")))
- SKIP_AWS_PLUGIN("OS unsupported by AWS SDK")
-ENDIF()
-
-
-
-FIND_LIBRARY(AWS_CPP_SDK_CORE NAMES aws-cpp-sdk-core PATH_SUFFIXES "${SDK_INSTALL_BINARY_PREFIX}")
-FIND_LIBRARY(AWS_CPP_SDK_KMS NAMES aws-cpp-sdk-kms PATH_SUFFIXES "${SDK_INSTALL_BINARY_PREFIX}")
-FIND_PATH(AWS_CPP_SDK_INCLUDE_DIR NAMES aws/kms/KMSClient.h)
-
-IF(AWS_CPP_SDK_CORE AND AWS_CPP_SDK_KMS AND AWS_CPP_SDK_INCLUDE_DIR)
- # AWS C++ SDK installed
- INCLUDE_DIRECTORIES(${AWS_CPP_SDK_INCLUDE_DIR})
- SET(AWS_SDK_LIBS ${AWS_CPP_SDK_CORE} ${AWS_CPP_SDK_KMS})
-ELSE()
- OPTION(AWS_SDK_EXTERNAL_PROJECT "Allow download and build AWS C++ SDK" OFF)
- IF(NOT AWS_SDK_EXTERNAL_PROJECT)
- SKIP_AWS_PLUGIN("AWS_SDK_EXTERNAL_PROJECT is not set")
- ENDIF()
- # Build from source, using ExternalProject_Add
- # AWS C++ SDK requires cmake 2.8.12
- IF(CMAKE_VERSION VERSION_LESS "2.8.12")
- SKIP_AWS_PLUGIN("CMake is too old")
- ENDIF()
- FIND_PACKAGE(Git)
- IF(NOT GIT_FOUND)
- SKIP_AWS_PLUGIN("no GIT")
- ENDIF()
- INCLUDE(ExternalProject)
- IF(UNIX)
- FIND_PACKAGE(CURL)
- IF(NOT CURL_FOUND)
- SKIP_AWS_PLUGIN("AWS C++ SDK requires libcurl development package")
- ENDIF()
- SET(PIC_FLAG -fPIC)
- FIND_PATH(UUID_INCLUDE_DIR uuid/uuid.h)
- IF(NOT UUID_INCLUDE_DIR)
- SKIP_AWS_PLUGIN("AWS C++ SDK requires uuid development package")
- ENDIF()
- IF(NOT APPLE)
- FIND_LIBRARY(UUID_LIBRARIES uuid)
- IF(NOT UUID_LIBRARIES)
- SKIP_AWS_PLUGIN("AWS C++ SDK requires uuid development package")
- ENDIF()
- FIND_PACKAGE(OpenSSL)
- IF(NOT OPENSSL_FOUND)
- SKIP_AWS_PLUGIN("AWS C++ SDK requires openssl development package")
- ENDIF()
- ENDIF()
- ENDIF()
- IF(MSVC)
- SET(EXTRA_SDK_CMAKE_FLAGS -DCMAKE_CXX_FLAGS_DEBUGOPT="" -DCMAKE_EXE_LINKER_FLAGS_DEBUGOPT="" "-DCMAKE_CXX_FLAGS=/wd4530 /wd4577 /WX-")
- ENDIF()
- IF(CMAKE_CXX_COMPILER)
- SET(EXTRA_SDK_CMAKE_FLAGS ${EXTRA_SDK_CMAKE_FLAGS} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER})
- ENDIF()
-
- SET(byproducts )
- # We do not need to build the whole SDK , just 2 of its libs
- set(AWS_SDK_LIBS aws-cpp-sdk-core aws-cpp-sdk-kms)
- FOREACH(lib ${AWS_SDK_LIBS})
- ADD_LIBRARY(${lib} STATIC IMPORTED GLOBAL)
- ADD_DEPENDENCIES(${lib} aws_sdk_cpp)
- SET(loc "${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}")
- IF(CMAKE_VERSION VERSION_GREATER "3.1")
- SET(byproducts ${byproducts} BUILD_BYPRODUCTS ${loc})
- ENDIF()
- SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc})
- ENDFOREACH()
-
- # To be compatible with older cmake, we use older version of the SDK
- IF(CMAKE_VERSION LESS "3.0")
- SET(GIT_TAG "1.0.8")
- ELSE()
- SET(GIT_TAG "1.2.11")
- ENDIF()
-
- SET(AWS_SDK_PATCH_COMMAND )
- ExternalProject_Add(
- aws_sdk_cpp
- GIT_REPOSITORY "https://github.com/awslabs/aws-sdk-cpp.git"
- GIT_TAG ${GIT_TAG}
- UPDATE_COMMAND ""
- SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/aws-sdk-cpp"
- ${byproducts}
- CMAKE_ARGS
- -DBUILD_ONLY=kms
- -DBUILD_SHARED_LIBS=OFF
- -DFORCE_SHARED_CRT=OFF
- -DENABLE_TESTING=OFF
- "-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG} ${PIC_FLAG}"
- "-DCMAKE_CXX_FLAGS_RELWITHDEBINFO=${CMAKE_CXX_FLAGS_RELWITHDEBINFO} ${PIC_FLAG}"
- "-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE} ${PIC_FLAG}"
- "-DCMAKE_CXX_FLAGS_MINSIZEREL=${CMAKE_CXX_FLAGS_MINSIZEREL} ${PIC_FLAG}"
- ${EXTRA_SDK_CMAKE_FLAGS}
- -DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp
- -DCMAKE_INSTALL_LIBDIR=lib
- TEST_COMMAND ""
- )
- SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE)
-
- IF(CMAKE_SYSTEM_NAME MATCHES "Linux")
- # Need whole-archive , otherwise static libraries are not linked
- SET(AWS_SDK_LIBS -Wl,--whole-archive ${AWS_SDK_LIBS} -Wl,--no-whole-archive)
- ENDIF()
- SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE)
- INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/include)
-ENDIF()
-
-ADD_DEFINITIONS(${SSL_DEFINES}) # Need to know whether openssl should be initialized
-IF(CMAKE_VERSION GREATER "3.0")
- SET(CMAKE_CXX_STANDARD 11)
-ELSE()
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CXX11_FLAGS}")
-ENDIF()
-IF(WIN32)
- SET(AWS_CPP_SDK_DEPENDENCIES bcrypt winhttp wininet userenv version)
-ELSE()
- SET(AWS_CPP_SDK_DEPENDENCIES ${OPENSSL_LIBRARIES} ${CURL_LIBRARIES} ${UUID_LIBRARIES})
-ENDIF()
-MYSQL_ADD_PLUGIN(aws_key_management aws_key_management_plugin.cc
- LINK_LIBRARIES ${AWS_SDK_LIBS} ${AWS_CPP_SDK_DEPENDENCIES}
- COMPONENT aws-key-management)
+MYSQL_ADD_PLUGIN(aws_key_management
+ aws_key_management_plugin.cc
+ COMPONENT aws-key-management)
-IF (TARGET aws_key_management)
- SET(NON_DISTRIBUTABLE_WARNING "Apache 2.0" PARENT_SCOPE)
+IF(TARGET aws_key_management)
++ SET(NON_DISTRIBUTABLE_WARNING "Apache 2.0" PARENT_SCOPE)
+ USE_AWS_SDK_LIBS(aws_key_management kms)
- ENDIF()
+ ENDIF()
diff --cc scripts/mysql_install_db.sh
index 54b5bed4546,52107405525..06c03dfcb40
--- a/scripts/mysql_install_db.sh
+++ b/scripts/mysql_install_db.sh
@@@ -37,10 -37,13 +37,13 @@@ force=
in_rpm=0
ip_only=0
cross_bootstrap=0
-auth_root_authentication_method=normal
-auth_root_socket_user='root'
+auth_root_authentication_method=socket
+auth_root_socket_user=""
skip_test_db=0
+ dirname0=`dirname $0 2>/dev/null`
+ dirname0=`dirname $dirname0 2>/dev/null`
+
usage()
{
cat <<EOF
@@@ -343,7 -345,16 +346,18 @@@ the
exit 1
fi
plugindir=`find_in_dirs --dir auth_socket.so $basedir/lib*/plugin $basedir/lib*/mysql/plugin`
+ pamtooldir=$plugindir
+ # relative from where the script was run for a relocatable install
+ elif test -n "$dirname0" -a -x "$rel_mysqld" -a ! "$rel_mysqld" -ef "@sbindir@/mysqld"
+ then
+ basedir="$dirname0"
+ bindir="$basedir/@INSTALL_BINDIR@"
+ resolveip="$bindir/resolveip"
+ mysqld="$rel_mysqld"
+ srcpkgdatadir="$basedir/@INSTALL_MYSQLSHAREDIR@"
+ buildpkgdatadir="$basedir/@INSTALL_MYSQLSHAREDIR@"
+ plugindir="$basedir/@INSTALL_PLUGINDIR@"
++ pamtooldir=$plugindir
else
basedir="@prefix@"
bindir="@bindir@"
@@@ -352,7 -363,6 +366,7 @@@
srcpkgdatadir="@pkgdatadir@"
buildpkgdatadir="@pkgdatadir@"
plugindir="@pkgplugindir@"
- pamtooldir="@pkgplugindir@"
++ pamtooldir=$plugindir
fi
# Set up paths to SQL scripts required for bootstrap
diff --cc sql-common/client.c
index 69404e43613,160a85fd7b5..c66cb1a749d
--- a/sql-common/client.c
+++ b/sql-common/client.c
@@@ -1375,10 -1634,16 +1381,12 @@@ mysql_init(MYSQL *mysql
--enable-local-infile
*/
- #if defined(ENABLED_LOCAL_INFILE) && !defined(MYSQL_SERVER)
+ #if ENABLED_LOCAL_INFILE && !defined(MYSQL_SERVER)
mysql->options.client_flag|= CLIENT_LOCAL_FILES;
+ mysql->auto_local_infile= ENABLED_LOCAL_INFILE == LOCAL_INFILE_MODE_AUTO
+ ? WAIT_FOR_QUERY : ALWAYS_ACCEPT;
#endif
-#ifdef HAVE_SMEM
- mysql->options.shared_memory_base_name= (char*) def_shared_memory_base_name;
-#endif
-
mysql->options.methods_to_use= MYSQL_OPT_GUESS_CONNECTION;
mysql->options.report_data_truncation= TRUE; /* default */
diff --cc sql/item.cc
index a6b4402b6ba,ada79fcb9df..47e10ba4004
--- a/sql/item.cc
+++ b/sql/item.cc
@@@ -1950,9 -2195,34 +1950,14 @@@ bool Item_name_const::is_null(
Item_name_const::Item_name_const(THD *thd, Item *name_arg, Item *val):
- Item(thd), value_item(val), name_item(name_arg)
+ Item_fixed_hybrid(thd), value_item(val), name_item(name_arg)
{
+ StringBuffer<128> name_buffer;
+ String *name_str;
Item::maybe_null= TRUE;
- valid_args= true;
- if (!name_item->basic_const_item() ||
- !(name_str= name_item->val_str(&name_buffer))) // Can't have a NULL name
- goto err;
- set_name(thd, name_str->ptr(), name_str->length(), name_str->charset());
-
- if (value_item->basic_const_item())
- return; // ok
-
- if (value_item->type() == FUNC_ITEM)
- {
- Item_func *value_func= (Item_func *) value_item;
- if (value_func->functype() != Item_func::COLLATE_FUNC &&
- value_func->functype() != Item_func::NEG_FUNC)
- goto err;
-
- if (value_func->key_item()->basic_const_item())
- return; // ok
- }
-
-err:
- valid_args= false;
- my_error(ER_WRONG_ARGUMENTS, MYF(0), "NAME_CONST");
++ if (name_item->basic_const_item() &&
++ (name_str= name_item->val_str(&name_buffer))) // Can't have a NULL name
++ set_name(thd, name_str->ptr(), name_str->length(), name_str->charset());
}
@@@ -1988,16 -2266,12 +1993,10 @@@ Item::Type Item_name_const::type() cons
bool Item_name_const::fix_fields(THD *thd, Item **ref)
{
- char buf[128];
- String *item_name;
- String s(buf, sizeof(buf), &my_charset_bin);
- s.length(0);
-
- if ((!value_item->fixed &&
- value_item->fix_fields(thd, &value_item)) ||
- (!name_item->fixed &&
- name_item->fix_fields(thd, &name_item)) ||
+ if (value_item->fix_fields_if_needed(thd, &value_item) ||
+ name_item->fix_fields_if_needed(thd, &name_item) ||
!value_item->const_item() ||
- !name_item->const_item() ||
- !(item_name= name_item->val_str(&s))) // Can't have a NULL name
+ !name_item->const_item())
{
my_error(ER_RESERVED_SYNTAX, MYF(0), "NAME_CONST");
return TRUE;
diff --cc sql/sql_binlog.cc
index 1fa3cca7c27,60de2923a8f..97b8e2e4f91
--- a/sql/sql_binlog.cc
+++ b/sql/sql_binlog.cc
@@@ -109,6 -109,64 +109,64 @@@ static int check_event_type(int type, R
}
}
+ /**
+ Copy fragments into the standard placeholder thd->lex->comment.str.
+
+ Compute the size of the (still) encoded total,
+ allocate and then copy fragments one after another.
+ The size can exceed max(max_allowed_packet) which is not a
+ problem as no String instance is created off this char array.
+
+ @param thd THD handle
+ @return
+ 0 at success,
+ -1 otherwise.
+ */
+ int binlog_defragment(THD *thd)
+ {
+ user_var_entry *entry[2];
+ LEX_CSTRING name[2]= { thd->lex->comment, thd->lex->ident };
+
+ /* compute the total size */
+ thd->lex->comment.str= NULL;
+ thd->lex->comment.length= 0;
+ for (uint k= 0; k < 2; k++)
+ {
+ entry[k]=
+ (user_var_entry*) my_hash_search(&thd->user_vars, (uchar*) name[k].str,
+ name[k].length);
+ if (!entry[k] || entry[k]->type != STRING_RESULT)
+ {
+ my_error(ER_WRONG_TYPE_FOR_VAR, MYF(0), name[k].str);
+ return -1;
+ }
+ thd->lex->comment.length += entry[k]->length;
+ }
+
+ thd->lex->comment.str= // to be freed by the caller
+ (char *) my_malloc(thd->lex->comment.length, MYF(MY_WME));
+ if (!thd->lex->comment.str)
+ {
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1);
++ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), 1);
+ return -1;
+ }
+
+ /* fragments are merged into allocated buf while the user var:s get reset */
+ size_t gathered_length= 0;
+ for (uint k=0; k < 2; k++)
+ {
+ memcpy(const_cast<char*>(thd->lex->comment.str) + gathered_length, entry[k]->value,
+ entry[k]->length);
+ gathered_length += entry[k]->length;
+ update_hash(entry[k], true, NULL, 0, STRING_RESULT, &my_charset_bin, 0);
+ }
+
+ DBUG_ASSERT(gathered_length == thd->lex->comment.length);
+
+ return 0;
+ }
+
+
/**
Execute a BINLOG statement.
@@@ -171,14 -223,31 +223,31 @@@ void mysql_client_binlog_statement(THD
/*
Out of memory check
*/
- if (!(rli && buf))
+ if (!(rli))
{
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1); /* needed 1 bytes */
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), 1); /* needed 1 bytes */
goto end;
}
DBUG_ASSERT(rli->belongs_to_client());
+ if (unlikely(is_fragmented= thd->lex->comment.str && thd->lex->ident.str))
+ if (binlog_defragment(thd))
+ goto end;
+
+ if (!(coded_len= thd->lex->comment.length))
+ {
+ my_error(ER_SYNTAX_ERROR, MYF(0));
+ goto end;
+ }
+
+ decoded_len= my_base64_needed_decoded_length((int)coded_len);
+ if (!(buf= (char *) my_malloc(decoded_len, MYF(MY_WME))))
+ {
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1);
++ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), 1);
+ goto end;
+ }
+
for (char const *strptr= thd->lex->comment.str ;
strptr < thd->lex->comment.str + thd->lex->comment.length ; )
{
diff --cc sql/sql_class.h
index 88494f8c169,b1fc89db7f2..361340ee235
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@@ -4420,80 -4375,6 +4420,80 @@@ private
return raised;
}
+private:
+ void push_warning_truncated_priv(Sql_condition::enum_warning_level level,
+ uint sql_errno,
+ const char *type_str, const char *val)
+ {
+ DBUG_ASSERT(sql_errno == ER_TRUNCATED_WRONG_VALUE ||
+ sql_errno == ER_WRONG_VALUE);
+ char buff[MYSQL_ERRMSG_SIZE];
+ CHARSET_INFO *cs= &my_charset_latin1;
+ cs->cset->snprintf(cs, buff, sizeof(buff),
+ ER_THD(this, sql_errno), type_str, val);
+ /*
+ Note: the format string can vary between ER_TRUNCATED_WRONG_VALUE
+ and ER_WRONG_VALUE, but the code passed to push_warning() is
+ always ER_TRUNCATED_WRONG_VALUE. This is intentional.
+ */
+ push_warning(this, level, ER_TRUNCATED_WRONG_VALUE, buff);
+ }
+public:
+ void push_warning_truncated_wrong_value(Sql_condition::enum_warning_level level,
+ const char *type_str, const char *val)
+ {
+ return push_warning_truncated_priv(level, ER_TRUNCATED_WRONG_VALUE,
+ type_str, val);
+ }
+ void push_warning_wrong_value(Sql_condition::enum_warning_level level,
+ const char *type_str, const char *val)
+ {
+ return push_warning_truncated_priv(level, ER_WRONG_VALUE, type_str, val);
+ }
+ void push_warning_truncated_wrong_value(const char *type_str, const char *val)
+ {
+ return push_warning_truncated_wrong_value(Sql_condition::WARN_LEVEL_WARN,
+ type_str, val);
+ }
+ void push_warning_truncated_value_for_field(Sql_condition::enum_warning_level
+ level, const char *type_str,
+ const char *val,
+ const TABLE_SHARE *s,
+ const char *name)
+ {
+ DBUG_ASSERT(name);
+ char buff[MYSQL_ERRMSG_SIZE];
+ CHARSET_INFO *cs= &my_charset_latin1;
+ const char *db_name= s ? s->db.str : NULL;
- const char *table_name= s ? s->error_table_name() : NULL;
++ const char *table_name= s ? s->table_name.str : NULL;
+
+ if (!db_name)
+ db_name= "";
+ if (!table_name)
+ table_name= "";
+ cs->cset->snprintf(cs, buff, sizeof(buff),
+ ER_THD(this, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
+ type_str, val, db_name, table_name, name,
+ (ulong) get_stmt_da()->current_row_for_warning());
+ push_warning(this, level, ER_TRUNCATED_WRONG_VALUE, buff);
+
+ }
+ void push_warning_wrong_or_truncated_value(Sql_condition::enum_warning_level level,
+ bool totally_useless_value,
+ const char *type_str,
+ const char *val,
+ const TABLE_SHARE *s,
+ const char *field_name)
+ {
+ if (field_name)
+ push_warning_truncated_value_for_field(level, type_str, val,
+ s, field_name);
+ else if (totally_useless_value)
+ push_warning_wrong_value(level, type_str, val);
+ else
+ push_warning_truncated_wrong_value(level, type_str, val);
+ }
+
public:
/** Overloaded to guard query/query_length fields */
virtual void set_statement(Statement *stmt);
diff --cc sql/sql_repl.cc
index 59f6a45a52f,fdca609f5af..647f83c31e7
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@@ -547,19 -556,29 +547,19 @@@ static my_bool adjust_callback(THD *thd
}
-bool log_in_use(const char* log_name)
+void adjust_linfo_offsets(my_off_t purge_offset)
{
- size_t log_name_len = strlen(log_name) + 1;
- THD *tmp;
- bool result = 0;
+ server_threads.iterate(adjust_callback, &purge_offset);
+}
- mysql_mutex_lock(&LOCK_thread_count);
- I_List_iterator<THD> it(threads);
- while ((tmp=it++))
- {
- LOG_INFO* linfo;
- if ((linfo = tmp->current_linfo))
- {
- mysql_mutex_lock(&linfo->lock);
- result = !strncmp(log_name, linfo->log_file_name, log_name_len);
- mysql_mutex_unlock(&linfo->lock);
- if (result)
- break;
- }
- }
-
- mysql_mutex_unlock(&LOCK_thread_count);
+static my_bool log_in_use_callback(THD *thd, const char *log_name)
+{
+ my_bool result= 0;
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ if (auto linfo= thd->current_linfo)
- result= !memcmp(log_name, linfo->log_file_name, strlen(log_name) + 1);
++ result= !strncmp(log_name, linfo->log_file_name, strlen(log_name) + 1);
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
return result;
}
diff --cc sql/sql_table.cc
index 4dd9d43a7b9,7f2003b765b..af92bc43c59
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@@ -9762,12 -9818,11 +9786,12 @@@ do_continue:
/* Mark that we have created table in storage engine. */
no_ha_table= false;
- new_table=
- thd->create_and_open_tmp_table(new_db_type, &frm, alter_ctx.get_tmp_path(),
- alter_ctx.new_db.str,
- alter_ctx.new_name.str,
- true, true);
+ /* Open the table since we need to copy the data. */
+ new_table= thd->create_and_open_tmp_table(new_db_type, &frm,
+ alter_ctx.get_tmp_path(),
+ alter_ctx.new_db.str,
- alter_ctx.tmp_name.str,
++ alter_ctx.new_name.str,
+ true, true);
if (!new_table)
goto err_new_table_cleanup;
@@@ -10687,9 -10732,12 +10710,12 @@@ bool mysql_checksum_table(THD *thd, TAB
{
/* calculating table's checksum */
ha_checksum crc= 0;
- uchar null_mask=256 - (1 << t->s->last_null_bit_pos);
+ DBUG_ASSERT(t->s->last_null_bit_pos < 8);
+ uchar null_mask= (t->s->last_null_bit_pos ?
+ (256 - (1 << t->s->last_null_bit_pos)):
+ 0);
- t->use_all_columns();
+ t->use_all_stored_columns();
if (t->file->ha_rnd_init(1))
protocol->store_null();
diff --cc sql/table.cc
index 7682119c241,10543a1b4f0..75f5a464186
--- a/sql/table.cc
+++ b/sql/table.cc
@@@ -5449,8 -5292,8 +5449,8 @@@ int TABLE::verify_constraints(bool igno
}
field_error.append((*chk)->name.str);
my_error(ER_CONSTRAINT_FAILED,
- MYF(ignore_failure ? ME_JUST_WARNING : 0), field_error.c_ptr(),
+ MYF(ignore_failure ? ME_WARNING : 0), field_error.c_ptr(),
- s->db.str, s->error_table_name());
+ s->db.str, s->table_name.str);
return ignore_failure ? VIEW_CHECK_SKIP : VIEW_CHECK_ERROR;
}
}
diff --cc sql/wsrep_mysqld.h
index dc2793c384d,cca66922a24..d12cc835136
--- a/sql/wsrep_mysqld.h
+++ b/sql/wsrep_mysqld.h
@@@ -20,25 -23,15 +20,26 @@@
#ifdef WITH_WSREP
+#include <mysql/plugin.h>
+#include "mysql/service_wsrep.h"
+
+#include <my_global.h>
+#include <my_pthread.h>
+#include "log.h"
+#include "mysqld.h"
+
typedef struct st_mysql_show_var SHOW_VAR;
#include <sql_priv.h>
-//#include "rpl_gtid.h"
-#include "../wsrep/wsrep_api.h"
#include "mdl.h"
-#include "mysqld.h"
#include "sql_table.h"
+ #include "wsrep_mysqld_c.h"
+#include "wsrep/provider.hpp"
+#include "wsrep/streaming_context.hpp"
+#include "wsrep_api.h"
+#include <vector>
+#include "wsrep_server_state.h"
+
#define WSREP_UNDEFINED_TRX_ID ULONGLONG_MAX
class set_var;
diff --cc storage/innobase/handler/handler0alter.cc
index 307374ec700,d3907f2d05e..b1dc295206a
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@@ -10176,12 -9087,12 +10191,13 @@@ commit_try_norebuild
after a successful commit_try_norebuild() call.
@param ha_alter_info algorithm=inplace context
@param ctx In-place ALTER TABLE context for the current partition
+@param altered_table the TABLE after the ALTER
@param table the TABLE before the ALTER
@param trx Data dictionary transaction
- (will be started and committed, for DROP INDEX) */
+ (will be started and committed, for DROP INDEX)
+ @return whether all replacements were found for dropped indexes */
inline MY_ATTRIBUTE((nonnull))
- void
+ bool
commit_cache_norebuild(
/*===================*/
Alter_inplace_info* ha_alter_info,
@@@ -11009,12 -9869,20 +11015,21 @@@ foreign_fail
m_user_thd,
Sql_condition::WARN_LEVEL_WARN,
ER_ALTER_INFO,
- "InnoDB: Could not add foreign"
- " key constraints.");
- } else {
- commit_cache_norebuild(ha_alter_info, ctx,
- altered_table,
- table, trx);
+ "failed to load FOREIGN KEY"
+ " constraints");
+ }
+ } else {
+ bool fk_fail = innobase_update_foreign_cache(
+ ctx, m_user_thd) != DB_SUCCESS;
+
- if (!commit_cache_norebuild(ha_alter_info, ctx, table,
++ if (!commit_cache_norebuild(ha_alter_info, ctx,
++ altered_table, table,
+ trx)) {
+ fk_fail = true;
+ }
+
+ if (fk_fail && m_prebuilt->trx->check_foreigns) {
+ goto foreign_fail;
}
}
revision-id: dadf5f78411b8f46ac53b8ea12c08ec317d9aab4 (mariadb-10.1.38-7-gdadf5f78411)
parent(s): 346e46089621e6951e076c82ed5690aa23dcb5fe
author: Jan Lindström
committer: Jan Lindström
timestamp: 2019-02-20 08:10:26 +0200
message:
Fix a few compiler warnings.
dict_table_rename_in_cache
dict_create_add_foreign_id
Use of strncpy caused string truncation warnings, so it is
replaced with memcpy.
fts_fetch_index_words
dfield_get_len() can return UNIV_SQL_NULL (=ULINT32_UNDEFINED),
which is too big for the short datatype that was used. Replaced
with the correct ulint.
row_rename_table_for_mysql
The target string was exactly the same length as the maximum
length of the copied string, leaving no room for the terminating
NUL.
---
storage/innobase/dict/dict0dict.cc | 12 ++++++------
storage/innobase/fts/fts0opt.cc | 10 +++++-----
storage/innobase/include/dict0crea.ic | 4 ++--
storage/innobase/row/row0mysql.cc | 4 ++--
storage/xtradb/dict/dict0dict.cc | 12 ++++++------
storage/xtradb/fts/fts0opt.cc | 10 +++++-----
storage/xtradb/include/dict0crea.ic | 4 ++--
storage/xtradb/row/row0mysql.cc | 4 ++--
8 files changed, 30 insertions(+), 30 deletions(-)
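A minimal sketch of the warning class being silenced here (standalone, not the server code): GCC 8's -Wstringop-truncation fires on strncpy calls that may drop the terminating NUL; a fixed-size memcpy does not warn, at the price of requiring the source buffer to really hold that many readable bytes, which these call sites assume.

#include <cstring>

enum { BUF = 32 };

void copy_name(char (&dst)[BUF], const char *src)
{
  /* strncpy(dst, src, BUF); may warn: result can be unterminated */
  memcpy(dst, src, BUF - 1);  /* fixed-size copy, no truncation warning */
  dst[BUF - 1] = '\0';        /* terminate explicitly */
}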
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 06c6c3effab..04b14e3ae06 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1845,7 +1845,7 @@ dict_table_rename_in_cache(
ulint db_len;
char* old_id;
- char old_name_cs_filename[MAX_TABLE_NAME_LEN+20];
+ char old_name_cs_filename[MAX_TABLE_NAME_LEN+20]="";
uint errors = 0;
/* All table names are internally stored in charset
@@ -1855,13 +1855,13 @@ dict_table_rename_in_cache(
in UTF-8 charset. The variable fkid here is used
to store foreign key constraint name in charset
my_charset_filename for comparison further below. */
- char fkid[MAX_TABLE_NAME_LEN+20];
+ char fkid[MAX_TABLE_NAME_LEN+20]="";
ibool on_tmp = FALSE;
/* The old table name in my_charset_filename is stored
in old_name_cs_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
if (strstr(old_name, TEMP_TABLE_PATH_PREFIX) == NULL) {
@@ -1883,7 +1883,7 @@ dict_table_rename_in_cache(
} else {
/* Old name already in
my_charset_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
}
}
@@ -1922,7 +1922,7 @@ dict_table_rename_in_cache(
}
/* Convert the table name to UTF-8 */
- strncpy(table_name, table->name,
+ memcpy(table_name, table->name,
MAX_TABLE_NAME_LEN);
innobase_convert_to_system_charset(
strchr(table_name, '/') + 1,
@@ -1934,7 +1934,7 @@ dict_table_rename_in_cache(
from charset my_charset_filename to
UTF-8. This means that the table name
is already in UTF-8 (#mysql#50). */
- strncpy(table_name, table->name,
+ memcpy(table_name, table->name,
MAX_TABLE_NAME_LEN);
}
diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc
index 77293bc867a..2a97f0fa527 100644
--- a/storage/innobase/fts/fts0opt.cc
+++ b/storage/innobase/fts/fts0opt.cc
@@ -673,18 +673,18 @@ fts_fetch_index_words(
fts_zip_t* zip = static_cast<fts_zip_t*>(user_arg);
que_node_t* exp = sel_node->select_list;
dfield_t* dfield = que_node_get_val(exp);
- short len = static_cast<short>(dfield_get_len(dfield));
+ ulint len = dfield_get_len(dfield);
void* data = dfield_get_data(dfield);
+ ut_a(len <= FTS_MAX_WORD_LEN);
+
/* Skip the duplicate words. */
- if (zip->word.f_len == static_cast<ulint>(len)
+ if (zip->word.f_len == len
&& !memcmp(zip->word.f_str, data, len)) {
return(TRUE);
}
- ut_a(len <= FTS_MAX_WORD_LEN);
-
memcpy(zip->word.f_str, data, len);
zip->word.f_len = len;
@@ -693,7 +693,7 @@ fts_fetch_index_words(
/* The string is prefixed by len. */
zip->zp->next_in = reinterpret_cast<byte*>(&len);
- zip->zp->avail_in = sizeof(len);
+ zip->zp->avail_in = sizeof(static_cast<short>(len));
/* Compress the word, create output blocks as necessary. */
while (zip->zp->avail_in > 0) {
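One subtlety in this hunk, shown with a standalone snippet: sizeof applied to a cast expression yields the size of the cast's type, so avail_in stays 2 bytes and the zlib stream keeps its 2-byte length prefix even though len is now a ulint. Since next_in still points at &len, reading the first two bytes of the wider integer matches the old short layout on little-endian machines (an assumption of this note, not a claim about the patch).

#include <cstdio>

int main()
{
  unsigned long len = 42;  /* stand-in for ulint */
  /* prints 2 on platforms where short is 16 bits */
  printf("%zu\n", sizeof(static_cast<short>(len)));
  return 0;
}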
diff --git a/storage/innobase/include/dict0crea.ic b/storage/innobase/include/dict0crea.ic
index 1cbaa47032b..cebbcf73864 100644
--- a/storage/innobase/include/dict0crea.ic
+++ b/storage/innobase/include/dict0crea.ic
@@ -68,7 +68,7 @@ dict_create_add_foreign_id(
char table_name[MAX_TABLE_NAME_LEN + 20] = "";
uint errors = 0;
- strncpy(table_name, name,
+ memcpy(table_name, name,
MAX_TABLE_NAME_LEN + 20);
innobase_convert_to_system_charset(
@@ -77,7 +77,7 @@ dict_create_add_foreign_id(
MAX_TABLE_NAME_LEN, &errors);
if (errors) {
- strncpy(table_name, name,
+ memcpy(table_name, name,
MAX_TABLE_NAME_LEN + 20);
}
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 2a9ade1da2c..84f30e226dd 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -5173,8 +5173,8 @@ row_rename_table_for_mysql(
if (!new_is_tmp) {
/* Rename all constraints. */
- char new_table_name[MAX_TABLE_NAME_LEN] = "";
- char old_table_utf8[MAX_TABLE_NAME_LEN] = "";
+ char new_table_name[MAX_TABLE_NAME_LEN+1] = "";
+ char old_table_utf8[MAX_TABLE_NAME_LEN+1] = "";
uint errors = 0;
strncpy(old_table_utf8, old_name, MAX_TABLE_NAME_LEN);
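A minimal sketch of the off-by-one fixed above: strncpy(dst, src, N) writes no NUL when strlen(src) >= N, so the destination needs room for N + 1 bytes; with the buffer zero-initialized by = "", the final byte then always stays a terminator.

#include <cstring>

enum { MAX_NAME = 8 };  /* toy stand-in for MAX_TABLE_NAME_LEN */

void copy_table_name(const char *src)
{
  char dst[MAX_NAME + 1] = "";     /* +1 reserves the terminating byte */
  strncpy(dst, src, MAX_NAME);     /* may fill all MAX_NAME data bytes... */
  /* ...but dst[MAX_NAME] is still '\0', so dst is a valid C string */
  (void)dst;
}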
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index 1c489d13f1a..d23be874c2f 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -1851,7 +1851,7 @@ dict_table_rename_in_cache(
ulint db_len;
char* old_id;
- char old_name_cs_filename[MAX_TABLE_NAME_LEN+20];
+ char old_name_cs_filename[MAX_TABLE_NAME_LEN+20]="";
uint errors = 0;
/* All table names are internally stored in charset
@@ -1861,13 +1861,13 @@ dict_table_rename_in_cache(
in UTF-8 charset. The variable fkid here is used
to store foreign key constraint name in charset
my_charset_filename for comparison further below. */
- char fkid[MAX_TABLE_NAME_LEN+20];
+ char fkid[MAX_TABLE_NAME_LEN+20]="";
ibool on_tmp = FALSE;
/* The old table name in my_charset_filename is stored
in old_name_cs_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
if (strstr(old_name, TEMP_TABLE_PATH_PREFIX) == NULL) {
@@ -1889,7 +1889,7 @@ dict_table_rename_in_cache(
} else {
/* Old name already in
my_charset_filename */
- strncpy(old_name_cs_filename, old_name,
+ memcpy(old_name_cs_filename, old_name,
MAX_TABLE_NAME_LEN);
}
}
@@ -1928,7 +1928,7 @@ dict_table_rename_in_cache(
}
/* Convert the table name to UTF-8 */
- strncpy(table_name, table->name,
+ memcpy(table_name, table->name,
MAX_TABLE_NAME_LEN);
innobase_convert_to_system_charset(
strchr(table_name, '/') + 1,
@@ -1940,7 +1940,7 @@ dict_table_rename_in_cache(
from charset my_charset_filename to
UTF-8. This means that the table name
is already in UTF-8 (#mysql#50). */
- strncpy(table_name, table->name,
+ memcpy(table_name, table->name,
MAX_TABLE_NAME_LEN);
}
diff --git a/storage/xtradb/fts/fts0opt.cc b/storage/xtradb/fts/fts0opt.cc
index 77293bc867a..2a97f0fa527 100644
--- a/storage/xtradb/fts/fts0opt.cc
+++ b/storage/xtradb/fts/fts0opt.cc
@@ -673,18 +673,18 @@ fts_fetch_index_words(
fts_zip_t* zip = static_cast<fts_zip_t*>(user_arg);
que_node_t* exp = sel_node->select_list;
dfield_t* dfield = que_node_get_val(exp);
- short len = static_cast<short>(dfield_get_len(dfield));
+ ulint len = dfield_get_len(dfield);
void* data = dfield_get_data(dfield);
+ ut_a(len <= FTS_MAX_WORD_LEN);
+
/* Skip the duplicate words. */
- if (zip->word.f_len == static_cast<ulint>(len)
+ if (zip->word.f_len == len
&& !memcmp(zip->word.f_str, data, len)) {
return(TRUE);
}
- ut_a(len <= FTS_MAX_WORD_LEN);
-
memcpy(zip->word.f_str, data, len);
zip->word.f_len = len;
@@ -693,7 +693,7 @@ fts_fetch_index_words(
/* The string is prefixed by len. */
zip->zp->next_in = reinterpret_cast<byte*>(&len);
- zip->zp->avail_in = sizeof(len);
+ zip->zp->avail_in = sizeof(static_cast<short>(len));
/* Compress the word, create output blocks as necessary. */
while (zip->zp->avail_in > 0) {
diff --git a/storage/xtradb/include/dict0crea.ic b/storage/xtradb/include/dict0crea.ic
index 1cbaa47032b..cebbcf73864 100644
--- a/storage/xtradb/include/dict0crea.ic
+++ b/storage/xtradb/include/dict0crea.ic
@@ -68,7 +68,7 @@ dict_create_add_foreign_id(
char table_name[MAX_TABLE_NAME_LEN + 20] = "";
uint errors = 0;
- strncpy(table_name, name,
+ memcpy(table_name, name,
MAX_TABLE_NAME_LEN + 20);
innobase_convert_to_system_charset(
@@ -77,7 +77,7 @@ dict_create_add_foreign_id(
MAX_TABLE_NAME_LEN, &errors);
if (errors) {
- strncpy(table_name, name,
+ memcpy(table_name, name,
MAX_TABLE_NAME_LEN + 20);
}
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index 93a4db98e7b..0ea2ae1b6d4 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -5183,8 +5183,8 @@ row_rename_table_for_mysql(
if (!new_is_tmp) {
/* Rename all constraints. */
- char new_table_name[MAX_TABLE_NAME_LEN] = "";
- char old_table_utf8[MAX_TABLE_NAME_LEN] = "";
+ char new_table_name[MAX_TABLE_NAME_LEN+1] = "";
+ char old_table_utf8[MAX_TABLE_NAME_LEN+1] = "";
uint errors = 0;
strncpy(old_table_utf8, old_name, MAX_TABLE_NAME_LEN);
revision-id: c8f9b3f915a729ec35c00e92cc534a01271aa6e6 (mariadb-10.3.12-62-gc8f9b3f915a)
parent(s): 1005376e588dd99846c86f33d86ffdec9de6c213
author: Oleksandr Byelkin
committer: Oleksandr Byelkin
timestamp: 2019-02-20 08:44:08 +0100
message:
remove aws-key management plugin
---
debian/autobake-deb.sh | 27 ---------------------------
1 file changed, 27 deletions(-)
diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh
index 93d2b695397..5ca59e0305d 100755
--- a/debian/autobake-deb.sh
+++ b/debian/autobake-deb.sh
@@ -97,33 +97,6 @@ then
sed '/Package: mariadb-plugin-rocksdb/,+14d' -i debian/control
fi
-# AWS SDK requires c++11 -capable compiler.
-# Minimal supported versions are g++ 4.8 and clang 3.3.
-# AWS SDK also requires the build machine to have network access and git, so
-# it cannot be part of the base version included in Linux distros, but a pure
-# custom built plugin.
-if [[ $GCCVERSION -gt 40800 ]] && [[ ! $TRAVIS ]] && [[ -x "$(command -v git)" ]] && timeout 3s bash -c 'sed -n q </dev/tcp/github.com/22'
-then
- cat <<EOF >> debian/control
-
-Package: mariadb-plugin-aws-key-management
-Architecture: any
-Breaks: mariadb-aws-key-management-10.1,
- mariadb-aws-key-management-10.2
-Replaces: mariadb-aws-key-management-10.1,
- mariadb-aws-key-management-10.2
-Depends: mariadb-server-10.3,
- \${misc:Depends},
- \${shlibs:Depends}
-Description: Amazon Web Service Key Management Service Plugin for MariaDB
- This encryption key management plugin gives an interface to the Amazon Web
- Services Key Management Service for managing encryption keys used for MariaDB
- data-at-rest encryption.
-EOF
-
- sed -i -e "/-DPLUGIN_AWS_KEY_MANAGEMENT=NO/d" debian/rules
-fi
-
# Don't build cassandra package if thrift is not installed
if [[ ! -f /usr/local/include/thrift/Thrift.h && ! -f /usr/include/thrift/Thrift.h ]]
then
commit 57fd938fe33f9c3923f673c801a70b34c9c3e3d9
Author: Sachin <sachin.setiya(a)mariadb.com>
Date: Wed Feb 20 02:53:08 2019 +0530
MDEV-371 Unique Index for long columns
This patch implements an engine independent unique hash index.
Usage:- A unique HASH index can be created automatically for a
blob/varchar/text column whose key
length > handler->max_key_length(),
or it can be explicitly specified.
Automatic Creation:-
Create TABLE t1 (a blob unique);
Explicit Creation:-
Create TABLE t1 (a int , unique(a) using HASH);
Internal KEY_PART Representations:-
A long unique key_info has 2 representations.
(Let's understand this with an example: create table t1(a blob, b
blob , unique(a, b)); )
1. User Given Representation:- the key_info->key_part array will be
similar to what the user has defined.
So in the case of the example it will have 2 key_parts (a, b).
2. Storage Engine Representation:- in this case there will be
only one key_part and it will point to the
HASH_FIELD. This key_part is always placed after the user defined key_parts.
So:- User Given Representation [a] [b] [hash_key_part]
key_info->key_part ----^
Storage Engine Representation [a] [b] [hash_key_part]
key_info->key_part ------------^
table->s->key_info holds the User Given Representation, while
table->key_info holds the Storage Engine
Representation. The representations can be converted into each other by
calling the re/setup_keyinfo_hash functions.
Working:-
1. When the user specifies a HASH index, or the key_length is >
handler->max_key_length(), in mysql_prepare_create_table
one extra vfield is added (for each long unique key), and
key_info->algorithm is set to HA_KEY_ALG_LONG_HASH.
2. In init_from_binary_frm_image the values for the hash keypart are
set (like fieldnr, field and flags).
3. In parse_vcol_defs, HASH_FIELD->vcol_info is created.
Item_func_hash is used with the list of Item_fields;
when an explicit length is given by the user, Item_func_left is used
so that only that prefix of the Item_field values is hashed.
4. In ha_write_row/ha_update_row, check_duplicate_long_entry_key is
called, which creates the hash key from
table->record[0] and then calls ha_index_read_map; if a duplicate
hash is found, the rows are compared
field by field.
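A toy, self-contained C++ model of step 4 (simplified types and hypothetical helper names, not the server code): the hidden HASH key part stores only an 8-byte hash of the long columns, so a lookup hit must still be verified field by field before a duplicate-key error is raised, because distinct rows can share a hash.

#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

using Row = std::vector<std::string>;  /* stand-in for a table record */

struct LongUniqueIndex {
  /* hash -> row id; a multimap because distinct rows may collide */
  std::unordered_multimap<uint64_t, size_t> idx;
  std::vector<Row> rows;

  static uint64_t hash_row(const Row &r) {  /* stand-in for Item_func_hash */
    uint64_t h = 0xcbf29ce484222325ull;     /* FNV-1a over all key columns */
    for (const auto &col : r)
      for (unsigned char c : col) { h ^= c; h *= 0x100000001b3ull; }
    return h;
  }

  /* models check_duplicate_long_entry_key: hash probe, then full compare */
  bool insert(const Row &r) {
    uint64_t h = hash_row(r);
    auto range = idx.equal_range(h);
    for (auto it = range.first; it != range.second; ++it)
      if (rows[it->second] == r)  /* field-by-field comparison */
        return false;             /* real duplicate, not just a collision */
    idx.emplace(h, rows.size());
    rows.push_back(r);
    return true;
  }
};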
diff --git a/include/my_base.h b/include/my_base.h
index c36072c0bfa..8a8237ce8b2 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -99,7 +99,8 @@ enum ha_key_alg {
HA_KEY_ALG_BTREE= 1, /* B-tree, default one */
HA_KEY_ALG_RTREE= 2, /* R-tree, for spatial searches */
HA_KEY_ALG_HASH= 3, /* HASH keys (HEAP tables) */
- HA_KEY_ALG_FULLTEXT= 4 /* FULLTEXT (MyISAM tables) */
+ HA_KEY_ALG_FULLTEXT= 4, /* FULLTEXT (MyISAM tables) */
+ HA_KEY_ALG_LONG_HASH= 5 /* long BLOB keys */
};
/* Storage media types */
diff --git a/include/mysql_com.h b/include/mysql_com.h
index 8b0847ab399..49769395697 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -203,6 +203,8 @@ enum enum_indicator_type
#define VERS_UPDATE_UNVERSIONED_FLAG (1 << 29) /* column that doesn't support
system versioning when table
itself supports it*/
+#define LONG_UNIQUE_HASH_FIELD (1<< 30) /* This field will store hash for unique
+                                           column */
#define REFRESH_GRANT (1ULL << 0) /* Refresh grant tables */
#define REFRESH_LOG (1ULL << 1) /* Start on new log file */
diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result
new file mode 100644
index 00000000000..e2667bac31d
--- /dev/null
+++ b/mysql-test/main/long_unique.result
@@ -0,0 +1,1408 @@
+#Structure of tests
+#First we will check all option for
+#table containing single unique column
+#table containing keys like unique(a,b,c,d) etc
+#then table containing 2 blob unique etc
+set @allowed_packet= @@max_allowed_packet;
+#table with single long blob column;
+create table t1(a blob unique );
+insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
+#blob with primary key not allowed
+create table t2(a blob,primary key(a(10000)));
+ERROR 42000: Specified key was too long; max key length is 1000 bytes
+create table t3(a varchar(10000) primary key);
+ERROR 42000: Specified key was too long; max key length is 1000 bytes
+insert into t1 values(2);
+ERROR 23000: Duplicate entry '2' for key 'a'
+#table structure;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table t1
+Non_unique 0
+Key_name a
+Seq_in_index 1
+Column_name a
+Collation A
+Cardinality NULL
+Sub_part NULL
+Packed NULL
+Null YES
+Index_type HASH
+Comment
+Index_comment
+
+MyISAM file: DATADIR/test/t1
+Record format: Packed
+Character set: latin1_swedish_ci (8)
+Data records: 10 Deleted blocks: 0
+Recordlength: 20
+
+table description:
+Key Start Len Index Type
+1 12 8 multip. ulonglong NULL
+select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
+def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob UNI select,insert,update,references NEVER NULL
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLUMN_NAME COLLATION CARDINALITY SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT INDEX_COMMENT
+def test t1 0 test a 1 a A NULL NULL NULL YES HASH
+select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+CONSTRAINT_CATALOG def
+CONSTRAINT_SCHEMA test
+CONSTRAINT_NAME a
+TABLE_CATALOG def
+TABLE_SCHEMA test
+TABLE_NAME t1
+COLUMN_NAME a
+ORDINAL_POSITION 1
+POSITION_IN_UNIQUE_CONSTRAINT NULL
+REFERENCED_TABLE_SCHEMA NULL
+REFERENCED_TABLE_NAME NULL
+REFERENCED_COLUMN_NAME NULL
+# table select we should not be able to see db_row_hash_column;
+select * from t1 order by a;
+a
+NULL
+NULL
+1
+123456789034567890
+123456789034567891
+2
+3
+56
+maria
+sachin
+select db_row_hash_1 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+#duplicate entry test;
+insert into t1 values(2);
+ERROR 23000: Duplicate entry '2' for key 'a'
+insert into t1 values('sachin');
+ERROR 23000: Duplicate entry 'sachin' for key 'a'
+insert into t1 values(123456789034567891);
+ERROR 23000: Duplicate entry '123456789034567891' for key 'a'
+select * from t1 order by a;
+a
+NULL
+NULL
+1
+123456789034567890
+123456789034567891
+2
+3
+56
+maria
+sachin
+insert into t1 values(11),(22),(33);
+insert into t1 values(12),(22);
+ERROR 23000: Duplicate entry '22' for key 'a'
+select * from t1 order by a;
+a
+NULL
+NULL
+1
+11
+12
+123456789034567890
+123456789034567891
+2
+22
+3
+33
+56
+maria
+sachin
+insert into t1 values(repeat('s',4000*10)),(repeat('s',4001*10));
+insert into t1 values(repeat('m',4000*10)),(repeat('m',4000*10));
+ERROR 23000: Duplicate entry 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm' for key 'a'
+insert into t1 values(repeat('m',4001)),(repeat('m',4002));
+truncate table t1;
+insert into t1 values(1),(2),(3),(4),(5),(8),(7);
+
+MyISAM file: DATADIR/test/t1
+Record format: Packed
+Character set: latin1_swedish_ci (8)
+Data records: 7 Deleted blocks: 0
+Recordlength: 20
+
+table description:
+Key Start Len Index Type
+1 12 8 multip. ulonglong NULL
+#now some alter commands;
+alter table t1 add column b int;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+b int(11) YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values(1,2);
+ERROR 23000: Duplicate entry '1' for key 'a'
+insert into t1 values(2,2);
+ERROR 23000: Duplicate entry '2' for key 'a'
+select db_row_hash_1 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+#now try to change db_row_hash_1 column;
+alter table t1 drop column db_row_hash_1;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 add column d int , add column e int , drop column db_row_hash_1;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 modify column db_row_hash_1 int ;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column a int , add column b int, modify column db_row_hash_1 int ;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 change column db_row_hash_1 dsds int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column asd int, change column db_row_hash_1 dsds int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 drop column b , add column c int;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+#now add some column with name db_row_hash;
+alter table t1 add column db_row_hash_1 int unique;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values(45,1,55),(46,1,55);
+ERROR 23000: Duplicate entry '55' for key 'db_row_hash_1'
+insert into t1 values(45,1,55),(45,1,55);
+ERROR 23000: Duplicate entry '45' for key 'a'
+alter table t1 add column db_row_hash_2 int, add column db_row_hash_3 int;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+c int(11) YES NULL
+db_row_hash_1 int(11) YES UNI NULL
+db_row_hash_2 int(11) YES NULL
+db_row_hash_3 int(11) YES NULL
+#this should also drop the unique index ;
+alter table t1 drop column a;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+#add column with unique index on blob ;
+alter table t1 add column a blob unique;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+# try to change the blob unique name;
+alter table t1 change column a aa blob ;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ `aa` blob DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `a` (`aa`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 a 1 aa A NULL NULL NULL YES HASH
+# try to change the blob unique datatype;
+#this will change index to b tree;
+alter table t1 modify column aa int ;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ `aa` int(11) DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `a` (`aa`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 a 1 aa A NULL NULL NULL YES BTREE
+alter table t1 add column clm blob unique;
+#try changing the name ;
+alter table t1 change column clm clm_changed blob;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ `aa` int(11) DEFAULT NULL,
+ `clm_changed` blob DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `a` (`aa`),
+ UNIQUE KEY `clm` (`clm_changed`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 a 1 aa A NULL NULL NULL YES BTREE
+t1 0 clm 1 clm_changed A NULL NULL NULL YES HASH
+#now drop the unique key;
+alter table t1 drop key clm;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ `aa` int(11) DEFAULT NULL,
+ `clm_changed` blob DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `a` (`aa`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 a 1 aa A NULL NULL NULL YES BTREE
+drop table t1;
+create table t1 (a TEXT CHARSET latin1 COLLATE latin1_german2_ci unique);
+desc t1;
+Field Type Null Key Default Extra
+a text YES UNI NULL
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+insert into t1 values ('ae');
+insert into t1 values ('AE');
+ERROR 23000: Duplicate entry 'AE' for key 'a'
+insert into t1 values ('Ä');
+drop table t1;
+create table t1 (a int primary key, b blob unique);
+desc t1;
+Field Type Null Key Default Extra
+a int(11) NO PRI NULL
+b blob YES UNI NULL
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 0 NULL NULL BTREE
+t1 0 b 1 b A NULL NULL NULL YES HASH
+insert into t1 values(1,1),(2,2),(3,3);
+insert into t1 values(1,1);
+ERROR 23000: Duplicate entry '1' for key 'b'
+insert into t1 values(7,1);
+ERROR 23000: Duplicate entry '1' for key 'b'
+drop table t1;
+#table with multiple long blob column and varchar text column ;
+create table t1(a blob unique, b int , c blob unique , d text unique , e varchar(3000) unique);
+insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555),
+('sachin',341,'fdf','gfgfgfg','hghgr'),('maria',345,'frter','dasd','utyuty'),
+(123456789034567891,353534,53453453453456,64565464564564,45435345345345),
+(123456789034567890,43545,657567567567,78967657567567,657567567567567676);
+#table structure;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+b int(11) YES NULL
+c blob YES UNI NULL
+d text YES UNI NULL
+e varchar(3000) YES UNI NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `c` (`c`) USING HASH,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+
+MyISAM file: DATADIR/test/t1
+Record format: Packed
+Character set: latin1_swedish_ci (8)
+Data records: 8 Deleted blocks: 0
+Recordlength: 3072
+
+table description:
+Key Start Len Index Type
+1 3039 8 multip. ulonglong NULL
+2 3047 8 multip. ulonglong NULL
+3 3055 8 multip. ulonglong NULL
+4 3063 8 multip. ulonglong NULL
+select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
+def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob UNI select,insert,update,references NEVER NULL
+def test t1 b 2 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL
+def test t1 c 3 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob UNI select,insert,update,references NEVER NULL
+def test t1 d 4 NULL YES text 65535 65535 NULL NULL NULL latin1 latin1_swedish_ci text UNI select,insert,update,references NEVER NULL
+def test t1 e 5 NULL YES varchar 3000 3000 NULL NULL NULL latin1 latin1_swedish_ci varchar(3000) UNI select,insert,update,references NEVER NULL
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLUMN_NAME COLLATION CARDINALITY SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT INDEX_COMMENT
+def test t1 0 test a 1 a A NULL NULL NULL YES HASH
+def test t1 0 test c 1 c A NULL NULL NULL YES HASH
+def test t1 0 test d 1 d A NULL NULL NULL YES HASH
+def test t1 0 test e 1 e A NULL NULL NULL YES HASH
+select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
+def test a def test t1 a 1 NULL NULL NULL NULL
+def test c def test t1 c 1 NULL NULL NULL NULL
+def test d def test t1 d 1 NULL NULL NULL NULL
+def test e def test t1 e 1 NULL NULL NULL NULL
+#table select we should not be able to see db_row_hash_1 column;
+select * from t1 order by a;
+a b c d e
+1 2 3 4 5
+123456789034567890 43545 657567567567 78967657567567 657567567567567676
+123456789034567891 353534 53453453453456 64565464564564 45435345345345
+2 11 22 33 44
+3111 222 333 444 555
+5611 2222 3333 4444 5555
+maria 345 frter dasd utyuty
+sachin 341 fdf gfgfgfg hghgr
+select db_row_hash_1 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+select db_row_hash_2 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_2' in 'field list'
+select db_row_hash_3 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_3' in 'field list'
+#duplicate entry test;
+insert into t1 values(21,2,3,42,51);
+ERROR 23000: Duplicate entry '3' for key 'c'
+insert into t1 values('sachin',null,null,null,null);
+ERROR 23000: Duplicate entry 'sachin' for key 'a'
+insert into t1 values(1234567890345671890,4353451,6575675675617,789676575675617,657567567567567676);
+ERROR 23000: Duplicate entry '657567567567567676' for key 'e'
+select * from t1 order by a;
+a b c d e
+1 2 3 4 5
+123456789034567890 43545 657567567567 78967657567567 657567567567567676
+123456789034567891 353534 53453453453456 64565464564564 45435345345345
+2 11 22 33 44
+3111 222 333 444 555
+5611 2222 3333 4444 5555
+maria 345 frter dasd utyuty
+sachin 341 fdf gfgfgfg hghgr
+insert into t1 values(repeat('s',4000*10),100,repeat('s',4000*10),repeat('s',4000*10),
+repeat('s',400)),(repeat('s',4001*10),1000,repeat('s',4001*10),repeat('s',4001*10),
+repeat('s',2995));
+insert into t1 values(repeat('m',4000*11),10,repeat('s',4000*11),repeat('s',4000*11),repeat('s',2995));
+ERROR 23000: Duplicate entry 'ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss' for key 'e'
+truncate table t1;
+insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555);
+#now some alter commands;
+alter table t1 add column f int;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+b int(11) YES NULL
+c blob YES UNI NULL
+d text YES UNI NULL
+e varchar(3000) YES UNI NULL
+f int(11) YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `c` (`c`) USING HASH,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+#unique key should not break;
+insert into t1 values(1,2,3,4,5,6);
+ERROR 23000: Duplicate entry '1' for key 'a'
+select db_row_hash_1 , db_row_hash_2, db_row_hash_3 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+#now try to change db_row_hash_1 column;
+alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 drop column b , add column g int;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `c` (`c`) USING HASH,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+#now add some column with name db_row_hash;
+alter table t1 add column db_row_hash_1 int unique;
+alter table t1 add column db_row_hash_2 int unique;
+alter table t1 add column db_row_hash_3 int unique;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `c` (`c`) USING HASH,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
+ UNIQUE KEY `db_row_hash_3` (`db_row_hash_3`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+alter table t1 add column db_row_hash_7 int, add column db_row_hash_5 int , add column db_row_hash_4 int ;
+alter table t1 drop column db_row_hash_7,drop column db_row_hash_3, drop column db_row_hash_4;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+c blob YES UNI NULL
+d text YES UNI NULL
+e varchar(3000) YES UNI NULL
+f int(11) YES NULL
+g int(11) YES NULL
+db_row_hash_1 int(11) YES UNI NULL
+db_row_hash_2 int(11) YES UNI NULL
+db_row_hash_5 int(11) YES NULL
+#this should not break anything;
+insert into t1 values(1,2,3,4,5,6,23,5,6);
+ERROR 23000: Duplicate entry '1' for key 'a'
+#this should also drop the unique index;
+alter table t1 drop column a, drop column c;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
+#add column with unique index on blob;
+alter table t1 add column a blob unique;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
+t1 0 a 1 a A NULL NULL NULL YES HASH
+#try to change the blob unique column name;
+#this will change index to b tree;
+alter table t1 modify column a int , modify column e int;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `d` text DEFAULT NULL,
+ `e` int(11) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ `a` int(11) DEFAULT NULL,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`),
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
+ UNIQUE KEY `a` (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
+t1 0 a 1 a A NULL NULL NULL YES BTREE
+alter table t1 add column clm1 blob unique,add column clm2 blob unique;
+#try changing the name;
+alter table t1 change column clm1 clm_changed1 blob, change column clm2 clm_changed2 blob;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `d` text DEFAULT NULL,
+ `e` int(11) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ `a` int(11) DEFAULT NULL,
+ `clm_changed1` blob DEFAULT NULL,
+ `clm_changed2` blob DEFAULT NULL,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`),
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
+ UNIQUE KEY `a` (`a`),
+ UNIQUE KEY `clm1` (`clm_changed1`) USING HASH,
+ UNIQUE KEY `clm2` (`clm_changed2`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
+t1 0 a 1 a A NULL NULL NULL YES BTREE
+t1 0 clm1 1 clm_changed1 A NULL NULL NULL YES HASH
+t1 0 clm2 1 clm_changed2 A NULL NULL NULL YES HASH
+#now drop the unique key;
+alter table t1 drop key clm1, drop key clm2;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `d` text DEFAULT NULL,
+ `e` int(11) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ `a` int(11) DEFAULT NULL,
+ `clm_changed1` blob DEFAULT NULL,
+ `clm_changed2` blob DEFAULT NULL,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`),
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
+ UNIQUE KEY `a` (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
+t1 0 a 1 a A NULL NULL NULL YES BTREE
+drop table t1;
+#now the table with key on multiple columns; the ultimate test;
+create table t1(a blob, b int , c varchar(2000) , d text , e varchar(3000) , f longblob , g int , h text ,
+unique(a,b,c), unique(c,d,e),unique(e,f,g,h), unique(b,d,g,h));
+insert into t1 values(1,1,1,1,1,1,1,1),(2,2,2,2,2,2,2,2),(3,3,3,3,3,3,3,3),(4,4,4,4,4,4,4,4),(5,5,5,5,5,5,5,5),
+('maria',6,'maria','maria','maria','maria',6,'maria'),('mariadb',7,'mariadb','mariadb','mariadb','mariadb',8,'mariadb')
+,(null,null,null,null,null,null,null,null),(null,null,null,null,null,null,null,null);
+#table structure;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES MUL NULL
+b int(11) YES MUL NULL
+c varchar(2000) YES MUL NULL
+d text YES NULL
+e varchar(3000) YES MUL NULL
+f longblob YES NULL
+g int(11) YES NULL
+h text YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+
+MyISAM file: DATADIR/test/t1
+Record format: Packed
+Character set: latin1_swedish_ci (8)
+Data records: 9 Deleted blocks: 0
+Recordlength: 5092
+
+table description:
+Key Start Len Index Type
+1 5057 8 multip. ulonglong NULL
+2 5065 8 multip. ulonglong NULL
+3 5073 8 multip. ulonglong NULL
+4 5081 8 multip. ulonglong NULL
+select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
+def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob MUL select,insert,update,references NEVER NULL
+def test t1 b 2 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) MUL select,insert,update,references NEVER NULL
+def test t1 c 3 NULL YES varchar 2000 2000 NULL NULL NULL latin1 latin1_swedish_ci varchar(2000) MUL select,insert,update,references NEVER NULL
+def test t1 d 4 NULL YES text 65535 65535 NULL NULL NULL latin1 latin1_swedish_ci text select,insert,update,references NEVER NULL
+def test t1 e 5 NULL YES varchar 3000 3000 NULL NULL NULL latin1 latin1_swedish_ci varchar(3000) MUL select,insert,update,references NEVER NULL
+def test t1 f 6 NULL YES longblob 4294967295 4294967295 NULL NULL NULL NULL NULL longblob select,insert,update,references NEVER NULL
+def test t1 g 7 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL
+def test t1 h 8 NULL YES text 65535 65535 NULL NULL NULL latin1 latin1_swedish_ci text select,insert,update,references NEVER NULL
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLUMN_NAME COLLATION CARDINALITY SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT INDEX_COMMENT
+def test t1 0 test a 1 a A NULL NULL NULL YES HASH
+def test t1 0 test a 2 b A NULL NULL NULL YES HASH
+def test t1 0 test a 3 c A NULL NULL NULL YES HASH
+def test t1 0 test c 1 c A NULL NULL NULL YES HASH
+def test t1 0 test c 2 d A NULL NULL NULL YES HASH
+def test t1 0 test c 3 e A NULL NULL NULL YES HASH
+def test t1 0 test e 1 e A NULL NULL NULL YES HASH
+def test t1 0 test e 2 f A NULL NULL NULL YES HASH
+def test t1 0 test e 3 g A NULL NULL NULL YES HASH
+def test t1 0 test e 4 h A NULL NULL NULL YES HASH
+def test t1 0 test b 1 b A NULL NULL NULL YES HASH
+def test t1 0 test b 2 d A NULL NULL NULL YES HASH
+def test t1 0 test b 3 g A NULL NULL NULL YES HASH
+def test t1 0 test b 4 h A NULL NULL NULL YES HASH
+select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
+def test a def test t1 a 1 NULL NULL NULL NULL
+def test a def test t1 b 2 NULL NULL NULL NULL
+def test a def test t1 c 3 NULL NULL NULL NULL
+def test c def test t1 c 1 NULL NULL NULL NULL
+def test c def test t1 d 2 NULL NULL NULL NULL
+def test c def test t1 e 3 NULL NULL NULL NULL
+def test e def test t1 e 1 NULL NULL NULL NULL
+def test e def test t1 f 2 NULL NULL NULL NULL
+def test e def test t1 g 3 NULL NULL NULL NULL
+def test e def test t1 h 4 NULL NULL NULL NULL
+def test b def test t1 b 1 NULL NULL NULL NULL
+def test b def test t1 d 2 NULL NULL NULL NULL
+def test b def test t1 g 3 NULL NULL NULL NULL
+def test b def test t1 h 4 NULL NULL NULL NULL
+# table select we should not be able to see db_row_hash_1 column;
+select * from t1 order by a;
+a b c d e f g h
+NULL NULL NULL NULL NULL NULL NULL NULL
+NULL NULL NULL NULL NULL NULL NULL NULL
+1 1 1 1 1 1 1 1
+2 2 2 2 2 2 2 2
+3 3 3 3 3 3 3 3
+4 4 4 4 4 4 4 4
+5 5 5 5 5 5 5 5
+maria 6 maria maria maria maria 6 maria
+mariadb 7 mariadb mariadb mariadb mariadb 8 mariadb
+select db_row_hash_1 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+select db_row_hash_2 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_2' in 'field list'
+select db_row_hash_3 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_3' in 'field list'
+#duplicate entry test;
+#duplicate keys entry;
+insert into t1 values(1,1,1,0,0,0,0,0);
+ERROR 23000: Duplicate entry '1-1-1' for key 'a'
+insert into t1 values(0,0,1,1,1,0,0,0);
+ERROR 23000: Duplicate entry '1-1-1' for key 'c'
+insert into t1 values(0,0,0,0,1,1,1,1);
+ERROR 23000: Duplicate entry '1-1-1-1' for key 'e'
+insert into t1 values(1,1,1,1,1,0,0,0);
+ERROR 23000: Duplicate entry '1-1-1' for key 'a'
+insert into t1 values(0,0,0,0,1,1,1,1);
+ERROR 23000: Duplicate entry '1-1-1-1' for key 'e'
+insert into t1 values(1,1,1,1,1,1,1,1);
+ERROR 23000: Duplicate entry '1-1-1' for key 'a'
+select db_row_hash_1,db_row_hash_2,db_row_hash_3,db_row_hash_4,db_row_hash_5 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+# add column named db_row_hash_*;
+alter table t1 add column db_row_hash_7 int , add column db_row_hash_5 int,
+add column db_row_hash_1 int, add column db_row_hash_2 int;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ `db_row_hash_7` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+alter table t1 drop column db_row_hash_7 , drop column db_row_hash_5 ,
+drop column db_row_hash_1, drop column db_row_hash_2 ;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+#try to change column names;
+alter table t1 change column a aa blob , change column b bb blob , change column d dd blob;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `aa` blob DEFAULT NULL,
+ `bb` blob DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `dd` blob DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`aa`,`bb`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`dd`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`bb`,`dd`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 aa A NULL NULL NULL YES HASH
+t1 0 a 2 bb A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 dd A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 bb A NULL NULL NULL YES HASH
+t1 0 b 2 dd A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+alter table t1 change column aa a blob , change column bb b blob , change column dd d blob;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` blob DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+#now we will change the data type to int and varchar limit so that we no longer require hash_index;
+#on key a_b_c;
+alter table t1 modify column a varchar(20) , modify column b varchar(20) , modify column c varchar(20);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` varchar(20) DEFAULT NULL,
+ `b` varchar(20) DEFAULT NULL,
+ `c` varchar(20) DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`),
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES BTREE
+t1 0 a 2 b A NULL NULL NULL YES BTREE
+t1 0 a 3 c A NULL NULL NULL YES BTREE
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+#change it back;
+alter table t1 modify column a blob , modify column b blob , modify column c blob;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` blob DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+#try to delete blob column in unique;
+truncate table t1;
+#now try to delete keys;
+alter table t1 drop key c, drop key e;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` blob DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+drop table t1;
+#now alter table containing some data basically some tests with ignore;
+create table t1 (a blob);
+insert into t1 values(1),(2),(3);
+#normal alter table;
+alter table t1 add unique key(a);
+alter table t1 drop key a;
+truncate table t1;
+insert into t1 values(1),(1),(2),(2),(3);
+alter table t1 add unique key(a);
+ERROR 23000: Duplicate entry '1' for key 'a'
+alter ignore table t1 add unique key(a);
+select * from t1 order by a;
+a
+1
+2
+3
+insert into t1 values(1);
+ERROR 23000: Duplicate entry '1' for key 'a'
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`(65535)) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL 65535 NULL YES HASH
+drop table t1;
+#Now with multiple keys;
+create table t1(a blob , b blob, c blob , d blob , e int);
+insert into t1 values (1,1,1,1,1);
+insert into t1 values (1,1,1,1,1);
+insert into t1 values (2,1,1,1,1);
+insert into t1 values (2,2,2,2,2);
+insert into t1 values (3,3,4,4,4);
+insert into t1 values (4,4,4,4,4);
+alter table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
+ERROR 23000: Duplicate entry '1-1' for key 'a'
+alter ignore table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
+select * from t1 order by a;
+a b c d e
+1 1 1 1 1
+2 2 2 2 2
+3 3 4 4 4
+insert into t1 values (1,12,1,13,14);
+ERROR 23000: Duplicate entry '1-1' for key 'a'
+insert into t1 values (12,1,14,1,14);
+ERROR 23000: Duplicate entry '1-1' for key 'b'
+insert into t1 values (13,12,13,14,4);
+ERROR 23000: Duplicate entry '4' for key 'e'
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` blob DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`(65535),`c`(65535)) USING HASH,
+ UNIQUE KEY `b` (`b`(65535),`d`(65535)) USING HASH,
+ UNIQUE KEY `e` (`e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL 65535 NULL YES HASH
+t1 0 a 2 c A NULL 65535 NULL YES HASH
+t1 0 b 1 b A NULL 65535 NULL YES HASH
+t1 0 b 2 d A NULL 65535 NULL YES HASH
+t1 0 e 1 e A 0 NULL NULL YES BTREE
+drop table t1;
+#visibility of db_row_hash
+create table t1 (a blob unique , b blob unique);
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+b blob YES UNI NULL
+insert into t1 values(1,19);
+insert into t1 values(2,29);
+insert into t1 values(3,39);
+insert into t1 values(4,49);
+create table t2 (DB_ROW_HASH_1 int, DB_ROW_HASH_2 int);
+insert into t2 values(11,1);
+insert into t2 values(22,2);
+insert into t2 values(33,3);
+insert into t2 values(44,4);
+select * from t1 order by a;
+a b
+1 19
+2 29
+3 39
+4 49
+select * from t2 order by DB_ROW_HASH_1;
+DB_ROW_HASH_1 DB_ROW_HASH_2
+11 1
+22 2
+33 3
+44 4
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1;
+ERROR 42S22: Unknown column 'DB_ROW_HASH_1' in 'field list'
+#bug
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2;
+DB_ROW_HASH_1 DB_ROW_HASH_2
+11 1
+11 1
+11 1
+11 1
+22 2
+22 2
+22 2
+22 2
+33 3
+33 3
+33 3
+33 3
+44 4
+44 4
+44 4
+44 4
+select * from t1 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
+ERROR 42S22: Unknown column 'DB_ROW_HASH_1' in 'IN/ALL/ANY subquery'
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
+DB_ROW_HASH_1 DB_ROW_HASH_2
+11 1
+22 2
+33 3
+44 4
+11 1
+22 2
+33 3
+44 4
+11 1
+22 2
+33 3
+44 4
+11 1
+22 2
+33 3
+44 4
+select * from t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t1);
+DB_ROW_HASH_1 DB_ROW_HASH_2
+11 1
+22 2
+33 3
+44 4
+select DB_ROW_HASH_1 from t1,t2 where t1.DB_ROW_HASH_1 = t2.DB_ROW_HASH_2;
+ERROR 42S22: Unknown column 't1.DB_ROW_HASH_1' in 'where clause'
+select DB_ROW_HASH_1 from t1 inner join t2 on t1.a = t2.DB_ROW_HASH_2;
+DB_ROW_HASH_1
+11
+22
+33
+44
+drop table t1,t2;
+#very long blob entry;
+SET @@GLOBAL.max_allowed_packet=67108864;
+connect 'newcon', localhost, root,,;
+connection newcon;
+show variables like 'max_allowed_packet';
+Variable_name Value
+max_allowed_packet 67108864
+create table t1(a longblob unique, b longblob , c longblob , unique(b,c));
+desc t1;
+Field Type Null Key Default Extra
+a longblob YES UNI NULL
+b longblob YES MUL NULL
+c longblob YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` longblob DEFAULT NULL,
+ `b` longblob DEFAULT NULL,
+ `c` longblob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `b` (`b`,`c`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 c A NULL NULL NULL YES HASH
+insert into t1 values(concat(repeat('sachin',10000000),'1'),concat(repeat('sachin',10000000),'1'),
+concat(repeat('sachin',10000000),'1'));
+insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
+concat(repeat('sachin',10000000),'1'));
+insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
+concat(repeat('sachin',10000000),'4'));
+ERROR 23000: Duplicate entry 'sachinsachinsachinsachinsachinsachinsachinsachinsachinsachinsach' for key 'a'
+insert into t1 values(concat(repeat('sachin',10000000),'3'),concat(repeat('sachin',10000000),'1'),
+concat(repeat('sachin',10000000),'1'));
+ERROR 23000: Duplicate entry 'sachinsachinsachinsachinsachinsachinsachinsachinsachinsachinsach' for key 'b'
+drop table t1;
+#long key unique with different key length
+create table t1(a blob, unique(a(3000)));
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL 3000 NULL YES HASH
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`(3000)) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 value(concat(repeat('s',3000),'1'));
+insert into t1 value(concat(repeat('s',3000),'2'));
+ERROR 23000: Duplicate entry 'ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss' for key 'a'
+insert into t1 value(concat(repeat('a',3000),'2'));
+drop table t1;
+create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob,
+unique(a(3500), b), unique(c(4500), d));
+desc t1;
+Field Type Null Key Default Extra
+a varchar(4000) YES MUL NULL
+b longblob YES NULL
+c varchar(5000) YES MUL NULL
+d longblob YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` varchar(4000) DEFAULT NULL,
+ `b` longblob DEFAULT NULL,
+ `c` varchar(5000) DEFAULT NULL,
+ `d` longblob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`(3500),`b`) USING HASH,
+ UNIQUE KEY `c` (`c`(4500),`d`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL 3500 NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL 4500 NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+drop table t1;
+disconnect newcon;
+connection default;
+SET @@GLOBAL.max_allowed_packet=4194304;
+#ext bug
+create table t1(a int primary key, b blob unique, c int, d blob , index(c));
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` blob DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ UNIQUE KEY `b` (`b`) USING HASH,
+ KEY `c` (`c`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values(1,23,1,33);
+insert into t1 values(2,23,1,33);
+ERROR 23000: Duplicate entry '23' for key 'b'
+drop table t1;
+create table t2 (a blob unique , c int , index(c));
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` blob DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ KEY `c` (`c`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t2 values(1,1);
+insert into t2 values(2,1);
+drop table t2;
+#not null test
+create table t1(a blob unique not null);
+desc t1;
+Field Type Null Key Default Extra
+a blob NO UNI NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob NOT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values(1);
+insert into t1 values(3);
+insert into t1 values(1);
+ERROR 23000: Duplicate entry '1' for key 'a'
+drop table t1;
+create table t1(a int primary key, b blob unique , c blob unique not null);
+insert into t1 values(1,1,1);
+insert into t1 values(2,1,2);
+ERROR 23000: Duplicate entry '1' for key 'b'
+insert into t1 values(3,3,1);
+ERROR 23000: Duplicate entry '1' for key 'c'
+drop table t1;
+create table t1 (a blob unique not null, b blob not null, c blob not null, unique(b,c));
+desc t1;
+Field Type Null Key Default Extra
+a blob NO UNI NULL
+b blob NO MUL NULL
+c blob NO NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob NOT NULL,
+ `b` blob NOT NULL,
+ `c` blob NOT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `b` (`b`,`c`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values (1, 2, 3);
+insert into t1 values (2, 1, 3);
+insert into t1 values (2, 1, 3);
+ERROR 23000: Duplicate entry '2' for key 'a'
+drop table t1;
+#partition
+create table t1(a blob unique) partition by hash(a);
+ERROR HY000: A BLOB field is not allowed in partition function
+#key length > 2^16 -1
+create table t1(a blob, unique(a(65536)));
+ERROR HY000: Max key segment length is 65535
+create table t1(a blob, unique(a(65535)));
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`(65535)) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+#64 indexes
+create table t1 ( a63 blob unique, a62 blob unique, a61 blob unique, a60 blob unique, a59 blob unique, a58 blob unique, a57 blob unique, a56 blob unique, a55 blob unique, a54 blob unique, a53 blob unique, a52 blob unique, a51 blob unique, a50 blob unique, a49 blob unique, a48 blob unique, a47 blob unique, a46 blob unique, a45 blob unique, a44 blob unique, a43 blob unique, a42 blob unique, a41 blob unique, a40 blob unique, a39 blob unique, a38 blob unique, a37 blob unique, a36 blob unique, a35 blob unique, a34 blob unique, a33 blob unique, a32 blob unique, a31 blob unique, a30 blob unique, a29 blob unique, a28 blob unique, a27 blob unique, a26 blob unique, a25 blob unique, a24 blob unique, a23 blob unique, a22 blob unique, a21 blob unique, a20 blob unique, a19 blob unique, a18 blob unique, a17 blob unique, a16 blob unique, a15 blob unique, a14 blob unique, a13 blob unique, a12 blob unique, a11 blob unique, a10 blob unique, a9 blob unique, a8 blob unique, a7 blob unique, a6 blob unique, a5 blob unique, a4 blob unique, a3 blob unique, a2 blob unique, a1 blob unique, a blob unique);;
+insert into t1 values( 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);;
+insert into t1 values( 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);;
+ERROR 23000: Duplicate entry '63' for key 'a63'
+insert into t1 values( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63);;
+insert into t1 values( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63);;
+ERROR 23000: Duplicate entry '0' for key 'a63'
+drop table t1;
+set @@GLOBAL.max_allowed_packet= @allowed_packet;
diff --git a/mysql-test/main/long_unique.test b/mysql-test/main/long_unique.test
new file mode 100644
index 00000000000..11ab5038809
--- /dev/null
+++ b/mysql-test/main/long_unique.test
@@ -0,0 +1,511 @@
+let datadir=`select @@datadir`;
+--source include/have_partition.inc
+
+--echo #Structure of tests
+--echo #First we will check all option for
+--echo #table containing single unique column
+--echo #table containing keys like unique(a,b,c,d) etc
+--echo #then table containing 2 blob unique etc
+set @allowed_packet= @@max_allowed_packet;
+--echo #table with single long blob column;
+create table t1(a blob unique );
+insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
+--echo #blob with primary key not allowed
+--error ER_TOO_LONG_KEY
+create table t2(a blob,primary key(a(10000)));
+--error ER_TOO_LONG_KEY
+create table t3(a varchar(10000) primary key);
+
+--error ER_DUP_ENTRY
+insert into t1 values(2);
+--echo #table structure;
+desc t1;
+show create table t1;
+query_vertical show keys from t1;
+replace_result $datadir DATADIR;
+exec $MYISAMCHK -d $datadir/test/t1;
+select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+query_vertical select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+--echo # table select we should not be able to see db_row_hash_column;
+select * from t1 order by a;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1 from t1;
+--echo #duplicate entry test;
+--error ER_DUP_ENTRY
+insert into t1 values(2);
+--error ER_DUP_ENTRY
+insert into t1 values('sachin');
+--error ER_DUP_ENTRY
+insert into t1 values(123456789034567891);
+select * from t1 order by a;
+insert into t1 values(11),(22),(33);
+--error ER_DUP_ENTRY
+insert into t1 values(12),(22);
+select * from t1 order by a;
+insert into t1 values(repeat('s',4000*10)),(repeat('s',4001*10));
+--error ER_DUP_ENTRY
+insert into t1 values(repeat('m',4000*10)),(repeat('m',4000*10));
+insert into t1 values(repeat('m',4001)),(repeat('m',4002));
+truncate table t1;
+insert into t1 values(1),(2),(3),(4),(5),(8),(7);
+replace_result $datadir DATADIR;
+exec $MYISAMCHK -d $datadir/test/t1;
+--echo #now some alter commands;
+alter table t1 add column b int;
+desc t1;
+show create table t1;
+--error ER_DUP_ENTRY
+insert into t1 values(1,2);
+--error ER_DUP_ENTRY
+insert into t1 values(2,2);
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1 from t1;
+--echo #now try to change db_row_hash_1 column;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 drop column db_row_hash_1;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 add column d int , add column e int , drop column db_row_hash_1;
+--error ER_BAD_FIELD_ERROR
+alter table t1 modify column db_row_hash_1 int ;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column a int , add column b int, modify column db_row_hash_1 int ;
+--error ER_BAD_FIELD_ERROR
+alter table t1 change column db_row_hash_1 dsds int;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column asd int, change column db_row_hash_1 dsds int;
+alter table t1 drop column b , add column c int;
+show create table t1;
+
+--echo #now add some column with name db_row_hash;
+alter table t1 add column db_row_hash_1 int unique;
+show create table t1;
+--error ER_DUP_ENTRY
+insert into t1 values(45,1,55),(46,1,55);
+--error ER_DUP_ENTRY
+insert into t1 values(45,1,55),(45,1,55);
+alter table t1 add column db_row_hash_2 int, add column db_row_hash_3 int;
+desc t1;
+--echo #this should also drop the unique index ;
+alter table t1 drop column a;
+show create table t1;
+show keys from t1;
+--echo #add column with unique index on blob ;
+alter table t1 add column a blob unique;
+show create table t1;
+--echo # try to change the blob unique name;
+alter table t1 change column a aa blob ;
+show create table t1;
+show keys from t1;
+--echo # try to change the blob unique datatype;
+--echo #this will change index to b tree;
+alter table t1 modify column aa int ;
+show create table t1;
+show keys from t1;
+alter table t1 add column clm blob unique;
+--echo #try changing the name ;
+alter table t1 change column clm clm_changed blob;
+show create table t1;
+show keys from t1;
+--echo #now drop the unique key;
+alter table t1 drop key clm;
+show create table t1;
+show keys from t1;
+drop table t1;
+
+create table t1 (a TEXT CHARSET latin1 COLLATE latin1_german2_ci unique);
+desc t1;
+show keys from t1;
+ insert into t1 values ('ae');
+--error ER_DUP_ENTRY
+insert into t1 values ('AE');
+insert into t1 values ('Ä');
+drop table t1;
+create table t1 (a int primary key, b blob unique);
+desc t1;
+show keys from t1;
+insert into t1 values(1,1),(2,2),(3,3);
+--error ER_DUP_ENTRY
+insert into t1 values(1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(7,1);
+drop table t1;
+
+--echo #table with multiple long blob column and varchar text column ;
+create table t1(a blob unique, b int , c blob unique , d text unique , e varchar(3000) unique);
+insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555),
+('sachin',341,'fdf','gfgfgfg','hghgr'),('maria',345,'frter','dasd','utyuty'),
+(123456789034567891,353534,53453453453456,64565464564564,45435345345345),
+(123456789034567890,43545,657567567567,78967657567567,657567567567567676);
+
+--echo #table structure;
+desc t1;
+show create table t1;
+show keys from t1;
+replace_result $datadir DATADIR;
+exec $MYISAMCHK -d $datadir/test/t1;
+select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+--echo #table select we should not be able to see db_row_hash_1 column;
+select * from t1 order by a;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1 from t1;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_2 from t1;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_3 from t1;
+--echo #duplicate entry test;
+--error ER_DUP_ENTRY
+insert into t1 values(21,2,3,42,51);
+--error ER_DUP_ENTRY
+insert into t1 values('sachin',null,null,null,null);
+--error ER_DUP_ENTRY
+insert into t1 values(1234567890345671890,4353451,6575675675617,789676575675617,657567567567567676);
+select * from t1 order by a;
+insert into t1 values(repeat('s',4000*10),100,repeat('s',4000*10),repeat('s',4000*10),
+repeat('s',400)),(repeat('s',4001*10),1000,repeat('s',4001*10),repeat('s',4001*10),
+repeat('s',2995));
+--error ER_DUP_ENTRY
+insert into t1 values(repeat('m',4000*11),10,repeat('s',4000*11),repeat('s',4000*11),repeat('s',2995));
+truncate table t1;
+insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555);
+--echo #now some alter commands;
+alter table t1 add column f int;
+desc t1;
+show create table t1;
+--echo #unique key should not break;
+--error ER_DUP_ENTRY
+insert into t1 values(1,2,3,4,5,6);
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1 , db_row_hash_2, db_row_hash_3 from t1;
+--echo #now try to change db_row_hash_1 column;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
+--error ER_BAD_FIELD_ERROR
+alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
+--error ER_BAD_FIELD_ERROR
+alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;
+alter table t1 drop column b , add column g int;
+show create table t1;
+
+--echo #now add some column with name db_row_hash;
+alter table t1 add column db_row_hash_1 int unique;
+alter table t1 add column db_row_hash_2 int unique;
+alter table t1 add column db_row_hash_3 int unique;
+show create table t1;
+
+alter table t1 add column db_row_hash_7 int, add column db_row_hash_5 int , add column db_row_hash_4 int ;
+alter table t1 drop column db_row_hash_7,drop column db_row_hash_3, drop column db_row_hash_4;
+desc t1;
+--echo #this should not break anything;
+--error ER_DUP_ENTRY
+insert into t1 values(1,2,3,4,5,6,23,5,6);
+--echo #this should also drop the unique index;
+alter table t1 drop column a, drop column c;
+show create table t1;
+show keys from t1;
+--echo #add column with unique index on blob;
+alter table t1 add column a blob unique;
+show create table t1;
+show keys from t1;
+--echo #try to change the blob unique column name;
+--echo #this will change index to b tree;
+alter table t1 modify column a int , modify column e int;
+show create table t1;
+show keys from t1;
+alter table t1 add column clm1 blob unique,add column clm2 blob unique;
+--echo #try changing the name;
+alter table t1 change column clm1 clm_changed1 blob, change column clm2 clm_changed2 blob;
+show create table t1;
+show keys from t1;
+--echo #now drop the unique key;
+alter table t1 drop key clm1, drop key clm2;
+show create table t1;
+show keys from t1;
+drop table t1;
+--echo #now the table with key on multiple columns; the ultimate test;
+create table t1(a blob, b int , c varchar(2000) , d text , e varchar(3000) , f longblob , g int , h text ,
+ unique(a,b,c), unique(c,d,e),unique(e,f,g,h), unique(b,d,g,h));
+
+insert into t1 values(1,1,1,1,1,1,1,1),(2,2,2,2,2,2,2,2),(3,3,3,3,3,3,3,3),(4,4,4,4,4,4,4,4),(5,5,5,5,5,5,5,5),
+('maria',6,'maria','maria','maria','maria',6,'maria'),('mariadb',7,'mariadb','mariadb','mariadb','mariadb',8,'mariadb')
+,(null,null,null,null,null,null,null,null),(null,null,null,null,null,null,null,null);
+
+--echo #table structure;
+desc t1;
+show create table t1;
+show keys from t1;
+replace_result $datadir DATADIR;
+exec $MYISAMCHK -d $datadir/test/t1;
+select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+--echo # table select we should not be able to see db_row_hash_1 column;
+select * from t1 order by a;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1 from t1;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_2 from t1;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_3 from t1;
+--echo #duplicate entry test;
+--echo #duplicate keys entry;
+--error ER_DUP_ENTRY
+insert into t1 values(1,1,1,0,0,0,0,0);
+--error ER_DUP_ENTRY
+insert into t1 values(0,0,1,1,1,0,0,0);
+--error ER_DUP_ENTRY
+insert into t1 values(0,0,0,0,1,1,1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(1,1,1,1,1,0,0,0);
+--error ER_DUP_ENTRY
+insert into t1 values(0,0,0,0,1,1,1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(1,1,1,1,1,1,1,1);
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1,db_row_hash_2,db_row_hash_3,db_row_hash_4,db_row_hash_5 from t1;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
+--error ER_BAD_FIELD_ERROR
+alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
+--error ER_BAD_FIELD_ERROR
+alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;
+
+show create table t1;
+--echo # add column named db_row_hash_*;
+alter table t1 add column db_row_hash_7 int , add column db_row_hash_5 int,
+ add column db_row_hash_1 int, add column db_row_hash_2 int;
+show create table t1;
+show keys from t1;
+alter table t1 drop column db_row_hash_7 , drop column db_row_hash_5 ,
+ drop column db_row_hash_1, drop column db_row_hash_2 ;
+show create table t1;
+show keys from t1;
+
+--echo #try to change column names;
+alter table t1 change column a aa blob , change column b bb blob , change column d dd blob;
+show create table t1;
+show keys from t1;
+alter table t1 change column aa a blob , change column bb b blob , change column dd d blob;
+show create table t1;
+show keys from t1;
+
+--echo #now we will change the data type to int and varchar limit so that we no longer require hash_index;
+--echo #on key a_b_c;
+alter table t1 modify column a varchar(20) , modify column b varchar(20) , modify column c varchar(20);
+show create table t1;
+show keys from t1;
+--echo #change it back;
+alter table t1 modify column a blob , modify column b blob , modify column c blob;
+show create table t1;
+show keys from t1;
+
+--echo #try to delete blob column in unique;
+truncate table t1;
+## this feature removed in 10.2
+#alter table t1 drop column a, drop column b, drop column c;
+#show create table t1;
+#show keys from t1;
+--echo #now try to delete keys;
+alter table t1 drop key c, drop key e;
+show create table t1;
+show keys from t1;
+drop table t1;
+
+--echo #now alter table containing some data basically some tests with ignore;
+create table t1 (a blob);
+insert into t1 values(1),(2),(3);
+--echo #normal alter table;
+alter table t1 add unique key(a);
+alter table t1 drop key a;
+truncate table t1;
+insert into t1 values(1),(1),(2),(2),(3);
+--error ER_DUP_ENTRY
+alter table t1 add unique key(a);
+alter ignore table t1 add unique key(a);
+select * from t1 order by a;
+--error ER_DUP_ENTRY
+insert into t1 values(1);
+show create table t1;
+show keys from t1;
+drop table t1;
+
+--echo #Now with multiple keys;
+create table t1(a blob , b blob, c blob , d blob , e int);
+insert into t1 values (1,1,1,1,1);
+insert into t1 values (1,1,1,1,1);
+insert into t1 values (2,1,1,1,1);
+insert into t1 values (2,2,2,2,2);
+insert into t1 values (3,3,4,4,4);
+insert into t1 values (4,4,4,4,4);
+--error ER_DUP_ENTRY
+alter table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
+alter ignore table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
+select * from t1 order by a;
+--error ER_DUP_ENTRY
+insert into t1 values (1,12,1,13,14);
+--error ER_DUP_ENTRY
+insert into t1 values (12,1,14,1,14);
+--error ER_DUP_ENTRY
+insert into t1 values (13,12,13,14,4);
+show create table t1;
+show keys from t1;
+drop table t1;
+
+--echo #visibility of db_row_hash
+create table t1 (a blob unique , b blob unique);
+desc t1;
+insert into t1 values(1,19);
+insert into t1 values(2,29);
+insert into t1 values(3,39);
+insert into t1 values(4,49);
+create table t2 (DB_ROW_HASH_1 int, DB_ROW_HASH_2 int);
+insert into t2 values(11,1);
+insert into t2 values(22,2);
+insert into t2 values(33,3);
+insert into t2 values(44,4);
+select * from t1 order by a;
+select * from t2 order by DB_ROW_HASH_1;
+--error ER_BAD_FIELD_ERROR
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1;
+--echo #bug
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2;
+--error ER_BAD_FIELD_ERROR
+select * from t1 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
+select * from t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t1);
+--error ER_BAD_FIELD_ERROR
+select DB_ROW_HASH_1 from t1,t2 where t1.DB_ROW_HASH_1 = t2.DB_ROW_HASH_2;
+select DB_ROW_HASH_1 from t1 inner join t2 on t1.a = t2.DB_ROW_HASH_2;
+drop table t1,t2;
+
+--echo #very long blob entry;
+SET @@GLOBAL.max_allowed_packet=67108864;
+
+connect ('newcon', localhost, root,,);
+--connection newcon
+show variables like 'max_allowed_packet';
+create table t1(a longblob unique, b longblob , c longblob , unique(b,c));
+desc t1;
+show create table t1;
+show keys from t1;
+insert into t1 values(concat(repeat('sachin',10000000),'1'),concat(repeat('sachin',10000000),'1'),
+concat(repeat('sachin',10000000),'1'));
+insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
+concat(repeat('sachin',10000000),'1'));
+--error ER_DUP_ENTRY
+insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
+concat(repeat('sachin',10000000),'4'));
+--error ER_DUP_ENTRY
+insert into t1 values(concat(repeat('sachin',10000000),'3'),concat(repeat('sachin',10000000),'1'),
+concat(repeat('sachin',10000000),'1'));
+drop table t1;
+
+--echo #long key unique with different key length
+create table t1(a blob, unique(a(3000)));
+desc t1;
+show keys from t1;
+show create table t1;
+insert into t1 value(concat(repeat('s',3000),'1'));
+--error ER_DUP_ENTRY
+insert into t1 value(concat(repeat('s',3000),'2'));
+insert into t1 value(concat(repeat('a',3000),'2'));
+drop table t1;
+
+create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob,
+unique(a(3500), b), unique(c(4500), d));
+desc t1;
+show create table t1;
+show keys from t1;
+drop table t1;
+disconnect newcon;
+--connection default
+SET @@GLOBAL.max_allowed_packet=4194304;
+--echo #ext bug
+create table t1(a int primary key, b blob unique, c int, d blob , index(c));
+show create table t1;
+insert into t1 values(1,23,1,33);
+--error ER_DUP_ENTRY
+insert into t1 values(2,23,1,33);
+drop table t1;
+create table t2 (a blob unique , c int , index(c));
+show create table t2;
+insert into t2 values(1,1);
+insert into t2 values(2,1);
+drop table t2;
+--echo #not null test
+create table t1(a blob unique not null);
+desc t1;
+show create table t1;
+insert into t1 values(1);
+insert into t1 values(3);
+--error ER_DUP_ENTRY
+insert into t1 values(1);
+drop table t1;
+create table t1(a int primary key, b blob unique , c blob unique not null);
+insert into t1 values(1,1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(2,1,2);
+--error ER_DUP_ENTRY
+insert into t1 values(3,3,1);
+drop table t1;
+create table t1 (a blob unique not null, b blob not null, c blob not null, unique(b,c));
+desc t1;
+show create table t1;
+insert into t1 values (1, 2, 3);
+insert into t1 values (2, 1, 3);
+--error ER_DUP_ENTRY
+insert into t1 values (2, 1, 3);
+drop table t1;
+
+--echo #partition
+--error ER_BLOB_FIELD_IN_PART_FUNC_ERROR
+create table t1(a blob unique) partition by hash(a);
+--echo #key length > 2^16 -1
+--error ER_TOO_LONG_HASH_KEYSEG
+create table t1(a blob, unique(a(65536)));
+create table t1(a blob, unique(a(65535)));
+show create table t1;
+drop table t1;
+
+--echo #64 indexes
+--let $create_table=create table t1 (
+--let $insert_data_1=insert into t1 values(
+--let $insert_data_2=insert into t1 values(
+--let $count= 63
+--let $index= 0
+while ($count)
+{
+ --let $create_table=$create_table a$count blob unique,
+ --let $insert_data_1=$insert_data_1 $count,
+ --let $insert_data_2=$insert_data_2 $index,
+ --dec $count
+ --inc $index
+}
+--let $create_table=$create_table a blob unique);
+--let $insert_data_1=$insert_data_1 0);
+--let $insert_data_2=$insert_data_2 63);
+
+--eval $create_table
+--eval $insert_data_1
+--error ER_DUP_ENTRY
+--eval $insert_data_1
+--eval $insert_data_2
+--error ER_DUP_ENTRY
+--eval $insert_data_2
+drop table t1;
+
+set @@GLOBAL.max_allowed_packet= @allowed_packet;
diff --git a/mysql-test/main/long_unique_debug.result b/mysql-test/main/long_unique_debug.result
new file mode 100644
index 00000000000..fb56a9d024b
--- /dev/null
+++ b/mysql-test/main/long_unique_debug.result
@@ -0,0 +1,579 @@
+#In this test case we will check what will happen in the case of hash collision
+SET debug_dbug="d,same_long_unique_hash";
+create table t1(a blob unique);
+FLUSH STATUS;
+insert into t1 values('xyz');
+insert into t1 values('abc');
+insert into t1 values('sachin');
+insert into t1 values('sachin');
+ERROR 23000: Duplicate entry 'sachin' for key 'a'
+insert into t1 values('maria');
+insert into t1 values('maria');
+ERROR 23000: Duplicate entry 'maria' for key 'a'
+drop table t1;
+SHOW STATUS LIKE 'handler_read_next';
+Variable_name Value
+Handler_read_next 11
+SET debug_dbug="";
+create table t1(a blob unique);
+FLUSH STATUS;
+insert into t1 values('xyz');
+insert into t1 values('abc');
+insert into t1 values('sachin');
+insert into t1 values('sachin');
+ERROR 23000: Duplicate entry 'sachin' for key 'a'
+insert into t1 values('maria');
+insert into t1 values('maria');
+ERROR 23000: Duplicate entry 'maria' for key 'a'
+drop table t1;
+SHOW STATUS LIKE 'handler_read_next';
+Variable_name Value
+Handler_read_next 0
+SET debug_dbug="d,same_long_unique_hash";
+create table t1(a blob unique, b blob unique);
+insert into t1 values('xyz', 11);
+insert into t1 values('abc', 22);
+insert into t1 values('sachin', 1);
+insert into t1 values('sachin', 4);
+ERROR 23000: Duplicate entry 'sachin' for key 'a'
+insert into t1 values('maria', 2);
+insert into t1 values('maria', 3);
+ERROR 23000: Duplicate entry 'maria' for key 'a'
+drop table t1;
+create table t1(a blob , b blob , unique(a,b));
+insert into t1 values('xyz', 11);
+insert into t1 values('abc', 22);
+insert into t1 values('sachin', 1);
+insert into t1 values('sachin', 1);
+ERROR 23000: Duplicate entry 'sachin-1' for key 'a'
+insert into t1 values('maria', 2);
+insert into t1 values('maria', 2);
+ERROR 23000: Duplicate entry 'maria-2' for key 'a'
+drop table t1;
+##Internal State of long unique tables
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob unique);
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 19
+table->s->fields 2
+
+table->key_info[0] user_defined_key_parts = 1
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[0] flags & HA_NOSAME = 0
+
+table->s->key_info[0] user_defined_key_parts = 1
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 11
+key_part->field_name = DB_ROW_HASH_1
+key_part->length = 8
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 0
+key_part->null_bit = 1
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 10
+table->field[0]->null_pos wrt to record 0 = 0
+table->field[0]->null_bit_pos = 1
+
+table->field[1]->field_name DB_ROW_HASH_1
+table->field[1]->offset = 11
+table->field[1]->field_length = 8
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 2
+
+SET debug_dbug="";
+drop table t1;
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob unique, b blob unique , c blob unique);
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 55
+table->s->fields 6
+
+table->key_info[0] user_defined_key_parts = 1
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[0] flags & HA_NOSAME = 0
+
+table->s->key_info[0] user_defined_key_parts = 1
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 31
+key_part->field_name = DB_ROW_HASH_1
+key_part->length = 8
+key_part->null_bit = 8
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 0
+key_part->null_bit = 1
+key_part->null_offset = 0
+
+table->key_info[1] user_defined_key_parts = 1
+table->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[1] flags & HA_NOSAME = 0
+
+table->s->key_info[1] user_defined_key_parts = 1
+table->s->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[1] flags & HA_NOSAME = 1
+
+Printing table->key_info[1].key_part[0] info
+key_part->offset = 39
+key_part->field_name = DB_ROW_HASH_2
+key_part->length = 8
+key_part->null_bit = 16
+key_part->null_offset = 0
+
+Printing share->key_info[1].key_part[0] info
+key_part->offset = 11
+key_part->field_name = b
+key_part->length = 0
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+table->key_info[2] user_defined_key_parts = 1
+table->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[2] flags & HA_NOSAME = 0
+
+table->s->key_info[2] user_defined_key_parts = 1
+table->s->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[2] flags & HA_NOSAME = 1
+
+Printing table->key_info[2].key_part[0] info
+key_part->offset = 47
+key_part->field_name = DB_ROW_HASH_3
+key_part->length = 8
+key_part->null_bit = 32
+key_part->null_offset = 0
+
+Printing share->key_info[2].key_part[0] info
+key_part->offset = 21
+key_part->field_name = c
+key_part->length = 0
+key_part->null_bit = 4
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 10
+table->field[0]->null_pos wrt to record 0 = 0
+table->field[0]->null_bit_pos = 1
+
+table->field[1]->field_name b
+table->field[1]->offset = 11
+table->field[1]->field_length = 10
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 2
+
+table->field[2]->field_name c
+table->field[2]->offset = 21
+table->field[2]->field_length = 10
+table->field[2]->null_pos wrt to record 0 = 0
+table->field[2]->null_bit_pos = 4
+
+table->field[3]->field_name DB_ROW_HASH_1
+table->field[3]->offset = 31
+table->field[3]->field_length = 8
+table->field[3]->null_pos wrt to record 0 = 0
+table->field[3]->null_bit_pos = 8
+
+table->field[4]->field_name DB_ROW_HASH_2
+table->field[4]->offset = 39
+table->field[4]->field_length = 8
+table->field[4]->null_pos wrt to record 0 = 0
+table->field[4]->null_bit_pos = 16
+
+table->field[5]->field_name DB_ROW_HASH_3
+table->field[5]->offset = 47
+table->field[5]->field_length = 8
+table->field[5]->null_pos wrt to record 0 = 0
+table->field[5]->null_bit_pos = 32
+
+SET debug_dbug="";
+drop table t1;
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob , b blob , c blob , d blob , unique (a,b), unique(c, d));
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 57
+table->s->fields 6
+
+table->key_info[0] user_defined_key_parts = 1
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[0] flags & HA_NOSAME = 0
+
+table->s->key_info[0] user_defined_key_parts = 2
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 41
+key_part->field_name = DB_ROW_HASH_1
+key_part->length = 8
+key_part->null_bit = 16
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 0
+key_part->null_bit = 1
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[1] info
+key_part->offset = 11
+key_part->field_name = b
+key_part->length = 0
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+table->key_info[1] user_defined_key_parts = 1
+table->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[1] flags & HA_NOSAME = 0
+
+table->s->key_info[1] user_defined_key_parts = 2
+table->s->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[1] flags & HA_NOSAME = 1
+
+Printing table->key_info[1].key_part[0] info
+key_part->offset = 49
+key_part->field_name = DB_ROW_HASH_2
+key_part->length = 8
+key_part->null_bit = 32
+key_part->null_offset = 0
+
+Printing share->key_info[1].key_part[0] info
+key_part->offset = 21
+key_part->field_name = c
+key_part->length = 0
+key_part->null_bit = 4
+key_part->null_offset = 0
+
+Printing share->key_info[1].key_part[1] info
+key_part->offset = 31
+key_part->field_name = d
+key_part->length = 0
+key_part->null_bit = 8
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 10
+table->field[0]->null_pos wrt to record 0 = 0
+table->field[0]->null_bit_pos = 1
+
+table->field[1]->field_name b
+table->field[1]->offset = 11
+table->field[1]->field_length = 10
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 2
+
+table->field[2]->field_name c
+table->field[2]->offset = 21
+table->field[2]->field_length = 10
+table->field[2]->null_pos wrt to record 0 = 0
+table->field[2]->null_bit_pos = 4
+
+table->field[3]->field_name d
+table->field[3]->offset = 31
+table->field[3]->field_length = 10
+table->field[3]->null_pos wrt to record 0 = 0
+table->field[3]->null_bit_pos = 8
+
+table->field[4]->field_name DB_ROW_HASH_1
+table->field[4]->offset = 41
+table->field[4]->field_length = 8
+table->field[4]->null_pos wrt to record 0 = 0
+table->field[4]->null_bit_pos = 16
+
+table->field[5]->field_name DB_ROW_HASH_2
+table->field[5]->offset = 49
+table->field[5]->field_length = 8
+table->field[5]->null_pos wrt to record 0 = 0
+table->field[5]->null_bit_pos = 32
+
+SET debug_dbug="";
+drop table t1;
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int primary key, b blob unique , c blob unique not null);
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 41
+table->s->fields 5
+
+table->key_info[0] user_defined_key_parts = 1
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
+table->key_info[0] flags & HA_NOSAME = 1
+
+table->s->key_info[0] user_defined_key_parts = 1
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 4
+key_part->null_bit = 0
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 4
+key_part->null_bit = 0
+key_part->null_offset = 0
+
+table->key_info[1] user_defined_key_parts = 1
+table->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[1] flags & HA_NOSAME = 0
+
+table->s->key_info[1] user_defined_key_parts = 1
+table->s->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[1] flags & HA_NOSAME = 1
+
+Printing table->key_info[1].key_part[0] info
+key_part->offset = 25
+key_part->field_name = DB_ROW_HASH_1
+key_part->length = 8
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+Printing share->key_info[1].key_part[0] info
+key_part->offset = 5
+key_part->field_name = b
+key_part->length = 0
+key_part->null_bit = 1
+key_part->null_offset = 0
+
+table->key_info[2] user_defined_key_parts = 1
+table->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[2] flags & HA_NOSAME = 0
+
+table->s->key_info[2] user_defined_key_parts = 1
+table->s->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[2] flags & HA_NOSAME = 1
+
+Printing table->key_info[2].key_part[0] info
+key_part->offset = 33
+key_part->field_name = DB_ROW_HASH_2
+key_part->length = 8
+key_part->null_bit = 0
+key_part->null_offset = 0
+
+Printing share->key_info[2].key_part[0] info
+key_part->offset = 15
+key_part->field_name = c
+key_part->length = 0
+key_part->null_bit = 0
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 4
+table->field[0]->null_pos wrt to record 0 = -1
+table->field[0]->null_bit_pos = 0
+
+table->field[1]->field_name b
+table->field[1]->offset = 5
+table->field[1]->field_length = 10
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 1
+
+table->field[2]->field_name c
+table->field[2]->offset = 15
+table->field[2]->field_length = 10
+table->field[2]->null_pos wrt to record 0 = -1
+table->field[2]->null_bit_pos = 0
+
+table->field[3]->field_name DB_ROW_HASH_1
+table->field[3]->offset = 25
+table->field[3]->field_length = 8
+table->field[3]->null_pos wrt to record 0 = 0
+table->field[3]->null_bit_pos = 2
+
+table->field[4]->field_name DB_ROW_HASH_2
+table->field[4]->offset = 33
+table->field[4]->field_length = 8
+table->field[4]->null_pos wrt to record 0 = -1
+table->field[4]->null_bit_pos = 0
+
+SET debug_dbug="";
+drop table t1;
+##Using hash
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int ,b int , c int, unique(a, b, c) using hash);
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 21
+table->s->fields 4
+
+table->key_info[0] user_defined_key_parts = 1
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[0] flags & HA_NOSAME = 0
+
+table->s->key_info[0] user_defined_key_parts = 3
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 13
+key_part->field_name = DB_ROW_HASH_1
+key_part->length = 8
+key_part->null_bit = 16
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 4
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[1] info
+key_part->offset = 5
+key_part->field_name = b
+key_part->length = 4
+key_part->null_bit = 4
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[2] info
+key_part->offset = 9
+key_part->field_name = c
+key_part->length = 4
+key_part->null_bit = 8
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 4
+table->field[0]->null_pos wrt to record 0 = 0
+table->field[0]->null_bit_pos = 2
+
+table->field[1]->field_name b
+table->field[1]->offset = 5
+table->field[1]->field_length = 4
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 4
+
+table->field[2]->field_name c
+table->field[2]->offset = 9
+table->field[2]->field_length = 4
+table->field[2]->null_pos wrt to record 0 = 0
+table->field[2]->null_bit_pos = 8
+
+table->field[3]->field_name DB_ROW_HASH_1
+table->field[3]->offset = 13
+table->field[3]->field_length = 8
+table->field[3]->null_pos wrt to record 0 = 0
+table->field[3]->null_bit_pos = 16
+
+SET debug_dbug="";
+drop table t1;
+##Using hash but with memory engine so no long unique column
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 13
+table->s->fields 3
+
+table->key_info[0] user_defined_key_parts = 3
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
+table->key_info[0] flags & HA_NOSAME = 1
+
+table->s->key_info[0] user_defined_key_parts = 3
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 4
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 4
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[1] info
+key_part->offset = 5
+key_part->field_name = b
+key_part->length = 4
+key_part->null_bit = 4
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[2] info
+key_part->offset = 9
+key_part->field_name = c
+key_part->length = 4
+key_part->null_bit = 8
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 4
+table->field[0]->null_pos wrt to record 0 = 0
+table->field[0]->null_bit_pos = 2
+
+table->field[1]->field_name b
+table->field[1]->offset = 5
+table->field[1]->field_length = 4
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 4
+
+table->field[2]->field_name c
+table->field[2]->offset = 9
+table->field[2]->field_length = 4
+table->field[2]->null_pos wrt to record 0 = 0
+table->field[2]->null_bit_pos = 8
+
+SET debug_dbug="";
+drop table t1;
diff --git a/mysql-test/main/long_unique_debug.test b/mysql-test/main/long_unique_debug.test
new file mode 100644
index 00000000000..63ebfa89b48
--- /dev/null
+++ b/mysql-test/main/long_unique_debug.test
@@ -0,0 +1,91 @@
+--source include/have_debug.inc
+--source include/have_innodb.inc
+--echo #In this test case we will check what will happen in the case of hash collision
+
+SET debug_dbug="d,same_long_unique_hash";
+create table t1(a blob unique);
+
+FLUSH STATUS;
+insert into t1 values('xyz');
+insert into t1 values('abc');
+insert into t1 values('sachin');
+--error ER_DUP_ENTRY
+insert into t1 values('sachin');
+insert into t1 values('maria');
+--error ER_DUP_ENTRY
+insert into t1 values('maria');
+drop table t1;
+SHOW STATUS LIKE 'handler_read_next';
+
+SET debug_dbug="";
+create table t1(a blob unique);
+FLUSH STATUS;
+
+insert into t1 values('xyz');
+insert into t1 values('abc');
+insert into t1 values('sachin');
+--error ER_DUP_ENTRY
+insert into t1 values('sachin');
+insert into t1 values('maria');
+--error ER_DUP_ENTRY
+insert into t1 values('maria');
+drop table t1;
+SHOW STATUS LIKE 'handler_read_next';
+
+SET debug_dbug="d,same_long_unique_hash";
+create table t1(a blob unique, b blob unique);
+
+insert into t1 values('xyz', 11);
+insert into t1 values('abc', 22);
+insert into t1 values('sachin', 1);
+--error ER_DUP_ENTRY
+insert into t1 values('sachin', 4);
+insert into t1 values('maria', 2);
+--error ER_DUP_ENTRY
+insert into t1 values('maria', 3);
+drop table t1;
+
+create table t1(a blob , b blob , unique(a,b));
+
+insert into t1 values('xyz', 11);
+insert into t1 values('abc', 22);
+insert into t1 values('sachin', 1);
+--error ER_DUP_ENTRY
+insert into t1 values('sachin', 1);
+insert into t1 values('maria', 2);
+--error ER_DUP_ENTRY
+insert into t1 values('maria', 2);
+drop table t1;
+
+--echo ##Internal State of long unique tables
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob unique);
+SET debug_dbug="";
+drop table t1;
+
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob unique, b blob unique , c blob unique);
+SET debug_dbug="";
+drop table t1;
+
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob , b blob , c blob , d blob , unique (a,b), unique(c, d));
+SET debug_dbug="";
+drop table t1;
+
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int primary key, b blob unique , c blob unique not null);
+SET debug_dbug="";
+drop table t1;
+
+--echo ##Using hash
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int ,b int , c int, unique(a, b, c) using hash);
+SET debug_dbug="";
+drop table t1;
+
+--echo ##Using hash but with memory engine so no long unique column
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
+SET debug_dbug="";
+drop table t1;
diff --git a/mysql-test/main/long_unique_innodb.result b/mysql-test/main/long_unique_innodb.result
new file mode 100644
index 00000000000..efbddfb30a8
--- /dev/null
+++ b/mysql-test/main/long_unique_innodb.result
@@ -0,0 +1,123 @@
+create table t1(a blob unique) engine= InnoDB;
+insert into t1 values('RUC');
+insert into t1 values ('RUC');
+ERROR 23000: Duplicate entry 'RUC' for key 'a'
+drop table t1;
+#test for concurrent insert of long unique in innodb
+create table t1(a blob unique) engine= InnoDB;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+connect 'con1', localhost, root,,;
+connect 'con2', localhost, root,,;
+connection con1;
+set innodb_lock_wait_timeout= 2;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values('RUC');
+connection con2;
+set innodb_lock_wait_timeout= 2;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values ('RUC');
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+commit;
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values('RC');
+connection con2;
+commit;
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values ('RC');
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+commit;
+connection con1;
+commit;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values('RR');
+connection con2;
+commit;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values ('RR');
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+commit;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values('S');
+connection con2;
+commit;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values ('S');
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+commit;
+connection con1;
+commit;
+select * from t1;
+a
+RUC
+RC
+RR
+S
+drop table t1;
+create table t1(a blob unique) engine=Innodb;
+connection con1;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values('RUC');
+connection con2;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values ('RUC');;
+connection con1;
+rollback;
+connection con2;
+commit;
+connection con1;
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values('RC');
+connection con2;
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values ('RC');;
+connection con1;
+rollback;
+connection con2;
+commit;
+connection con1;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values('RR');
+connection con2;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values ('RR');;
+connection con1;
+rollback;
+connection con2;
+commit;
+connection con1;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values('S');
+connection con2;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values ('S');;
+connection con1;
+rollback;
+connection con2;
+commit;
+connection default;
+drop table t1;
+disconnect con1;
+disconnect con2;
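
A note on the results above: the second connection gets a lock wait timeout
rather than an immediate ER_DUP_ENTRY because the duplicate check reads the
hidden hash index, which blocks on the first connection's uncommitted row.
A minimal sketch of the pattern, assuming two ordinary client sessions (an
illustration only, not part of the test files):

    -- session 1
    create table t1 (a blob unique) engine=InnoDB;
    start transaction;
    insert into t1 values ('x');  -- locks the matching hash index entry

    -- session 2
    set innodb_lock_wait_timeout= 2;
    insert into t1 values ('x');  -- blocks on session 1's lock; after a
                                  -- commit it fails with ER_DUP_ENTRY, after
                                  -- a rollback it succeeds
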
diff --git a/mysql-test/main/long_unique_innodb.test b/mysql-test/main/long_unique_innodb.test
new file mode 100644
index 00000000000..f3c7f7d952d
--- /dev/null
+++ b/mysql-test/main/long_unique_innodb.test
@@ -0,0 +1,135 @@
+--source include/have_innodb.inc
+
+create table t1(a blob unique) engine= InnoDB;
+insert into t1 values('RUC');
+--error ER_DUP_ENTRY
+insert into t1 values ('RUC');
+drop table t1;
+
+--echo #test for concurrent inserts of long unique keys in innodb
+create table t1(a blob unique) engine= InnoDB;
+show create table t1;
+connect ('con1', localhost, root,,);
+connect ('con2', localhost, root,,);
+
+--connection con1
+set innodb_lock_wait_timeout= 2;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values('RUC');
+--connection con2
+set innodb_lock_wait_timeout= 2;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values ('RUC');
+
+--connection con1
+commit;
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values('RC');
+--connection con2
+commit;
+set transaction isolation level READ COMMITTED;
+start transaction;
+--error ER_DUP_ENTRY
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values ('RC');
+commit;
+
+--connection con1
+commit;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values('RR');
+--connection con2
+commit;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+--error ER_DUP_ENTRY
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values ('RR');
+
+--connection con1
+commit;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values('S');
+--connection con2
+commit;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+--error ER_DUP_ENTRY
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values ('S');
+commit;
+
+--connection con1
+commit;
+
+select * from t1;
+drop table t1;
+
+create table t1(a blob unique) engine=Innodb;
+
+--connection con1
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values('RUC');
+--connection con2
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+--send insert into t1 values ('RUC');
+--connection con1
+rollback;
+--connection con2
+--reap
+commit;
+
+--connection con1
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values('RC');
+--connection con2
+set transaction isolation level READ COMMITTED;
+start transaction;
+--send insert into t1 values ('RC');
+--connection con1
+rollback;
+--connection con2
+--reap
+commit;
+
+--connection con1
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values('RR');
+--connection con2
+set transaction isolation level REPEATABLE READ;
+start transaction;
+--send insert into t1 values ('RR');
+--connection con1
+rollback;
+--connection con2
+--reap
+commit;
+
+--connection con1
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values('S');
+--connection con2
+set transaction isolation level SERIALIZABLE;
+start transaction;
+--send insert into t1 values ('S');
+--connection con1
+rollback;
+--connection con2
+--reap
+commit;
+
+connection default;
+drop table t1;
+disconnect con1;
+disconnect con2;
diff --git a/mysql-test/main/long_unique_update.result b/mysql-test/main/long_unique_update.result
new file mode 100644
index 00000000000..60a4fb46558
--- /dev/null
+++ b/mysql-test/main/long_unique_update.result
@@ -0,0 +1,317 @@
+#structure of tests;
+#1 test of table containing a single unique blob column;
+#2 test of table also containing other unique int/varchar columns;
+#3 test of table containing multiple unique blob columns like unique(a),unique(b);
+#4 test of table containing multiple multi-column unique blob keys like unique(a,b...),unique(c,d....);
+#structure of each test;
+#test if update works;
+#test update for duplicate entry;
+#test update for no change in keys;
+#test update with ignore;
+#test 1
+create table t1 (a blob unique);
+show keys from t1;
+Table t1
+Non_unique 0
+Key_name a
+Seq_in_index 1
+Column_name a
+Collation A
+Cardinality NULL
+Sub_part NULL
+Packed NULL
+Null YES
+Index_type HASH
+Comment
+Index_comment
+insert into t1 values(1),(2),(3),(4),(5);
+select * from t1;
+a
+1
+2
+3
+4
+5
+update t1 set a=11 where a=5;
+update t1 set a=a+20 where a=1;
+select * from t1;
+a
+21
+2
+3
+4
+11
+update t1 set a=3 where a=2;
+ERROR 23000: Duplicate entry '3' for key 'a'
+update t1 set a=4 where a=3;
+ERROR 23000: Duplicate entry '4' for key 'a'
+#no change in blob key
+update t1 set a=3 where a=3;
+update t1 set a=2 where a=2;
+select* from t1;
+a
+21
+2
+3
+4
+11
+#IGNORE;
+update ignore t1 set a=3 where a=2;
+update ignore t1 set a=4 where a=3;
+select * from t1;
+a
+21
+2
+3
+4
+11
+drop table t1;
+#test 2;
+create table t1 (a int primary key, b blob unique , c int unique );
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 0 NULL NULL BTREE
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES BTREE
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+select * from t1 limit 3;
+a b c
+1 1 1
+2 2 2
+3 3 3
+update t1 set b=34 where a=1;
+update t1 set b=a+c+b+34 where b=2;
+update t1 set b=a+10+b where c=3;
+select * from t1;
+a b c
+1 34 1
+2 40 2
+3 16 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+truncate table t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+update t1 set b=4 where a=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+update t1 set b=a+1 where b=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+update t1 set b=a+1 where c=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+#no change in blob key
+update t1 set b=3 where a=3;
+update t1 set b=2 where b=2;
+update t1 set b=5 where c=5;
+select* from t1;
+a b c
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+#IGNORE;
+update ignore t1 set b=3 where a=2;
+update ignore t1 set b=4 where b=3;
+update ignore t1 set b=5 where c=3;
+select * from t1;
+a b c
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+drop table t1;
+#test 3;
+create table t1 (a blob unique, b blob unique , c blob unique);
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+select * from t1 limit 3;
+a b c
+1 1 1
+2 2 2
+3 3 3
+update t1 set b=34 where a=1;
+update t1 set b=a+c+b+34 where b=2;
+update t1 set b=a+10+b where c=3;
+select * from t1;
+a b c
+1 34 1
+2 40 2
+3 16 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+truncate table t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+update t1 set b=4 where a=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+update t1 set b=a+1 where b=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+update t1 set b=a+1 where c=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+#no change in blob key
+update t1 set b=3 where a=3;
+update t1 set b=2 where b=2;
+update t1 set b=5 where c=5;
+select* from t1;
+a b c
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+#IGNORE;
+update ignore t1 set b=3 where a=2;
+update ignore t1 set b=4 where b=3;
+update ignore t1 set b=5 where c=3;
+update ignore t1 set b=b+3 where a>1 or b>1 or c>1;
+select * from t1;
+a b c
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 8 5
+6 9 6
+7 10 7
+update ignore t1 set b=b+5 where a>1 and b<5 and c<a+b;
+select * from t1;
+a b c
+1 1 1
+2 7 2
+3 3 3
+4 4 4
+5 8 5
+6 9 6
+7 10 7
+drop table t1;
+#test 4 ultimate test;
+create table t1 (a int primary key , b int, c blob , d blob , e varchar(2000), f int , g text,
+unique (b,c), unique (b,f),unique(e,g),unique(a,b,c,d,e,f,g));
+desc t1;
+Field Type Null Key Default Extra
+a int(11) NO PRI NULL
+b int(11) YES MUL NULL
+c blob YES NULL
+d blob YES NULL
+e varchar(2000) YES MUL NULL
+f int(11) YES NULL
+g text YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` varchar(2000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` text DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ UNIQUE KEY `b` (`b`,`c`) USING HASH,
+ UNIQUE KEY `b_2` (`b`,`f`),
+ UNIQUE KEY `e` (`e`,`g`) USING HASH,
+ UNIQUE KEY `a` (`a`,`b`,`c`,`d`,`e`,`f`,`g`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 0 NULL NULL BTREE
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 c A NULL NULL NULL YES HASH
+t1 0 b_2 1 b A NULL NULL NULL YES BTREE
+t1 0 b_2 2 f A NULL NULL NULL YES BTREE
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 g A NULL NULL NULL YES HASH
+t1 0 a 1 a A NULL NULL NULL HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 a 4 d A NULL NULL NULL YES HASH
+t1 0 a 5 e A NULL NULL NULL YES HASH
+t1 0 a 6 f A NULL NULL NULL YES HASH
+t1 0 a 7 g A NULL NULL NULL YES HASH
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+select * from t1 limit 3;
+a b c d e f g
+1 1 1 1 1 1 1
+2 2 2 2 2 2 2
+3 3 3 3 3 3 3
+#key b_c
+update t1 set b=2 ,c=2 where a=1;
+ERROR 23000: Duplicate entry '2-2' for key 'b'
+update t1 set b=b+34, c=c+34 where e=1 and g=1 ;
+update t1 set b=35, c=35 where e=1 and g=1 ;
+update t1 set b=b+1, c=c+1 where a>0;
+ERROR 23000: Duplicate entry '3-3' for key 'b'
+update ignore t1 set b=b+1, c=c+1 where a>0;
+select * from t1 ;
+a b c d e f g
+1 37 37 1 1 1 1
+2 2 2 2 2 2 2
+3 3 3 3 3 3 3
+4 4 4 4 4 4 4
+5 5 5 5 5 5 5
+6 6 6 6 6 6 6
+7 7 7 7 7 7 7
+8 8 8 8 8 8 8
+9 10 10 9 9 9 9
+truncate table t1;
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+#key b_f no hash key
+update t1 set b=2 , f=2 where a=1;
+ERROR 23000: Duplicate entry '2-2' for key 'b_2'
+update t1 set b=b+33, f=f+33 where e=1 and g=1;
+update t1 set b=34, f=34 where e=1 and g=1 ;
+update t1 set b=b+1, f=f+1 where a>0;
+ERROR 23000: Duplicate entry '3-3' for key 'b_2'
+update ignore t1 set b=b+1, f=f+1 where a>0;
+select * from t1 ;
+a b c d e f g
+1 36 1 1 1 36 1
+2 2 2 2 2 2 2
+3 3 3 3 3 3 3
+4 4 4 4 4 4 4
+5 5 5 5 5 5 5
+6 6 6 6 6 6 6
+7 7 7 7 7 7 7
+8 8 8 8 8 8 8
+9 10 9 9 9 10 9
+truncate table t1;
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+#key e_g
+update t1 set e=2 , g=2 where a=1;
+ERROR 23000: Duplicate entry '2-2' for key 'e'
+update t1 set e=e+34, g=g+34 where a=1;
+update t1 set e=34, g=34 where e=1 and g=1 ;
+select * from t1 where a=1;
+a b c d e f g
+1 1 1 1 35 1 35
+update t1 set e=e+1, g=g+1 where a>0;
+ERROR 23000: Duplicate entry '3-3' for key 'e'
+update ignore t1 set e=e+1, g=g+1 where a>0;
+select * from t1 ;
+a b c d e f g
+1 1 1 1 37 1 37
+2 2 2 2 2 2 2
+3 3 3 3 3 3 3
+4 4 4 4 4 4 4
+5 5 5 5 5 5 5
+6 6 6 6 6 6 6
+7 7 7 7 7 7 7
+8 8 8 8 8 8 8
+9 9 9 9 10 9 10
+drop table t1;
diff --git a/mysql-test/main/long_unique_update.test b/mysql-test/main/long_unique_update.test
new file mode 100644
index 00000000000..b160ebad9f1
--- /dev/null
+++ b/mysql-test/main/long_unique_update.test
@@ -0,0 +1,138 @@
+--echo #structure of tests;
+--echo #1 test of table containing a single unique blob column;
+--echo #2 test of table also containing other unique int/varchar columns;
+--echo #3 test of table containing multiple unique blob columns like unique(a),unique(b);
+--echo #4 test of table containing multiple multi-column unique blob keys like unique(a,b...),unique(c,d....);
+--echo #structure of each test;
+--echo #test if update works;
+--echo #test update for duplicate entry;
+--echo #test update for no change in keys;
+--echo #test update with ignore;
+
+--echo #test 1
+create table t1 (a blob unique);
+query_vertical show keys from t1;
+insert into t1 values(1),(2),(3),(4),(5);
+select * from t1;
+update t1 set a=11 where a=5;
+update t1 set a=a+20 where a=1;
+select * from t1;
+--error ER_DUP_ENTRY
+update t1 set a=3 where a=2;
+--error ER_DUP_ENTRY
+update t1 set a=4 where a=3;
+--echo #no change in blob key
+update t1 set a=3 where a=3;
+update t1 set a=2 where a=2;
+select* from t1;
+--echo #IGNORE;
+update ignore t1 set a=3 where a=2;
+update ignore t1 set a=4 where a=3;
+select * from t1;
+drop table t1;
+
+--echo #test 2;
+create table t1 (a int primary key, b blob unique , c int unique );
+show keys from t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+select * from t1 limit 3;
+update t1 set b=34 where a=1;
+update t1 set b=a+c+b+34 where b=2;
+update t1 set b=a+10+b where c=3;
+select * from t1;
+truncate table t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+--error ER_DUP_ENTRY
+update t1 set b=4 where a=3;
+--error ER_DUP_ENTRY
+update t1 set b=a+1 where b=3;
+--error ER_DUP_ENTRY
+update t1 set b=a+1 where c=3;
+--echo #no change in blob key
+update t1 set b=3 where a=3;
+update t1 set b=2 where b=2;
+update t1 set b=5 where c=5;
+select* from t1;
+--echo #IGNORE;
+update ignore t1 set b=3 where a=2;
+update ignore t1 set b=4 where b=3;
+update ignore t1 set b=5 where c=3;
+select * from t1;
+drop table t1;
+
+--echo #test 3;
+create table t1 (a blob unique, b blob unique , c blob unique);
+show keys from t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+select * from t1 limit 3;
+update t1 set b=34 where a=1;
+update t1 set b=a+c+b+34 where b=2;
+update t1 set b=a+10+b where c=3;
+select * from t1;
+truncate table t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+--error ER_DUP_ENTRY
+update t1 set b=4 where a=3;
+--error ER_DUP_ENTRY
+update t1 set b=a+1 where b=3;
+--error ER_DUP_ENTRY
+update t1 set b=a+1 where c=3;
+--echo #no change in blob key
+update t1 set b=3 where a=3;
+update t1 set b=2 where b=2;
+update t1 set b=5 where c=5;
+select* from t1;
+--echo #IGNORE;
+update ignore t1 set b=3 where a=2;
+update ignore t1 set b=4 where b=3;
+update ignore t1 set b=5 where c=3;
+update ignore t1 set b=b+3 where a>1 or b>1 or c>1;
+select * from t1;
+update ignore t1 set b=b+5 where a>1 and b<5 and c<a+b;
+select * from t1;
+drop table t1;
+
+--echo #test 4 ultimate test;
+create table t1 (a int primary key , b int, c blob , d blob , e varchar(2000), f int , g text,
+unique (b,c), unique (b,f),unique(e,g),unique(a,b,c,d,e,f,g));
+desc t1;
+show create table t1;
+show keys from t1;
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+select * from t1 limit 3;
+--echo #key b_c
+--error ER_DUP_ENTRY
+update t1 set b=2 ,c=2 where a=1;
+update t1 set b=b+34, c=c+34 where e=1 and g=1 ;
+update t1 set b=35, c=35 where e=1 and g=1 ;
+--error ER_DUP_ENTRY
+update t1 set b=b+1, c=c+1 where a>0;
+update ignore t1 set b=b+1, c=c+1 where a>0;
+select * from t1 ;
+truncate table t1;
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+--echo #key b_f no hash key
+--error ER_DUP_ENTRY
+update t1 set b=2 , f=2 where a=1;
+update t1 set b=b+33, f=f+33 where e=1 and g=1;
+update t1 set b=34, f=34 where e=1 and g=1 ;
+--error ER_DUP_ENTRY
+update t1 set b=b+1, f=f+1 where a>0;
+update ignore t1 set b=b+1, f=f+1 where a>0;
+select * from t1 ;
+truncate table t1;
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+--echo #key e_g
+--error ER_DUP_ENTRY
+update t1 set e=2 , g=2 where a=1;
+update t1 set e=e+34, g=g+34 where a=1;
+update t1 set e=34, g=34 where e=1 and g=1 ;
+select * from t1 where a=1;
+--error ER_DUP_ENTRY
+update t1 set e=e+1, g=g+1 where a>0;
+update ignore t1 set e=e+1, g=g+1 where a>0;
+select * from t1 ;
+drop table t1;
diff --git a/mysql-test/main/long_unique_using_hash.result b/mysql-test/main/long_unique_using_hash.result
new file mode 100644
index 00000000000..987e11294ec
--- /dev/null
+++ b/mysql-test/main/long_unique_using_hash.result
@@ -0,0 +1,54 @@
+create table t1(a blob , unique(a) using hash);
+show keys from t1;;
+Table t1
+Non_unique 0
+Key_name a
+Seq_in_index 1
+Column_name a
+Collation A
+Cardinality NULL
+Sub_part NULL
+Packed NULL
+Null YES
+Index_type HASH
+Comment
+Index_comment
+drop table t1;
+create table t1(a blob , unique(a) using btree);
+ERROR 42000: Specified key was too long; max key length is 1000 bytes
+create table t1(a int , unique(a) using hash);
+show keys from t1;;
+Table t1
+Non_unique 0
+Key_name a
+Seq_in_index 1
+Column_name a
+Collation A
+Cardinality NULL
+Sub_part NULL
+Packed NULL
+Null YES
+Index_type HASH
+Comment
+Index_comment
+drop table t1;
+create table t1(a int ,b int , c int, unique(a, b, c) using hash);
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+insert into t1 values(1,1,1);
+insert into t1 values(1,1,1);
+ERROR 23000: Duplicate entry '1-1-1' for key 'a'
+drop table t1;
+create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a NULL NULL NULL NULL YES HASH
+t1 0 a 2 b NULL NULL NULL NULL YES HASH
+t1 0 a 3 c NULL 0 NULL NULL YES HASH
+insert into t1 values(1,1,1);
+insert into t1 values(1,1,1);
+ERROR 23000: Duplicate entry '1-1-1' for key 'a'
+drop table t1;
diff --git a/mysql-test/main/long_unique_using_hash.test b/mysql-test/main/long_unique_using_hash.test
new file mode 100644
index 00000000000..50f7a4e1920
--- /dev/null
+++ b/mysql-test/main/long_unique_using_hash.test
@@ -0,0 +1,25 @@
+
+create table t1(a blob , unique(a) using hash);
+--query_vertical show keys from t1;
+drop table t1;
+
+--error ER_TOO_LONG_KEY
+create table t1(a blob , unique(a) using btree);
+
+create table t1(a int , unique(a) using hash);
+--query_vertical show keys from t1;
+drop table t1;
+
+create table t1(a int ,b int , c int, unique(a, b, c) using hash);
+show keys from t1;
+insert into t1 values(1,1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(1,1,1);
+drop table t1;
+
+create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
+show keys from t1;
+insert into t1 values(1,1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(1,1,1);
+drop table t1;
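
The memory-engine case above differs on purpose: an engine that advertises
HA_CAN_HASH_KEYS (see the sql/handler.h hunk below) keeps its native HASH
index, while other engines emulate the key through a hidden hash field. A
rough sketch of the contrast (illustrative only):

    create table t1 (a int, unique(a) using hash) engine=memory; -- native HASH index
    drop table t1;
    create table t1 (a int, unique(a) using hash) engine=myisam; -- emulated through an
    drop table t1;                                               -- invisible hash field
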
diff --git a/mysql-test/main/type_blob.result b/mysql-test/main/type_blob.result
index 83a7f49f8c6..0654338a6ca 100644
--- a/mysql-test/main/type_blob.result
+++ b/mysql-test/main/type_blob.result
@@ -369,8 +369,6 @@ HELLO MY 1
a 1
hello 1
drop table t1;
-create table t1 (a text, unique (a(2100)));
-ERROR 42000: Specified key was too long; max key length is 1000 bytes
create table t1 (a text, key (a(2100)));
Warnings:
Note 1071 Specified key was too long; max key length is 1000 bytes
diff --git a/mysql-test/main/type_blob.test b/mysql-test/main/type_blob.test
index 4b5f3578e3a..993362c3298 100644
--- a/mysql-test/main/type_blob.test
+++ b/mysql-test/main/type_blob.test
@@ -131,8 +131,6 @@ select c,count(*) from t1 group by c;
select d,count(*) from t1 group by d;
drop table t1;
--- error 1071
-create table t1 (a text, unique (a(2100))); # should give an error
create table t1 (a text, key (a(2100))); # key is auto-truncated
show create table t1;
drop table t1;
diff --git a/mysql-test/suite/heap/heap_hash.result b/mysql-test/suite/heap/heap_hash.result
index 987de1279d6..603baf6c6b8 100644
--- a/mysql-test/suite/heap/heap_hash.result
+++ b/mysql-test/suite/heap/heap_hash.result
@@ -66,7 +66,7 @@ a
alter table t1 engine=myisam;
explain select * from t1 where a in (869751,736494,226312,802616);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index uniq_id uniq_id 4 NULL 5 Using where; Using index
+1 SIMPLE t1 index uniq_id uniq_id 8 NULL 5 Using where; Using index
drop table t1;
create table t1 (x int not null, y int not null, key x using HASH (x), unique y using HASH (y)) engine=heap;
diff --git a/mysql-test/suite/maria/maria.result b/mysql-test/suite/maria/maria.result
index 76b14024bf4..5646e36d4bd 100644
--- a/mysql-test/suite/maria/maria.result
+++ b/mysql-test/suite/maria/maria.result
@@ -353,8 +353,6 @@ test.t1 check status OK
drop table t1;
CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255), KEY t1 (a, b, c, d, e));
ERROR 42000: Specified key was too long; max key length is 1000 bytes
-CREATE TABLE t1 (a varchar(32000), unique key(a));
-ERROR 42000: Specified key was too long; max key length is 1000 bytes
CREATE TABLE t1 (a varchar(1), b varchar(1), key (a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b));
ERROR 42000: Too many key parts specified; max 32 parts allowed
CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255));
diff --git a/mysql-test/suite/maria/maria.test b/mysql-test/suite/maria/maria.test
index 19aab4aa944..873dbc52aa5 100644
--- a/mysql-test/suite/maria/maria.test
+++ b/mysql-test/suite/maria/maria.test
@@ -374,8 +374,6 @@ drop table t1;
--error 1071
CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255), KEY t1 (a, b, c, d, e));
---error 1071
-CREATE TABLE t1 (a varchar(32000), unique key(a));
--error 1070
CREATE TABLE t1 (a varchar(1), b varchar(1), key (a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b));
CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255));
diff --git a/sql/field.cc b/sql/field.cc
index bb4a0f06fc4..c9c1b8f44aa 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -9886,7 +9886,7 @@ int Field_bit::key_cmp(const uchar *str, uint length)
}
-int Field_bit::cmp_offset(uint row_offset)
+int Field_bit::cmp_offset(my_ptrdiff_t row_offset)
{
if (bit_len)
{
diff --git a/sql/field.h b/sql/field.h
index 30098afe953..af84f9a7ee7 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -553,7 +553,7 @@ class Virtual_column_info: public Sql_alloc
name.str= NULL;
name.length= 0;
};
- ~Virtual_column_info() {}
+ ~Virtual_column_info() {};
enum_vcol_info_type get_vcol_type() const
{
return vcol_type;
@@ -1083,7 +1083,7 @@ class Field: public Value_source
virtual int cmp(const uchar *,const uchar *)=0;
virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U)
{ return memcmp(a,b,pack_length()); }
- virtual int cmp_offset(uint row_offset)
+ virtual int cmp_offset(my_ptrdiff_t row_offset)
{ return cmp(ptr,ptr+row_offset); }
virtual int cmp_binary_offset(uint row_offset)
{ return cmp_binary(ptr, ptr+row_offset); };
@@ -4311,7 +4311,7 @@ class Field_bit :public Field {
int key_cmp(const uchar *a, const uchar *b)
{ return cmp_binary((uchar *) a, (uchar *) b); }
int key_cmp(const uchar *str, uint length);
- int cmp_offset(uint row_offset);
+ int cmp_offset(my_ptrdiff_t row_offset);
bool update_min(Field *min_val, bool force_update)
{
longlong val= val_int();
diff --git a/sql/handler.cc b/sql/handler.cc
index bc183837903..3c61b4f528a 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -3768,6 +3768,8 @@ void print_keydup_error(TABLE *table, KEY *key, const char *msg, myf errflag)
}
else
{
+ if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+ setup_keyinfo_hash(key);
/* Table is opened and defined at this point */
key_unpack(&str,table, key);
uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(msg);
@@ -3778,6 +3780,8 @@ void print_keydup_error(TABLE *table, KEY *key, const char *msg, myf errflag)
}
my_printf_error(ER_DUP_ENTRY, msg, errflag, str.c_ptr_safe(),
key->name.str);
+ if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+ re_setup_keyinfo_hash(key);
}
}
@@ -3795,7 +3799,6 @@ void print_keydup_error(TABLE *table, KEY *key, myf errflag)
errflag);
}
-
/**
Print error that we got from handler function.
@@ -4288,6 +4291,8 @@ uint handler::get_dup_key(int error)
DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
m_lock_type != F_UNLCK);
DBUG_ENTER("handler::get_dup_key");
+ if (table->s->long_unique_table && table->file->errkey < table->s->keys)
+ DBUG_RETURN(table->file->errkey);
table->file->errkey = (uint) -1;
if (error == HA_ERR_FOUND_DUPP_KEY ||
error == HA_ERR_FOREIGN_DUPLICATE_KEY ||
@@ -6483,6 +6488,162 @@ static int wsrep_after_row(THD *thd)
}
#endif /* WITH_WSREP */
+static int check_duplicate_long_entry_key(TABLE *table, handler *h, uchar *new_rec,
+ uint key_no)
+{
+ Field *hash_field;
+ int result, error= 0;
+ KEY *key_info= table->key_info + key_no;
+ hash_field= key_info->key_part->field;
+ DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY &&
+ key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL)
+ || key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
+ uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
+
+ if (hash_field->is_real_null())
+ return 0;
+
+ key_copy(ptr, new_rec, key_info, key_info->key_length, false);
+
+ if (!table->check_unique_buf)
+ table->check_unique_buf= (uchar *)alloc_root(&table->mem_root,
+ table->s->reclength);
+
+ result= h->ha_index_init(key_no, 0);
+ if (result)
+ return result;
+ result= h->ha_index_read_map(table->check_unique_buf,
+ ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT);
+ if (!result)
+ {
+ bool is_same;
+ Field * t_field;
+ Item_func_hash * temp= (Item_func_hash *)hash_field->vcol_info->expr;
+ Item ** arguments= temp->arguments();
+ uint arg_count= temp->argument_count();
+ do
+ {
+ long diff= table->check_unique_buf - new_rec;
+ is_same= true;
+ for (uint j=0; is_same && j < arg_count; j++)
+ {
+ DBUG_ASSERT(arguments[j]->type() == Item::FIELD_ITEM ||
+ // this one for left(fld_name,length)
+ arguments[j]->type() == Item::FUNC_ITEM);
+ if (arguments[j]->type() == Item::FIELD_ITEM)
+ {
+ t_field= static_cast<Item_field *>(arguments[j])->field;
+ if (t_field->cmp_offset(diff))
+ is_same= false;
+ }
+ else
+ {
+ Item_func_left *fnc= static_cast<Item_func_left *>(arguments[j]);
+ DBUG_ASSERT(!my_strcasecmp(system_charset_info, "left", fnc->func_name()));
+ DBUG_ASSERT(fnc->arguments()[0]->type() == Item::FIELD_ITEM);
+ t_field= static_cast<Item_field *>(fnc->arguments()[0])->field;
+ uint length= fnc->arguments()[1]->val_int();
+ if (t_field->cmp_max(t_field->ptr, t_field->ptr + diff, length))
+ is_same= false;
+ }
+ }
+ }
+ while (!is_same && !(result= table->file->ha_index_next_same(table->check_unique_buf,
+ ptr, key_info->key_length)));
+ if (is_same)
+ {
+ table->file->errkey= key_no;
+ error= HA_ERR_FOUND_DUPP_KEY;
+ goto exit;
+ }
+ else
+ goto exit;
+ }
+ if (result == HA_ERR_LOCK_WAIT_TIMEOUT)
+ {
+ table->file->errkey= key_no;
+ error= HA_ERR_LOCK_WAIT_TIMEOUT;
+ }
+ exit:
+ h->ha_index_end();
+ return error;
+}
+
+/** @brief
+ Check whether an inserted record breaks the
+ unique constraint on long columns.
+ @returns 0 if there is no duplicate, otherwise an error code
+ */
+static int check_duplicate_long_entries(TABLE *table, handler *h, uchar *new_rec)
+{
+ table->file->errkey= -1;
+ int result;
+ for (uint i= 0; i < table->s->keys; i++)
+ {
+ if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH &&
+ (result= check_duplicate_long_entry_key(table, h, new_rec, i)))
+ return result;
+ }
+ return 0;
+}
+
+/** @brief
+ Check whether an updated record breaks the
+ unique constraint on long columns.
+ In the case of UPDATE we only need to check the keys whose
+ fields actually changed. Consider
+ create table t1(a blob , b blob , x blob , y blob ,unique(a,b)
+ ,unique(x,y))
+ and an update statement like
+ update t1 set a=23+a;
+ If we scanned every key, the index scan on x_y would find the
+ row's own unchanged data and report a false duplicate, so only
+ the keys whose fields differ between the old and new record
+ are checked here.
+ @returns 0 if there is no duplicate, otherwise an error code
+ */
+static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar *new_rec)
+{
+ Field *field;
+ uint key_parts;
+ int error= 0;
+ KEY *keyinfo;
+ KEY_PART_INFO *keypart;
+ /*
+ Here we are comparing whether new record and old record are same
+ with respect to fields in hash_str
+ */
+ long reclength= table->record[1]-table->record[0];
+ if (!table->update_handler)
+ table->clone_handler_for_update();
+ for (uint i= 0; i < table->s->keys; i++)
+ {
+ keyinfo= table->key_info + i;
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ key_parts= fields_in_hash_keyinfo(keyinfo);
+ keypart= keyinfo->key_part - key_parts;
+ for (uint j= 0; j < key_parts; j++, keypart++)
+ {
+ field= keypart->field;
+ /* Compare fields if they are different then check for duplicates*/
+ if(field->cmp_binary_offset(reclength))
+ {
+ if((error= check_duplicate_long_entry_key(table, table->update_handler,
+ new_rec, i)))
+ goto exit;
+ /*
+ break because check_duplicate_long_entries_key will
+ take care of remaining fields
+ */
+ break;
+ }
+ }
+ }
+ }
+ exit:
+ return error;
+}
+
int handler::ha_write_row(uchar *buf)
{
int error;
@@ -6496,6 +6657,9 @@ int handler::ha_write_row(uchar *buf)
mark_trx_read_write();
increment_statistics(&SSV::ha_write_count);
+ if (table->s->long_unique_table &&
+ (error= check_duplicate_long_entries(table, table->file, buf)))
+ DBUG_RETURN(error);
TABLE_IO_WAIT(tracker, m_psi, PSI_TABLE_WRITE_ROW, MAX_KEY, 0,
{ error= write_row(buf); })
@@ -6535,6 +6699,11 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str);
mark_trx_read_write();
increment_statistics(&SSV::ha_update_count);
+ if (table->s->long_unique_table &&
+ (error= check_duplicate_long_entries_update(table, table->file, (uchar *)new_data)))
+ {
+ return error;
+ }
TABLE_IO_WAIT(tracker, m_psi, PSI_TABLE_UPDATE_ROW, active_index, 0,
{ error= update_row(old_data, new_data);})
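
To make the update-path comment above concrete: only hash keys whose
participating columns actually changed are re-checked, which avoids false
duplicates coming from the row's own unchanged key values. A plain SQL
sketch of the scenario the comment describes (not taken from the test
files):

    create table t1 (a blob, b blob, x blob, y blob, unique(a,b), unique(x,y));
    insert into t1 values ('1','1','1','1');
    -- Only the (a,b) hash changes here; scanning the x_y key as well would
    -- find the row's own unchanged (x,y) value and report a spurious
    -- duplicate, so check_duplicate_long_entries_update() skips that key.
    update t1 set a=concat(a,'2');
    drop table t1;
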
diff --git a/sql/handler.h b/sql/handler.h
index 09c9c9a6849..695ee875ba7 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -323,7 +323,10 @@ enum enum_alter_inplace_result {
/** whether every data field explicitly stores length
(holds for InnoDB ROW_FORMAT=REDUNDANT) */
#define HA_EXTENDED_TYPES_CONVERSION (1ULL << 57)
-#define HA_LAST_TABLE_FLAG HA_EXTENDED_TYPES_CONVERSION
+
+/* Support native hash index */
+#define HA_CAN_HASH_KEYS (1ULL << 58)
+#define HA_LAST_TABLE_FLAG HA_CAN_HASH_KEYS
/* bits in index_flags(index_number) for what you can do with index */
#define HA_READ_NEXT 1 /* TODO really use this flag */
@@ -3109,7 +3112,7 @@ class handler :public Sql_alloc
check_table_binlog_row_based_done(0),
check_table_binlog_row_based_result(0),
row_already_logged(0),
- in_range_check_pushed_down(FALSE),
+ in_range_check_pushed_down(FALSE), errkey(-1),
key_used_on_scan(MAX_KEY),
active_index(MAX_KEY), keyread(MAX_KEY),
ref_length(sizeof(my_off_t)),
diff --git a/sql/item_func.cc b/sql/item_func.cc
index ef0dc0ba34b..4722241d7d7 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1714,6 +1714,47 @@ bool Item_func_mod::fix_length_and_dec()
DBUG_RETURN(FALSE);
}
+static void calc_hash_for_unique(ulong &nr1, ulong &nr2, String *str)
+{
+ CHARSET_INFO *cs;
+ uchar l[4];
+ int4store(l, str->length());
+ cs= str->charset();
+ cs->coll->hash_sort(cs, l, sizeof(l), &nr1, &nr2);
+ cs= str->charset();
+ cs->coll->hash_sort(cs, (uchar *)str->ptr(), str->length(), &nr1, &nr2);
+}
+
+longlong Item_func_hash::val_int()
+{
+ DBUG_EXECUTE_IF("same_long_unique_hash", return 9;);
+ unsigned_flag= true;
+ ulong nr1= 1,nr2= 4;
+ CHARSET_INFO *cs;
+ String * str;
+ for(uint i= 0;i<arg_count;i++)
+ {
+ str = args[i]->val_str();
+ if(args[i]->null_value)
+ {
+ null_value= 1;
+ return 0;
+ }
+ calc_hash_for_unique(nr1, nr2, str);
+ }
+ null_value= 0;
+ return (longlong)nr1;
+}
+
+
+bool Item_func_hash::fix_length_and_dec()
+{
+ decimals= 0;
+ max_length= 8;
+ return false;
+}
+
+
double Item_func_neg::real_op()
{
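
Item_func_hash::val_int() above folds each key column into the hash by
first hashing the 4-byte length and then the value bytes through the
collation's hash_sort(), and yields NULL as soon as any argument is NULL.
Two observable consequences, as a hedged SQL sketch (not part of the
patch's tests):

    create table t1 (a blob, b blob, unique(a,b));
    insert into t1 values ('ab','c');
    insert into t1 values ('a','bc'); -- lengths are hashed too, so this is
                                      -- a distinct key value: no ER_DUP_ENTRY
    insert into t1 values (NULL,'c');
    insert into t1 values (NULL,'c'); -- a NULL hash is never compared, so
    drop table t1;                    -- duplicate NULLs are accepted
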
diff --git a/sql/item_func.h b/sql/item_func.h
index 6408cf4dd55..b9778af5b0f 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -1031,6 +1031,19 @@ class Item_long_func: public Item_int_func
};
+class Item_func_hash: public Item_int_func
+{
+public:
+ Item_func_hash(THD *thd, List<Item> &item): Item_int_func(thd, item)
+ {}
+ longlong val_int();
+ bool fix_length_and_dec();
+ const Type_handler *type_handler() const { return &type_handler_longlong; }
+ Item *get_copy(THD *thd)
+ { return get_item_copy<Item_func_hash>(thd, this); }
+ const char *func_name() const { return "<hash>"; }
+};
+
class Item_longlong_func: public Item_int_func
{
public:
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index f80ae99ac50..47037b6d102 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -7936,3 +7936,5 @@ ER_USER_IS_BLOCKED
ER_ACCOUNT_HAS_BEEN_LOCKED
eng "Access denied, this account is locked"
rum "Acces refuzat, acest cont este blocat"
+ER_TOO_LONG_HASH_KEYSEG
+ eng "Max key segment length is 65535"
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index ef712f76031..3ce6e3a4677 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -17862,6 +17862,8 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
table->intersect_keys.init();
table->keys_in_use_for_query.init();
table->no_rows_with_nulls= param->force_not_null_cols;
+ table->update_handler= NULL;
+ table->check_unique_buf= NULL;
table->s= share;
init_tmp_table_share(thd, share, "", 0, tmpname, tmpname);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 80884d66590..4776d807707 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -2287,7 +2287,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
hton->field_options);
}
- key_info= table->key_info;
+ key_info= table->s->key_info;
primary_key= share->primary_key;
for (uint i=0 ; i < share->keys ; i++,key_info++)
@@ -2342,7 +2342,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
}
}
packet->append(')');
- store_key_options(thd, packet, table, key_info);
+ store_key_options(thd, packet, table, &table->key_info[i]);
if (key_info->parser)
{
LEX_CSTRING *parser_name= plugin_name(key_info->parser);
@@ -2467,7 +2467,8 @@ static void store_key_options(THD *thd, String *packet, TABLE *table,
if (key_info->algorithm == HA_KEY_ALG_BTREE)
packet->append(STRING_WITH_LEN(" USING BTREE"));
- if (key_info->algorithm == HA_KEY_ALG_HASH)
+ if (key_info->algorithm == HA_KEY_ALG_HASH ||
+ key_info->algorithm == HA_KEY_ALG_LONG_HASH)
packet->append(STRING_WITH_LEN(" USING HASH"));
/* send USING only in non-default case: non-spatial rtree */
@@ -6596,15 +6597,20 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
table->field[8]->set_notnull();
}
KEY *key=show_table->key_info+i;
- if (key->rec_per_key[j])
+ if (key->rec_per_key[j] && key->algorithm != HA_KEY_ALG_LONG_HASH)
{
ha_rows records= (ha_rows) ((double) show_table->stat_records() /
key->actual_rec_per_key(j));
table->field[9]->store((longlong) records, TRUE);
table->field[9]->set_notnull();
}
- const char *tmp= show_table->file->index_type(i);
- table->field[13]->store(tmp, strlen(tmp), cs);
+ if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+ table->field[13]->store(STRING_WITH_LEN("HASH"), cs);
+ else
+ {
+ const char *tmp= show_table->file->index_type(i);
+ table->field[13]->store(tmp, strlen(tmp), cs);
+ }
}
if (!(key_info->flags & HA_FULLTEXT) &&
(key_part->field &&
@@ -6855,7 +6861,7 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
{
List<FOREIGN_KEY_INFO> f_key_list;
TABLE *show_table= tables->table;
- KEY *key_info=show_table->key_info;
+ KEY *key_info=show_table->s->key_info;
uint primary_key= show_table->s->primary_key;
show_table->file->info(HA_STATUS_VARIABLE |
HA_STATUS_NO_LOCK |
@@ -7053,7 +7059,7 @@ static int get_schema_key_column_usage_record(THD *thd,
{
List<FOREIGN_KEY_INFO> f_key_list;
TABLE *show_table= tables->table;
- KEY *key_info=show_table->key_info;
+ KEY *key_info=show_table->s->key_info;
uint primary_key= show_table->s->primary_key;
show_table->file->info(HA_STATUS_VARIABLE |
HA_STATUS_NO_LOCK |
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 4dd9d43a7b9..e8702a2daca 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -2787,6 +2787,8 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db,
This will make checking for duplicated keys faster and ensure that
PRIMARY keys are prioritized.
+ This will not reorder LONG_HASH indexes, because they must match the
+ order of their LONG_UNIQUE_HASH_FIELDs.
*/
static int sort_keys(KEY *a, KEY *b)
@@ -2799,6 +2801,9 @@ static int sort_keys(KEY *a, KEY *b)
return -1;
if ((a_flags ^ b_flags) & HA_NULL_PART_KEY)
{
+ if (a->algorithm == HA_KEY_ALG_LONG_HASH &&
+ b->algorithm == HA_KEY_ALG_LONG_HASH)
+ return a->usable_key_parts - b->usable_key_parts;
/* Sort NOT NULL keys before other keys */
return (a_flags & HA_NULL_PART_KEY) ? 1 : -1;
}
@@ -3033,7 +3038,6 @@ void promote_first_timestamp_column(List<Create_field> *column_definitions)
}
}
-
/**
Check if there is a duplicate key. Report a warning for every duplicate key.
@@ -3299,6 +3303,65 @@ int mysql_add_invisible_field(THD *thd, List<Create_field> * field_list,
return 0;
}
+#define LONG_HASH_FIELD_NAME_LENGTH 30
+static inline void make_long_hash_field_name(LEX_CSTRING *buf, uint num)
+{
+ buf->length= my_snprintf((char *)buf->str,
+ LONG_HASH_FIELD_NAME_LENGTH, "DB_ROW_HASH_%u", num);
+}
+/**
+ Add fully invisible hash field to table in case of long
+ unique column
+ @param thd Thread Context.
+ @param create_list List of table fields.
+ @param key_info current long unique key info
+*/
+static Create_field * add_hash_field(THD * thd, List<Create_field> *create_list,
+ KEY *key_info)
+{
+ List_iterator<Create_field> it(*create_list);
+// CHARSET_INFO *field_cs;
+ Create_field *dup_field, *cf= new (thd->mem_root) Create_field();
+ cf->flags|= UNSIGNED_FLAG | LONG_UNIQUE_HASH_FIELD;
+ cf->decimals= 0;
+ cf->length= cf->char_length= cf->pack_length= HA_HASH_FIELD_LENGTH;
+ cf->invisible= INVISIBLE_FULL;
+ cf->pack_flag|= FIELDFLAG_MAYBE_NULL;
+ uint num= 1;
+ LEX_CSTRING field_name;
+ field_name.str= (char *)thd->alloc(LONG_HASH_FIELD_NAME_LENGTH);
+ make_long_hash_field_name(&field_name, num);
+ /*
+ Check for collisions
+ */
+ while ((dup_field= it++))
+ {
+ if (!my_strcasecmp(system_charset_info, field_name.str, dup_field->field_name.str))
+ {
+ num++;
+ make_long_hash_field_name(&field_name, num);
+ it.rewind();
+ }
+ }
+ /* for (uint i= 0; i < key_info->user_defined_key_parts; i++)
+ {
+ dup_field= create_list->elem(key_info->key_part[i].fieldnr);
+ if (!i)
+ field_cs= dup_field->charset;
+ else if(field_cs != dup_field->charset)
+ {
+ my_error(ER_MULTIPLE_CS_HASH_KEY, MYF(0));
+ return NULL;
+ }
+ }
+ cf->charset= field_cs;*/
+ cf->field_name= field_name;
+ cf->set_handler(&type_handler_longlong);
+ key_info->algorithm= HA_KEY_ALG_LONG_HASH;
+ create_list->push_back(cf,thd->mem_root);
+ return cf;
+}
+
Key *
mysql_add_invisible_index(THD *thd, List<Key> *key_list,
LEX_CSTRING* field_name, enum Key::Keytype type)
@@ -3356,6 +3419,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
uint total_uneven_bit_length= 0;
int select_field_count= C_CREATE_SELECT(create_table_mode);
bool tmp_table= create_table_mode == C_ALTER_TABLE;
+ bool is_hash_field_needed= false;
DBUG_ENTER("mysql_prepare_create_table");
DBUG_EXECUTE_IF("test_pseudo_invisible",{
@@ -3673,6 +3737,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
uint key_length=0;
Key_part_spec *column;
+ is_hash_field_needed= false;
if (key->name.str == ignore_key)
{
/* ignore redundant keys */
@@ -3894,8 +3959,13 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
column->length= MAX_LEN_GEOM_POINT_FIELD;
if (!column->length)
{
- my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str);
- DBUG_RETURN(TRUE);
+ if (key->type == Key::PRIMARY)
+ {
+ my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str);
+ DBUG_RETURN(TRUE);
+ }
+ else
+ is_hash_field_needed= true;
}
}
#ifdef HAVE_SPATIAL
@@ -3974,9 +4044,9 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (key_part_length > max_key_length ||
key_part_length > file->max_key_part_length())
{
- key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
if (key->type == Key::MULTIPLE)
{
+ key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
/* not a critical problem */
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_TOO_LONG_KEY,
@@ -3986,10 +4056,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
}
else
- {
- my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
- DBUG_RETURN(TRUE);
- }
+ is_hash_field_needed= true;
}
}
// Catch invalid use of partial keys
@@ -4013,7 +4080,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
key_part_length= column->length;
}
- else if (key_part_length == 0 && (sql_field->flags & NOT_NULL_FLAG))
+ else if (key_part_length == 0 && (sql_field->flags & NOT_NULL_FLAG) &&
+ !is_hash_field_needed)
{
my_error(ER_WRONG_KEY_COLUMN, MYF(0), file->table_type(),
column->field_name.str);
@@ -4022,9 +4090,9 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (key_part_length > file->max_key_part_length() &&
key->type != Key::FULLTEXT)
{
- key_part_length= file->max_key_part_length();
if (key->type == Key::MULTIPLE)
{
+ key_part_length= file->max_key_part_length();
/* not a critical problem */
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
@@ -4034,18 +4102,34 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
else
{
- my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
- DBUG_RETURN(TRUE);
- }
+ if(key->type == Key::UNIQUE)
+ {
+ is_hash_field_needed= true;
}
- key_part_info->length= (uint16) key_part_length;
+ else
+ {
+ key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
+ my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ /* We can not store a key_part_length greater than 2^16 - 1 in the frm,
+ so raise an error in that case */
+ if (is_hash_field_needed && column->length > (1<<16) - 1)
+ {
+ my_error(ER_TOO_LONG_HASH_KEYSEG, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ else
+ key_part_info->length= (uint16) key_part_length;
/* Use packed keys for long strings on the first column */
if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) &&
!((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) &&
(key_part_length >= KEY_DEFAULT_PACK_LENGTH &&
(sql_field->real_field_type() == MYSQL_TYPE_STRING ||
sql_field->real_field_type() == MYSQL_TYPE_VARCHAR ||
- sql_field->pack_flag & FIELDFLAG_BLOB)))
+ sql_field->pack_flag & FIELDFLAG_BLOB))&& !is_hash_field_needed)
{
if ((column_nr == 0 && (sql_field->pack_flag & FIELDFLAG_BLOB)) ||
sql_field->real_field_type() == MYSQL_TYPE_VARCHAR)
@@ -4094,12 +4178,41 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (key->type == Key::UNIQUE && !(key_info->flags & HA_NULL_PART_KEY))
unique_key=1;
key_info->key_length=(uint16) key_length;
- if (key_length > max_key_length && key->type != Key::FULLTEXT)
+ if (key_length > max_key_length && key->type != Key::FULLTEXT &&
+ !is_hash_field_needed)
{
my_error(ER_TOO_LONG_KEY,MYF(0),max_key_length);
DBUG_RETURN(TRUE);
}
+ if (is_hash_field_needed &&
+ key_info->algorithm != HA_KEY_ALG_UNDEF &&
+ key_info->algorithm != HA_KEY_ALG_HASH )
+ {
+ my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
+ DBUG_RETURN(TRUE);
+ }
+ if (is_hash_field_needed ||
+ (key_info->algorithm == HA_KEY_ALG_HASH &&
+ key_info->flags & HA_NOSAME &&
+ !(file->ha_table_flags() & HA_CAN_HASH_KEYS ) &&
+ file->ha_table_flags() & HA_CAN_VIRTUAL_COLUMNS))
+ {
+ Create_field *hash_fld= add_hash_field(thd, &alter_info->create_list,
+ key_info);
+ if (!hash_fld)
+ DBUG_RETURN(TRUE);
+ hash_fld->offset= record_offset;
+ hash_fld->charset= create_info->default_table_charset;
+ record_offset+= hash_fld->pack_length;
+ if (key_info->flags & HA_NULL_PART_KEY)
+ null_fields++;
+ else
+ {
+ hash_fld->flags|= NOT_NULL_FLAG;
+ hash_fld->pack_flag&= ~FIELDFLAG_MAYBE_NULL;
+ }
+ }
if (validate_comment_length(thd, &key->key_create_info.comment,
INDEX_COMMENT_MAXLEN,
ER_TOO_LONG_INDEX_COMMENT,
@@ -4115,7 +4228,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
// Check if a duplicate index is defined.
check_duplicate_key(thd, key, key_info, &alter_info->key_list);
-
key_info++;
}
@@ -8203,11 +8315,12 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
Collect all keys which isn't in drop list. Add only those
for which some fields exists.
*/
-
for (uint i=0 ; i < table->s->keys ; i++,key_info++)
{
if (key_info->flags & HA_INVISIBLE_KEY)
continue;
+ if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+ setup_keyinfo_hash(key_info);
const char *key_name= key_info->name.str;
Alter_drop *drop;
drop_it.rewind();
@@ -8276,6 +8389,13 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
key_part_length= key_part->length;
if (cfield->field) // Not new field
{
+ if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ Field *fld= cfield->field;
+ if (fld->max_display_length() == cfield->length*fld->charset()->mbmaxlen
+ && fld->max_data_length() != key_part->length)
+ cfield->length= cfield->char_length= key_part->length;
+ }
/*
If the field can't have only a part used in a key according to its
new type, or should not be used partially according to its
@@ -8331,6 +8451,11 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
enum Key::Keytype key_type;
LEX_CSTRING tmp_name;
bzero((char*) &key_create_info, sizeof(key_create_info));
+ if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ key_info->flags|= HA_NOSAME;
+ key_info->algorithm= HA_KEY_ALG_UNDEF;
+ }
key_create_info.algorithm= key_info->algorithm;
/*
We copy block size directly as some engines, like Area, sets this
@@ -8370,6 +8495,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
tmp_name.str= key_name;
tmp_name.length= strlen(key_name);
+ /* We don't need the LONG_UNIQUE_HASH_FIELD flag because it will be autogenerated */
key= new Key(key_type, &tmp_name, &key_create_info,
MY_TEST(key_info->flags & HA_GENERATED_KEY),
&key_parts, key_info->option_list, DDL_options());
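
One detail of add_hash_field() above worth illustrating: the generated
column is named DB_ROW_HASH_<n>, and the collision loop bumps <n> past any
user column that already carries the name. A small sketch of the assumed
behavior (not covered by the tests shown here):

    create table t1 (db_row_hash_1 int, a blob unique);
    -- the invisible hash column becomes DB_ROW_HASH_2, avoiding the clash
    drop table t1;
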
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index d74da408dfc..246f5620461 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1055,6 +1055,8 @@ int mysql_update(THD *thd,
break;
}
}
+ if (table->update_handler)
+ table->delete_update_handler();
ANALYZE_STOP_TRACKING(&explain->command_tracker);
table->auto_increment_field_not_null= FALSE;
dup_key_found= 0;
@@ -2269,6 +2271,8 @@ multi_update::~multi_update()
for (table= update_tables ; table; table= table->next_local)
{
table->table->no_keyread= 0;
+ if (table->table->update_handler)
+ table->table->delete_update_handler();
if (ignore)
table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
}
diff --git a/sql/table.cc b/sql/table.cc
index 7682119c241..8a5f1286ac5 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -811,6 +811,12 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
}
key_part->store_length=key_part->length;
}
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ keyinfo->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
+ //Storing key hash
+ key_part++;
+ }
/*
Add primary key to end of extended keys for non unique keys for
@@ -844,7 +850,9 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
if (j == first_key_parts)
keyinfo->ext_key_flags= keyinfo->flags | HA_EXT_NOSAME;
}
- share->ext_key_parts+= keyinfo->ext_key_parts;
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ share->ext_key_parts++;
+ share->ext_key_parts+= keyinfo->ext_key_parts;
}
keynames=(char*) key_part;
strpos+= strnmov(keynames, (char *) strpos, frm_image_end - strpos) - keynames;
@@ -1156,10 +1164,57 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
pos+= expr_length;
}
- /* Now, initialize CURRENT_TIMESTAMP fields */
+ /* Now, initialize CURRENT_TIMESTAMP and UNIQUE_INDEX_HASH_FIELD fields */
for (field_ptr= table->field; *field_ptr; field_ptr++)
- {
+ {
Field *field= *field_ptr;
+ if (field->flags & LONG_UNIQUE_HASH_FIELD)
+ {
+ List<Item> *field_list= new (mem_root) List<Item>();
+ Item *list_item;
+ KEY *key;
+ uint key_index, parts;
+ for (key_index= 0; key_index < table->s->keys; key_index++)
+ {
+ key=table->key_info + key_index;
+ parts= key->user_defined_key_parts;
+ if (key->algorithm == HA_KEY_ALG_LONG_HASH &&
+ key->key_part[key->user_defined_key_parts].fieldnr == field->field_index+ 1)
+ break;
+ }
+ KEY_PART_INFO *keypart;
+ for (uint i=0; i < parts; i++)
+ {
+ keypart= key->key_part + i;
+ if (!keypart->length)
+ {
+ list_item= new(mem_root)Item_field(thd, keypart->field);
+ }
+ else
+ {
+ int length= keypart->length/keypart->field->charset()->mbmaxlen;
+ list_item= new(mem_root)Item_func_left(thd,
+ new (mem_root)Item_field(thd, keypart->field),
+ new (mem_root) Item_int(thd, length));
+ list_item->fix_fields(thd, NULL);
+ keypart->key_part_flag |= HA_PART_KEY_SEG;
+ }
+ field_list->push_back(list_item, mem_root);
+ }
+ Item_func_hash *hash_item= new(mem_root)Item_func_hash(thd, *field_list);
+ Virtual_column_info *v= new (mem_root) Virtual_column_info();
+ field->vcol_info= v;
+ field->vcol_info->expr= hash_item;
+ key->user_defined_key_parts= key->ext_key_parts= key->usable_key_parts= 1;
+ key->key_part+= parts;
+
+ if (key->flags & HA_NULL_PART_KEY)
+ key->key_length= HA_HASH_KEY_LENGTH_WITH_NULL;
+ else
+ key->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
+
+ *(vfield_ptr++)= *field_ptr;
+ }
if (field->has_default_now_unireg_check())
{
expr_str.length(parse_vcol_keyword.length);
@@ -1377,7 +1432,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
uint interval_count, interval_parts, read_length, int_length;
uint db_create_options, keys, key_parts, n_length;
uint com_length, null_bit_pos, UNINIT_VAR(mysql57_vcol_null_bit_pos), bitmap_count;
- uint i;
+ uint i, hash_fields= 0;
bool use_hash, mysql57_null_bits= 0;
char *keynames, *names, *comment_pos;
const uchar *forminfo, *extra2;
@@ -1813,8 +1868,11 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share, len, &first_keyinfo, keynames))
goto err;
}
-
share->key_block_size= uint2korr(frm_image+62);
+ keyinfo= share->key_info;
+ for (uint i= 0; i < share->keys; i++, keyinfo++)
+ if(keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ hash_fields++;
if (share->db_plugin && !plugin_equals(share->db_plugin, se_plugin))
goto err; // wrong engine (someone changed the frm under our feet?)
@@ -2305,6 +2363,41 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
/* Fix key->name and key_part->field */
if (key_parts)
{
+ keyinfo= share->key_info;
+ uint hash_field_used_no= share->fields - hash_fields;
+ KEY_PART_INFO *hash_keypart;
+ Field *hash_field;
+ uint offset= share->reclength - HA_HASH_FIELD_LENGTH * hash_fields;
+ for (uint i= 0; i < share->keys; i++, keyinfo++)
+ {
+ /*
+ We need set value in hash key_part
+ */
+
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ share->long_unique_table= 1;
+ if (share->frm_version < FRM_VER_EXPRESSSIONS)
+ share->frm_version= FRM_VER_EXPRESSSIONS;
+ hash_keypart= keyinfo->key_part + keyinfo->user_defined_key_parts;
+ hash_keypart->length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
+ hash_keypart->store_length= hash_keypart->length;
+ hash_keypart->type= HA_KEYTYPE_ULONGLONG;
+ hash_keypart->key_part_flag= 0;
+ hash_keypart->key_type= 32834;
+ /* Last n fields are unique_index_hash fields*/
+ hash_keypart->offset= offset;
+// hash_keypart->offset= share->reclength
+// - HA_HASH_FIELD_LENGTH*(share->fields - hash_field_used_no);
+ hash_keypart->fieldnr= hash_field_used_no + 1;
+ hash_field= share->field[hash_field_used_no];
+ hash_field->flags|= LONG_UNIQUE_HASH_FIELD;//Used in parse_vcol_defs
+ keyinfo->flags|= HA_NOSAME;
+ share->virtual_fields++;
+ hash_field_used_no++;
+ offset+= HA_HASH_FIELD_LENGTH;
+ }
+ }
uint add_first_key_parts= 0;
longlong ha_option= handler_file->ha_table_flags();
keyinfo= share->key_info;
@@ -2312,7 +2405,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
primary_key_name) ? MAX_KEY : 0;
KEY* key_first_info= NULL;
- if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME)
+ if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME &&
+ keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
{
/*
If the UNIQUE key doesn't have NULL columns and is not a part key
@@ -2411,9 +2505,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (field->null_ptr)
len_null_byte= HA_KEY_NULL_LENGTH;
- if (field->type() == MYSQL_TYPE_BLOB ||
+ if ((field->type() == MYSQL_TYPE_BLOB ||
field->real_type() == MYSQL_TYPE_VARCHAR ||
- field->type() == MYSQL_TYPE_GEOMETRY)
+ field->type() == MYSQL_TYPE_GEOMETRY) &&
+ keyinfo->algorithm != HA_KEY_ALG_LONG_HASH )
{
length_bytes= HA_KEY_BLOB_LENGTH;
}
@@ -2478,6 +2573,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
key_part= keyinfo->key_part;
uint key_parts= share->use_ext_keys ? keyinfo->ext_key_parts :
keyinfo->user_defined_key_parts;
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ key_parts++;
for (i=0; i < key_parts; key_part++, i++)
{
Field *field;
@@ -2491,8 +2588,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
field= key_part->field= share->field[key_part->fieldnr-1];
key_part->type= field->key_type();
+
if (field->invisible > INVISIBLE_USER && !field->vers_sys_field())
- keyinfo->flags |= HA_INVISIBLE_KEY;
+ if (keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
+ keyinfo->flags |= HA_INVISIBLE_KEY;
if (field->null_ptr)
{
key_part->null_offset=(uint) ((uchar*) field->null_ptr -
@@ -2518,7 +2617,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
key_part->key_part_flag|= HA_BIT_PART;
if (i == 0 && key != primary_key)
- field->flags |= (((keyinfo->flags & HA_NOSAME) &&
+ field->flags |= (((keyinfo->flags & HA_NOSAME ||
+ keyinfo->algorithm == HA_KEY_ALG_LONG_HASH) &&
(keyinfo->user_defined_key_parts == 1)) ?
UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG);
if (i == 0)
@@ -2556,7 +2656,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
field->part_of_sortkey= share->keys_in_use;
}
}
- if (field->key_length() != key_part->length)
+ if (field->key_length() != key_part->length &&
+ keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
{
#ifndef TO_BE_DELETED_ON_PRODUCTION
if (field->type() == MYSQL_TYPE_NEWDECIMAL)
@@ -2598,7 +2699,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (!(key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART |
HA_BIT_PART)) &&
key_part->type != HA_KEYTYPE_FLOAT &&
- key_part->type != HA_KEYTYPE_DOUBLE)
+      key_part->type != HA_KEYTYPE_DOUBLE &&
+ keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
key_part->key_part_flag|= HA_CAN_MEMCMP;
}
keyinfo->usable_key_parts= usable_parts; // Filesort
@@ -3266,6 +3368,84 @@ static bool check_vcol_forward_refs(Field *field, Virtual_column_info *vcol,
return res;
}
+static void print_long_unique_table(TABLE *table)
+{
+ char buff[256];
+ String str;
+ KEY *key_info_table, *key_info_share;
+ KEY_PART_INFO *key_part;
+ Field *field;
+ my_snprintf(buff, sizeof(buff), "Printing Table state, It will
print table fields,"
+ " fields->offset,field->null_bit, field->null_pos and
key_info ... \n"
+ "\nPrinting Table keyinfo\n");
+ str.append(buff, strlen(buff));
+ my_snprintf(buff, sizeof(buff), "\ntable->s->reclength %d\n"
+ "table->s->fields %d\n",
+ table->s->reclength, table->s->fields);
+ str.append(buff, strlen(buff));
+ for (uint i= 0; i < table->s->keys; i++)
+ {
+ key_info_table= table->key_info + i;
+ key_info_share= table->s->key_info + i;
+ my_snprintf(buff, sizeof(buff), "\ntable->key_info[%d]
user_defined_key_parts = %d\n"
+ "table->key_info[%d] algorithm ==
HA_KEY_ALG_LONG_HASH = %d\n"
+ "table->key_info[%d] flags &
HA_NOSAME = %d\n",
+ i, key_info_table->user_defined_key_parts,
+ i, key_info_table->algorithm ==
HA_KEY_ALG_LONG_HASH,
+ i, key_info_table->flags & HA_NOSAME);
+ str.append(buff, strlen(buff));
+ my_snprintf(buff, sizeof(buff), "\ntable->s->key_info[%d]
user_defined_key_parts = %d\n"
+ "table->s->key_info[%d] algorithm
== HA_KEY_ALG_LONG_HASH = %d\n"
+ "table->s->key_info[%d] flags &
HA_NOSAME = %d\n",
+ i, key_info_share->user_defined_key_parts,
+ i, key_info_share->algorithm ==
HA_KEY_ALG_LONG_HASH,
+ i, key_info_share->flags & HA_NOSAME);
+ str.append(buff, strlen(buff));
+ key_part = key_info_table->key_part;
+ my_snprintf(buff, sizeof(buff), "\nPrinting
table->key_info[%d].key_part[0] info\n"
+ "key_part->offset = %d\n"
+ "key_part->field_name = %s\n"
+ "key_part->length = %d\n"
+ "key_part->null_bit = %d\n"
+ "key_part->null_offset = %d\n",
+ i, key_part->offset, key_part->field->field_name.str,
key_part->length,
+ key_part->null_bit, key_part->null_offset);
+ str.append(buff, strlen(buff));
+
+ for (uint j= 0; j < key_info_share->user_defined_key_parts; j++)
+ {
+ key_part= key_info_share->key_part + j;
+ my_snprintf(buff, sizeof(buff), "\nPrinting
share->key_info[%d].key_part[%d] info\n"
+ "key_part->offset = %d\n"
+ "key_part->field_name = %s\n"
+ "key_part->length = %d\n"
+ "key_part->null_bit = %d\n"
+ "key_part->null_offset = %d\n",
+ i,j,key_part->offset, key_part->field->field_name.str,
key_part->length,
+ key_part->null_bit, key_part->null_offset);
+ str.append(buff, strlen(buff));
+ }
+ }
+ my_snprintf(buff, sizeof(buff), "\nPrinting table->fields\n");
+ str.append(buff, strlen(buff));
+ for(uint i= 0; i < table->s->fields; i++)
+ {
+ field= table->field[i];
+ my_snprintf(buff, sizeof(buff), "\ntable->field[%d]->field_name %s\n"
+ "table->field[%d]->offset = %d\n"
+ "table->field[%d]->field_length = %d\n"
+ "table->field[%d]->null_pos wrt to record 0 = %d\n"
+ "table->field[%d]->null_bit_pos = %d\n",
+ i, field->field_name.str,
+ i, field->ptr- table->record[0],
+ i, field->pack_length(),
+ i, field->null_bit ? field->null_ptr - table->record[0] : -1,
+ i, field->null_bit);
+ str.append(buff, strlen(buff));
+ }
+ (*error_handler_hook)(1, str.ptr(), ME_NOTE);
+}
+
/*
Open a table based on a TABLE_SHARE
@@ -3459,6 +3639,11 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
key_part_end= key_part + (share->use_ext_keys ? key_info->ext_key_parts :
key_info->user_defined_key_parts) ;
+ if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ key_part_end++;
+ key_info->flags&= ~HA_NOSAME;
+ }
for ( ; key_part < key_part_end; key_part++)
{
Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
@@ -3707,6 +3892,8 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
thd->status_var.opened_tables++;
thd->lex->context_analysis_only= save_context_analysis_only;
+ DBUG_EXECUTE_IF("print_long_unique_internal_state",
+ print_long_unique_table(outparam););
DBUG_RETURN (OPEN_FRM_OK);
err:
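A hedged usage sketch for the DBUG_EXECUTE_IF hook above (debug builds only;
the keyword name comes from the call, the rest is assumed):

set debug_dbug= '+d,print_long_unique_internal_state';
create table t1 (a text, unique(a)) engine=InnoDB;
select * from t1;  -- opening the table should print the state dump as a note
set debug_dbug= '';
drop table t1;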
@@ -4186,7 +4373,7 @@ uint calculate_key_len(TABLE *table, uint key, const uchar *buf,
/* works only with key prefixes */
DBUG_ASSERT(((keypart_map + 1) & keypart_map) == 0);
- KEY *key_info= table->s->key_info+key;
+ KEY *key_info= table->key_info+key;
KEY_PART_INFO *key_part= key_info->key_part;
KEY_PART_INFO *end_key_part= key_part + table->actual_n_key_parts(key_info);
uint length= 0;
@@ -4796,6 +4983,8 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
range_rowid_filter_cost_info_elems= 0;
range_rowid_filter_cost_info_ptr= NULL;
range_rowid_filter_cost_info= NULL;
+ update_handler= NULL;
+ check_unique_buf= NULL;
#ifdef HAVE_REPLICATION
/* used in RBR Triggers */
master_had_triggers= 0;
@@ -8638,6 +8827,69 @@ double KEY::actual_rec_per_key(uint i)
read_stats->get_avg_frequency(i) : (double) rec_per_key[i]);
}
+/*
+  Find the total number of fields in the hash expression.
+*/
+int fields_in_hash_keyinfo(KEY *keyinfo)
+{
+ Item_func_hash * temp= (Item_func_hash *)
+ keyinfo->key_part->field->vcol_info->expr;
+ return temp->argument_count();
+}
+/*
+  setup_keyinfo_hash changes key_info->key_part
+  to be the same as defined by the user.
+ */
+void setup_keyinfo_hash(KEY *key_info)
+{
+ DBUG_ASSERT(key_info->algorithm == HA_KEY_ALG_LONG_HASH);
+ DBUG_ASSERT(key_info->key_part->field->flags & LONG_UNIQUE_HASH_FIELD);
+ uint no_of_keyparts= fields_in_hash_keyinfo(key_info);
+ key_info->key_part-= no_of_keyparts;
+ key_info->user_defined_key_parts= key_info->usable_key_parts=
+ key_info->ext_key_parts= no_of_keyparts;
+}
+/*
+  re_setup_keyinfo_hash reverts the setup_keyinfo_hash change; this
+  arrangement is what the storage engine expects.
+ */
+
+void re_setup_keyinfo_hash(KEY *key_info)
+{
+ DBUG_ASSERT(key_info->algorithm == HA_KEY_ALG_LONG_HASH);
+ DBUG_ASSERT(!(key_info->key_part->field->flags & LONG_UNIQUE_HASH_FIELD));
+ while(!(key_info->key_part->field->flags & LONG_UNIQUE_HASH_FIELD))
+ key_info->key_part++;
+ key_info->user_defined_key_parts= key_info->usable_key_parts=
+ key_info->ext_key_parts= 1;
+}
+/**
+  @brief Clone the current handler.
+  Creates a clone of the handler, used during UPDATE
+  to check unique hash keys.
+*/
+void TABLE::clone_handler_for_update()
+{
+ handler *update_handler= NULL;
+ if (!s->long_unique_table)
+ return;
+ update_handler= file->clone(s->normalized_path.str,
+ in_use->mem_root);
+ update_handler->ha_external_lock(in_use, F_RDLCK);
+ this->update_handler= update_handler;
+ return;
+}
+
+/**
+ @brief Deletes update handler object
+*/
+void TABLE::delete_update_handler()
+{
+ update_handler->ha_external_lock(in_use, F_UNLCK);
+ update_handler->ha_close();
+ delete update_handler;
+ this->update_handler= NULL;
+}
LEX_CSTRING *fk_option_name(enum_fk_option opt)
{
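A sketch of the statement flow assumed to use clone_handler_for_update() and
delete_update_handler(): an UPDATE on a long-unique table needs a second
handler to probe for hash collisions while the main handler stays positioned
on the row being updated.

create table t1 (a blob, unique(a)) engine=InnoDB;
insert into t1 values (repeat('x', 5000));
update t1 set a= repeat('y', 5000);  -- uniqueness re-checked via update_handler
drop table t1;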
diff --git a/sql/table.h b/sql/table.h
index 914f4dc15c2..69117c92698 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -349,9 +349,18 @@ enum field_visibility_t {
INVISIBLE_FULL
};
-#define INVISIBLE_MAX_BITS 3
+#define INVISIBLE_MAX_BITS 3
+#define HA_HASH_FIELD_LENGTH 8
+#define HA_HASH_KEY_LENGTH_WITHOUT_NULL 8
+#define HA_HASH_KEY_LENGTH_WITH_NULL 9
+int fields_in_hash_keyinfo(KEY *keyinfo);
+
+void setup_keyinfo_hash(KEY *key_info);
+
+void re_setup_keyinfo_hash(KEY *key_info);
+
/**
Category of table found in the table share.
*/
@@ -745,6 +754,7 @@ struct TABLE_SHARE
bool vcols_need_refixing;
bool has_update_default_function;
bool can_do_row_logging; /* 1 if table supports RBR */
+ bool long_unique_table;
ulong table_map_id; /* for row-based replication */
@@ -1110,6 +1120,9 @@ struct TABLE
THD *in_use; /* Which thread uses this */
uchar *record[3]; /* Pointer to records */
+ /* record buf to resolve hash collisions for long UNIQUE constraints */
+ uchar *check_unique_buf;
+ handler *update_handler; /* Handler used in case of update */
uchar *write_row_record; /* Used as optimisation in
THD::write_row */
uchar *insert_values; /* used by INSERT ... UPDATE */
@@ -1572,6 +1585,8 @@ struct TABLE
void vers_update_fields();
void vers_update_end();
void find_constraint_correlated_indexes();
+ void clone_handler_for_update();
+ void delete_update_handler();
/** Number of additional fields used in versioned tables */
#define VERSIONING_FIELDS 2
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 4692b2d74d1..5d1ffdbc24f 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -39,7 +39,7 @@
/* threshold for safe_alloca */
#define ALLOCA_THRESHOLD 2048
-static uint pack_keys(uchar *,uint, KEY *, ulong);
+static uint pack_keys(uchar *,uint, KEY *, ulong, uint);
static bool pack_header(THD *, uchar *, List<Create_field> &, HA_CREATE_INFO *,
ulong, handler *);
static bool pack_vcols(String *, List<Create_field> &,
List<Virtual_column_info> *);
@@ -176,6 +176,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const
LEX_CSTRING *table,
ulong data_offset;
uint options_len;
uint gis_extra2_len= 0;
+ uint e_unique_hash_extra_parts= 0;
uchar fileinfo[FRM_HEADER_SIZE],forminfo[FRM_FORMINFO_SIZE];
const partition_info *part_info= IF_PARTITIONING(thd->work_part_info, 0);
bool error;
@@ -294,6 +295,9 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
create_fields.elements;
}
+ for (i= 0; i < keys; i++)
+ if (key_info[i].algorithm == HA_KEY_ALG_LONG_HASH)
+ e_unique_hash_extra_parts++;
key_buff_length= uint4korr(fileinfo+47);
frm.length= FRM_HEADER_SIZE; // fileinfo;
@@ -366,7 +370,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
pos+= 4;
DBUG_ASSERT(pos == frm_ptr + uint2korr(fileinfo+6));
- key_info_length= pack_keys(pos, keys, key_info, data_offset);
+  key_info_length= pack_keys(pos, keys, key_info, data_offset,
+                             e_unique_hash_extra_parts);
if (key_info_length > UINT_MAX16)
{
my_printf_error(ER_CANT_CREATE_TABLE,
@@ -528,7 +532,7 @@ int rea_create_table(THD *thd, LEX_CUSTRING *frm,
/* Pack keyinfo and keynames to keybuff for save in form-file. */
static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
- ulong data_offset)
+ ulong data_offset, uint e_unique_hash_extra_parts)
{
uint key_parts,length;
uchar *pos, *keyname_pos;
@@ -590,6 +594,7 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
}
}
+ key_parts+= e_unique_hash_extra_parts;
if (key_count > 127 || key_parts > 127)
{
keybuff[0]= (key_count & 0x7f) | 0x80;
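Worked example of the accounting (the one hidden key_part per long hash key is
the assumption being illustrated):

create table t1 (a blob, b blob, unique(a, b)) engine=InnoDB;
-- user_defined_key_parts = 2, but the frm stores 2 + 1 key_parts:
-- (a, b) plus the hidden hash field, hence e_unique_hash_extra_parts.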
diff --git a/sql/unireg.h b/sql/unireg.h
index a24c1b516c5..47ae6de0f14 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -179,7 +179,7 @@ enum extra2_frm_value_type {
};
enum extra2_field_flags {
- VERS_OPTIMIZED_UPDATE= 1 << INVISIBLE_MAX_BITS
+ VERS_OPTIMIZED_UPDATE= 1 << INVISIBLE_MAX_BITS,
};
int rea_create_table(THD *thd, LEX_CUSTRING *frm,
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index f97bc52b6a9..b7577ec1aec 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -980,7 +980,7 @@ int ha_archive::write_row(uchar *buf)
if (table->next_number_field && record == table->record[0])
{
- KEY *mkey= &table->s->key_info[0]; // We only support one key right now
+ KEY *mkey= &table->key_info[0]; // We only support one key right now
update_auto_increment();
temp_auto= table->next_number_field->val_int();
@@ -1098,7 +1098,7 @@ int ha_archive::index_read_idx(uchar *buf, uint index, const uchar *key,
{
int rc;
bool found= 0;
- KEY *mkey= &table->s->key_info[index];
+ KEY *mkey= &table->key_info[index];
current_k_offset= mkey->key_part->offset;
current_key= key;
current_key_len= key_len;
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index 7599765c07a..d5635143f2e 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -1657,10 +1657,7 @@ bool ha_connect::GetIndexOption(KEY *kp, PCSZ opname)
/****************************************************************************/
bool ha_connect::IsUnique(uint n)
{
- TABLE_SHARE *s= (table) ? table->s : NULL;
- KEY kp= s->key_info[n];
-
- return (kp.flags & 1) != 0;
+ return (table->key_info[n].flags & HA_NOSAME) != 0;
} // end of IsUnique
/****************************************************************************/
diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h
index 3a7e0060a05..c765e8e2f62 100644
--- a/storage/heap/ha_heap.h
+++ b/storage/heap/ha_heap.h
@@ -51,7 +51,7 @@ class ha_heap: public handler
HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
HA_CAN_SQL_HANDLER | HA_CAN_ONLINE_BACKUPS |
HA_REC_NOT_IN_SEQ | HA_CAN_INSERT_DELAYED | HA_NO_TRANSACTIONS |
- HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT);
+ HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT | HA_CAN_HASH_KEYS);
}
ulong index_flags(uint inx, uint part, bool all_parts) const
{
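Assuming HA_CAN_HASH_KEYS is what gates hash-based unique keys per engine,
MEMORY tables should now accept them as well; a sketch:

create table t1 (a varchar(2000), unique key(a) using hash) engine=MEMORY;
drop table t1;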
diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp
index 7a41442d4ba..d300cf001ed 100644
--- a/storage/mroonga/ha_mroonga.cpp
+++ b/storage/mroonga/ha_mroonga.cpp
@@ -2856,6 +2856,7 @@ ulonglong ha_mroonga::wrapper_table_flags() const
#ifdef HA_CAN_VIRTUAL_COLUMNS
table_flags |= HA_CAN_VIRTUAL_COLUMNS;
#endif
+ table_flags |= HA_CAN_HASH_KEYS;
DBUG_RETURN(table_flags);
}
@@ -2891,6 +2892,7 @@ ulonglong ha_mroonga::storage_table_flags() const
#ifdef HA_CAN_VIRTUAL_COLUMNS
flags |= HA_CAN_VIRTUAL_COLUMNS;
#endif
+ flags |= HA_CAN_HASH_KEYS;
DBUG_RETURN(flags);
}
diff --git a/storage/oqgraph/oqgraph_thunk.cc b/storage/oqgraph/oqgraph_thunk.cc
index 5e254450a2b..09cc9c1798b 100644
--- a/storage/oqgraph/oqgraph_thunk.cc
+++ b/storage/oqgraph/oqgraph_thunk.cc
@@ -109,7 +109,7 @@ const std::string& oqgraph3::cursor::record_position() const
if (_graph->_cursor->_index >= 0)
{
key_copy((uchar*) _graph->_cursor->_key.data(), table.record[0],
-        table.s->key_info + _index, table.s->key_info[_index].key_length, true);
+ table.key_info + _index, table.key_info[_index].key_length, true);
}
_graph->_stale= false;
@@ -184,7 +184,7 @@ int oqgraph3::cursor::restore_position()
if (int rc= table.file->ha_index_read_map(
table.record[0], (const uchar*) _key.data(),
(key_part_map)(1 << _parts) - 1,
-            table.s->key_info[_index].user_defined_key_parts == _parts ?
+ table.key_info[_index].user_defined_key_parts == _parts ?
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT))
{
table.file->ha_index_end();
@@ -368,7 +368,7 @@ int oqgraph3::cursor::seek_to(
if (!destid)
{
int i= 0;
- for( ::KEY *key_info= table.s->key_info,
+ for( ::KEY *key_info= table.key_info,
*key_end= key_info + table.s->keys;
key_info < key_end; ++key_info, ++i)
{
@@ -399,7 +399,7 @@ int oqgraph3::cursor::seek_to(
else if (!origid)
{
int i= 0;
- for( ::KEY *key_info= table.s->key_info,
+ for( ::KEY *key_info= table.key_info,
*key_end= key_info + table.s->keys;
key_info < key_end; ++key_info, ++i)
{
@@ -430,7 +430,7 @@ int oqgraph3::cursor::seek_to(
else
{
int i= 0;
- for( ::KEY *key_info= table.s->key_info,
+ for( ::KEY *key_info= table.key_info,
*key_end= key_info + table.s->keys;
key_info < key_end; ++key_info, ++i)
{
@@ -485,7 +485,7 @@ int oqgraph3::cursor::seek_to(
if (int rc= table.file->ha_index_read_map(
table.record[0], (uchar*) _key.data(),
(key_part_map) ((1U << _parts) - 1),
- table.s->key_info[_index].user_defined_key_parts == _parts ?
+ table.key_info[_index].user_defined_key_parts == _parts ?
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT))
{
table.file->ha_index_end();
diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc
index a5fc3345ea3..cc97a19988d 100644
--- a/storage/spider/ha_spider.cc
+++ b/storage/spider/ha_spider.cc
@@ -9428,7 +9428,7 @@ ulonglong ha_spider::table_flags() const
const char *ha_spider::index_type(
uint key_number
) {
- KEY *key_info = &table->s->key_info[key_number];
+ KEY *key_info = &table->key_info[key_number];
DBUG_ENTER("ha_spider::index_type");
DBUG_PRINT("info",("spider this=%p", this));
DBUG_PRINT("info",("spider flags=%ld", key_info->flags));
@@ -11186,9 +11186,9 @@ double ha_spider::read_time(
if (keyread)
{
DBUG_PRINT("info",("spider read_time(keyread) = %.6f",
- share->read_rate * table->s->key_info[index].key_length *
+ share->read_rate * table->key_info[index].key_length *
rows / 2 + 2));
- DBUG_RETURN(share->read_rate * table->s->key_info[index].key_length *
+ DBUG_RETURN(share->read_rate * table->key_info[index].key_length *
rows / 2 + 2);
} else {
DBUG_PRINT("info",("spider read_time = %.6f",
diff --git a/storage/spider/spd_db_handlersocket.cc b/storage/spider/spd_db_handlersocket.cc
index 37bbe530723..8eae39ffc5f 100644
--- a/storage/spider/spd_db_handlersocket.cc
+++ b/storage/spider/spd_db_handlersocket.cc
@@ -5145,7 +5145,7 @@ int spider_handlersocket_handler::append_open_handler(
share->tgt_dbs[spider->conn_link_idx[link_idx]],
share->tgt_table_names[spider->conn_link_idx[link_idx]],
spider->active_index < MAX_KEY ?
- table->s->key_info[spider->active_index].name :
+ table->key_info[spider->active_index].name :
"0",
str->c_ptr_safe(),
&request_key
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index 262e47120ce..d744de02a9d 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -3290,10 +3290,10 @@ void spider_db_mbase::set_dup_key_idx(
key_name_length = spider->share->tgt_pk_names_lengths[all_link_idx];
} else {
#ifdef SPIDER_use_LEX_CSTRING_for_KEY_Field_name
- key_name = table->s->key_info[roop_count].name.str;
- key_name_length = table->s->key_info[roop_count].name.length;
+ key_name = table->key_info[roop_count].name.str;
+ key_name_length = table->key_info[roop_count].name.length;
#else
- key_name = table->s->key_info[roop_count].name;
+ key_name = table->key_info[roop_count].name;
key_name_length = strlen(key_name);
#endif
}
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index b0f438b05bf..c863a9bbad4 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -6934,7 +6934,7 @@ void ha_tokudb::trace_create_table_info(TABLE* form) {
field->flags);
}
for (i = 0; i < form->s->keys; i++) {
- KEY *key = &form->s->key_info[i];
+ KEY *key = &form->key_info[i];
TOKUDB_HANDLER_TRACE(
"key:%d:%s:%d",
i,
@@ -7062,7 +7062,7 @@ int ha_tokudb::create_secondary_dictionary(
sprintf(dict_name, "key-%s", key_info->name.str);
make_name(newname, newname_len, name, dict_name);
- prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];
+ prim_key = (hpk) ? NULL : &form->key_info[primary_key];
//
// setup the row descriptor
@@ -7174,7 +7174,7 @@ int ha_tokudb::create_main_dictionary(
make_name(newname, newname_len, name, "main");
- prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];
+ prim_key = (hpk) ? NULL : &form->key_info[primary_key];
//
// setup the row descriptor
@@ -7429,7 +7429,7 @@ int ha_tokudb::create(
error = write_key_name_to_status(
status_block,
- form->s->key_info[i].name.str,
+ form->key_info[i].name.str,
txn);
if (error) {
goto cleanup;
@@ -8159,7 +8159,7 @@ int ha_tokudb::tokudb_add_index(
for (uint i = 0; i < num_of_keys; i++) {
for (uint j = 0; j < table_arg->s->keys; j++) {
if (strcmp(key_info[i].name.str,
- table_arg->s->key_info[j].name.str) == 0) {
+ table_arg->key_info[j].name.str) == 0) {
error = HA_ERR_WRONG_COMMAND;
goto cleanup;
}
diff --git a/storage/tokudb/ha_tokudb_alter_56.cc b/storage/tokudb/ha_tokudb_alter_56.cc
index 2bcc5dee127..6a0802e2d86 100644
--- a/storage/tokudb/ha_tokudb_alter_56.cc
+++ b/storage/tokudb/ha_tokudb_alter_56.cc
@@ -1606,7 +1606,7 @@ int ha_tokudb::new_row_descriptor(TABLE* altered_table,
} else {
KEY* prim_key =
hidden_primary_key ? NULL :
- &altered_table->s->key_info[primary_key];
+ &altered_table->key_info[primary_key];
if (idx == primary_key) {
row_descriptor->size = create_main_key_descriptor(
(uchar*)row_descriptor->data,
diff --git a/storage/tokudb/ha_tokudb_update.cc b/storage/tokudb/ha_tokudb_update.cc
index bb59d112680..fec0a42e063 100644
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@ -451,7 +451,7 @@ static bool check_all_update_expressions(
static bool full_field_in_key(TABLE* table, Field* field) {
assert_always(table->s->primary_key < table->s->keys);
- KEY* key = &table->s->key_info[table->s->primary_key];
+ KEY* key = &table->key_info[table->s->primary_key];
for (uint i = 0; i < key->user_defined_key_parts; i++) {
KEY_PART_INFO* key_part = &key->key_part[i];
if (strcmp(field->field_name.str,
key_part->field->field_name.str) == 0) {
@@ -517,7 +517,7 @@ static bool check_point_update(Item* conds, TABLE* table) {
MY_BITMAP pk_fields;
if (bitmap_init(&pk_fields, NULL, table->s->fields, FALSE)) // 1 -> failure
return false;
- KEY *key = &table->s->key_info[table->s->primary_key];
+ KEY *key = &table->key_info[table->s->primary_key];
for (uint i = 0; i < key->user_defined_key_parts; i++)
bitmap_set_bit(&pk_fields, key->key_part[i].field->field_index);
@@ -555,7 +555,7 @@ static bool check_point_update(Item* conds, TABLE* table) {
static bool clustering_keys_exist(TABLE *table) {
for (uint keynr = 0; keynr < table->s->keys; keynr++) {
if (keynr != table->s->primary_key &&
- key_is_clustering(&table->s->key_info[keynr]))
+ key_is_clustering(&table->key_info[keynr]))
return true;
}
return false;
diff --git a/storage/tokudb/mysql-test/tokudb/r/type_blob.result b/storage/tokudb/mysql-test/tokudb/r/type_blob.result
index 3f6596787e5..85f9d343e04 100644
--- a/storage/tokudb/mysql-test/tokudb/r/type_blob.result
+++ b/storage/tokudb/mysql-test/tokudb/r/type_blob.result
@@ -357,8 +357,6 @@ HELLO MY 1
a 1
hello 1
drop table t1;
-create table t1 (a text, unique (a(21000)));
-ERROR 42000: Specified key was too long; max key length is 3072 bytes
create table t1 (a text, key (a(2100)));
show create table t1;
Table Create Table
diff --git a/storage/tokudb/mysql-test/tokudb/t/type_blob.test b/storage/tokudb/mysql-test/tokudb/t/type_blob.test
index 6a429c46a55..7cf77e386c7 100644
--- a/storage/tokudb/mysql-test/tokudb/t/type_blob.test
+++ b/storage/tokudb/mysql-test/tokudb/t/type_blob.test
@@ -133,8 +133,6 @@ select c,count(*) from t1 group by c;
select d,count(*) from t1 group by d;
drop table t1;
--- error 1071
-create table t1 (a text, unique (a(21000))); # should give an error
create table t1 (a text, key (a(2100))); # key is auto-truncated
replace_regex /ENGINE=[a-zA-Z]*/ENGINE=ENGINE/;
show create table t1;
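The dropped case should no longer fail, since long hash uniques are not bound
by the 3072-byte key length limit; a hedged sketch:

create table t1 (a text, unique (a(21000))) engine=TokuDB;
-- previously: ERROR 1071 (max key length is 3072 bytes);
-- now assumed to create a long unique hash key instead.
drop table t1;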
--
Regards
Sachin Setiya
Software Engineer at MariaDB
revision-id: 91d506cf2d5c143f7eb74e776b1417cf2acaacd8 (mariadb-10.2.22-12-g91d506cf2d5)
parent(s): 88b6dc4db5567951f9c0d0baa6e965d44a7130b1 431da59f1ce2b594ef465563bf18f670f07a1b32
author: Oleksandr Byelkin
committer: Oleksandr Byelkin
timestamp: 2019-02-19 16:47:45 +0100
message:
Merge branch '10.1' into 10.2
CMakeLists.txt | 2 +-
cmake/readline.cmake | 2 +-
configure.cmake | 2 +-
plugin/aws_key_management/CMakeLists.txt | 8 ++++++--
scripts/mysql_install_db.sh | 8 +++++---
5 files changed, 14 insertions(+), 8 deletions(-)
diff --cc plugin/aws_key_management/CMakeLists.txt
index aa93fc3aa03,49f6b54a24c..e9e1b49d5f2
--- a/plugin/aws_key_management/CMakeLists.txt
+++ b/plugin/aws_key_management/CMakeLists.txt
@@@ -17,8 -22,19 +17,10 @@@ MACRO(SKIP_AWS_PLUGIN msg)
ENDMACRO()
SET(CMAKE_CXX_STANDARD 11)
-
+ IF(NOT NOT_FOR_DISTRIBUTION)
+ SKIP_AWS_PLUGIN("AWS SDK has Apache 2.0 License which is not complatible with GPLv2. Set -DNOT_FOR_DISTRIBUTION=ON if you need this plugin")
+ ENDIF()
-MYSQL_ADD_PLUGIN(aws_key_management aws_key_management_plugin.cc DISABLED
- COMPONENT aws-key-management)
-
-IF(NOT TARGET aws_key_management)
- RETURN()
-ELSE()
- SET(NON_DISTRIBUTABLE_WARNING "Apache 2.0" PARENT_SCOPE)
-ENDIF()
-
# This plugin needs recent C++ compilers (AWS C++ SDK header files are using C++11 features)
SET(CXX11_FLAGS)
SET(OLD_COMPILER_MSG "AWS SDK requires a C++11-capable compiler (minimal supported versions are g++ 4.8, clang 3.3, VS2013)")
@@@ -163,8 -181,5 +165,10 @@@ IF(WIN32
ELSE()
SET(AWS_CPP_SDK_DEPENDENCIES ${OPENSSL_LIBRARIES} ${CURL_LIBRARIES} ${UUID_LIBRARIES})
ENDIF()
+MYSQL_ADD_PLUGIN(aws_key_management aws_key_management_plugin.cc
+ LINK_LIBRARIES ${AWS_SDK_LIBS} ${AWS_CPP_SDK_DEPENDENCIES}
+ COMPONENT aws-key-management)
-
-TARGET_LINK_LIBRARIES(aws_key_management ${AWS_SDK_LIBS} ${AWS_CPP_SDK_DEPENDENCIES})
++IF (TARGET aws_key_management)
++ SET(NON_DISTRIBUTABLE_WARNING "Apache 2.0" PARENT_SCOPE)
++ENDIF()
diff --cc scripts/mysql_install_db.sh
index 681b73dfc9f,5fa2c4c0e04..8bad8c4000b
--- a/scripts/mysql_install_db.sh
+++ b/scripts/mysql_install_db.sh
@@@ -342,17 -342,15 +344,17 @@@ then
cannot_find_file fill_help_tables.sql @pkgdata_locations@
exit 1
fi
+ plugindir=`find_in_dirs --dir auth_socket.so $basedir/lib*/plugin $basedir/lib*/mysql/plugin`
# relative from where the script was run for a relocatable install
- elif test -n "$dirname0" -a -x "$dirname0/@INSTALL_SBINDIR@/mysqld"
+ elif test -n "$dirname0" -a -x "$rel_mysqld" -a ! "$rel_mysqld" -ef "@sbindir@/mysqld"
then
basedir="$dirname0"
- bindir="$basedir/@INSTALL_SBINDIR@"
+ bindir="$basedir/@INSTALL_BINDIR@"
resolveip="$bindir/resolveip"
- mysqld="$basedir/@INSTALL_SBINDIR@/mysqld"
+ mysqld="$rel_mysqld"
srcpkgdatadir="$basedir/@INSTALL_MYSQLSHAREDIR@"
buildpkgdatadir="$basedir/@INSTALL_MYSQLSHAREDIR@"
+ plugindir="$basedir/@INSTALL_PLUGINDIR@"
else
basedir="@prefix@"
bindir="@bindir@"