developers
Threads by month
- ----- 2024 -----
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2009 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
February 2019
- 12 participants
- 26 discussions
Hi, Nikita!
On Feb 18, Nikita Malyavin wrote:
> revision-id: 5e265de00c5 (mariadb-10.4.1-185-g5e265de00c5)
> parent(s): 8ec8242e962
> author: Nikita Malyavin <nikitamalyavin(a)gmail.com>
> committer: Nikita Malyavin <nikitamalyavin(a)gmail.com>
> timestamp: 2019-02-18 23:48:33 +1000
> message:
>
> triggers fix: delete
Commit comment, please. Like
MDEV-16973 Application-time periods: DELETE
fire triggers in the order: BEFORE DELETE, BEFORE INSERT, AFTER INSERT, AFTER DELETE.
> ---
> mysql-test/suite/period/r/delete.result | 74 ++++++++++++++++-----------------
> sql/sql_delete.cc | 45 ++++++++++----------
> 2 files changed, 58 insertions(+), 61 deletions(-)
>
> diff --git a/mysql-test/suite/period/r/delete.result b/mysql-test/suite/period/r/delete.result
> index f0949f5b0d9..0ac11f518f8 100644
> --- a/mysql-test/suite/period/r/delete.result
> +++ b/mysql-test/suite/period/r/delete.result
> @@ -35,33 +35,33 @@ id s e
> select * from log_tbl order by id;
> id log
> 1 >DEL: 1, 1999-01-01, 2018-12-12
> -2 <DEL: 1, 1999-01-01, 2018-12-12
> -3 >INS: 1, 1999-01-01, 2000-01-01
> -4 <INS: 1, 1999-01-01, 2000-01-01
> -5 >INS: 1, 2018-01-01, 2018-12-12
> -6 <INS: 1, 2018-01-01, 2018-12-12
> +2 >INS: 1, 1999-01-01, 2000-01-01
> +3 <INS: 1, 1999-01-01, 2000-01-01
> +4 >INS: 1, 2018-01-01, 2018-12-12
> +5 <INS: 1, 2018-01-01, 2018-12-12
> +6 <DEL: 1, 1999-01-01, 2018-12-12
> 7 >DEL: 1, 1999-01-01, 2017-01-01
> -8 <DEL: 1, 1999-01-01, 2017-01-01
> -9 >INS: 1, 1999-01-01, 2000-01-01
> -10 <INS: 1, 1999-01-01, 2000-01-01
> +8 >INS: 1, 1999-01-01, 2000-01-01
> +9 <INS: 1, 1999-01-01, 2000-01-01
> +10 <DEL: 1, 1999-01-01, 2017-01-01
> 11 >DEL: 1, 2017-01-01, 2019-01-01
> -12 <DEL: 1, 2017-01-01, 2019-01-01
> -13 >INS: 1, 2018-01-01, 2019-01-01
> -14 <INS: 1, 2018-01-01, 2019-01-01
> +12 >INS: 1, 2018-01-01, 2019-01-01
> +13 <INS: 1, 2018-01-01, 2019-01-01
> +14 <DEL: 1, 2017-01-01, 2019-01-01
> 15 >DEL: 2, 1998-01-01, 2018-12-12
> -16 <DEL: 2, 1998-01-01, 2018-12-12
> -17 >INS: 2, 1998-01-01, 2000-01-01
> -18 <INS: 2, 1998-01-01, 2000-01-01
> -19 >INS: 2, 2018-01-01, 2018-12-12
> -20 <INS: 2, 2018-01-01, 2018-12-12
> +16 >INS: 2, 1998-01-01, 2000-01-01
> +17 <INS: 2, 1998-01-01, 2000-01-01
> +18 >INS: 2, 2018-01-01, 2018-12-12
> +19 <INS: 2, 2018-01-01, 2018-12-12
> +20 <DEL: 2, 1998-01-01, 2018-12-12
> 21 >DEL: 3, 1997-01-01, 2015-01-01
> -22 <DEL: 3, 1997-01-01, 2015-01-01
> -23 >INS: 3, 1997-01-01, 2000-01-01
> -24 <INS: 3, 1997-01-01, 2000-01-01
> +22 >INS: 3, 1997-01-01, 2000-01-01
> +23 <INS: 3, 1997-01-01, 2000-01-01
> +24 <DEL: 3, 1997-01-01, 2015-01-01
> 25 >DEL: 4, 2016-01-01, 2020-01-01
> -26 <DEL: 4, 2016-01-01, 2020-01-01
> -27 >INS: 4, 2018-01-01, 2020-01-01
> -28 <INS: 4, 2018-01-01, 2020-01-01
> +26 >INS: 4, 2018-01-01, 2020-01-01
> +27 <INS: 4, 2018-01-01, 2020-01-01
> +28 <DEL: 4, 2016-01-01, 2020-01-01
> 29 >DEL: 5, 2010-01-01, 2015-01-01
> 30 <DEL: 5, 2010-01-01, 2015-01-01
add a test case with no BEFORE INSERT trigger. For example, here drop
the BEFORE INSERT trigger (but keep the other three) and delete
something again. Please, make sure that "delete something" covers all
cases, like above (left end, right end, middle, etc).
> # INSERT trigger only also works
> diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
> index 9607718d755..d79a9deaa4d 100644
> --- a/sql/sql_delete.cc
> +++ b/sql/sql_delete.cc
> @@ -802,9 +796,11 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
> only if there are no triggers set.
> It is also meaningless for system-versioned table
Fix the comment above to explain why you only look at BEFORE INSERT
trigger here.
> */
> - portion_of_time_through_update= !has_triggers
> - && !table->versioned()
> - && table->file->has_transactions();
> + portion_of_time_through_update=
> + !(table->triggers && table->triggers->has_triggers(TRG_EVENT_INSERT,
> + TRG_ACTION_BEFORE))
> + && !table->versioned()
> + && table->file->has_transactions();
>
> THD_STAGE_INFO(thd, stage_updating);
> while (likely(!(error=info.read_record())) && likely(!thd->killed) &&
> @@ -840,6 +836,12 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
> else
> {
> error= table->delete_row();
> +
> + ha_rows rows_inserted;
> + if (likely(!error) && table_list->has_period()
> + && !portion_of_time_through_update)
> + error= table->insert_portion_of_time(thd, table_list->period_conditions,
> + &rows_inserted);
> }
Regards,
Sergei
Chief Architect MariaDB
and security(a)mariadb.org
2
2
Hi Varun,
Please find more input below.
> --- 10.4-optimizer-trace-orig/sql/sql_derived.cc 2019-02-10 14:25:59.217015365 +0300
> +++ 10.4-optimizer-trace-cl/sql/sql_derived.cc 2019-02-12 14:04:05.446571305 +0300
> @@ -486,6 +486,24 @@ exit_merge:
> DBUG_RETURN(res);
>
> unconditional_materialization:
> +
> + if (unlikely(thd->trace_started()))
> + {
> + /*
> + Add to the optimizer trace the change in choice for merged
> + derived tables/views to materialised ones.
> + */
> + Json_writer_object trace_wrapper(thd);
> + Json_writer_object trace_derived(thd, derived->is_derived() ?
> + "derived" : "view");
> + trace_derived.add("table", derived->alias.str ? derived->alias.str : "<NULL>")
> + .add_select_number(derived->get_unit()->
> + first_select()->select_number)
> + .add("initial_choice", "merged")
> + .add("final_choice", "materialized")
> + .add("cause", cause);
Please add test coverage for this (I cannot find it in the testsuite).
> +select * from v1 {
> + "steps": [
> + {
> + "join_preparation": {
> + "select_id": 1,
> + "steps": [
> + {
> + "view": {
> + "table": "v1",
> + "select_id": 2,
> + "merged": true
The above looks a bit odd: "view", inside it "table", and then either
"materialized" or "merged". I would prefer
"algorithm": "materialize"
or
"algorithm": "merge"
> diff -urpN '--exclude=.*' 10.4-optimizer-trace-orig/sql/sql_select.cc 10.4-optimizer-trace-cl/sql/sql_select.cc
> --- 10.4-optimizer-trace-orig/sql/sql_select.cc 2019-02-10 14:25:59.225015425 +0300
> +++ 10.4-optimizer-trace-cl/sql/sql_select.cc 2019-02-12 14:04:05.450571322 +0300
> @@ -345,6 +349,40 @@ bool dbug_user_var_equals_int(THD *thd,
> }
> #endif
>
> +static void trace_table_dependencies(THD *thd,
> + JOIN_TAB *join_tabs, uint table_count)
> +{
> + Json_writer_object trace_wrapper(thd);
> + Json_writer_array trace_dep(thd, "table_dependencies");
> + for (uint i = 0; i < table_count; i++)
> + {
> + TABLE_LIST *table_ref = join_tabs[i].tab_list;
> + Json_writer_object trace_one_table(thd);
> + trace_one_table.add_table_name(&join_tabs[i]);
> + trace_one_table.add("row_may_be_null",
> + (bool)table_ref->table->maybe_null);
> + const table_map map = table_ref->get_map();
> + DBUG_ASSERT(map < (1ULL << table_count));
> + for (uint j = 0; j < table_count; j++)
> + {
> + if (map & (1ULL << j))
> + {
> + trace_one_table.add("map_bit", static_cast<longlong>(j));
> + break;
> + }
> + }
> + Json_writer_array depends_on(thd, "depends_on_map_bits");
> + static_assert(sizeof(table_ref->get_map()) <= 64,
> + "RAND_TABLE_BIT may be in join_tabs[i].dependent, so we test "
> + "all 64 bits.");
sizeof() is in bytes, not bits, so the above assert actually checks for
whether table_map is 64 bytes large (which is incorrect).
I would suggest switching to using Table_map_iterator and forgetting about the
64-bit limitation (and the value of RAND_TABLE_BIT, too)
> + for (uint j = 0; j < 64; j++)
> + {
> + if (join_tabs[i].dependent & (1ULL << j))
> + depends_on.add(static_cast<longlong>(j));
> + }
> --- 10.4-optimizer-trace-orig/sql/opt_table_elimination.cc 2019-02-10 14:25:59.209015304 +0300
> +++ 10.4-optimizer-trace-cl/sql/opt_table_elimination.cc 2019-02-12 14:04:05.442571289 +0300
> @@ -522,7 +524,8 @@ eliminate_tables_for_list(JOIN *join,
> List<TABLE_LIST> *join_list,
> table_map tables_in_list,
> Item *on_expr,
> - table_map tables_used_elsewhere);
> + table_map tables_used_elsewhere,
> + Json_writer_array* eliminate_tables);
I think I wrote this already - I think 'eliminate_tables' is a very poor name
for the argument - it doesn't show that this is just a trace output object. If
something is named 'eliminate_tables' I would expect it to play some active
role (e.g. being a list of tables we are trying to eliminate or something like
that).
Please change to something like trace, trace_eliminated, trace_for_eliminated,
...
> @@ -2340,8 +2377,13 @@ int pull_out_semijoin_tables(JOIN *join)
> bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
> {
> DBUG_ENTER("optimize_semijoin_nests");
> + THD *thd= join->thd;
> List_iterator<TABLE_LIST> sj_list_it(join->select_lex->sj_nests);
> TABLE_LIST *sj_nest;
> + Json_writer_object wrapper(thd);
> + Json_writer_object trace_semijoin_nest(thd,
> + "execution_plan_for_potential_materialization");
> + Json_writer_array trace_steps_array(thd, "steps");
> while ((sj_nest= sj_list_it++))
> {
> /* semi-join nests with only constant tables are not valid */
The most common output this produces is a useless
"execution_plan_for_potential_materialization": {
"steps": []
}
Please add "if (!join->select_lex->sj_nests.elements) \n DBUG_RETURN(FALSE);"
to avoid it.
Please also add a testcase where a non-empty output is produced (it's nice to
have a semi-representative set of outputs in main/opt_trace*result).
> --- 10.4-optimizer-trace-orig/mysql-test/main/opt_trace.result 1970-01-01 03:00:00.000000000 +0300
> +++ 10.4-optimizer-trace-cl/mysql-test/main/opt_trace.result 2019-02-12 14:04:05.438571272 +0300
> +QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
> +EXPLAIN SELECT DISTINCT a FROM t1 {
...
> + "best_group_range_summary": {
> + "type": "index_group",
> + "index": "a",
> + "group_attribute": null,
> + "min_aggregate": false,
> + "max_aggregate": false,
> + "distinct_aggregate": false,
> + "rows": 5,
> + "cost": 7.5,
> + "key_parts_used_for_access": ["a"],
> + "ranges": [],
> + "chosen": true
> + },
How come group_attribute is NULL? Loose Index Scan requires grouping on a
column. Are you sure you are printing the right value?
> --- 10.4-optimizer-trace-orig/sql/opt_trace.cc 1970-01-01 03:00:00.000000000 +0300
> +++ 10.4-optimizer-trace-cl/sql/opt_trace.cc 2019-02-12 14:04:05.442571289 +0300
> ...
> +void opt_trace_print_expanded_query(THD *thd, SELECT_LEX *select_lex,
> + Json_writer_object *writer)
> +
> +{
> + if (!thd->trace_started())
> + return;
> + char buff[1024];
> + String str(buff, sizeof(buff), system_charset_info);
> + str.length(0);
Please use StringBuffer<1024> instead.
> + select_lex->print(thd, &str,
> + enum_query_type(QT_TO_SYSTEM_CHARSET |
> + QT_SHOW_SELECT_NUMBER |
> + QT_ITEM_IDENT_SKIP_DB_NAMES |
> + QT_VIEW_INTERNAL
> + ));
> + /*
> + The output is not very pretty lots of back-ticks, the output
> + is as the one in explain extended , lets try to improved it here.
> + */
> + writer->add("expanded_query", str.c_ptr_safe(), str.length());
> +}
Did you try using QT_EXPLAIN? (if it doesn't work, I'd like to know why)
...
> + trace->missing_privilege();
> + return;
> +}
A 'return;' at the end of void function looks very confusing. Please remove it
> +void opt_trace_disable_if_no_stored_proc_func_access(THD *thd, sp_head *sp)
> +{
> + if (likely(!(thd->variables.optimizer_trace &
> + Opt_trace_context::FLAG_ENABLED)) ||
> + thd->system_thread)
> + return;
> +
> + Opt_trace_context *const trace = &thd->opt_trace;
> + if (!thd->trace_started())
> + return;
> + bool full_access;
> + Security_context *const backup_thd_sctx = thd->security_context();
> + thd->set_security_context(&thd->main_security_ctx);
> + const bool rc = check_show_routine_access(thd, sp, &full_access) || !full_access;
> + thd->set_security_context(backup_thd_sctx);
> + if (rc)
> + trace->missing_privilege();
> + return;
> +}
A 'return;' at the end of void function looks very confusing. Please remove
it.
Also, check the assignments in the function: they should follow coding style
and be "x= y" and not "x = y".
> +int fill_optimizer_trace_info(THD *thd, TABLE_LIST *tables, Item *)
> +{
> + TABLE *table = tables->table;
> + Opt_trace_info info;
> +
> + /* get_values of trace, query , missing bytes and missing_priv
> +
> + @todo: Need an iterator here to walk over all the traces
> + */
> + Opt_trace_context* ctx= &thd->opt_trace;
> +
> + if (thd->opt_trace.empty())
> + {
So thd->opt_trace.empty() returns FALSE when there are NO traces.
And it returns TRUE when there are traces.
THIS IS EXTREMELY CONFUSING! Please fix it.
> + Opt_trace_stmt *stmt= ctx->get_top_trace();
> + stmt->fill_info(&info);
> +
> + table->field[0]->store(info.query_ptr, static_cast<uint>(info.query_length),
> + info.query_charset);
...
> +void Json_writer::add_str(Item *item)
> +{
> + if (item)
> + {
> + THD *thd= current_thd;
> + char buff[256];
> + String str(buff, sizeof(buff), system_charset_info);
> + str.length(0);
Please use StringBuffer.
> + ~Opt_trace_stmt()
> + {
> + delete current_json;
> + missing_priv= false;
> + ctx= NULL;
> + I_S_disabled= 0;
> + }
What is this for? Why set member variables in the destructor? This either does
nothing, or is an unsuccessful attempt to prevent "reading freed memory"-type
crashes. Please remove this.
> +private:
> + Opt_trace_context *ctx;
> + String query; // store the query sent by the user
> + Json_writer *current_json; // stores the trace
> + bool missing_priv; ///< whether user lacks privilege to see this trace
> + uint I_S_disabled;
> +};
What is I_S_disabled and why is it called like that? Please document as it is
totally confusing.
> +void opt_trace_disable_if_no_security_context_access(THD *thd)
> ...
> + if (!thd->trace_started())
> + {
> + /*
> + @@optimizer_trace has "enabled=on" but trace is not started.
> + Either Opt_trace_start ctor was not called for our statement (3), or it
> + was called but at that time, the variable had "enabled=off" (4).
> +
> + There are no known cases of (3).
> +
> + (4) suggests that the user managed to change the variable during
> + execution of the statement, and this statement is using
> + view/routine (note that we have not been able to provoke this, maybe
> + this is impossible). If it happens it is suspicious.
> +
> + We disable I_S output. And we cannot do otherwise: we have no place to
> + store a possible "missing privilege" information (no Opt_trace_stmt, as
> + is_started() is false), so cannot do security checks, so cannot safely
> + do tracing, so have to disable I_S output. And even then, we don't know
> + when to re-enable I_S output, as we have no place to store the
> + information "re-enable tracing at the end of this statement", and we
> + don't even have a notion of statement here (statements in the optimizer
> + trace world mean an Opt_trace_stmt object, and there is none here). So
> + we must disable for the session's life.
> +
> + COM_FIELD_LIST opens views, thus used to be a case of (3). To avoid
> + disabling I_S output for the session's life when this command is issued
> + (like in: "SET OPTIMIZER_TRACE='ENABLED=ON';USE somedb;" in the 'mysql'
> + command-line client), we have decided to create a Opt_trace_start for
> + this command. The command itself is not traced though
> + (SQLCOM_SHOW_FIELDS does not have CF_OPTIMIZER_TRACE).
> + */
> + return;
> + }
As far as I understand, this was copied from MySQL? Is this still true in
MariaDB codebase? It sounds scary, let's discuss it.
> diff -urpN '--exclude=.*' 10.4-optimizer-trace-orig/sql/my_json_writer.cc 10.4-optimizer-trace-cl/sql/my_json_writer.cc
> --- 10.4-optimizer-trace-orig/sql/my_json_writer.cc 2019-02-10 14:25:59.201015244 +0300
> +++ 10.4-optimizer-trace-cl/sql/my_json_writer.cc 2019-02-12 14:04:05.438571272 +0300
> ...
> +Json_writer_array::Json_writer_array(THD *thd) :
> + Json_writer_struct(thd)
Json_writer_object and Json_writer_array are widely used. I guess
performance-wise, it would be beneficial if their calls were inlined.
A while ago, compiler would only inline a function if its body is in the .h
file (and so is visible in the compilation unit where inlining should occur).
I'm wondering if this is still true? (if yes, we better move these methods to
the .h file).
Can you look at release-build disassembly and check if there are calls to
Json_writer_array member functions there or they were inlined?
(the same question for THD::trace_started)
> +{
> + if (my_writer)
> + my_writer->start_array();
> +}
> +
> +Json_writer_array::Json_writer_array(THD *thd, const char *str)
> + :Json_writer_struct(thd)
Please change the indentation to be like in the ctor above.
> +{
> + if (my_writer)
...
> @@ -123,9 +209,18 @@ public:
> + /*
> + One can set a limit of how large a JSON document should be.
> + Writes beyond that size will be counted, but will not be collected.
> + */
> + void set_size_limit(size_t mem_size) { output.set_size_limit(mem_size); }
> +
> + // psergey: return how many bytes would be required to store everything
Remove the comment.
> + size_t get_truncated_bytes() { return output.get_truncated_bytes(); }
> +
> Json_writer() :
> indent_level(0), document_start(true), element_started(false),
> - first_child(true)
> + first_child(true), allowed_mem_size(0)
> {
> fmt_helper.init(this);
> }
> @@ -140,15 +235,335 @@ private:
> bool element_started;
> bool first_child;
>
> + /*
> + True when we are using the optimizer trace
> + FALSE otherwise
> + */
> + size_t allowed_mem_size;
The comment above is wrong, remove it.
Also remove the 'allowed_mem_size' as it is not used anymore.
> +void Opt_trace_context::flush_optimizer_trace()
> +{
> + inited= false;
> + if (traces)
> + {
> + while (traces->elements())
> + {
> + Opt_trace_stmt *prev= traces->at(0);
> + delete prev;
> + traces->del(0);
> + }
> + delete traces;
> + traces= NULL;
> + }
> +}
The name of this function is confusing. It is not clear what is flushed and
where. How about renaming it to "delete_traces()" ?
> +Opt_trace_context::~Opt_trace_context()
> +{
> + inited= FALSE;
> + /*
> + would be nice to move this to a function
> + */
And this function is already there - it's currently called flush_optimizer_trace().
I am also confused with setting member variables (inited, max_mem_size) in the destructor.
This object is going away, why do it? Please remove that.
> + if (traces)
> + {
> + while (traces->elements())
> + {
> + Opt_trace_stmt *prev= traces->at(0);
> + delete prev;
> + traces->del(0);
> + }
> + delete traces;
> + traces= NULL;
> + }
> + max_mem_size= 0;
> +}
> +
> --- 10.4-optimizer-trace-orig/sql/opt_range.cc 2019-02-10 14:25:59.209015304 +0300
> +++ 10.4-optimizer-trace-cl/sql/opt_range.cc 2019-02-12 14:04:05.438571272 +0300
> @@ -3214,17 +3431,25 @@ bool calculate_cond_selectivity_for_tabl
> SEL_ARG *key= tree->keys[idx];
> if (key)
> {
...
> rows= records_in_column_ranges(¶m, idx, key);
> if (rows != DBL_MAX)
> + {
> key->field->cond_selectivity= rows/table_records;
> + selectivity_for_column.add("selectivity_from_histograms",
> + key->field->cond_selectivity);
> + }
That's not from multiple histogramS, that's from one histogram. Please change
the name.
And I think I have requested this already: at the end of the function, please
print the table->cond_selectivity as the total selectivity of condition for
this table.
...
Coding style: can you search through your patch for instances of
if (x) {
}
and change them to
if (x)
{
}
?
The same for "x = y;" -> "x= y;"
> --- 10.4-optimizer-trace-orig/sql/opt_trace_context.h 1970-01-01 03:00:00.000000000 +0300
> +++ 10.4-optimizer-trace-cl/sql/opt_trace_context.h 2019-02-10 14:25:38.576860203 +0300
> + Opt_trace_stmt *current_trace;
> + /*
> + TRUE: if we allocate memory for list of traces
> + FALSE: otherwise
> + */
> + bool inited;
'inited' is not necessary, it would be sufficient to check if 'traces' is
NULL. (On the other hand, why traces is a pointer?)
BR
Sergei
--
Sergei Petrunia, Software Developer
MariaDB Corporation | Skype: sergefp | Blog: http://s.petrunia.net/blog
1
0
Re: [Maria-developers] [Commits] b428e82: MDEV-7974 backport fix for mysql bug#12161 (XA and binlog).
by andrei.elkin@pp.inet.fi 14 Feb '19
by andrei.elkin@pp.inet.fi 14 Feb '19
14 Feb '19
Привет, Алексей!
Я просматриваю патч. Где-нить через 2-3 часа должны быть комментарии от
меня. Можно будет созвониться. И сразу - полагаю, ты сейчас прямо
усердно трудишься над тестами (в патче не присутствующими).
Andrei.
holyfoot(a)askmonty.org (Alexey Botchkov) writes:
> revision-id: b428e822da09b2bc82a2447332bb980f93c80262 (mariadb-10.4.1-103-gb428e82)
> parent(s): 8aae31cf494678b6253031c627566e50bc666920
> committer: Alexey Botchkov
> timestamp: 2019-02-14 02:46:57 +0400
> message:
>
> MDEV-7974 backport fix for mysql bug#12161 (XA and binlog).
>
> XA transactions now are kept persistent after prepare.
> XA_prepare_log_event implamented, and XA tranasctions are logged
> as XA transactions.
>
> ---
> sql/handler.cc | 9 ++
> sql/handler.h | 10 ++
> sql/log.cc | 115 +++++++++++++---
> sql/log.h | 10 ++
> sql/log_event.cc | 397 +++++++++++++++++++++++++++++++++++++++++++++++++++--
> sql/log_event.h | 81 +++++++++++
> sql/sql_class.cc | 18 +--
> sql/sql_class.h | 20 ++-
> sql/sql_connect.cc | 1 +
> sql/transaction.cc | 69 +++++++++-
> sql/transaction.h | 1 +
> 11 files changed, 691 insertions(+), 40 deletions(-)
>
> diff --git a/sql/handler.cc b/sql/handler.cc
> index 001055c..3b2a3e0 100644
> --- a/sql/handler.cc
> +++ b/sql/handler.cc
> @@ -1214,6 +1214,9 @@ static int prepare_or_error(handlerton *ht, THD *thd, bool all)
> }
>
>
> +/*static inline */int
> +binlog_commit_flush_xid_caches(THD *thd, binlog_cache_mngr *cache_mngr,
> + bool all, my_xid xid);
> /**
> @retval
> 0 ok
> @@ -1225,6 +1228,7 @@ int ha_prepare(THD *thd)
> int error=0, all=1;
> THD_TRANS *trans=all ? &thd->transaction.all : &thd->transaction.stmt;
> Ha_trx_info *ha_info= trans->ha_list;
> +
> DBUG_ENTER("ha_prepare");
>
> if (ha_info)
> @@ -1250,6 +1254,11 @@ int ha_prepare(THD *thd)
>
> }
> }
> + if (unlikely(tc_log->log_prepare(thd, all)))
> + {
> + ha_rollback_trans(thd, all);
> + error=1;
> + }
> }
>
> DBUG_RETURN(error);
> diff --git a/sql/handler.h b/sql/handler.h
> index fc6246c..613c1c3 100644
> --- a/sql/handler.h
> +++ b/sql/handler.h
> @@ -810,6 +810,16 @@ struct xid_t {
> long gtrid_length;
> long bqual_length;
> char data[XIDDATASIZE]; // not \0-terminated !
> + /*
> + The size of the string containing serialized Xid representation
> + is computed as a sum of
> + eight as the number of formatting symbols (X'',X'',)
> + plus 2 x XIDDATASIZE (2 due to hex format),
> + plus space for decimal digits of XID::formatID,
> + plus one for 0x0.
> + */
> + static const uint ser_buf_size=
> + 8 + 2 * XIDDATASIZE + 4 * sizeof(long) + 1;
>
> xid_t() {} /* Remove gcc warning */
> bool eq(struct xid_t *xid)
> diff --git a/sql/log.cc b/sql/log.cc
> index a56117a..316b871 100644
> --- a/sql/log.cc
> +++ b/sql/log.cc
> @@ -87,6 +87,9 @@ static bool binlog_savepoint_rollback_can_release_mdl(handlerton *hton,
> static int binlog_commit(handlerton *hton, THD *thd, bool all);
> static int binlog_rollback(handlerton *hton, THD *thd, bool all);
> static int binlog_prepare(handlerton *hton, THD *thd, bool all);
> +static int binlog_xa_recover(handlerton *hton, XID *xid_list, uint len);
> +static int binlog_commit_by_xid(handlerton *hton, XID *xid);
> +static int binlog_rollback_by_xid(handlerton *hton, XID *xid);
> static int binlog_start_consistent_snapshot(handlerton *hton, THD *thd);
>
> static const LEX_CSTRING write_error_msg=
> @@ -1688,6 +1691,9 @@ int binlog_init(void *p)
> binlog_hton->commit= binlog_commit;
> binlog_hton->rollback= binlog_rollback;
> binlog_hton->prepare= binlog_prepare;
> + binlog_hton->recover= binlog_xa_recover;
> + binlog_hton->commit_by_xid = binlog_commit_by_xid;
> + binlog_hton->rollback_by_xid = binlog_rollback_by_xid;
> binlog_hton->start_consistent_snapshot= binlog_start_consistent_snapshot;
> binlog_hton->flags= HTON_NOT_USER_SELECTABLE | HTON_HIDDEN;
> return 0;
> @@ -1883,23 +1889,16 @@ static inline int
> binlog_commit_flush_xid_caches(THD *thd, binlog_cache_mngr *cache_mngr,
> bool all, my_xid xid)
> {
> - if (xid)
> - {
> - Xid_log_event end_evt(thd, xid, TRUE);
> - return (binlog_flush_cache(thd, cache_mngr, &end_evt, all, TRUE, TRUE));
> - }
> - else
> + /* Mask XA COMMIT ... ONE PHASE as plain BEGIN ... COMMIT */
> + if (!xid)
> {
> - /*
> - Empty xid occurs in XA COMMIT ... ONE PHASE.
> - In this case, we do not have a MySQL xid for the transaction, and the
> - external XA transaction coordinator will have to handle recovery if
> - needed. So we end the transaction with a plain COMMIT query event.
> - */
> - Query_log_event end_evt(thd, STRING_WITH_LEN("COMMIT"),
> - TRUE, TRUE, TRUE, 0);
> - return (binlog_flush_cache(thd, cache_mngr, &end_evt, all, TRUE, TRUE));
> + DBUG_ASSERT(thd->transaction.xid_state.xa_state == XA_IDLE &&
> + thd->lex->xa_opt == XA_ONE_PHASE);
> + xid= thd->query_id;
> }
> +
> + Xid_log_event end_evt(thd, xid, TRUE);
> + return (binlog_flush_cache(thd, cache_mngr, &end_evt, all, TRUE, TRUE));
> }
>
> /**
> @@ -1961,11 +1960,77 @@ static int binlog_prepare(handlerton *hton, THD *thd, bool all)
> do nothing.
> just pretend we can do 2pc, so that MySQL won't
> switch to 1pc.
> - real work will be done in MYSQL_BIN_LOG::log_and_order()
> + real work is done in MYSQL_BIN_LOG::log_and_order()
> */
> return 0;
> }
>
> +
> +static int serialize_xid(XID *xid, char *buf)
> +{
> + size_t size;
> + buf[0]= '\'';
> + memcpy(buf+1, xid->data, xid->gtrid_length);
> + size= xid->gtrid_length + 2;
> + buf[size-1]= '\'';
> + if (xid->bqual_length == 0 && xid->formatID == 1)
> + return size;
> +
> + memcpy(buf+size, ", '", 3);
> + memcpy(buf+size+3, xid->data+xid->gtrid_length, xid->bqual_length);
> + size+= 3 + xid->bqual_length;
> + buf[size]= '\'';
> + size++;
> + if (xid->formatID != 1)
> + size+= sprintf(buf+size, ", %ld", xid->formatID);
> + return size;
> +}
> +
> +
> +static int binlog_xa_recover(handlerton *hton __attribute__((unused)),
> + XID *xid_list __attribute__((unused)),
> + uint len __attribute__((unused)))
> +{
> + /* Does nothing. */
> + return 0;
> +}
> +
> +
> +static int binlog_commit_by_xid(handlerton *hton, XID *xid)
> +{
> + THD *thd= current_thd;
> + const size_t xc_len= sizeof("XA COMMIT ") - 1; // do not count trailing 0
> + char buf[xc_len + xid_t::ser_buf_size];
> + size_t buflen;
> + binlog_cache_mngr *const cache_mngr= thd->binlog_setup_trx_data();
> +
> + DBUG_ASSERT(thd->lex->sql_command == SQLCOM_XA_COMMIT);
> +
> + if (!cache_mngr)
> + return 1;
> +
> + memcpy(buf, "XA COMMIT ", xc_len);
> + buflen= xc_len + serialize_xid(xid, buf+xc_len);
> + Query_log_event qe(thd, buf, buflen, TRUE, FALSE, FALSE, 0);
> + return binlog_flush_cache(thd, cache_mngr, &qe, TRUE, TRUE, TRUE);
> +}
> +
> +
> +static int binlog_rollback_by_xid(handlerton *hton, XID *xid)
> +{
> + THD *thd= current_thd;
> + const size_t xr_len= sizeof("XA ROLLBACK ") - 1; // do not count trailing 0
> + char buf[xr_len + xid_t::ser_buf_size];
> + size_t buflen;
> +
> + DBUG_ASSERT(thd->lex->sql_command == SQLCOM_XA_ROLLBACK);
> + memcpy(buf, "XA ROLLBACK ", xr_len);
> + buflen= xr_len + serialize_xid(xid, buf+xr_len);
> + Query_log_event qe(thd, buf, buflen, FALSE, TRUE, TRUE, 0);
> + return mysql_bin_log.write_event(&qe);
> +}
> +
> +
> /*
> We flush the cache wrapped in a beging/rollback if:
> . aborting a single or multi-statement transaction and;
> @@ -9809,6 +9874,24 @@ int TC_LOG_BINLOG::unlog(ulong cookie, my_xid xid)
> DBUG_RETURN(BINLOG_COOKIE_GET_ERROR_FLAG(cookie));
> }
>
> +
> +int TC_LOG_BINLOG::log_prepare(THD *thd, bool all)
> +{
> + XID *xid= &thd->transaction.xid_state.xid;
> + XA_prepare_log_event end_evt(thd, xid, FALSE);
> + binlog_cache_mngr *cache_mngr= thd->binlog_setup_trx_data();
> +
> + if (!cache_mngr)
> + {
> + WSREP_DEBUG("Skipping empty log_xid: %s", thd->query());
> + return 0;
> + }
> +
> + cache_mngr->using_xa= FALSE;
> + return (binlog_flush_cache(thd, cache_mngr, &end_evt, all, TRUE, TRUE));
> +}
> +
> +
> void
> TC_LOG_BINLOG::commit_checkpoint_notify(void *cookie)
> {
> diff --git a/sql/log.h b/sql/log.h
> index 7dfdb36..92fdf95 100644
> --- a/sql/log.h
> +++ b/sql/log.h
> @@ -61,6 +61,7 @@ class TC_LOG
> bool need_prepare_ordered,
> bool need_commit_ordered) = 0;
> virtual int unlog(ulong cookie, my_xid xid)=0;
> + virtual int log_prepare(THD *thd, bool all)= 0;
> virtual void commit_checkpoint_notify(void *cookie)= 0;
>
> protected:
> @@ -115,6 +116,10 @@ class TC_LOG_DUMMY: public TC_LOG // use it to disable the logging
> return 1;
> }
> int unlog(ulong cookie, my_xid xid) { return 0; }
> + int log_prepare(THD *thd, bool all)
> + {
> + return 0;
> + }
> void commit_checkpoint_notify(void *cookie) { DBUG_ASSERT(0); };
> };
>
> @@ -198,6 +203,10 @@ class TC_LOG_MMAP: public TC_LOG
> int log_and_order(THD *thd, my_xid xid, bool all,
> bool need_prepare_ordered, bool need_commit_ordered);
> int unlog(ulong cookie, my_xid xid);
> + int log_prepare(THD *thd, bool all)
> + {
> + return 0;
> + }
> void commit_checkpoint_notify(void *cookie);
> int recover();
>
> @@ -698,6 +707,7 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
> int log_and_order(THD *thd, my_xid xid, bool all,
> bool need_prepare_ordered, bool need_commit_ordered);
> int unlog(ulong cookie, my_xid xid);
> + int log_prepare(THD *thd, bool all);
> void commit_checkpoint_notify(void *cookie);
> int recover(LOG_INFO *linfo, const char *last_log_name, IO_CACHE *first_log,
> Format_description_log_event *fdle, bool do_xa);
> diff --git a/sql/log_event.cc b/sql/log_event.cc
> index 7a0d0be..354c5f3 100644
> --- a/sql/log_event.cc
> +++ b/sql/log_event.cc
> @@ -2139,6 +2139,9 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
> case XID_EVENT:
> ev = new Xid_log_event(buf, fdle);
> break;
> + case XA_PREPARE_LOG_EVENT:
> + ev = new XA_prepare_log_event(buf, fdle);
> + break;
> case RAND_EVENT:
> ev = new Rand_log_event(buf, fdle);
> break;
> @@ -2190,7 +2193,6 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
> case PREVIOUS_GTIDS_LOG_EVENT:
> case TRANSACTION_CONTEXT_EVENT:
> case VIEW_CHANGE_EVENT:
> - case XA_PREPARE_LOG_EVENT:
> ev= new Ignorable_log_event(buf, fdle,
> get_type_str((Log_event_type) event_type));
> break;
> @@ -6222,6 +6224,7 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver)
> post_header_len[USER_VAR_EVENT-1]= USER_VAR_HEADER_LEN;
> post_header_len[FORMAT_DESCRIPTION_EVENT-1]= FORMAT_DESCRIPTION_HEADER_LEN;
> post_header_len[XID_EVENT-1]= XID_HEADER_LEN;
> + post_header_len[XA_PREPARE_LOG_EVENT-1]= XA_PREPARE_HEADER_LEN;
> post_header_len[BEGIN_LOAD_QUERY_EVENT-1]= BEGIN_LOAD_QUERY_HEADER_LEN;
> post_header_len[EXECUTE_LOAD_QUERY_EVENT-1]= EXECUTE_LOAD_QUERY_HEADER_LEN;
> /*
> @@ -7874,7 +7877,7 @@ Gtid_log_event::Gtid_log_event(const char *buf, uint event_len,
> buf+= 8;
> domain_id= uint4korr(buf);
> buf+= 4;
> - flags2= *buf;
> + flags2= *(buf++);
> if (flags2 & FL_GROUP_COMMIT_ID)
> {
> if (event_len < (uint)header_size + GTID_HEADER_LEN + 2)
> @@ -7882,8 +7885,22 @@ Gtid_log_event::Gtid_log_event(const char *buf, uint event_len,
> seq_no= 0; // So is_valid() returns false
> return;
> }
> - ++buf;
> commit_id= uint8korr(buf);
> + buf+= 8;
> + }
> + if (flags2 & FL_XA_TRANSACTION)
> + {
> + xid.formatID= (long) buf[0];
> + xid.gtrid_length= (long) buf[1];
> + xid.bqual_length= (long) buf[2];
> +
> + buf+= 3;
> + if (xid.formatID >= 0)
> + {
> + long data_length= xid.bqual_length + xid.gtrid_length;
> + memcpy(xid.data, buf, data_length);
> + buf+= data_length;
> + }
> }
> }
>
> @@ -7914,6 +7931,12 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
> /* Preserve any DDL or WAITED flag in the slave's binlog. */
> if (thd_arg->rgi_slave)
> flags2|= (thd_arg->rgi_slave->gtid_ev_flags2 & (FL_DDL|FL_WAITED));
> + if (thd->transaction.xid_state.xa_state == XA_IDLE &&
> + thd->lex->xa_opt != XA_ONE_PHASE)
> + {
> + flags2|= FL_XA_TRANSACTION;
> + xid= thd->transaction.xid_state.xid;
> + }
> }
>
>
> @@ -7956,7 +7979,7 @@ Gtid_log_event::peek(const char *event_start, size_t event_len,
> bool
> Gtid_log_event::write()
> {
> - uchar buf[GTID_HEADER_LEN+2];
> + uchar buf[GTID_HEADER_LEN+2+sizeof(XID)];
> size_t write_len;
>
> int8store(buf, seq_no);
> @@ -7968,8 +7991,25 @@ Gtid_log_event::write()
> write_len= GTID_HEADER_LEN + 2;
> }
> else
> + write_len= 13;
> +
> + if (flags2 & FL_XA_TRANSACTION)
> + {
> + buf[write_len]= (uchar) ((char) xid.formatID);
> + buf[write_len+1]= (uchar) xid.gtrid_length;
> + buf[write_len+2]= (uchar) xid.bqual_length;
> + write_len+= 3;
> + if (xid.formatID >= 0)
> + {
> + long data_length= xid.bqual_length + xid.gtrid_length;
> + memcpy(buf+write_len, xid.data, data_length);
> + write_len+= data_length;
> + }
> + }
> +
> + if (write_len < GTID_HEADER_LEN)
> {
> - bzero(buf+13, GTID_HEADER_LEN-13);
> + bzero(buf+write_len, GTID_HEADER_LEN-write_len);
> write_len= GTID_HEADER_LEN;
> }
> return write_header(write_len) ||
> @@ -8012,7 +8052,7 @@ Gtid_log_event::make_compatible_event(String *packet, bool *need_dummy_event,
> void
> Gtid_log_event::pack_info(Protocol *protocol)
> {
> - char buf[6+5+10+1+10+1+20+1+4+20+1];
> + char buf[6+5+10+1+10+1+20+1+4+20+1+5+128];
> char *p;
> p = strmov(buf, (flags2 & FL_STANDALONE ? "GTID " : "BEGIN GTID "));
> p= longlong10_to_str(domain_id, p, 10);
> @@ -8026,6 +8066,12 @@ Gtid_log_event::pack_info(Protocol *protocol)
> p= longlong10_to_str(commit_id, p, 10);
> }
>
> + if (flags2 & FL_XA_TRANSACTION)
> + {
> + p= strmov(p, " XID :");
> + p= strnmov(p, xid.data, xid.bqual_length + xid.gtrid_length);
> + }
> +
> protocol->store(buf, p-buf, &my_charset_bin);
> }
>
> @@ -8079,11 +8125,25 @@ Gtid_log_event::do_apply_event(rpl_group_info *rgi)
> thd->lex->sql_command= SQLCOM_BEGIN;
> thd->is_slave_error= 0;
> status_var_increment(thd->status_var.com_stat[thd->lex->sql_command]);
> - if (trans_begin(thd, 0))
> + if (flags2 & FL_XA_TRANSACTION)
> {
> - DBUG_PRINT("error", ("trans_begin() failed"));
> - thd->is_slave_error= 1;
> + thd->lex->xid= &xid;
> + thd->lex->xa_opt= XA_NONE;
> + if (trans_xa_start(thd))
> + {
> + DBUG_PRINT("error", ("trans_xa_start() failed"));
> + thd->is_slave_error= 1;
> + }
> + }
> + else
> + {
> + if (trans_begin(thd, 0))
> + {
> + DBUG_PRINT("error", ("trans_begin() failed"));
> + thd->is_slave_error= 1;
> + }
> }
> +
> thd->update_stats();
>
> if (likely(!thd->is_slave_error))
> @@ -8202,9 +8262,29 @@ Gtid_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
> buf, print_event_info->delimiter))
> goto err;
> }
> - if (!(flags2 & FL_STANDALONE))
> - if (my_b_printf(&cache, is_flashback ? "COMMIT\n%s\n" : "BEGIN\n%s\n", print_event_info->delimiter))
> + if ((flags2 & FL_XA_TRANSACTION) && !is_flashback)
> + {
> + my_b_write_string(&cache, "XA START '");
> + my_b_write(&cache, (uchar *) xid.data, xid.gtrid_length);
> + my_b_write_string(&cache, "'");
> + if (xid.bqual_length > 0 || xid.formatID != 1)
> + {
> + my_b_write_string(&cache, ", '");
> + my_b_write(&cache, (uchar *) xid.data+xid.gtrid_length, xid.bqual_length);
> + my_b_write_string(&cache, "'");
> + if (xid.formatID != 1)
> + if (my_b_printf(&cache, ", %d", xid.formatID))
> + goto err;
> + }
> + if (my_b_printf(&cache, "%s\n", print_event_info->delimiter))
> + goto err;
> + }
> + else if (!(flags2 & FL_STANDALONE))
> + {
> + if (my_b_printf(&cache, is_flashback ? "COMMIT\n%s\n" : "BEGIN\n%s\n",
> + print_event_info->delimiter))
> goto err;
> + }
>
> return cache.flush_data();
> err:
> @@ -9003,6 +9083,300 @@ Xid_log_event::do_shall_skip(rpl_group_info *rgi)
> #endif /* !MYSQL_CLIENT */
>
>
> +/**
> +  Function serializes XID which is characterized by the four last arguments
> +  of the function.
> +  Serialized XID is presented in valid hex format and is returned to
> +  the caller in a buffer pointed by the first argument.
> +  The buffer size provided by the caller must be not less than
> +  8 + 2 * XIDDATASIZE + 4 * sizeof(XID::formatID) + 1, see
> +  XID::serialize_xid() that is a caller and plugin.h for XID declaration.
> +
> + @param buf pointer to a buffer allocated for storing serialized data
> +
> + @return the value of the buffer pointer
> +*/
> +
> +char *XA_prepare_log_event::event_xid_t::serialize(char *buf) const
> +{
> + int i;
> + char *c= buf;
> + /*
> + Build a string like following pattern:
> + X'hex11hex12...hex1m',X'hex21hex22...hex2n',11
> + and store it into buf.
> + Here hex1i and hex2k are hexadecimals representing XID's internal
> + raw bytes (1 <= i <= m, 1 <= k <= n), and `m' and `n' even numbers
> + half of which corresponding to the lengths of XID's components.
> + */
> + c[0]= 'X';
> + c[1]= '\'';
> + c+= 2;
> + for (i= 0; i < gtrid_length; i++)
> + {
> + c[0]=_dig_vec_lower[((uchar*) data)[i] >> 4];
> + c[1]=_dig_vec_lower[((uchar*) data)[i] & 0x0f];
> + c+= 2;
> + }
> + c[0]= '\'';
> + c[1]= ',';
> + c[2]= 'X';
> + c[3]= '\'';
> + c+= 4;
> +
> + for (; i < gtrid_length + bqual_length; i++)
> + {
> + c[0]=_dig_vec_lower[((uchar*) data)[i] >> 4];
> + c[1]=_dig_vec_lower[((uchar*) data)[i] & 0x0f];
> + c+= 2;
> + }
> + c[0]= '\'';
> + sprintf(c+1, ",%lu", formatID);
> +
> + return buf;
> +}
> +
> +
> +/**************************************************************************
> + XA_prepare_log_event methods
> +**************************************************************************/
> +/**
> + @note
> + It's ok not to use int8store here,
> + as long as xid_t::set(ulonglong) and
> + xid_t::get_n_xid doesn't do it either.
> + We don't care about actual values of xids as long as
> + identical numbers compare identically
> +*/
> +
> +XA_prepare_log_event::
> +XA_prepare_log_event(const char* buf,
> + const Format_description_log_event *description_event)
> + :Log_event(buf, description_event)
> +{
> + uint32 temp= 0;
> + uint8 temp_byte;
> +
> + buf+= description_event->common_header_len +
> + description_event->post_header_len[XA_PREPARE_LOG_EVENT-1];
> + memcpy(&temp_byte, buf, 1);
> + one_phase= (bool) temp_byte;
> + buf += sizeof(temp_byte);
> + memcpy(&temp, buf, sizeof(temp));
> + m_xid.formatID= le32toh(temp);
> + buf += sizeof(temp);
> + memcpy(&temp, buf, sizeof(temp));
> + m_xid.gtrid_length= le32toh(temp);
> + buf += sizeof(temp);
> + memcpy(&temp, buf, sizeof(temp));
> + m_xid.bqual_length= le32toh(temp);
> + buf += sizeof(temp);
> + memcpy(m_xid.data, buf, m_xid.gtrid_length + m_xid.bqual_length);
> +
> + xid= NULL;
> +}
> +
> +
> +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
> +void XA_prepare_log_event::pack_info(Protocol *protocol)
> +{
> + char buf[ser_buf_size];
> + char query[sizeof("XA COMMIT ONE PHASE") + 1 + sizeof(buf)];
> +
> + /* RHS of the following assert is unknown to client sources */
> + compile_time_assert(ser_buf_size == XID::ser_buf_size);
> + m_xid.serialize(buf);
> + sprintf(query,
> + (one_phase ? "XA COMMIT %s ONE PHASE" : "XA PREPARE %s"),
> + buf);
> +
> + protocol->store(query, strlen(query), &my_charset_bin);
> +}
> +#endif
> +
> +
> +#ifndef MYSQL_CLIENT
> +bool XA_prepare_log_event::write()
> +{
> + uchar data[1 + 4 + 4 + 4];
> + uint8 one_phase_byte= one_phase;
> +
> + data[0]= one_phase;
> + int4store(data+1, static_cast<XID*>(xid)->formatID);
> + int4store(data+(1+4), static_cast<XID*>(xid)->gtrid_length);
> + int4store(data+(1+4+4), static_cast<XID*>(xid)->bqual_length);
> +
> + DBUG_ASSERT(xid_bufs_size == sizeof(data) - 1);
> +
> + return write_header(sizeof(one_phase_byte) + xid_bufs_size +
> + static_cast<XID*>(xid)->gtrid_length +
> + static_cast<XID*>(xid)->bqual_length) ||
> + write_data(data, sizeof(data)) ||
> + write_data((uchar*) static_cast<XID*>(xid)->data,
> + static_cast<XID*>(xid)->gtrid_length +
> + static_cast<XID*>(xid)->bqual_length) ||
> + write_footer();
> +}
> +#endif
> +
> +
> +#ifdef MYSQL_CLIENT
> +bool XA_prepare_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
> +{
> + Write_on_release_cache cache(&print_event_info->head_cache, file,
> + Write_on_release_cache::FLUSH_F, this);
> + char buf[ser_buf_size];
> +
> + m_xid.serialize(buf);
> +
> + if (!print_event_info->short_form)
> + {
> + print_header(&cache, print_event_info, FALSE);
> + if (my_b_printf(&cache, "\tXID = %s\n", buf))
> + goto error;
> + }
> +
> + if (my_b_printf(&cache, "XA END %s\n%s\n",
> + buf, print_event_info->delimiter) ||
> + my_b_printf(&cache, "XA PREPARE %s\n%s\n",
> + buf, print_event_info->delimiter))
> + goto error;
> +
> + return cache.flush_data();
> +error:
> + return TRUE;
> +}
> +#endif /* MYSQL_CLIENT */
> +
> +
> +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
> +int XA_prepare_log_event::do_apply_event(rpl_group_info *rgi)
> +{
> + bool res;
> + int err;
> + rpl_gtid gtid;
> + uint64 sub_id= 0;
> + Relay_log_info const *rli= rgi->rli;
> + xid_t xid;
> + void *hton= NULL;
> +
> + /*
> + XID_EVENT works like a COMMIT statement. And it also updates the
> + mysql.gtid_slave_pos table with the GTID of the current transaction.
> +
> + Therefore, it acts much like a normal SQL statement, so we need to do
> + THD::reset_for_next_command() as if starting a new statement.
> + */
> + thd->reset_for_next_command();
> + /*
> + Record any GTID in the same transaction, so slave state is transactionally
> + consistent.
> + */
> +#ifdef WITH_WSREP
> + thd->wsrep_affected_rows= 0;
> +#endif
> +
> + if (rgi->gtid_pending)
> + {
> + xa_states c_state= thd->transaction.xid_state.xa_state;
> + sub_id= rgi->gtid_sub_id;
> + rgi->gtid_pending= false;
> +
> + gtid= rgi->current_gtid;
> +
> + thd->transaction.xid_state.xa_state= XA_ACTIVE;
> + err= rpl_global_gtid_slave_state->record_gtid(thd, >id, sub_id, true,
> + false, &hton);
> + thd->transaction.xid_state.xa_state= c_state;
> + if (err)
> + {
> + int ec= thd->get_stmt_da()->sql_errno();
> + /*
> + Do not report an error if this is really a kill due to a deadlock.
> + In this case, the transaction will be re-tried instead.
> + */
> + if (!is_parallel_retry_error(rgi, ec))
> + rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, rgi->gtid_info(),
> + "Error during XID COMMIT: failed to update GTID state in "
> + "%s.%s: %d: %s",
> + "mysql", rpl_gtid_slave_state_table_name.str, ec,
> + thd->get_stmt_da()->message());
> + thd->is_slave_error= 1;
> + return err;
> + }
> +
> + DBUG_EXECUTE_IF("gtid_fail_after_record_gtid",
> + { my_error(ER_ERROR_DURING_COMMIT, MYF(0), HA_ERR_WRONG_COMMAND);
> + thd->is_slave_error= 1;
> + return 1;
> + });
> + }
> + /* For a slave XA_prepare_log_event is COMMIT */
> + general_log_print(thd, COM_QUERY,
> + "COMMIT /* implicit, from Xid_log_event */");
> + thd->variables.option_bits&= ~OPTION_GTID_BEGIN;
> +
> + xid.set(m_xid.formatID,
> + m_xid.data, m_xid.gtrid_length,
> + m_xid.data + m_xid.gtrid_length, m_xid.bqual_length);
> +
> + thd->lex->xid= &xid;
> + if (trans_xa_end(thd))
> + return 1;
> +
> + if (!one_phase)
> + {
> + res= trans_xa_prepare(thd);
> + }
> + else
> + {
> + res= trans_xa_commit(thd);
> + thd->mdl_context.release_transactional_locks();
> + }
> +
> +
> + if (!res && sub_id)
> + rpl_global_gtid_slave_state->update_state_hash(sub_id, >id, hton, rgi);
> +
> + /*
> + Increment the global status commit count variable
> + */
> + status_var_increment(thd->status_var.com_stat[SQLCOM_COMMIT]);
> +
> + return res;
> +}
> +
> +
> +Log_event::enum_skip_reason
> +XA_prepare_log_event::do_shall_skip(rpl_group_info *rgi)
> +{
> + DBUG_ENTER("Xid_log_event::do_shall_skip");
> + if (rgi->rli->slave_skip_counter > 0)
> + {
> + DBUG_ASSERT(!rgi->rli->get_flag(Relay_log_info::IN_TRANSACTION));
> + thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_GTID_BEGIN);
> + DBUG_RETURN(Log_event::EVENT_SKIP_COUNT);
> + }
> +#ifdef WITH_WSREP
> + else if (wsrep_mysql_replication_bundle && WSREP_ON &&
> + opt_slave_domain_parallel_threads == 0)
> + {
> + if (++thd->wsrep_mysql_replicated < (int)wsrep_mysql_replication_bundle)
> + {
> + WSREP_DEBUG("skipping wsrep commit %d", thd->wsrep_mysql_replicated);
> + DBUG_RETURN(Log_event::EVENT_SKIP_IGNORE);
> + }
> + else
> + {
> + thd->wsrep_mysql_replicated = 0;
> + }
> + }
> +#endif
> + DBUG_RETURN(Log_event::do_shall_skip(rgi));
> +}
> +#endif /* !MYSQL_CLIENT */
> +
> +
> /**************************************************************************
> User_var_log_event methods
> **************************************************************************/
> @@ -14789,7 +15163,6 @@ bool event_that_should_be_ignored(const char *buf)
> event_type == PREVIOUS_GTIDS_LOG_EVENT ||
> event_type == TRANSACTION_CONTEXT_EVENT ||
> event_type == VIEW_CHANGE_EVENT ||
> - event_type == XA_PREPARE_LOG_EVENT ||
> (uint2korr(buf + FLAGS_OFFSET) & LOG_EVENT_IGNORABLE_F))
> return 1;
> return 0;
> diff --git a/sql/log_event.h b/sql/log_event.h
> index 38a40c9..b5c48c9 100644
> --- a/sql/log_event.h
> +++ b/sql/log_event.h
> @@ -217,6 +217,7 @@ class String;
> #define GTID_HEADER_LEN 19
> #define GTID_LIST_HEADER_LEN 4
> #define START_ENCRYPTION_HEADER_LEN 0
> +#define XA_PREPARE_HEADER_LEN 0
>
> /*
> Max number of possible extra bytes in a replication event compared to a
> @@ -3064,6 +3065,79 @@ class Xid_log_event: public Log_event
> #endif
> };
>
> +
> +/**
> + @class XA_prepare_log_event
> +
> +  Similar to Xid_log_event except that
> +  - it is specific to XA transaction
> +  - it carries out the prepare logics rather than the final committing
> +  when @c one_phase member is off.
> +  From the grouping perspective the event finalizes the current "prepare" group
> +  started with XA START Query-log-event.
> +  When @c one_phase is false, Commit or Rollback for the XA transaction are
> +  logged separately from the prepare-group events, so being groups of
> +  their own.
> +*/
> +
> +class XA_prepare_log_event: public Log_event
> +{
> +protected:
> + /* The event_xid_t members were copied from handler.h */
> + struct event_xid_t
> + {
> + long formatID;
> + long gtrid_length;
> + long bqual_length;
> + char data[MYSQL_XIDDATASIZE]; // not \0-terminated !
> + char *serialize(char *buf) const;
> + };
> +
> + /* size of serialization buffer is explained in $MYSQL/sql/xa.h. */
> + static const uint ser_buf_size=
> + 8 + 2 * MYSQL_XIDDATASIZE + 4 * sizeof(long) + 1;
> +
> + /* Total size of buffers to hold serialized members of XID struct */
> + static const int xid_bufs_size= 12;
> + event_xid_t m_xid;
> + void *xid;
> + bool one_phase;
> +
> +public:
> +#ifdef MYSQL_SERVER
> + XA_prepare_log_event(THD* thd_arg, XID *xid_arg, bool one_phase_arg):
> + Log_event(thd_arg, 0, TRUE), xid(xid_arg), one_phase(one_phase_arg)
> + {
> + cache_type= Log_event::EVENT_NO_CACHE;
> + }
> +#ifdef HAVE_REPLICATION
> + void pack_info(Protocol* protocol);
> +#endif /* HAVE_REPLICATION */
> +#else
> + bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
> +#endif
> + XA_prepare_log_event(const char* buf,
> + const Format_description_log_event *description_event);
> + ~XA_prepare_log_event() {}
> + Log_event_type get_type_code() { return XA_PREPARE_LOG_EVENT; }
> + int get_data_size()
> + {
> + return xid_bufs_size + m_xid.gtrid_length + m_xid.bqual_length;
> + }
> +
> +#ifdef MYSQL_SERVER
> + bool write();
> +#endif
> + bool is_valid() const { return 1; }
> +
> +private:
> +#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
> + virtual int do_apply_event(rpl_group_info *rgi);
> + enum_skip_reason do_shall_skip(rpl_group_info *rgi);
> +#endif
> +};
> +
> +
> /**
> @class User_var_log_event
>
> @@ -3376,6 +3450,11 @@ class Gtid_log_event: public Log_event
> uint64 seq_no;
> uint64 commit_id;
> uint32 domain_id;
> +#ifdef MYSQL_SERVER
> + XID xid;
> +#else
> + struct st_mysql_xid xid;
> +#endif
> uchar flags2;
>
> /* Flags2. */
> @@ -3404,6 +3483,8 @@ class Gtid_log_event: public Log_event
> static const uchar FL_WAITED= 16;
> /* FL_DDL is set for event group containing DDL. */
> static const uchar FL_DDL= 32;
> + /* FL_XA_TRANSACTION is set for XA transaction. */
> + static const uchar FL_XA_TRANSACTION= 64;
>
> #ifdef MYSQL_SERVER
> Gtid_log_event(THD *thd_arg, uint64 seq_no, uint32 domain_id, bool standalone,
> diff --git a/sql/sql_class.cc b/sql/sql_class.cc
> index fa2f866..cc75da9 100644
> --- a/sql/sql_class.cc
> +++ b/sql/sql_class.cc
> @@ -1461,12 +1461,19 @@ void THD::cleanup(void)
> DBUG_ASSERT(cleanup_done == 0);
>
> set_killed(KILL_CONNECTION);
> -#ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE
> if (transaction.xid_state.xa_state == XA_PREPARED)
> {
> -#error xid_state in the cache should be replaced by the allocated value
> + trans_detach(this);
> + transaction.xid_state.xa_state= XA_NOTR;
> + transaction.xid_state.rm_error= 0;
> + }
> + else
> + {
> + transaction.xid_state.xa_state= XA_NOTR;
> + transaction.xid_state.rm_error= 0;
> + trans_rollback(this);
> + xid_cache_delete(this, &transaction.xid_state);
> }
> -#endif
>
> mysql_ha_cleanup(this);
> locked_tables_list.unlock_locked_tables(this);
> @@ -1474,11 +1481,6 @@ void THD::cleanup(void)
> delete_dynamic(&user_var_events);
> close_temporary_tables();
>
> - transaction.xid_state.xa_state= XA_NOTR;
> - transaction.xid_state.rm_error= 0;
> - trans_rollback(this);
> - xid_cache_delete(this, &transaction.xid_state);
> -
> DBUG_ASSERT(open_tables == NULL);
> /*
> If the thread was in the middle of an ongoing transaction (rolled
> diff --git a/sql/sql_class.h b/sql/sql_class.h
> index 69fabee..76befcb 100644
> --- a/sql/sql_class.h
> +++ b/sql/sql_class.h
> @@ -1255,6 +1255,18 @@ typedef struct st_xid_state {
> /* Error reported by the Resource Manager (RM) to the Transaction Manager. */
> uint rm_error;
> XID_cache_element *xid_cache_element;
> +  /*
> +    Binary logging status.
> +    It is set to TRUE at XA PREPARE if the transaction was written
> +    to the binlog.
> +    Naturally FALSE means the transaction was not written to
> +    the binlog. Happens if the transaction did not modify anything
> +    or binlogging was turned off. In that case we shouldn't binlog
> +    the consequent XA COMMIT/ROLLBACK.
> +    The recovered transaction after server restart sets it to TRUE always.
> +    That can cause inconsistencies (should be fixed?).
> +  */
> +  bool is_binlogged;
>
> /**
> Check that XA transaction has an uncommitted work. Report an error
> @@ -1278,6 +1290,12 @@ typedef struct st_xid_state {
> }
> return false;
> }
> +
> + void reset()
> + {
> + xid.null();
> + is_binlogged= FALSE;
> + }
> } XID_STATE;
>
> void xid_cache_init(void);
> @@ -2603,7 +2621,7 @@ class THD :public Statement,
> then.
> */
> if (!xid_state.rm_error)
> - xid_state.xid.null();
> + xid_state.reset();
> free_root(&mem_root,MYF(MY_KEEP_PREALLOC));
> DBUG_VOID_RETURN;
> }
> diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
> index b48070b..3e4a067 100644
> --- a/sql/sql_connect.cc
> +++ b/sql/sql_connect.cc
> @@ -1414,6 +1414,7 @@ void do_handle_one_connection(CONNECT *connect)
> #endif
> end_thread:
> close_connection(thd);
> + thd->get_stmt_da()->reset_diagnostics_area();
>
> if (thd->userstat_running)
> update_global_user_stats(thd, create_user, time(NULL));
> diff --git a/sql/transaction.cc b/sql/transaction.cc
> index 13614d3..64533d7 100644
> --- a/sql/transaction.cc
> +++ b/sql/transaction.cc
> @@ -790,6 +790,44 @@ bool trans_release_savepoint(THD *thd, LEX_CSTRING name)
>
>
> /**
> + Detach the current XA transaction;
> +
> + @param thd Current thread
> +
> + @retval FALSE Success
> + @retval TRUE Failure
> +*/
> +
> +bool trans_detach(THD *thd)
> +{
> + XID_STATE *xid_s= &thd->transaction.xid_state;
> + Ha_trx_info *ha_info, *ha_info_next;
> +
> + DBUG_ENTER("trans_detach");
> +
> +// DBUG_ASSERT(xid_s->xa_state == XA_PREPARED &&
> +// xid_cache_search(thd, &xid_s->xid));
> +
> + xid_cache_delete(thd, xid_s);
> + if (xid_cache_insert(&xid_s->xid, XA_PREPARED))
> + DBUG_RETURN(TRUE);
> +
> + for (ha_info= thd->transaction.all.ha_list;
> + ha_info;
> + ha_info= ha_info_next)
> + {
> + ha_info_next= ha_info->next();
> + ha_info->reset(); /* keep it conveniently zero-filled */
> + }
> +
> + thd->transaction.all.ha_list= 0;
> + thd->transaction.all.no_2pc= 0;
> +
> + DBUG_RETURN(FALSE);
> +}
> +
> +
> +/**
> Starts an XA transaction with the given xid value.
>
> @param thd Current thread
> @@ -928,6 +966,12 @@ bool trans_xa_commit(THD *thd)
> res= !xs;
> if (res)
> my_error(ER_XAER_NOTA, MYF(0));
> + else if (thd->in_multi_stmt_transaction_mode())
> + {
> + my_error(ER_XAER_RMFAIL, MYF(0),
> + xa_state_names[thd->transaction.xid_state.xa_state]);
> + res= TRUE;
> + }
> else
> {
> res= xa_trans_rolled_back(xs);
> @@ -978,8 +1022,16 @@ bool trans_xa_commit(THD *thd)
> {
> DEBUG_SYNC(thd, "trans_xa_commit_after_acquire_commit_lock");
>
> - res= MY_TEST(ha_commit_one_phase(thd, 1));
> - if (res)
> + if(WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
> + {
> + res= thd->binlog_query(THD::THD::STMT_QUERY_TYPE,
> + thd->query(), thd->query_length(),
> + FALSE, FALSE, FALSE, 0);
> + }
> + else
> + res= 0;
> +
> + if (res || (res= MY_TEST(ha_commit_one_phase(thd, 1))))
> my_error(ER_XAER_RMERR, MYF(0));
> }
> }
> @@ -1044,7 +1096,18 @@ bool trans_xa_rollback(THD *thd)
> DBUG_RETURN(TRUE);
> }
>
> - res= xa_trans_force_rollback(thd);
> + if(WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
> + {
> + res= thd->binlog_query(THD::THD::STMT_QUERY_TYPE,
> + thd->query(), thd->query_length(),
> + FALSE, FALSE, FALSE, 0);
> + }
> + else
> + res= 0;
> +
> + res= res || xa_trans_force_rollback(thd);
> + if (res || (res= MY_TEST(xa_trans_force_rollback(thd))))
> + my_error(ER_XAER_RMERR, MYF(0));
>
> thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
> thd->transaction.all.reset();
> diff --git a/sql/transaction.h b/sql/transaction.h
> index 7e34693..f228cc6 100644
> --- a/sql/transaction.h
> +++ b/sql/transaction.h
> @@ -29,6 +29,7 @@ bool trans_commit(THD *thd);
> bool trans_commit_implicit(THD *thd);
> bool trans_rollback(THD *thd);
> bool trans_rollback_implicit(THD *thd);
> +bool trans_detach(THD *thd);
>
> bool trans_commit_stmt(THD *thd);
> bool trans_rollback_stmt(THD *thd);
> _______________________________________________
> commits mailing list
> commits(a)mariadb.org
> https://lists.askmonty.org/cgi-bin/mailman/listinfo/commits
1
0
Re: [Maria-developers] 297ac6aa5b0: MDEV-7597 Expiration of user passwords
by Sergei Golubchik 13 Feb '19
by Sergei Golubchik 13 Feb '19
13 Feb '19
Hi, Robert!
On Feb 13, Robert Bindar wrote:
> revision-id: 297ac6aa5b0 (mariadb-10.4.1-101-g297ac6aa5b0)
> parent(s): ce02e7c9acc
> author: Robert Bindar <robert(a)mariadb.org>
> committer: Robert Bindar <robert(a)mariadb.org>
> timestamp: 2019-02-07 17:47:13 +0200
> message:
>
> MDEV-7597 Expiration of user passwords
>
> This patch adds support for expiring user passwords.
> The following statements are extended:
> CREATE USER user@localhost PASSWORD EXPIRE [option]
> ALTER USER user@localhost PASSWORD EXPIRE [option]
> If no option is specified, the password is expired with immediate
> effect. If option is DEFAULT, global policy applies according to
> the default_password_lifetime system var (if 0, password never
> expires, if N, password expires every N days). If option is NEVER,
> the password never expires and if option is INTERVAL N DAY, the
> password expires every N days.
> The feature also supports the disconnect_on_expired_password system
> var and the --connect-expired-password client option.
>
> diff --git a/libmariadb b/libmariadb
> index 34f8887af03..70f2964dc4d 160000
> --- a/libmariadb
> +++ b/libmariadb
> @@ -1 +1 @@
> -Subproject commit 34f8887af03d022416dd6593de91d0706e57f46b
> +Subproject commit 70f2964dc4de116f4b50732cfec7cb566e082b4c
This shouldn't be in your commit
> diff --git a/mysql-test/main/grant5.result b/mysql-test/main/grant5.result
> index 3d0f331e58a..3bb81266b4d 100644
> --- a/mysql-test/main/grant5.result
> +++ b/mysql-test/main/grant5.result
> @@ -67,21 +67,21 @@ show create user u1@h;
> ERROR 28000: Can't find any matching row in the user table
> show create user u2@h;
> CREATE USER for u2@h
> -CREATE USER 'u2'@'h' IDENTIFIED BY PASSWORD '*975B2CD4FF9AE554FE8AD33168FBFC326D2021DD' ACCOUNT UNLOCK
> +CREATE USER 'u2'@'h' IDENTIFIED BY PASSWORD '*975B2CD4FF9AE554FE8AD33168FBFC326D2021DD' PASSWORD EXPIRE DEFAULT ACCOUNT UNLOCK
Just like with account locking, I wouldn't print default behavior clauses here
For example, we don't print
REQUIRE NONE WITH MAX_QUERIES_PER_HOUR 0 MAX_UPDATES_PER_HOUR 0 MAX_CONNECTIONS_PER_HOUR 0 MAX_USER_CONNECTIONS 0
> show create user u3@h;
> CREATE USER for u3@h
> -CREATE USER 'u3'@'h' IDENTIFIED BY PASSWORD '*975B2CD4FF9AE554FE8AD33168FBFC326D2021DD' ACCOUNT UNLOCK
> +CREATE USER 'u3'@'h' IDENTIFIED BY PASSWORD '*975B2CD4FF9AE554FE8AD33168FBFC326D2021DD' PASSWORD EXPIRE DEFAULT ACCOUNT UNLOCK
> show create user u4@h;
> CREATE USER for u4@h
> -CREATE USER 'u4'@'h' IDENTIFIED BY PASSWORD '*975B2CD4FF9AE554FE8AD33168FBFC326D2021DD' ACCOUNT UNLOCK
> +CREATE USER 'u4'@'h' IDENTIFIED BY PASSWORD '*975B2CD4FF9AE554FE8AD33168FBFC326D2021DD' PASSWORD EXPIRE DEFAULT ACCOUNT UNLOCK
> show create user u5@h;
> ERROR 28000: Can't find any matching row in the user table
> show create user u6@h;
> CREATE USER for u6@h
> -CREATE USER 'u6'@'h' IDENTIFIED BY PASSWORD '78a302dd267f6044' ACCOUNT UNLOCK
> +CREATE USER 'u6'@'h' IDENTIFIED BY PASSWORD '78a302dd267f6044' PASSWORD EXPIRE DEFAULT ACCOUNT UNLOCK
> show create user u7@h;
> CREATE USER for u7@h
> -CREATE USER 'u7'@'h' IDENTIFIED BY PASSWORD '78a302dd267f6044' ACCOUNT UNLOCK
> +CREATE USER 'u7'@'h' IDENTIFIED BY PASSWORD '78a302dd267f6044' PASSWORD EXPIRE DEFAULT ACCOUNT UNLOCK
> show create user u8@h;
> ERROR 28000: Can't find any matching row in the user table
> grant select on *.* to u1@h;
> diff --git a/mysql-test/main/password_expiration_dbug.test b/mysql-test/main/password_expiration_dbug.test
> new file mode 100644
> index 00000000000..99cbf5a6efb
> --- /dev/null
> +++ b/mysql-test/main/password_expiration_dbug.test
> @@ -0,0 +1,57 @@
> +#
> +# Test password expiration INTERVAL and default_password_lifetime options
> +#
> +
> +--source include/have_debug.inc
> +--source include/not_embedded.inc
> +
> +set @old_dbug=@@global.debug_dbug;
> +set global debug_dbug="+d,password_expiration_interval_sec";
> +
> +--echo #
> +--echo # PASSWORD EXPIRE DEFAULT should use the default_password_lifetime
> +--echo # system var to set the number of days till expiration
> +--echo #
> +set global disconnect_on_expired_password=ON;
> +set global default_password_lifetime=1;
> +create user user1@localhost password expire default;
> +--sleep 2
I don't like that. all sleep-based tests have proven to be very unreliable
in buildbot :(
I don't have any good ideas, you cannot change the global server
time from the test. May be, mess with a timezone?
Like, set the expiration for a few minutes (like you do)
and change server's timezone to move the time forward?
Might work. Depends on what time you use for expiration - local or UTC.
> +--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
> +--error ER_MUST_CHANGE_PASSWORD_LOGIN
> +connect(con1,localhost,user1);
> +drop user user1@localhost;
> +
> +--echo #
> +--echo # PASSWORD EXPIRE INTERVAL should expire a client's password after
> +--echo # X seconds and not before
> +--echo #
> +set global disconnect_on_expired_password=ON;
> +create user user1@localhost password expire interval 2 day;
> +--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
> +connect(con1,localhost,user1);
> +disconnect con1;
> +connection default;
> +--sleep 3
> +--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
> +--error ER_MUST_CHANGE_PASSWORD_LOGIN
> +connect(con1,localhost,user1);
> +drop user user1@localhost;
> +
> +--echo #
> +--echo # PASSWORD EXPIRE NEVER should override the other policies and never
> +--echo # expire a client's password
> +--echo #
> +set global disconnect_on_expired_password=ON;
> +create user user1@localhost password expire interval 2 day;
> +alter user user1@localhost password expire never;
> +--sleep 3
> +--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
> +connect(con1,localhost,user1);
> +disconnect con1;
> +connection default;
> +drop user user1@localhost;
> +
> +set global debug_dbug=@old_dbug;
> +set global disconnect_on_expired_password=default;
> +set global default_password_lifetime=default;
> +
> diff --git a/mysql-test/main/system_mysql_db_fix40123.result b/mysql-test/main/system_mysql_db_fix40123.result
> index 3903dbdad94..a96a33ce54f 100644
> --- a/mysql-test/main/system_mysql_db_fix40123.result
> +++ b/mysql-test/main/system_mysql_db_fix40123.result
> @@ -108,6 +108,8 @@ user CREATE TABLE `user` (
> `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
> `authentication_string` text COLLATE utf8_bin NOT NULL,
> `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
> + `password_last_changed` timestamp NOT NULL DEFAULT current_timestamp(),
> + `password_lifetime` smallint(5) unsigned NOT NULL DEFAULT 0,
I've fixed it, so when you rebase they should go away, make sure to
rerun these tests.
> `account_locked` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
> `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
> `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
> diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql
> index 04a9bd9785a..e8af55993f5 100644
> --- a/scripts/mysql_system_tables_fix.sql
> +++ b/scripts/mysql_system_tables_fix.sql
> @@ -805,6 +807,9 @@ IF 'BASE TABLE' = (select table_type from information_schema.tables where table_
> 'max_statement_time', max_statement_time,
> 'plugin', if(plugin>'',plugin,if(length(password)=16,'mysql_old_password','mysql_native_password')),
> 'authentication_string', if(plugin>'',authentication_string,password),
> + 'password_expired', 'Y'=password_expired,
Why do you need an explicit password_expired value?
I'd just use password_last_changed=0 for that. It'll work
automatically (changed too back in the past so it makes it expired)
without an explicit check for password_expired==1
> + 'password_last_changed', UNIX_TIMESTAMP(password_last_changed),
> + 'password_lifetime', ifnull(password_lifetime, -1),
> 'account_locked', 'Y'=account_locked,
> 'default_role', default_role,
> 'is_role', 'Y'=is_role)) as Priv
> diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
> index 4536abd2907..e0db27165c4 100644
> --- a/sql/share/errmsg-utf8.txt
> +++ b/sql/share/errmsg-utf8.txt
> @@ -6940,8 +6940,9 @@ ER_NOT_VALID_PASSWORD
> eng "Your password does not satisfy the current policy requirements"
>
> ER_MUST_CHANGE_PASSWORD
> - eng "You must SET PASSWORD before executing this statement"
> + eng "You must change your password using either ALTER USER or SET PASSWORD before executing this statement"
> bgn "Трябва първо да си смените паролата със SET PASSWORD за да можете да изпълните тази команда"
> + rum "Trebuie sa iti schimbi parola folosind ALTER USER sau SET PASSWORD inainte de a executa aceasta comanda"
Let's keep the old SET PASSWORD message. It seems that ALTER USER
is more appropriate for DBA, for account administration, not
for mere users.
>
> ER_FK_NO_INDEX_CHILD
> eng "Failed to add the foreign key constaint. Missing index for constraint '%s' in the foreign table '%s'"
> diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
> index 983686d51c7..f0a8ffb1a60 100644
> --- a/sql/sql_acl.cc
> +++ b/sql/sql_acl.cc
> @@ -634,7 +638,7 @@ static ACL_ROLE *find_acl_role(const char *user);
> static ROLE_GRANT_PAIR *find_role_grant_pair(const LEX_CSTRING *u, const LEX_CSTRING *h, const LEX_CSTRING *r);
> static ACL_USER_BASE *find_acl_user_base(const char *user, const char *host);
> static bool update_user_table_password(THD *, const User_table&,
> - const ACL_USER &);
> + ACL_USER &);
use only const references or pointers.
So, this one should become a pointer.
> static bool acl_load(THD *thd, const Grant_tables& grant_tables);
> static inline void get_grantor(THD *thd, char* grantor);
> static bool add_role_user_mapping(const char *uname, const char *hname, const char *rname);
> @@ -2705,6 +2806,7 @@ bool acl_getroot(Security_context *sctx, const char *user, const char *host,
> sctx->master_access= 0;
> sctx->db_access= 0;
> *sctx->priv_user= *sctx->priv_host= *sctx->priv_role= 0;
> + sctx->set_password_expired(false);
I'd rather call init() here. Future-proof.
>
> if (host[0]) // User, not Role
> {
> @@ -3457,6 +3565,17 @@ bool change_password(THD *thd, LEX_USER *user)
> goto end;
> }
>
> + if (sctx->password_expired() &&
> + (strcmp(acl_user->user.str, sctx->priv_user) ||
> + my_strcasecmp(system_charset_info,
> + acl_user->host.hostname,
> + sctx->priv_host)))
> + {
> + mysql_mutex_unlock(&acl_cache->lock);
> + my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
> + goto end;
> + }
> +
technically, check_alter_user() should've done that already;
I'm not sure you need to repeat the check here.
> if (update_user_table_password(thd, tables.user_table(), *acl_user))
> {
> mysql_mutex_unlock(&acl_cache->lock); /* purecov: deadcode */
> @@ -3930,6 +4049,12 @@ static bool update_user_table_password(THD *thd, const User_table& user_table,
> user_table.set_auth(user.plugin.str, user.plugin.length,
> user.auth_string.str, user.auth_string.length);
>
> + /* Update the persistent password expired state of user */
> + user_table.set_password_expired(false);
> + thd->set_time();
Hmm, why? This is done at the start of every statement, why do you repeat it
here?
> + Timeval now= thd->query_start_timeval();
Just simply m_time_t now= thd->query_start();
Don't use query_start_timeval() or query_start_sec_part()
unless you _really_ need microsecond precision. They have a side-effect
of server remembering that you asked for microseconds, so they'll
be logged in a binary log.
I've now removed THD::query_start_timeval(), it's too error-prone
> + user_table.set_password_last_changed(now.tv_sec);
> +
> if (unlikely(error= table->file->ha_update_row(table->record[1],
> table->record[0])) &&
> error != HA_ERR_RECORD_IS_THE_SAME)
> @@ -3937,6 +4062,19 @@ static bool update_user_table_password(THD *thd, const User_table& user_table,
> table->file->print_error(error,MYF(0)); /* purecov: deadcode */
> DBUG_RETURN(1);
> }
> +
> + /* Update the acl password expired state of user */
> + user.password_last_changed= now.tv_sec;
> + user.password_expired= false;
> +
> + /* If user is the connected user, reset the password expired field on sctx
> + and allow the user to exit sandbox mode */
> + if (!strcmp(user.user.str, thd->security_ctx->priv_user) &&
> + !my_strcasecmp(system_charset_info,
> + user.host.hostname,
> + thd->security_ctx->priv_host))
this should be a method in ACL_USER or in Security_context - it's used
too often.
> + thd->security_ctx->set_password_expired(false);
> +
> DBUG_RETURN(0);
> }
>
> @@ -4153,6 +4291,68 @@ static int replace_user_table(THD *thd, const User_table &user_table,
> user_table.set_account_locked(lock_value);
> new_acl_user.account_locked= lock_value;
> }
> +
> + thd->set_time();
> + Timeval now= thd->query_start_timeval();
same as above
> + if (!old_row_exists)
> + {
> + user_table.set_password_last_changed(now.tv_sec);
> + new_acl_user.password_last_changed= now.tv_sec;
> + new_acl_user.use_default_pass_lifetime= true;
> + }
> +
> + /* Unexpire the user password */
> + if (combo->is_changing_password)
Is it needed? Couldn't you just check if the password was specified?
> + {
> + user_table.set_password_expired(false);
> + user_table.set_password_last_changed(now.tv_sec);
> + new_acl_user.password_last_changed= now.tv_sec;
> + new_acl_user.password_expired= false;
> +
> + /* If combo is the connected user, reset the password expired field on sctx
> + and allow the user to exit sandbox mode */
> + if (!strcmp(combo->user.str, thd->security_ctx->priv_user) &&
> + !my_strcasecmp(system_charset_info,
> + combo->host.str,
> + thd->security_ctx->priv_host))
> + thd->security_ctx->set_password_expired(false);
I tend to think (again) that ALTER USER is for DBA's, and the user
should use SET PASSWORD. So, let's not allow ALTER USER in the sandbox
mode and, consequently, you shouldn't need the check above either.
For example, ALTER USER can specify MAX_QUERIES_PER_HOUR.
Surely, it's for DBA to specify the limit, it doesn't make any sense
to allow every user to overwrite his own limits.
And even for authentication, think of PAM. A user can change his own
password, but not /etc/pam.d/login for example. Similarly here
a user should be able to change his password, but not the authentication
plugin.
> + }
> +
> + switch (lex->account_options.password_expire) {
> + case PASSWORD_EXPIRE_UNSPECIFIED:
> + break;
> + case PASSWORD_EXPIRE_NOW:
> + user_table.set_password_expired(true);
> + new_acl_user.password_expired= true;
> + new_acl_user.use_default_pass_lifetime= false;
why do you change use_default_pass_lifetime?
> + break;
> + case PASSWORD_EXPIRE_NEVER:
> + user_table.set_password_lifetime(0);
> + new_acl_user.password_lifetime= 0;
> + new_acl_user.use_default_pass_lifetime= false;
> + break;
> + case PASSWORD_EXPIRE_DEFAULT:
> + user_table.set_password_lifetime_null();
> + new_acl_user.use_default_pass_lifetime= true;
> + new_acl_user.password_lifetime= -1;
why two variables to say the same thing?
> + break;
> + case PASSWORD_EXPIRE_INTERVAL:
> + longlong interval= lex->account_options.num_expiration_days;
> + Timestamp exp_tstamp(new_acl_user.password_last_changed, 0);
> + int warn= 0;
> + exp_tstamp.add(3600 * 24 * interval, &warn);
I'm not sure why you're doing it. If the user sets too many days here -
fine, why would that be a problem?
> + if (warn & MYSQL_TIME_WARN_OUT_OF_RANGE)
> + {
> + char num[MAX_BIGINT_WIDTH + 1];
> + my_snprintf(num, sizeof(num), "%lu", interval);
> + my_error(ER_WRONG_VALUE, MYF(0), "DAY", num);
> + goto end;
> + }
> + user_table.set_password_lifetime(interval);
> + new_acl_user.password_lifetime= interval;
> + new_acl_user.use_default_pass_lifetime= false;
> + break;
> + }
> }
>
> if (old_row_exists)
> @@ -10548,6 +10764,39 @@ int mysql_alter_user(THD* thd, List<LEX_USER> &users_list)
>
> LEX_USER *tmp_lex_user;
> List_iterator<LEX_USER> users_list_iterator(users_list);
> + bool password_expired= thd->security_ctx->password_expired();
> + bool own_password_changed= false;
> +
> + /* Precheck the users list to see if the password of the connected
> + user is being changed */
> + if (password_expired)
> + { // TODO: check bug with identified by '' not blanking the password
> + Security_context *sctx= thd->security_ctx;
> + while ((tmp_lex_user= users_list_iterator++))
> + {
> + LEX_USER* lex_user= get_current_user(thd, tmp_lex_user, false);
> +
> + bool is_self= !strcmp(lex_user->user.str, sctx->priv_user) &&
> + !my_strcasecmp(system_charset_info,
> + lex_user->host.str,
> + sctx->priv_host);
> + if (lex_user->is_changing_password && is_self)
> + {
> + own_password_changed= true;
> + break;
> + }
> + }
> +
> + if (!own_password_changed)
> + {
> + mysql_mutex_unlock(&acl_cache->lock);
> + mysql_rwlock_unlock(&LOCK_grant);
> + my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
already commented earlier. Just do ER_MUST_CHANGE_PASSWORD always
if in a sandbox mode
> + DBUG_RETURN(1);
> + }
> + users_list_iterator.init(users_list);
> + }
> +
> while ((tmp_lex_user= users_list_iterator++))
> {
> LEX_USER* lex_user= get_current_user(thd, tmp_lex_user, false);
> @@ -13294,6 +13543,44 @@ static void handle_password_errors(const char *user, const char *hostname, PASSW
> #endif
> }
>
> +bool check_password_lifetime(THD *thd, const ACL_USER *acl_user)
> +{
> + /* the password should never expire */
> + if (!acl_user->password_lifetime)
> + return false;
> +
> + longlong interval= acl_user->password_lifetime;
> + if (acl_user->use_default_pass_lifetime)
> + {
> + mysql_mutex_lock(&LOCK_global_system_variables);
> + interval= default_password_lifetime;
> + mysql_mutex_unlock(&LOCK_global_system_variables);
I'd skip the mutex lock here. The worst that can happen, if someone would be
logging in when default_password_lifetime is just being modified, is that this
someone might be classified incorrectly: allowed in with an expired password,
or not allowed with a non-expired one. Which is a small price to pay to avoid
every single user (because most users never change default settings) taking
yet another mutex lock on every single login.
> +
> + /* default global policy applies, and that is password never expires */
> + if (!interval)
> + return false;
> + }
> +
> + thd->set_time();
> + Timeval tval_now= thd->query_start_timeval();
and again
> + Timestamp tstamp_now(tval_now);
> + Timestamp expiration_tstamp(acl_user->password_last_changed, 0);
> + int interval_sec= 3600 * 24 * interval;
> + int warn= 0;
> +
> + /* this helps test set a testable password lifetime in seconds not days */
> + DBUG_EXECUTE_IF("password_expiration_interval_sec", { interval_sec= interval; });
> +
> + expiration_tstamp.add(interval_sec, &warn);
> +
> + DBUG_ASSERT(!(warn & MYSQL_TIME_WARN_OUT_OF_RANGE));
> +
> + if (tstamp_now.cmp(expiration_tstamp) >= 0) {
> + return true;
> + }
why not to write simply
if ((thd->query_start() - acl_user->password_last_changed)/24/3600 > interval)
this keeps all the code in one (or two) lines, instead of spreading it over
two Timestamp::Timestamp constructors, Timestamp::add() and Timestamp::cmp()
methods. And it doesn't have a problem with large interval values, as a bonus.
> +
> + return false;
> +}
>
> /**
> Perform the handshake, authorize the client and update thd sctx variables.
> @@ -13512,6 +13799,25 @@ bool acl_authenticate(THD *thd, uint com_change_user_pkt_len)
> DBUG_RETURN(1);
> }
>
> + bool disconnect_password_expired= true;
> + bool client_can_handle_exp_pass= thd->client_capabilities &
> + CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS;
> + bool password_lifetime_due= check_password_lifetime(thd, acl_user);
> + mysql_mutex_lock(&LOCK_global_system_variables);
> + disconnect_password_expired= disconnect_on_expired_password;
> + mysql_mutex_unlock(&LOCK_global_system_variables);
same as above
> +
> + if (!client_can_handle_exp_pass && disconnect_password_expired &&
> + (acl_user->password_expired || password_lifetime_due))
by now you've locked the mutex twice, did some calculations to see
if the password expired... and only then you've checked
acl_user->password_expired.
> + {
> + status_var_increment(denied_connections);
> + my_error(ER_MUST_CHANGE_PASSWORD_LOGIN, MYF(0));
> + DBUG_RETURN(1);
> + }
> +
> + sctx->set_password_expired(acl_user->password_expired ||
> + password_lifetime_due);
> +
> /*
> Don't allow the user to connect if he has done too many queries.
> As we are testing max_user_connections == 0 here, it means that we
> diff --git a/sql/sql_class.h b/sql/sql_class.h
> index 69fabee708c..57b043fb8e4 100644
> --- a/sql/sql_class.h
> +++ b/sql/sql_class.h
> @@ -1340,6 +1342,11 @@ class Security_context {
> restore_security_context(THD *thd, Security_context *backup);
> #endif
> bool user_matches(Security_context *);
> +
> + bool password_expired()
> + { return pass_expired; }
> + void set_password_expired(bool val)
> + { pass_expired= val; }
Better remove them, this class doesn't have other
trivial setters/getters either.
> };
>
>
> diff --git a/sql/sql_const.h b/sql/sql_const.h
> index 82f3b9c21f2..3d888c25a9f 100644
> --- a/sql/sql_const.h
> +++ b/sql/sql_const.h
> @@ -275,6 +275,7 @@
> #define DELAYED_QUEUE_SIZE 1000
> #define DELAYED_WAIT_TIMEOUT (5*60) /**< Wait for delayed insert */
> #define MAX_CONNECT_ERRORS 100 ///< errors before disabling host
> +#define DEFAULT_PASSWORD_LIFETIME 0 ///< days before passwords expire
no need to create a name for that
>
> #define LONG_TIMEOUT ((ulong) 3600L*24L*365L)
>
> diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
> index 65b52b5b5da..35e9e869b49 100644
> --- a/sql/sql_parse.cc
> +++ b/sql/sql_parse.cc
> @@ -1687,6 +1687,17 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
> thd->get_stmt_da()->set_skip_flush();
> }
>
> + if (unlikely(thd->security_ctx->password_expired() &&
> + command != COM_QUERY &&
> + command != COM_STMT_CLOSE &&
> + command != COM_STMT_SEND_LONG_DATA &&
> + command != COM_PING &&
> + command != COM_QUIT))
Why do you allow COM_STMT_CLOSE and COM_STMT_SEND_LONG_DATA?
> + {
> + my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
> + goto dispatch_end;
> + }
> +
> switch (command) {
> case COM_INIT_DB:
> {
> @@ -3249,6 +3260,14 @@ mysql_execute_command(THD *thd)
> #endif
> DBUG_ENTER("mysql_execute_command");
>
> + if (thd->security_ctx->password_expired() &&
> + lex->sql_command != SQLCOM_SET_OPTION &&
> + lex->sql_command != SQLCOM_ALTER_USER)
SQLCOM_SET_OPTION would be enough
> + {
> + my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
> + DBUG_RETURN(1);
> + }
> +
> DBUG_ASSERT(thd->transaction.stmt.is_empty() || thd->in_sub_stmt);
> /*
> Each statement or replication event which might produce deadlock
> diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
> index cb822fc2e98..e621bd6519e 100644
> --- a/sql/sql_prepare.cc
> +++ b/sql/sql_prepare.cc
> @@ -4188,14 +4188,11 @@ Prepared_statement::execute_loop(String *expanded_query,
> if (set_parameters(expanded_query, packet, packet_end))
> return TRUE;
>
> -#ifdef NOT_YET_FROM_MYSQL_5_6
> - if (unlikely(thd->security_ctx->password_expired &&
> - !lex->is_change_password))
> + if (unlikely(thd->security_ctx->password_expired()))
Really? How can one end up here, if you don't allow COM_STMT_PREPARE
and COM_STMT_EXECUTE and SQLCOM_PREPARE/SQLCOM_EXECUTE ?
> {
> my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
> return true;
> }
> -#endif
>
> reexecute:
> // Make sure that reprepare() did not create any new Items.
> @@ -4361,15 +4358,12 @@ Prepared_statement::execute_bulk_loop(String *expanded_query,
> }
> read_types= FALSE;
>
> -#ifdef NOT_YET_FROM_MYSQL_5_6
> - if (unlikely(thd->security_ctx->password_expired &&
> - !lex->is_change_password))
> + if (unlikely(thd->security_ctx->password_expired()))
Same
> {
> my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
> thd->set_bulk_execution(0);
> return true;
> }
> -#endif
>
> // iterations changed by set_bulk_parameters
> while ((iterations || start_param) && !error && !thd->is_error())
> diff --git a/sql/sql_type.cc b/sql/sql_type.cc
> index 96a73a85267..e06272e4564 100644
> --- a/sql/sql_type.cc
> +++ b/sql/sql_type.cc
> @@ -618,20 +618,35 @@ Sec9 & Sec9::round(uint dec)
> return *this;
> }
>
> +void Timestamp::set_out_of_range(uint sec, uint usec, int *warn)
> +{
> + *warn|= MYSQL_TIME_WARN_OUT_OF_RANGE;
> + tv_sec= sec;
> + tv_usec= usec;
> +}
>
> void Timestamp::round_or_set_max(uint dec, int *warn)
> {
> DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
> if (add_nanoseconds_usec(msec_round_add[dec]) &&
> tv_sec++ >= TIMESTAMP_MAX_VALUE)
> - {
> - tv_sec= TIMESTAMP_MAX_VALUE;
> - tv_usec= TIME_MAX_SECOND_PART;
> - *warn|= MYSQL_TIME_WARN_OUT_OF_RANGE;
> - }
> + set_out_of_range(TIMESTAMP_MAX_VALUE, TIME_MAX_SECOND_PART, warn);
> +
> my_timeval_trunc(this, dec);
> }
>
> +Timestamp& Timestamp::add(int sec, int *warn)
> +{
> + longlong res= tv_sec + sec;
> + if (res >= TIMESTAMP_MAX_VALUE)
> + set_out_of_range(TIMESTAMP_MAX_VALUE, TIME_MAX_SECOND_PART, warn);
> + else if (res < 0)
> + set_out_of_range(0, 0, warn);
> + else
> + tv_sec= (my_time_t) res;
> +
> + return *this;
> +}
you don't need Timestamp class at all, nor any changes here.
>
> bool Temporal::add_nanoseconds_with_round(THD *thd, int *warn,
> date_conv_mode_t mode,
> diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
> index 8f42b18c176..919aae6858f 100644
> --- a/sql/sql_yacc.yy
> +++ b/sql/sql_yacc.yy
> @@ -8030,6 +8032,34 @@ opt_account_option:
> {
> Lex->account_options.account_locked= ACCOUNTLOCK_UNLOCKED;
> }
> + | PASSWORD_SYM EXPIRE_SYM opt_password_expire
> + { }
> + ;
> +opt_password_expire:
> + /* Nothing */
> + {
> + Lex->account_options.password_expire= PASSWORD_EXPIRE_NOW;
> + }
> + | NEVER_SYM
> + {
> + Lex->account_options.password_expire= PASSWORD_EXPIRE_NEVER;
> + }
> + | DEFAULT
> + {
> + Lex->account_options.password_expire= PASSWORD_EXPIRE_DEFAULT;
> + }
> + | INTERVAL_SYM real_ulong_num DAY_SYM
use NUM instead of real_ulong_num; it'll make a number within the UINT_MAX32
range, which is what you need here anyway —
no reason to limit it to UINT_MAX16.
> + {
> + if (unlikely($2 == 0 || $2 > UINT_MAX16))
> + {
> + char num[MAX_BIGINT_WIDTH + 1];
> + my_snprintf(num, sizeof(num), "%lu", $2);
> + my_yyabort_error((ER_WRONG_VALUE, MYF(0), "DAY", num));
> + }
> +
> + Lex->account_options.password_expire= PASSWORD_EXPIRE_INTERVAL;
> + Lex->account_options.num_expiration_days= $2;
> + }
> ;
>
> ev_alter_on_schedule_completion:
> diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
> index 37b3d65e20a..ff521c10d9b 100644
> --- a/sql/sys_vars.cc
> +++ b/sql/sys_vars.cc
> @@ -1507,6 +1507,25 @@ static Sys_var_ulong Sys_max_connections(
> DEFAULT(MAX_CONNECTIONS_DEFAULT), BLOCK_SIZE(1), NO_MUTEX_GUARD,
> NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(fix_max_connections));
>
> +static Sys_var_ulong Sys_default_password_lifetime(
> + "default_password_lifetime",
> + "This defines the global password expiration policy. Defaults to 0, "
> + "meaning automatic password expiration is disabled. If the value is a "
> + "positive integer N, the passwords must be changed every N days. This "
> + "behavior can be overriden using the password expiration options in "
> + "ALTER USER.",
Don't say "Defaults to 0". Just say "0 means password expiration is disabled".
> + GLOBAL_VAR(default_password_lifetime), CMD_LINE(REQUIRED_ARG),
> + VALID_RANGE(0, UINT_MAX), DEFAULT(DEFAULT_PASSWORD_LIFETIME),
> + BLOCK_SIZE(1));
> +
> +static Sys_var_mybool Sys_disconnect_on_expired_password(
> + "disconnect_on_expired_password",
> + "This variable controls how the server handles clients with expired "
> + "passwords. If enabled, the server disconnects the client, otherwise "
> + "the server puts the client in sandbox mode.",
Not exactly. "How the server handles clients _that don't know about the
sandbox mode_"
> + GLOBAL_VAR(disconnect_on_expired_password), CMD_LINE(OPT_ARG),
> + DEFAULT(FALSE));
> +
> static Sys_var_ulong Sys_max_connect_errors(
> "max_connect_errors",
> "If there is more than this number of interrupted connections from "
Regards,
Sergei
Chief Architect MariaDB
and security(a)mariadb.org
1
0
Re: [Maria-developers] 4f070b59005: 29 Jan, MDEV-371 Unique indexes for blobs
by Sergei Golubchik 12 Feb '19
by Sergei Golubchik 12 Feb '19
12 Feb '19
Hi, Sachin!
That's generally pretty good.
But I still don't understand what you store in frm and how you
manipulate TABLE and TABLE_SHARE key lists.
Could you please explain that?
On Jan 29, Sachin Setiya wrote:
> diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result
> new file mode 100644
> --- /dev/null
> +++ b/mysql-test/main/long_unique.result
> @@ -0,0 +1,1392 @@
> +#Structure of tests
> +#First we will check all option for
> +#table containing single unique column
> +#table containing keys like unique(a,b,c,d) etc
> +#then table containing 2 blob unique etc
> +set @allowed_packet= @@max_allowed_packet;
> +#table with single long blob column;
> +create table t1(a blob unique );
> +insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
> +#blob with primary key not allowed
> +create table t2(a blob,primary key(a(10000)));
> +ERROR 42000: Specified key was too long; max key length is 1000 bytes
> +create table t3(a varchar(10000) primary key);
> +ERROR 42000: Specified key was too long; max key length is 1000 bytes
> +insert into t1 values(2);
> +ERROR 23000: Duplicate entry '2' for key 'a'
> +#table structure;
> +desc t1;
> +Field Type Null Key Default Extra
> +a blob YES UNI NULL
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + UNIQUE KEY `a` (`a`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table t1
> +Non_unique 0
> +Key_name a
> +Seq_in_index 1
> +Column_name a
> +Collation A
> +Cardinality NULL
> +Sub_part NULL
> +Packed NULL
> +Null YES
> +Index_type HASH
Good.
But this also mean that HASH is a valid index type, and that one
can write
CREATE TABLE t1 (a blob, unique (a) USING HASH)
generally, this should work. And one can write
CREATE TABLE t1 (a blob, unique (a) USING BTREE)
and this should fail, "key too long; max key length is 1000 bytes"
Note, the syntax is already possible. You just need to handle
these cases correctly in mysql_prepare_create_table().
And if neither HASH nor BTREE is specified, it's auto-detection,
basically. For short keys it's BTREE, for long keys it's HASH.
This is already implemented by this patch :)
> +Comment
> +Index_comment
> +
> +MyISAM file: DATADIR/test/t1
> +Record format: Packed
> +Character set: latin1_swedish_ci (8)
> +Data records: 10 Deleted blocks: 0
> +Recordlength: 20
> +
> +table description:
> +Key Start Len Index Type
> +1 12 8 multip. ulonglong NULL
> +select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
> +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
> +def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob UNI select,insert,update,references NEVER NULL
> +select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
> +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLUMN_NAME COLLATION CARDINALITY SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT INDEX_COMMENT
> +def test t1 0 test a 1 a A NULL NULL NULL YES HASH
> +select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
> +CONSTRAINT_CATALOG def
> +CONSTRAINT_SCHEMA test
> +CONSTRAINT_NAME a
> +TABLE_CATALOG def
> +TABLE_SCHEMA test
> +TABLE_NAME t1
> +COLUMN_NAME a
> +ORDINAL_POSITION 1
> +POSITION_IN_UNIQUE_CONSTRAINT NULL
> +REFERENCED_TABLE_SCHEMA NULL
> +REFERENCED_TABLE_NAME NULL
> +REFERENCED_COLUMN_NAME NULL
> +# table select we should not be able to see db_row_hash_column;
> +select * from t1 order by a;
> +a
> +NULL
> +NULL
> +1
> +123456789034567890
> +123456789034567891
> +2
> +3
> +56
> +maria
> +sachin
> +select db_row_hash_1 from t1;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
> +#duplicate entry test;
> +insert into t1 values(2);
> +ERROR 23000: Duplicate entry '2' for key 'a'
> +insert into t1 values('sachin');
> +ERROR 23000: Duplicate entry 'sachin' for key 'a'
> +insert into t1 values(123456789034567891);
> +ERROR 23000: Duplicate entry '123456789034567891' for key 'a'
> +select * from t1 order by a;
> +a
> +NULL
> +NULL
> +1
> +123456789034567890
> +123456789034567891
> +2
> +3
> +56
> +maria
> +sachin
> +insert into t1 values(11),(22),(33);
> +insert into t1 values(12),(22);
> +ERROR 23000: Duplicate entry '22' for key 'a'
> +select * from t1 order by a;
> +a
> +NULL
> +NULL
> +1
> +11
> +12
> +123456789034567890
> +123456789034567891
> +2
> +22
> +3
> +33
> +56
> +maria
> +sachin
> +insert into t1 values(repeat('s',4000*10)),(repeat('s',4001*10));
> +insert into t1 values(repeat('m',4000*10)),(repeat('m',4000*10));
> +ERROR 23000: Duplicate entry 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm' for key 'a'
> +insert into t1 values(repeat('m',4001)),(repeat('m',4002));
> +truncate table t1;
> +insert into t1 values(1),(2),(3),(4),(5),(8),(7);
> +
> +MyISAM file: DATADIR/test/t1
> +Record format: Packed
> +Character set: latin1_swedish_ci (8)
> +Data records: 7 Deleted blocks: 0
> +Recordlength: 20
> +
> +table description:
> +Key Start Len Index Type
> +1 12 8 multip. ulonglong NULL
> +#now some alter commands;
> +alter table t1 add column b int;
> +desc t1;
> +Field Type Null Key Default Extra
> +a blob YES UNI NULL
> +b int(11) YES NULL
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `b` int(11) DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535))
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +insert into t1 values(1,2);
> +ERROR 23000: Duplicate entry '1' for key 'a'
> +insert into t1 values(2,2);
> +ERROR 23000: Duplicate entry '2' for key 'a'
> +select db_row_hash_1 from t1;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
> +#now try to change db_row_hash_1 column;
> +alter table t1 drop column db_row_hash_1;
> +ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
> +alter table t1 add column d int , add column e int , drop column db_row_hash_1;
> +ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
> +alter table t1 modify column db_row_hash_1 int ;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +alter table t1 add column a int , add column b int, modify column db_row_hash_1 int ;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +alter table t1 change column db_row_hash_1 dsds int;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +alter table t1 add column asd int, change column db_row_hash_1 dsds int;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +alter table t1 drop column b , add column c int;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `c` int(11) DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535))
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +#now add some column with name db_row_hash;
> +alter table t1 add column db_row_hash_1 int unique;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `c` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535)),
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +insert into t1 values(45,1,55),(46,1,55);
> +ERROR 23000: Duplicate entry '55' for key 'db_row_hash_1'
> +insert into t1 values(45,1,55),(45,1,55);
> +ERROR 23000: Duplicate entry '45' for key 'a'
> +alter table t1 add column db_row_hash_2 int, add column db_row_hash_3 int;
> +desc t1;
> +Field Type Null Key Default Extra
> +a blob YES UNI NULL
> +c int(11) YES NULL
> +db_row_hash_1 int(11) YES UNI NULL
> +db_row_hash_2 int(11) YES NULL
> +db_row_hash_3 int(11) YES NULL
> +#this should also drop the unique index ;
> +alter table t1 drop column a;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `c` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_3` int(11) DEFAULT NULL,
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
> +#add column with unique index on blob ;
> +alter table t1 add column a blob unique;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `c` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_3` int(11) DEFAULT NULL,
> + `a` blob DEFAULT NULL,
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
> + UNIQUE KEY `a` (`a`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +# try to change the blob unique name;
> +#this will change index to b tree;
> +alter table t1 change column a aa blob ;
eh, the comment is wrong. Your old code
changed the column to int, that was indeed changing the
index to a btree. But now you simply rename the blob.
the index is not changed to btree anymore.
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `c` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_3` int(11) DEFAULT NULL,
> + `aa` blob DEFAULT NULL,
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
> + UNIQUE KEY `a` (`aa`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
> +t1 0 a 1 aa A NULL NULL NULL YES HASH
> +# try to change the blob unique datatype;
> +#this will change index to b tree;
> +alter table t1 modify column aa int ;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `c` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_3` int(11) DEFAULT NULL,
> + `aa` int(11) DEFAULT NULL,
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
> + UNIQUE KEY `a` (`aa`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
> +t1 0 a 1 aa A NULL NULL NULL YES BTREE
> +alter table t1 add column clm blob unique;
> +#try changing the name ;
> +alter table t1 change column clm clm_changed blob;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `c` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_3` int(11) DEFAULT NULL,
> + `aa` int(11) DEFAULT NULL,
> + `clm_changed` blob DEFAULT NULL,
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
> + UNIQUE KEY `a` (`aa`),
> + UNIQUE KEY `clm` (`clm_changed`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
> +t1 0 a 1 aa A NULL NULL NULL YES BTREE
> +t1 0 clm 1 clm_changed A NULL NULL NULL YES HASH
> +#now drop the unique key;
> +alter table t1 drop key clm;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `c` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_3` int(11) DEFAULT NULL,
> + `aa` int(11) DEFAULT NULL,
> + `clm_changed` blob DEFAULT NULL,
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
> + UNIQUE KEY `a` (`aa`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
> +t1 0 a 1 aa A NULL NULL NULL YES BTREE
> +drop table t1;
> +create table t1 (a TEXT CHARSET latin1 COLLATE latin1_german2_ci unique);
> +desc t1;
> +Field Type Null Key Default Extra
> +a text YES UNI NULL
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL NULL NULL YES HASH
> +insert into t1 values ('ae');
> +insert into t1 values ('AE');
> +ERROR 23000: Duplicate entry 'AE' for key 'a'
> +insert into t1 values ('Ä');
> +drop table t1;
> +create table t1 (a int primary key, b blob unique);
> +desc t1;
> +Field Type Null Key Default Extra
> +a int(11) NO PRI NULL
> +b blob YES UNI NULL
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 PRIMARY 1 a A 0 NULL NULL BTREE
> +t1 0 b 1 b A NULL NULL NULL YES HASH
> +insert into t1 values(1,1),(2,2),(3,3);
> +insert into t1 values(1,1);
> +ERROR 23000: Duplicate entry '1' for key 'b'
> +insert into t1 values(7,1);
> +ERROR 23000: Duplicate entry '1' for key 'b'
> +drop table t1;
> +#table with multiple long blob column and varchar text column ;
> +create table t1(a blob unique, b int , c blob unique , d text unique , e varchar(3000) unique);
> +insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555),
> +('sachin',341,'fdf','gfgfgfg','hghgr'),('maria',345,'frter','dasd','utyuty'),
> +(123456789034567891,353534,53453453453456,64565464564564,45435345345345),
> +(123456789034567890,43545,657567567567,78967657567567,657567567567567676);
> +#table structure;
> +desc t1;
> +Field Type Null Key Default Extra
> +a blob YES UNI NULL
> +b int(11) YES NULL
> +c blob YES UNI NULL
> +d text YES UNI NULL
> +e varchar(3000) YES UNI NULL
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `b` int(11) DEFAULT NULL,
> + `c` blob DEFAULT NULL,
> + `d` text DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + UNIQUE KEY `a` (`a`),
> + UNIQUE KEY `c` (`c`),
> + UNIQUE KEY `d` (`d`),
> + UNIQUE KEY `e` (`e`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL NULL NULL YES HASH
> +t1 0 c 1 c A NULL NULL NULL YES HASH
> +t1 0 d 1 d A NULL NULL NULL YES HASH
> +t1 0 e 1 e A NULL NULL NULL YES HASH
> +
> +MyISAM file: DATADIR/test/t1
> +Record format: Packed
> +Character set: latin1_swedish_ci (8)
> +Data records: 8 Deleted blocks: 0
> +Recordlength: 3072
> +
> +table description:
> +Key Start Len Index Type
> +1 3063 8 multip. ulonglong NULL
> +2 3055 8 multip. ulonglong NULL
> +3 3047 8 multip. ulonglong NULL
> +4 3039 8 multip. ulonglong NULL
> +select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
> +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
> +def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob UNI select,insert,update,references NEVER NULL
> +def test t1 b 2 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL
> +def test t1 c 3 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob UNI select,insert,update,references NEVER NULL
> +def test t1 d 4 NULL YES text 65535 65535 NULL NULL NULL latin1 latin1_swedish_ci text UNI select,insert,update,references NEVER NULL
> +def test t1 e 5 NULL YES varchar 3000 3000 NULL NULL NULL latin1 latin1_swedish_ci varchar(3000) UNI select,insert,update,references NEVER NULL
> +select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
> +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLUMN_NAME COLLATION CARDINALITY SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT INDEX_COMMENT
> +def test t1 0 test a 1 a A NULL NULL NULL YES HASH
> +def test t1 0 test c 1 c A NULL NULL NULL YES HASH
> +def test t1 0 test d 1 d A NULL NULL NULL YES HASH
> +def test t1 0 test e 1 e A NULL NULL NULL YES HASH
> +select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
> +CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
> +def test a def test t1 a 1 NULL NULL NULL NULL
> +def test c def test t1 c 1 NULL NULL NULL NULL
> +def test d def test t1 d 1 NULL NULL NULL NULL
> +def test e def test t1 e 1 NULL NULL NULL NULL
> +#table select we should not be able to see db_row_hash_1 column;
> +select * from t1 order by a;
> +a b c d e
> +1 2 3 4 5
> +123456789034567890 43545 657567567567 78967657567567 657567567567567676
> +123456789034567891 353534 53453453453456 64565464564564 45435345345345
> +2 11 22 33 44
> +3111 222 333 444 555
> +5611 2222 3333 4444 5555
> +maria 345 frter dasd utyuty
> +sachin 341 fdf gfgfgfg hghgr
> +select db_row_hash_1 from t1;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
> +select db_row_hash_2 from t1;
> +ERROR 42S22: Unknown column 'db_row_hash_2' in 'field list'
> +select db_row_hash_3 from t1;
> +ERROR 42S22: Unknown column 'db_row_hash_3' in 'field list'
> +#duplicate entry test;
> +insert into t1 values(21,2,3,42,51);
> +ERROR 23000: Duplicate entry '3' for key 'c'
> +insert into t1 values('sachin',null,null,null,null);
> +ERROR 23000: Duplicate entry 'sachin' for key 'a'
> +insert into t1 values(1234567890345671890,4353451,6575675675617,789676575675617,657567567567567676);
> +ERROR 23000: Duplicate entry '657567567567567676' for key 'e'
> +select * from t1 order by a;
> +a b c d e
> +1 2 3 4 5
> +123456789034567890 43545 657567567567 78967657567567 657567567567567676
> +123456789034567891 353534 53453453453456 64565464564564 45435345345345
> +2 11 22 33 44
> +3111 222 333 444 555
> +5611 2222 3333 4444 5555
> +maria 345 frter dasd utyuty
> +sachin 341 fdf gfgfgfg hghgr
> +insert into t1 values(repeat('s',4000*10),100,repeat('s',4000*10),repeat('s',4000*10),
> +repeat('s',400)),(repeat('s',4001*10),1000,repeat('s',4001*10),repeat('s',4001*10),
> +repeat('s',2995));
> +insert into t1 values(repeat('m',4000*11),10,repeat('s',4000*11),repeat('s',4000*11),repeat('s',2995));
> +ERROR 23000: Duplicate entry 'ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss' for key 'e'
> +truncate table t1;
> +insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555);
> +#now some alter commands;
> +alter table t1 add column f int;
> +desc t1;
> +Field Type Null Key Default Extra
> +a blob YES UNI NULL
> +b int(11) YES NULL
> +c blob YES UNI NULL
> +d text YES UNI NULL
> +e varchar(3000) YES UNI NULL
> +f int(11) YES NULL
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `b` int(11) DEFAULT NULL,
> + `c` blob DEFAULT NULL,
> + `d` text DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` int(11) DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535)),
> + UNIQUE KEY `c` (`c`(65535)),
> + UNIQUE KEY `d` (`d`(65535)),
> + UNIQUE KEY `e` (`e`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +#unique key should not break;
> +insert into t1 values(1,2,3,4,5,6);
> +ERROR 23000: Duplicate entry '1' for key 'a'
> +select db_row_hash_1 , db_row_hash_2, db_row_hash_3 from t1;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
> +#now try to change db_row_hash_1 column;
> +alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
> +ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
> +alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
> +ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
> +alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +alter table t1 drop column b , add column g int;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `c` blob DEFAULT NULL,
> + `d` text DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` int(11) DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535)),
> + UNIQUE KEY `c` (`c`(65535)),
> + UNIQUE KEY `d` (`d`(65535)),
> + UNIQUE KEY `e` (`e`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +#now add some column with name db_row_hash;
> +alter table t1 add column db_row_hash_1 int unique;
> +alter table t1 add column db_row_hash_2 int unique;
> +alter table t1 add column db_row_hash_3 int unique;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `c` blob DEFAULT NULL,
> + `d` text DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` int(11) DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_3` int(11) DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535)),
> + UNIQUE KEY `c` (`c`(65535)),
> + UNIQUE KEY `d` (`d`(65535)),
> + UNIQUE KEY `e` (`e`),
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
> + UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
> + UNIQUE KEY `db_row_hash_3` (`db_row_hash_3`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +alter table t1 add column db_row_hash_7 int, add column db_row_hash_5 int , add column db_row_hash_4 int ;
> +alter table t1 drop column db_row_hash_7,drop column db_row_hash_3, drop column db_row_hash_4;
> +desc t1;
> +Field Type Null Key Default Extra
> +a blob YES UNI NULL
> +c blob YES UNI NULL
> +d text YES UNI NULL
> +e varchar(3000) YES UNI NULL
> +f int(11) YES NULL
> +g int(11) YES NULL
> +db_row_hash_1 int(11) YES UNI NULL
> +db_row_hash_2 int(11) YES UNI NULL
> +db_row_hash_5 int(11) YES NULL
> +#this should not break anything;
> +insert into t1 values(1,2,3,4,5,6,23,5,6);
> +ERROR 23000: Duplicate entry '1' for key 'a'
> +#this should also drop the unique index;
> +alter table t1 drop column a, drop column c;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `d` text DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` int(11) DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_5` int(11) DEFAULT NULL,
> + UNIQUE KEY `d` (`d`(65535)),
> + UNIQUE KEY `e` (`e`),
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
> + UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 d 1 d A NULL 65535 NULL YES HASH
> +t1 0 e 1 e A NULL NULL NULL YES HASH
> +t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
> +t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
> +#add column with unique index on blob;
> +alter table t1 add column a blob unique;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `d` text DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` int(11) DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_5` int(11) DEFAULT NULL,
> + `a` blob DEFAULT NULL,
> + UNIQUE KEY `d` (`d`(65535)),
> + UNIQUE KEY `e` (`e`),
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
> + UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
> + UNIQUE KEY `a` (`a`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 d 1 d A NULL 65535 NULL YES HASH
> +t1 0 e 1 e A NULL NULL NULL YES HASH
> +t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
> +t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
> +t1 0 a 1 a A NULL NULL NULL YES HASH
> +#try to change the blob unique column name;
> +#this will change index to b tree;
> +alter table t1 modify column a int , modify column e int;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `d` text DEFAULT NULL,
> + `e` int(11) DEFAULT NULL,
> + `f` int(11) DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_5` int(11) DEFAULT NULL,
> + `a` int(11) DEFAULT NULL,
> + UNIQUE KEY `d` (`d`(65535)),
> + UNIQUE KEY `e` (`e`),
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
> + UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
> + UNIQUE KEY `a` (`a`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 d 1 d A NULL 65535 NULL YES HASH
> +t1 0 e 1 e A NULL NULL NULL YES BTREE
> +t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
> +t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
> +t1 0 a 1 a A NULL NULL NULL YES BTREE
> +alter table t1 add column clm1 blob unique,add column clm2 blob unique;
> +#try changing the name;
> +alter table t1 change column clm1 clm_changed1 blob, change column clm2 clm_changed2 blob;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `d` text DEFAULT NULL,
> + `e` int(11) DEFAULT NULL,
> + `f` int(11) DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_5` int(11) DEFAULT NULL,
> + `a` int(11) DEFAULT NULL,
> + `clm_changed1` blob DEFAULT NULL,
> + `clm_changed2` blob DEFAULT NULL,
> + UNIQUE KEY `d` (`d`(65535)),
> + UNIQUE KEY `e` (`e`),
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
> + UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
> + UNIQUE KEY `a` (`a`),
> + UNIQUE KEY `clm1` (`clm_changed1`),
> + UNIQUE KEY `clm2` (`clm_changed2`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 d 1 d A NULL 65535 NULL YES HASH
> +t1 0 e 1 e A NULL NULL NULL YES BTREE
> +t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
> +t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
> +t1 0 a 1 a A NULL NULL NULL YES BTREE
> +t1 0 clm1 1 clm_changed1 A NULL NULL NULL YES HASH
> +t1 0 clm2 1 clm_changed2 A NULL NULL NULL YES HASH
> +#now drop the unique key;
> +alter table t1 drop key clm1, drop key clm2;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `d` text DEFAULT NULL,
> + `e` int(11) DEFAULT NULL,
> + `f` int(11) DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + `db_row_hash_5` int(11) DEFAULT NULL,
> + `a` int(11) DEFAULT NULL,
> + `clm_changed1` blob DEFAULT NULL,
> + `clm_changed2` blob DEFAULT NULL,
> + UNIQUE KEY `d` (`d`(65535)),
> + UNIQUE KEY `e` (`e`),
> + UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
> + UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
> + UNIQUE KEY `a` (`a`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 d 1 d A NULL 65535 NULL YES HASH
> +t1 0 e 1 e A NULL NULL NULL YES BTREE
> +t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
> +t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
> +t1 0 a 1 a A NULL NULL NULL YES BTREE
> +drop table t1;
> +#now the table with key on multiple columns; the ultimate test;
> +create table t1(a blob, b int , c varchar(2000) , d text , e varchar(3000) , f longblob , g int , h text ,
> +unique(a,b,c), unique(c,d,e),unique(e,f,g,h), unique(b,d,g,h));
> +insert into t1 values(1,1,1,1,1,1,1,1),(2,2,2,2,2,2,2,2),(3,3,3,3,3,3,3,3),(4,4,4,4,4,4,4,4),(5,5,5,5,5,5,5,5),
> +('maria',6,'maria','maria','maria','maria',6,'maria'),('mariadb',7,'mariadb','mariadb','mariadb','mariadb',8,'mariadb')
> +,(null,null,null,null,null,null,null,null),(null,null,null,null,null,null,null,null);
> +#table structure;
> +desc t1;
> +Field Type Null Key Default Extra
> +a blob YES MUL NULL
> +b int(11) YES MUL NULL
> +c varchar(2000) YES MUL NULL
> +d text YES NULL
> +e varchar(3000) YES MUL NULL
> +f longblob YES NULL
> +g int(11) YES NULL
> +h text YES NULL
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `b` int(11) DEFAULT NULL,
> + `c` varchar(2000) DEFAULT NULL,
> + `d` text DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` longblob DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `h` text DEFAULT NULL,
> + UNIQUE KEY `a` (`a`,`b`,`c`),
> + UNIQUE KEY `c` (`c`,`d`,`e`),
> + UNIQUE KEY `e` (`e`,`f`,`g`,`h`),
> + UNIQUE KEY `b` (`b`,`d`,`g`,`h`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL NULL NULL YES HASH
> +t1 0 a 2 b A NULL NULL NULL YES HASH
> +t1 0 a 3 c A NULL NULL NULL YES HASH
> +t1 0 c 1 c A NULL NULL NULL YES HASH
> +t1 0 c 2 d A 0 NULL NULL YES HASH
> +t1 0 c 3 e A 0 NULL NULL YES HASH
> +t1 0 e 1 e A 0 NULL NULL YES HASH
> +t1 0 e 2 f A 0 NULL NULL YES HASH
> +t1 0 e 3 g A 0 NULL NULL YES HASH
> +t1 0 e 4 h A 0 NULL NULL YES HASH
> +t1 0 b 1 b A 0 NULL NULL YES HASH
> +t1 0 b 2 d A 0 NULL NULL YES HASH
> +t1 0 b 3 g A 0 NULL NULL YES HASH
> +t1 0 b 4 h A 0 NULL NULL YES HASH
> +
> +MyISAM file: DATADIR/test/t1
> +Record format: Packed
> +Character set: latin1_swedish_ci (8)
> +Data records: 9 Deleted blocks: 0
> +Recordlength: 5092
> +
> +table description:
> +Key Start Len Index Type
> +1 5081 8 multip. ulonglong NULL
> +2 5073 8 multip. ulonglong NULL
> +3 5065 8 multip. ulonglong NULL
> +4 5057 8 multip. ulonglong NULL
> +select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
> +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
> +def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob MUL select,insert,update,references NEVER NULL
> +def test t1 b 2 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) MUL select,insert,update,references NEVER NULL
> +def test t1 c 3 NULL YES varchar 2000 2000 NULL NULL NULL latin1 latin1_swedish_ci varchar(2000) MUL select,insert,update,references NEVER NULL
> +def test t1 d 4 NULL YES text 65535 65535 NULL NULL NULL latin1 latin1_swedish_ci text select,insert,update,references NEVER NULL
> +def test t1 e 5 NULL YES varchar 3000 3000 NULL NULL NULL latin1 latin1_swedish_ci varchar(3000) MUL select,insert,update,references NEVER NULL
> +def test t1 f 6 NULL YES longblob 4294967295 4294967295 NULL NULL NULL NULL NULL longblob select,insert,update,references NEVER NULL
> +def test t1 g 7 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL
> +def test t1 h 8 NULL YES text 65535 65535 NULL NULL NULL latin1 latin1_swedish_ci text select,insert,update,references NEVER NULL
> +select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
> +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLUMN_NAME COLLATION CARDINALITY SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT INDEX_COMMENT
> +def test t1 0 test a 1 a A NULL NULL NULL YES HASH
> +def test t1 0 test a 2 b A NULL NULL NULL YES HASH
> +def test t1 0 test a 3 c A NULL NULL NULL YES HASH
> +def test t1 0 test c 1 c A NULL NULL NULL YES HASH
> +def test t1 0 test c 2 d A 0 NULL NULL YES HASH
> +def test t1 0 test c 3 e A 0 NULL NULL YES HASH
> +def test t1 0 test e 1 e A 0 NULL NULL YES HASH
> +def test t1 0 test e 2 f A 0 NULL NULL YES HASH
> +def test t1 0 test e 3 g A 0 NULL NULL YES HASH
> +def test t1 0 test e 4 h A 0 NULL NULL YES HASH
> +def test t1 0 test b 1 b A 0 NULL NULL YES HASH
> +def test t1 0 test b 2 d A 0 NULL NULL YES HASH
> +def test t1 0 test b 3 g A 0 NULL NULL YES HASH
> +def test t1 0 test b 4 h A 0 NULL NULL YES HASH
> +select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
> +CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
> +def test a def test t1 a 1 NULL NULL NULL NULL
> +def test a def test t1 b 2 NULL NULL NULL NULL
> +def test a def test t1 c 3 NULL NULL NULL NULL
> +def test c def test t1 c 1 NULL NULL NULL NULL
> +def test c def test t1 d 2 NULL NULL NULL NULL
> +def test c def test t1 e 3 NULL NULL NULL NULL
> +def test e def test t1 e 1 NULL NULL NULL NULL
> +def test e def test t1 f 2 NULL NULL NULL NULL
> +def test e def test t1 g 3 NULL NULL NULL NULL
> +def test e def test t1 h 4 NULL NULL NULL NULL
> +def test b def test t1 b 1 NULL NULL NULL NULL
> +def test b def test t1 d 2 NULL NULL NULL NULL
> +def test b def test t1 g 3 NULL NULL NULL NULL
> +def test b def test t1 h 4 NULL NULL NULL NULL
> +# table select we should not be able to see db_row_hash_1 column;
> +select * from t1 order by a;
> +a b c d e f g h
> +NULL NULL NULL NULL NULL NULL NULL NULL
> +NULL NULL NULL NULL NULL NULL NULL NULL
> +1 1 1 1 1 1 1 1
> +2 2 2 2 2 2 2 2
> +3 3 3 3 3 3 3 3
> +4 4 4 4 4 4 4 4
> +5 5 5 5 5 5 5 5
> +maria 6 maria maria maria maria 6 maria
> +mariadb 7 mariadb mariadb mariadb mariadb 8 mariadb
> +select db_row_hash_1 from t1;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
> +select db_row_hash_2 from t1;
> +ERROR 42S22: Unknown column 'db_row_hash_2' in 'field list'
> +select db_row_hash_3 from t1;
> +ERROR 42S22: Unknown column 'db_row_hash_3' in 'field list'
> +#duplicate entry test;
> +#duplicate keys entry;
> +insert into t1 values(1,1,1,0,0,0,0,0);
> +ERROR 23000: Duplicate entry '1-1-1' for key 'a'
> +insert into t1 values(0,0,1,1,1,0,0,0);
> +ERROR 23000: Duplicate entry '1-1-1' for key 'c'
> +insert into t1 values(0,0,0,0,1,1,1,1);
> +ERROR 23000: Duplicate entry '1-1-1-1' for key 'e'
> +insert into t1 values(1,1,1,1,1,0,0,0);
> +ERROR 23000: Duplicate entry '1-1-1' for key 'a'
> +insert into t1 values(0,0,0,0,1,1,1,1);
> +ERROR 23000: Duplicate entry '1-1-1-1' for key 'e'
> +insert into t1 values(1,1,1,1,1,1,1,1);
> +ERROR 23000: Duplicate entry '1-1-1' for key 'a'
> +select db_row_hash_1,db_row_hash_2,db_row_hash_3,db_row_hash_4,db_row_hash_5 from t1;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
> +alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
> +ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
> +alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
> +ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
> +alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;
> +ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `b` int(11) DEFAULT NULL,
> + `c` varchar(2000) DEFAULT NULL,
> + `d` text DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` longblob DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `h` text DEFAULT NULL,
> + UNIQUE KEY `a` (`a`,`b`,`c`),
> + UNIQUE KEY `c` (`c`,`d`,`e`),
> + UNIQUE KEY `e` (`e`,`f`,`g`,`h`),
> + UNIQUE KEY `b` (`b`,`d`,`g`,`h`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +# add column named db_row_hash_*;
> +alter table t1 add column db_row_hash_7 int , add column db_row_hash_5 int,
> +add column db_row_hash_1 int, add column db_row_hash_2 int;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `b` int(11) DEFAULT NULL,
> + `c` varchar(2000) DEFAULT NULL,
> + `d` text DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` longblob DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `h` text DEFAULT NULL,
> + `db_row_hash_7` int(11) DEFAULT NULL,
> + `db_row_hash_5` int(11) DEFAULT NULL,
> + `db_row_hash_1` int(11) DEFAULT NULL,
> + `db_row_hash_2` int(11) DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535),`b`,`c`),
> + UNIQUE KEY `c` (`c`,`d`(65535),`e`),
> + UNIQUE KEY `e` (`e`,`f`(65535),`g`,`h`(65535)),
> + UNIQUE KEY `b` (`b`,`d`(65535),`g`,`h`(65535))
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL 65535 NULL YES HASH
> +t1 0 a 2 b A NULL NULL NULL YES HASH
> +t1 0 a 3 c A NULL NULL NULL YES HASH
> +t1 0 c 1 c A NULL NULL NULL YES HASH
> +t1 0 c 2 d A 0 65535 NULL YES HASH
> +t1 0 c 3 e A 0 NULL NULL YES HASH
> +t1 0 e 1 e A 0 NULL NULL YES HASH
> +t1 0 e 2 f A 0 65535 NULL YES HASH
> +t1 0 e 3 g A 0 NULL NULL YES HASH
> +t1 0 e 4 h A 0 65535 NULL YES HASH
> +t1 0 b 1 b A 0 NULL NULL YES HASH
> +t1 0 b 2 d A 0 65535 NULL YES HASH
> +t1 0 b 3 g A 0 NULL NULL YES HASH
> +t1 0 b 4 h A 0 65535 NULL YES HASH
> +alter table t1 drop column db_row_hash_7 , drop column db_row_hash_5 ,
> +drop column db_row_hash_1, drop column db_row_hash_2 ;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `b` int(11) DEFAULT NULL,
> + `c` varchar(2000) DEFAULT NULL,
> + `d` text DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` longblob DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `h` text DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535),`b`,`c`),
> + UNIQUE KEY `c` (`c`,`d`(65535),`e`),
> + UNIQUE KEY `b` (`b`,`d`(65535),`g`,`h`(65535)),
> + UNIQUE KEY `e` (`e`,`f`(65535),`g`,`h`(65535))
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL 65535 NULL YES HASH
> +t1 0 a 2 b A NULL NULL NULL YES HASH
> +t1 0 a 3 c A NULL NULL NULL YES HASH
> +t1 0 c 1 c A NULL NULL NULL YES HASH
> +t1 0 c 2 d A 0 65535 NULL YES HASH
> +t1 0 c 3 e A 0 NULL NULL YES HASH
> +t1 0 b 1 b A 0 NULL NULL YES HASH
> +t1 0 b 2 d A 0 65535 NULL YES HASH
> +t1 0 b 3 g A 0 NULL NULL YES HASH
> +t1 0 b 4 h A 0 65535 NULL YES HASH
> +t1 0 e 1 e A 0 NULL NULL YES HASH
> +t1 0 e 2 f A 0 65535 NULL YES HASH
> +t1 0 e 3 g A 0 NULL NULL YES HASH
> +t1 0 e 4 h A 0 65535 NULL YES HASH
> +#try to change column names;
> +alter table t1 change column a aa blob , change column b bb blob , change column d dd blob;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `aa` blob DEFAULT NULL,
> + `bb` blob DEFAULT NULL,
> + `c` varchar(2000) DEFAULT NULL,
> + `dd` blob DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` longblob DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `h` text DEFAULT NULL,
> + UNIQUE KEY `a` (`aa`(65535),`bb`,`c`),
> + UNIQUE KEY `c` (`c`,`dd`(65535),`e`),
> + UNIQUE KEY `b` (`bb`,`dd`(65535),`g`,`h`(65535)),
> + UNIQUE KEY `e` (`e`,`f`(65535),`g`,`h`(65535))
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 aa A NULL 65535 NULL YES HASH
> +t1 0 a 2 bb A NULL NULL NULL YES HASH
> +t1 0 a 3 c A NULL NULL NULL YES HASH
> +t1 0 c 1 c A NULL NULL NULL YES HASH
> +t1 0 c 2 dd A 0 65535 NULL YES HASH
> +t1 0 c 3 e A 0 NULL NULL YES HASH
> +t1 0 b 1 bb A 0 NULL NULL YES HASH
> +t1 0 b 2 dd A 0 65535 NULL YES HASH
> +t1 0 b 3 g A 0 NULL NULL YES HASH
> +t1 0 b 4 h A 0 65535 NULL YES HASH
> +t1 0 e 1 e A 0 NULL NULL YES HASH
> +t1 0 e 2 f A 0 65535 NULL YES HASH
> +t1 0 e 3 g A 0 NULL NULL YES HASH
> +t1 0 e 4 h A 0 65535 NULL YES HASH
> +alter table t1 change column aa a blob , change column bb b blob , change column dd d blob;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `b` blob DEFAULT NULL,
> + `c` varchar(2000) DEFAULT NULL,
> + `d` blob DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` longblob DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `h` text DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535),`b`,`c`),
> + UNIQUE KEY `c` (`c`,`d`(65535),`e`),
> + UNIQUE KEY `b` (`b`,`d`(65535),`g`,`h`(65535)),
> + UNIQUE KEY `e` (`e`,`f`(65535),`g`,`h`(65535))
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL 65535 NULL YES HASH
> +t1 0 a 2 b A NULL NULL NULL YES HASH
> +t1 0 a 3 c A NULL NULL NULL YES HASH
> +t1 0 c 1 c A NULL NULL NULL YES HASH
> +t1 0 c 2 d A 0 65535 NULL YES HASH
> +t1 0 c 3 e A 0 NULL NULL YES HASH
> +t1 0 b 1 b A 0 NULL NULL YES HASH
> +t1 0 b 2 d A 0 65535 NULL YES HASH
> +t1 0 b 3 g A 0 NULL NULL YES HASH
> +t1 0 b 4 h A 0 65535 NULL YES HASH
> +t1 0 e 1 e A 0 NULL NULL YES HASH
> +t1 0 e 2 f A 0 65535 NULL YES HASH
> +t1 0 e 3 g A 0 NULL NULL YES HASH
> +t1 0 e 4 h A 0 65535 NULL YES HASH
> +#now we will change the data type to int and varchar limit so that we no longer require hash_index;
> +#on key a_b_c;
> +alter table t1 modify column a varchar(20) , modify column b varchar(20) , modify column c varchar(20);
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` varchar(20) DEFAULT NULL,
> + `b` varchar(20) DEFAULT NULL,
> + `c` varchar(20) DEFAULT NULL,
> + `d` blob DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` longblob DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `h` text DEFAULT NULL,
> + UNIQUE KEY `a` (`a`,`b`,`c`),
> + UNIQUE KEY `c` (`c`,`d`(65535),`e`),
> + UNIQUE KEY `b` (`b`,`d`(65535),`g`,`h`(65535)),
> + UNIQUE KEY `e` (`e`,`f`(65535),`g`,`h`(65535))
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL NULL NULL YES BTREE
> +t1 0 a 2 b A NULL NULL NULL YES BTREE
> +t1 0 a 3 c A NULL NULL NULL YES BTREE
> +t1 0 c 1 c A NULL NULL NULL YES HASH
> +t1 0 c 2 d A NULL 65535 NULL YES HASH
> +t1 0 c 3 e A NULL NULL NULL YES HASH
> +t1 0 b 1 b A 0 NULL NULL YES HASH
> +t1 0 b 2 d A 0 65535 NULL YES HASH
> +t1 0 b 3 g A 0 NULL NULL YES HASH
> +t1 0 b 4 h A 0 65535 NULL YES HASH
> +t1 0 e 1 e A 0 NULL NULL YES HASH
> +t1 0 e 2 f A 0 65535 NULL YES HASH
> +t1 0 e 3 g A 0 NULL NULL YES HASH
> +t1 0 e 4 h A 0 65535 NULL YES HASH
> +#change it back;
> +alter table t1 modify column a blob , modify column b blob , modify column c blob;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `b` blob DEFAULT NULL,
> + `c` blob DEFAULT NULL,
> + `d` blob DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` longblob DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `h` text DEFAULT NULL,
> + UNIQUE KEY `a` (`a`,`b`,`c`),
> + UNIQUE KEY `c` (`c`,`d`(65535),`e`),
> + UNIQUE KEY `b` (`b`,`d`(65535),`g`,`h`(65535)),
> + UNIQUE KEY `e` (`e`,`f`(65535),`g`,`h`(65535))
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL NULL NULL YES HASH
> +t1 0 a 2 b A NULL NULL NULL YES HASH
> +t1 0 a 3 c A NULL NULL NULL YES HASH
> +t1 0 c 1 c A NULL NULL NULL YES HASH
> +t1 0 c 2 d A 0 65535 NULL YES HASH
> +t1 0 c 3 e A 0 NULL NULL YES HASH
> +t1 0 b 1 b A 0 NULL NULL YES HASH
> +t1 0 b 2 d A 0 65535 NULL YES HASH
> +t1 0 b 3 g A 0 NULL NULL YES HASH
> +t1 0 b 4 h A 0 65535 NULL YES HASH
> +t1 0 e 1 e A 0 NULL NULL YES HASH
> +t1 0 e 2 f A 0 65535 NULL YES HASH
> +t1 0 e 3 g A 0 NULL NULL YES HASH
> +t1 0 e 4 h A 0 65535 NULL YES HASH
> +#try to delete blob column in unique;
> +truncate table t1;
> +#now try to delete keys;
> +alter table t1 drop key c, drop key e;
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `b` blob DEFAULT NULL,
> + `c` blob DEFAULT NULL,
> + `d` blob DEFAULT NULL,
> + `e` varchar(3000) DEFAULT NULL,
> + `f` longblob DEFAULT NULL,
> + `g` int(11) DEFAULT NULL,
> + `h` text DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535),`b`(65535),`c`(65535)),
> + UNIQUE KEY `b` (`b`(65535),`d`(65535),`g`,`h`(65535))
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL 65535 NULL YES HASH
> +t1 0 a 2 b A NULL 65535 NULL YES HASH
> +t1 0 a 3 c A 0 65535 NULL YES HASH
> +t1 0 b 1 b A 0 65535 NULL YES HASH
> +t1 0 b 2 d A 0 65535 NULL YES HASH
> +t1 0 b 3 g A 0 NULL NULL YES HASH
> +t1 0 b 4 h A 0 65535 NULL YES HASH
> +drop table t1;
> +#now alter table containing some data basically some tests with ignore;
> +create table t1 (a blob);
> +insert into t1 values(1),(2),(3);
> +#normal alter table;
> +alter table t1 add unique key(a);
> +alter table t1 drop key a;
> +truncate table t1;
> +insert into t1 values(1),(1),(2),(2),(3);
> +alter table t1 add unique key(a);
> +ERROR 23000: Duplicate entry '1' for key 'a'
> +alter ignore table t1 add unique key(a);
> +select * from t1 order by a;
> +a
> +1
> +2
> +3
> +insert into t1 values(1);
> +ERROR 23000: Duplicate entry '1' for key 'a'
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535))
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL 65535 NULL YES HASH
> +drop table t1;
> +#Now with multiple keys;
> +create table t1(a blob , b blob, c blob , d blob , e int);
> +insert into t1 values (1,1,1,1,1);
> +insert into t1 values (1,1,1,1,1);
> +insert into t1 values (2,1,1,1,1);
> +insert into t1 values (2,2,2,2,2);
> +insert into t1 values (3,3,4,4,4);
> +insert into t1 values (4,4,4,4,4);
> +alter table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
> +ERROR 23000: Duplicate entry '1-1' for key 'a'
> +alter ignore table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
> +select * from t1 order by a;
> +a b c d e
> +1 1 1 1 1
> +2 2 2 2 2
> +3 3 4 4 4
> +insert into t1 values (1,12,1,13,14);
> +ERROR 23000: Duplicate entry '1-1' for key 'a'
> +insert into t1 values (12,1,14,1,14);
> +ERROR 23000: Duplicate entry '1-1' for key 'b'
> +insert into t1 values (13,12,13,14,4);
> +ERROR 23000: Duplicate entry '4' for key 'e'
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + `b` blob DEFAULT NULL,
> + `c` blob DEFAULT NULL,
> + `d` blob DEFAULT NULL,
> + `e` int(11) DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(65535),`c`(65535)),
> + UNIQUE KEY `b` (`b`(65535),`d`(65535)),
> + UNIQUE KEY `e` (`e`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL 65535 NULL YES HASH
> +t1 0 a 2 c A NULL 65535 NULL YES HASH
> +t1 0 b 1 b A NULL 65535 NULL YES HASH
> +t1 0 b 2 d A 0 65535 NULL YES HASH
> +t1 0 e 1 e A 0 NULL NULL YES BTREE
> +drop table t1;
> +#visibility of db_row_hash
> +create table t1 (a blob unique , b blob unique);
> +desc t1;
> +Field Type Null Key Default Extra
> +a blob YES UNI NULL
> +b blob YES UNI NULL
> +insert into t1 values(1,19);
> +insert into t1 values(2,29);
> +insert into t1 values(3,39);
> +insert into t1 values(4,49);
> +create table t2 (DB_ROW_HASH_1 int, DB_ROW_HASH_2 int);
> +insert into t2 values(11,1);
> +insert into t2 values(22,2);
> +insert into t2 values(33,3);
> +insert into t2 values(44,4);
> +select * from t1 order by a;
> +a b
> +1 19
> +2 29
> +3 39
> +4 49
> +select * from t2 order by DB_ROW_HASH_1;
> +DB_ROW_HASH_1 DB_ROW_HASH_2
> +11 1
> +22 2
> +33 3
> +44 4
> +select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1;
> +ERROR 42S22: Unknown column 'DB_ROW_HASH_1' in 'field list'
> +#bug
> +select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2;
> +DB_ROW_HASH_1 DB_ROW_HASH_2
> +11 1
> +11 1
> +11 1
> +11 1
> +22 2
> +22 2
> +22 2
> +22 2
> +33 3
> +33 3
> +33 3
> +33 3
> +44 4
> +44 4
> +44 4
> +44 4
> +select * from t1 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
> +ERROR 42S22: Unknown column 'DB_ROW_HASH_1' in 'IN/ALL/ANY subquery'
> +select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
> +DB_ROW_HASH_1 DB_ROW_HASH_2
> +11 1
> +22 2
> +33 3
> +44 4
> +11 1
> +22 2
> +33 3
> +44 4
> +11 1
> +22 2
> +33 3
> +44 4
> +11 1
> +22 2
> +33 3
> +44 4
> +select * from t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t1);
> +DB_ROW_HASH_1 DB_ROW_HASH_2
> +11 1
> +22 2
> +33 3
> +44 4
> +select DB_ROW_HASH_1 from t1,t2 where t1.DB_ROW_HASH_1 = t2.DB_ROW_HASH_2;
> +ERROR 42S22: Unknown column 't1.DB_ROW_HASH_1' in 'where clause'
> +select DB_ROW_HASH_1 from t1 inner join t2 on t1.a = t2.DB_ROW_HASH_2;
> +DB_ROW_HASH_1
> +11
> +22
> +33
> +44
> +drop table t1,t2;
> +#very long blob entry;
> +SET @@GLOBAL.max_allowed_packet=67108864;
> +connect 'newcon', localhost, root,,;
> +connection newcon;
> +show variables like 'max_allowed_packet';
> +Variable_name Value
> +max_allowed_packet 67108864
> +create table t1(a longblob unique, b longblob , c longblob , unique(b,c));
> +desc t1;
> +Field Type Null Key Default Extra
> +a longblob YES UNI NULL
> +b longblob YES MUL NULL
> +c longblob YES NULL
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` longblob DEFAULT NULL,
> + `b` longblob DEFAULT NULL,
> + `c` longblob DEFAULT NULL,
> + UNIQUE KEY `a` (`a`),
> + UNIQUE KEY `b` (`b`,`c`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL NULL NULL YES HASH
> +t1 0 b 1 b A NULL NULL NULL YES HASH
> +t1 0 b 2 c A 0 NULL NULL YES HASH
> +insert into t1 values(concat(repeat('sachin',10000000),'1'),concat(repeat('sachin',10000000),'1'),
> +concat(repeat('sachin',10000000),'1'));
> +insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
> +concat(repeat('sachin',10000000),'1'));
> +insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
> +concat(repeat('sachin',10000000),'4'));
> +ERROR 23000: Duplicate entry 'sachinsachinsachinsachinsachinsachinsachinsachinsachinsachinsach' for key 'a'
> +insert into t1 values(concat(repeat('sachin',10000000),'3'),concat(repeat('sachin',10000000),'1'),
> +concat(repeat('sachin',10000000),'1'));
> +ERROR 23000: Duplicate entry 'sachinsachinsachinsachinsachinsachinsachinsachinsachinsachinsach' for key 'b'
> +drop table t1;
> +#long key unique with different key length
> +create table t1(a blob, unique(a(3000)));
> +desc t1;
> +Field Type Null Key Default Extra
> +a blob YES UNI NULL
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL 3000 NULL YES HASH
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(3000))
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +insert into t1 value(concat(repeat('s',3000),'1'));
> +insert into t1 value(concat(repeat('s',3000),'2'));
> +ERROR 23000: Duplicate entry 'ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss' for key 'a'
> +insert into t1 value(concat(repeat('a',3000),'2'));
> +drop table t1;
> +create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob,
> +unique(a(3500), b), unique(c(4500), d));
> +desc t1;
> +Field Type Null Key Default Extra
> +a varchar(4000) YES MUL NULL
> +b longblob YES NULL
> +c varchar(5000) YES MUL NULL
> +d longblob YES NULL
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` varchar(4000) DEFAULT NULL,
> + `b` longblob DEFAULT NULL,
> + `c` varchar(5000) DEFAULT NULL,
> + `d` longblob DEFAULT NULL,
> + UNIQUE KEY `a` (`a`(3500),`b`),
> + UNIQUE KEY `c` (`c`(4500),`d`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +show keys from t1;
> +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
> +t1 0 a 1 a A NULL 3500 NULL YES HASH
> +t1 0 a 2 b A NULL NULL NULL YES HASH
> +t1 0 c 1 c A 0 4500 NULL YES HASH
> +t1 0 c 2 d A 0 NULL NULL YES HASH
> +drop table t1;
> +disconnect newcon;
> +connection default;
> +SET @@GLOBAL.max_allowed_packet=4194304;
> +#ext bug
> +create table t1(a int primary key, b blob unique, c int, d blob , index(c));
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` int(11) NOT NULL,
> + `b` blob DEFAULT NULL,
> + `c` int(11) DEFAULT NULL,
> + `d` blob DEFAULT NULL,
> + PRIMARY KEY (`a`),
> + UNIQUE KEY `b` (`b`),
> + KEY `c` (`c`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +insert into t1 values(1,23,1,33);
> +insert into t1 values(2,23,1,33);
> +ERROR 23000: Duplicate entry '23' for key 'b'
> +drop table t1;
> +create table t2 (a blob unique , c int , index(c));
> +show create table t2;
> +Table Create Table
> +t2 CREATE TABLE `t2` (
> + `a` blob DEFAULT NULL,
> + `c` int(11) DEFAULT NULL,
> + UNIQUE KEY `a` (`a`),
> + KEY `c` (`c`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +insert into t2 values(1,1);
> +insert into t2 values(2,1);
> +drop table t2;
> +#not null test //todo solve warnings
> +create table t1(a blob unique not null);
> +desc t1;
> +Field Type Null Key Default Extra
> +a blob NO UNI NULL
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob NOT NULL,
> + UNIQUE KEY `a` (`a`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +insert into t1 values(1);
> +insert into t1 values(3);
> +insert into t1 values(1);
> +ERROR 23000: Duplicate entry '1' for key 'a'
> +drop table t1;
> +create table t1(a int primary key, b blob unique , c blob unique not null);
> +insert into t1 values(1,1,1);
> +insert into t1 values(2,1,2);
> +ERROR 23000: Duplicate entry '1' for key 'b'
> +insert into t1 values(3,3,1);
> +ERROR 23000: Duplicate entry '1' for key 'c'
> +drop table t1;
> +create table t1 (a blob unique not null, b blob not null, c blob not null, unique(b,c));
> +desc t1;
> +Field Type Null Key Default Extra
> +a blob NO UNI NULL
> +b blob NO MUL NULL
> +c blob NO NULL
> +show create table t1;
> +Table Create Table
> +t1 CREATE TABLE `t1` (
> + `a` blob NOT NULL,
> + `b` blob NOT NULL,
> + `c` blob NOT NULL,
> + UNIQUE KEY `a` (`a`),
> + UNIQUE KEY `b` (`b`,`c`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +insert into t1 values (1, 2, 3);
> +insert into t1 values (2, 1, 3);
> +insert into t1 values (2, 1, 3);
> +ERROR 23000: Duplicate entry '2' for key 'a'
> +drop table t1;
> +#partition
> +create table t1(a blob unique) partition by hash(a);
> +ERROR HY000: A BLOB field is not allowed in partition function
> +#key length > 2^15 -1
> +create table t1(a blob, unique(a(100001)));
> +ERROR 42000: Specified key was too long; max key length is 1000 bytes
> +set @@GLOBAL.max_allowed_packet= @allowed_packet;
> diff --git a/mysql-test/main/long_unique_debug.result b/mysql-test/main/long_unique_debug.result
> new file mode 100644
> --- /dev/null
> +++ b/mysql-test/main/long_unique_debug.result
> @@ -0,0 +1,32 @@
> +#In this test case we will check what will happen in the case of hash collusion
collision, not collusion :)
> +SET debug_dbug="d,same_long_unique_hash";
> +create table t1(a blob unique);
> +insert into t1 values('xyz');
> +insert into t1 values('abc');
> +insert into t1 values('sachin');
> +insert into t1 values('sachin');
> +ERROR 23000: Duplicate entry 'sachin' for key 'a'
> +insert into t1 values('maria');
> +insert into t1 values('maria');
> +ERROR 23000: Duplicate entry 'maria' for key 'a'
> +drop table t1;
> +create table t1(a blob unique, b blob unique);
> +insert into t1 values('xyz', 11);
> +insert into t1 values('abc', 22);
> +insert into t1 values('sachin', 1);
> +insert into t1 values('sachin', 4);
> +ERROR 23000: Duplicate entry 'sachin' for key 'a'
> +insert into t1 values('maria', 2);
> +insert into t1 values('maria', 3);
> +ERROR 23000: Duplicate entry 'maria' for key 'a'
> +drop table t1;
> +create table t1(a blob , b blob , unique(a,b));
> +insert into t1 values('xyz', 11);
> +insert into t1 values('abc', 22);
> +insert into t1 values('sachin', 1);
> +insert into t1 values('sachin', 1);
> +ERROR 23000: Duplicate entry 'sachin-1' for key 'a'
> +insert into t1 values('maria', 2);
> +insert into t1 values('maria', 2);
> +ERROR 23000: Duplicate entry 'maria-2' for key 'a'
> +drop table t1;
> diff --git a/mysql-test/main/mdev-504.test b/mysql-test/main/mdev-504.test
> --- a/mysql-test/main/mdev-504.test
> +++ b/mysql-test/main/mdev-504.test
> @@ -44,7 +43,7 @@ while ($trial)
> --connect (con2,localhost,root,,)
> --send CALL p_analyze()
>
> - --let $run = 100
> + --let $run = 1
Why?
>
> while ($run)
> {
> diff --git a/mysql-test/main/type_blob.result b/mysql-test/main/type_blob.result
> --- a/mysql-test/main/type_blob.result
> +++ b/mysql-test/main/type_blob.result
> @@ -370,7 +370,7 @@ a 1
> hello 1
> drop table t1;
> create table t1 (a text, unique (a(2100)));
> -ERROR 42000: Specified key was too long; max key length is 1000 bytes
> +drop table t1;
or remove the create table instead, it's not a useful test anymore
> create table t1 (a text, key (a(2100)));
> Warnings:
> Note 1071 Specified key was too long; max key length is 1000 bytes
> diff --git a/sql/field.h b/sql/field.h
> --- a/sql/field.h
> +++ b/sql/field.h
> @@ -687,7 +687,7 @@ class Field: public Value_source
> GEOM_MULTIPOINT = 4, GEOM_MULTILINESTRING = 5, GEOM_MULTIPOLYGON = 6,
> GEOM_GEOMETRYCOLLECTION = 7
> };
> - enum imagetype { itRAW, itMBR};
> + enum imagetype { itRAW, itMBR, itHASH};
unused
>
> utype unireg_check;
> uint32 field_length; // Length of field
> diff --git a/sql/handler.cc b/sql/handler.cc
> --- a/sql/handler.cc
> +++ b/sql/handler.cc
> @@ -3686,9 +3686,19 @@ void handler::print_error(int error, myf errflag)
> uint key_nr=get_dup_key(error);
> if ((int) key_nr >= 0 && key_nr < table->s->keys)
> {
> + KEY *long_key= NULL;
> + if (table->key_info[key_nr].algorithm
> + == HA_KEY_ALG_LONG_HASH)
> + {
> + long_key= table->key_info + key_nr;
> + setup_keyinfo_hash(long_key);
> + }
> print_keydup_error(table, &table->key_info[key_nr], errflag);
> + if (long_key)
> + re_setup_keyinfo_hash(long_key);
> DBUG_VOID_RETURN;
> }
> + table->re_setup_table();
> }
> textno=ER_DUP_KEY;
> break;
> @@ -6256,6 +6268,162 @@ int handler::ha_reset()
> DBUG_RETURN(reset());
> }
>
> +static int check_duplicate_long_entry_key(TABLE *table, handler *h, uchar *new_rec,
> + uint key_no)
> +{
> + Field *hash_field;
> + int result, error= 0;
> + KEY *key_info= table->key_info + key_no;
> + hash_field= key_info->key_part->field;
> + DBUG_ASSERT((table->key_info[key_no].flags & HA_NULL_PART_KEY &&
> + table->key_info[key_no].key_length == HA_HASH_KEY_LENGTH_WITH_NULL)
> + || table->key_info[key_no].key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
> + uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
> +
> + if (hash_field->is_real_null())
> + return 0;
> +
remember, you set `key_info= table->key_info + key_no` above :)
so you don't need to write `table->key_info[key_no]` here anymore
> + key_copy(ptr, new_rec, &table->key_info[key_no],
> + table->key_info[key_no].key_length, false);
> +
> + if (!table->check_unique_buf)
> + table->check_unique_buf= (uchar *)alloc_root(&table->mem_root,
> + table->s->reclength*sizeof(uchar));
C99 standard says
6.5.3.4 The sizeof operator
When applied to an operand that has type char, unsigned
char, or signed char, (or a qualified version thereof) the
result is 1.
so don't bother with `*sizeof(uchar)`
> +
> + result= h->ha_index_init(key_no, 0);
> + if (result)
> + return result;
> + result= h->ha_index_read_map(table->check_unique_buf,
> + ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT);
> + if (!result)
> + {
> + bool is_same;
> + Field * t_field;
> + Item_func_hash * temp= (Item_func_hash *)hash_field->vcol_info->expr;
> + Item ** arguments= temp->arguments();
> + uint arg_count= temp->argument_count();
> + do
> + {
> + long diff= table->check_unique_buf - new_rec;
> + is_same= true;
> + for (uint j=0; j < arg_count; j++)
you can add `is_same && ` here to the for() condition too.
> + {
> + DBUG_ASSERT(arguments[j]->type() == Item::FIELD_ITEM ||
> + // this one for left(fld_name,length)
> + arguments[j]->type() == Item::FUNC_ITEM);
> + if (arguments[j]->type() == Item::FIELD_ITEM)
> + {
> + t_field= static_cast<Item_field *>(arguments[j])->field;
> + if (t_field->cmp_offset(diff))
> + is_same= false;
> + }
> + else
> + {
> + Item_func_left *fnc= static_cast<Item_func_left *>(arguments[j]);
> + DBUG_ASSERT(!my_strcasecmp(system_charset_info, "left", fnc->func_name()));
> + DBUG_ASSERT(fnc->arguments()[0]->type() == Item::FIELD_ITEM);
> + t_field= static_cast<Item_field *>(fnc->arguments()[0])->field;
> + uint length= fnc->arguments()[1]->val_int();
> + if (t_field->cmp_max(t_field->ptr, t_field->ptr + diff, length))
> + is_same= false;
> + }
> + }
> + }
> + while (!is_same && !(result= table->file->ha_index_next_same(table->check_unique_buf,
> + ptr, table->key_info[key_no].key_length)));
> + if (is_same)
> + {
> + table->dupp_hash_key= key_no;
> + error= HA_ERR_FOUND_DUPP_KEY;
> + goto exit;
> + }
> + else
> + goto exit;
> + }
> + if (result == HA_ERR_LOCK_WAIT_TIMEOUT)
> + {
> + table->dupp_hash_key= key_no;
> + //TODO check if this is the only case
> + error= HA_ERR_FOUND_DUPP_KEY;
why not keep HA_ERR_LOCK_WAIT_TIMEOUT?
> + }
> + exit:
> + h->ha_index_end();
> + return error;
> +}
empty line between functions, please
> +/** @brief
> + check whether inserted records breaks the
> + unique constraint on long columns.
> + @returns 0 if no duplicate else returns error
> + */
> +static int check_duplicate_long_entries(TABLE *table, handler *h, uchar *new_rec)
> +{
> + table->dupp_hash_key= -1;
> + int result;
> + for (uint i= 0; i < table->s->keys; i++)
> + {
> + if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH &&
> + (result= check_duplicate_long_entry_key(table, h, new_rec, i)))
> + return result;
> + }
> + return 0;
> +}
> +
> +/** @brief
> + check whether updated records breaks the
> + unique constraint on long columns.
> + In the case of update we just need to check the specic key
> + reason for that is consider case
> + create table t1(a blob , b blob , x blob , y blob ,unique(a,b)
> + ,unique(x,y))
> + and update statement like this
> + update t1 set a=23+a; in this case if we try to scan for
> + whole keys in table then index scan on x_y will return 0
> + because data is same so in the case of update we take
> + key as a parameter in normal insert key should be -1
> + @returns 0 if no duplicate else returns error
> + */
> +static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar *new_rec)
> +{
> + Field *field;
> + uint key_parts;
> + int error= 0;
> + KEY *keyinfo;
> + KEY_PART_INFO *keypart;
> + /*
> + Here we are comparing whether new record and old record are same
> + with respect to fields in hash_str
> + */
> + long reclength= table->record[1]-table->record[0];
> + if (!table->update_handler)
> + table->clone_handler_for_update();
> + for (uint i= 0; i < table->s->keys; i++)
> + {
> + keyinfo= table->key_info + i;
> + if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
> + {
> + key_parts= fields_in_hash_keyinfo(keyinfo);
> + keypart= keyinfo->key_part - key_parts;
> + for (uint j= 0; j < key_parts; j++, keypart++)
> + {
> + field= keypart->field;
> + /* Compare fields if they are different then check for duplicates*/
> + if(field->cmp_binary_offset(reclength))
> + {
> + if((error= check_duplicate_long_entry_key(table, table->update_handler,
> + new_rec, i)))
> + goto exit;
> + /*
> + break beacuse check_duplicate_long_entrie_key will
> + take care of remaning fields
three typos in two lines :)
> + */
> + break;
> + }
> + }
> + }
> + }
> + exit:
> + return error;
> +}
>
> int handler::ha_write_row(uchar *buf)
> {
> diff --git a/sql/item.cc b/sql/item.cc
> --- a/sql/item.cc
> +++ b/sql/item.cc
> @@ -356,7 +356,7 @@ Item::Item(THD *thd):
> /* Initially this item is not attached to any JOIN_TAB. */
> join_tab_idx= MAX_TABLES;
>
> - /* Put item in free list so that we can free all items at end */
> + /* Put item in free list so that we can free all items at end only if*/
Huh?
> next= thd->free_list;
> thd->free_list= this;
> /*
> diff --git a/sql/item_func.cc b/sql/item_func.cc
> --- a/sql/item_func.cc
> +++ b/sql/item_func.cc
> @@ -1718,6 +1718,48 @@ bool Item_func_mod::fix_length_and_dec()
> DBUG_RETURN(FALSE);
> }
>
> +inline void calc_hash_for_unique(ulong &nr1, ulong &nr2, String *str)
static inline (or simply static, a compiler will inline it anyway)
> +{
> + CHARSET_INFO *cs;
> + uchar l[4];
> + int4store(l, str->length());
> + cs= str->charset();
> + cs->coll->hash_sort(cs, l, sizeof(l), &nr1, &nr2);
> + cs= str->charset();
> + cs->coll->hash_sort(cs, (uchar *)str->ptr(), str->length(), &nr1, &nr2);
> +}
> +
> +longlong Item_func_hash::val_int()
> +{
> + DBUG_EXECUTE_IF("same_long_unique_hash", return 9;);
good that you've added tests for collisions!
> + unsigned_flag= true;
> + ulong nr1= 1,nr2= 4;
> + CHARSET_INFO *cs;
> + String * str;
> + for(uint i= 0;i<arg_count;i++)
> + {
> + str = args[i]->val_str();
> + if(args[i]->null_value)
> + {
> + null_value= 1;
> + return 0;
> + }
> + calc_hash_for_unique(nr1, nr2, str);
> + }
> + null_value= 0;
> + return (longlong)nr1;
> +}
> +
> +
> +bool Item_func_hash::fix_length_and_dec()
> +{
> + maybe_null= 1;
Why are you forcing maybe_null=1?
Most functions are "normal" - they can return NULL if one of the
arguments is NULL, otherwise they don't return NULL.
And Item_func::fix_fields() sets maybe_null accordingly, it'll
be 1 if any of the arguments has maybe_null==1, otherwise
maybe_null will be 0.
You only need to set maybe_null explicitly if your function is
special, e.g. ISNULL(x) never returns NULL, and NULLIF(x,y)
can return NULL even when both arguments are not NULL.
Looks like in your case the default maybe_null behavior is
correct, though.
> + decimals= 0;
> + max_length= 8;
> + return false;
> +}
> +
> +
>
> double Item_func_neg::real_op()
> {
> diff --git a/sql/sql_base.cc b/sql/sql_base.cc
> --- a/sql/sql_base.cc
> +++ b/sql/sql_base.cc
> @@ -994,6 +994,8 @@ void close_thread_table(THD *thd, TABLE **table_ptr)
> table->file->update_global_table_stats();
> table->file->update_global_index_stats();
> }
> + if (table->update_handler)
> + table->delete_update_handler();
you're doing it in sql_update.cc, isn't it enough?
>
> /*
> This look is needed to allow THD::notify_shared_lock() to
> diff --git a/sql/sql_show.cc b/sql/sql_show.cc
> --- a/sql/sql_show.cc
> +++ b/sql/sql_show.cc
> @@ -2306,7 +2307,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
> */
> packet->append(STRING_WITH_LEN("PRIMARY KEY"));
> }
> - else if (key_info->flags & HA_NOSAME)
> + else if (key_info->flags & HA_NOSAME || key_info->algorithm == HA_KEY_ALG_LONG_HASH)
I'd think that after setup_table_hash(), HA_NOSAME should be set
and you wouldn't need extra checks.
> packet->append(STRING_WITH_LEN("UNIQUE KEY "));
> else if (key_info->flags & HA_FULLTEXT)
> packet->append(STRING_WITH_LEN("FULLTEXT KEY "));
> @@ -6546,7 +6548,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
> else if (!tables->view)
> {
> TABLE *show_table= tables->table;
> - KEY *key_info=show_table->s->key_info;
> + KEY *key_info=show_table->key_info;
you mean, TABLE_SHARE::key_info shows your HASH definition,
but you change (with setup_table_hash) TABLE::key_info
to store the original user's UNIQUE definition?
Why is TABLE_SHARE::key_info changed?
> if (show_table->file)
> {
> show_table->file->info(HA_STATUS_VARIABLE |
> diff --git a/sql/sql_table.cc b/sql/sql_table.cc
> --- a/sql/sql_table.cc
> +++ b/sql/sql_table.cc
> @@ -4106,12 +4177,28 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
> if (key->type == Key::UNIQUE && !(key_info->flags & HA_NULL_PART_KEY))
> unique_key=1;
> key_info->key_length=(uint16) key_length;
> - if (key_length > max_key_length && key->type != Key::FULLTEXT)
> + if (key_length > max_key_length && key->type != Key::FULLTEXT &&
> + !is_hash_field_needed)
> {
> my_error(ER_TOO_LONG_KEY,MYF(0),max_key_length);
> DBUG_RETURN(TRUE);
> }
>
> + if (is_hash_field_needed)
> + {
> + Create_field *hash_fld= add_hash_field(thd, &alter_info->create_list,
> + create_info->default_table_charset,
> + key_info);
> + hash_fld->offset= record_offset;
> + record_offset+= hash_fld->pack_length;
> + if (key_info->flags & HA_NULL_PART_KEY)
> + null_fields++;
> + else
> + {
> + hash_fld->flags|= NOT_NULL_FLAG;
> + hash_fld->pack_flag&= ~FIELDFLAG_MAYBE_NULL;
> + }
> + }
Hmm, haven't we agreed *not* to write the virtual hash field
into the frm, not show it to create/alter table and
add it automatically when the table is opened?
> if (validate_comment_length(thd, &key->key_create_info.comment,
> INDEX_COMMENT_MAXLEN,
> ER_TOO_LONG_INDEX_COMMENT,
> diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
> --- a/sql/sql_yacc.yy
> +++ b/sql/sql_yacc.yy
> @@ -10820,6 +10820,12 @@ function_call_conflict:
> if (unlikely($$ == NULL))
> MYSQL_YYABORT;
> }
> + | HASH_SYM '(' expr_list ')'
> + {
> + $$= new (thd->mem_root)Item_func_hash(thd,*$3);
> + if (unlikely($$ == NULL))
> + MYSQL_YYABORT;
> + }
Haven't we agreed to make this HASH() function fully internal,
not visible to the parser?
> /* LAST_VALUE here conflicts with the definition for window functions.
> We have these 2 separate rules to remove the shift/reduce conflict.
> */
> diff --git a/sql/table.cc b/sql/table.cc
> --- a/sql/table.cc
> +++ b/sql/table.cc
> @@ -747,7 +746,7 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
> if (i == 0)
> {
> ext_key_parts+= (share->use_ext_keys ? first_keyinfo->user_defined_key_parts*(keys-1) : 0);
> - n_length=keys * sizeof(KEY) + ext_key_parts * sizeof(KEY_PART_INFO);
> + n_length=keys * sizeof(KEY) + (ext_key_parts) * sizeof(KEY_PART_INFO);
Another left-over from an intermediate implementation? :)
> if (!(keyinfo= (KEY*) alloc_root(&share->mem_root,
> n_length + len)))
> return 1;
> @@ -798,6 +797,12 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
> }
> key_part->store_length=key_part->length;
> }
> + if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
> + {
> + keyinfo->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
> + //Storing key hash
> + key_part++;
> + }
I don't understand what you do here
>
> /*
> Add primary key to end of extended keys for non unique keys for
> @@ -831,7 +836,9 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
> if (j == first_key_parts)
> keyinfo->ext_key_flags= keyinfo->flags | HA_EXT_NOSAME;
> }
> - share->ext_key_parts+= keyinfo->ext_key_parts;
> + if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
> + share->ext_key_parts++;
> + share->ext_key_parts+= keyinfo->ext_key_parts;
I don't understand what you do here
> }
> keynames=(char*) key_part;
> strpos+= strnmov(keynames, (char *) strpos, frm_image_end - strpos) - keynames;
> @@ -3322,6 +3421,36 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
>
> key_part_end= key_part + (share->use_ext_keys ? key_info->ext_key_parts :
> key_info->user_defined_key_parts) ;
> + if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
> + {
> + /*
> + Either it can be first time opening the table share or it can be second time
> + of more. The difference is when it is first time key_part[0]->fieldnr points
> + to blob/long field, but when it is 2nd time there will bw always one key_part
> + and it will point to hash_field.
> + So in the case of 2nd time we will make key_info->key_part point to start of long
> + field.
you're overcomplicating here.
if you *always* create your HASH key_def in
parse_vcol_defs(), TABLE_SHARE will always be "like
the first time": you simply never modify TABLE_SHARE and always
modify TABLE in parse_vcol_defs().
and you won't need setup_keyinfo_hash/setup_table_hash functions
either.
> + For example we have unique(a,b,c)
> + In first share opening key_part will point to a field
> + but in parse_vcol_defs it will be changed to point to db_row_hash field
> + in Second or later opening key_part will be pointing to db_row_hash
> + We will chnage it back to point to field a, because in this way we can create
> + vcol_info for hash field in parse_vcol_defs.
> + */
> + //Second or more time share opening
> + key_info->user_defined_key_parts= 0;
> + key_part_end= key_part;
> + while(!(share->field[key_part_end->fieldnr -1 ]->flags & LONG_UNIQUE_HASH_FIELD))
> + {
> + key_part_end++;
> + key_info->user_defined_key_parts++;
> + }
> + key_info->usable_key_parts= key_info->ext_key_parts= key_info->user_defined_key_parts;
> + key_part_end++;
> + share_keyinfo= share->key_info + key_no;
> + if (share_keyinfo->key_part->field->flags & LONG_UNIQUE_HASH_FIELD)
> + share_keyinfo->key_part-= key_info->user_defined_key_parts;
> + }
> for ( ; key_part < key_part_end; key_part++)
> {
> Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
> @@ -8498,6 +8630,120 @@ double KEY::actual_rec_per_key(uint i)
> }
>
>
> +/*
> + find out the field positoin in hash_str()
> + position starts from 0
> + else return -1;
> +*/
> +int find_field_pos_in_hash(Item *hash_item, const char * field_name)
> +{
> + Item_func_or_sum * temp= static_cast<Item_func_or_sum *>(hash_item);
> + Item_args * t_item= static_cast<Item_args *>(temp);
> + uint arg_count= t_item->argument_count();
> + Item ** arguments= t_item->arguments();
> + Field * t_field;
> +
> + for (uint j=0; j < arg_count; j++)
> + {
> + DBUG_ASSERT(arguments[j]->type() == Item::FIELD_ITEM ||
> + arguments[j]->type() == Item::FUNC_ITEM);
> + if (arguments[j]->type() == Item::FIELD_ITEM)
> + {
> + t_field= static_cast<Item_field *>(arguments[j])->field;
> + }
> + else
> + {
> + Item_func_left *fnc= static_cast<Item_func_left *>(arguments[j]);
> + t_field= static_cast<Item_field *>(fnc->arguments()[0])->field;
> + }
> + if (!my_strcasecmp(system_charset_info, t_field->field_name.str, field_name))
> + return j;
> + }
> + return -1;
> +}
> +
> +/*
> + find total number of fields in hash expr
> +*/
> +int fields_in_hash_keyinfo(KEY *keyinfo)
> +{
> + Item_func_hash * temp= (Item_func_hash *)
> + keyinfo->key_part->field->vcol_info->expr;
> + return temp->argument_count();
> +}
> +
> +void setup_keyinfo_hash(KEY *key_info)
> +{
> + uint no_of_keyparts= fields_in_hash_keyinfo(key_info);
> + key_info->key_part-= no_of_keyparts;
> + key_info->user_defined_key_parts= key_info->usable_key_parts=
> + key_info->ext_key_parts= no_of_keyparts;
add DBUG_ASSERT to make sure it's your HA_KEY_ALG_LONG_HASH
> +}
> +
> +void re_setup_keyinfo_hash(KEY *key_info)
> +{
> + while(!(key_info->key_part->field->flags & LONG_UNIQUE_HASH_FIELD))
> + key_info->key_part++;
> + key_info->user_defined_key_parts= key_info->usable_key_parts=
> + key_info->ext_key_parts= 1;
> +}
> +/**
> + @brief clone of current handler.
> + Creates a clone of handler used in update for
> + unique hash key.
> +*/
> +void TABLE::clone_handler_for_update()
> +{
> + handler *update_handler= NULL;
> + if (!s->long_unique_table)
> + return;
> + update_handler= file->clone(s->normalized_path.str,
> + in_use->mem_root);
> + update_handler->ha_external_lock(in_use, F_RDLCK);
> + this->update_handler= update_handler;
> + return;
> +}
> +
> +/**
> + @brief Deletes update handler object
> +*/
> +void TABLE::delete_update_handler()
> +{
> + update_handler->ha_external_lock(in_use, F_UNLCK);
> + update_handler->ha_close();
> + delete update_handler;
> + this->update_handler= NULL;
> +}
> +/**
> + @brief This function makes table object with
> + long unique keys ready for optimizer and alter table
Looks like a bad function name and a bad explanation.
I think - but not sure! - that it restores the original
UNIQUE definition, with columns as specified by the user.
Is that so?
> + @param table Table object
> + */
> +void TABLE::setup_table_hash()
> +{
> +
> + if (!this->s->long_unique_table)
> + return;
> + KEY *keyinfo= key_info;
> + for (uint i= 0; i < this->s->keys; i++, keyinfo++)
> + if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
> + setup_keyinfo_hash(keyinfo);
> +}
> +
> +/**
> + @brief Revert the effect of setup_table_hash
> + @param table Table Object
> + */
> +void TABLE::re_setup_table()
> +{
> + if (!s->long_unique_table)
> + return;
> + KEY *keyinfo= key_info;
> + for (uint i= 0; i < s->keys; i++, keyinfo++)
> + if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
> + re_setup_keyinfo_hash(keyinfo);
> +}
> +
> LEX_CSTRING *fk_option_name(enum_fk_option opt)
> {
> static LEX_CSTRING names[]=
> diff --git a/sql/table.h b/sql/table.h
> --- a/sql/table.h
> +++ b/sql/table.h
> @@ -1105,6 +1120,17 @@ struct TABLE
> THD *in_use; /* Which thread uses this */
>
> uchar *record[3]; /* Pointer to records */
> + /* record buf to resolve hash collisions for long UNIQUE constraints */
> + uchar *check_unique_buf;
> + handler *update_handler; /* Handler used in case of update */
> + /*
> + In the case of write row for long unique we are unable to find
> + which key is violated. Because we in case of duplicate hash we never reach
> + handler write_row function. So print_error will always print that
> + key 0 is violated. We store which key is violated in this variable
> + by default this should be initialized to -1
> + */
> + int dupp_hash_key;
couldn't you simply store it in table->file->errkey ?
> uchar *write_row_record; /* Used as optimisation in
> THD::write_row */
> uchar *insert_values; /* used by INSERT ... UPDATE */
> diff --git a/sql/table_cache.cc b/sql/table_cache.cc
> --- a/sql/table_cache.cc
> +++ b/sql/table_cache.cc
> @@ -822,13 +822,10 @@ TABLE_SHARE *tdc_acquire_share(THD *thd, TABLE_LIST *tl, uint flags,
>
> if (res == -1)
> DBUG_RETURN(0);
> - else if (res == 1)
> - continue;
Why?
>
> element= (TDC_element*) lf_hash_search_using_hash_value(&tdc_hash,
> thd->tdc_hash_pins, hash_value, (uchar*) key, key_length);
> lf_hash_search_unpin(thd->tdc_hash_pins);
> - DBUG_ASSERT(element);
>
> if (!(share= alloc_table_share(tl->db.str, tl->table_name.str, key, key_length)))
> {
> diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c
> --- a/storage/myisam/myisamchk.c
> +++ b/storage/myisam/myisamchk.c
> @@ -1333,7 +1333,8 @@ static void descript(HA_CHECK *param, register MI_INFO *info, char * name)
> key++,keyinfo++)
> {
> keyseg=keyinfo->seg;
> - if (keyinfo->flag & HA_NOSAME) text="unique ";
> + if (keyinfo->flag & HA_NOSAME)
> + text="unique ";
I presume you can revert this change, looks like some left-over
from when you tried to make myisamchk to recognize your uniques
and then later reverted it.
> else if (keyinfo->flag & HA_FULLTEXT) text="fulltext ";
> else text="multip.";
>
>
Regards,
Sergei
Chief Architect MariaDB
and security(a)mariadb.org
2
2
Re: [Maria-developers] 4b01d3aee60: MDEV-17082 Application-time periods: CREATE
by Sergei Golubchik 12 Feb '19
by Sergei Golubchik 12 Feb '19
12 Feb '19
Hi, Nikita!
On Jan 31, Nikita Malyavin wrote:
> revision-id: 4b01d3aee60 (versioning-1.0.7-2-g4b01d3aee60)
> parent(s): 44144533e50
> author: Nikita Malyavin <nikitamalyavin(a)gmail.com>
> committer: Nikita Malyavin <nikitamalyavin(a)gmail.com>
> timestamp: 2019-01-30 22:53:43 +1000
> message:
>
> MDEV-17082 Application-time periods: CREATE
>
> * add syntax `CREATE TABLE ... PERIOD FOR <apptime>`
> * add table period entity
>
> diff --git a/mysql-test/suite/period/r/create.result b/mysql-test/suite/period/r/create.result
> --- /dev/null
> +++ b/mysql-test/suite/period/r/create.result
> @@ -0,0 +1,82 @@
> +create or replace table t (id int primary key, s date, e date,
> +period for mytime(s,e));
> +show create table t;
> +Table Create Table
> +t CREATE TABLE `t` (
> + `id` int(11) NOT NULL,
> + `s` date NOT NULL,
> + `e` date NOT NULL,
> + PRIMARY KEY (`id`),
> + PERIOD FOR `mytime` (`s`, `e`),
> + CONSTRAINT `CONSTRAINT_1` CHECK (`s` < `e`)
as discussed, let's give the constraint the same name as the period.
but don't forget to add a test where a user explicitly creates a
constraint with this name. Like
create or replace table t (id int primary key, s date, e date,
period for mytime(s,e), constraint mytime (id < 100));
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +create or replace table t (id int primary key, s timestamp(6), e timestamp(6),
> +period for mytime(s,e));
> +show create table t;
> +Table Create Table
> +t CREATE TABLE `t` (
> + `id` int(11) NOT NULL,
> + `s` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
> + `e` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
> + PRIMARY KEY (`id`),
> + PERIOD FOR `mytime` (`s`, `e`),
> + CONSTRAINT `CONSTRAINT_1` CHECK (`s` < `e`)
> +) ENGINE=MyISAM DEFAULT CHARSET=latin1
> +# SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)a)
> +# 2) If a <table period definition> TPD is specified, then:
> +# a) <table scope> shall not be specified.
> +create or replace temporary table t (s date, e date, period for mytime(s,e));
> +ERROR HY000: Temporary tables with application-time period not allowed
_are_ not allowed.
or, better,
Application-time period table cannot be temporary
> +# SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)e)iii)
> +# The <data type or domain name> contained in CD1 is either DATE or a
> +# timestamp type and it is equivalent to the <data type or domain name>
> +# contained in CD2.
> +create or replace table t (id int primary key, s datetime, e date,
> +period for mytime(s,e));
> +ERROR HY000: PERIOD FOR `mytime` fields types mismatch
_field_ type mismatch
or
Fields `s` and `t` of PERIOD FOR `mytime` have different types.
or
Fields of PERIOD FOR `mytime` have different types.
> +create or replace table t (s timestamp(2), e timestamp(6),
> +period for mytime(s,e));
> +ERROR HY000: PERIOD FOR `mytime` fields types mismatch
> +create or replace table t (id int primary key, s int, e date,
> +period for mytime(s,e));
> +ERROR 42000: Incorrect column specifier for column 's'
incorrect column _type_?
> +create or replace table t (id int primary key, s date, e date,
> +period for mytime(s,x));
> +ERROR 42S22: Unknown column 'x' in 'mytime'
> +create or replace table t (id int primary key, s date, e date,
> +period for mytime(s,e),
> +period for mytime2(s,e));
> +ERROR HY000: Cannot specify more than one application-time period
> +# SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)d)
> +# No <column name> in any <column definition> shall be equivalent to PN.
> +create or replace table t (mytime int, s date, e date,
> +period for mytime(s,e));
> +ERROR HY000: Could not specify name `mytime` for field. It is already used by period.
Better "Column and period `mytime` have the same name".
Because "already used" implies that period got the name first, while in
the table definition the field was specified first.
> +# SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)e)v)2)A)
> +# Neither CD1 nor CD2 shall contain an <identity column specification>, a
> +# <generation clause>, a <system time period start column specification>,
> +# or a <system time period end column specification>.
> +create or replace table t (id int primary key,
> +s date,
> +e date generated always as (s+1),
> +period for mytime(s,e));
> +ERROR HY000: Period field `e` cannot be GENERATED ALWAYS AS
> +create or replace table t (id int primary key,
> +s date,
> +e date as (s+1) VIRTUAL,
> +period for mytime(s,e));
> +ERROR HY000: Period field `e` cannot be GENERATED ALWAYS AS
> +create or replace table t (id int primary key, s timestamp(6), e timestamp(6),
> +st timestamp(6) as row start,
> +en timestamp(6) as row end,
> +period for system_time (st, en),
> +period for mytime(st,e)) with system versioning;
> +ERROR HY000: Period field `st` cannot be GENERATED ALWAYS AS
> +# SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)
> +# Let IDCN be an implementation-dependent <constraint name> that is not
> +# equivalent to the <constraint name> of any table constraint descriptor
> +# included in S.
> +create or replace table t (x int, s date, e date,
> +period for mytime(s, e),
> +constraint mytime check (x > 1));
please, add SHOW CREATE TABLE here (add it after every successful CREATE TABLE).
> +create or replace database test;
> diff --git a/sql/datadict.cc b/sql/datadict.cc
> --- a/sql/datadict.cc
> +++ b/sql/datadict.cc
> @@ -292,6 +292,12 @@ bool dd_read_extra2(const uchar *frm_image, size_t len, extra2_fields *fields)
We've discussed that TRUNCATE TABLE bug fix, it won't need dd_read_extra2().
You still want to have a separate function for reading extra2, fine, but
as dd_frm_type() won't needed it, I'd rather move it into table.cc
and declared it static.
In fact, if you'd like to keep extra2 code separate, you could move
everything extra2 related to a separate object (like your extra2_fields)
with methods for reading and writing it. But, please, don't do this
refactoring before March.
> fields->field_flags.str= extra2;
> fields->field_flags.length= length;
> break;
> + case EXTRA2_APPLICATION_TIME_PERIOD:
> + if (fields->application_period.str)
> + DBUG_RETURN(true);
> + fields->application_period.str= extra2;
> + fields->application_period.length= length;
> + break;
> default:
> /* abort frm parsing if it's an unknown but important extra2 value */
> if (type >= EXTRA2_ENGINE_IMPORTANT)
> diff --git a/sql/handler.cc b/sql/handler.cc
> --- a/sql/handler.cc
> +++ b/sql/handler.cc
> @@ -7352,3 +7352,99 @@ bool Vers_parse_info::check_sys_fields(const Lex_table_name &table_name,
>
> +bool Table_scope_and_contents_source_st::check_fields(
> + THD *thd, Alter_info *alter_info, TABLE_LIST &create_table)
> +{
> + bool res= vers_check_system_fields(thd, alter_info, create_table);
> + if (res)
> + return true;
> +
> + if (!period_info.name)
> + return false;
> +
> + if (tmp_table())
> + {
> + my_error(ER_PERIOD_TEMPORARY_NOT_ALLOWED, MYF(0));
> + return true;
> + }
> +
> + Table_period_info::start_end_t &period= period_info.period;
> + const Create_field *row_start= NULL;
> + const Create_field *row_end= NULL;
> + List_iterator<Create_field> it(alter_info->create_list);
> + while (const Create_field *f= it++)
> + {
> + if (period.start.streq(f->field_name)) row_start= f;
> + else if (period.end.streq(f->field_name)) row_end= f;
> +
> + if (period_info.name.streq(f->field_name))
> + {
> + // TODO this stub should be removed by MDEV-16976
why removed?
> + my_error(ER_PERIOD_NAME_IS_NOT_ALLOWED_FOR_FIELD, MYF(0), f->field_name.str);
> + return true;
> + }
> + }
>
> diff --git a/sql/table.h b/sql/table.h
> --- a/sql/table.h
> +++ b/sql/table.h
> @@ -1730,6 +1747,9 @@ class IS_table_read_plan;
> /** The threshold size a blob field buffer before it is freed */
> #define MAX_TDC_BLOB_SIZE 65536
>
> +/** number of bytes read by uint2korr and sint2korr */
> +#define korr2size 2
No, that's silly. It's like #define TWO 2
Of course korr2size is 2. And korr3size is 3, and korr4size is 4.
If you want to get rid of the magic number, don't put it into the name
of the macro :)
That's why I suggested something like
#define fieldno_size 2
#define fieldno_korr uint2korr
#define fieldno_store int2store
Regards,
Sergei
Chief Architect MariaDB
and security(a)mariadb.org
2
5
A final reminder about the MariaDB Developers Unconference taking place
23 - 24 February in New York, just before OpenWorks.
We're almost at capacity, so if you haven't signed up yet, please do so
soon. Details about where to sign up and how to suggest sessions are at
https://mariadb.org/2019-developers-unconference-new-york/
Hope to see you there!
1
0
10 Feb '19
Hi Sergei!
Can you review that you are happy with the storage engine API changes?
I've structured the commit to be as small as possible to achieve the
desired outcome. In my tests, we are now twice as fast as MySQL for a
10 mil row table with 13 columns.
Vicențiu
-------- Forwarded Message --------From: vicentiu(a)mariadb.orgTo:
commits(a)mariadb.orgSubject: 53730224efd: Improve histogram collection
performance by samplingDate: Sun, 10 Feb 2019 20:09:49 +0200 (EET)
revision-id: 53730224efd987f97a6cc968ff5214ee499d84e0 (mariadb-10.4.1-
163-g53730224efd)parent(s):
3c305d3f1951f1667f84e48ddd98674c6318c39dauthor: Vicențiu
Ciorbarucommitter: Vicențiu Ciorbarutimestamp: 2019-02-10 19:54:50
+0200message:
Improve histogram collection performance by sampling
Histogram collection is done by sampling a percentage of rows from the
table,not looking at all individual ones.
The default implementation, to keep the server's Engine Independent
Statistics component working, uses Bernoulli sampling. It does a simple
table scan, but only accepts rows that pass a dice roll. This
implementation is done as a storage engine interface method, so as to
allow faster and/or more efficient implementations for storage engines
internally.
The number of rows collected is capped to a minimum of 50000 and
increases logarithmically with a coefficient of 4096. The coefficient is
chosen so that we expect an error of less than 3% in our estimations
according to the paper: "Random Sampling for Histogram Construction: How
much is enough?" – Surajit Chaudhuri, Rajeev Motwani, Vivek Narasayya,
ACM SIGMOD, 1998.
This interface is also a precursor to allowing SELECT ... FROM table
with sampling to work.
Performance wise, for a table of 10 million rows and 13 columns (6 ints,
6 doubles, one string), the previous analyze table statistics took
1 minute 20 seconds to collect all data. Post implementation, the time is
less than 6 seconds. This was run on an InnoDB table, NVME SSD with
approximately 2GB/s read speed and 500MB/s write speed.
--- mysql-test/main/selectivity.result | 8 +++---- mysql-
test/main/selectivity_innodb.result | 8 +++--
-- sql/handler.cc | 14
+++++++++++ sql/handler.h | 40
++++++++++++++++++++++++++++++- sql/sql_statistics.cc
| 32 +++++++++++++++++++------ 5 files changed, 86 insertions(+), 16
deletions(-)
diff --git a/mysql-test/main/selectivity.result b/mysql-
test/main/selectivity.resultindex 00907235ecc..6d09c1c9b62 100644---
a/mysql-test/main/selectivity.result+++ b/mysql-
test/main/selectivity.result@@ -1290,9 +1290,9 @@ explain
extended select * from t1, t2, t1 as t3 where t1.b=t2.c and t2.d=t3.a
and t3.b<5 and t1.a < 2000; id select_type table type possibl
e_keys key key_len ref rows filtered Extra-1 SIMPLE
t1 ALL NULL NULL NULL NULL 262144 100.00 Using
where+1 SIMPLE t1 ALL NULL NULL NULL NULL 262117
100.00 Using where 1 SIMPLE t2 ref c,d c 5
test.t1.b 5 100.00 -1 SIMPLE t3 ALL NULL
NULL NULL NULL 262144 100.00 Using where; Using join buffer
(flat, BNL join)+1 SIMPLE t3 ALL NULL NULL NULL
NULL 262117 100.00 Using where; Using join buffer (flat, BNL
join) Warnings: Note 1003 select `test`.`t1`.`a` AS
`a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS
`d`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b` from `test`.`t1` join
`test`.`t2` join `test`.`t1` `t3` where `test`.`t2`.`c` =
`test`.`t1`.`b` and `test`.`t3`.`a` = `test`.`t2`.`d` and
`test`.`t3`.`b` < 5 and `test`.`t1`.`a` < 2000 select * from t1, t2, t1
as t3@@ -1307,9 +1307,9 @@ explain extended select * from t1, t2, t1 as
t3 where t1.b=t2.c and t2.d=t3.a and t3.b<5 and t1.a < 2000; id select_
type table type possible_keys key key_len ref rows
filtered Extra-1 SIMPLE t3 ALL NULL NULL NULL
NULL 262144 0.00 Using where+1 SIMPLE t3 ALL NULL
NULL NULL NULL 262117 0.00 Using where 1 SIMPLE t2
ref c,d d 5 test.t3.a 7 100.00 -1
SIMPLE t1 ALL NULL NULL NULL NULL 262144 2.00
Using where; Using join buffer (flat, BNL join)+1 SIMPLE t1
ALL NULL NULL NULL NULL 262117 2.00 Using where;
Using join buffer (flat, BNL join) Warnings: Note 1003 select
`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS
`c`,`test`.`t2`.`d` AS `d`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS
`b` from `test`.`t1` join `test`.`t2` join `test`.`t1` `t3` where
`test`.`t1`.`b` = `test`.`t2`.`c` and `test`.`t2`.`d` = `test`.`t3`.`a`
and `test`.`t3`.`b` < 5 and `test`.`t1`.`a` < 2000 select * from t1,
t2, t1 as t3diff --git a/mysql-test/main/selectivity_innodb.result
b/mysql-test/main/selectivity_innodb.resultindex
93917065722..0b20a40f69f 100644--- a/mysql-
test/main/selectivity_innodb.result+++ b/mysql-
test/main/selectivity_innodb.result@@ -1300,9 +1300,9 @@ explain
extended select * from t1, t2, t1 as t3 where t1.b=t2.c and t2.d=t3.a
and t3.b<5 and t1.a < 2000; id select_type table type possibl
e_keys key key_len ref rows filtered Extra-1 SIMPLE
t1 ALL NULL NULL NULL NULL 262144 100.00 Using
where+1 SIMPLE t1 ALL NULL NULL NULL NULL 262117
100.00 Using where 1 SIMPLE t2 ref c,d c 5
test.t1.b 5 100.00 -1 SIMPLE t3 ALL NULL
NULL NULL NULL 262144 100.00 Using where; Using join buffer
(flat, BNL join)+1 SIMPLE t3 ALL NULL NULL NULL
NULL 262117 100.00 Using where; Using join buffer (flat, BNL
join) Warnings: Note 1003 select `test`.`t1`.`a` AS
`a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS
`d`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b` from `test`.`t1` join
`test`.`t2` join `test`.`t1` `t3` where `test`.`t2`.`c` =
`test`.`t1`.`b` and `test`.`t3`.`a` = `test`.`t2`.`d` and
`test`.`t3`.`b` < 5 and `test`.`t1`.`a` < 2000 select * from t1, t2, t1
as t3@@ -1317,9 +1317,9 @@ explain extended select * from t1, t2, t1 as
t3 where t1.b=t2.c and t2.d=t3.a and t3.b<5 and t1.a < 2000; id select_
type table type possible_keys key key_len ref rows
filtered Extra-1 SIMPLE t3 ALL NULL NULL NULL
NULL 262144 0.00 Using where+1 SIMPLE t3 ALL NULL
NULL NULL NULL 262117 0.00 Using where 1 SIMPLE t2
ref c,d d 5 test.t3.a 7 100.00 -1
SIMPLE t1 ALL NULL NULL NULL NULL 262144 2.00
Using where; Using join buffer (flat, BNL join)+1 SIMPLE t1
ALL NULL NULL NULL NULL 262117 2.00 Using where;
Using join buffer (flat, BNL join) Warnings: Note 1003 select
`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS
`c`,`test`.`t2`.`d` AS `d`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS
`b` from `test`.`t1` join `test`.`t2` join `test`.`t1` `t3` where
`test`.`t1`.`b` = `test`.`t2`.`c` and `test`.`t2`.`d` = `test`.`t3`.`a`
and `test`.`t3`.`b` < 5 and `test`.`t1`.`a` < 2000 select * from t1,
t2, t1 as t3diff --git a/sql/handler.cc b/sql/handler.ccindex
2e45b5883ea..16fb126918e 100644--- a/sql/handler.cc+++
b/sql/handler.cc@@ -7547,3 +7547,17 @@ bool
Vers_parse_info::check_sys_fields(const Lex_table_name
&table_name, "ROW END" : found_flag ? "ROW START" : "ROW
START/END"); return true; }++int handler::random_sample(uchar
*buf)+{+ int rc;+ THD *thd= ha_thd();+ do+ {+ if (table->in_use-
>check_killed(1))+ return HA_ERR_ABORTED_BY_USER;+ rc=
rnd_next(buf);+ } while (rc == HA_ERR_RECORD_DELETED || thd_rnd(thd) >
sample_fraction);++ return rc;+}diff --git a/sql/handler.h
b/sql/handler.hindex dfb2333b24e..fcdadbeb42b 100644---
a/sql/handler.h+++ b/sql/handler.h@@ -1913,6 +1913,11 @@ enum
enum_stats_auto_recalc { HA_STATS_AUTO_RECALC_DEFAULT=
0, HA_STATS_AUTO_RECALC_ON,
HA_STATS_AUTO_RECALC_OFF }; +enum sample_mode
{+ HA_SAMPLE_BERNOULLI= 0,+ HA_SAMPLE_SYSTEM+};+ /** A helper
struct for schema DDL statements: CREATE SCHEMA [IF NOT EXISTS]
name [ schema_specification... ]@@ -2947,9 +2952,11 @@ class handler
:public Sql_alloc /** Length of ref (1-8 or the clustered key length)
*/ uint ref_length; FT_INFO *ft_handler;- enum init_stat { NONE=0,
INDEX, RND };+ enum init_stat { NONE=0, INDEX, RND, SAMPLE
}; init_stat inited, pre_inited; + double sample_fraction= 0;+ enum
sample_mode sample_mode; const COND
*pushed_cond; /** next_insert_id is the next value which should
be inserted into the@@ -3112,6 +3119,31 @@ class handler :public
Sql_alloc virtual int prepare_range_scan(const key_range *start_key,
const key_range *end_key) { return 0; } + int
ha_random_sample_init(THD *thd, enum sample_mode mode, double
fraction)+ __attribute__((warn_unused_result))+ {+ DBUG_ENTER("h
a_random_sample_init");+ DBUG_ASSERT(inited==NONE);+ int
result;+ sample_mode= mode;+ sample_fraction=
fraction;+ inited= (result= random_sample_init(mode, fraction)) ?
NONE : SAMPLE;+ DBUG_RETURN(result);+ }+ int
ha_random_sample(uchar
*buf)+ __attribute__((warn_unused_result))+ {+ DBUG_ENTER("ha_ra
ndom_sample");+ DBUG_ASSERT(inited ==
SAMPLE);+ DBUG_RETURN(random_sample(buf));+ }+ int
ha_random_sample_end()+ {+ DBUG_ENTER("ha_random_sample_end");+
inited= NONE;+ DBUG_RETURN(random_sample_end());+ }+ int
ha_rnd_init(bool scan) __attribute__
((warn_unused_result)) { DBUG_EXECUTE_IF("ha_rnd_init_fail",
return HA_ERR_TABLE_DEF_CHANGED;);@@ -4425,6 +4457,12 @@ class handler
:public Sql_alloc /* Note: ha_index_read_idx_map() may bypass
index_init() */ virtual int index_init(uint idx, bool sorted) {
return 0; } virtual int index_end() { return 0; }+ virtual int
random_sample_init(enum sample_mode mode, double
fraction)+ {+ return rnd_init(TRUE);+ }+ virtual int
random_sample(uchar *buf);+ virtual int random_sample_end() { return
rnd_end(); } /** rnd_init() can be called two times without
rnd_end() in between (it only makes sense if scan=1).diff --git
a/sql/sql_statistics.cc b/sql/sql_statistics.ccindex
db214a1fe28..22a015821de 100644--- a/sql/sql_statistics.cc+++
b/sql/sql_statistics.cc@@ -2727,12 +2727,16 @@ int
collect_statistics_for_table(THD *thd, TABLE *table) Field
*table_field; ha_rows rows= 0; handler *file=table->file;+ double
sample_fraction;+ const ha_rows MIN_THRESHOLD_FOR_SAMPLING=
50000; DBUG_ENTER("collect_statistics_for_table"); table-
>collected_stats->cardinality_is_null= TRUE; table->collected_stats-
>cardinality= 0; + table->file->info(HA_STATUS_VARIABLE);+ for
(field_ptr= table->field; *field_ptr; field_ptr++) { table_field=
*field_ptr; @@ -2743,12 +2747,27 @@ int
collect_statistics_for_table(THD *thd, TABLE
*table) restore_record(table, s->default_values); - /* Perform a
full table scan to collect statistics on 'table's columns */- if
(!(rc= file->ha_rnd_init(TRUE)))- { ++ if (file->records() <
MIN_THRESHOLD_FOR_SAMPLING)+ {+ sample_fraction=
1;+ }+ else+ {+ sample_fraction=
std::fmin(+ (MIN_THRESHOLD_FOR_SAMPLING + 4096
*+ log(200 * file->records())) / file->records(),
1);+ }+++ /* Fetch samples from the table to collect statistics on
table's columns */++ if (!(rc= file->ha_random_sample_init(thd,
HA_SAMPLE_BERNOULLI,+ sample_fra
ction)))+ { DEBUG_SYNC(table->in_use,
"statistics_collection_start"); - while ((rc= file-
>ha_rnd_next(table->record[0])) != HA_ERR_END_OF_FILE)+ while ((rc=
file->ha_random_sample(table->record[0])) !=
HA_ERR_END_OF_FILE) { if (thd->killed) break;@@
-2768,10 +2787,9 @@ int collect_statistics_for_table(THD *thd, TABLE
*table) break; rows++; }- file-
>ha_rnd_end();+ file->ha_random_sample_end(); } rc= (rc ==
HA_ERR_END_OF_FILE && !thd->killed) ? 0 : 1;- /* Calculate
values for all statistical characteristics on columns and and for
each field f of 'table' save them in the write_stat structure@@ -2780,7
+2798,7 @@ int collect_statistics_for_table(THD *thd, TABLE
*table) if (!rc) { table->collected_stats->cardinality_is_null=
FALSE;- table->collected_stats->cardinality= rows;+ table-
>collected_stats->cardinality= rows /
sample_fraction; } bitmap_clear_all(table->write_set);
1
0
Hi Varun,
I've made some adjustments to MDEV-18489 and pushed the patch into
10.4-optimizer-trace, please check it out.
Also I have filed MDEV-18527 and MDEV-18528.
Some input on the code:
> --- 10.4-optimizer-trace-orig/sql/sql_select.cc
> +++ 10.4-optimizer-trace-cl/sql/sql_select.cc
> @@ -15983,12 +16250,26 @@ optimize_cond(JOIN *join, COND *conds,
> that occurs in a function set a pointer to the multiple equality
> predicate. Substitute a constant instead of this field if the
> multiple equality contains a constant.
> - */
> - DBUG_EXECUTE("where", print_where(conds, "original", QT_ORDINARY););
> + */
> +
> + Opt_trace_context *const trace = &thd->opt_trace;
> + Json_writer *writer= trace->get_current_json();
> + Json_writer_object trace_wrapper(writer);
> + Json_writer_object trace_cond(writer, "condition_processing");
> + trace_cond.add("condition", join->conds == conds ? "WHERE" : "HAVING")
> + .add("original_condition", conds);
> +
> + Json_writer_array trace_steps(writer, "steps");
> + DBUG_EXECUTE("where", print_where(conds, "original", QT_ORDINARY););
Small question: Why was DBUG_EXECUTE shifted right?
A bigger question: the code seems unnecessarily verbose:
1. > + Opt_trace_context *const trace = &thd->opt_trace;
2. > + Json_writer *writer= trace->get_current_json();
3. > + Json_writer_object trace_wrapper(writer);
4. > + Json_writer_object trace_cond(writer, "condition_processing");
Can we save the space by just calling the constructors:
Json_writer_object trace_wrapper(thd);
Json_writer_object trace_cond(thd, "condition_processing");
?
This applies here and in many other places.
Alternative, we could use:
Json_writer_object trace_wrapper(thd);
Json_writer_object trace_cond(trace_wrapper, "condition_processing");
.. which makes the nesting clearer (and we could also add a debug safety check: it
is invalid to operate on trace_wrapper until trace_cond has been end()'ed)
> --- 10.4-optimizer-trace-orig/sql/opt_range.cc
> +++ 10.4-optimizer-trace-cl/sql/opt_range.cc
>
> +void TRP_ROR_UNION::trace_basic_info(const PARAM *param,
> + Json_writer_object *trace_object) const
> +{
> + Opt_trace_context *const trace = ¶m->thd->opt_trace;
> + Json_writer* writer= trace->get_current_json();
> + trace_object->add("type", "index_roworder_union");
> + Json_writer_array ota(writer, "union_of");
The name 'ota' makes sense in MySQL codebase (where it is a contraction of
Optimizer_trace_array), but is really confusing in MariaDB codebase. Please
change everywhere to "smth_trace" or something like that (jwa in the worst
case).
> @@ -2654,12 +2833,18 @@ int SQL_SELECT::test_quick_select(THD *t
>
> if (cond)
> {
> - if ((tree= cond->get_mm_tree(¶m, &cond)))
> + {
> + Json_writer_array trace_range_summary(writer,
> + "setup_range_conditions");
> + tree= cond->get_mm_tree(¶m, &cond);
> + }
Does this ever produce anything meaningful, other than empty:
"setup_range_conditions": [],
In MySQL, the possible contents of this array are:
"impossible_condition": {
"cause": "comparison_with_null_always_false"
} /* impossible_condition */
"impossible_condition": {
"cause": "null_field_in_non_null_column"
} /* impossible_condition */
But in MariaDB we don't have this (should we?)
Also, why_use_underscores_in_value_of_cause? It is a quoted string where
spaces are allowed. MySQL seems to have figured this out at some point and have
a few cause strings using spaces.
> --- 10.4-optimizer-trace-orig/sql/opt_table_elimination.cc
> +++ 10.4-optimizer-trace-cl/sql/opt_table_elimination.cc
> @@ -522,7 +524,8 @@ eliminate_tables_for_list(JOIN *join,
> List<TABLE_LIST> *join_list,
> table_map tables_in_list,
> Item *on_expr,
> - table_map tables_used_elsewhere);
> + table_map tables_used_elsewhere,
> + Json_writer_array* eliminate_tables);
Please change the name to something indicating it's just a trace.
BR
Sergei
--
Sergei Petrunia, Software Developer
MariaDB Corporation | Skype: sergefp | Blog: http://s.petrunia.net/blog
1
1
Re: [Maria-developers] b1a4d1e4937: MDEV-16973 Application-time periods: DELETE
by Sergei Golubchik 08 Feb '19
by Sergei Golubchik 08 Feb '19
08 Feb '19
Hi, Nikita!
On Jan 31, Nikita Malyavin wrote:
> revision-id: b1a4d1e4937 (versioning-1.0.7-3-gb1a4d1e4937)
> parent(s): 4b01d3aee60
> author: Nikita Malyavin <nikitamalyavin(a)gmail.com>
> committer: Nikita Malyavin <nikitamalyavin(a)gmail.com>
> timestamp: 2019-01-30 22:54:00 +1000
> message:
>
> MDEV-16973 Application-time periods: DELETE
>
> * inject portion of time updates into mysql_delete main loop
> * triggered case emits delete+insert, no updates
> * PORTION OF `SYSTEM_TIME` is forbidden
> * `DELETE HISTORY .. FOR PORTION OF ...` is forbidden as well
> diff --git a/mysql-test/suite/period/r/delete.result b/mysql-test/suite/period/r/delete.result
> --- /dev/null
> +++ b/mysql-test/suite/period/r/delete.result
> @@ -0,0 +1,244 @@
> +create or replace table t (id int, s date, e date, period for apptime(s,e));
> +insert into t values(1, '1999-01-01', '2018-12-12');
> +insert into t values(1, '1999-01-01', '2017-01-01');
> +insert into t values(1, '2017-01-01', '2019-01-01');
> +insert into t values(2, '1998-01-01', '2018-12-12');
> +insert into t values(3, '1997-01-01', '2015-01-01');
> +insert into t values(4, '2016-01-01', '2020-01-01');
> +insert into t values(5, '2010-01-01', '2015-01-01');
> +create or replace table t1 (id int, s date, e date, period for apptime(s,e));
> +insert t1 select * from t;
> +create or replace table t2 (id int, s date, e date, period for apptime(s,e));
> +insert t2 select * from t;
> +delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
> +delete from t1 for portion of APPTIME from '2000-01-01' to '2018-01-01';
> +select * from t;
> +id s e
> +1 1999-01-01 2000-01-01
> +1 1999-01-01 2000-01-01
> +1 2018-01-01 2018-12-12
> +1 2018-01-01 2019-01-01
> +2 1998-01-01 2000-01-01
> +2 2018-01-01 2018-12-12
> +3 1997-01-01 2000-01-01
> +4 2018-01-01 2020-01-01
> +select * from t1;
> +id s e
> +1 1999-01-01 2000-01-01
> +1 1999-01-01 2000-01-01
> +1 2018-01-01 2018-12-12
> +1 2018-01-01 2019-01-01
> +2 1998-01-01 2000-01-01
> +2 2018-01-01 2018-12-12
> +3 1997-01-01 2000-01-01
> +4 2018-01-01 2020-01-01
> +select * from log_tbl;
> +id log
here (and everywhere selecting from log_tbl), better do ORDER BY id
it's quite difficult to follow the sequence of events otherwise
> +1 >DEL: 1, 1999-01-01, 2018-12-12
> +10 <INS: 1, 1999-01-01, 2000-01-01
> +11 >DEL: 1, 2017-01-01, 2019-01-01
> +12 <DEL: 1, 2017-01-01, 2019-01-01
> +13 >INS: 1, 2018-01-01, 2019-01-01
> +14 <INS: 1, 2018-01-01, 2019-01-01
> +15 >DEL: 2, 1998-01-01, 2018-12-12
> +16 <DEL: 2, 1998-01-01, 2018-12-12
> +17 >INS: 2, 1998-01-01, 2000-01-01
> +18 <INS: 2, 1998-01-01, 2000-01-01
> +19 >INS: 2, 2018-01-01, 2018-12-12
> +2 <DEL: 1, 1999-01-01, 2018-12-12
> +20 <INS: 2, 2018-01-01, 2018-12-12
> +21 >DEL: 3, 1997-01-01, 2015-01-01
> +22 <DEL: 3, 1997-01-01, 2015-01-01
> +23 >INS: 3, 1997-01-01, 2000-01-01
> +24 <INS: 3, 1997-01-01, 2000-01-01
> +25 >DEL: 4, 2016-01-01, 2020-01-01
> +26 <DEL: 4, 2016-01-01, 2020-01-01
> +27 >INS: 4, 2018-01-01, 2020-01-01
> +28 <INS: 4, 2018-01-01, 2020-01-01
> +29 >DEL: 5, 2010-01-01, 2015-01-01
> +3 >INS: 1, 1999-01-01, 2000-01-01
> +30 <DEL: 5, 2010-01-01, 2015-01-01
> +4 <INS: 1, 1999-01-01, 2000-01-01
> +5 >INS: 1, 2018-01-01, 2018-12-12
> +6 <INS: 1, 2018-01-01, 2018-12-12
> +7 >DEL: 1, 1999-01-01, 2017-01-01
> +8 <DEL: 1, 1999-01-01, 2017-01-01
> +9 >INS: 1, 1999-01-01, 2000-01-01
> +# INSERT trigger only also works
> +drop trigger tr1del_t2;
> +drop trigger tr2del_t2;
> +delete from t2 for portion of APPTIME from '2000-01-01' to '2018-01-01';
> +select * from log_tbl;
> +id log
> +1 >INS: 1, 1999-01-01, 2000-01-01
> +10 <INS: 2, 1998-01-01, 2000-01-01
> +11 >INS: 2, 2018-01-01, 2018-12-12
> +12 <INS: 2, 2018-01-01, 2018-12-12
> +13 >INS: 3, 1997-01-01, 2000-01-01
> +14 <INS: 3, 1997-01-01, 2000-01-01
> +15 >INS: 4, 2018-01-01, 2020-01-01
> +16 <INS: 4, 2018-01-01, 2020-01-01
> +2 <INS: 1, 1999-01-01, 2000-01-01
> +3 >INS: 1, 2018-01-01, 2018-12-12
> +4 <INS: 1, 2018-01-01, 2018-12-12
> +5 >INS: 1, 1999-01-01, 2000-01-01
> +6 <INS: 1, 1999-01-01, 2000-01-01
> +7 >INS: 1, 2018-01-01, 2019-01-01
> +8 <INS: 1, 2018-01-01, 2019-01-01
> +9 >INS: 2, 1998-01-01, 2000-01-01
> +# multi-table DELETE is not possible
> +delete t, t1 from t1, t for portion of apptime from '2000-01-01' to '2018-01-01';
> +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'for portion of apptime from '2000-01-01' to '2018-01-01'' at line 1
> +delete t for portion of apptime from '2000-01-01' to '2018-01-01', t1 from t, t1;
> +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'for portion of apptime from '2000-01-01' to '2018-01-01', t1 from t, t1' at line 1
> +# Here another check fails before parsing ends
> +delete t, t1 from t for portion of apptime from '2000-01-01' to '2018-01-01', t1;
> +ERROR 42S02: Unknown table 't1' in MULTI DELETE
> +delete history from t2 for portion of apptime from '2000-01-01' to '2018-01-01';
> +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'for portion of apptime from '2000-01-01' to '2018-01-01'' at line 1
> +delete from t for portion of othertime from '2000-01-01' to '2018-01-01';
> +ERROR HY000: Period `othertime` is not found in table
> +delete from t for portion of system_time from '2000-01-01' to '2018-01-01';
> +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'system_time' at line 1
> +create or replace table t (id int, str text, s date, e date,
> +period for apptime(s,e));
> +insert into t values(1, 'data', '1999-01-01', '2018-12-12');
> +insert into t values(1, 'other data', '1999-01-01', '2018-12-12');
> +insert into t values(1, 'deleted', '2000-01-01', '2018-01-01');
> +delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
> +show warnings;
> +Level Code Message
> +select * from t;
> +id str s e
> +1 data 1999-01-01 2000-01-01
> +1 data 2018-01-01 2018-12-12
> +1 other data 1999-01-01 2000-01-01
> +1 other data 2018-01-01 2018-12-12
> +drop table t1;
> +# SQL16, Part 2, 15.7 <Effect of deleting rows from base tables>,
> +# General rules, 8)b)i)
> +# If the column descriptor that corresponds to the i-th field of BR
> +# describes an identity column, a generated column, a system-time period
> +# start column, or a system-time period end column, then let V i be
> +# DEFAULT.
> +# auto_increment field is updated
> +create or replace table t (id int primary key auto_increment, s date, e date,
> +period for apptime(s, e));
> +insert into t values (default, '1999-01-01', '2018-12-12');
> +select * from t;
> +id s e
> +1 1999-01-01 2018-12-12
> +delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
> +select * from t;
> +id s e
> +2 1999-01-01 2000-01-01
> +3 2018-01-01 2018-12-12
> +truncate t;
> +# same for trigger case
> +insert into t values (default, '1999-01-01', '2018-12-12');
> +delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
> +select * from t;
> +id s e
> +2 1999-01-01 2000-01-01
> +3 2018-01-01 2018-12-12
> +select * from log_tbl;
> +id log
> +1 >DEL: 1999-01-01, 2018-12-12
> +2 <DEL: 1999-01-01, 2018-12-12
> +3 >INS: 1999-01-01, 2000-01-01
> +4 <INS: 1999-01-01, 2000-01-01
> +5 >INS: 2018-01-01, 2018-12-12
> +6 <INS: 2018-01-01, 2018-12-12
> +# auto_increment field overflow
> +create or replace table t (id tinyint auto_increment primary key,
> +s date, e date, period for apptime(s,e));
> +insert into t values(127, '1999-01-01', '2018-12-12');
> +delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
> +ERROR 22003: Out of range value for column 'id' at row 1
Please add a `select * from t` here, so the result file shows the state of the table after the failed DELETE.
> +# same for trigger case
> +delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
> +ERROR 22003: Out of range value for column 'id' at row 1
Same here — and please also add a select from log_tbl, to show what the triggers logged before the error.
> +# generated columns are updated
> +create or replace table t (s date, e date,
> +xs date as (s) stored, xe date as (e) stored,
> +period for apptime(s, e));
> +insert into t values('1999-01-01', '2018-12-12', default, default);
> +select * from t;
> +s e xs xe
> +1999-01-01 2018-12-12 1999-01-01 2018-12-12
> +delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
> +select * from t;
> +s e xs xe
> +1999-01-01 2000-01-01 1999-01-01 2000-01-01
> +2018-01-01 2018-12-12 2018-01-01 2018-12-12
> +truncate t;
> +# same for trigger case
> +insert into t values('1999-01-01', '2018-12-12', default, default);
> +delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
> +select * from t;
> +s e xs xe
> +1999-01-01 2000-01-01 1999-01-01 2000-01-01
> +2018-01-01 2018-12-12 2018-01-01 2018-12-12
> +select * from log_tbl;
> +id log
> +1 >DEL: 1999-01-01, 2018-12-12
> +2 <DEL: 1999-01-01, 2018-12-12
> +3 >INS: 1999-01-01, 2000-01-01
> +4 <INS: 1999-01-01, 2000-01-01
> +5 >INS: 2018-01-01, 2018-12-12
> +6 <INS: 2018-01-01, 2018-12-12
> +# View can't be used
> +create or replace view v as select * from t;
> +delete from v for portion of p from '2000-01-01' to '2018-01-01';
> +ERROR 42S02: 'v' is a view
> +# system_time columns are updated
> +create or replace table t (
> +s date, e date,
> +row_start SYS_TYPE as row start invisible,
> +row_end SYS_TYPE as row end invisible,
> +period for apptime(s, e),
> +period for system_time (row_start, row_end)) with system versioning;
> +insert into t values('1999-01-01', '2018-12-12'),
> +('1999-01-01', '1999-12-12');
> +select row_start into @ins_time from t limit 1;
> +select * from t order by s, e;
> +s e
> +1999-01-01 1999-12-12
> +1999-01-01 2018-12-12
> +delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
> +select *, if(row_start = @ins_time, "OLD", "NEW"), check_row(row_start, row_end)
> +from t for system_time all
> +order by s, e, row_start;
> +s e if(row_start = @ins_time, "OLD", "NEW") check_row(row_start, row_end)
> +1999-01-01 1999-12-12 OLD CURRENT ROW
> +1999-01-01 2000-01-01 NEW CURRENT ROW
> +1999-01-01 2018-12-12 OLD HISTORICAL ROW
> +2018-01-01 2018-12-12 NEW CURRENT ROW
> +# same for trigger case
> +delete from t;
> +delete history from t;
> +insert into t values('1999-01-01', '2018-12-12'),
> +('1999-01-01', '1999-12-12');
> +select row_start into @ins_time from t limit 1;
> +select * from t order by s, e;
> +s e
> +1999-01-01 1999-12-12
> +1999-01-01 2018-12-12
> +delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
> +select *, if(row_start = @ins_time, "OLD", "NEW"), check_row(row_start, row_end)
> +from t for system_time all
> +order by s, e, row_start;
> +s e if(row_start = @ins_time, "OLD", "NEW") check_row(row_start, row_end)
> +1999-01-01 1999-12-12 OLD CURRENT ROW
> +1999-01-01 2000-01-01 NEW CURRENT ROW
> +1999-01-01 2018-12-12 OLD HISTORICAL ROW
> +2018-01-01 2018-12-12 NEW CURRENT ROW
> +select * from log_tbl;
> +id log
> +1 >DEL: 1999-01-01, 2018-12-12
> +2 <DEL: 1999-01-01, 2018-12-12
> +3 >INS: 1999-01-01, 2000-01-01
> +4 <INS: 1999-01-01, 2000-01-01
> +5 >INS: 2018-01-01, 2018-12-12
> +6 <INS: 2018-01-01, 2018-12-12
> +create or replace database test;
> diff --git a/mysql-test/suite/versioning/t/select.test b/mysql-test/suite/versioning/t/select.test
> --- a/mysql-test/suite/versioning/t/select.test
> +++ b/mysql-test/suite/versioning/t/select.test
> @@ -107,6 +107,32 @@ for system_time as of timestamp @t0 as t;
> drop table t1;
> drop table t2;
>
> +# Query conditions check
> +
> +create or replace table t1(x int) with system versioning;
> +insert into t1 values (1);
> +delete from t1;
> +insert into t1 values (2);
> +delete from t1;
> +insert into t1 values (3);
> +delete from t1;
> +
> +select row_start into @start1 from t1 for system_time all where x = 1;
> +select row_end into @end1 from t1 for system_time all where x = 1;
> +select row_start into @start2 from t1 for system_time all where x = 2;
> +select row_end into @end2 from t1 for system_time all where x = 2;
> +select row_start into @start3 from t1 for system_time all where x = 3;
> +select row_end into @end3 from t1 for system_time all where x = 3;
> +
> +select x as ASOF_x from t1 for system_time as of @start2;
> +select x as ASOF_x from t1 for system_time as of @end2;
> +select x as FROMTO_x from t1 for system_time from @start1 to @end3;
> +select x as FROMTO_x from t1 for system_time from @end1 to @start2;
> +select x as BETWAND_x from t1 for system_time between @start1 and @end3;
> +select x as BETWAND_x from t1 for system_time between @end1 and @start2;
> +
> +drop table t1;
what does that have to do with MDEV-16973?
> +
> # Wildcard expansion on hidden fields
>
> create table t1(
> @@ -233,9 +259,9 @@ select x from t1 for system_time as of @trx_start;
> --echo ### Issue #365, bug 4 (related to #226, optimized fields)
> create or replace table t1 (i int, b int) with system versioning;
> insert into t1 values (0, 0), (0, 0);
> -select min(i) over (partition by b) as f
> -from (select i + 0 as i, b from t1) as tt
> -order by i;
> +#select min(i) over (partition by b) as f
> +#from (select i + 0 as i, b from t1) as tt
> +#order by i;
Why is this test commented out? If it exposes a bug in the patch, it should be fixed rather than disabled.
>
> --echo ### Issue #365, bug 5 (dangling AND)
> create or replace table t1 (a int);
> diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
> --- a/sql/sql_delete.cc
> +++ b/sql/sql_delete.cc
> @@ -245,6 +245,48 @@ static bool record_should_be_deleted(THD *thd, TABLE *table, SQL_SELECT *sel,
> return false;
> }
>
> +inline
> +int TABLE::update_portion_of_time(THD *thd,
> + const vers_select_conds_t &period_conds,
> + bool *inside_period)
I don't understand why you want to keep this very much DELETE-only
functionality in the TABLE class which is used everywhere.
And what's the point of pretending it's in a common TABLE class,
if it can only be used in sql_delete.cc? I find it quite confusing :(
> +{
> + bool lcond= period_conds.field_start->val_datetime_packed(thd)
> + < period_conds.start.item->val_datetime_packed(thd);
> + bool rcond= period_conds.field_end->val_datetime_packed(thd)
> + > period_conds.end.item->val_datetime_packed(thd);
> +
> + *inside_period= !lcond && !rcond;
> + if (*inside_period)
> + return 0;
> +
> + DBUG_ASSERT(!triggers || (!triggers->has_triggers(TRG_EVENT_INSERT,
> + TRG_ACTION_BEFORE)
> + && !triggers->has_triggers(TRG_EVENT_INSERT,
> + TRG_ACTION_AFTER)
> + && !triggers->has_delete_triggers()));
> +
> + int res= 0;
> + Item *src= lcond ? period_conds.start.item : period_conds.end.item;
> + uint dst_fieldno= lcond ? s->period.end_fieldno : s->period.start_fieldno;
> +
> + store_record(this, record[1]);
> + if (likely(!res))
> + res= src->save_in_field(field[dst_fieldno], true);
> +
> + if (likely(!res))
> + res= update_generated_fields();
> +
> + if(likely(!res))
> + res= file->ha_update_row(record[1], record[0]);
> +
> + restore_record(this, record[1]);
> +
> + if (likely(!res) && lcond && rcond)
> + res= period_make_insert(period_conds.end.item,
> + field[s->period.start_fieldno]);
> +
> + return res;
> +}
>
> inline
> int TABLE::delete_row()
> @@ -672,6 +736,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
>
> table->mark_columns_needed_for_delete();
>
> + if (table_list->has_period())
> + table->use_all_columns();
may be even
if (table_list->has_period())
table->use_all_columns()
else
table->mark_columns_needed_for_delete();
> +
> if ((table->file->ha_table_flags() & HA_CAN_FORCE_BULK_DELETE) &&
> !table->prepare_triggers_for_delete_stmt_or_event())
> will_batch= !table->file->start_bulk_delete();
> @@ -727,6 +794,16 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
> delete_record= true;
> }
>
> + /*
> + From SQL2016, Part 2, 15.7 <Effect of deleting rows from base table>,
> +    General Rules, 8), we can conclude that DELETE FOR PORTION OF time performs
> + 0-2 INSERTS + DELETE. We can substitute INSERT+DELETE with one UPDATE, but
> + only if there are no triggers set.
> +    It is also meaningless for system-versioned tables.
> + */
> + portion_of_time_through_update= !has_triggers
> + && !table->versioned(VERS_TIMESTAMP);
I still don't understand why you disable portion_of_time_through_update
for VERS_TIMESTAMP, but not for VERS_TRX_ID.
> +
> THD_STAGE_INFO(thd, stage_updating);
> while (likely(!(error=info.read_record())) && likely(!thd->killed) &&
> likely(!thd->is_error()))
> diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
> --- a/sql/sql_lex.cc
> +++ b/sql/sql_lex.cc
> @@ -3576,6 +3577,20 @@ void LEX::set_trg_event_type_for_tables()
> break;
> }
>
> + if (period_conditions.is_set())
> + {
> + switch (sql_command)
> + {
> + case SQLCOM_DELETE:
> + case SQLCOM_UPDATE:
> + case SQLCOM_REPLACE:
> + new_trg_event_map |= static_cast<uint8>
> + (1 << static_cast<int>(TRG_EVENT_INSERT));
I've added a helper for this recently, use
new_trg_event_map |= trg2bit(TRG_EVENT_INSERT);
> + default:
> + break;
> + }
> + }
> +
>
> /*
> Do not iterate over sub-selects, only the tables in the outermost
> diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
> --- a/sql/sql_yacc.yy
> +++ b/sql/sql_yacc.yy
> @@ -9295,6 +9296,22 @@ history_point:
> $$= Vers_history_point($1, $2);
> }
> ;
> +opt_for_portion_of_time_clause:
> + /* empty */
> + {
> + $$= false;
> + }
> + | FOR_SYM PORTION_SYM OF_SYM ident FROM history_point TO_SYM history_point
history_point allows TIMESTAMP '2010-10-10 10:10:10' and
TRANSACTION 1234.
You don't need any of that, just use a corresponding expression
rule. E.g. bit_expr, like history_point does.
> + {
> + if (unlikely(0 == strcasecmp($4.str, "SYSTEM_TIME")))
> + {
> + thd->parse_error(ER_SYNTAX_ERROR, $4.str);
no, for the error message to look correct you need to pass the pointer
into the query text. It's usually done like this:
FOR_SYM PORTION_SYM OF_SYM remember_tok_start ident FROM ...
{
...
thd->parse_error(ER_SYNTAX_ERROR, $4);
> + MYSQL_YYABORT;
> + }
> + $$= true;
> + Lex->period_conditions.init(SYSTEM_TIME_FROM_TO, $6, $8, $4);
> + }
> + ;
>
> opt_for_system_time_clause:
> /* empty */
Regards,
Sergei
Chief Architect MariaDB
and security(a)mariadb.org
2
3