developers
Threads by month
- ----- 2025 -----
- April
- March
- February
- January
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2009 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
June 2010
- 19 participants
- 200 discussions

[Maria-developers] Rev 2795: Bugfixes in file:///home/bell/maria/bzr/work-maria-5.3-scache/
by sanja@askmonty.org 05 Jun '10
by sanja@askmonty.org 05 Jun '10
05 Jun '10
At file:///home/bell/maria/bzr/work-maria-5.3-scache/
------------------------------------------------------------
revno: 2795
revision-id: sanja(a)askmonty.org-20100605195727-7rrc5k75lr0a4o9z
parent: sanja(a)askmonty.org-20100527182744-1tu96cgyiaodzs32
committer: sanja(a)askmonty.org
branch nick: work-maria-5.3-scache
timestamp: Sat 2010-06-05 22:57:27 +0300
message:
Bugfixes
=== modified file 'mysql-test/r/myisam_mrr.result'
--- a/mysql-test/r/myisam_mrr.result 2010-03-11 21:43:31 +0000
+++ b/mysql-test/r/myisam_mrr.result 2010-06-05 19:57:27 +0000
@@ -394,7 +394,7 @@
# - engine_condition_pushdown does not affect ICP
select @@optimizer_switch;
@@optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_condition_pushdown=on,firstmatch=on,loosescan=on,materialization=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_condition_pushdown=on,firstmatch=on,loosescan=on,materialization=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on
create table t0 (a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (a int, b int, key(a));
=== modified file 'mysql-test/r/subquery_cache.result'
--- a/mysql-test/r/subquery_cache.result 2010-05-27 17:41:38 +0000
+++ b/mysql-test/r/subquery_cache.result 2010-06-05 19:57:27 +0000
@@ -588,4 +588,28 @@
Subquery_cache_hit 0
Subquery_cache_miss 4
drop table t1;
+#test of sql_big_tables switch and outer table reference in subquery with grouping
+set option sql_big_tables=1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT);
+INSERT INTO t1 VALUES (1,1),(2,1),(3,2),(4,2),(5,3),(6,3);
+SELECT (SELECT t1_outer.a FROM t1 AS t1_inner GROUP BY b LIMIT 1) FROM t1 AS t1_outer;
+(SELECT t1_outer.a FROM t1 AS t1_inner GROUP BY b LIMIT 1)
+1
+2
+3
+4
+5
+6
+drop table t1;
+set option sql_big_tables=0;
+#test of function reference to outer query
+set local group_concat_max_len=400;
+create table t2 (a int, b int);
+insert into t2 values (1,1), (2,2);
+select b x, (select group_concat(x) from t2) from t2;
+x (select group_concat(x) from t2)
+1 1,1
+2 2,2
+drop table t2;
+set local group_concat_max_len=default;
set optimizer_switch='subquery_cache=default';
=== modified file 'mysql-test/t/subquery_cache.test'
--- a/mysql-test/t/subquery_cache.test 2010-05-27 17:41:38 +0000
+++ b/mysql-test/t/subquery_cache.test 2010-06-05 19:57:27 +0000
@@ -201,4 +201,20 @@
show status like "subquery_cache%";
drop table t1;
+--echo #test of sql_big_tables switch and outer table reference in subquery with grouping
+set option sql_big_tables=1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT);
+INSERT INTO t1 VALUES (1,1),(2,1),(3,2),(4,2),(5,3),(6,3);
+SELECT (SELECT t1_outer.a FROM t1 AS t1_inner GROUP BY b LIMIT 1) FROM t1 AS t1_outer;
+drop table t1;
+set option sql_big_tables=0;
+
+--echo #test of function reference to outer query
+set local group_concat_max_len=400;
+create table t2 (a int, b int);
+insert into t2 values (1,1), (2,2);
+select b x, (select group_concat(x) from t2) from t2;
+drop table t2;
+set local group_concat_max_len=default;
+
set optimizer_switch='subquery_cache=default';
=== modified file 'sql/item.cc'
--- a/sql/item.cc 2010-05-27 17:41:38 +0000
+++ b/sql/item.cc 2010-06-05 19:57:27 +0000
@@ -5110,6 +5110,19 @@
}
+/**
+ Saves one Fields of an Item of in other Field
+
+ @param from Field to copy value from
+ @param null_value reference on item null_value to set it if it is needed
+ @param to Field to cope value to
+ @param no_conversions how to deal with NULL value (see
+ set_field_to_null_with_conversions())
+
+ @retval FALSE OK
+ @retval TRUE Error
+*/
+
static int save_field_in_field(Field *from, my_bool *null_value,
Field *to, bool no_conversions)
{
@@ -5139,6 +5152,10 @@
int Item_field::save_in_field(Field *to, bool no_conversions)
{
+ /* if it is external field */
+ if (unlikely(depended_from))
+ return save_field_in_field(field, &null_value, to, no_conversions);
+
return save_field_in_field(result_field, &null_value, to, no_conversions);
}
@@ -6346,7 +6363,7 @@
int Item_ref::save_in_field(Field *to, bool no_conversions)
{
int res;
- if (result_field)
+ if (result_field && !depended_from)
return save_field_in_field(result_field, &null_value, to, no_conversions);
res= (*ref)->save_in_field(to, no_conversions);
null_value= (*ref)->null_value;
=== modified file 'sql/item_subselect.cc'
--- a/sql/item_subselect.cc 2010-05-25 18:29:14 +0000
+++ b/sql/item_subselect.cc 2010-06-05 19:57:27 +0000
@@ -1,4 +1,4 @@
-/* Copyrigh (C) 2000 MySQL AB
+/* Copyright (C) 2000 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -818,6 +818,12 @@
exec();
}
+/**
+ Checks subquery cache for value
+
+ @retval NULL nothing found
+ @retval reference on item representing value found in the cache
+*/
Item *Item_subselect::check_cache()
{
=== modified file 'sql/item_subselect.h'
--- a/sql/item_subselect.h 2010-05-24 17:29:56 +0000
+++ b/sql/item_subselect.h 2010-06-05 19:57:27 +0000
@@ -95,7 +95,10 @@
st_select_lex *parent_select;
/**
- List of items subquery depends on (externally resolved);
+ List of references on items subquery depends on (externally resolved);
+
+ @note We can't store direct links on Items because it could be
+ substituted with other item (for example for grouping).
*/
List<Item*> depends_on;
=== modified file 'sql/sql_subquery_cache.cc'
--- a/sql/sql_subquery_cache.cc 2010-05-27 18:27:44 +0000
+++ b/sql/sql_subquery_cache.cc 2010-06-05 19:57:27 +0000
@@ -96,6 +96,10 @@
/**
Creates equalities expression.
+ @note For some type of fields index lookup do not return failure but set
+ pointer on the next record. To check exact match we use expression like:
+ field1=value1 and field2=value2 ...
+
@retval FALSE OK
@retval TRUE Error
*/
@@ -111,6 +115,7 @@
for (uint i= 1 /* skip result filed */; (ref= li++); i++)
{
Field *fld= cache_table->field[i];
+ /* Only some field types should be checked after lookup */
if (fld->type() == MYSQL_TYPE_VARCHAR ||
fld->type() == MYSQL_TYPE_TINY_BLOB ||
fld->type() == MYSQL_TYPE_MEDIUM_BLOB ||
@@ -140,11 +145,22 @@
}
+/**
+ Enumerates all fields in field number order.
+
+ @param arg reference on current field number
+
+ @return field number
+*/
+
static uint field_enumerator(uchar *arg)
{
return ((uint*)arg)[0]++;
}
+/**
+ Initializes temporary table and index for this cache
+*/
void Subquery_cache_tmptable::init()
{
@@ -182,8 +198,10 @@
if (!(cache_table= create_tmp_table(table_thd, &cache_table_param,
items, (ORDER*) NULL,
FALSE, FALSE,
- (table_thd->options |
- TMP_TABLE_ALL_COLUMNS),
+ ((table_thd->options |
+ TMP_TABLE_ALL_COLUMNS) &
+ ~(OPTION_BIG_TABLES |
+ TMP_TABLE_FORCE_MYISAM)),
HA_POS_ERROR,
(char *)"subquery-cache-table")))
{
@@ -191,14 +209,16 @@
DBUG_VOID_RETURN;
}
- if (cache_table->s->blob_fields)
+ if (cache_table->s->db_type() != heap_hton)
{
- DBUG_PRINT("error", ("we do not need blobs"));
+ DBUG_PRINT("error", ("we need only heap table"));
goto error;
}
+ /* first field in the table is result value, so we skip it */
li_items++;
field_counter=1;
+
if (cache_table->alloc_keys(1) ||
(cache_table->add_tmp_key(0, items.elements - 1,
&field_enumerator,
@@ -224,6 +244,7 @@
DBUG_PRINT("error", ("Creating Item_field failed"));
goto error;
}
+
if (make_equalities())
{
DBUG_PRINT("error", ("Creating equalities failed"));
@@ -247,11 +268,26 @@
}
+/**
+ Checks if current key present in the cache and returns value if it is true
+
+ @param value assigned Item with value from the cache if key
+ is found
+ @return result of the key lookup
+*/
+
Subquery_cache::result Subquery_cache_tmptable::check_value(Item **value)
{
int res;
DBUG_ENTER("Subquery_cache_tmptable::check_value");
+ /*
+ We delay cache initialization to get item references which should be
+ used at the moment of query execution. I.e. we store reference on item
+ reference at the moment of class creation but for table creation and
+ index supply structures (join_tab) we need real Items which used at the
+ moment of execution so we can resolve reference only at this point.
+ */
if (!inited)
init();
@@ -275,6 +311,15 @@
}
+/**
+ Puts given value in the cache
+
+ @param value Value to put in the cache
+
+ @retval FALSE OK
+ @retval TRUE Error
+*/
+
my_bool Subquery_cache_tmptable::put_value(Item *value)
{
int error;
@@ -313,9 +358,3 @@
cache_table= NULL;
DBUG_RETURN(TRUE);
}
-
-
-void Subquery_cache_tmptable::cleanup()
-{
- cache_table->file->ha_delete_all_rows();
-}
=== modified file 'sql/sql_subquery_cache.h'
--- a/sql/sql_subquery_cache.h 2010-05-25 10:45:36 +0000
+++ b/sql/sql_subquery_cache.h 2010-06-05 19:57:27 +0000
@@ -23,10 +23,6 @@
Puts value into this cache (key should be taken from cache owner)
*/
virtual my_bool put_value(Item *value)= 0;
- /**
- Cleans up and reset cache before reusing
- */
- virtual void cleanup()= 0;
};
struct st_table_ref;
@@ -45,10 +41,9 @@
virtual ~Subquery_cache_tmptable();
virtual result check_value(Item **value);
virtual my_bool put_value(Item *value);
- virtual void cleanup();
+
+private:
void init();
-
-private:
bool make_equalities();
/* tmp table parameters */
=== modified file 'sql/table.cc'
--- a/sql/table.cc 2010-05-27 17:41:38 +0000
+++ b/sql/table.cc 2010-06-05 19:57:27 +0000
@@ -5187,10 +5187,16 @@
key_part_info->store_length= key_part_info->length;
if ((*reg_field)->real_maybe_null())
+ {
key_part_info->store_length+= HA_KEY_NULL_LENGTH;
+ keyinfo->key_length+= HA_KEY_NULL_LENGTH;
+ }
if ((*reg_field)->type() == MYSQL_TYPE_BLOB ||
(*reg_field)->real_type() == MYSQL_TYPE_VARCHAR)
+ {
key_part_info->store_length+= HA_KEY_BLOB_LENGTH;
+ keyinfo->key_length+= HA_KEY_BLOB_LENGTH; // ???
+ }
key_part_info->type= (uint8) (*reg_field)->key_type();
key_part_info->key_type =
1
0
From dispatch_command() in sql_parse.cc, net_end_statement() is called after
ha_autocommit_or_rollback() but before close_thread_tables(). What can go
wrong in the call to close_thread_tables() after the response to the client?
Commit or rollback was done before a response was sent to the client.
/* If commit fails, we should be able to reset the OK status. */
thd->main_da.can_overwrite_status= TRUE;
ha_autocommit_or_rollback(thd, thd->is_error());
thd->main_da.can_overwrite_status= FALSE;
thd->transaction.stmt.reset();
net_end_statement(thd);
query_cache_end_of_result(thd);
thd->proc_info= "closing tables";
/* Free tables */
close_thread_tables(thd);
--
Mark Callaghan
mdcallag(a)gmail.com
2
1

Re: [Maria-developers] [Bug 314570] Re: update is not changing internal auto increment value
by Sergei Golubchik 05 Jun '10
by Sergei Golubchik 05 Jun '10
05 Jun '10
Hi, Michael!
On Jun 04, Michael Widenius wrote:
>
> hi!
>
> >>>>> "Sergei" == Sergei <sergii(a)pisem.net> writes:
>
> Sergei> ** Changed in: maria
> Sergei> Importance: Undecided => Low
>
> Sergei> --
> Sergei> update is not changing internal auto increment value
> Sergei> https://bugs.launchpad.net/bugs/314570
>
> Why low ?
>
> Looks like a serious issue that we should get Percona to fix at once!
Because Heikki said it's not a bug, but intentional InnoDB behavior.
I'm not sure we should fix it at all. Heikki is certainly not fixing
it.
Regards,
Sergei
2
1

04 Jun '10
All,
MariaDB 5.2.1 is getting closer to being released so I've started
filling out the Release Notes and Changelog pages:
http://askmonty.org/wiki/Manual:MariaDB_5.2.1_Release_Notes
http://askmonty.org/wiki/Manual:MariaDB_5.2.1_Changelog
On the documentation TODO list for this release is a page on the OQGraph
storage engine for the manual. Any volunteers? :) (I'll get to it next
week, but if someone wants to put something up right away I wouldn't
object.)
Thanks.
--
Daniel Bartholomew
Monty Program - http://askmonty.org
1
0

Re: [Maria-developers] [Commits] Rev 2802: few small MySQL bugs/issues that impact the engines, as discussed in the SE summit in http://bazaar.launchpad.net/~maria-captains/maria/5.2/
by Sergei Golubchik 03 Jun '10
by Sergei Golubchik 03 Jun '10
03 Jun '10
Hi, Monty!
Thanks for the review!
See my replies below.
On Jun 03, Michael Widenius wrote:
>
> > At http://bazaar.launchpad.net/~maria-captains/maria/5.2/
> > ------------------------------------------------------------
> > revno: 2802
>
> > few small MySQL bugs/issues that impact the engines, as discussed in the SE summit
> > * remove handler::index_read_last()
> > * create handler::keyread_read_time() (was get_index_only_read_time() in opt_range.cc)
> > * ha_show_status() allows engine's show_status() to fail
> > * remove HTON_FLUSH_AFTER_RENAME
> > * fix key_cmp_if_same() to work for floats and doubles
> > * set table->status in the server, don't force engines to do it
> > * increment status vars in the server, don't force engines to do it
>
> > +++ b/mysql-test/r/status_user.result 2010-06-01 22:39:29 +0000
> > @@ -100,8 +100,8 @@ Handler_commit 19
> > Handler_delete 1
> > Handler_discover 0
> > Handler_prepare 18
> > -Handler_read_first 1
> > -Handler_read_key 8
> > +Handler_read_first 0
> > +Handler_read_key 3
>
> Any explanation why this change happened (as the test didn't change
> and I can't understand how the values could suddently be less now).
This change is correct. Before my commit, calls were counted in the
handler, say, in the index_first() and index_next().
And ha_innobase::rnd_next() is implemented by calling
index_first/index_next.
So, innodb was incrementing Handler_read_first and Handler_read_next for
table scans (and of course it was incrementing Handler_read_rnd_next too
- double counting).
It was wrong - first, it was double counting. Second, Handler_*
should count handler calls as done by mysql, not expose internal
implementation of the engine. For example, mi_rfirst() calls mi_rnext()
internally, but we don't count it as Handler_read_next. The same should
be true for any engine, even if it mixes implementation levels.
> By the way, it would be nice if the file comments would be part of the
> commit email (as I assume you documented this issue there).
I have not :(
But I will, when I recommit.
> > +++ b/mysql-test/r/partition_pruning.result 2010-06-01 22:39:29 +0000
> > @@ -2373,7 +2373,7 @@ flush status;
> > update t1 set a=100 where a+1=5+1;
> > show status like 'Handler_read_rnd_next';
> > Variable_name Value
> > -Handler_read_rnd_next 10
> > +Handler_read_rnd_next 19
>
> Any explanation why this change happened (as the test didn't change)
> Is it because we don't anymore count rows read in 'show' commands?
This is questionable change, I wanted to discuss it.
ha_partition::index_next (for example) calls underlying engine's
file->ha_index_next(), not file->index_next().
After my change Handler_read_key_next is incremented for both
ha_partition::index_next and file->index_next(). Double counting.
Before my change when a partition was pruned, Handler_read_key* counters
were not incremented at all (as ha_partition::index_read did not call
file->index_read() at all). Now it is incremented - that's why the
numbers are increased.
Possible solutions:
* do not increment Handler_read* counters for ha_partition methods,
only count calls to the underlying engines.
* do not increment Handler_read* counters for underlying engines - only
count calls from the upper layer into the handler, this is logical but
counters won't show partition pruning or handler call overhead caused by
many partitions. this can be solved by adding special set of counters
Handler_partition_read_* (or something).
> > === modified file 'sql/handler.cc'
> > --- a/sql/handler.cc 2010-06-01 19:52:20 +0000
> > +++ b/sql/handler.cc 2010-06-01 22:39:29 +0000
> > @@ -2131,8 +2125,6 @@ int handler::read_first_row(uchar * buf,
> > register int error;
> > DBUG_ENTER("handler::read_first_row");
> >
> > - ha_statistic_increment(&SSV::ha_read_first_count);
>
> The above is wrong; We are later calling 'index_first()' in this
> function, not ha_index_first(), so we miss one increment (which was
> shown in the test cases). Note that we do also call rnd_next() in
> this function, without any counting of rows so we need to fix other
> things in this function too!
That's fine, the counter is incremented in ha_read_first_row() wrapper.
If anything, the old code was wrong as it was incrementing
ha_read_first_count twice (once here and once in index_first).
> Simplest solution is to change to call ha_index_first / ha_rnd_next()
> in this function. This will also fix the 'table->status' variable that
> your are not counting anymore.
> This should be ok as we very seldom use 'handler::read_first_row()'
see above. I think it's ok to just increment ha_read_first_count in the
wrapper. Especially because read_first_row() is rarely used.
> Note that we should do same change in other functions that are calling
> handler functions directly:
>
> handler::read_range_first
> - This calls index_first() and index_read_map()
> get_auto_increment()
> - This calls index_last() and index_read_map()
> index_read_idx_map()
> - This calls index_read_map()
> - Note that we can't trivially change this to call ha_index_read_map()
> as we increment things statistics in ha_index_read_idx_map()
> - We need to update table->status in this function!
Yes. But read_range_first() for example has no dedicated counter, so
either it increments Handler_read_key* counters in the default
implementation, or it increments nothing at all when any engine provides
its own implementation :(
> > +/*
> > + Calculate cost of 'index only' scan for given index and number of records.
> > +
> > + SYNOPSIS
> > + handler->keyread_read_time()
> > + param parameters structure
> > + records #of records to read
> > + keynr key to read
> > +
> > + NOTES
> > + It is assumed that we will read trough the whole key range and that all
> > + key blocks are half full (normally things are much better). It is also
> > + assumed that each time we read the next key from the index, the handler
> > + performs a random seek, thus the cost is proportional to the number of
> > + blocks read.
> > +*/
> > +
> > +double handler::keyread_read_time(uint index, uint ranges, ha_rows rows)
> > +{
> > + double read_time;
> > + uint keys_per_block= (stats.block_size/2/
> > + (table->key_info[index].key_length + ref_length) + 1);
> > + read_time=((double) (rows+keys_per_block-1)/ (double) keys_per_block);
> > + return read_time;
> > +}
>
> Do we really need the 'ranges' argument ?
> (It's always '1' in the current code and you are not using it)
I don't know :)
I've copied it from the handler::read_time(), just to have the
interface the same for consistency. After all - logically - if the
read_time() may depend on the number of ranges, keyread_read_time()
certainly can do too.
> > === modified file 'sql/key.cc'
> > --- a/sql/key.cc 2008-10-10 10:01:01 +0000
> > +++ b/sql/key.cc 2010-06-01 22:39:29 +0000
> > @@ -278,8 +278,10 @@ bool key_cmp_if_same(TABLE *table,const
> > key++;
> > store_length--;
> > }
> > - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART |
> > - HA_BIT_PART))
> > + if ((key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART |
> > + HA_BIT_PART)) ||
> > + key_part->type == HA_KEYTYPE_FLOAT ||
> > + key_part->type == HA_KEYTYPE_DOUBLE)
> > {
> > if (key_part->field->key_cmp(key, key_part->length))
> > return 1;
>
> I understand that for float and double there is some extraordinary
> cases where memcmp() is not same as =, but who has had a problem with
> this?
there was a bug report in mysql bugdb.
http://bugs.mysql.com/bug.php?id=44372
> As a separate note, I think it would be better to add to key_part_flag
> HA_NO_CMP_WITH_MEMCMP for key_parts of type FLOAT or DOUBLE
> when we open the table. This would simplify this test a bit.
I'll try to
> > === modified file 'sql/table.h'
> > --- a/sql/table.h 2010-06-01 19:52:20 +0000
> > +++ b/sql/table.h 2010-06-01 22:39:29 +0000
> > @@ -13,6 +13,8 @@
> > along with this program; if not, write to the Free Software
> > Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
> >
> > +#ifndef SQL_TABLE_INCLUDED
> > +#define SQL_TABLE_INCLUDED
>
> Do we really need this one as it's automaticly included by mysql_priv.h ?
> Anyway, it should probably be MYSQL_TABLE_H to be similar our other defines.
This is actually unrelated change, I tried to include table.h to handler.h
(to solve the problem of inline handler methods needing TABLE) and had
to add include guards. later I solved the problem differently but kept
the guards as they're a good thing anyway.
As for the name of the guard, it's new (~1 yr old) MySQL style. As I
personally don't care about the name of the guards, as long as they all
use a consistent style, I use the MySQL naming style here.
> > === modified file 'storage/myisam/ha_myisam.cc'
>
> <cut>
>
> > int ha_myisam::index_next(uchar *buf)
> > {
> > DBUG_ASSERT(inited==INDEX);
> > - ha_statistic_increment(&SSV::ha_read_next_count);
> > int error=mi_rnext(file,buf,active_index);
> > table->status=error ? STATUS_NOT_FOUND: 0;
>
> you should probably remove the setting of table->status here
Neither updating table->status not ha_statistic_increment() can
hurt here, and as you have seen I've not updated any other engine at
all. I've only did it in MyISAM to check that the change works, the code
compiles, test results don't change, and so on.
But I'll remove table->status updates from MyISAM.
> The whole function can the be changed to:
>
> return mi_rnext(file,buf,active_index);
>
> Same goes for all other instances of setting table->status in this file
>
Regards,
Sergei
1
0

02 Jun '10
Hi,
I was looking today at some optimizer code, and bumped again
into sql_select.cc:find_best(). We have been using the greedy
optimizer for years, and this function has been dead code for
a while, isn't it time to remove it?
The less code, the better.
Timour
2
3
Hello Kristian,
Thursday, May 27, 2010, 1:20:59 PM, you wrote:
KN> [Cc:ed maria-developers@ for general interest, hope that's ok]
That's fine. Seems you bcc-ed though. ;)
>> Mine was 1.10. Downgrading to 1.9.6 did the trick, thanks.
KN> Ok, good, at some point we can get someone to help sort out what
KN> the problem might be with 1.10.
Or at least add some check that'd *clearly* complain about improper
version. Fighting with 1.10 was.. emotional.
>> 1. Use prebuilt searchd binary, sphinx.conf file and test index
KN> My idea is that mysql-test-run.pl will look for an already
KN> installed searchd and indexer binary (in eg. SPHINXSEARCH_INDEXER,
KN> SPHINXSEARCH_SEARCD, and maybe $PATH). If not found, sphinxse
That's also good. For some reason I though everything should be self
contained (ie. work immediately out of bzr clone).
KN> tests will be skipped, if found mysql-test-run.pl will generate a
KN> simple .conf and start the daemon for the test. There is already a
Hmm, why *generate* that? I'd just bundle .conf and source .xml data
for indexer. Maybe prebuilt .sp* indexes too. Indexes are binary but
test ones can be kept tiny.
--
Best regards,
Andrew mailto:shodan@shodan.ru
2
3

[Maria-developers] Updated (by Guest): Efficient group commit for binary log (116)
by worklog-noreply@askmonty.org 01 Jun '10
by worklog-noreply@askmonty.org 01 Jun '10
01 Jun '10
-----------------------------------------------------------------------
WORKLOG TASK
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
TASK...........: Efficient group commit for binary log
CREATION DATE..: Mon, 26 Apr 2010, 13:28
SUPERVISOR.....: Knielsen
IMPLEMENTOR....: Knielsen
COPIES TO......: Serg
CATEGORY.......: Server-RawIdeaBin
TASK ID........: 116 (http://askmonty.org/worklog/?tid=116)
VERSION........: Server-9.x
STATUS.........: Assigned
PRIORITY.......: 60
WORKED HOURS...: 60
ESTIMATE.......: 0 (hours remain)
ORIG. ESTIMATE.: 0
PROGRESS NOTES:
-=-=(Guest - Tue, 01 Jun 2010, 14:20)=-=-
Status updated.
--- /tmp/wklog.116.old.32652 2010-06-01 14:20:15.000000000 +0000
+++ /tmp/wklog.116.new.32652 2010-06-01 14:20:15.000000000 +0000
@@ -1 +1 @@
-Un-Assigned
+Assigned
-=-=(Knielsen - Mon, 31 May 2010, 06:48)=-=-
Finish first architecture draft (changed my mind a number of times before I was satisfied).
Write up architecture in worklog.
Fix remaining test failures in proof-of-concept patch + implement xtradb part.
Run some benchmarks on proof-of-concept implementation.
Worked 11 hours and estimate 0 hours remain (original estimate increased by 11 hours).
-=-=(Knielsen - Tue, 25 May 2010, 13:19)=-=-
Low Level Design modified.
--- /tmp/wklog.116.old.14255 2010-05-25 13:19:00.000000000 +0000
+++ /tmp/wklog.116.new.14255 2010-05-25 13:19:00.000000000 +0000
@@ -1 +1,363 @@
+1. Changes for ha_commit_trans()
+
+The gut of the code for commit is in the function ha_commit_trans() (and in
+commit_one_phase() which is called from it). This must be extended to use the
+new prepare_ordered(), group_log_xid(), and commit_ordered() calls.
+
+1.1 Atomic queue of committing transactions
+
+To keep the right commit order among participants, we put transactions into a
+queue. The operations on the queue are non-locking:
+
+ - Insert THD at the head of the queue, and return old queue.
+
+ THD *enqueue_atomic(THD *thd)
+
+ - Fetch (and delete) the whole queue.
+
+ THD *atomic_grab_reverse_queue()
+
+These are simple to implement with atomic compare-and-set. Note that there is
+no ABA problem [2], as we do not delete individual elements from the queue, we
+grab the whole queue and replace it with NULL.
+
+A transaction enters the queue when it does prepare_ordered(). This way, the
+scheduling order for prepare_ordered() calls is what determines the sequence
+in the queue and effectively the commit order.
+
+The queue is grabbed by the code doing group_log_xid() and commit_ordered()
+calls. The queue is passed directly to group_log_xid(), and afterwards
+iterated to do individual commit_ordered() calls.
+
+Using a lock-free queue allows prepare_ordered() (for one transaction) to run
+in parallel with commit_ordered (in another transaction), increasing potential
+parallelism.
+
+The queue is simply a linked list of THD objects, linked through a
+THD::next_commit_ordered field. Since we add at the head of the queue, the
+list is actually in reverse order, so must be reversed when we grab and delete
+it.
+
+The reason that enqueue_atomic() returns the old queue is so that we can check
+if an insert goes to the head of the queue. The thread at the head of the
+queue will do the sequential part of group commit for everyone.
+
+
+1.2 Locks
+
+1.2.1 Global LOCK_prepare_ordered
+
+This lock is taken to serialise calls to prepare_ordered(). Note that
+effectively, the commit order is decided by the order in which threads obtain
+this lock.
+
+
+1.2.2 Global LOCK_group_commit and COND_group_commit
+
+This lock is used to protect the serial part of group commit. It is taken
+around the code where we grab the queue, call group_log_xid() on the queue,
+and call commit_ordered() on each element of the queue, to make sure they
+happen serialised and in consistent order. It also protects the variable
+group_commit_queue_busy, which is used when not using group_log_xid() to delay
+running over a new queue until the first queue is completely done.
+
+
+1.2.3 Global LOCK_commit_ordered
+
+This lock is taken around calls to commit_ordered(), to ensure they happen
+serialised.
+
+
+1.2.4 Per-thread thd->LOCK_commit_ordered and thd->COND_commit_ordered
+
+This lock protects the thd->group_commit_ready variable, as well as the
+condition variable used to wake up threads after log_xid() and
+commit_ordered() finishes.
+
+
+1.2.5 Global LOCK_group_commit_queue
+
+This is only used on platforms with no native compare-and-set operations, to
+make the queue operations atomic.
+
+
+1.3 Commit algorithm.
+
+This is the basic algorithm, simplified by
+
+ - omitting some error handling
+
+ - omitting looping over all handlers when invoking handler methods
+
+ - omitting some possible optimisations when not all calls needed (see next
+ section).
+
+ - Omitting the case where no group_log_xid() is used, see below.
+
+---- BEGIN ALGORITHM ----
+ ht->prepare()
+
+ // Call prepare_ordered() and enqueue in correct commit order
+ lock(LOCK_prepare_ordered)
+ ht->prepare_ordered()
+ old_queue= enqueue_atomic(thd)
+ thd->group_commit_ready= FALSE
+ is_group_commit_leader= (old_queue == NULL)
+ unlock(LOCK_prepare_ordered)
+
+ if (is_group_commit_leader)
+
+ // The first in queue handles group commit for everyone
+
+ lock(LOCK_group_commit)
+ // Wait while queue is busy, see below for when this occurs
+ while (group_commit_queue_busy)
+ cond_wait(COND_group_commit)
+
+ // Grab and reverse the queue to get correct order of transactions
+ queue= atomic_grab_reverse_queue()
+
+ // This call will set individual error codes in thd->xid_error
+ // It also sets the cookie for unlog() in thd->xid_cookie
+ group_log_xid(queue)
+
+ lock(LOCK_commit_ordered)
+ for (other IN queue)
+ if (!other->xid_error)
+ ht->commit_ordered()
+ unlock(LOCK_commit_ordered)
+
+ unlock(LOCK_group_commit)
+
+ // Now we are done, so wake up all the others.
+ for (other IN TAIL(queue))
+ lock(other->LOCK_commit_ordered)
+ other->group_commit_ready= TRUE
+ cond_signal(other->COND_commit_ordered)
+ unlock(other->LOCK_commit_ordered)
+ else
+ // If not the leader, just wait until leader did the work for us.
+ lock(thd->LOCK_commit_ordered)
+ while (!thd->group_commit_ready)
+ cond_wait(thd->LOCK_commit_ordered, thd->COND_commit_ordered)
+ unlock(other->LOCK_commit_ordered)
+
+ // Finally do any error reporting now that we're back in own thread.
+ if (thd->xid_error)
+ xid_delayed_error(thd)
+ else
+ ht->commit(thd)
+ unlog(thd->xid_cookie, thd->xid)
+---- END ALGORITHM ----
+
+If the transaction coordinator does not support group_log_xid(), we have to do
+things differently. In this case after the serialisation point at
+prepare_ordered(), we have to parallelise again when running log_xid()
+(otherwise we would lose group commit). But then when log_xid() is done, we
+have to serialise again to check for any error and call commit_ordered() in
+correct sequence for any transaction where log_xid() did not return error.
+
+The central part of the algorithm in this case (when using log_xid()) is:
+
+---- BEGIN ALGORITHM ----
+ cookie= log_xid(thd)
+ error= (cookie == 0)
+
+ if (is_group_commit_leader)
+
+ // The first to enqueue grabs the queue and runs first.
+ // But we must wait until a previous queue run is fully done.
+
+ lock(LOCK_group_commit)
+ while (group_commit_queue_busy)
+ cond_wait(COND_group_commit)
+ queue= atomic_grab_reverse_queue()
+ // The queue will be busy until last thread in it is done.
+ group_commit_queue_busy= TRUE
+ unlock(LOCK_group_commit)
+ else
+ // Not first in queue -> wait for previous one to wake us up.
+ lock(thd->LOCK_commit_ordered)
+ while (!thd->group_commit_ready)
+ cond_wait(thd->LOCK_commit_ordered, thd->COND_commit_ordered)
+ unlock(other->LOCK_commit_ordered)
+
+ if (!error) // Only if log_xid() was successful
+ lock(LOCK_commit_ordered)
+ ht->commit_ordered()
+ unlock(LOCK_commit_ordered)
+
+ // Wake up the next thread, and release queue in last.
+ next= thd->next_commit_ordered
+
+ if (next)
+ lock(next->LOCK_commit_ordered)
+ next->group_commit_ready= TRUE
+ cond_signal(next->COND_commit_ordered)
+ unlock(next->LOCK_commit_ordered)
+ else
+ lock(LOCK_group_commit)
+ group_commit_queue_busy= FALSE
+ unlock(LOCK_group_commit)
+---- END ALGORITHM ----
+
+There are a number of locks taken in the algorithm, but in the group_log_xid()
+case most of them should be uncontended most of the time. The
+LOCK_group_commit of course will be contended, as new threads queue up waiting
+for the previous group commit (and binlog fsync()) to finish so they can do
+the next group commit. This is the whole point of implementing group commit.
+
+The LOCK_prepare_ordered and LOCK_commit_ordered mutexes should be not much
+contended as long as handlers follow the intention of having the corresponding
+handler calls execute quickly.
+
+The per-thread LOCK_commit_ordered mutexes should not be contended; they are
+only used to wake up a sleeping thread.
+
+
+1.4 Optimisations when not using all three new calls
+
+
+The prepare_ordered(), group_log_xid(), and commit_ordered() methods are
+optional, and if not implemented by a particular handler/transaction
+coordinator, we can optimise the algorithm to take advantage of not having to
+keep ordering for the missing parts.
+
+If there is no prepare_ordered(), then we need not take the
+LOCK_prepare_ordered mutex.
+
+If there is no commit_ordered(), then we need not take the LOCK_commit_ordered
+mutex.
+
+If there is no group_log_xid(), then we only need the queue to ensure same
+ordering of transactions for commit_ordered() as for prepare_ordered(). Thus,
+if either of these (or both) are also not present, we do not need to use the
+queue at all.
+
+
+2. Binlog code changes (log.cc)
+
+
+The bulk of the work needed for the binary log is to extend the code to allow
+group commit to the log. Unlike InnoDB/XtraDB, there is no existing support
+inside the binlog code for group commit.
+
+The existing code runs most of the write + fsync to the binary log under the
+global LOCK_log mutex, preventing any group commit.
+
+To enable group commit, this code must be split into two parts:
+
+ - one part that runs per transaction, re-writing the embedded event positions
+ for the correct offset, and writing this into the in-memory log cache.
+
+ - another part that writes a set of transactions to the disk, and runs
+ fsync().
+
+Then in group_log_xid(), we can run the first part in a loop over all the
+transactions in the passed-in queue, and run the second part only once.
+
+The binlog code also has other code paths that write into the binlog,
+eg. non-transactional statements. These have to be adapted also to work with
+the new code.
+
+In order to get some group commit facility for these also, we change that part
+of the code in a similar way to ha_commit_trans. We keep another,
+binlog-internal queue of such non-transactional binlog writes, and such writes
+queue up here before sleeping on the LOCK_log mutex. Once a thread obtains the
+LOCK_log, it loops over the queue for the fast part, and does the slow part
+once, then finally wakes up the others in the queue.
+
+In the transactional case in group_log_xid(), before we run the passed-in
+queue, we add any members found in the binlog-internal queue. This allows
+these non-transactional writes to share the group commit.
+
+However, in the case where it is a non-transactional write that gets the
+LOCK_log, the transactional transactions from the ha_commit_trans() queue will
+not be able to take part (they will have to wait for their turn to do another
+fsync). It seems difficult to cleanly let the binlog code grab the queue from
+out of the ha_commit_trans() algorithm. I think the group commit is mostly
+useful in transactional workloads anyway (non-transactional engines will lose
+data anyway in case of crash, so why fsync() after each transaction?)
+
+
+3. XtraDB changes (ha_innodb.cc)
+
+The changes needed in XtraDB are comparatively simple, as XtraDB already
+implements group commit, it just needs to be enabled with the new
+commit_ordered() call.
+
+The existing commit() method already is logically in two parts. The first part
+runs under the prepare_commit_mutex() and must be run in same order as binlog
+commit. This part needs to be moved to commit_ordered(). The second part runs
+after releasing prepare_commit_mutex and does transaction log write+fsync; it
+can remain.
+
+Then the prepare_commit_mutex is removed (and the enable_unsafe_group_commit
+XtraDB option to disable it).
+
+There are two asserts that check that the thread running the first part of
+XtraDB commit is the same as the thread running the other operations for the
+transaction. These have to be removed (as commit_ordered() can run in a
+different thread). Also an error reporting with sql_print_error() has to be
+delayed until commit() time.
+
+
+4. Proof-of-concept implementation
+
+There is a proof-of-concept implementation of this architecture, in the form
+of a quilt patch series [3].
+
+A quick benchmark was done, with sync_binlog=1 and
+innodb_flush_log_at_trx_commit=1. 64 parallel threads doing single-row
+transactions against one table.
+
+Without the patch, we get only 25 queries per second.
+
+With the patch, we get 650 queries per second.
+
+
+5. Open issues/tasks
+
+5.1 XA / other prepare() and commit() call sites.
+
+Check that user-level XA is handled correctly and working. And covered
+sufficiently with tests. Also check that any other calls of ha->prepare() and
+ha->commit() outside of ha_commit_trans() are handled correctly.
+
+5.2 Testing
+
+This worklog needs additions to the test suite, including error inserts to
+check error handling, and synchronisation points to check thread parallelism
+correctness.
+
+
+6. Alternative implementations
+
+ - The binlog code maintains its own extra atomic transaction queue to handle
+ non-transactional commits in a good way together with transactional (with
+ respect to group commit). Alternatively, we could ignore this issue and
+ just give up on group commit for non-transactional statements, for some
+ code simplifications.
+
+ - The binlog code has two ways to prepare end_event and similar, one that
+ uses stack-allocation, and another for when stack allocation is not
+ possible that uses thd->mem_root. Probably the overhead of thd->mem_root is
+ so small that it would make sense to use the same code for both cases.
+
+ - Instead of adding extra fields to THD, we could allocate a separate
+ structure on the thd->mem_root() with the required extra fields (including
+ the THD pointer). Would seem to require initialising mutexes at every
+ commit though.
+
+ - It would probably be a good idea to implement TC_LOG_MMAP::group_log_xid()
+ (should not be hard).
+
+
+-----------------------------------------------------------------------
+
+References:
+
+[2] https://secure.wikimedia.org/wikipedia/en/wiki/ABA_problem
+
+[3] https://knielsen-hq.org/maria/patches.mwl116/
-=-=(Knielsen - Tue, 25 May 2010, 13:18)=-=-
High-Level Specification modified.
--- /tmp/wklog.116.old.14249 2010-05-25 13:18:34.000000000 +0000
+++ /tmp/wklog.116.new.14249 2010-05-25 13:18:34.000000000 +0000
@@ -1 +1,157 @@
+The basic idea in group commit is that multiple threads, each handling one
+transaction, prepare for commit and then queue up together waiting to do an
+fsync() on the transaction log. Then once the log is available, a single
+thread does the fsync() + other necessary book-keeping for all of the threads
+at once. After this, the single thread signals the other threads that it's
+done and they can finish up and return success (or failure) from the commit
+operation.
+
+So group commit has a parallel part, and a sequential part. So we need a
+facility for engines/binlog to participate in both the parallel and the
+sequential part.
+
+To do this, we add two new handlerton methods:
+
+ int (*prepare_ordered)(handlerton *hton, THD *thd, bool all);
+ void (*commit_ordered)(handlerton *hton, THD *thd, bool all);
+
+The idea is that the existing prepare() and commit() methods run in the
+parallel part of group commit, and the new prepare_ordered() and
+commit_ordered() run in the sequential part.
+
+The prepare_ordered() method is called after prepare(). The order of
+transactions that call into prepare_ordered() is guaranteed to be the same among
+all storage engines and binlog, and it is serialised so no two calls can be
+running inside the same engine at the same time.
+
+The commit_ordered() method is called before commit(), and similarly is
+guaranteed to have same transaction order in all participants, and to be
+serialised within one engine.
+
+As the prepare_ordered() and commit_ordered() calls are serialised, the idea
+is that handlers should do the minimum amount of work needed in these calls,
+relaying most of the work (eg. fsync() ...) to prepare() and commit().
+
+As a concrete example, for InnoDB the commit_ordered() method will do the
+first part of commit that fixed the commit order in the transaction log
+buffer, and the commit() method will write the log to disk and fsync()
+it. This split already exists inside the InnoDB code, running before
+respectively after releasing the prepare_commit_mutex.
+
+In addition, the XA transaction coordinator (TC_LOG) is special, since it is
+the one responsible for deciding whether to commit or rollback the
+transaction. For this we need an extra method, since this decision can be done
+only after we know that all prepare() and prepare_ordered() calls succeed, and
+must be done to know whether to call commit_ordered()/commit(), or do rollback.
+
+The existing method for this is TC_LOG::log_xid(). To make implementing group
+commit simpler to implement in a transaction coordinator and more efficient,
+we introduce a new method:
+
+ void group_log_xid(THD *first_thd);
+
+This method runs in the sequential part of group commit. It receives a list of
+transactions to perform log_xid() on, in the correct commit order. (Note that
+TC_LOG can do parallel parts of group commit in its own prepare() and commit()
+methods).
+
+This method can make it easier to implement the group commit in TC_LOG, as it
+gets directly the list of transactions in the right order. Without it, it
+might need to compute such order anyway in a prepare_ordered() method, and the
+server has to create this ordered list anyway to implement the order guarantee
+for prepare_ordered() and commit_ordered().
+
+This group_log_xid() method also is more efficient, as it avoids some
+inter-thread synchronisation. Since group_log_xid() is serialised, we can run
+it together with all the commit_ordered() method calls and need only a single
+sequential code section. With the log_xid() methods, we would need first a
+sequential part for the prepare_ordered() calls, then a parallel part with
+log_xid() calls (to not lose group commit ability for log_xid()), then again
+a sequential part for the commit_ordered() method calls.
+
+The extra synchronisation is needed, as each commit_ordered() call will have
+to wait for log_xid() in one thread (if log_xid() fails then commit_ordered()
+should not be called), and also wait for commit_ordered() to finish in all
+threads handling earlier commits. In effect we will need to bounce the
+execution from one thread to the other among all participants in the group
+commit.
+
+As a consequence of the group_log_xid() optimisation, handlers must be aware
+that the commit_ordered() call can happen in another thread than the one
+running commit() (so thread local storage is not available). This should not
+be a big issue as the THD is available for storing any needed information.
+
+Since group_log_xid() runs for multiple transactions in a single thread, it
+can not do error reporting (my_error()) as that relies on thread local
+storage. Instead it sets an error code in THD::xid_error, and if there is an
+error then later another method will be called (in correct thread context) to
+actually report the error:
+
+ int xid_delayed_error(THD *thd)
+
+The three new methods prepare_ordered(), group_log_xid(), and commit_ordered()
+are optional (as is xid_delayed_error). A storage engine or transaction
+coordinator is free to not implement them if they are not needed. In this case
+there will be no order guarantee for the corresponding stage of group commit
+for that engine. For example, InnoDB needs no ordering of the prepare phase,
+so can omit implementing prepare_ordered(); TC_LOG_MMAP needs no ordering at
+all, so does not need to implement any of them.
+
+Note in particular that all existing engines (/binlog implementations if they
+exist) will work unmodified (and also without any change in group commit
+facilities or commit order guaranteed).
+
+Using these new APIs, the work will be to
+
+ - In ha_commit_trans(), implement the correct semantics for the three new
+ calls.
+
+ - In XtraDB, use the new commit_ordered() call to remove the
+    prepare_commit_mutex (and resurrect group commit) without losing the
+ consistency with binlog commit order.
+
+ - In log.cc (binlog module), implement group_log_xid() to do group commit of
+ multiple transactions to the binlog with a single shared fsync() call.
+
+-----------------------------------------------------------------------
+Some possible alternative for this worklog:
+
+ - We could eliminate the group_log_xid() method for a simpler API, at the
+ cost of extra synchronisation between threads to do in-order
+ commit_ordered() method calls. This would also allow to call
+ commit_ordered() in the correct thread context.
+
+ - Alternatively, we could eliminate log_xid() and require that all
+ transaction coordinators implement group_log_xid() instead, again for some
+ moderate simplification.
+
+ - At the moment there is no plugin actually using prepare_ordered(), so, it
+ could be removed from the design. But it fits in well, is efficient to
+ implement, and could be useful later (eg. for the requested feature of
+ releasing locks early in InnoDB).
+
+-----------------------------------------------------------------------
+Some possible follow-up projects after this is implemented:
+
+ - Add statistics about how efficient group commit is (#fsyncs/#commits in
+ each engine and binlog).
+
+ - Implement an XtraDB prepare_ordered() methods that can release row locks
+ early (Mark Callaghan from Facebook advocates this, but need to determine
+ exactly how to do this safely).
+
+ - Implement a new crash recovery algorithm that uses the consistent commit
+ ordering to need only fsync() for the binlog. At crash recovery, any
+ missing transactions in an engine is replayed from the correct point in the
+ binlog (this point must be stored transactionally inside the engine, as
+ XtraDB already does today).
+
+ - Implement that START TRANSACTION WITH CONSISTENT SNAPSHOT 1) really gets a
+   consistent snapshot, with same set of committed and not committed
+ transactions in all engines, 2) returns a corresponding consistent binlog
+ position. This should be easy by piggybacking on the synchronisation
+ implemented for ha_commit_trans().
+
+ - Use this in XtraBackup to get consistent binlog position without having to
+ block all updates with FLUSH TABLES WITH READ LOCK.
-=-=(Knielsen - Tue, 25 May 2010, 13:18)=-=-
High Level Description modified.
--- /tmp/wklog.116.old.14234 2010-05-25 13:18:07.000000000 +0000
+++ /tmp/wklog.116.new.14234 2010-05-25 13:18:07.000000000 +0000
@@ -21,3 +21,69 @@
http://kristiannielsen.livejournal.com/12408.html
http://kristiannielsen.livejournal.com/12553.html
+----
+
+Implementing group commit in MySQL faces some challenges from the handler
+plugin architecture:
+
+1. Because storage engine handlers have separate transaction log from the
+mysql binlog (and from each other), there are multiple fsync() calls per
+commit that need the group commit optimisation (2 per participating storage
+engine + 1 for binlog).
+
+2. The code handling commit is split in several places, in main server code
+and in storage engine code. With pluggable binlog it will be split even
+more. This requires a good abstract yet powerful API to be able to implement
+group commit simply and efficiently in plugins without the different parts
+having to rely on internals of the others.
+
+3. We want the order of commits to be the same in all engines participating in
+multiple transactions. This requirement is the reason that InnoDB currently
+breaks group commit with the infamous prepare_commit_mutex.
+
+While currently there is no server guarantee to get same commit order in
+engines and binlog (except for the InnoDB prepare_commit_mutex hack), there are
+several reasons why this could be desirable:
+
+ - InnoDB hot backup needs to be able to extract a binlog position that is
+ consistent with the hot backup to be able to provision a new slave, and
+ this is impossible without imposing at least partial consistent ordering
+ between InnoDB and binlog.
+
+ - Other backup methods could have similar needs, eg. XtraBackup or
+ `mysqldump --single-transaction`, to have consistent commit order between
+ binlog and storage engines without having to do FLUSH TABLES WITH READ LOCK
+ or similar expensive blocking operation. (other backup methods, like LVM
+ snapshot, don't need consistent commit order, as they can restore
+ out-of-order commits during crash recovery using XA).
+
+ - If we have consistent commit order, we can think about optimising commit to
+ need only one fsync (for binlog); lost commits in storage engines can then
+ be recovered from the binlog at crash recovery by re-playing against the
+ engine from a particular point in the binlog.
+
+ - With consistent commit order, we can get better semantics for START
+ TRANSACTION WITH CONSISTENT SNAPSHOT with multi-engine transactions (and we
+ could even get it to return also a matching binlog position). Currently,
+ this "CONSISTENT SNAPSHOT" can be inconsistent among multiple storage
+ engines.
+
+ - In InnoDB, the performance in the presence of hotspots can be improved if
+ we can release row locks early in the commit phase, but this requires that we
+release them in
+ the same order as commits in the binlog to ensure consistency between
+ master and slaves.
+
+ - There was some discussions around Galera [1] synchronous replication and
+ global transaction ID that it needed consistent commit order among
+ participating engines.
+
+ - I believe there could be other applications for guaranteed consistent
+ commit order, and that the architecture described in this worklog can
+ implement such guarantee with reasonable overhead.
+
+
+References:
+
+[1] Galera: http://www.codership.com/products/galera_replication
+
-=-=(Knielsen - Tue, 25 May 2010, 08:28)=-=-
More thoughts on and changes to the architecture. Got to something now that I am satisfied with and
that seems to be able to handle all issues.
Implement new prepare_ordered and commit_ordered handler methods and the logic in ha_commit_trans().
Implement TC_LOG::group_log_xid() method and logic in ha_commit_trans().
Implement XtraDB part, using commit_ordered() rather than prepare_commit_mutex.
Fix test suite failures.
Proof-of-concept patch series complete now.
Do initial benchmark, getting good results. With 64 threads, see 26x improvement in queries-per-sec.
Next step: write up the architecture description.
Worked 21 hours and estimate 0 hours remain (original estimate increased by 21 hours).
-=-=(Knielsen - Wed, 12 May 2010, 06:41)=-=-
Started work on a Quilt patch series, refactoring the binlog code to prepare for implementing the
group commit, and working on the design of group commit in parallel.
Found and fixed several problems in error handling when writing to binlog.
Removed redundant table map version locking.
Split binlog writing into two parts in preparations for group commit. When ready to write to the
binlog, threads enter a queue, and the first thread in the queue handles the binlog writing for
everyone. When it obtains the LOCK_log, it first loops over all threads, executing the first part of
binlog writing (the write(2) syscall essentially). It then runs the second part (fsync(2)
essentially) only once, and then wakes up the remaining threads in the queue.
Still to be done:
Finish the proof-of-concept group commit patch, by 1) implementing the prepare_fast() and
commit_fast() callbacks in handler.cc 2) move the binlog thread enqueue from log_xid() to
binlog_prepare_fast(), 3) move fast part of InnoDB commit to innobase_commit_fast(), removing the
prepare_commit_mutex().
Write up the final design in this worklog.
Evaluate the design to see if we can do better/different.
Think about possible next steps, such as releasing innodb row locks early (in
innobase_prepare_fast), and doing crash recovery by replaying transactions from the binlog (removing
the need for engine durability and 2 of 3 fsync() in commit).
Worked 28 hours and estimate 0 hours remain (original estimate increased by 28 hours).
-=-=(Serg - Mon, 26 Apr 2010, 14:10)=-=-
Observers changed: Serg
DESCRIPTION:
Currently, in order to ensure that the server can recover after a crash to a
state in which storage engines and binary log are consistent with each other,
it is necessary to use XA with durable commits for both storage engines
(innodb_flush_log_at_trx_commit=1) and binary log (sync_binlog=1).
This is _very_ expensive, since the server needs to do three fsync() operations
for every commit, as there is no working group commit when the binary log is
enabled.
The idea is to
- Implement/fix group commit to work properly with the binary log enabled.
- (Optionally) avoid the need to fsync() in the engine, and instead rely on
replaying any lost transactions from the binary log against the engine
during crash recovery.
For background see these articles:
http://kristiannielsen.livejournal.com/12254.html
http://kristiannielsen.livejournal.com/12408.html
http://kristiannielsen.livejournal.com/12553.html
----
Implementing group commit in MySQL faces some challenges from the handler
plugin architecture:
1. Because storage engine handlers have separate transaction log from the
mysql binlog (and from each other), there are multiple fsync() calls per
commit that need the group commit optimisation (2 per participating storage
engine + 1 for binlog).
2. The code handling commit is split in several places, in main server code
and in storage engine code. With pluggable binlog it will be split even
more. This requires a good abstract yet powerful API to be able to implement
group commit simply and efficiently in plugins without the different parts
having to rely on internals of the others.
3. We want the order of commits to be the same in all engines participating in
multiple transactions. This requirement is the reason that InnoDB currently
breaks group commit with the infamous prepare_commit_mutex.
While currently there is no server guarantee to get same commit order in
engines and binlog (except for the InnoDB prepare_commit_mutex hack), there are
several reasons why this could be desirable:
- InnoDB hot backup needs to be able to extract a binlog position that is
consistent with the hot backup to be able to provision a new slave, and
this is impossible without imposing at least partial consistent ordering
between InnoDB and binlog.
- Other backup methods could have similar needs, eg. XtraBackup or
`mysqldump --single-transaction`, to have consistent commit order between
binlog and storage engines without having to do FLUSH TABLES WITH READ LOCK
or similar expensive blocking operation. (other backup methods, like LVM
snapshot, don't need consistent commit order, as they can restore
out-of-order commits during crash recovery using XA).
- If we have consistent commit order, we can think about optimising commit to
need only one fsync (for binlog); lost commits in storage engines can then
be recovered from the binlog at crash recovery by re-playing against the
engine from a particular point in the binlog.
- With consistent commit order, we can get better semantics for START
TRANSACTION WITH CONSISTENT SNAPSHOT with multi-engine transactions (and we
could even get it to return also a matching binlog position). Currently,
this "CONSISTENT SNAPSHOT" can be inconsistent among multiple storage
engines.
- In InnoDB, the performance in the presence of hotspots can be improved if
we can release row locks early in the commit phase, but this requires that we
release them in
the same order as commits in the binlog to ensure consistency between
master and slaves.
- There was some discussions around Galera [1] synchronous replication and
global transaction ID that it needed consistent commit order among
participating engines.
- I believe there could be other applications for guaranteed consistent
commit order, and that the architecture described in this worklog can
implement such guarantee with reasonable overhead.
References:
[1] Galera: http://www.codership.com/products/galera_replication
HIGH-LEVEL SPECIFICATION:
The basic idea in group commit is that multiple threads, each handling one
transaction, prepare for commit and then queue up together waiting to do an
fsync() on the transaction log. Then once the log is available, a single
thread does the fsync() + other necessary book-keeping for all of the threads
at once. After this, the single thread signals the other threads that it's
done and they can finish up and return success (or failure) from the commit
operation.
So group commit has a parallel part, and a sequential part. So we need a
facility for engines/binlog to participate in both the parallel and the
sequential part.
To do this, we add two new handlerton methods:
int (*prepare_ordered)(handlerton *hton, THD *thd, bool all);
void (*commit_ordered)(handlerton *hton, THD *thd, bool all);
The idea is that the existing prepare() and commit() methods run in the
parallel part of group commit, and the new prepare_ordered() and
commit_ordered() run in the sequential part.
The prepare_ordered() method is called after prepare(). The order of
transactions that call into prepare_ordered() is guaranteed to be the same among
all storage engines and binlog, and it is serialised so no two calls can be
running inside the same engine at the same time.
The commit_ordered() method is called before commit(), and similarly is
guaranteed to have same transaction order in all participants, and to be
serialised within one engine.
As the prepare_ordered() and commit_ordered() calls are serialised, the idea
is that handlers should do the minimum amount of work needed in these calls,
delegating most of the work (eg. fsync() ...) to prepare() and commit().
As a concrete example, for InnoDB the commit_ordered() method will do the
first part of commit that fixed the commit order in the transaction log
buffer, and the commit() method will write the log to disk and fsync()
it. This split already exists inside the InnoDB code, running before
respectively after releasing the prepare_commit_mutex.
In addition, the XA transaction coordinator (TC_LOG) is special, since it is
the one responsible for deciding whether to commit or rollback the
transaction. For this we need an extra method, since this decision can be done
only after we know that all prepare() and prepare_ordered() calls succeed, and
must be done to know whether to call commit_ordered()/commit(), or do rollback.
The existing method for this is TC_LOG::log_xid(). To make group commit
simpler to implement in a transaction coordinator and more efficient,
we introduce a new method:
void group_log_xid(THD *first_thd);
This method runs in the sequential part of group commit. It receives a list of
transactions to perform log_xid() on, in the correct commit order. (Note that
TC_LOG can do parallel parts of group commit in its own prepare() and commit()
methods).
This method can make it easier to implement the group commit in TC_LOG, as it
gets directly the list of transactions in the right order. Without it, it
might need to compute such order anyway in a prepare_ordered() method, and the
server has to create this ordered list anyway to implement the order guarantee
for prepare_ordered() and commit_ordered().
This group_log_xid() method also is more efficient, as it avoids some
inter-thread synchronisation. Since group_log_xid() is serialised, we can run
it together with all the commit_ordered() method calls and need only a single
sequential code section. With the log_xid() methods, we would need first a
sequential part for the prepare_ordered() calls, then a parallel part with
log_xid() calls (to not lose group commit ability for log_xid()), then again
a sequential part for the commit_ordered() method calls.
The extra synchronisation is needed, as each commit_ordered() call will have
to wait for log_xid() in one thread (if log_xid() fails then commit_ordered()
should not be called), and also wait for commit_ordered() to finish in all
threads handling earlier commits. In effect we will need to bounce the
execution from one thread to the other among all participants in the group
commit.
As a consequence of the group_log_xid() optimisation, handlers must be aware
that the commit_ordered() call can happen in another thread than the one
running commit() (so thread local storage is not available). This should not
be a big issue as the THD is available for storing any needed information.
Since group_log_xid() runs for multiple transactions in a single thread, it
can not do error reporting (my_error()) as that relies on thread local
storage. Instead it sets an error code in THD::xid_error, and if there is an
error then later another method will be called (in correct thread context) to
actually report the error:
int xid_delayed_error(THD *thd)
The three new methods prepare_ordered(), group_log_xid(), and commit_ordered()
are optional (as is xid_delayed_error). A storage engine or transaction
coordinator is free to not implement them if they are not needed. In this case
there will be no order guarantee for the corresponding stage of group commit
for that engine. For example, InnoDB needs no ordering of the prepare phase,
so can omit implementing prepare_ordered(); TC_LOG_MMAP needs no ordering at
all, so does not need to implement any of them.
Note in particular that all existing engines (/binlog implementations if they
exist) will work unmodified (and also without any change in group commit
facilities or commit order guaranteed).
Using these new APIs, the work will be to
- In ha_commit_trans(), implement the correct semantics for the three new
calls.
- In XtraDB, use the new commit_ordered() call to remove the
prepare_commit_mutex (and resurrect group commit) without losing the
consistency with binlog commit order.
- In log.cc (binlog module), implement group_log_xid() to do group commit of
multiple transactions to the binlog with a single shared fsync() call.
-----------------------------------------------------------------------
Some possible alternative for this worklog:
- We could eliminate the group_log_xid() method for a simpler API, at the
cost of extra synchronisation between threads to do in-order
commit_ordered() method calls. This would also allow to call
commit_ordered() in the correct thread context.
- Alternatively, we could eliminate log_xid() and require that all
transaction coordinators implement group_log_xid() instead, again for some
moderate simplification.
- At the moment there is no plugin actually using prepare_ordered(), so, it
could be removed from the design. But it fits in well, is efficient to
implement, and could be useful later (eg. for the requested feature of
releasing locks early in InnoDB).
-----------------------------------------------------------------------
Some possible follow-up projects after this is implemented:
- Add statistics about how efficient group commit is (#fsyncs/#commits in
each engine and binlog).
- Implement an XtraDB prepare_ordered() methods that can release row locks
early (Mark Callaghan from Facebook advocates this, but need to determine
exactly how to do this safely).
- Implement a new crash recovery algorithm that uses the consistent commit
ordering to need only fsync() for the binlog. At crash recovery, any
missing transactions in an engine is replayed from the correct point in the
binlog (this point must be stored transactionally inside the engine, as
XtraDB already does today).
- Implement that START TRANSACTION WITH CONSISTENT SNAPSHOT 1) really gets a
consistent snapshot, with same set of committed and not committed
transactions in all engines, 2) returns a corresponding consistent binlog
position. This should be easy by piggybacking on the synchronisation
implemented for ha_commit_trans().
- Use this in XtraBackup to get consistent binlog position without having to
block all updates with FLUSH TABLES WITH READ LOCK.
LOW-LEVEL DESIGN:
1. Changes for ha_commit_trans()
The gut of the code for commit is in the function ha_commit_trans() (and in
commit_one_phase() which is called from it). This must be extended to use the
new prepare_ordered(), group_log_xid(), and commit_ordered() calls.
1.1 Atomic queue of committing transactions
To keep the right commit order among participants, we put transactions into a
queue. The operations on the queue are non-locking:
- Insert THD at the head of the queue, and return old queue.
THD *enqueue_atomic(THD *thd)
- Fetch (and delete) the whole queue.
THD *atomic_grab_reverse_queue()
These are simple to implement with atomic compare-and-set. Note that there is
no ABA problem [2], as we do not delete individual elements from the queue, we
grab the whole queue and replace it with NULL.
A transaction enters the queue when it does prepare_ordered(). This way, the
scheduling order for prepare_ordered() calls is what determines the sequence
in the queue and effectively the commit order.
The queue is grabbed by the code doing group_log_xid() and commit_ordered()
calls. The queue is passed directly to group_log_xid(), and afterwards
iterated to do individual commit_ordered() calls.
Using a lock-free queue allows prepare_ordered() (for one transaction) to run
in parallel with commit_ordered (in another transaction), increasing potential
parallelism.
The queue is simply a linked list of THD objects, linked through a
THD::next_commit_ordered field. Since we add at the head of the queue, the
list is actually in reverse order, so must be reversed when we grab and delete
it.
The reason that enqueue_atomic() returns the old queue is so that we can check
if an insert goes to the head of the queue. The thread at the head of the
queue will do the sequential part of group commit for everyone.
1.2 Locks
1.2.1 Global LOCK_prepare_ordered
This lock is taken to serialise calls to prepare_ordered(). Note that
effectively, the commit order is decided by the order in which threads obtain
this lock.
1.2.2 Global LOCK_group_commit and COND_group_commit
This lock is used to protect the serial part of group commit. It is taken
around the code where we grab the queue, call group_log_xid() on the queue,
and call commit_ordered() on each element of the queue, to make sure they
happen serialised and in consistent order. It also protects the variable
group_commit_queue_busy, which is used when not using group_log_xid() to delay
running over a new queue until the first queue is completely done.
1.2.3 Global LOCK_commit_ordered
This lock is taken around calls to commit_ordered(), to ensure they happen
serialised.
1.2.4 Per-thread thd->LOCK_commit_ordered and thd->COND_commit_ordered
This lock protects the thd->group_commit_ready variable, as well as the
condition variable used to wake up threads after log_xid() and
commit_ordered() finishes.
1.2.5 Global LOCK_group_commit_queue
This is only used on platforms with no native compare-and-set operations, to
make the queue operations atomic.
1.3 Commit algorithm.
This is the basic algorithm, simplified by
- omitting some error handling
- omitting looping over all handlers when invoking handler methods
- omitting some possible optimisations when not all calls needed (see next
section).
- Omitting the case where no group_log_xid() is used, see below.
---- BEGIN ALGORITHM ----
ht->prepare()
// Call prepare_ordered() and enqueue in correct commit order
lock(LOCK_prepare_ordered)
ht->prepare_ordered()
old_queue= enqueue_atomic(thd)
thd->group_commit_ready= FALSE
is_group_commit_leader= (old_queue == NULL)
unlock(LOCK_prepare_ordered)
if (is_group_commit_leader)
// The first in queue handles group commit for everyone
lock(LOCK_group_commit)
// Wait while queue is busy, see below for when this occurs
while (group_commit_queue_busy)
cond_wait(COND_group_commit)
// Grab and reverse the queue to get correct order of transactions
queue= atomic_grab_reverse_queue()
// This call will set individual error codes in thd->xid_error
// It also sets the cookie for unlog() in thd->xid_cookie
group_log_xid(queue)
lock(LOCK_commit_ordered)
for (other IN queue)
if (!other->xid_error)
ht->commit_ordered()
unlock(LOCK_commit_ordered)
unlock(LOCK_group_commit)
// Now we are done, so wake up all the others.
for (other IN TAIL(queue))
lock(other->LOCK_commit_ordered)
other->group_commit_ready= TRUE
cond_signal(other->COND_commit_ordered)
unlock(other->LOCK_commit_ordered)
else
// If not the leader, just wait until leader did the work for us.
lock(thd->LOCK_commit_ordered)
while (!thd->group_commit_ready)
cond_wait(thd->LOCK_commit_ordered, thd->COND_commit_ordered)
unlock(thd->LOCK_commit_ordered)
// Finally do any error reporting now that we're back in own thread.
if (thd->xid_error)
xid_delayed_error(thd)
else
ht->commit(thd)
unlog(thd->xid_cookie, thd->xid)
---- END ALGORITHM ----
If the transaction coordinator does not support group_log_xid(), we have to do
things differently. In this case after the serialisation point at
prepare_ordered(), we have to parallelise again when running log_xid()
(otherwise we would lose group commit). But then when log_xid() is done, we
have to serialise again to check for any error and call commit_ordered() in
correct sequence for any transaction where log_xid() did not return error.
The central part of the algorithm in this case (when using log_xid()) is:
---- BEGIN ALGORITHM ----
cookie= log_xid(thd)
error= (cookie == 0)
if (is_group_commit_leader)
// The first to enqueue grabs the queue and runs first.
// But we must wait until a previous queue run is fully done.
lock(LOCK_group_commit)
while (group_commit_queue_busy)
cond_wait(COND_group_commit)
queue= atomic_grab_reverse_queue()
// The queue will be busy until last thread in it is done.
group_commit_queue_busy= TRUE
unlock(LOCK_group_commit)
else
// Not first in queue -> wait for previous one to wake us up.
lock(thd->LOCK_commit_ordered)
while (!thd->group_commit_ready)
cond_wait(thd->LOCK_commit_ordered, thd->COND_commit_ordered)
unlock(thd->LOCK_commit_ordered)
if (!error) // Only if log_xid() was successful
lock(LOCK_commit_ordered)
ht->commit_ordered()
unlock(LOCK_commit_ordered)
// Wake up the next thread, and release queue in last.
next= thd->next_commit_ordered
if (next)
lock(next->LOCK_commit_ordered)
next->group_commit_ready= TRUE
cond_signal(next->COND_commit_ordered)
unlock(next->LOCK_commit_ordered)
else
lock(LOCK_group_commit)
group_commit_queue_busy= FALSE
unlock(LOCK_group_commit)
---- END ALGORITHM ----
There are a number of locks taken in the algorithm, but in the group_log_xid()
case most of them should be uncontended most of the time. The
LOCK_group_commit of course will be contended, as new threads queue up waiting
for the previous group commit (and binlog fsync()) to finish so they can do
the next group commit. This is the whole point of implementing group commit.
The LOCK_prepare_ordered and LOCK_commit_ordered mutexes should be not much
contended as long as handlers follow the intention of having the corresponding
handler calls execute quickly.
The per-thread LOCK_commit_ordered mutexes should not be contended; they are
only used to wake up a sleeping thread.
1.4 Optimisations when not using all three new calls
The prepare_ordered(), group_log_xid(), and commit_ordered() methods are
optional, and if not implemented by a particular handler/transaction
coordinator, we can optimise the algorithm to take advantage of not having to
keep ordering for the missing parts.
If there is no prepare_ordered(), then we need not take the
LOCK_prepare_ordered mutex.
If there is no commit_ordered(), then we need not take the LOCK_commit_ordered
mutex.
If there is no group_log_xid(), then we only need the queue to ensure same
ordering of transactions for commit_ordered() as for prepare_ordered(). Thus,
if either of these (or both) are also not present, we do not need to use the
queue at all.
2. Binlog code changes (log.cc)
The bulk of the work needed for the binary log is to extend the code to allow
group commit to the log. Unlike InnoDB/XtraDB, there is no existing support
inside the binlog code for group commit.
The existing code runs most of the write + fsync to the binary log under the
global LOCK_log mutex, preventing any group commit.
To enable group commit, this code must be split into two parts:
- one part that runs per transaction, re-writing the embedded event positions
for the correct offset, and writing this into the in-memory log cache.
- another part that writes a set of transactions to the disk, and runs
fsync().
Then in group_log_xid(), we can run the first part in a loop over all the
transactions in the passed-in queue, and run the second part only once.
The binlog code also has other code paths that write into the binlog,
eg. non-transactional statements. These have to be adapted also to work with
the new code.
In order to get some group commit facility for these also, we change that part
of the code in a similar way to ha_commit_trans. We keep another,
binlog-internal queue of such non-transactional binlog writes, and such writes
queue up here before sleeping on the LOCK_log mutex. Once a thread obtains the
LOCK_log, it loops over the queue for the fast part, and does the slow part
once, then finally wakes up the others in the queue.
In the transactional case in group_log_xid(), before we run the passed-in
queue, we add any members found in the binlog-internal queue. This allows
these non-transactional writes to share the group commit.
However, in the case where it is a non-transactional write that gets the
LOCK_log, the transactional transactions from the ha_commit_trans() queue will
not be able to take part (they will have to wait for their turn to do another
fsync). It seems difficult to cleanly let the binlog code grab the queue from
out of the ha_commit_trans() algorithm. I think the group commit is mostly
useful in transactional workloads anyway (non-transactional engines will lose
data anyway in case of crash, so why fsync() after each transaction?)
3. XtraDB changes (ha_innodb.cc)
The changes needed in XtraDB are comparatively simple, as XtraDB already
implements group commit, it just needs to be enabled with the new
commit_ordered() call.
The existing commit() method already is logically in two parts. The first part
runs under the prepare_commit_mutex() and must be run in same order as binlog
commit. This part needs to be moved to commit_ordered(). The second part runs
after releasing prepare_commit_mutex and does transaction log write+fsync; it
can remain.
Then the prepare_commit_mutex is removed (and the enable_unsafe_group_commit
XtraDB option to disable it).
There are two asserts that check that the thread running the first part of
XtraDB commit is the same as the thread running the other operations for the
transaction. These have to be removed (as commit_ordered() can run in a
different thread). Also an error reporting with sql_print_error() has to be
delayed until commit() time.
4. Proof-of-concept implementation
There is a proof-of-concept implementation of this architecture, in the form
of a quilt patch series [3].
A quick benchmark was done, with sync_binlog=1 and
innodb_flush_log_at_trx_commit=1. 64 parallel threads doing single-row
transactions against one table.
Without the patch, we get only 25 queries per second.
With the patch, we get 650 queries per second.
5. Open issues/tasks
5.1 XA / other prepare() and commit() call sites.
Check that user-level XA is handled correctly and working. And covered
sufficiently with tests. Also check that any other calls of ha->prepare() and
ha->commit() outside of ha_commit_trans() are handled correctly.
5.2 Testing
This worklog needs additions to the test suite, including error inserts to
check error handling, and synchronisation points to check thread parallelism
correctness.
6. Alternative implementations
- The binlog code maintains its own extra atomic transaction queue to handle
non-transactional commits in a good way together with transactional (with
respect to group commit). Alternatively, we could ignore this issue and
just give up on group commit for non-transactional statements, for some
code simplifications.
- The binlog code has two ways to prepare end_event and similar, one that
uses stack-allocation, and another for when stack allocation is not
possible that uses thd->mem_root. Probably the overhead of thd->mem_root is
so small that it would make sense to use the same code for both cases.
- Instead of adding extra fields to THD, we could allocate a separate
structure on the thd->mem_root() with the required extra fields (including
the THD pointer). Would seem to require initialising mutexes at every
commit though.
- It would probably be a good idea to implement TC_LOG_MMAP::group_log_xid()
(should not be hard).
-----------------------------------------------------------------------
References:
[2] https://secure.wikimedia.org/wikipedia/en/wiki/ABA_problem
[3] https://knielsen-hq.org/maria/patches.mwl116/
ESTIMATED WORK TIME
ESTIMATED COMPLETION DATE
-----------------------------------------------------------------------
WorkLog (v3.5.9)
1
0

[Maria-developers] Updated (by Guest): Efficient group commit for binary log (116)
by worklog-noreply@askmonty.org 01 Jun '10
by worklog-noreply@askmonty.org 01 Jun '10
01 Jun '10
-----------------------------------------------------------------------
WORKLOG TASK
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
TASK...........: Efficient group commit for binary log
CREATION DATE..: Mon, 26 Apr 2010, 13:28
SUPERVISOR.....: Knielsen
IMPLEMENTOR....: Knielsen
COPIES TO......: Serg
CATEGORY.......: Server-RawIdeaBin
TASK ID........: 116 (http://askmonty.org/worklog/?tid=116)
VERSION........: Server-9.x
STATUS.........: Assigned
PRIORITY.......: 60
WORKED HOURS...: 60
ESTIMATE.......: 0 (hours remain)
ORIG. ESTIMATE.: 0
PROGRESS NOTES:
-=-=(Guest - Tue, 01 Jun 2010, 14:20)=-=-
Status updated.
--- /tmp/wklog.116.old.32652 2010-06-01 14:20:15.000000000 +0000
+++ /tmp/wklog.116.new.32652 2010-06-01 14:20:15.000000000 +0000
@@ -1 +1 @@
-Un-Assigned
+Assigned
-=-=(Knielsen - Mon, 31 May 2010, 06:48)=-=-
Finish first architecture draft (changed my mind a number of times before I was satisfied).
Write up architecture in worklog.
Fix remaining test failures in proof-of-concept patch + implement xtradb part.
Run some benchmarks on proof-of-concept implementation.
Worked 11 hours and estimate 0 hours remain (original estimate increased by 11 hours).
-=-=(Knielsen - Tue, 25 May 2010, 13:19)=-=-
Low Level Design modified.
--- /tmp/wklog.116.old.14255 2010-05-25 13:19:00.000000000 +0000
+++ /tmp/wklog.116.new.14255 2010-05-25 13:19:00.000000000 +0000
@@ -1 +1,363 @@
+1. Changes for ha_commit_trans()
+
+The gut of the code for commit is in the function ha_commit_trans() (and in
+commit_one_phase() which is called from it). This must be extended to use the
+new prepare_ordered(), group_log_xid(), and commit_ordered() calls.
+
+1.1 Atomic queue of committing transactions
+
+To keep the right commit order among participants, we put transactions into a
+queue. The operations on the queue are non-locking:
+
+ - Insert THD at the head of the queue, and return old queue.
+
+ THD *enqueue_atomic(THD *thd)
+
+ - Fetch (and delete) the whole queue.
+
+ THD *atomic_grab_reverse_queue()
+
+These are simple to implement with atomic compare-and-set. Note that there is
+no ABA problem [2], as we do not delete individual elements from the queue, we
+grab the whole queue and replace it with NULL.
+
+A transaction enters the queue when it does prepare_ordered(). This way, the
+scheduling order for prepare_ordered() calls is what determines the sequence
+in the queue and effectively the commit order.
+
+The queue is grabbed by the code doing group_log_xid() and commit_ordered()
+calls. The queue is passed directly to group_log_xid(), and afterwards
+iterated to do individual commit_ordered() calls.
+
+Using a lock-free queue allows prepare_ordered() (for one transaction) to run
+in parallel with commit_ordered (in another transaction), increasing potential
+parallelism.
+
+The queue is simply a linked list of THD objects, linked through a
+THD::next_commit_ordered field. Since we add at the head of the queue, the
+list is actually in reverse order, so must be reversed when we grab and delete
+it.
+
+The reason that enqueue_atomic() returns the old queue is so that we can check
+if an insert goes to the head of the queue. The thread at the head of the
+queue will do the sequential part of group commit for everyone.
+
+
+1.2 Locks
+
+1.2.1 Global LOCK_prepare_ordered
+
+This lock is taken to serialise calls to prepare_ordered(). Note that
+effectively, the commit order is decided by the order in which threads obtain
+this lock.
+
+
+1.2.2 Global LOCK_group_commit and COND_group_commit
+
+This lock is used to protect the serial part of group commit. It is taken
+around the code where we grab the queue, call group_log_xid() on the queue,
+and call commit_ordered() on each element of the queue, to make sure they
+happen serialised and in consistent order. It also protects the variable
+group_commit_queue_busy, which is used when not using group_log_xid() to delay
+running over a new queue until the first queue is completely done.
+
+
+1.2.3 Global LOCK_commit_ordered
+
+This lock is taken around calls to commit_ordered(), to ensure they happen
+serialised.
+
+
+1.2.4 Per-thread thd->LOCK_commit_ordered and thd->COND_commit_ordered
+
+This lock protects the thd->group_commit_ready variable, as well as the
+condition variable used to wake up threads after log_xid() and
+commit_ordered() finishes.
+
+
+1.2.5 Global LOCK_group_commit_queue
+
+This is only used on platforms with no native compare-and-set operations, to
+make the queue operations atomic.
+
+
+1.3 Commit algorithm.
+
+This is the basic algorithm, simplified by
+
+ - omitting some error handling
+
+ - omitting looping over all handlers when invoking handler methods
+
+ - omitting some possible optimisations when not all calls needed (see next
+ section).
+
+ - Omitting the case where no group_log_xid() is used, see below.
+
+---- BEGIN ALGORITHM ----
+ ht->prepare()
+
+ // Call prepare_ordered() and enqueue in correct commit order
+ lock(LOCK_prepare_ordered)
+ ht->prepare_ordered()
+ old_queue= enqueue_atomic(thd)
+ thd->group_commit_ready= FALSE
+ is_group_commit_leader= (old_queue == NULL)
+ unlock(LOCK_prepare_ordered)
+
+ if (is_group_commit_leader)
+
+ // The first in queue handles group commit for everyone
+
+ lock(LOCK_group_commit)
+ // Wait while queue is busy, see below for when this occurs
+ while (group_commit_queue_busy)
+ cond_wait(COND_group_commit)
+
+ // Grab and reverse the queue to get correct order of transactions
+ queue= atomic_grab_reverse_queue()
+
+ // This call will set individual error codes in thd->xid_error
+ // It also sets the cookie for unlog() in thd->xid_cookie
+ group_log_xid(queue)
+
+ lock(LOCK_commit_ordered)
+ for (other IN queue)
+ if (!other->xid_error)
+ ht->commit_ordered()
+ unlock(LOCK_commit_ordered)
+
+ unlock(LOCK_group_commit)
+
+ // Now we are done, so wake up all the others.
+ for (other IN TAIL(queue))
+ lock(other->LOCK_commit_ordered)
+ other->group_commit_ready= TRUE
+ cond_signal(other->COND_commit_ordered)
+ unlock(other->LOCK_commit_ordered)
+ else
+ // If not the leader, just wait until leader did the work for us.
+ lock(thd->LOCK_commit_ordered)
+ while (!thd->group_commit_ready)
+ cond_wait(thd->LOCK_commit_ordered, thd->COND_commit_ordered)
+ unlock(other->LOCK_commit_ordered)
+
+ // Finally do any error reporting now that we're back in own thread.
+ if (thd->xid_error)
+ xid_delayed_error(thd)
+ else
+ ht->commit(thd)
+ unlog(thd->xid_cookie, thd->xid)
+---- END ALGORITHM ----
+
+If the transaction coordinator does not support group_log_xid(), we have to do
+things differently. In this case after the serialisation point at
+prepare_ordered(), we have to parallelise again when running log_xid()
+(otherwise we would loose group commit). But then when log_xid() is done, we
+have to serialise again to check for any error and call commit_ordered() in
+correct sequence for any transaction where log_xid() did not return error.
+
+The central part of the algorithm in this case (when using log_xid()) is:
+
+---- BEGIN ALGORITHM ----
+ cookie= log_xid(thd)
+ error= (cookie == 0)
+
+ if (is_group_commit_leader)
+
+ // The first to enqueue grabs the queue and runs first.
+ // But we must wait until a previous queue run is fully done.
+
+ lock(LOCK_group_commit)
+ while (group_commit_queue_busy)
+ cond_wait(COND_group_commit)
+ queue= atomic_grab_reverse_queue()
+ // The queue will be busy until last thread in it is done.
+ group_commit_queue_busy= TRUE
+ unlock(LOCK_group_commit)
+ else
+ // Not first in queue -> wait for previous one to wake us up.
+ lock(thd->LOCK_commit_ordered)
+ while (!thd->group_commit_ready)
+ cond_wait(thd->LOCK_commit_ordered, thd->COND_commit_ordered)
+ unlock(other->LOCK_commit_ordered)
+
+ if (!error) // Only if log_xid() was successful
+ lock(LOCK_commit_ordered)
+ ht->commit_ordered()
+ unlock(LOCK_commit_ordered)
+
+ // Wake up the next thread, and release queue in last.
+ next= thd->next_commit_ordered
+
+ if (next)
+ lock(next->LOCK_commit_ordered)
+ next->group_commit_ready= TRUE
+ cond_signal(next->COND_commit_ordered)
+ unlock(next->LOCK_commit_ordered)
+ else
+ lock(LOCK_group_commit)
+ group_commit_queue_busy= FALSE
+ unlock(LOCK_group_commit)
+---- END ALGORITHM ----
+
+There are a number of locks taken in the algorithm, but in the group_log_xid()
+case most of them should be uncontended most of the time. The
+LOCK_group_commit of course will be contended, as new threads queue up waiting
+for the previous group commit (and binlog fsync()) to finish so they can do
+the next group commit. This is the whole point of implementing group commit.
+
+The LOCK_prepare_ordered and LOCK_commit_ordered mutexes should be not much
+contended as long as handlers follow the intension of having the corresponding
+handler calls execute quickly.
+
+The per-thread LOCK_commit_ordered mutexes should not be contended; they are
+only used to wake up a sleeping thread.
+
+
+1.4 Optimisations when not using all three new calls
+
+
+The prepare_ordered(), group_log_xid(), and commit_ordered() methods are
+optional, and if not implemented by a particular handler/transaction
+coordinator, we can optimise the algorithm to take advantage of not having to
+keep ordering for the missing parts.
+
+If there is no prepare_ordered(), then we need not take the
+LOCK_prepare_ordered mutex.
+
+If there is no commit_ordered(), then we need not take the LOCK_commit_ordered
+mutex.
+
+If there is no group_log_xid(), then we only need the queue to ensure same
+ordering of transactions for commit_ordered() as for prepare_ordered(). Thus,
+if either of these (or both) are also not present, we do not need to use the
+queue at all.
+
+
+2. Binlog code changes (log.cc)
+
+
+The bulk of the work needed for the binary log is to extend the code to allow
+group commit to the log. Unlike InnoDB/XtraDB, there is no existing support
+inside the binlog code for group commit.
+
+The existing code runs most of the write + fsync to the binary lock under the
+global LOCK_log mutex, preventing any group commit.
+
+To enable group commit, this code must be split into two parts:
+
+ - one part that runs per transaction, re-writing the embedded event positions
+ for the correct offset, and writing this into the in-memory log cache.
+
+ - another part that writes a set of transactions to the disk, and runs
+ fsync().
+
+Then in group_log_xid(), we can run the first part in a loop over all the
+transactions in the passed-in queue, and run the second part only once.
+
+The binlog code also has other code paths that write into the binlog,
+eg. non-transactional statements. These have to be adapted also to work with
+the new code.
+
+In order to get some group commit facility for these also, we change that part
+of the code in a similar way to ha_commit_trans. We keep another,
+binlog-internal queue of such non-transactional binlog writes, and such writes
+queue up here before sleeping on the LOCK_log mutex. Once a thread obtains the
+LOCK_log, it loops over the queue for the fast part, and does the slow part
+once, then finally wakes up the others in the queue.
+
+In the transactional case in group_log_xid(), before we run the passed-in
+queue, we add any members found in the binlog-internal queue. This allows
+these non-transactional writes to share the group commit.
+
+However, in the case where it is a non-transactional write that gets the
+LOCK_log, the transactional transactions from the ha_commit_trans() queue will
+not be able to take part (they will have to wait for their turn to do another
+fsync). It seems difficult to cleanly let the binlog code grab the queue from
+out of the ha_commit_trans() algorithm. I think the group commit is mostly
+useful in transactional workloads anyway (non-transactional engines will loose
+data anyway in case of crash, so why fsync() after each transaction?)
+
+
+3. XtraDB changes (ha_innodb.cc)
+
+The changes needed in XtraDB are comparatively simple, as XtraDB already
+implements group commit, it just needs to be enabled with the new
+commit_ordered() call.
+
+The existing commit() method already is logically in two parts. The first part
+runs under the prepare_commit_mutex() and must be run in same order as binlog
+commit. This part needs to be moved to commit_ordered(). The second part runs
+after releasing prepare_commit_mutex and does transaction log write+fsync; it
+can remain.
+
+Then the prepare_commit_mutex is removed (and the enable_unsafe_group_commit
+XtraDB option to disable it).
+
+There are two asserts that check that the thread running the first part of
+XtraDB commit is the same as the thread running the other operations for the
+transaction. These have to be removed (as commit_ordered() can run in a
+different thread). Also an error reporting with sql_print_error() has to be
+delayed until commit() time.
+
+
+4. Proof-of-concept implementation
+
+There is a proof-of-concept implementation of this architecture, in the form
+of a quilt patch series [3].
+
+A quick benchmark was done, with sync_binlog=1 and
+innodb_flush_log_at_trx_commit=1. 64 parallel threads doing single-row
+transactions against one table.
+
+Without the patch, we get only 25 queries per second.
+
+With the patch, we get 650 queries per second.
+
+
+5. Open issues/tasks
+
+5.1 XA / other prepare() and commit() call sites.
+
+Check that user-level XA is handled correctly and working. And covered
+sufficiently with tests. Also check that any other calls of ha->prepare() and
+ha->commit() outside of ha_commit_trans() are handled correctly.
+
+5.2 Testing
+
+This worklog needs additions to the test suite, including error inserts to
+check error handling, and synchronisation points to check thread parallelism
+correctness.
+
+
+6. Alternative implementations
+
+ - The binlog code maintains its own extra atomic transaction queue to handle
+ non-transactional commits in a good way together with transactional (with
+ respect to group commit). Alternatively, we could ignore this issue and
+ just give up on group commit for non-transactional statements, for some
+ code simplifications.
+
+ - The binlog code has two ways to prepare end_event and similar, one that
+ uses stack-allocation, and another for when stack allocation is not
+ possible that uses thd->mem_root. Probably the overhead of thd->mem_root is
+ so small that it would make sense to use the same code for both cases.
+
+ - Instead of adding extra fields to THD, we could allocate a separate
+ structure on the thd->mem_root() with the required extra fields (including
+ the THD pointer). Would seem to require initialising mutexes at every
+ commit though.
+
+ - It would probably be a good idea to implement TC_LOG_MMAP::group_log_xid()
+ (should not be hard).
+
+
+-----------------------------------------------------------------------
+
+References:
+
+[2] https://secure.wikimedia.org/wikipedia/en/wiki/ABA_problem
+
+[3] https://knielsen-hq.org/maria/patches.mwl116/
-=-=(Knielsen - Tue, 25 May 2010, 13:18)=-=-
High-Level Specification modified.
--- /tmp/wklog.116.old.14249 2010-05-25 13:18:34.000000000 +0000
+++ /tmp/wklog.116.new.14249 2010-05-25 13:18:34.000000000 +0000
@@ -1 +1,157 @@
+The basic idea in group commit is that multiple threads, each handling one
+transaction, prepare for commit and then queue up together waiting to do an
+fsync() on the transaction log. Then once the log is available, a single
+thread does the fsync() + other necessary book-keeping for all of the threads
+at once. After this, the single thread signals the other threads that it's
+done and they can finish up and return success (or failure) from the commit
+operation.
+
+So group commit has a parallel part, and a sequential part. So we need a
+facility for engines/binlog to participate in both the parallel and the
+sequential part.
+
+To do this, we add two new handlerton methods:
+
+ int (*prepare_ordered)(handlerton *hton, THD *thd, bool all);
+ void (*commit_ordered)(handlerton *hton, THD *thd, bool all);
+
+The idea is that the existing prepare() and commit() methods run in the
+parallel part of group commit, and the new prepare_ordered() and
+commit_ordered() run in the sequential part.
+
+The prepare_ordered() method is called after prepare(). The order of
+transactions that call into prepare_ordered() is guaranteed to be the same among
+all storage engines and binlog, and it is serialised so no two calls can be
+running inside the same engine at the same time.
+
+The commit_ordered() method is called before commit(), and similarly is
+guaranteed to have same transaction order in all participants, and to be
+serialised within one engine.
+
+As the prepare_ordered() and commit_ordered() calls are serialised, the idea
+is that handlers should do the minimum amount of work needed in these calls,
+delegating most of the work (eg. fsync() ...) to prepare() and commit().
+
+As a concrete example, for InnoDB the commit_ordered() method will do the
+first part of commit that fixes the commit order in the transaction log
+buffer, and the commit() method will write the log to disk and fsync()
+it. This split already exists inside the InnoDB code, running before
+respectively after releasing the prepare_commit_mutex.
+
+In addition, the XA transaction coordinator (TC_LOG) is special, since it is
+the one responsible for deciding whether to commit or rollback the
+transaction. For this we need an extra method, since this decision can be done
+only after we know that all prepare() and prepare_ordered() calls succeed, and
+must be done to know whether to call commit_ordered()/commit(), or do rollback.
+
+The existing method for this is TC_LOG::log_xid(). To make group
+commit simpler to implement in a transaction coordinator and more efficient,
+we introduce a new method:
+
+ void group_log_xid(THD *first_thd);
+
+This method runs in the sequential part of group commit. It receives a list of
+transactions to perform log_xid() on, in the correct commit order. (Note that
+TC_LOG can do parallel parts of group commit in its own prepare() and commit()
+methods).
+
+This method can make it easier to implement the group commit in TC_LOG, as it
+gets directly the list of transactions in the right order. Without it, it
+might need to compute such order anyway in a prepare_ordered() method, and the
+server has to create this ordered list anyway to implement the order guarantee
+for prepare_ordered() and commit_ordered().
+
+This group_log_xid() method also is more efficient, as it avoids some
+inter-thread synchronisation. Since group_log_xid() is serialised, we can run
+it together with all the commit_ordered() method calls and need only a single
+sequential code section. With the log_xid() methods, we would need first a
+sequential part for the prepare_ordered() calls, then a parallel part with
+log_xid() calls (to not lose group commit ability for log_xid()), then again
+a sequential part for the commit_ordered() method calls.
+
+The extra synchronisation is needed, as each commit_ordered() call will have
+to wait for log_xid() in one thread (if log_xid() fails then commit_ordered()
+should not be called), and also wait for commit_ordered() to finish in all
+threads handling earlier commits. In effect we will need to bounce the
+execution from one thread to the other among all participants in the group
+commit.
+
+As a consequence of the group_log_xid() optimisation, handlers must be aware
+that the commit_ordered() call can happen in another thread than the one
+running commit() (so thread local storage is not available). This should not
+be a big issue as the THD is available for storing any needed information.
+
+Since group_log_xid() runs for multiple transactions in a single thread, it
+can not do error reporting (my_error()) as that relies on thread local
+storage. Instead it sets an error code in THD::xid_error, and if there is an
+error then later another method will be called (in correct thread context) to
+actually report the error:
+
+ int xid_delayed_error(THD *thd)
+
+The three new methods prepare_ordered(), group_log_xid(), and commit_ordered()
+are optional (as is xid_delayed_error). A storage engine or transaction
+coordinator is free to not implement them if they are not needed. In this case
+there will be no order guarantee for the corresponding stage of group commit
+for that engine. For example, InnoDB needs no ordering of the prepare phase,
+so can omit implementing prepare_ordered(); TC_LOG_MMAP needs no ordering at
+all, so does not need to implement any of them.
+
+Note in particular that all existing engines (/binlog implementations if they
+exist) will work unmodified (and also without any change in group commit
+facilities or commit order guarantees).
+
+Using these new APIs, the work will be to
+
+ - In ha_commit_trans(), implement the correct semantics for the three new
+ calls.
+
+ - In XtraDB, use the new commit_ordered() call to remove the
+   prepare_commit_mutex (and resurrect group commit) without losing the
+ consistency with binlog commit order.
+
+ - In log.cc (binlog module), implement group_log_xid() to do group commit of
+ multiple transactions to the binlog with a single shared fsync() call.
+
+-----------------------------------------------------------------------
+Some possible alternative for this worklog:
+
+ - We could eliminate the group_log_xid() method for a simpler API, at the
+ cost of extra synchronisation between threads to do in-order
+ commit_ordered() method calls. This would also allow to call
+ commit_ordered() in the correct thread context.
+
+ - Alternatively, we could eliminate log_xid() and require that all
+ transaction coordinators implement group_log_xid() instead, again for some
+ moderate simplification.
+
+ - At the moment there is no plugin actually using prepare_ordered(), so, it
+ could be removed from the design. But it fits in well, is efficient to
+ implement, and could be useful later (eg. for the requested feature of
+ releasing locks early in InnoDB).
+
+-----------------------------------------------------------------------
+Some possible follow-up projects after this is implemented:
+
+ - Add statistics about how efficient group commit is (#fsyncs/#commits in
+ each engine and binlog).
+
+ - Implement an XtraDB prepare_ordered() method that can release row locks
+ early (Mark Callaghan from Facebook advocates this, but need to determine
+ exactly how to do this safely).
+
+ - Implement a new crash recovery algorithm that uses the consistent commit
+ ordering to need only fsync() for the binlog. At crash recovery, any
+   missing transactions in an engine are replayed from the correct point in the
+ binlog (this point must be stored transactionally inside the engine, as
+ XtraDB already does today).
+
+ - Implement that START TRANSACTION WITH CONSISTENT SNAPSHOT 1) really gets a
+   consistent snapshot, with same set of committed and not committed
+ transactions in all engines, 2) returns a corresponding consistent binlog
+ position. This should be easy by piggybacking on the synchronisation
+ implemented for ha_commit_trans().
+
+ - Use this in XtraBackup to get consistent binlog position without having to
+ block all updates with FLUSH TABLES WITH READ LOCK.
-=-=(Knielsen - Tue, 25 May 2010, 13:18)=-=-
High Level Description modified.
--- /tmp/wklog.116.old.14234 2010-05-25 13:18:07.000000000 +0000
+++ /tmp/wklog.116.new.14234 2010-05-25 13:18:07.000000000 +0000
@@ -21,3 +21,69 @@
http://kristiannielsen.livejournal.com/12408.html
http://kristiannielsen.livejournal.com/12553.html
+----
+
+Implementing group commit in MySQL faces some challenges from the handler
+plugin architecture:
+
+1. Because storage engine handlers have separate transaction log from the
+mysql binlog (and from each other), there are multiple fsync() calls per
+commit that need the group commit optimisation (2 per participating storage
+engine + 1 for binlog).
+
+2. The code handling commit is split in several places, in main server code
+and in storage engine code. With pluggable binlog it will be split even
+more. This requires a good abstract yet powerful API to be able to implement
+group commit simply and efficiently in plugins without the different parts
+having to rely on internals of the others.
+
+3. We want the order of commits to be the same in all engines participating in
+multiple transactions. This requirement is the reason that InnoDB currently
+breaks group commit with the infamous prepare_commit_mutex.
+
+While currently there is no server guarantee to get same commit order in
+engines and binlog (except for the InnoDB prepare_commit_mutex hack), there are
+several reasons why this could be desirable:
+
+ - InnoDB hot backup needs to be able to extract a binlog position that is
+ consistent with the hot backup to be able to provision a new slave, and
+ this is impossible without imposing at least partial consistent ordering
+ between InnoDB and binlog.
+
+ - Other backup methods could have similar needs, eg. XtraBackup or
+ `mysqldump --single-transaction`, to have consistent commit order between
+ binlog and storage engines without having to do FLUSH TABLES WITH READ LOCK
+ or similar expensive blocking operation. (other backup methods, like LVM
+ snapshot, don't need consistent commit order, as they can restore
+ out-of-order commits during crash recovery using XA).
+
+ - If we have consistent commit order, we can think about optimising commit to
+ need only one fsync (for binlog); lost commits in storage engines can then
+ be recovered from the binlog at crash recovery by re-playing against the
+ engine from a particular point in the binlog.
+
+ - With consistent commit order, we can get better semantics for START
+ TRANSACTION WITH CONSISTENT SNAPSHOT with multi-engine transactions (and we
+ could even get it to return also a matching binlog position). Currently,
+ this "CONSISTENT SNAPSHOT" can be inconsistent among multiple storage
+ engines.
+
+ - In InnoDB, the performance in the presence of hotspots can be improved if
+ we can release row locks early in the commit phase, but this requires that we
+release them in
+ the same order as commits in the binlog to ensure consistency between
+ master and slaves.
+
+ - There were some discussions around Galera [1] synchronous replication and
+ global transaction ID that it needed consistent commit order among
+ participating engines.
+
+ - I believe there could be other applications for guaranteed consistent
+ commit order, and that the architecture described in this worklog can
+ implement such guarantee with reasonable overhead.
+
+
+References:
+
+[1] Galera: http://www.codership.com/products/galera_replication
+
-=-=(Knielsen - Tue, 25 May 2010, 08:28)=-=-
More thoughts on and changes to the archtecture. Got to something now that I am satisfied with and
that seems to be able to handle all issues.
Implement new prepare_ordered and commit_ordered handler methods and the logic in ha_commit_trans().
Implement TC_LOG::group_log_xid() method and logic in ha_commit_trans().
Implement XtraDB part, using commit_ordered() rather than prepare_commit_mutex.
Fix test suite failures.
Proof-of-concept patch series complete now.
Do initial benchmark, getting good results. With 64 threads, see 26x improvement in queries-per-sec.
Next step: write up the architecture description.
Worked 21 hours and estimate 0 hours remain (original estimate increased by 21 hours).
-=-=(Knielsen - Wed, 12 May 2010, 06:41)=-=-
Started work on a Quilt patch series, refactoring the binlog code to prepare for implementing the
group commit, and working on the design of group commit in parallel.
Found and fixed several problems in error handling when writing to binlog.
Removed redundant table map version locking.
Split binlog writing into two parts in preparations for group commit. When ready to write to the
binlog, threads enter a queue, and the first thread in the queue handles the binlog writing for
everyone. When it obtains the LOCK_log, it first loops over all threads, executing the first part of
binlog writing (the write(2) syscall essentially). It then runs the second part (fsync(2)
essentially) only once, and then wakes up the remaining threads in the queue.
Still to be done:
Finish the proof-of-concept group commit patch, by 1) implementing the prepare_fast() and
commit_fast() callbacks in handler.cc 2) move the binlog thread enqueue from log_xid() to
binlog_prepare_fast(), 3) move fast part of InnoDB commit to innobase_commit_fast(), removing the
prepare_commit_mutex().
Write up the final design in this worklog.
Evaluate the design to see if we can do better/different.
Think about possible next steps, such as releasing innodb row locks early (in
innobase_prepare_fast), and doing crash recovery by replaying transactions from the binlog (removing
the need for engine durability and 2 of 3 fsync() in commit).
Worked 28 hours and estimate 0 hours remain (original estimate increased by 28 hours).
-=-=(Serg - Mon, 26 Apr 2010, 14:10)=-=-
Observers changed: Serg
DESCRIPTION:
Currently, in order to ensure that the server can recover after a crash to a
state in which storage engines and binary log are consistent with each other,
it is necessary to use XA with durable commits for both storage engines
(innodb_flush_log_at_trx_commit=1) and binary log (sync_binlog=1).
This is _very_ expensive, since the server needs to do three fsync() operations
for every commit, as there is no working group commit when the binary log is
enabled.
The idea is to
- Implement/fix group commit to work properly with the binary log enabled.
- (Optionally) avoid the need to fsync() in the engine, and instead rely on
replaying any lost transactions from the binary log against the engine
during crash recovery.
For background see these articles:
http://kristiannielsen.livejournal.com/12254.html
http://kristiannielsen.livejournal.com/12408.html
http://kristiannielsen.livejournal.com/12553.html
----
Implementing group commit in MySQL faces some challenges from the handler
plugin architecture:
1. Because storage engine handlers have separate transaction log from the
mysql binlog (and from each other), there are multiple fsync() calls per
commit that need the group commit optimisation (2 per participating storage
engine + 1 for binlog).
2. The code handling commit is split in several places, in main server code
and in storage engine code. With pluggable binlog it will be split even
more. This requires a good abstract yet powerful API to be able to implement
group commit simply and efficiently in plugins without the different parts
having to rely on internals of the others.
3. We want the order of commits to be the same in all engines participating in
multiple transactions. This requirement is the reason that InnoDB currently
breaks group commit with the infamous prepare_commit_mutex.
While currently there is no server guarantee to get same commit order in
engines and binlog (except for the InnoDB prepare_commit_mutex hack), there are
several reasons why this could be desirable:
- InnoDB hot backup needs to be able to extract a binlog position that is
consistent with the hot backup to be able to provision a new slave, and
this is impossible without imposing at least partial consistent ordering
between InnoDB and binlog.
- Other backup methods could have similar needs, eg. XtraBackup or
`mysqldump --single-transaction`, to have consistent commit order between
binlog and storage engines without having to do FLUSH TABLES WITH READ LOCK
or similar expensive blocking operation. (other backup methods, like LVM
snapshot, don't need consistent commit order, as they can restore
out-of-order commits during crash recovery using XA).
- If we have consistent commit order, we can think about optimising commit to
need only one fsync (for binlog); lost commits in storage engines can then
be recovered from the binlog at crash recovery by re-playing against the
engine from a particular point in the binlog.
- With consistent commit order, we can get better semantics for START
TRANSACTION WITH CONSISTENT SNAPSHOT with multi-engine transactions (and we
could even get it to return also a matching binlog position). Currently,
this "CONSISTENT SNAPSHOT" can be inconsistent among multiple storage
engines.
- In InnoDB, the performance in the presence of hotspots can be improved if
we can release row locks early in the commit phase, but this requires that we
release them in
the same order as commits in the binlog to ensure consistency between
master and slaves.
- There were some discussions around Galera [1] synchronous replication and
global transaction ID that it needed consistent commit order among
participating engines.
- I believe there could be other applications for guaranteed consistent
commit order, and that the architecture described in this worklog can
implement such guarantee with reasonable overhead.
References:
[1] Galera: http://www.codership.com/products/galera_replication
HIGH-LEVEL SPECIFICATION:
The basic idea in group commit is that multiple threads, each handling one
transaction, prepare for commit and then queue up together waiting to do an
fsync() on the transaction log. Then once the log is available, a single
thread does the fsync() + other necessary book-keeping for all of the threads
at once. After this, the single thread signals the other threads that it's
done and they can finish up and return success (or failure) from the commit
operation.
So group commit has a parallel part, and a sequential part. So we need a
facility for engines/binlog to participate in both the parallel and the
sequential part.
To do this, we add two new handlerton methods:
int (*prepare_ordered)(handlerton *hton, THD *thd, bool all);
void (*commit_ordered)(handlerton *hton, THD *thd, bool all);
The idea is that the existing prepare() and commit() methods run in the
parallel part of group commit, and the new prepare_ordered() and
commit_ordered() run in the sequential part.
The prepare_ordered() method is called after prepare(). The order of
transactions that call into prepare_ordered() is guaranteed to be the same among
all storage engines and binlog, and it is serialised so no two calls can be
running inside the same engine at the same time.
The commit_ordered() method is called before commit(), and similarly is
guaranteed to have same transaction order in all participants, and to be
serialised within one engine.
As the prepare_ordered() and commit_ordered() calls are serialised, the idea
is that handlers should do the minimum amount of work needed in these calls,
delegating most of the work (eg. fsync() ...) to prepare() and commit().
As a concrete example, for InnoDB the commit_ordered() method will do the
first part of commit that fixes the commit order in the transaction log
buffer, and the commit() method will write the log to disk and fsync()
it. This split already exists inside the InnoDB code, running before
respectively after releasing the prepare_commit_mutex.
In addition, the XA transaction coordinator (TC_LOG) is special, since it is
the one responsible for deciding whether to commit or rollback the
transaction. For this we need an extra method, since this decision can be done
only after we know that all prepare() and prepare_ordered() calls succeed, and
must be done to know whether to call commit_ordered()/commit(), or do rollback.
The existing method for this is TC_LOG::log_xid(). To make group
commit simpler to implement in a transaction coordinator and more efficient,
we introduce a new method:
void group_log_xid(THD *first_thd);
This method runs in the sequential part of group commit. It receives a list of
transactions to perform log_xid() on, in the correct commit order. (Note that
TC_LOG can do parallel parts of group commit in its own prepare() and commit()
methods).
This method can make it easier to implement the group commit in TC_LOG, as it
gets directly the list of transactions in the right order. Without it, it
might need to compute such order anyway in a prepare_ordered() method, and the
server has to create this ordered list anyway to implement the order guarantee
for prepare_ordered() and commit_ordered().
This group_log_xid() method also is more efficient, as it avoids some
inter-thread synchronisation. Since group_log_xid() is serialised, we can run
it together with all the commit_ordered() method calls and need only a single
sequential code section. With the log_xid() methods, we would need first a
sequential part for the prepare_ordered() calls, then a parallel part with
log_xid() calls (to not lose group commit ability for log_xid()), then again
a sequential part for the commit_ordered() method calls.
The extra synchronisation is needed, as each commit_ordered() call will have
to wait for log_xid() in one thread (if log_xid() fails then commit_ordered()
should not be called), and also wait for commit_ordered() to finish in all
threads handling earlier commits. In effect we will need to bounce the
execution from one thread to the other among all participants in the group
commit.
As a consequence of the group_log_xid() optimisation, handlers must be aware
that the commit_ordered() call can happen in another thread than the one
running commit() (so thread local storage is not available). This should not
be a big issue as the THD is available for storing any needed information.
Since group_log_xid() runs for multiple transactions in a single thread, it
can not do error reporting (my_error()) as that relies on thread local
storage. Instead it sets an error code in THD::xid_error, and if there is an
error then later another method will be called (in correct thread context) to
actually report the error:
int xid_delayed_error(THD *thd)
The three new methods prepare_ordered(), group_log_xid(), and commit_ordered()
are optional (as is xid_delayed_error). A storage engine or transaction
coordinator is free to not implement them if they are not needed. In this case
there will be no order guarantee for the corresponding stage of group commit
for that engine. For example, InnoDB needs no ordering of the prepare phase,
so can omit implementing prepare_ordered(); TC_LOG_MMAP needs no ordering at
all, so does not need to implement any of them.
Note in particular that all existing engines (/binlog implementations if they
exist) will work unmodified (and also without any change in group commit
facilities or commit order guarantees).
Using these new APIs, the work will be to
- In ha_commit_trans(), implement the correct semantics for the three new
calls.
- In XtraDB, use the new commit_ordered() call to remove the
prepare_commit_mutex (and resurrect group commit) without losing the
consistency with binlog commit order.
- In log.cc (binlog module), implement group_log_xid() to do group commit of
multiple transactions to the binlog with a single shared fsync() call.
-----------------------------------------------------------------------
Some possible alternative for this worklog:
- We could eliminate the group_log_xid() method for a simpler API, at the
cost of extra synchronisation between threads to do in-order
commit_ordered() method calls. This would also allow to call
commit_ordered() in the correct thread context.
- Alternatively, we could eliminate log_xid() and require that all
transaction coordinators implement group_log_xid() instead, again for some
moderate simplification.
- At the moment there is no plugin actually using prepare_ordered(), so, it
could be removed from the design. But it fits in well, is efficient to
implement, and could be useful later (eg. for the requested feature of
releasing locks early in InnoDB).
-----------------------------------------------------------------------
Some possible follow-up projects after this is implemented:
- Add statistics about how efficient group commit is (#fsyncs/#commits in
each engine and binlog).
- Implement an XtraDB prepare_ordered() method that can release row locks
early (Mark Callaghan from Facebook advocates this, but need to determine
exactly how to do this safely).
- Implement a new crash recovery algorithm that uses the consistent commit
ordering to need only fsync() for the binlog. At crash recovery, any
missing transactions in an engine are replayed from the correct point in the
binlog (this point must be stored transactionally inside the engine, as
XtraDB already does today).
- Implement that START TRANSACTION WITH CONSISTENT SNAPSHOT 1) really gets a
consistent snapshot, with same set of committed and not committed
transactions in all engines, 2) returns a corresponding consistent binlog
position. This should be easy by piggybacking on the synchronisation
implemented for ha_commit_trans().
- Use this in XtraBackup to get consistent binlog position without having to
block all updates with FLUSH TABLES WITH READ LOCK.
LOW-LEVEL DESIGN:
1. Changes for ha_commit_trans()
The gut of the code for commit is in the function ha_commit_trans() (and in
commit_one_phase() which is called from it). This must be extended to use the
new prepare_ordered(), group_log_xid(), and commit_ordered() calls.
1.1 Atomic queue of committing transactions
To keep the right commit order among participants, we put transactions into a
queue. The operations on the queue are non-locking:
- Insert THD at the head of the queue, and return old queue.
THD *enqueue_atomic(THD *thd)
- Fetch (and delete) the whole queue.
THD *atomic_grab_reverse_queue()
These are simple to implement with atomic compare-and-set. Note that there is
no ABA problem [2], as we do not delete individual elements from the queue, we
grab the whole queue and replace it with NULL.
A transaction enters the queue when it does prepare_ordered(). This way, the
scheduling order for prepare_ordered() calls is what determines the sequence
in the queue and effectively the commit order.
The queue is grabbed by the code doing group_log_xid() and commit_ordered()
calls. The queue is passed directly to group_log_xid(), and afterwards
iterated to do individual commit_ordered() calls.
Using a lock-free queue allows prepare_ordered() (for one transaction) to run
in parallel with commit_ordered (in another transaction), increasing potential
parallelism.
The queue is simply a linked list of THD objects, linked through a
THD::next_commit_ordered field. Since we add at the head of the queue, the
list is actually in reverse order, so must be reversed when we grab and delete
it.
The reason that enqueue_atomic() returns the old queue is so that we can check
if an insert goes to the head of the queue. The thread at the head of the
queue will do the sequential part of group commit for everyone.
1.2 Locks
1.2.1 Global LOCK_prepare_ordered
This lock is taken to serialise calls to prepare_ordered(). Note that
effectively, the commit order is decided by the order in which threads obtain
this lock.
1.2.2 Global LOCK_group_commit and COND_group_commit
This lock is used to protect the serial part of group commit. It is taken
around the code where we grab the queue, call group_log_xid() on the queue,
and call commit_ordered() on each element of the queue, to make sure they
happen serialised and in consistent order. It also protects the variable
group_commit_queue_busy, which is used when not using group_log_xid() to delay
running over a new queue until the first queue is completely done.
1.2.3 Global LOCK_commit_ordered
This lock is taken around calls to commit_ordered(), to ensure they happen
serialised.
1.2.4 Per-thread thd->LOCK_commit_ordered and thd->COND_commit_ordered
This lock protects the thd->group_commit_ready variable, as well as the
condition variable used to wake up threads after log_xid() and
commit_ordered() finishes.
1.2.5 Global LOCK_group_commit_queue
This is only used on platforms with no native compare-and-set operations, to
make the queue operations atomic.
1.3 Commit algorithm.
This is the basic algorithm, simplified by
- omitting some error handling
- omitting looping over all handlers when invoking handler methods
- omitting some possible optimisations when not all calls needed (see next
section).
- Omitting the case where no group_log_xid() is used, see below.
---- BEGIN ALGORITHM ----
ht->prepare()
// Call prepare_ordered() and enqueue in correct commit order
lock(LOCK_prepare_ordered)
ht->prepare_ordered()
old_queue= enqueue_atomic(thd)
thd->group_commit_ready= FALSE
is_group_commit_leader= (old_queue == NULL)
unlock(LOCK_prepare_ordered)
if (is_group_commit_leader)
// The first in queue handles group commit for everyone
lock(LOCK_group_commit)
// Wait while queue is busy, see below for when this occurs
while (group_commit_queue_busy)
cond_wait(COND_group_commit)
// Grab and reverse the queue to get correct order of transactions
queue= atomic_grab_reverse_queue()
// This call will set individual error codes in thd->xid_error
// It also sets the cookie for unlog() in thd->xid_cookie
group_log_xid(queue)
lock(LOCK_commit_ordered)
for (other IN queue)
if (!other->xid_error)
ht->commit_ordered()
unlock(LOCK_commit_ordered)
unlock(LOCK_group_commit)
// Now we are done, so wake up all the others.
for (other IN TAIL(queue))
lock(other->LOCK_commit_ordered)
other->group_commit_ready= TRUE
cond_signal(other->COND_commit_ordered)
unlock(other->LOCK_commit_ordered)
else
// If not the leader, just wait until leader did the work for us.
lock(thd->LOCK_commit_ordered)
while (!thd->group_commit_ready)
cond_wait(thd->LOCK_commit_ordered, thd->COND_commit_ordered)
unlock(thd->LOCK_commit_ordered)
// Finally do any error reporting now that we're back in own thread.
if (thd->xid_error)
xid_delayed_error(thd)
else
ht->commit(thd)
unlog(thd->xid_cookie, thd->xid)
---- END ALGORITHM ----
If the transaction coordinator does not support group_log_xid(), we have to do
things differently. In this case after the serialisation point at
prepare_ordered(), we have to parallelise again when running log_xid()
(otherwise we would lose group commit). But then when log_xid() is done, we
have to serialise again to check for any error and call commit_ordered() in
correct sequence for any transaction where log_xid() did not return error.
The central part of the algorithm in this case (when using log_xid()) is:
---- BEGIN ALGORITHM ----
cookie= log_xid(thd)
error= (cookie == 0)
if (is_group_commit_leader)
// The first to enqueue grabs the queue and runs first.
// But we must wait until a previous queue run is fully done.
lock(LOCK_group_commit)
while (group_commit_queue_busy)
cond_wait(COND_group_commit)
queue= atomic_grab_reverse_queue()
// The queue will be busy until last thread in it is done.
group_commit_queue_busy= TRUE
unlock(LOCK_group_commit)
else
// Not first in queue -> wait for previous one to wake us up.
lock(thd->LOCK_commit_ordered)
while (!thd->group_commit_ready)
cond_wait(thd->LOCK_commit_ordered, thd->COND_commit_ordered)
unlock(thd->LOCK_commit_ordered)
if (!error) // Only if log_xid() was successful
lock(LOCK_commit_ordered)
ht->commit_ordered()
unlock(LOCK_commit_ordered)
// Wake up the next thread, and release queue in last.
next= thd->next_commit_ordered
if (next)
lock(next->LOCK_commit_ordered)
next->group_commit_ready= TRUE
cond_signal(next->COND_commit_ordered)
unlock(next->LOCK_commit_ordered)
else
lock(LOCK_group_commit)
group_commit_queue_busy= FALSE
unlock(LOCK_group_commit)
---- END ALGORITHM ----
There are a number of locks taken in the algorithm, but in the group_log_xid()
case most of them should be uncontended most of the time. The
LOCK_group_commit of course will be contended, as new threads queue up waiting
for the previous group commit (and binlog fsync()) to finish so they can do
the next group commit. This is the whole point of implementing group commit.
The LOCK_prepare_ordered and LOCK_commit_ordered mutexes should not be much
contended as long as handlers follow the intention of having the corresponding
handler calls execute quickly.
The per-thread LOCK_commit_ordered mutexes should not be contended; they are
only used to wake up a sleeping thread.
1.4 Optimisations when not using all three new calls
The prepare_ordered(), group_log_xid(), and commit_ordered() methods are
optional, and if not implemented by a particular handler/transaction
coordinator, we can optimise the algorithm to take advantage of not having to
keep ordering for the missing parts.
If there is no prepare_ordered(), then we need not take the
LOCK_prepare_ordered mutex.
If there is no commit_ordered(), then we need not take the LOCK_commit_ordered
mutex.
If there is no group_log_xid(), then we only need the queue to ensure same
ordering of transactions for commit_ordered() as for prepare_ordered(). Thus,
if either of these (or both) are also not present, we do not need to use the
queue at all.
2. Binlog code changes (log.cc)
The bulk of the work needed for the binary log is to extend the code to allow
group commit to the log. Unlike InnoDB/XtraDB, there is no existing support
inside the binlog code for group commit.
The existing code runs most of the write + fsync to the binary log under the
global LOCK_log mutex, preventing any group commit.
To enable group commit, this code must be split into two parts:
- one part that runs per transaction, re-writing the embedded event positions
for the correct offset, and writing this into the in-memory log cache.
- another part that writes a set of transactions to the disk, and runs
fsync().
Then in group_log_xid(), we can run the first part in a loop over all the
transactions in the passed-in queue, and run the second part only once.
The binlog code also has other code paths that write into the binlog,
eg. non-transactional statements. These have to be adapted also to work with
the new code.
In order to get some group commit facility for these also, we change that part
of the code in a similar way to ha_commit_trans. We keep another,
binlog-internal queue of such non-transactional binlog writes, and such writes
queue up here before sleeping on the LOCK_log mutex. Once a thread obtains the
LOCK_log, it loops over the queue for the fast part, and does the slow part
once, then finally wakes up the others in the queue.
In the transactional case in group_log_xid(), before we run the passed-in
queue, we add any members found in the binlog-internal queue. This allows
these non-transactional writes to share the group commit.
However, in the case where it is a non-transactional write that gets the
LOCK_log, the transactional transactions from the ha_commit_trans() queue will
not be able to take part (they will have to wait for their turn to do another
fsync). It seems difficult to cleanly let the binlog code grab the queue from
out of the ha_commit_trans() algorithm. I think the group commit is mostly
useful in transactional workloads anyway (non-transactional engines will lose
data anyway in case of crash, so why fsync() after each transaction?)
3. XtraDB changes (ha_innodb.cc)
The changes needed in XtraDB are comparatively simple, as XtraDB already
implements group commit, it just needs to be enabled with the new
commit_ordered() call.
The existing commit() method already is logically in two parts. The first part
runs under the prepare_commit_mutex() and must be run in same order as binlog
commit. This part needs to be moved to commit_ordered(). The second part runs
after releasing prepare_commit_mutex and does transaction log write+fsync; it
can remain.
Then the prepare_commit_mutex is removed (and the enable_unsafe_group_commit
XtraDB option to disable it).
There are two asserts that check that the thread running the first part of
XtraDB commit is the same as the thread running the other operations for the
transaction. These have to be removed (as commit_ordered() can run in a
different thread). Also an error reporting with sql_print_error() has to be
delayed until commit() time.
4. Proof-of-concept implementation
There is a proof-of-concept implementation of this architecture, in the form
of a quilt patch series [3].
A quick benchmark was done, with sync_binlog=1 and
innodb_flush_log_at_trx_commit=1. 64 parallel threads doing single-row
transactions against one table.
Without the patch, we get only 25 queries per second.
With the patch, we get 650 queries per second.
5. Open issues/tasks
5.1 XA / other prepare() and commit() call sites.
Check that user-level XA is handled correctly and working. And covered
sufficiently with tests. Also check that any other calls of ha->prepare() and
ha->commit() outside of ha_commit_trans() are handled correctly.
5.2 Testing
This worklog needs additions to the test suite, including error inserts to
check error handling, and synchronisation points to check thread parallelism
correctness.
6. Alternative implementations
- The binlog code maintains its own extra atomic transaction queue to handle
non-transactional commits in a good way together with transactional (with
respect to group commit). Alternatively, we could ignore this issue and
just give up on group commit for non-transactional statements, for some
code simplifications.
- The binlog code has two ways to prepare end_event and similar, one that
uses stack-allocation, and another for when stack allocation is not
possible that uses thd->mem_root. Probably the overhead of thd->mem_root is
so small that it would make sense to use the same code for both cases.
- Instead of adding extra fields to THD, we could allocate a separate
structure on the thd->mem_root() with the required extra fields (including
the THD pointer). Would seem to require initialising mutexes at every
commit though.
- It would probably be a good idea to implement TC_LOG_MMAP::group_log_xid()
(should not be hard).
-----------------------------------------------------------------------
References:
[2] https://secure.wikimedia.org/wikipedia/en/wiki/ABA_problem
[3] https://knielsen-hq.org/maria/patches.mwl116/
ESTIMATED WORK TIME
ESTIMATED COMPLETION DATE
-----------------------------------------------------------------------
WorkLog (v3.5.9)
1
0

[Maria-developers] bzr commit into file:///home/tsk/mprog/src/5.3-mwl89/ branch (timour:2793)
by timour(a)askmonty.org 01 Jun '10
by timour(a)askmonty.org 01 Jun '10
01 Jun '10
#At file:///home/tsk/mprog/src/5.3-mwl89/ based on revid:timour@askmonty.org-20100527131347-unr62oupctbp912x
2793 timour(a)askmonty.org 2010-06-01
MWL#89: Cost-based choice between Materialization and IN->EXISTS transformation
Phase 2: Changed the code-generation for subquery materialization to be
performed in runtime memory for each (re)execution, instead of in
statement memory (once per prepared statement).
- Item_in_subselect::setup_engine() no longer wraps materialization related
objects to be created in statement memory.
- Merged subselect_hash_sj_engine::init_permanent and
subselect_hash_sj_engine::init_runtime into subselect_hash_sj_engine::init,
which is called for each (re)execution.
- Fixed deletion of the temp table accordingly.
modified:
sql/item_subselect.cc
sql/item_subselect.h
=== modified file 'sql/item_subselect.cc'
--- a/sql/item_subselect.cc 2010-05-27 13:13:47 +0000
+++ b/sql/item_subselect.cc 2010-06-01 11:57:35 +0000
@@ -148,6 +148,7 @@ void Item_in_subselect::cleanup()
Item_subselect::~Item_subselect()
{
delete engine;
+ engine= NULL;
}
Item_subselect::trans_res
@@ -2090,82 +2091,62 @@ void Item_in_subselect::update_used_tabl
bool Item_in_subselect::setup_engine()
{
- subselect_hash_sj_engine *new_engine= NULL;
- bool res= FALSE;
+ subselect_hash_sj_engine *mat_engine= NULL;
+ subselect_single_select_engine *select_engine;
DBUG_ENTER("Item_in_subselect::setup_engine");
+ /*
+ The select (IN=>EXISTS) engine is pre-created already at parse time, and
+ is stored in statment memory (preserved across PS executions).
+ */
+ DBUG_ASSERT(engine->engine_type() == subselect_engine::SINGLE_SELECT_ENGINE);
+ select_engine= (subselect_single_select_engine*) engine;
- if (engine->engine_type() == subselect_engine::SINGLE_SELECT_ENGINE)
- {
- /* Create/initialize objects in permanent memory. */
- subselect_single_select_engine *old_engine;
- Query_arena *arena= thd->stmt_arena, backup;
-
- old_engine= (subselect_single_select_engine*) engine;
-
- if (arena->is_conventional())
- arena= 0;
- else
- thd->set_n_backup_active_arena(arena, &backup);
-
- if (!(new_engine= new subselect_hash_sj_engine(thd, this,
- old_engine)) ||
- new_engine->init_permanent(&old_engine->join->fields_list))
- {
- Item_subselect::trans_res trans_res;
- /*
- If for some reason we cannot use materialization for this IN predicate,
- delete all materialization-related objects, and apply the IN=>EXISTS
- transformation.
- */
- delete new_engine;
- new_engine= NULL;
- exec_method= NOT_TRANSFORMED;
- if (left_expr->cols() == 1)
- trans_res= single_value_in_to_exists_transformer(old_engine->join,
- &eq_creator);
- else
- trans_res= row_value_in_to_exists_transformer(old_engine->join);
- /*
- The IN=>EXISTS transformation above injects new predicates into the
- WHERE and HAVING clauses. Since the subquery was already optimized,
- below we force its reoptimization with the new injected conditions
- by the first call to subselect_single_select_engine::exec().
- This is the only case of lazy subquery optimization in the server.
- */
- DBUG_ASSERT(old_engine->join->optimized);
- old_engine->join->optimized= false;
- res= (trans_res != Item_subselect::RES_OK);
- }
- if (new_engine)
- engine= new_engine;
-
- if (arena)
- thd->restore_active_arena(arena, &backup);
- }
- else
- {
- DBUG_ASSERT(engine->engine_type() == subselect_engine::HASH_SJ_ENGINE);
- new_engine= (subselect_hash_sj_engine*) engine;
- }
+ /* Create/initialize execution objects. */
+ if (!(mat_engine= new subselect_hash_sj_engine(thd, this, select_engine)))
+ DBUG_RETURN(TRUE);
- /* Initilizations done in runtime memory, repeated for each execution. */
- if (new_engine)
+ if (mat_engine->init(&select_engine->join->fields_list))
{
+ Item_subselect::trans_res trans_res;
+ /*
+ If for some reason we cannot use materialization for this IN predicate,
+ delete all materialization-related objects, and apply the IN=>EXISTS
+ transformation.
+ */
+ delete mat_engine;
+ mat_engine= NULL;
+ exec_method= NOT_TRANSFORMED;
+
+ if (left_expr->cols() == 1)
+ trans_res= single_value_in_to_exists_transformer(select_engine->join,
+ &eq_creator);
+ else
+ trans_res= row_value_in_to_exists_transformer(select_engine->join);
/*
- Reset the LIMIT 1 set in Item_exists_subselect::fix_length_and_dec.
- TODO:
- Currently we set the subquery LIMIT to infinity, and this is correct
- because we forbid at parse time LIMIT inside IN subqueries (see
- Item_in_subselect::test_limit). However, once we allow this, here
- we should set the correct limit if given in the query.
+ The IN=>EXISTS transformation above injects new predicates into the
+ WHERE and HAVING clauses. Since the subquery was already optimized,
+ below we force its reoptimization with the new injected conditions
+ by the first call to subselect_single_select_engine::exec().
+ This is the only case of lazy subquery optimization in the server.
*/
- unit->global_parameters->select_limit= NULL;
- if ((res= new_engine->init_runtime()))
- DBUG_RETURN(res);
+ DBUG_ASSERT(select_engine->join->optimized);
+ select_engine->join->optimized= false;
+ DBUG_RETURN(trans_res != Item_subselect::RES_OK);
}
- DBUG_RETURN(res);
+ /*
+ Reset the "LIMIT 1" set in Item_exists_subselect::fix_length_and_dec.
+ TODO:
+ Currently we set the subquery LIMIT to infinity, and this is correct
+ because we forbid at parse time LIMIT inside IN subqueries (see
+ Item_in_subselect::test_limit). However, once we allow this, here
+ we should set the correct limit if given in the query.
+ */
+ unit->global_parameters->select_limit= NULL;
+
+ engine= mat_engine;
+ DBUG_RETURN(FALSE);
}
@@ -3680,14 +3661,14 @@ bitmap_init_memroot(MY_BITMAP *map, uint
@retval FALSE otherwise
*/
-bool subselect_hash_sj_engine::init_permanent(List<Item> *tmp_columns)
+bool subselect_hash_sj_engine::init(List<Item> *tmp_columns)
{
select_union *result_sink;
/* Options to create_tmp_table. */
ulonglong tmp_create_options= thd->options | TMP_TABLE_ALL_COLUMNS;
/* | TMP_TABLE_FORCE_MYISAM; TIMOUR: force MYISAM */
- DBUG_ENTER("subselect_hash_sj_engine::init_permanent");
+ DBUG_ENTER("subselect_hash_sj_engine::init");
if (bitmap_init_memroot(&non_null_key_parts, tmp_columns->elements,
thd->mem_root) ||
@@ -3762,6 +3743,17 @@ bool subselect_hash_sj_engine::init_perm
!(lookup_engine= make_unique_engine()))
DBUG_RETURN(TRUE);
+ /*
+ Repeat name resolution for 'cond' since cond is not part of any
+ clause of the query, and it is not 'fixed' during JOIN::prepare.
+ */
+ if (semi_join_conds && !semi_join_conds->fixed &&
+ semi_join_conds->fix_fields(thd, (Item**)&semi_join_conds))
+ DBUG_RETURN(TRUE);
+ /* Let our engine reuse this query plan for materialization. */
+ materialize_join= materialize_engine->join;
+ materialize_join->change_result(result);
+
DBUG_RETURN(FALSE);
}
@@ -3907,30 +3899,6 @@ subselect_hash_sj_engine::make_unique_en
}
-/**
- Initialize members of the engine that need to be re-initilized at each
- execution.
-
- @retval TRUE if a memory allocation error occurred
- @retval FALSE if success
-*/
-
-bool subselect_hash_sj_engine::init_runtime()
-{
- /*
- Repeat name resolution for 'cond' since cond is not part of any
- clause of the query, and it is not 'fixed' during JOIN::prepare.
- */
- if (semi_join_conds && !semi_join_conds->fixed &&
- semi_join_conds->fix_fields(thd, (Item**)&semi_join_conds))
- return TRUE;
- /* Let our engine reuse this query plan for materialization. */
- materialize_join= materialize_engine->join;
- materialize_join->change_result(result);
- return FALSE;
-}
-
-
subselect_hash_sj_engine::~subselect_hash_sj_engine()
{
delete lookup_engine;
@@ -3967,6 +3935,13 @@ void subselect_hash_sj_engine::cleanup()
count_null_only_columns= 0;
strategy= UNDEFINED;
materialize_engine->cleanup();
+ /*
+ Restore the original Item_in_subselect engine. This engine is created once
+ at parse time and stored across executions, while all other materialization
+ related engines are created and chosen for each execution.
+ */
+ ((Item_in_subselect *) item)->engine= materialize_engine;
+
if (lookup_engine_type == TABLE_SCAN_ENGINE ||
lookup_engine_type == ROWID_MERGE_ENGINE)
{
@@ -3983,6 +3958,9 @@ void subselect_hash_sj_engine::cleanup()
DBUG_ASSERT(lookup_engine->engine_type() == UNIQUESUBQUERY_ENGINE);
lookup_engine->cleanup();
result->cleanup(); /* Resets the temp table as well. */
+ DBUG_ASSERT(tmp_table);
+ free_tmp_table(thd, tmp_table);
+ tmp_table= NULL;
}
=== modified file 'sql/item_subselect.h'
--- a/sql/item_subselect.h 2010-05-27 13:13:47 +0000
+++ b/sql/item_subselect.h 2010-06-01 11:57:35 +0000
@@ -802,8 +802,7 @@ public:
}
~subselect_hash_sj_engine();
- bool init_permanent(List<Item> *tmp_columns);
- bool init_runtime();
+ bool init(List<Item> *tmp_columns);
void cleanup();
int prepare();
int exec();
1
0