[Maria-developers] WL#24 Updated (by Sergei): index_merge: fair choice between index_merge union and range access
by worklog-noreply@askmonty.org 29 Jun '10
-----------------------------------------------------------------------
WORKLOG TASK
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
TASK...........: index_merge: fair choice between index_merge union and range access
CREATION DATE..: Tue, 26 May 2009, 12:10
SUPERVISOR.....: Monty
IMPLEMENTOR....:
COPIES TO......: Psergey
CATEGORY.......: Server-RawIdeaBin
TASK ID........: 24 (http://askmonty.org/worklog/?tid=24)
VERSION........: Server-9.x
STATUS.........: Un-Assigned
PRIORITY.......: 60
WORKED HOURS...: 0
ESTIMATE.......: 0 (hours remain)
ORIG. ESTIMATE.: 0
PROGRESS NOTES:
-=-=(Sergei - Tue, 29 Jun 2010, 14:00)=-=-
Category updated.
--- /tmp/wklog.24.old.31772 2010-06-29 14:00:05.000000000 +0000
+++ /tmp/wklog.24.new.31772 2010-06-29 14:00:05.000000000 +0000
@@ -1 +1 @@
-Server-Sprint
+Server-RawIdeaBin
-=-=(Guest - Sun, 16 Aug 2009, 02:13)=-=-
Low Level Design modified.
--- /tmp/wklog.24.old.23383 2009-08-16 02:13:54.000000000 +0300
+++ /tmp/wklog.24.new.23383 2009-08-16 02:13:54.000000000 +0300
@@ -125,7 +125,7 @@
The optimizer will generate the plans that only use the "col1=c1" part. The
right side of the AND will be ignored even if it has good selectivity.
-(Here an imerge for col2=c2 OR col3=c3 won't be built since neither col2=c2 nor
+(Here no imerge for col2=c2 OR col3=c3 will be built since neither col2=c2 nor
col3=c3 represent index ranges.)
@@ -199,7 +199,7 @@
O2. "Create index_merge accesses when possible"
Current tree_or() will not create index_merge access when it could create
- non-index merge access (see DISCARD-IMERGE-3 and its example in the "Problems
+ non-index merge access (see DISCARD-IMERGE-2 and its example in the "Problems
in the current implementation" section). This will be changed to work as
follows: we will create index_merge made for index scans that didn't have
their match in the other sel_tree.
-=-=(Guest - Sun, 16 Aug 2009, 01:03)=-=-
Low Level Design modified.
--- /tmp/wklog.24.old.20767 2009-08-16 01:03:11.000000000 +0300
+++ /tmp/wklog.24.new.20767 2009-08-16 01:03:11.000000000 +0300
@@ -18,6 +18,8 @@
# a range tree has range access options, possibly for several keys
range_tree = range(key1) AND range(key2) AND ... AND range(keyN);
+ (here range(keyi) may represent ranges not for initial keyi prefixes,
+ but ranges for any infixes for keyi)
# merge tree represents several way to index_merge
imerge_tree = imerge1 AND imerge2 AND ...
@@ -47,13 +49,13 @@
R.add(range_union(A.range(i), B.range(i)));
if (R has at least one range access)
- return R;
+ return R; // DISCARD-IMERGE-2
else
{
/* could not build any range accesses. construct index_merge */
- remove non-ranges from A; // DISCARD-IMERGE-2
+ remove non-ranges from A;
remove non-ranges from B;
- return new index_merge(A, B);
+ return new index_merge(A, B); // DISCARD-IMERGE-3
}
}
else if (A is range tree and B is index_merge tree (or vice versa))
@@ -65,12 +67,12 @@
(range_treeB_11 OR range_treeB_12 OR ... OR range_treeB_1N) AND
(range_treeB_21 OR range_treeB_22 OR ... OR range_treeB_2N) AND
...
- (range_treeB_K1 OR range_treeB_K2 OR ... OR range_treeB_kN) AND
+ (range_treeB_K1 OR range_treeB_K2 OR ... OR range_treeB_kN)
=
(range_treeA OR range_treeB_11 OR ... OR range_treeB_1N) AND
(range_treeA OR range_treeB_21 OR ... OR range_treeB_2N) AND
...
- (range_treeA OR range_treeB_11 OR ... OR range_treeB_1N) AND
+ (range_treeA OR range_treeB_11 OR ... OR range_treeB_1N)
Now each line represents an index_merge..
}
@@ -82,18 +84,18 @@
OR
imergeB1 AND imergeB2 AND ... AND imergeBN
- -> (discard all imergeA{i=2,3,...} -> // DISCARD-IMERGE-3
+ -> (discard all imergeA{i=2,3,...} -> // DISCARD-IMERGE-4
imergeA1
OR
- imergeB1 AND imergeB2 AND ... AND imergeBN =
+ imergeB1 =
- = (combine imergeA1 with each of the imergeB{i} ) =
+ = (combine imergeA1 with each of the range_treeB_1{i} ) =
- combine(imergeA1 OR imergeB1) AND
- combine(imergeA1 OR imergeB2) AND
+ combine(imergeA1 OR range_treeB_11) AND
+ combine(imergeA1 OR range_treeB_12) AND
... AND
- combine(imergeA1 OR imergeBN)
+ combine(imergeA1 OR range_treeB_1N)
}
}
@@ -109,7 +111,7 @@
DISCARD-IMERGE-2 step will cause index_merge option to be discarded when
the WHERE clause has this form (conditions t.badkey may have abritrary form):
- (t.badkey<c1 AND t.key1=c1) OR (t.key1=c2 AND t.badkey < c2)
+ (t.badkey<c1 AND t.key1=c1) OR (t.key2=c2 AND t.badkey < c2)
DISCARD-IMERGE-3 manifests itself as the following effect: suppose there are
two indexes:
@@ -123,6 +125,8 @@
The optimizer will generate the plans that only use the "col1=c1" part. The
right side of the AND will be ignored even if it has good selectivity.
+(Here an imerge for col2=c2 OR col3=c3 won't be built since neither col2=c2 nor
+col3=c3 represent index ranges.)
2. New implementation
-=-=(Guest - Mon, 20 Jul 2009, 17:13)=-=-
Dependency deleted: 30 no longer depends on 24
-=-=(Guest - Sat, 20 Jun 2009, 09:34)=-=-
Low Level Design modified.
--- /tmp/wklog.24.old.21663 2009-06-20 09:34:48.000000000 +0300
+++ /tmp/wklog.24.new.21663 2009-06-20 09:34:48.000000000 +0300
@@ -4,6 +4,7 @@
2. New implementation
2.1 New tree_and()
2.2 New tree_or()
+3. Testing and required coverage
</contents>
1. Current implementation overview
@@ -240,3 +241,14 @@
In order to limit the impact of this combinatorial explosion, we will
introduce a rule that we won't generate more than #defined
MAX_IMERGE_OPTS options.
+
+3. Testing and required coverage
+================================
+So far could find the following user cases:
+
+* BUG#17259: Query optimizer chooses wrong index
+* BUG#17673: Optimizer does not use Index Merge optimization in some cases
+* BUG#23322: Optimizer sometimes erroniously prefers other index over index merge
+* BUG#30151: optimizer is very reluctant to chose index_merge algorithm
+
+
-=-=(Guest - Thu, 18 Jun 2009, 16:55)=-=-
Low Level Design modified.
--- /tmp/wklog.24.old.19152 2009-06-18 16:55:00.000000000 +0300
+++ /tmp/wklog.24.new.19152 2009-06-18 16:55:00.000000000 +0300
@@ -141,13 +141,15 @@
Operations on SEL_ARG trees will be modified to produce/process the trees of
this kind:
+
2.1 New tree_and()
------------------
In order not to lose plans, we'll make these changes:
-1. Don't remove index_merge part of the tree.
+A1. Don't remove index_merge part of the tree (this will take care of
+ DISCARD-IMERGE-1 problem)
-2. Push range conditions down into index_merge trees that may support them.
+A2. Push range conditions down into index_merge trees that may support them.
if one tree has range(key1) and the other tree has imerge(key1 OR key2)
then perform an equvalent of this operation:
@@ -155,8 +157,86 @@
(rangeA(key1) AND rangeB(key1)) OR (rangeA(key1) AND rangeB(key2))
-3. Just as before: if both sel_tree A and sel_tree B have index_merge options,
+A3. Just as before: if both sel_tree A and sel_tree B have index_merge options,
concatenate them together.
-2.2 New tree_or()
+2.2 New tree_or()
+-----------------
+O1. Dont remove non-range plans:
+ Current tree_or() code will refuse to produce index_merge plans for
+ conditions like
+
+ "t.key1part2=const OR t.key2part1=const"
+
+ (this is marked as DISCARD-IMERGE-3). This was justifed as the left part of
+ the AND condition is not usable for range access, and the operation of
+ tree_and() guaranteed that there was no way it could changed to make a
+ usable range plan. With new tree_and() and rule A2, this is no longer the
+ case. For example for this query:
+
+ (t.key1part2=const OR t.key2part1=const) AND t.key1part1=const
+
+ it will construct a
+
+ imerge(t.key1part2=const OR t.key2part1=const), range(t.key1part1=const)
+
+ then tree_and() will apply rule A2 to push the range down into index merge
+ and after that we'll have:
+
+ range(t.key1part1=const)
+ imerge(
+ t.key1part2=const AND t.key1part1=const,
+ t.key2part1=const
+ )
+ note that imerge(...) describes a usable index_merge plan and it's possible
+ that it will be the best access path.
+
+O2. "Create index_merge accesses when possible"
+ Current tree_or() will not create index_merge access when it could create
+ non-index merge access (see DISCARD-IMERGE-3 and its example in the "Problems
+ in the current implementation" section). This will be changed to work as
+ follows: we will create index_merge made for index scans that didn't have
+ their match in the other sel_tree.
+ Ilustrating it with an example:
+
+ | sel_tree_A | sel_tree_B | A or B | include in index_merge?
+ ------+------------+------------+--------+------------------------
+ key1 | cond1 | cond2 | condM | no
+ key2 | cond3 | cond4 | NULL | no
+ key3 | cond5 | | | yes, A-side
+ key4 | cond6 | | | yes, A-side
+ key5 | | cond7 | | yes, B-side
+ key6 | | cond8 | | yes, B-side
+
+ here we assume that
+ - (cond1 OR cond2) did produce a combined range. Not including them in
+ index_merge.
+ - (cond3 OR cond4) didn't produce a usable range (e.g. they were
+ t.key1part1=c1 AND t.key1part2=c1, respectively, and combining them
+ didn't yield any range list)
+ - All other scand didn't have their counterparts, so we'll end up with a
+ SEL_TREE of:
+
+ range(condM) AND index_merge((cond5 AND cond6),(cond7 AND cond8))
+ .
+
+O4. There is no O4. DISCARD-INDEX-MERGE-4 will remain there. The idea is
+that although DISCARD-INDEX-MERGE-4 does discard plans, so far we haven
+seen any complaints that could be attributed to it.
+If we face the need to lift DISCARD-INDEX-MERGE-4, our answer will be to
+lift it ,and produce a cross-product:
+
+ ((key1p OR key2p) AND (key3p OR key4p))
+ OR
+ ((key5p OR key6p) AND (key7p OR key8p))
+
+ = (key1p OR key2p OR key5p OR key6p) AND // this part is currently
+ (key3p OR key4p OR key5p OR key6p) AND // produced
+
+ (key1p OR key2p OR key5p OR key6p) AND // this part will be added
+ (key3p OR key4p OR key5p OR key6p) //.
+
+In order to limit the impact of this combinatorial explosion, we will
+introduce a rule that we won't generate more than #defined
+MAX_IMERGE_OPTS options.
-=-=(Guest - Thu, 18 Jun 2009, 14:56)=-=-
Low Level Design modified.
--- /tmp/wklog.24.old.15612 2009-06-18 14:56:09.000000000 +0300
+++ /tmp/wklog.24.new.15612 2009-06-18 14:56:09.000000000 +0300
@@ -1 +1,162 @@
+<contents>
+1. Current implementation overview
+1.1. Problems in the current implementation
+2. New implementation
+2.1 New tree_and()
+2.2 New tree_or()
+</contents>
+
+1. Current implementation overview
+==================================
+At the moment, range analyzer works as follows:
+
+SEL_TREE structure represents
+
+ # There are sel_trees, a sel_tree is either range or merge tree
+ sel_tree = range_tree | imerge_tree
+
+ # a range tree has range access options, possibly for several keys
+ range_tree = range(key1) AND range(key2) AND ... AND range(keyN);
+
+ # merge tree represents several way to index_merge
+ imerge_tree = imerge1 AND imerge2 AND ...
+
+ # a way to do index merge == a set to use of different indexes.
+ imergeX = range_tree1 OR range_tree2 OR ..
+ where no pair of range_treeX have ranges over the same index.
+
+
+ tree_and(A, B)
+ {
+ if (both A and B are range trees)
+ return a range_tree with computed intersection for each range;
+ if (only one of A and B is a range tree)
+ return that tree; // DISCARD-IMERGE-1
+ // at this point both trees are index_merge trees
+ return concat_lists( A.imerge1 ... A.imergeN, B.imerge1 ... B.imergeN);
+ }
+
+
+ tree_or(A, B)
+ {
+ if (A and B are range trees)
+ {
+ R = new range_tree;
+ for each index i
+ R.add(range_union(A.range(i), B.range(i)));
+
+ if (R has at least one range access)
+ return R;
+ else
+ {
+ /* could not build any range accesses. construct index_merge */
+ remove non-ranges from A; // DISCARD-IMERGE-2
+ remove non-ranges from B;
+ return new index_merge(A, B);
+ }
+ }
+ else if (A is range tree and B is index_merge tree (or vice versa))
+ {
+ Perform this transformation:
+
+ range_treeA // this is A
+ OR
+ (range_treeB_11 OR range_treeB_12 OR ... OR range_treeB_1N) AND
+ (range_treeB_21 OR range_treeB_22 OR ... OR range_treeB_2N) AND
+ ...
+ (range_treeB_K1 OR range_treeB_K2 OR ... OR range_treeB_kN) AND
+ =
+ (range_treeA OR range_treeB_11 OR ... OR range_treeB_1N) AND
+ (range_treeA OR range_treeB_21 OR ... OR range_treeB_2N) AND
+ ...
+ (range_treeA OR range_treeB_11 OR ... OR range_treeB_1N) AND
+
+ Now each line represents an index_merge..
+ }
+ else if (both A and B are index_merge trees)
+ {
+ Perform this transformation:
+
+ imergeA1 AND imergeA2 AND ... AND imergeAN
+ OR
+ imergeB1 AND imergeB2 AND ... AND imergeBN
+
+ -> (discard all imergeA{i=2,3,...} -> // DISCARD-IMERGE-3
+
+ imergeA1
+ OR
+ imergeB1 AND imergeB2 AND ... AND imergeBN =
+
+ = (combine imergeA1 with each of the imergeB{i} ) =
+
+ combine(imergeA1 OR imergeB1) AND
+ combine(imergeA1 OR imergeB2) AND
+ ... AND
+ combine(imergeA1 OR imergeBN)
+ }
+ }
+
+1.1. Problems in the current implementation
+-------------------------------------------
+As marked in the code above:
+
+DISCARD-IMERGE-1 step will cause index_merge option to be discarded when
+the WHERE clause has this form:
+
+ (t.key1=c1 OR t.key2=c2) AND t.badkey < c3
+
+DISCARD-IMERGE-2 step will cause index_merge option to be discarded when
+the WHERE clause has this form (conditions t.badkey may have abritrary form):
+
+ (t.badkey<c1 AND t.key1=c1) OR (t.key1=c2 AND t.badkey < c2)
+
+DISCARD-IMERGE-3 manifests itself as the following effect: suppose there are
+two indexes:
+
+ INDEX i1(col1, col2),
+ INDEX i2(col1, col3)
+
+and this WHERE clause:
+
+ col1=c1 AND (col2=c2 OR col3=c3)
+
+The optimizer will generate the plans that only use the "col1=c1" part. The
+right side of the AND will be ignored even if it has good selectivity.
+
+
+2. New implementation
+=====================
+
+<general idea>
+* Don't start fighting combinatorial explosion until we've actually got one.
+</>
+
+SEL_TREE structure will be now able to hold both index_merge and range scan
+candidates at the same time. That is,
+
+ sel_tree2 = range_tree AND imerge_tree
+
+where both parts are optional (i.e. can be empty)
+
+Operations on SEL_ARG trees will be modified to produce/process the trees of
+this kind:
+
+2.1 New tree_and()
+------------------
+In order not to lose plans, we'll make these changes:
+
+1. Don't remove index_merge part of the tree.
+
+2. Push range conditions down into index_merge trees that may support them.
+ if one tree has range(key1) and the other tree has imerge(key1 OR key2)
+ then perform an equvalent of this operation:
+
+ rangeA(key1) AND ( rangeB(key1) OR rangeB(key2)) =
+
+ (rangeA(key1) AND rangeB(key1)) OR (rangeA(key1) AND rangeB(key2))
+
+3. Just as before: if both sel_tree A and sel_tree B have index_merge options,
+ concatenate them together.
+
+2.2 New tree_or()
-=-=(Psergey - Wed, 03 Jun 2009, 12:09)=-=-
Dependency created: 30 now depends on 24
-=-=(Guest - Mon, 01 Jun 2009, 23:30)=-=-
High-Level Specification modified.
--- /tmp/wklog.24.old.21580 2009-06-01 23:30:06.000000000 +0300
+++ /tmp/wklog.24.new.21580 2009-06-01 23:30:06.000000000 +0300
@@ -64,6 +64,9 @@
* How strict is the limitation on the form of the WHERE?
+* Which version should this be based on? 5.1? Which patches are should be in
+ (google's/percona's/maria/etc?)
+
* TODO: The optimizer didn't compare costs of index_merge and range before (ok
it did but that was done for accesses to different tables). Will there be any
possible gotchas here?
-=-=(Guest - Wed, 27 May 2009, 13:59)=-=-
Title modified.
--- /tmp/wklog.24.old.9498 2009-05-27 13:59:23.000000000 +0300
+++ /tmp/wklog.24.new.9498 2009-05-27 13:59:23.000000000 +0300
@@ -1 +1 @@
-index_merge optimizer: dont discard index_merge union strategies when range is available
+index_merge: fair choice between index_merge union and range access
------------------------------------------------------------
-=-=(View All Progress Notes, 11 total)=-=-
http://askmonty.org/worklog/index.pl?tid=24&nolimit=1
DESCRIPTION:
The current range optimizer will discard possible index_merge/[sort]union
strategies when there is a possible range plan. This is part of the
measures we take to avoid a combinatorial explosion of possible range/
index_merge strategies.
A bad side effect of this is that for WHERE clauses of the form
t.key1= 'very-frequent-value' AND (t.key2='rare-value1' OR t.key3='rare-value2')
the optimizer will
- discard union(key2,key3) in favor of range(key1)
- consider the cost of using range(key1) and possibly discard that plan as well
and the overall effect is that a possibly poor range access can cause a possibly
good index_merge access not to be considered.
This WL is about lifting this limitation, at least for some subset of WHERE
clauses.
HIGH-LEVEL SPECIFICATION:
(Not a finished HLS, but a draft)
<contents>
Solution overview
Limitations
TODO
</contents>
Solution overview
=================
The idea is to delay discarding potential index_merge plans until the point
where it is really necessary.
This way, we won't have to make many changes in the range analyzer, but will be
able to keep the potential index_merge plan around just long enough that it can
be taken into consideration together with range access plans.
Since there are no changes in the optimizer, the ability to consider both
range and index_merge options will be limited to WHERE clauses of this form:
  WHERE := range_cond(key1_1) AND
           range_cond(key2_1) AND
           other_cond AND
           index_merge_OR_cond1(key3_1, key3_2, ...) AND
           index_merge_OR_cond2(key4_1, key4_2, ...)
where
  index_merge_OR_cond{N} := (range_cond(keyN_1) OR
                             range_cond(keyN_2) OR ...)
  range_cond(keyX) := a condition from which range access over keyX can be
                      constructed
  other_cond := a condition from which no range/index_merge access can be
                constructed for any key of the table in question.
For such WHERE clauses, the range analyzer will produce SEL_TREE of this form:
  SEL_TREE(
    range(key1_1),
    ...
    range(key2_1),
    SEL_IMERGE(             (1)
      SEL_TREE(key3_1)
      SEL_TREE(key3_2)
      ...
    )
    ...
  )
which can be used to make a cost-based choice between range and index_merge.
Limitations
-----------
This will not be a full solution, in the sense that the range analyzer will not
be able to produce sel_tree (1) if the WHERE clause is written in some other
form (e.g. with the brackets expanded).
TODO
----
* Is it a problem if there are keys that are referred to both from
index_merge and from range access?
* How strict is the limitation on the form of the WHERE?
* Which version should this be based on? 5.1? Which patches should be included
(google's/percona's/maria's/etc.)?
* TODO: The optimizer didn't compare costs of index_merge and range before (ok,
it did, but that was done for accesses to different tables). Will there be any
possible gotchas here?
LOW-LEVEL DESIGN:
<contents>
1. Current implementation overview
1.1. Problems in the current implementation
2. New implementation
2.1 New tree_and()
2.2 New tree_or()
3. Testing and required coverage
</contents>
1. Current implementation overview
==================================
At the moment, the range analyzer works as follows:
The SEL_TREE structure represents:

  # There are sel_trees; a sel_tree is either a range tree or a merge tree
  sel_tree = range_tree | imerge_tree

  # a range tree has range access options, possibly for several keys
  range_tree = range(key1) AND range(key2) AND ... AND range(keyN);
    (here range(keyi) may represent ranges not only over initial prefixes
     of keyi, but over any infixes of keyi)

  # a merge tree represents several ways to do index_merge
  imerge_tree = imerge1 AND imerge2 AND ...

  # a way to do index merge == a set of different indexes to use
  imergeX = range_tree1 OR range_tree2 OR ...
    where no pair of range_treeX has ranges over the same index.
  tree_and(A, B)
  {
    if (both A and B are range trees)
      return a range_tree with computed intersection for each range;
    if (only one of A and B is a range tree)
      return that tree; // DISCARD-IMERGE-1
    // at this point both trees are index_merge trees
    return concat_lists( A.imerge1 ... A.imergeN, B.imerge1 ... B.imergeN);
  }

  tree_or(A, B)
  {
    if (A and B are range trees)
    {
      R = new range_tree;
      for each index i
        R.add(range_union(A.range(i), B.range(i)));
      if (R has at least one range access)
        return R; // DISCARD-IMERGE-2
      else
      {
        /* could not build any range accesses. construct index_merge */
        remove non-ranges from A;
        remove non-ranges from B;
        return new index_merge(A, B); // DISCARD-IMERGE-3
      }
    }
    else if (A is range tree and B is index_merge tree (or vice versa))
    {
      Perform this transformation:

        range_treeA   // this is A
        OR
        (range_treeB_11 OR range_treeB_12 OR ... OR range_treeB_1N) AND
        (range_treeB_21 OR range_treeB_22 OR ... OR range_treeB_2N) AND
        ...
        (range_treeB_K1 OR range_treeB_K2 OR ... OR range_treeB_KN)
        =
        (range_treeA OR range_treeB_11 OR ... OR range_treeB_1N) AND
        (range_treeA OR range_treeB_21 OR ... OR range_treeB_2N) AND
        ...
        (range_treeA OR range_treeB_K1 OR ... OR range_treeB_KN)

      Now each line represents an index_merge.
    }
    else if (both A and B are index_merge trees)
    {
      Perform this transformation:

        imergeA1 AND imergeA2 AND ... AND imergeAN
        OR
        imergeB1 AND imergeB2 AND ... AND imergeBN

        -> (discard all imergeA{i=2,3,...}) ->   // DISCARD-IMERGE-4

        imergeA1
        OR
        imergeB1 =

        = (combine imergeA1 with each of the range_treeB_1{i}) =

        combine(imergeA1 OR range_treeB_11) AND
        combine(imergeA1 OR range_treeB_12) AND
        ... AND
        combine(imergeA1 OR range_treeB_1N)
    }
  }
1.1. Problems in the current implementation
-------------------------------------------
As marked in the code above:
The DISCARD-IMERGE-1 step will cause the index_merge option to be discarded when
the WHERE clause has this form:
(t.key1=c1 OR t.key2=c2) AND t.badkey < c3
The DISCARD-IMERGE-2 step will cause the index_merge option to be discarded when
the WHERE clause has this form (the conditions on t.badkey may have arbitrary form):
(t.badkey<c1 AND t.key1=c1) OR (t.key2=c2 AND t.badkey < c2)
DISCARD-IMERGE-3 manifests itself as the following effect: suppose there are
two indexes:
INDEX i1(col1, col2),
INDEX i2(col1, col3)
and this WHERE clause:
col1=c1 AND (col2=c2 OR col3=c3)
The optimizer will generate only plans that use the "col1=c1" part. The
right side of the AND will be ignored even if it has good selectivity.
(Here no imerge for col2=c2 OR col3=c3 will be built, since neither col2=c2 nor
col3=c3 represents an index range.)
2. New implementation
=====================
<general idea>
* Don't start fighting combinatorial explosion until we've actually got one.
</>
The SEL_TREE structure will now be able to hold both index_merge and range scan
candidates at the same time. That is,
sel_tree2 = range_tree AND imerge_tree
where both parts are optional (i.e. can be empty).
Operations on SEL_ARG trees will be modified to produce/process the trees of
this kind:
2.1 New tree_and()
------------------
In order not to lose plans, we'll make these changes:
A1. Don't remove the index_merge part of the tree (this will take care of the
DISCARD-IMERGE-1 problem).
A2. Push range conditions down into index_merge trees that may support them:
if one tree has range(key1) and the other tree has imerge(key1 OR key2),
then perform an equivalent of this operation (see the sketch below):
rangeA(key1) AND ( rangeB(key1) OR rangeB(key2)) =
(rangeA(key1) AND rangeB(key1)) OR (rangeA(key1) AND rangeB(key2))
A3. Just as before: if both sel_tree A and sel_tree B have index_merge options,
concatenate them together.
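
To make rules A1-A3 concrete, here is a minimal, self-contained C++ sketch.
The type and function names (SelTree2, RangeTree, ImergeTree, tree_and2) are
invented for illustration and only stand in for the real SEL_TREE/SEL_IMERGE
machinery; range intersection is modelled simply as concatenating lists of
condition strings.

  #include <map>
  #include <string>
  #include <vector>

  using KeyNo     = int;
  using Conjuncts = std::vector<std::string>;            // conditions ANDed together

  struct RangeTree  { std::map<KeyNo, Conjuncts> ranges; };  // per-key range conditions
  struct ImergeTree { std::vector<RangeTree> or_parts; };    // range_tree1 OR range_tree2 OR ...
  struct SelTree2   { RangeTree range; std::vector<ImergeTree> imerges; };

  // Sketch of the new tree_and() following rules A1-A3 above.
  SelTree2 tree_and2(const SelTree2 &a, const SelTree2 &b)
  {
    SelTree2 res;

    // Range part: per-key intersection, modelled here by ANDing the conjunct lists.
    res.range = a.range;
    for (const auto &kv : b.range.ranges) {
      Conjuncts &dst = res.range.ranges[kv.first];
      dst.insert(dst.end(), kv.second.begin(), kv.second.end());
    }

    // A1 + A3: keep the index_merge parts and concatenate both imerge lists.
    res.imerges = a.imerges;
    res.imerges.insert(res.imerges.end(), b.imerges.begin(), b.imerges.end());

    // A2: push range conditions down into imerge branches over the same key:
    //   rangeA(key1) AND (rangeB(key1) OR rangeB(key2))
    //   = (rangeA(key1) AND rangeB(key1)) OR (rangeA(key1) AND rangeB(key2))
    for (ImergeTree &im : res.imerges)
      for (RangeTree &branch : im.or_parts)
        for (const auto &kv : res.range.ranges) {
          auto it = branch.ranges.find(kv.first);
          if (it != branch.ranges.end())
            it->second.insert(it->second.end(), kv.second.begin(), kv.second.end());
        }

    return res;
  }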
2.2 New tree_or()
-----------------
O1. Don't remove non-range plans:
The current tree_or() code will refuse to produce index_merge plans for
conditions like
"t.key1part2=const OR t.key2part1=const"
(this is marked as DISCARD-IMERGE-3). This was justified because the left part of
the AND condition is not usable for range access, and the operation of
tree_and() guaranteed that there was no way it could be changed into a
usable range plan. With the new tree_and() and rule A2, this is no longer the
case. For example, for this query:
(t.key1part2=const OR t.key2part1=const) AND t.key1part1=const
it will first construct
imerge(t.key1part2=const OR t.key2part1=const), range(t.key1part1=const)
then tree_and() will apply rule A2 to push the range down into index merge
and after that we'll have:
range(t.key1part1=const)
imerge(
t.key1part2=const AND t.key1part1=const,
t.key2part1=const
)
note that imerge(...) describes a usable index_merge plan and it's possible
that it will be the best access path.
O2. "Create index_merge accesses when possible"
Current tree_or() will not create index_merge access when it could create
non-index merge access (see DISCARD-IMERGE-2 and its example in the "Problems
in the current implementation" section). This will be changed to work as
follows: we will create index_merge made for index scans that didn't have
their match in the other sel_tree.
Illustrating it with an example:

        | sel_tree_A | sel_tree_B | A or B | include in index_merge?
  ------+------------+------------+--------+------------------------
  key1  | cond1      | cond2      | condM  | no
  key2  | cond3      | cond4      | NULL   | no
  key3  | cond5      |            |        | yes, A-side
  key4  | cond6      |            |        | yes, A-side
  key5  |            | cond7      |        | yes, B-side
  key6  |            | cond8      |        | yes, B-side

Here we assume that
- (cond1 OR cond2) did produce a combined range, so these scans are not
  included in the index_merge.
- (cond3 OR cond4) didn't produce a usable range (e.g. they were
  t.key1part1=c1 and t.key1part2=c1, respectively, and combining them
  didn't yield any range list).
- All other scans didn't have their counterparts, so we'll end up with a
  SEL_TREE of:

  range(condM) AND index_merge((cond5 AND cond6), (cond7 AND cond8))
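
A minimal sketch of this rule, reusing the toy SelTree2/RangeTree/ImergeTree
types from the tree_and2() sketch above (names invented for illustration). For
simplicity it assumes that any key present on both sides yields a usable
combined range (the real code must also handle the key2 case in the table,
where the union produces nothing usable), and it ignores any pre-existing
imerge parts of A and B.

  // Sketch of the O2 rule: scans with a counterpart stay in the range part,
  // scans without a counterpart are collected into one index_merge candidate.
  SelTree2 tree_or2(const SelTree2 &a, const SelTree2 &b)
  {
    SelTree2 res;
    RangeTree a_side, b_side;                       // scans without a counterpart

    for (const auto &kv : a.range.ranges) {
      auto it = b.range.ranges.find(kv.first);
      if (it != b.range.ranges.end())
        res.range.ranges[kv.first] = kv.second;     // toy stand-in for range_union()
      else
        a_side.ranges[kv.first] = kv.second;        // "yes, A-side" in the table
    }
    for (const auto &kv : b.range.ranges)
      if (!a.range.ranges.count(kv.first))
        b_side.ranges[kv.first] = kv.second;        // "yes, B-side" in the table

    // Unmatched scans become one index_merge candidate, e.g.
    // index_merge((cond5 AND cond6), (cond7 AND cond8)) from the table above.
    if (!a_side.ranges.empty() && !b_side.ranges.empty()) {
      ImergeTree im;
      im.or_parts.push_back(a_side);
      im.or_parts.push_back(b_side);
      res.imerges.push_back(im);
    }
    return res;
  }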
O4. There is no O4. DISCARD-IMERGE-4 will remain in place. The idea is
that although DISCARD-IMERGE-4 does discard plans, so far we haven't
seen any complaints that could be attributed to it.
If we face the need to lift DISCARD-IMERGE-4, our answer will be to
lift it, and produce a cross-product:

  ((key1p OR key2p) AND (key3p OR key4p))
  OR
  ((key5p OR key6p) AND (key7p OR key8p))

  = (key1p OR key2p OR key5p OR key6p) AND   // this part is currently
    (key3p OR key4p OR key5p OR key6p) AND   // produced
    (key1p OR key2p OR key7p OR key8p) AND   // this part will be added
    (key3p OR key4p OR key7p OR key8p)

In order to limit the impact of this combinatorial explosion, we will
introduce a rule that we won't generate more than a #defined
MAX_IMERGE_OPTS number of options.
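
A sketch of that cap, using the same toy types as above. MAX_IMERGE_OPTS is the
name used in the text; the value 32 here is only an illustration, not a decided
constant.

  static const unsigned MAX_IMERGE_OPTS = 32;   // illustrative value only

  // Add one more index_merge candidate unless the cap is already reached.
  bool add_imerge_option(SelTree2 &tree, const ImergeTree &candidate)
  {
    if (tree.imerges.size() >= MAX_IMERGE_OPTS)
      return false;               // stop generating options, keep the plan space bounded
    tree.imerges.push_back(candidate);
    return true;
  }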
3. Testing and required coverage
================================
So far we could find the following use cases:
* BUG#17259: Query optimizer chooses wrong index
* BUG#17673: Optimizer does not use Index Merge optimization in some cases
* BUG#23322: Optimizer sometimes erroniously prefers other index over index merge
* BUG#30151: optimizer is very reluctant to chose index_merge algorithm
ESTIMATED WORK TIME
ESTIMATED COMPLETION DATE
-----------------------------------------------------------------------
WorkLog (v4.0.0)

[Maria-developers] WL#67 Updated (by Psergey): ICP/MRR backport
by worklog-noreply@askmonty.org 29 Jun '10
-----------------------------------------------------------------------
WORKLOG TASK
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
TASK...........: ICP/MRR backport
CREATION DATE..: Thu, 26 Nov 2009, 15:19
SUPERVISOR.....: Monty
IMPLEMENTOR....: Psergey
COPIES TO......:
CATEGORY.......: Server-Sprint
TASK ID........: 67 (http://askmonty.org/worklog/?tid=67)
VERSION........: Server-9.x
STATUS.........: Complete
PRIORITY.......: 60
WORKED HOURS...: 0
ESTIMATE.......: 0 (hours remain)
ORIG. ESTIMATE.: 0
PROGRESS NOTES:
-=-=(Psergey - Tue, 29 Jun 2010, 13:57)=-=-
Status updated.
--- /tmp/wklog.67.old.31561 2010-06-29 13:57:50.000000000 +0000
+++ /tmp/wklog.67.new.31561 2010-06-29 13:57:50.000000000 +0000
@@ -1 +1 @@
-Un-Assigned
+Complete
-=-=(Guest - Sun, 13 Jun 2010, 16:57)=-=-
Dependency deleted: 91 no longer depends on 67
-=-=(Igor - Wed, 10 Mar 2010, 19:14)=-=-
High Level Description modified.
--- /tmp/wklog.67.old.25641 2010-03-10 19:14:45.000000000 +0000
+++ /tmp/wklog.67.new.25641 2010-03-10 19:14:45.000000000 +0000
@@ -1,2 +1,2 @@
-Backport DS-MRR into MariaDB-5.2 codebase, also adding certain extra features to
-make it more usable.
+Backport ICP and DS-MRR into MariaDB-5.2 codebase, also adding certain extra
+features to make it more usable.
-=-=(Guest - Wed, 10 Mar 2010, 19:12)=-=-
Title modified.
--- /tmp/wklog.67.old.25456 2010-03-10 19:12:57.000000000 +0000
+++ /tmp/wklog.67.new.25456 2010-03-10 19:12:57.000000000 +0000
@@ -1 +1 @@
-MRR backport
+ICP/MRR backport
-=-=(Psergey - Sun, 28 Feb 2010, 14:56)=-=-
Dependency created: 91 now depends on 67
-=-=(Psergey - Sun, 28 Feb 2010, 14:54)=-=-
Dependency deleted: 94 no longer depends on 67
-=-=(Psergey - Sun, 28 Feb 2010, 14:09)=-=-
Dependency created: 94 now depends on 67
-=-=(Psergey - Thu, 26 Nov 2009, 20:21)=-=-
High-Level Specification modified.
--- /tmp/wklog.67.old.9329 2009-11-26 20:21:28.000000000 +0200
+++ /tmp/wklog.67.new.9329 2009-11-26 20:21:28.000000000 +0200
@@ -65,17 +65,19 @@
2.5 Make MRR code more of a module
----------------------------------
-Some code in handler.cc can be moved to separate file.
-But changes in opt_range.cc can't.
-TODO: Sort out how much we really can do here. Initial guess is not much as the
-code consists of:
+It is not possible to make MRR to be a totally separate module, as its code
+consists of :
- Default MRR implementation in handler.cc
- Changes in opt_range.cc to use MRR instead of multiple records_in_range()
- calls. These rely on opt_range.cc's internal structures like SEL_ARG trees and
+ calls. These rely on opt_range.cc's internal stuctures like SEL_ARG trees and
so there is not much point in moving them out.
-- DS-MRR implementations which are spread over storage engines.
-and the only modularization we see is to move #1 into a separate file which
-won't achieve much.
+- DS-MRR impelementations which are spread over storage engines.
+
+We'll try to modularize what we can:
+- Move out default MRR implementation from handler.cc
+- Move possible parts out of opt_range.cc into a separate file.
+
+
2.6 Improve the cost model
--------------------------
-=-=(Psergey - Thu, 26 Nov 2009, 19:06)=-=-
High-Level Specification modified.
--- /tmp/wklog.67.old.6449 2009-11-26 19:06:04.000000000 +0200
+++ /tmp/wklog.67.new.6449 2009-11-26 19:06:04.000000000 +0200
@@ -1,4 +1,3 @@
-
<contents>
1. Requirements
2. Required actions
@@ -44,6 +43,7 @@
http://bugs.mysql.com/search.php?cmd=display&status=Active&tags=index_condi…
http://bugs.mysql.com/search.php?cmd=display&status=Active&tags=mrr
+http://bugs.mysql.com/search.php?cmd=display&status=Active&tags=icp
2.2 Backport DS-MRR code to MariaDB 5.2
---------------------------------------
-=-=(Psergey - Thu, 26 Nov 2009, 18:15)=-=-
High-Level Specification modified.
--- /tmp/wklog.67.old.4161 2009-11-26 18:15:36.000000000 +0200
+++ /tmp/wklog.67.new.4161 2009-11-26 18:15:36.000000000 +0200
@@ -1,3 +1,17 @@
+
+<contents>
+1. Requirements
+2. Required actions
+2.1 Fix DS-MRR/InnoDB bugs
+2.2 Backport DS-MRR code to MariaDB 5.2
+2.3 Introduce control variables
+2.4 Other backport issues
+2.5 Make MRR code more of a module
+2.6 Improve the cost model
+2.7 Let DS-MRR support clustered primary keys
+</contents>
+
+
1. Requirements
===============
@@ -63,4 +77,28 @@
and the only modularization we see is to move #1 into a separate file which
won't achieve much.
+2.6 Improve the cost model
+--------------------------
+At the moment DS-MRR cost formula re-uses non-MRR scan costs, which uses
+records_in_range() calls, followed by index_only_read_time() or read_time()
+calls to produce the estimate for read cost.
+
+ We should change this (TODO sort out how exactly)
+
+Note: this means that the query plans will change from MariaDB 5.2.
+
+2.7 Let DS-MRR support clustered primary keys
+---------------------------------------------
+At the moment DS-MRR is not supported for clustered primary keys. It is not
+needed when MRR is used for range access, because range access is done over
+an ordered list of ranges, but it is useful for BKA.
+
+TODO:
+ it's useful for BKA because BKA makes MRR scans over un-orderered
+ non-disjoint lists of ranges. Then we can sort these and do ordered scans.
+ There is still no use for DS-MRR over clustered primary key for range
+ access, where the ranges are disjoint and ordered.
+ How about postponing this item until BKA is backported?
+
+
------------------------------------------------------------
-=-=(View All Progress Notes, 11 total)=-=-
http://askmonty.org/worklog/index.pl?tid=67&nolimit=1
DESCRIPTION:
Backport ICP and DS-MRR into MariaDB-5.2 codebase, also adding certain extra
features to make it more usable.
HIGH-LEVEL SPECIFICATION:
<contents>
1. Requirements
2. Required actions
2.1 Fix DS-MRR/InnoDB bugs
2.2 Backport DS-MRR code to MariaDB 5.2
2.3 Introduce control variables
2.4 Other backport issues
2.5 Make MRR code more of a module
2.6 Improve the cost model
2.7 Let DS-MRR support clustered primary keys
</contents>
1. Requirements
===============
We need the following:
1. Latest MRR interface support, including extensions to support ICP when
using BKA.
2. Let DS-MRR support clustered primary keys (needed when using BKA).
3. Remove conditions used for key access from the condition pushed to index
(ATM this manifests itself as "Using index condition" appearing where there
was no "Using where". TODO: example of this?)
4. Introduce a separate @@optimizer_switch flag for turning ICP on/off (ATM it
is switched on/off by @@engine_condition_pushdown).
5. Introduce a separate @@mrr_buffer_size variable to control the MRR buffer size
for range+MRR scans. ATM it is controlled by the @@read_rnd_size flag, and that
makes it non-obvious for a number of users.
6. Rename multi_range_read_info_const() so that it does not look like a part of
the MRR interface.
7. Improve MRR's cost model.
8. Try to make MRR more of a module.
2. Required actions
===================
Roughly in the order in which it will be done:
2.1 Fix DS-MRR/InnoDB bugs
--------------------------
We need to fix the bugs listed here:
http://bugs.mysql.com/search.php?cmd=display&status=Active&tags=index_condi…
http://bugs.mysql.com/search.php?cmd=display&status=Active&tags=mrr
http://bugs.mysql.com/search.php?cmd=display&status=Active&tags=icp
2.2 Backport DS-MRR code to MariaDB 5.2
---------------------------------------
The easiest way seems to be to manually move the needed code from mysql-6.0
(or whatever it's called now) to MariaDB.
2.3 Introduce control variables
-------------------------------
Act on items #4 and #5 from the requirements. Should be easy as
@@optimizer_switch is supported in 5.1 codebase.
2.4 Other backport issues
-------------------------
* Figure out what to do with NDB/MRR. 5.1 codebase has "old" NDB/MRR
implementation. mysql-6.0 (and NDB's branch) have the updated NDB/MRR
but merging it into 5.1 can be very labor-intensive.
Will it be ok to disable NDB/MRR altogether?
2.5 Make MRR code more of a module
----------------------------------
It is not possible to make MRR a totally separate module, as its code
consists of:
- the default MRR implementation in handler.cc
- changes in opt_range.cc to use MRR instead of multiple records_in_range()
calls. These rely on opt_range.cc's internal structures like SEL_ARG trees, and
so there is not much point in moving them out.
- DS-MRR implementations, which are spread over storage engines.
We'll try to modularize what we can:
- Move out default MRR implementation from handler.cc
- Move possible parts out of opt_range.cc into a separate file.
2.6 Improve the cost model
--------------------------
At the moment the DS-MRR cost formula re-uses non-MRR scan costs: it uses
records_in_range() calls, followed by index_only_read_time() or read_time()
calls, to produce the read cost estimate.
We should change this (TODO: sort out how exactly).
Note: this means that the query plans will change from MariaDB 5.2.
2.7 Let DS-MRR support clustered primary keys
---------------------------------------------
At the moment DS-MRR is not supported for clustered primary keys. It is not
needed when MRR is used for range access, because range access is done over
an ordered list of ranges, but it is useful for BKA.
TODO:
it's useful for BKA because BKA makes MRR scans over unordered,
non-disjoint lists of ranges. Then we can sort these and do ordered scans.
There is still no use for DS-MRR over clustered primary key for range
access, where the ranges are disjoint and ordered.
How about postponing this item until BKA is backported?
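
A small sketch of the sort-and-scan idea from the TODO above: BKA hands MRR an
unordered, possibly overlapping list of ranges, and sorting plus merging the
overlaps turns the clustered-PK reads into one ordered, disjoint sweep. The
types and the function are invented for illustration and are not the real
multi_range_read interface.

  #include <algorithm>
  #include <vector>

  struct KeyRange { long start, end; };     // [start, end], toy integer keys

  // Order and merge the ranges so a clustered primary key can be scanned once,
  // in key order, without revisiting rows.
  std::vector<KeyRange> order_for_clustered_pk(std::vector<KeyRange> ranges)
  {
    std::sort(ranges.begin(), ranges.end(),
              [](const KeyRange &a, const KeyRange &b) { return a.start < b.start; });

    std::vector<KeyRange> merged;
    for (const KeyRange &r : ranges) {
      if (!merged.empty() && r.start <= merged.back().end)
        merged.back().end = std::max(merged.back().end, r.end);  // overlapping: extend
      else
        merged.push_back(r);                                     // disjoint: new range
    }
    return merged;                           // scan these in order
  }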
ESTIMATED WORK TIME
ESTIMATED COMPLETION DATE
-----------------------------------------------------------------------
WorkLog (v4.0.0)

[Maria-developers] WL#120 Updated (by Knielsen): Replication API for stacked event generators
by worklog-noreply@askmonty.org 29 Jun '10
-----------------------------------------------------------------------
WORKLOG TASK
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
TASK...........: Replication API for stacked event generators
CREATION DATE..: Mon, 07 Jun 2010, 13:13
SUPERVISOR.....: Knielsen
IMPLEMENTOR....: Knielsen
COPIES TO......:
CATEGORY.......: Server-Sprint
TASK ID........: 120 (http://askmonty.org/worklog/?tid=120)
VERSION........: Server-9.x
STATUS.........: In-Progress
PRIORITY.......: 60
WORKED HOURS...: 2
ESTIMATE.......: 0 (hours remain)
ORIG. ESTIMATE.: 0
PROGRESS NOTES:
-=-=(Knielsen - Tue, 29 Jun 2010, 13:51)=-=-
Status updated.
--- /tmp/wklog.120.old.31179 2010-06-29 13:51:20.000000000 +0000
+++ /tmp/wklog.120.new.31179 2010-06-29 13:51:20.000000000 +0000
@@ -1 +1 @@
-Assigned
+In-Progress
-=-=(Knielsen - Mon, 28 Jun 2010, 07:11)=-=-
High-Level Specification modified.
--- /tmp/wklog.120.old.18416 2010-06-28 07:11:09.000000000 +0000
+++ /tmp/wklog.120.new.18416 2010-06-28 07:11:09.000000000 +0000
@@ -14,10 +14,14 @@
An example of an event consumer is the writing of the binlog on a master.
-Event generators are not really plugins. Rather, there are specific points in
-the server where events are generated. However, a generator can be part of a
-plugin, for example a PBXT engine-level replication event generator would be
-part of the PBXT storage engine plugin.
+Some event generators are not really plugins. Rather, there are specific
+points in the server where events are generated. However, a generator can be
+part of a plugin, for example a PBXT engine-level replication event generator
+would be part of the PBXT storage engine plugin. And for example we could
+write a filter plugin, which would be stacked on top of an existing generator
+and provide the same event types and interfaces, but filtered in some way (for
+example by removing certain events on the master side, or by re-writing events
+in certain ways).
Event consumers on the other hand could be a plugin.
@@ -85,6 +89,9 @@
some is kept as reference to context (eg. THD) only. This however looses most
of the mentioned advantages for materialisation.
+Also, the non-materialising interface should be a good interface on top of
+which to build a materialising interface.
+
The design proposed here aims for as little materialisation as possible.
-=-=(Knielsen - Thu, 24 Jun 2010, 14:29)=-=-
Category updated.
--- /tmp/wklog.120.old.7440 2010-06-24 14:29:38.000000000 +0000
+++ /tmp/wklog.120.new.7440 2010-06-24 14:29:38.000000000 +0000
@@ -1 +1 @@
-Server-RawIdeaBin
+Server-Sprint
-=-=(Knielsen - Thu, 24 Jun 2010, 14:29)=-=-
Status updated.
--- /tmp/wklog.120.old.7440 2010-06-24 14:29:38.000000000 +0000
+++ /tmp/wklog.120.new.7440 2010-06-24 14:29:38.000000000 +0000
@@ -1 +1 @@
-Un-Assigned
+Assigned
-=-=(Knielsen - Thu, 24 Jun 2010, 14:29)=-=-
Low Level Design modified.
--- /tmp/wklog.120.old.7355 2010-06-24 14:29:11.000000000 +0000
+++ /tmp/wklog.120.new.7355 2010-06-24 14:29:11.000000000 +0000
@@ -1 +1,385 @@
+A consumer is implented as a virtual class (interface). There is one virtual
+function for every event that can be received. A consumer would derive from
+the base class and override methods for the events it wants to receive.
+
+There is one consumer interface for each generator. When a generator A is
+stacked on B, the consumer interface for A inherits from the interface for
+B. This way, when A defers an event to B, the consumer for A will receive the
+corresponding event from B.
+
+There are methods for a consumer to register itself to receive events from
+each generator. I still need to find a way for a consumer in one plugin to
+register itself with a generator implemented in another plugin (eg. PBXT
+engine-level replication). I also need to add a way for consumers to
+de-register themselves.
+
+The current design has consumer callbacks return 0 for success and error code
+otherwise. I still need to think more about whether this is useful (ie. what
+is the semantics of returning an error from a consumer callback).
+
+Each event passed to consumers is defined as a class with public accessor
+methods to a private context (which is mostly the THD).
+
+My intension is to make all events passed around const, so that the same event
+can be passed to each of multiple registered consumers (and to emphasise that
+consumers do not have the ability to modify events). It still needs to be seen
+whether that const-ness will be feasible in practise without very heavy
+modification/constification of exiting code.
+
+What follows is a partial draft of a possible definition of the API as
+concrete C++ class definitions.
+
+-----------------------------------------------------------------------
+
+/*
+ Virtual base class for generated replication events.
+
+ This is the parent of events generated from all kinds of generators. Only
+ child classes can be instantiated.
+
+ This class can be used by code that wants to treat events in a generic way,
+ without any knowledge of event details. I still need to decide whether such
+ generic code is sensible.
+*/
+class rpl_event_base
+{
+ /*
+ Maybe we will want the ability to materialise an event to a standard
+ binary format. This could be achieved with a base method like this. The
+ actual materialisation would be implemented in each deriving class. The
+ public methods would provide different interfaces for specifying the
+ buffer or for writing directly into IO_CACHE or file.
+ */
+
+ /* Return 0 on success, -1 on error, -2 on out-of-buffer. */
+ int materialise(uchar *buffer, size_t buflen) const;
+ /*
+ Returns NULL on error or else malloc()ed buffer with materialised event,
+ caller must free().
+ */
+ uchar *materialise() const;
+ /* Same but using passed in memroot. */
+ uchar *materialise(mem_root *memroot) const;
+ /*
+ Materialise to user-supplied writer function (could write directly to file
+ or the like).
+ */
+ int materialise(int (*writer)(uchar *data, size_t len, void *context)) const;
+
+ /*
+ As to for what to do with a materialised event, there are a couple of
+ possibilities.
+
+ One is to have a de_materialise() method somewhere that can construct an
+ rpl_event_base (really a derived class of course) from a buffer or writer
+ function. This would require each accessor function to conditionally read
+ its data from either THD context or buffer (GCC is able to optimise
+ several such conditionals in multiple accessor function calls into one
+ conditional), or we can make all accessors virtual if the performance hit
+ is acceptable.
+
+ Another is to have different classes for accessing events read from
+ materialised event data.
+
+ Also, I still need to think about whether it is at all useful to be able
+ to generically materialise an event at this level. It may be that any
+ binlog/transport will in any case need to undertand more of the format of
+ events, so that such materialisation/transport is better done at a
+ different layer.
+ */
+
+protected:
+ /* Implementation which is the basis for materialise(). */
+ virtual int do_materialise(int (*writer)(uchar *data, size_t len,
+ void *context)) const = 0;
+
+private:
+ /* Virtual base class, private constructor to prevent instantiation. */
+ rpl_event_base();
+};
+
+
+/*
+ These are the event types output from the transaction event generator.
+
+ This generator is not stacked on anything.
+
+ The transaction event generator marks the start and end (commit or rollback)
+ of transactions. It also gives information about whether the transaction was
+ a full transaction or autocommitted statement, whether transactional tables
+ were involved, whether non-transactional tables were involved, and XA
+ information (ToDo).
+*/
+
+/* Base class for transaction events. */
+class rpl_event_transaction_base : public rpl_event_base
+{
+public:
+ /*
+ Get the local transaction id. This idea is only unique within one server.
+ It is allocated whenever a new transaction is started.
+ Can be used to identify events belonging to the same transaction in a
+ binlog-like stream of events streamed in parallel among multiple
+ transactions.
+ */
+ uint64_t get_local_trx_id() const { return thd->local_trx_id; };
+
+ bool get_is_autocommit() const;
+
+private:
+ /* The context is the THD. */
+ THD *thd;
+
+ rpl_event_transaction_base(THD *_thd) : thd(_thd) { };
+};
+
+/* Transaction start event. */
+class rpl_event_transaction_start : public rpl_event_transaction_base
+{
+
+};
+
+/* Transaction commit. */
+class rpl_event_transaction_commit : public rpl_event_transaction_base
+{
+public:
+ /*
+ The global transaction id is unique cross-server.
+
+ It can be used to identify the position from which to start a slave
+ replicating from a master.
+
+ This global ID is only available once the transaction is decided to commit
+ by the TC manager / primary redundancy service. This TC also allocates the
+ ID and decides the exact semantics (can there be gaps, etc); however the
+ format is fixed (cluster_id, running_counter).
+ */
+ struct global_transaction_id
+ {
+ uint32_t cluster_id;
+ uint64_t counter;
+ };
+
+ const global_transaction_id *get_global_transaction_id() const;
+};
+
+/* Transaction rollback. */
+class rpl_event_transaction_rollback : public rpl_event_transaction_base
+{
+
+};
+
+
+/* Base class for statement events. */
+class rpl_event_statement_base : public rpl_event_base
+{
+public:
+ LEX_STRING get_current_db() const;
+};
+
+class rpl_event_statement_start : public rpl_event_statement_base
+{
+
+};
+
+class rpl_event_statement_end : public rpl_event_statement_base
+{
+public:
+ int get_errorcode() const;
+};
+
+class rpl_event_statement_query : public rpl_event_statement_base
+{
+public:
+ LEX_STRING get_query_string();
+ ulong get_sql_mode();
+ const CHARSET_INFO *get_character_set_client();
+ const CHARSET_INFO *get_collation_connection();
+ const CHARSET_INFO *get_collation_server();
+ const CHARSET_INFO *get_collation_default_db();
+
+ /*
+ Access to relevant flags that affect query execution.
+
+    Use as if (ev->get_flags() & (uint32)STMT_FOREIGN_KEY_CHECKS) { ... }
+ */
+ enum flag_bits
+ {
+ STMT_FOREIGN_KEY_CHECKS, // @@foreign_key_checks
+ STMT_UNIQUE_KEY_CHECKS, // @@unique_checks
+ STMT_AUTO_IS_NULL, // @@sql_auto_is_null
+ };
+ uint32_t get_flags();
+
+ ulong get_auto_increment_offset();
+ ulong get_auto_increment_increment();
+
+ // And so on for: time zone; day/month names; connection id; LAST_INSERT_ID;
+ // INSERT_ID; random seed; user variables.
+ //
+ // We probably also need get_uses_temporary_table(), get_used_user_vars(),
+ // get_uses_auto_increment() and so on, so a consumer can get more
+ // information about what kind of context information a query will need when
+ // executed on a slave.
+};
+
+class rpl_event_statement_load_query : public rpl_event_statement_query
+{
+
+};
+
+/*
+ This event is fired with blocks of data for files read (from server-local
+ file or client connection) for LOAD DATA.
+*/
+class rpl_event_statement_load_data_block : public rpl_event_statement_base
+{
+public:
+ struct block
+ {
+ const uchar *ptr;
+ size_t size;
+ };
+ block get_block() const;
+};
+
+/* Base class for row-based replication events. */
+class rpl_event_row_base : public rpl_event_base
+{
+public:
+ /*
+ Access to relevant handler extra flags and other flags that affect row
+ operations.
+
+ Use as if (ev->get_flags() & (uint32)ROW_WRITE_CAN_REPLACE) { ... }
+ */
+ enum flag_bits
+ {
+ ROW_WRITE_CAN_REPLACE, // HA_EXTRA_WRITE_CAN_REPLACE
+ ROW_IGNORE_DUP_KEY, // HA_EXTRA_IGNORE_DUP_KEY
+ ROW_IGNORE_NO_KEY, // HA_EXTRA_IGNORE_NO_KEY
+ ROW_DISABLE_FOREIGN_KEY_CHECKS, // ! @@foreign_key_checks
+ ROW_DISABLE_UNIQUE_KEY_CHECKS, // ! @@unique_checks
+ };
+ uint32_t get_flags();
+
+ /* Access to list of tables modified. */
+ class table_iterator
+ {
+ public:
+ /* Returns table, NULL after last. */
+ const TABLE *get_next();
+ private:
+ // ...
+ };
+ table_iterator get_modified_tables() const;
+
+private:
+ /* Context used to provide accessors. */
+ THD *thd;
+
+protected:
+ rpl_event_row_base(THD *_thd) : thd(_thd) { }
+};
+
+
+class rpl_event_row_write : public rpl_event_row_base
+{
+public:
+ const BITMAP *get_write_set() const;
+ const uchar *get_after_image() const;
+};
+
+class rpl_event_row_update : public rpl_event_row_base
+{
+public:
+ const BITMAP *get_read_set() const;
+ const BITMAP *get_write_set() const;
+ const uchar *get_before_image() const;
+ const uchar *get_after_image() const;
+};
+
+class rpl_event_row_delete : public rpl_event_row_base
+{
+public:
+ const BITMAP *get_read_set() const;
+ const uchar *get_before_image() const;
+};
+
+
+/*
+ Event consumer callbacks.
+
+ An event consumer registers with an event generator to receive event
+ notifications from that generator.
+
+ The consumer has callbacks (in the form of virtual functions) for the
+ individual event types the consumer is interested in. Only callbacks that
+ are non-NULL will be invoked. If an event applies to multiple callbacks in a
+ single callback struct, it will only be passed to the most specific non-NULL
+ callback (so events never fire more than once per registration).
+
+ The lifetime of the memory holding the event is only for the duration of the
+ callback invocation, unless otherwise noted.
+
+ Callbacks return 0 for success or error code (ToDo: does this make sense?).
+*/
+
+struct rpl_event_consumer_transaction
+{
+ virtual int trx_start(const rpl_event_transaction_start *) { return 0; }
+ virtual int trx_commit(const rpl_event_transaction_commit *) { return 0; }
+ virtual int trx_rollback(const rpl_event_transaction_rollback *) { return 0; }
+};
+
+/*
+ Consuming statement-based events.
+
+ The statement event generator is stacked on top of the transaction event
+ generator, so we can receive those events as well.
+*/
+struct rpl_event_consumer_statement : public rpl_event_consumer_transaction
+{
+ virtual int stmt_start(const rpl_event_statement_start *) { return 0; }
+ virtual int stmt_end(const rpl_event_statement_end *) { return 0; }
+
+ virtual int stmt_query(const rpl_event_statement_query *) { return 0; }
+
+ /* Data for a file used in LOAD DATA [LOCAL] INFILE. */
+ virtual int stmt_load_data_block(const rpl_event_statement_load_data_block *)
+ { return 0; }
+
+ /*
+ These are specific kinds of statements; if specified they override
+    stmt_query() for the corresponding event.
+ */
+ virtual int stmt_load_query(const rpl_event_statement_load_query *ev)
+ { return stmt_query(ev); }
+};
+
+/*
+ Consuming row-based events.
+
+ The row event generator is stacked on top of the statement event generator.
+*/
+struct rpl_event_consumer_row : public rpl_event_consumer_statement
+{
+ virtual int row_write(const rpl_event_row_write *) { return 0; }
+ virtual int row_update(const rpl_event_row_update *) { return 0; }
+ virtual int row_delete(const rpl_event_row_delete *) { return 0; }
+};
+
+
+/*
+ Registration functions.
+
+ ToDo: Make a way to de-register.
+
+ ToDo: Find a good way for a plugin (eg. PBXT) to publish a generator
+ registration method.
+*/
+
+int rpl_event_transaction_register(const rpl_event_consumer_transaction *cbs);
+int rpl_event_statement_register(const rpl_event_consumer_statement *cbs);
+int rpl_event_row_register(const rpl_event_consumer_row *cbs);
-=-=(Knielsen - Thu, 24 Jun 2010, 14:28)=-=-
High-Level Specification modified.
--- /tmp/wklog.120.old.7341 2010-06-24 14:28:17.000000000 +0000
+++ /tmp/wklog.120.new.7341 2010-06-24 14:28:17.000000000 +0000
@@ -1 +1,159 @@
+Generators and consumers
+-------------------------
+
+We have the two concepts:
+
+1. Event _generators_, that produce events describing all changes to data in a
+ server.
+
+2. Event consumers, that receive such events and use them in various ways.
+
+An example of an event generator is the execution of SQL statements, which generates
+events like those used for statement-based replication. Another example is
+PBXT engine-level replication.
+
+An example of an event consumer is the writing of the binlog on a master.
+
+Event generators are not really plugins. Rather, there are specific points in
+the server where events are generated. However, a generator can be part of a
+plugin, for example a PBXT engine-level replication event generator would be
+part of the PBXT storage engine plugin.
+
+Event consumers on the other hand could be a plugin.
+
+One generator can be stacked on top of another. This means that a generator on
+top (for example row-based events) will handle some events itself
+(eg. non-deterministic update in mixed-mode binlogging). Other events that it
+does not want to or cannot handle (for example deterministic delete or DDL)
+will be deferred to the generator below (for example statement-based events).
+
+
+Materialisation (or not)
+------------------------
+
+A central decision is how to represent events that are generated in the API at
+the point of generation.
+
+I want to avoid making the API require that events are materialised. By
+"Materialised" I mean that all (or most) of the data for the event is written
+into memory in a struct/class used inside the server or serialised in a data
+buffer (byte buffer) in a format suitable for network transport or disk
+storage.
+
+Using a non-materialised event means storing just a reference to the appropriate
+context that allows retrieving all information for the event using
+accessors. I.e. typically this would be based on getting the event information
+from the THD pointer.
+
+Some reasons to avoid using materialised events in the API:
+
+ - Replication events have a _lot_ of detailed context information that can be
+ needed in events: user-defined variables, random seed, character sets,
+ table column names and types, etc. etc. If we make the API based on
+ materialisation, then the initial decision about which context information
+ to include with which events will have to be done in the API, while ideally
+ we want this decision to be done by the individual consumer plugin. There
+ will this be a conflict between what to include (to allow consumers access)
+ and what to exclude (to avoid excessive needless work).
+
+ - Materialising means defining a very specific format, which will tend to
+ make the API less generic and flexible.
+
+ - Unless the materialised format is made _very_ specific (and thus very
+ inflexible), it is unlikely to be directly useful for transport
+ (eg. binlog), so it will need to be re-materialised into a different format
+ anyway, wasting work.
+
+ - If a generator on top handles an event, then we want to avoid wasting work
+ materialising an event in a generator below which would be completely
+ unused. Thus there would be a need for the upper generator to somehow
+ notify the lower generator ahead of event generation time to not fire an
+ event, complicating the API.
+
+Some advantages for materialisation:
+
+ - Using an API based on passing around some well-defined struct event (or
+ byte buffer) will be simpler than the complex class hierarchy proposed here
+ with no requirement for materialisation.
+
+ - Defining a materialised format would allow an easy way to use the same
+ consumer code on a generator that produces events at the source of
+ execution and on a generator that produces events from eg. reading them
+ from an event log.
+
+Note that there can be some middle way, where some data is materialised and
+some is kept as a reference to context (eg. THD) only. This however loses most
+of the mentioned advantages for materialisation.
+
+The design proposed here aims for as little materialisation as possible.
+
+
+Default materialisation format
+------------------------------
+
+
+While the proposed API doesn't _require_ materialisation, we can still think
+about providing the _option_ for built-in materialisation. This could be
+useful if such materialisation is made suitable for transport to a different
+server (eg. no endian-dependence etc). If there is a facility for such
+materialisation built-in to the API, it becomes possible to write something
+like a generic binlog plugin or generic network transport plugin. This would
+be really useful for eg. PBXT engine-level replication, as it could be
+implemented without having to re-invent a binlog format.
+
+I added in the proposed API a simple facility to materialise every event as a
+string of bytes. To use this, I still need to add a suitable facility to
+de-materialise the event.
+
+However, it is still an open question whether such a facility will be at all
+useful. It still has some of the problems with materialisation mentioned
+above. And I think it is likely that a good binlog implementation will need
+to do more than just blindly copy opaque events from one endpoint to
+another. For example, it might need different event boundaries (merge and/or
+split events); it might need to augment or modify events, or inject new
+events, etc.
+
+So I think maybe it is better to add such a generic materialisation facility
+on top of the basic event generator API. Such a facility would provide
+materialisation of a replication event stream, not of individual events, so
+would be more flexible in providing a good implementation. It would be
+implemented for all generators. It would be separate from both the event
+generator API (so we have flexibility to put a filter class in-between
+generator and materialisation), and could also be separate from the actual
+transport handling stuff like fsync() of binlog files and socket connections
+etc. It would be paired with a corresponding applier API which would handle
+executing events on a slave.
+
+Then we can have a default materialised event format, which is available, but
+not mandatory. So there can still be other formats alongside (like legacy
+MySQL 5.1 binlog event format and maybe Tungsten would have its own format).
+
+
+Encapsulation
+-------------
+
+Another fundamental question about the design is the level of encapsulation
+used for the API.
+
+At the implementation level, a lot of the work is basically to pull out all of
+the needed information from the THD object/context. The API I propose tries to
+_not_ expose the THD to consumers. Instead it provides accessor functions for
+all the bits and pieces relevant to each replication event, while the event
+class itself likely will be more or less just an encapsulated THD.
+
+So an alternative would be to have a generic event that was just (type, THD).
+Then consumers could just pull out whatever information they want from the
+THD. The THD implementation is already exposed to storage engines. This would
+of course greatly reduce the size of the API, eliminating lots of class
+definitions and accessor functions. Though arguably it wouldn't really
+simplify the API, as the complexity would just be in understanding the THD
+class.
+
+Note that we do not have to take any performance hit from using encapsulated
+accessors since compilers can inline them (though with inlining we do not
+get any ABI stability with respect to the THD implementation).
+
+For now, the API is proposed without exposing the THD class. (Similar
+encapsulation could be added in actual implementation to also not expose TABLE
+and similar classes).
-=-=(Knielsen - Thu, 24 Jun 2010, 12:04)=-=-
Dependency created: 107 now depends on 120
-=-=(Knielsen - Thu, 24 Jun 2010, 11:59)=-=-
High Level Description modified.
--- /tmp/wklog.120.old.516 2010-06-24 11:59:24.000000000 +0000
+++ /tmp/wklog.120.new.516 2010-06-24 11:59:24.000000000 +0000
@@ -11,4 +11,4 @@
Event generators can be stacked, and a generator may defer event generation to
the next one down the stack. For example, the row-level replication event
-generator may defer DLL to the statement-level replication event generator.
+generator may defer DDL to the statement-level replication event generator.
-=-=(Knielsen - Mon, 21 Jun 2010, 08:35)=-=-
Research and design thoughts.
DESCRIPTION:
A part of the replication project, MWL#107.
Events are produced by event Generators. Examples are
- Generation of statement-based replication events
- Generation of row-based events
- Generation of PBXT engine-level replication events
and reading of events from the relay log on a slave may also be an example of
generating events.
Event generators can be stacked, and a generator may defer event generation to
the next one down the stack. For example, the row-level replication event
generator may defer DDL to the statement-level replication event generator.
HIGH-LEVEL SPECIFICATION:
Generators and consumers
-------------------------
We have the two concepts:
1. Event _generators_, that produce events describing all changes to data in a
server.
2. Event consumers, that receive such events and use them in various ways.
An example of an event generator is the execution of SQL statements, which generates
events like those used for statement-based replication. Another example is
PBXT engine-level replication.
An example of an event consumer is the writing of the binlog on a master.
Some event generators are not really plugins. Rather, there are specific
points in the server where events are generated. However, a generator can be
part of a plugin, for example a PBXT engine-level replication event generator
would be part of the PBXT storage engine plugin. And for example we could
write a filter plugin, which would be stacked on top of an existing generator
and provide the same event types and interfaces, but filtered in some way (for
example by removing certain events on the master side, or by re-writing events
in certain ways).
Event consumers on the other hand could be a plugin.
One generator can be stacked on top of another. This means that a generator on
top (for example row-based events) will handle some events itself
(eg. non-deterministic update in mixed-mode binlogging). Other events that it
does not want to or cannot handle (for example deterministic delete or DDL)
will be deferred to the generator below (for example statement-based events).
Materialisation (or not)
------------------------
A central decision is how to represent events that are generated in the API at
the point of generation.
I want to avoid making the API require that events are materialised. By
"Materialised" I mean that all (or most) of the data for the event is written
into memory in a struct/class used inside the server or serialised in a data
buffer (byte buffer) in a format suitable for network transport or disk
storage.
Using a non-materialised event means storing just a reference to the appropriate
context that allows retrieving all information for the event using
accessors. I.e. typically this would be based on getting the event information
from the THD pointer.
Some reasons to avoid using materialised events in the API:
- Replication events have a _lot_ of detailed context information that can be
needed in events: user-defined variables, random seed, character sets,
table column names and types, etc. etc. If we make the API based on
materialisation, then the initial decision about which context information
to include with which events will have to be done in the API, while ideally
we want this decision to be done by the individual consumer plugin. There
will thus be a conflict between what to include (to allow consumers access)
and what to exclude (to avoid excessive needless work).
- Materialising means defining a very specific format, which will tend to
make the API less generic and flexible.
- Unless the materialised format is made _very_ specific (and thus very
inflexible), it is unlikely to be directly useful for transport
(eg. binlog), so it will need to be re-materialised into a different format
anyway, wasting work.
- If a generator on top handles an event, then we want to avoid wasting work
materialising an event in a generator below which would be completely
unused. Thus there would be a need for the upper generator to somehow
notify the lower generator ahead of event generation time to not fire an
event, complicating the API.
Some advantages for materialisation:
- Using an API based on passing around some well-defined struct event (or
byte buffer) will be simpler than the complex class hierarchy proposed here
with no requirement for materialisation.
- Defining a materialised format would allow an easy way to use the same
consumer code on a generator that produces events at the source of
execution and on a generator that produces events from eg. reading them
from an event log.
Note that there can be some middle way, where some data is materialised and
some is kept as a reference to context (eg. THD) only. This however loses most
of the mentioned advantages for materialisation.
Also, the non-materialising interface should be a good interface on top of
which to build a materialising interface.
The design proposed here aims for as little materialisation as possible.
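To make the trade-off concrete, here is a small sketch (not part of the
proposed API; names are illustrative only) of the same query event in
non-materialised and materialised form. In the non-materialised form the
event is just a thin handle onto the THD and data is fetched on demand; in
the materialised form everything any consumer might ever need has to be
copied up front:
/* Non-materialised sketch: accessors read the live THD context on demand.
   (thd->query() / thd->query_length() stand in for however the query text
   is actually reachable from the THD.) */
class sketch_query_event
{
public:
  LEX_STRING get_query_string() const
  {
    LEX_STRING s= { thd->query(), thd->query_length() };  /* hypothetical */
    return s;
  }
private:
  THD *thd;
};
/* Materialised sketch: all context copied into the event at generation time,
   whether or not any registered consumer ever looks at it. */
struct sketch_query_event_materialised
{
  char   *query;         /* copied from the THD */
  size_t  query_length;
  ulong   sql_mode;
  /* ... plus every other piece of context a consumer might ever need ... */
};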
Default materialisation format
------------------------------
While the proposed API doesn't _require_ materialisation, we can still think
about providing the _option_ for built-in materialisation. This could be
useful if such materialisation is made suitable for transport to a different
server (eg. no endian-dependence etc). If there is a facility for such
materialisation built-in to the API, it becomes possible to write something
like a generic binlog plugin or generic network transport plugin. This would
be really useful for eg. PBXT engine-level replication, as it could be
implemented without having to re-invent a binlog format.
I added in the proposed API a simple facility to materialise every event as a
string of bytes. To use this, I still need to add a suitable facility to
de-materialise the event.
However, it is still an open question whether such a facility will be at all
useful. It still has some of the problems with materialisation mentioned
above. And I think it is likely that a good binlog implementation will need
to do more than just blindly copy opaque events from one endpoint to
another. For example, it might need different event boundaries (merge and/or
split events); it might need to augment or modify events, or inject new
events, etc.
So I think maybe it is better to add such a generic materialisation facility
on top of the basic event generator API. Such a facility would provide
materialisation of a replication event stream, not of individual events, so
would be more flexible in providing a good implementation. It would be
implemented for all generators. It would be separate from both the event
generator API (so we have flexibility to put a filter class in-between
generator and materialisation), and could also be separate from the actual
transport handling stuff like fsync() of binlog files and socket connections
etc. It would be paired with a corresponding applier API which would handle
executing events on a slave.
Then we can have a default materialised event format, which is available, but
not mandatory. So there can still be other formats alongside (like legacy
MySQL 5.1 binlog event format and maybe Tungsten would have its own format).
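As a rough illustration of how such an optional facility could be driven,
here is a sketch of a writer callback for the materialise(writer) method in
the draft below, simply appending the bytes to a stdio FILE. Note the draft
only passes the writer function itself, so how the writer's context pointer
is supplied is still an open detail; the usage comment assumes materialise()
would also take the context argument:
/* Sketch only; matches the draft writer signature
   int (*writer)(uchar *data, size_t len, void *context).
   Uses plain <stdio.h> for brevity. */
static int file_writer(uchar *data, size_t len, void *context)
{
  FILE *f= (FILE *) context;
  return (fwrite(data, 1, len, f) == len) ? 0 : -1;
}
/* Hypothetical usage from a consumer callback, assuming an extra context
   parameter on materialise():
     if (ev->materialise(file_writer, (void *) log_file))
       ... handle the error ...
   A real binlog plugin would write through IO_CACHE and handle fsync() and
   log rotation rather than using plain stdio. */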
Encapsulation
-------------
Another fundamental question about the design is the level of encapsulation
used for the API.
At the implementation level, a lot of the work is basically to pull out all of
the needed information from the THD object/context. The API I propose tries to
_not_ expose the THD to consumers. Instead it provides accessor functions for
all the bits and pieces relevant to each replication event, while the event
class itself likely will be more or less just an encapsulated THD.
So an alternative would be to have a generic event that was just (type, THD).
Then consumers could just pull out whatever information they want from the
THD. The THD implementation is already exposed to storage engines. This would
of course greatly reduce the size of the API, eliminating lots of class
definitions and accessor functions. Though arguably it wouldn't really
simplify the API, as the complexity would just be in understanding the THD
class.
Note that we do not have to take any performance hit from using encapsulated
accessors since compilers can inline them (though with inlining we do not
get any ABI stability with respect to the THD implementation).
For now, the API is proposed without exposing the THD class. (Similar
encapsulation could be added in actual implementation to also not expose TABLE
and similar classes).
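The inlining point can be illustrated with the get_local_trx_id() accessor
from the draft below: defined in the API header it can inline to a direct
field load, but consumer binaries then depend on the THD layout; declared in
the header and defined inside the server it stays a real call and THD remains
opaque to consumer plugins (sketch only):
/* Inline in the header: no call overhead, but no ABI stability w.r.t. THD. */
uint64_t get_local_trx_id() const { return thd->local_trx_id; }
/* Out-of-line alternative: the header carries only the declaration, the
   definition lives in the server, so THD can change without recompiling
   consumers. */
/* header: */  uint64_t get_local_trx_id() const;
/* server: */  uint64_t rpl_event_transaction_base::get_local_trx_id() const
               { return thd->local_trx_id; }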
LOW-LEVEL DESIGN:
A consumer is implemented as a virtual class (interface). There is one virtual
function for every event that can be received. A consumer would derive from
the base class and override methods for the events it wants to receive.
There is one consumer interface for each generator. When a generator A is
stacked on B, the consumer interface for A inherits from the interface for
B. This way, when A defers an event to B, the consumer for A will receive the
corresponding event from B.
There are methods for a consumer to register itself to receive events from
each generator. I still need to find a way for a consumer in one plugin to
register itself with a generator implemented in another plugin (eg. PBXT
engine-level replication). I also need to add a way for consumers to
de-register themselves.
The current design has consumer callbacks return 0 for success and error code
otherwise. I still need to think more about whether this is useful (ie. what
is the semantics of returning an error from a consumer callback).
Each event passed to consumers is defined as a class with public accessor
methods to a private context (which is mostly the THD).
My intention is to make all events passed around const, so that the same event
can be passed to each of multiple registered consumers (and to emphasise that
consumers do not have the ability to modify events). It still needs to be seen
whether that const-ness will be feasible in practice without very heavy
modification/constification of existing code.
What follows is a partial draft of a possible definition of the API as
concrete C++ class definitions.
-----------------------------------------------------------------------
/*
Virtual base class for generated replication events.
This is the parent of events generated from all kinds of generators. Only
child classes can be instantiated.
This class can be used by code that wants to treat events in a generic way,
without any knowledge of event details. I still need to decide whether such
generic code is sensible.
*/
class rpl_event_base
{
/*
Maybe we will want the ability to materialise an event to a standard
binary format. This could be achieved with a base method like this. The
actual materialisation would be implemented in each deriving class. The
public methods would provide different interfaces for specifying the
buffer or for writing directly into IO_CACHE or file.
*/
/* Return 0 on success, -1 on error, -2 on out-of-buffer. */
int materialise(uchar *buffer, size_t buflen) const;
/*
Returns NULL on error or else malloc()ed buffer with materialised event,
caller must free().
*/
uchar *materialise() const;
/* Same but using passed in memroot. */
uchar *materialise(mem_root *memroot) const;
/*
Materialise to user-supplied writer function (could write directly to file
or the like).
*/
int materialise(int (*writer)(uchar *data, size_t len, void *context)) const;
/*
As for what to do with a materialised event, there are a couple of
possibilities.
One is to have a de_materialise() method somewhere that can construct an
rpl_event_base (really a derived class of course) from a buffer or writer
function. This would require each accessor function to conditionally read
its data from either THD context or buffer (GCC is able to optimise
several such conditionals in multiple accessor function calls into one
conditional), or we can make all accessors virtual if the performance hit
is acceptable.
Another is to have different classes for accessing events read from
materialised event data.
Also, I still need to think about whether it is at all useful to be able
to generically materialise an event at this level. It may be that any
binlog/transport will in any case need to understand more of the format of
events, so that such materialisation/transport is better done at a
different layer.
*/
protected:
/* Implementation which is the basis for materialise(). */
virtual int do_materialise(int (*writer)(uchar *data, size_t len,
void *context)) const = 0;
private:
/* Virtual base class, private constructor to prevent instantiation. */
rpl_event_base();
};
/*
These are the event types output from the transaction event generator.
This generator is not stacked on anything.
The transaction event generator marks the start and end (commit or rollback)
of transactions. It also gives information about whether the transaction was
a full transaction or autocommitted statement, whether transactional tables
were involved, whether non-transactional tables were involved, and XA
information (ToDo).
*/
/* Base class for transaction events. */
class rpl_event_transaction_base : public rpl_event_base
{
public:
/*
Get the local transaction id. This id is only unique within one server.
It is allocated whenever a new transaction is started.
Can be used to identify events belonging to the same transaction in a
binlog-like stream of events streamed in parallel among multiple
transactions.
*/
uint64_t get_local_trx_id() const { return thd->local_trx_id; };
bool get_is_autocommit() const;
private:
/* The context is the THD. */
THD *thd;
rpl_event_transaction_base(THD *_thd) : thd(_thd) { };
};
/* Transaction start event. */
class rpl_event_transaction_start : public rpl_event_transaction_base
{
};
/* Transaction commit. */
class rpl_event_transaction_commit : public rpl_event_transaction_base
{
public:
/*
The global transaction id is unique cross-server.
It can be used to identify the position from which to start a slave
replicating from a master.
This global ID is only available once the transaction is decided to commit
by the TC manager / primary redundancy service. This TC also allocates the
ID and decides the exact semantics (can there be gaps, etc); however the
format is fixed (cluster_id, running_counter).
*/
struct global_transaction_id
{
uint32_t cluster_id;
uint64_t counter;
};
const global_transaction_id *get_global_transaction_id() const;
};
/* Transaction rollback. */
class rpl_event_transaction_rollback : public rpl_event_transaction_base
{
};
/* Base class for statement events. */
class rpl_event_statement_base : public rpl_event_base
{
public:
LEX_STRING get_current_db() const;
};
class rpl_event_statement_start : public rpl_event_statement_base
{
};
class rpl_event_statement_end : public rpl_event_statement_base
{
public:
int get_errorcode() const;
};
class rpl_event_statement_query : public rpl_event_statement_base
{
public:
LEX_STRING get_query_string();
ulong get_sql_mode();
const CHARSET_INFO *get_character_set_client();
const CHARSET_INFO *get_collation_connection();
const CHARSET_INFO *get_collation_server();
const CHARSET_INFO *get_collation_default_db();
/*
Access to relevant flags that affect query execution.
Use as if (ev->get_flags() & (uint32)STMT_FOREIGN_KEY_CHECKS) { ... }
*/
enum flag_bits
{
STMT_FOREIGN_KEY_CHECKS, // @@foreign_key_checks
STMT_UNIQUE_KEY_CHECKS, // @@unique_checks
STMT_AUTO_IS_NULL, // @@sql_auto_is_null
};
uint32_t get_flags();
ulong get_auto_increment_offset();
ulong get_auto_increment_increment();
// And so on for: time zone; day/month names; connection id; LAST_INSERT_ID;
// INSERT_ID; random seed; user variables.
//
// We probably also need get_uses_temporary_table(), get_used_user_vars(),
// get_uses_auto_increment() and so on, so a consumer can get more
// information about what kind of context information a query will need when
// executed on a slave.
};
class rpl_event_statement_load_query : public rpl_event_statement_query
{
};
/*
This event is fired with blocks of data for files read (from server-local
file or client connection) for LOAD DATA.
*/
class rpl_event_statement_load_data_block : public rpl_event_statement_base
{
public:
struct block
{
const uchar *ptr;
size_t size;
};
block get_block() const;
};
/* Base class for row-based replication events. */
class rpl_event_row_base : public rpl_event_base
{
public:
/*
Access to relevant handler extra flags and other flags that affect row
operations.
Use as if (ev->get_flags() & (uint32)ROW_WRITE_CAN_REPLACE) { ... }
*/
enum flag_bits
{
ROW_WRITE_CAN_REPLACE, // HA_EXTRA_WRITE_CAN_REPLACE
ROW_IGNORE_DUP_KEY, // HA_EXTRA_IGNORE_DUP_KEY
ROW_IGNORE_NO_KEY, // HA_EXTRA_IGNORE_NO_KEY
ROW_DISABLE_FOREIGN_KEY_CHECKS, // ! @@foreign_key_checks
ROW_DISABLE_UNIQUE_KEY_CHECKS, // ! @@unique_checks
};
uint32_t get_flags();
/* Access to list of tables modified. */
class table_iterator
{
public:
/* Returns table, NULL after last. */
const TABLE *get_next();
private:
// ...
};
table_iterator get_modified_tables() const;
private:
/* Context used to provide accessors. */
THD *thd;
protected:
rpl_event_row_base(THD *_thd) : thd(_thd) { }
};
class rpl_event_row_write : public rpl_event_row_base
{
public:
const BITMAP *get_write_set() const;
const uchar *get_after_image() const;
};
class rpl_event_row_update : public rpl_event_row_base
{
public:
const BITMAP *get_read_set() const;
const BITMAP *get_write_set() const;
const uchar *get_before_image() const;
const uchar *get_after_image() const;
};
class rpl_event_row_delete : public rpl_event_row_base
{
public:
const BITMAP *get_read_set() const;
const uchar *get_before_image() const;
};
/*
Event consumer callbacks.
An event consumer registers with an event generator to receive event
notifications from that generator.
The consumer has callbacks (in the form of virtual functions) for the
individual event types the consumer is interested in. Only callbacks that
are non-NULL will be invoked. If an event applies to multiple callbacks in a
single callback struct, it will only be passed to the most specific non-NULL
callback (so events never fire more than once per registration).
The lifetime of the memory holding the event is only for the duration of the
callback invocation, unless otherwise noted.
Callbacks return 0 for success or error code (ToDo: does this make sense?).
*/
struct rpl_event_consumer_transaction
{
virtual int trx_start(const rpl_event_transaction_start *) { return 0; }
virtual int trx_commit(const rpl_event_transaction_commit *) { return 0; }
virtual int trx_rollback(const rpl_event_transaction_rollback *) { return 0; }
};
/*
Consuming statement-based events.
The statement event generator is stacked on top of the transaction event
generator, so we can receive those events as well.
*/
struct rpl_event_consumer_statement : public rpl_event_consumer_transaction
{
virtual int stmt_start(const rpl_event_statement_start *) { return 0; }
virtual int stmt_end(const rpl_event_statement_end *) { return 0; }
virtual int stmt_query(const rpl_event_statement_query *) { return 0; }
/* Data for a file used in LOAD DATA [LOCAL] INFILE. */
virtual int stmt_load_data_block(const rpl_event_statement_load_data_block *)
{ return 0; }
/*
These are specific kinds of statements; if specified they override
stmt_query() for the corresponding event.
*/
virtual int stmt_load_query(const rpl_event_statement_load_query *ev)
{ return stmt_query(ev); }
};
/*
Consuming row-based events.
The row event generator is stacked on top of the statement event generator.
*/
struct rpl_event_consumer_row : public rpl_event_consumer_statement
{
virtual int row_write(const rpl_event_row_write *) { return 0; }
virtual int row_update(const rpl_event_row_update *) { return 0; }
virtual int row_delete(const rpl_event_row_delete *) { return 0; }
};
/*
Registration functions.
ToDo: Make a way to de-register.
ToDo: Find a good way for a plugin (eg. PBXT) to publish a generator
registration method.
*/
int rpl_event_transaction_register(const rpl_event_consumer_transaction *cbs);
int rpl_event_statement_register(const rpl_event_consumer_statement *cbs);
int rpl_event_row_register(const rpl_event_consumer_row *cbs);
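To show how the above is intended to fit together, here is a hedged sketch of
a trivial consumer that counts row events. It derives from the row consumer
interface (and so could also override the inherited statement and transaction
callbacks) and registers itself through the draft registration function; the
init hook name is invented for the example:
/* Sketch only: counts row events and walks the modified-table list. */
class row_counter : public rpl_event_consumer_row
{
public:
  row_counter() : writes(0), updates(0), deletes(0) { }
  virtual int row_write(const rpl_event_row_write *ev)
  {
    writes++;
    /* Example use of the draft table_iterator. */
    rpl_event_row_base::table_iterator it= ev->get_modified_tables();
    while (it.get_next() != NULL)
      { /* per-table statistics could be kept here */ }
    return 0;                                  /* 0 = success */
  }
  virtual int row_update(const rpl_event_row_update *) { updates++; return 0; }
  virtual int row_delete(const rpl_event_row_delete *) { deletes++; return 0; }
private:
  uint64_t writes, updates, deletes;
};
static row_counter counter;
/* Called from some plugin initialisation hook (hook name is not part of the
   draft): */
int row_counter_init()
{
  return rpl_event_row_register(&counter);
}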
ESTIMATED WORK TIME
ESTIMATED COMPLETION DATE
-----------------------------------------------------------------------
WorkLog (v4.0.0)
[Maria-developers] WL#120 Updated (by Knielsen): Replication API for stacked event generators
by worklog-noreply@askmonty.org 29 Jun '10
by worklog-noreply@askmonty.org 29 Jun '10
29 Jun '10
-----------------------------------------------------------------------
WORKLOG TASK
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
TASK...........: Replication API for stacked event generators
CREATION DATE..: Mon, 07 Jun 2010, 13:13
SUPERVISOR.....: Knielsen
IMPLEMENTOR....: Knielsen
COPIES TO......:
CATEGORY.......: Server-Sprint
TASK ID........: 120 (http://askmonty.org/worklog/?tid=120)
VERSION........: Server-9.x
STATUS.........: In-Progress
PRIORITY.......: 60
WORKED HOURS...: 2
ESTIMATE.......: 0 (hours remain)
ORIG. ESTIMATE.: 0
PROGRESS NOTES:
-=-=(Knielsen - Tue, 29 Jun 2010, 13:51)=-=-
Status updated.
--- /tmp/wklog.120.old.31179 2010-06-29 13:51:20.000000000 +0000
+++ /tmp/wklog.120.new.31179 2010-06-29 13:51:20.000000000 +0000
@@ -1 +1 @@
-Assigned
+In-Progress
-=-=(Knielsen - Mon, 28 Jun 2010, 07:11)=-=-
High-Level Specification modified.
--- /tmp/wklog.120.old.18416 2010-06-28 07:11:09.000000000 +0000
+++ /tmp/wklog.120.new.18416 2010-06-28 07:11:09.000000000 +0000
@@ -14,10 +14,14 @@
An example of an event consumer is the writing of the binlog on a master.
-Event generators are not really plugins. Rather, there are specific points in
-the server where events are generated. However, a generator can be part of a
-plugin, for example a PBXT engine-level replication event generator would be
-part of the PBXT storage engine plugin.
+Some event generators are not really plugins. Rather, there are specific
+points in the server where events are generated. However, a generator can be
+part of a plugin, for example a PBXT engine-level replication event generator
+would be part of the PBXT storage engine plugin. And for example we could
+write a filter plugin, which would be stacked on top of an existing generator
+and provide the same event types and interfaces, but filtered in some way (for
+example by removing certain events on the master side, or by re-writing events
+in certain ways).
Event consumers on the other hand could be a plugin.
@@ -85,6 +89,9 @@
some is kept as reference to context (eg. THD) only. This however looses most
of the mentioned advantages for materialisation.
+Also, the non-materialising interface should be a good interface on top of
+which to build a materialising interface.
+
The design proposed here aims for as little materialisation as possible.
-=-=(Knielsen - Thu, 24 Jun 2010, 14:29)=-=-
Category updated.
--- /tmp/wklog.120.old.7440 2010-06-24 14:29:38.000000000 +0000
+++ /tmp/wklog.120.new.7440 2010-06-24 14:29:38.000000000 +0000
@@ -1 +1 @@
-Server-RawIdeaBin
+Server-Sprint
-=-=(Knielsen - Thu, 24 Jun 2010, 14:29)=-=-
Status updated.
--- /tmp/wklog.120.old.7440 2010-06-24 14:29:38.000000000 +0000
+++ /tmp/wklog.120.new.7440 2010-06-24 14:29:38.000000000 +0000
@@ -1 +1 @@
-Un-Assigned
+Assigned
-=-=(Knielsen - Thu, 24 Jun 2010, 14:29)=-=-
Low Level Design modified.
--- /tmp/wklog.120.old.7355 2010-06-24 14:29:11.000000000 +0000
+++ /tmp/wklog.120.new.7355 2010-06-24 14:29:11.000000000 +0000
@@ -1 +1,385 @@
+A consumer is implemented as a virtual class (interface). There is one virtual
+function for every event that can be received. A consumer would derive from
+the base class and override methods for the events it wants to receive.
+
+There is one consumer interface for each generator. When a generator A is
+stacked on B, the consumer interface for A inherits from the interface for
+B. This way, when A defers an event to B, the consumer for A will receive the
+corresponding event from B.
+
+There are methods for a consumer to register itself to receive events from
+each generator. I still need to find a way for a consumer in one plugin to
+register itself with a generator implemented in another plugin (eg. PBXT
+engine-level replication). I also need to add a way for consumers to
+de-register themselves.
+
+The current design has consumer callbacks return 0 for success and error code
+otherwise. I still need to think more about whether this is useful (ie. what
+is the semantics of returning an error from a consumer callback).
+
+Each event passed to consumers is defined as a class with public accessor
+methods to a private context (which is mostly the THD).
+
+My intention is to make all events passed around const, so that the same event
+can be passed to each of multiple registered consumers (and to emphasise that
+consumers do not have the ability to modify events). It still needs to be seen
+whether that const-ness will be feasible in practice without very heavy
+modification/constification of existing code.
+
+What follows is a partial draft of a possible definition of the API as
+concrete C++ class definitions.
+
+-----------------------------------------------------------------------
+
+/*
+ Virtual base class for generated replication events.
+
+ This is the parent of events generated from all kinds of generators. Only
+ child classes can be instantiated.
+
+ This class can be used by code that wants to treat events in a generic way,
+ without any knowledge of event details. I still need to decide whether such
+ generic code is sensible.
+*/
+class rpl_event_base
+{
+ /*
+ Maybe we will want the ability to materialise an event to a standard
+ binary format. This could be achieved with a base method like this. The
+ actual materialisation would be implemented in each deriving class. The
+ public methods would provide different interfaces for specifying the
+ buffer or for writing directly into IO_CACHE or file.
+ */
+
+ /* Return 0 on success, -1 on error, -2 on out-of-buffer. */
+ int materialise(uchar *buffer, size_t buflen) const;
+ /*
+ Returns NULL on error or else malloc()ed buffer with materialised event,
+ caller must free().
+ */
+ uchar *materialise() const;
+ /* Same but using passed in memroot. */
+ uchar *materialise(mem_root *memroot) const;
+ /*
+ Materialise to user-supplied writer function (could write directly to file
+ or the like).
+ */
+ int materialise(int (*writer)(uchar *data, size_t len, void *context)) const;
+
+ /*
+ As to for what to do with a materialised event, there are a couple of
+ possibilities.
+
+ One is to have a de_materialise() method somewhere that can construct an
+ rpl_event_base (really a derived class of course) from a buffer or writer
+ function. This would require each accessor function to conditionally read
+ its data from either THD context or buffer (GCC is able to optimise
+ several such conditionals in multiple accessor function calls into one
+ conditional), or we can make all accessors virtual if the performance hit
+ is acceptable.
+
+ Another is to have different classes for accessing events read from
+ materialised event data.
+
+ Also, I still need to think about whether it is at all useful to be able
+ to generically materialise an event at this level. It may be that any
+ binlog/transport will in any case need to undertand more of the format of
+ events, so that such materialisation/transport is better done at a
+ different layer.
+ */
+
+protected:
+ /* Implementation which is the basis for materialise(). */
+ virtual int do_materialise(int (*writer)(uchar *data, size_t len,
+ void *context)) const = 0;
+
+private:
+ /* Virtual base class, private constructor to prevent instantiation. */
+ rpl_event_base();
+};
+
+
+/*
+ These are the event types output from the transaction event generator.
+
+ This generator is not stacked on anything.
+
+ The transaction event generator marks the start and end (commit or rollback)
+ of transactions. It also gives information about whether the transaction was
+ a full transaction or autocommitted statement, whether transactional tables
+ were involved, whether non-transactional tables were involved, and XA
+ information (ToDo).
+*/
+
+/* Base class for transaction events. */
+class rpl_event_transaction_base : public rpl_event_base
+{
+public:
+ /*
+ Get the local transaction id. This idea is only unique within one server.
+ It is allocated whenever a new transaction is started.
+ Can be used to identify events belonging to the same transaction in a
+ binlog-like stream of events streamed in parallel among multiple
+ transactions.
+ */
+ uint64_t get_local_trx_id() const { return thd->local_trx_id; };
+
+ bool get_is_autocommit() const;
+
+private:
+ /* The context is the THD. */
+ THD *thd;
+
+ rpl_event_transaction_base(THD *_thd) : thd(_thd) { };
+};
+
+/* Transaction start event. */
+class rpl_event_transaction_start : public rpl_event_transaction_base
+{
+
+};
+
+/* Transaction commit. */
+class rpl_event_transaction_commit : public rpl_event_transaction_base
+{
+public:
+ /*
+ The global transaction id is unique cross-server.
+
+ It can be used to identify the position from which to start a slave
+ replicating from a master.
+
+ This global ID is only available once the transaction is decided to commit
+ by the TC manager / primary redundancy service. This TC also allocates the
+ ID and decides the exact semantics (can there be gaps, etc); however the
+ format is fixed (cluster_id, running_counter).
+ */
+ struct global_transaction_id
+ {
+ uint32_t cluster_id;
+ uint64_t counter;
+ };
+
+ const global_transaction_id *get_global_transaction_id() const;
+};
+
+/* Transaction rollback. */
+class rpl_event_transaction_rollback : public rpl_event_transaction_base
+{
+
+};
+
+
+/* Base class for statement events. */
+class rpl_event_statement_base : public rpl_event_base
+{
+public:
+ LEX_STRING get_current_db() const;
+};
+
+class rpl_event_statement_start : public rpl_event_statement_base
+{
+
+};
+
+class rpl_event_statement_end : public rpl_event_statement_base
+{
+public:
+ int get_errorcode() const;
+};
+
+class rpl_event_statement_query : public rpl_event_statement_base
+{
+public:
+ LEX_STRING get_query_string();
+ ulong get_sql_mode();
+ const CHARSET_INFO *get_character_set_client();
+ const CHARSET_INFO *get_collation_connection();
+ const CHARSET_INFO *get_collation_server();
+ const CHARSET_INFO *get_collation_default_db();
+
+ /*
+ Access to relevant flags that affect query execution.
+
+ Use as if (ev->get_flags() & (uint32)ROW_FOREIGN_KEY_CHECKS) { ... }
+ */
+ enum flag_bits
+ {
+ STMT_FOREIGN_KEY_CHECKS, // @@foreign_key_checks
+ STMT_UNIQUE_KEY_CHECKS, // @@unique_checks
+ STMT_AUTO_IS_NULL, // @@sql_auto_is_null
+ };
+ uint32_t get_flags();
+
+ ulong get_auto_increment_offset();
+ ulong get_auto_increment_increment();
+
+ // And so on for: time zone; day/month names; connection id; LAST_INSERT_ID;
+ // INSERT_ID; random seed; user variables.
+ //
+ // We probably also need get_uses_temporary_table(), get_used_user_vars(),
+ // get_uses_auto_increment() and so on, so a consumer can get more
+ // information about what kind of context information a query will need when
+ // executed on a slave.
+};
+
+class rpl_event_statement_load_query : public rpl_event_statement_query
+{
+
+};
+
+/*
+ This event is fired with blocks of data for files read (from server-local
+ file or client connection) for LOAD DATA.
+*/
+class rpl_event_statement_load_data_block : public rpl_event_statement_base
+{
+public:
+ struct block
+ {
+ const uchar *ptr;
+ size_t size;
+ };
+ block get_block() const;
+};
+
+/* Base class for row-based replication events. */
+class rpl_event_row_base : public rpl_event_base
+{
+public:
+ /*
+ Access to relevant handler extra flags and other flags that affect row
+ operations.
+
+ Use as if (ev->get_flags() & (uint32)ROW_WRITE_CAN_REPLACE) { ... }
+ */
+ enum flag_bits
+ {
+ ROW_WRITE_CAN_REPLACE, // HA_EXTRA_WRITE_CAN_REPLACE
+ ROW_IGNORE_DUP_KEY, // HA_EXTRA_IGNORE_DUP_KEY
+ ROW_IGNORE_NO_KEY, // HA_EXTRA_IGNORE_NO_KEY
+ ROW_DISABLE_FOREIGN_KEY_CHECKS, // ! @@foreign_key_checks
+ ROW_DISABLE_UNIQUE_KEY_CHECKS, // ! @@unique_checks
+ };
+ uint32_t get_flags();
+
+ /* Access to list of tables modified. */
+ class table_iterator
+ {
+ public:
+ /* Returns table, NULL after last. */
+ const TABLE *get_next();
+ private:
+ // ...
+ };
+ table_iterator get_modified_tables() const;
+
+private:
+ /* Context used to provide accessors. */
+ THD *thd;
+
+protected:
+ rpl_event_row_base(THD *_thd) : thd(_thd) { }
+};
+
+
+class rpl_event_row_write : public rpl_event_row_base
+{
+public:
+ const BITMAP *get_write_set() const;
+ const uchar *get_after_image() const;
+};
+
+class rpl_event_row_update : public rpl_event_row_base
+{
+public:
+ const BITMAP *get_read_set() const;
+ const BITMAP *get_write_set() const;
+ const uchar *get_before_image() const;
+ const uchar *get_after_image() const;
+};
+
+class rpl_event_row_delete : public rpl_event_row_base
+{
+public:
+ const BITMAP *get_read_set() const;
+ const uchar *get_before_image() const;
+};
+
+
+/*
+ Event consumer callbacks.
+
+ An event consumer registers with an event generator to receive event
+ notifications from that generator.
+
+ The consumer has callbacks (in the form of virtual functions) for the
+ individual event types the consumer is interested in. Only callbacks that
+ are non-NULL will be invoked. If an event applies to multiple callbacks in a
+ single callback struct, it will only be passed to the most specific non-NULL
+ callback (so events never fire more than once per registration).
+
+ The lifetime of the memory holding the event is only for the duration of the
+ callback invocation, unless otherwise noted.
+
+ Callbacks return 0 for success or error code (ToDo: does this make sense?).
+*/
+
+struct rpl_event_consumer_transaction
+{
+ virtual int trx_start(const rpl_event_transaction_start *) { return 0; }
+ virtual int trx_commit(const rpl_event_transaction_commit *) { return 0; }
+ virtual int trx_rollback(const rpl_event_transaction_rollback *) { return 0; }
+};
+
+/*
+ Consuming statement-based events.
+
+ The statement event generator is stacked on top of the transaction event
+ generator, so we can receive those events as well.
+*/
+struct rpl_event_consumer_statement : public rpl_event_consumer_transaction
+{
+ virtual int stmt_start(const rpl_event_statement_start *) { return 0; }
+ virtual int stmt_end(const rpl_event_statement_end *) { return 0; }
+
+ virtual int stmt_query(const rpl_event_statement_query *) { return 0; }
+
+ /* Data for a file used in LOAD DATA [LOCAL] INFILE. */
+ virtual int stmt_load_data_block(const rpl_event_statement_load_data_block *)
+ { return 0; }
+
+ /*
+ These are specific kinds of statements; if specified they override
+ consume_stmt_query() for the corresponding event.
+ */
+ virtual int stmt_load_query(const rpl_event_statement_load_query *ev)
+ { return stmt_query(ev); }
+};
+
+/*
+ Consuming row-based events.
+
+ The row event generator is stacked on top of the statement event generator.
+*/
+struct rpl_event_consumer_row : public rpl_event_consumer_statement
+{
+ virtual int row_write(const rpl_event_row_write *) { return 0; }
+ virtual int row_update(const rpl_event_row_update *) { return 0; }
+ virtual int row_delete(const rpl_event_row_delete *) { return 0; }
+};
+
+
+/*
+ Registration functions.
+
+ ToDo: Make a way to de-register.
+
+ ToDo: Find a good way for a plugin (eg. PBXT) to publish a generator
+ registration method.
+*/
+
+int rpl_event_transaction_register(const rpl_event_consumer_transaction *cbs);
+int rpl_event_statement_register(const rpl_event_consumer_statement *cbs);
+int rpl_event_row_register(const rpl_event_consumer_row *cbs);
-=-=(Knielsen - Thu, 24 Jun 2010, 14:28)=-=-
High-Level Specification modified.
--- /tmp/wklog.120.old.7341 2010-06-24 14:28:17.000000000 +0000
+++ /tmp/wklog.120.new.7341 2010-06-24 14:28:17.000000000 +0000
@@ -1 +1,159 @@
+Generators and consumers
+-------------------------
+
+We have the two concepts:
+
+1. Event _generators_, that produce events describing all changes to data in a
+ server.
+
+2. Event consumers, that receive such events and use them in various ways.
+
+An example of an event generator is the execution of SQL statements, which generates
+events like those used for statement-based replication. Another example is
+PBXT engine-level replication.
+
+An example of an event consumer is the writing of the binlog on a master.
+
+Event generators are not really plugins. Rather, there are specific points in
+the server where events are generated. However, a generator can be part of a
+plugin, for example a PBXT engine-level replication event generator would be
+part of the PBXT storage engine plugin.
+
+Event consumers on the other hand could be a plugin.
+
+One generator can be stacked on top of another. This means that a generator on
+top (for example row-based events) will handle some events itself
+(eg. non-deterministic update in mixed-mode binlogging). Other events that it
+does not want to or cannot handle (for example deterministic delete or DDL)
+will be deferred to the generator below (for example statement-based events).
+
+
+Materialisation (or not)
+------------------------
+
+A central decision is how to represent events that are generated in the API at
+the point of generation.
+
+I want to avoid making the API require that events are materialised. By
+"Materialised" I mean that all (or most) of the data for the event is written
+into memory in a struct/class used inside the server or serialised in a data
+buffer (byte buffer) in a format suitable for network transport or disk
+storage.
+
+Using a non-materialised event means storing just a reference to the appropriate
+context that allows retrieving all information for the event using
+accessors. I.e. typically this would be based on getting the event information
+from the THD pointer.
+
+Some reasons to avoid using materialised events in the API:
+
+ - Replication events have a _lot_ of detailed context information that can be
+ needed in events: user-defined variables, random seed, character sets,
+ table column names and types, etc. etc. If we make the API based on
+ materialisation, then the initial decision about which context information
+ to include with which events will have to be done in the API, while ideally
+ we want this decision to be done by the individual consumer plugin. There
+ will this be a conflict between what to include (to allow consumers access)
+ and what to exclude (to avoid excessive needless work).
+
+ - Materialising means defining a very specific format, which will tend to
+ make the API less generic and flexible.
+
+ - Unless the materialised format is made _very_ specific (and thus very
+ inflexible), it is unlikely to be directly useful for transport
+ (eg. binlog), so it will need to be re-materialised into a different format
+ anyway, wasting work.
+
+ - If a generator on top handles an event, then we want to avoid wasting work
+ materialising an event in a generator below which would be completely
+ unused. Thus there would be a need for the upper generator to somehow
+ notify the lower generator ahead of event generation time to not fire an
+ event, complicating the API.
+
+Some advantages for materialisation:
+
+ - Using an API based on passing around some well-defined struct event (or
+ byte buffer) will be simpler than the complex class hierarchy proposed here
+ with no requirement for materialisation.
+
+ - Defining a materialised format would allow an easy way to use the same
+ consumer code on a generator that produces events at the source of
+ execution and on a generator that produces events from eg. reading them
+ from an event log.
+
+Note that there can be some middle way, where some data is materialised and
+some is kept as a reference to context (eg. THD) only. This however loses most
+of the mentioned advantages for materialisation.
+
+The design proposed here aims for as little materialisation as possible.
+
+
+Default materialisation format
+------------------------------
+
+
+While the proposed API doesn't _require_ materialisation, we can still think
+about providing the _option_ for built-in materialisation. This could be
+useful if such materialisation is made suitable for transport to a different
+server (eg. no endian-dependence etc). If there is a facility for such
+materialisation built-in to the API, it becomes possible to write something
+like a generic binlog plugin or generic network transport plugin. This would
+be really useful for eg. PBXT engine-level replication, as it could be
+implemented without having to re-invent a binlog format.
+
+I added in the proposed API a simple facility to materialise every event as a
+string of bytes. To use this, I still need to add a suitable facility to
+de-materialise the event.
+
+However, it is still an open question whether such a facility will be at all
+useful. It still has some of the problems with materialisation mentioned
+above. And I think it is likely that a good binlog implementation will need
+to do more than just blindly copy opaque events from one endpoint to
+another. For example, it might need different event boundaries (merge and/or
+split events); it might need to augment or modify events, or inject new
+events, etc.
+
+So I think maybe it is better to add such a generic materialisation facility
+on top of the basic event generator API. Such a facility would provide
+materialisation of a replication event stream, not of individual events, so
+would be more flexible in providing a good implementation. It would be
+implemented for all generators. It would be separate from both the event
+generator API (so we have flexibility to put a filter class in-between
+generator and materialisation), and could also be separate from the actual
+transport handling stuff like fsync() of binlog files and socket connections
+etc. It would be paired with a corresponding applier API which would handle
+executing events on a slave.
+
+Then we can have a default materialised event format, which is available, but
+not mandatory. So there can still be other formats alongside (like legacy
+MySQL 5.1 binlog event format and maybe Tungsten would have its own format).
+
+
+Encapsulation
+-------------
+
+Another fundamental question about the design is the level of encapsulation
+used for the API.
+
+At the implementation level, a lot of the work is basically to pull out all of
+the needed information from the THD object/context. The API I propose tries to
+_not_ expose the THD to consumers. Instead it provides accessor functions for
+all the bits and pieces relevant to each replication event, while the event
+class itself likely will be more or less just an encapsulated THD.
+
+So an alternative would be to have a generic event that was just (type, THD).
+Then consumers could just pull out whatever information they want from the
+THD. The THD implementation is already exposed to storage engines. This would
+of course greatly reduce the size of the API, eliminating lots of class
+definitions and accessor functions. Though arguably it wouldn't really
+simplify the API, as the complexity would just be in understanding the THD
+class.
+
+Note that we do not have to take any performance hit from using encapsulated
+accessors, since compilers can inline them (though with inlining we do not
+get any ABI stability with respect to the THD implementation).
+
+For now, the API is proposed without exposing the THD class. (Similar
+encapsulation could be added in actual implementation to also not expose TABLE
+and similar classes).
-=-=(Knielsen - Thu, 24 Jun 2010, 12:04)=-=-
Dependency created: 107 now depends on 120
-=-=(Knielsen - Thu, 24 Jun 2010, 11:59)=-=-
High Level Description modified.
--- /tmp/wklog.120.old.516 2010-06-24 11:59:24.000000000 +0000
+++ /tmp/wklog.120.new.516 2010-06-24 11:59:24.000000000 +0000
@@ -11,4 +11,4 @@
Event generators can be stacked, and a generator may defer event generation to
the next one down the stack. For example, the row-level replication event
-generator may defer DLL to the statement-level replication event generator.
+generator may defer DDL to the statement-level replication event generator.
-=-=(Knielsen - Mon, 21 Jun 2010, 08:35)=-=-
Research and design thoughts.
DESCRIPTION:
A part of the replication project, MWL#107.
Events are produced by event Generators. Examples are
- Generation of statement-based replication events
- Generation of row-based events
- Generation of PBXT engine-level replication events
Reading events from the relay log on a slave may also be an example of
generating events.
Event generators can be stacked, and a generator may defer event generation to
the next one down the stack. For example, the row-level replication event
generator may defer DDL to the statement-level replication event generator.
HIGH-LEVEL SPECIFICATION:
Generators and consumers
-------------------------
We have the two concepts:
1. Event _generators_, that produce events describing all changes to data in a
server.
2. Event consumers, that receive such events and use them in various ways.
An example of an event generator is the execution of SQL statements, which
generates events like those used for statement-based replication. Another
example is PBXT engine-level replication.
An example of an event consumer is the writing of the binlog on a master.
Some event generators are not really plugins. Rather, there are specific
points in the server where events are generated. However, a generator can be
part of a plugin, for example a PBXT engine-level replication event generator
would be part of the PBXT storage engine plugin. And for example we could
write a filter plugin, which would be stacked on top of an existing generator
and provide the same event types and interfaces, but filtered in some way (for
example by removing certain events on the master side, or by re-writing events
in certain ways).
Event consumers, on the other hand, could be plugins.
One generator can be stacked on top of another. This means that a generator on
top (for example row-based events) will handle some events itself
(eg. non-deterministic update in mixed-mode binlogging). Other events that it
does not want to or cannot handle (for example deterministic delete or DDL)
will be deferred to the generator below (for example statement-based events).
Materialisation (or not)
------------------------
A central decision is how to represent events that are generated in the API at
the point of generation.
I want to avoid making the API require that events are materialised. By
"Materialised" I mean that all (or most) of the data for the event is written
into memory in a struct/class used inside the server or serialised in a data
buffer (byte buffer) in a format suitable for network transport or disk
storage.
Using a non-materialised event means storing just a reference to the
appropriate context, from which all information for the event can be
retrieved through accessors. Typically this would be based on getting the
event information from the THD pointer.
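As a purely illustrative sketch (the names below are invented for this example
and are not part of the proposal; the usual server types THD and LEX_STRING are
assumed), the difference between the two representations is roughly:

/* Materialised: all potentially useful context is copied out up front. */
struct materialised_query_event
{
  char     *query;
  size_t    query_length;
  char     *current_db;
  uint32_t  flags;
  /* ... plus every other piece of context any consumer might ever want ... */
};

/* Non-materialised: the event is just a typed view onto the server context
   (the THD); data is only fetched when a consumer actually asks for it. */
class query_event_view
{
public:
  explicit query_event_view(THD *thd_arg) : thd(thd_arg) { }
  LEX_STRING get_query_string() const;   /* reads from thd on demand */
  LEX_STRING get_current_db() const;     /* ditto */
private:
  THD *thd;
};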
Some reasons to avoid using materialised events in the API:
- Replication events have a _lot_ of detailed context information that can be
needed in events: user-defined variables, random seed, character sets,
table column names and types, etc. etc. If we make the API based on
materialisation, then the initial decision about which context information
to include with which events will have to be done in the API, while ideally
we want this decision to be done by the individual consumer plugin. There
will thus be a conflict between what to include (to allow consumers access)
and what to exclude (to avoid excessive needless work).
- Materialising means defining a very specific format, which will tend to
make the API less generic and flexible.
- Unless the materialised format is made _very_ specific (and thus very
inflexible), it is unlikely to be directly useful for transport
(eg. binlog), so it will need to be re-materialised into a different format
anyway, wasting work.
- If a generator on top handles an event, then we want to avoid wasting work
materialising an event in a generator below which would be completely
unused. Thus there would be a need for the upper generator to somehow
notify the lower generator ahead of event generation time to not fire an
event, complicating the API.
Some advantages for materialisation:
- Using an API based on passing around some well-defined struct event (or
byte buffer) will be simpler than the complex class hierarchy proposed here
with no requirement for materialisation.
- Defining a materialised format would allow an easy way to use the same
consumer code on a generator that produces events at the source of
execution and on a generator that produces events from eg. reading them
from an event log.
Note that there can be some middle way, where some data is materialised and
some is kept as a reference to context (eg. THD) only. This however loses most
of the mentioned advantages for materialisation.
Also, the non-materialising interface should be a good interface on top of
which to build a materialising interface.
The design proposed here aims for as little materialisation as possible.
Default materialisation format
------------------------------
While the proposed API doesn't _require_ materialisation, we can still think
about providing the _option_ for built-in materialisation. This could be
useful if such materialisation is made suitable for transport to a different
server (eg. no endian-dependence etc). If there is a facility for such
materialisation built-in to the API, it becomes possible to write something
like a generic binlog plugin or generic network transport plugin. This would
be really useful for eg. PBXT engine-level replication, as it could be
implemented without having to re-invent a binlog format.
I added in the proposed API a simple facility to materialise every event as a
string of bytes. To use this, I still need to add a suitable facility to
de-materialise the event.
However, it is still an open question whether such a facility will be at all
useful. It still has some of the problems with materialisation mentioned
above. And I think it is likely that a good binlog implementation will need
to do more than just blindly copy opaque events from one endpoint to
another. For example, it might need different event boundaries (merge and/or
split events); it might need to augment or modify events, or inject new
events, etc.
So I think maybe it is better to add such a generic materialisation facility
on top of the basic event generator API. Such a facility would provide
materialisation of a replication event stream, not of individual events, so
would be more flexible in providing a good implementation. It would be
implemented for all generators. It would be separate from both the event
generator API (so we have flexibility to put a filter class in-between
generator and materialisation), and could also be separate from the actual
transport handling stuff like fsync() of binlog files and socket connections
etc. It would be paired with a corresponding applier API which would handle
executing events on a slave.
Then we can have a default materialised event format, which is available, but
not mandatory. So there can still be other formats alongside (like legacy
MySQL 5.1 binlog event format and maybe Tungsten would have its own format).
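To make the stream idea a bit more concrete, such a facility could expose
something like the following pair of interfaces. These are hypothetical names
for illustration only (rpl_event_base refers to the type in the low-level
draft further down); the point is that the stream format lives entirely behind
these interfaces, not in the generator API:

/* Turns a stream of generator events into an opaque byte stream; free to
   merge, split, augment or re-order events as its format requires. */
class rpl_stream_materialiser
{
public:
  virtual int append_event(const rpl_event_base *ev)= 0;
  virtual int flush()= 0;
  virtual ~rpl_stream_materialiser() { }
};

/* The corresponding applier side: reads the materialised stream back and
   executes it on a slave. */
class rpl_stream_applier
{
public:
  virtual int apply_chunk(const uchar *data, size_t len)= 0;
  virtual ~rpl_stream_applier() { }
};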
Encapsulation
-------------
Another fundamental question about the design is the level of encapsulation
used for the API.
At the implementation level, a lot of the work is basically to pull out all of
the needed information from the THD object/context. The API I propose tries to
_not_ expose the THD to consumers. Instead it provides accessor functions for
all the bits and pieces relevant to each replication event, while the event
class itself likely will be more or less just an encapsulated THD.
So an alternative would be to have a generic event that was just (type, THD).
Then consumers could just pull out whatever information they want from the
THD. The THD implementation is already exposed to storage engines. This would
of course greatly reduce the size of the API, eliminating lots of class
definitions and accessor functions. Though arguably it wouldn't really
simplify the API, as the complexity would just be in understanding the THD
class.
Note that we do not have to take any performance hit from using encapsulated
accessors, since compilers can inline them (though with inlining we do not
get any ABI stability with respect to the THD implementation).
For now, the API is proposed without exposing the THD class. (Similar
encapsulation could be added in actual implementation to also not expose TABLE
and similar classes).
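For comparison, the non-encapsulated alternative discussed above would look
roughly like this (illustrative only, the names are invented for the example):

/* One generic event type; consumers get the THD and dig out what they need. */
struct rpl_generic_event
{
  enum event_type { TRX_START, TRX_COMMIT, TRX_ROLLBACK, STMT_QUERY /* ... */ };
  event_type type;
  THD *thd;
};

int example_consumer_callback(const rpl_generic_event *ev)
{
  if (ev->type == rpl_generic_event::STMT_QUERY)
  {
    /* The consumer now reaches into THD internals itself (query text,
       sql_mode, character sets, ...), so every consumer depends on the
       THD layout instead of on a small accessor API. */
  }
  return 0;
}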
LOW-LEVEL DESIGN:
A consumer is implemented as a virtual class (interface). There is one virtual
function for every event that can be received. A consumer would derive from
the base class and override methods for the events it wants to receive.
There is one consumer interface for each generator. When a generator A is
stacked on B, the consumer interface for A inherits from the interface for
B. This way, when A defers an event to B, the consumer for A will receive the
corresponding event from B.
There are methods for a consumer to register itself to receive events from
each generator. I still need to find a way for a consumer in one plugin to
register itself with a generator implemented in another plugin (eg. PBXT
engine-level replication). I also need to add a way for consumers to
de-register themselves.
The current design has consumer callbacks return 0 for success and error code
otherwise. I still need to think more about whether this is useful (ie. what
is the semantics of returning an error from a consumer callback).
Each event passed to consumers is defined as a class with public accessor
methods to a private context (which is mostly the THD).
My intention is to make all events passed around const, so that the same event
can be passed to each of multiple registered consumers (and to emphasise that
consumers do not have the ability to modify events). It still needs to be seen
whether that const-ness will be feasible in practice without very heavy
modification/constification of existing code.
What follows is a partial draft of a possible definition of the API as
concrete C++ class definitions.
-----------------------------------------------------------------------
/*
Virtual base class for generated replication events.
This is the parent of events generated from all kinds of generators. Only
child classes can be instantiated.
This class can be used by code that wants to treat events in a generic way,
without any knowledge of event details. I still need to decide whether such
generic code is sensible.
*/
class rpl_event_base
{
/*
Maybe we will want the ability to materialise an event to a standard
binary format. This could be achieved with a base method like this. The
actual materialisation would be implemented in each deriving class. The
public methods would provide different interfaces for specifying the
buffer or for writing directly into IO_CACHE or file.
*/
/* Return 0 on success, -1 on error, -2 on out-of-buffer. */
int materialise(uchar *buffer, size_t buflen) const;
/*
Returns NULL on error or else malloc()ed buffer with materialised event,
caller must free().
*/
uchar *materialise() const;
/* Same, but allocating from a passed-in memroot. */
uchar *materialise(mem_root *memroot) const;
/*
Materialise to user-supplied writer function (could write directly to file
or the like).
*/
int materialise(int (*writer)(uchar *data, size_t len, void *context),
void *context) const;
/*
As for what to do with a materialised event, there are a couple of
possibilities.
One is to have a de_materialise() method somewhere that can construct an
rpl_event_base (really a derived class of course) from a buffer or writer
function. This would require each accessor function to conditionally read
its data from either THD context or buffer (GCC is able to optimise
several such conditionals in multiple accessor function calls into one
conditional), or we can make all accessors virtual if the performance hit
is acceptable.
Another is to have different classes for accessing events read from
materialised event data.
Also, I still need to think about whether it is at all useful to be able
to generically materialise an event at this level. It may be that any
binlog/transport will in any case need to understand more of the format of
events, so that such materialisation/transport is better done at a
different layer.
*/
protected:
/* Implementation which is the basis for materialise(). */
virtual int do_materialise(int (*writer)(uchar *data, size_t len,
void *context),
void *context) const = 0;
private:
/* Virtual base class, private constructor to prevent instantiation. */
rpl_event_base();
};
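/*
  Example only (not part of the draft): how the writer-function variant of
  materialise() could be used by a transport layer to append an event to an
  already open binlog-like file. This assumes materialise() forwards the
  context pointer to the writer, and uses mysys my_write() for the actual I/O.
*/
static int write_to_file(uchar *data, size_t len, void *context)
{
  File fd= *(File *)context;
  return (my_write(fd, data, len, MYF(MY_WME)) == len) ? 0 : -1;
}

static int append_event_to_binlog(const rpl_event_base *ev, File binlog_fd)
{
  return ev->materialise(write_to_file, &binlog_fd);
}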
/*
These are the event types output from the transaction event generator.
This generator is not stacked on anything.
The transaction event generator marks the start and end (commit or rollback)
of transactions. It also gives information about whether the transaction was
a full transaction or autocommitted statement, whether transactional tables
were involved, whether non-transactional tables were involved, and XA
information (ToDo).
*/
/* Base class for transaction events. */
class rpl_event_transaction_base : public rpl_event_base
{
public:
/*
Get the local transaction id. This id is only unique within one server.
It is allocated whenever a new transaction is started.
Can be used to identify events belonging to the same transaction in a
binlog-like stream of events streamed in parallel among multiple
transactions.
*/
uint64_t get_local_trx_id() const { return thd->local_trx_id; };
bool get_is_autocommit() const;
private:
/* The context is the THD. */
THD *thd;
rpl_event_transaction_base(THD *_thd) : thd(_thd) { };
};
/* Transaction start event. */
class rpl_event_transaction_start : public rpl_event_transaction_base
{
};
/* Transaction commit. */
class rpl_event_transaction_commit : public rpl_event_transaction_base
{
public:
/*
The global transaction id is unique cross-server.
It can be used to identify the position from which to start a slave
replicating from a master.
This global ID is only available once the transaction is decided to commit
by the TC manager / primary redundancy service. This TC also allocates the
ID and decides the exact semantics (can there be gaps, etc); however the
format is fixed (cluster_id, running_counter).
*/
struct global_transaction_id
{
uint32_t cluster_id;
uint64_t counter;
};
const global_transaction_id *get_global_transaction_id() const;
};
/* Transaction rollback. */
class rpl_event_transaction_rollback : public rpl_event_transaction_base
{
};
/* Base class for statement events. */
class rpl_event_statement_base : public rpl_event_base
{
public:
LEX_STRING get_current_db() const;
};
class rpl_event_statement_start : public rpl_event_statement_base
{
};
class rpl_event_statement_end : public rpl_event_statement_base
{
public:
int get_errorcode() const;
};
class rpl_event_statement_query : public rpl_event_statement_base
{
public:
LEX_STRING get_query_string();
ulong get_sql_mode();
const CHARSET_INFO *get_character_set_client();
const CHARSET_INFO *get_collation_connection();
const CHARSET_INFO *get_collation_server();
const CHARSET_INFO *get_collation_default_db();
/*
Access to relevant flags that affect query execution.
Use as if (ev->get_flags() & (uint32)STMT_FOREIGN_KEY_CHECKS) { ... }
*/
enum flag_bits
{
STMT_FOREIGN_KEY_CHECKS, // @@foreign_key_checks
STMT_UNIQUE_KEY_CHECKS, // @@unique_checks
STMT_AUTO_IS_NULL, // @@sql_auto_is_null
};
uint32_t get_flags();
ulong get_auto_increment_offset();
ulong get_auto_increment_increment();
// And so on for: time zone; day/month names; connection id; LAST_INSERT_ID;
// INSERT_ID; random seed; user variables.
//
// We probably also need get_uses_temporary_table(), get_used_user_vars(),
// get_uses_auto_increment() and so on, so a consumer can get more
// information about what kind of context information a query will need when
// executed on a slave.
};
class rpl_event_statement_load_query : public rpl_event_statement_query
{
};
/*
This event is fired with blocks of data for files read (from server-local
file or client connection) for LOAD DATA.
*/
class rpl_event_statement_load_data_block : public rpl_event_statement_base
{
public:
struct block
{
const uchar *ptr;
size_t size;
};
block get_block() const;
};
/* Base class for row-based replication events. */
class rpl_event_row_base : public rpl_event_base
{
public:
/*
Access to relevant handler extra flags and other flags that affect row
operations.
Use as if (ev->get_flags() & (uint32)ROW_WRITE_CAN_REPLACE) { ... }
*/
enum flag_bits
{
ROW_WRITE_CAN_REPLACE, // HA_EXTRA_WRITE_CAN_REPLACE
ROW_IGNORE_DUP_KEY, // HA_EXTRA_IGNORE_DUP_KEY
ROW_IGNORE_NO_KEY, // HA_EXTRA_IGNORE_NO_KEY
ROW_DISABLE_FOREIGN_KEY_CHECKS, // ! @@foreign_key_checks
ROW_DISABLE_UNIQUE_KEY_CHECKS, // ! @@unique_checks
};
uint32_t get_flags();
/* Access to list of tables modified. */
class table_iterator
{
public:
/* Returns table, NULL after last. */
const TABLE *get_next();
private:
// ...
};
table_iterator get_modified_tables() const;
private:
/* Context used to provide accessors. */
THD *thd;
protected:
rpl_event_row_base(THD *_thd) : thd(_thd) { }
};
class rpl_event_row_write : public rpl_event_row_base
{
public:
const BITMAP *get_write_set() const;
const uchar *get_after_image() const;
};
class rpl_event_row_update : public rpl_event_row_base
{
public:
const BITMAP *get_read_set() const;
const BITMAP *get_write_set() const;
const uchar *get_before_image() const;
const uchar *get_after_image() const;
};
class rpl_event_row_delete : public rpl_event_row_base
{
public:
const BITMAP *get_read_set() const;
const uchar *get_before_image() const;
};
/*
Event consumer callbacks.
An event consumer registers with an event generator to receive event
notifications from that generator.
The consumer has callbacks (in the form of virtual functions) for the
individual event types the consumer is interested in. Only callbacks that
are non-NULL will be invoked. If an event applies to multiple callbacks in a
single callback struct, it will only be passed to the most specific non-NULL
callback (so events never fire more than once per registration).
The lifetime of the memory holding the event is only for the duration of the
callback invocation, unless otherwise noted.
Callbacks return 0 for success or error code (ToDo: does this make sense?).
*/
struct rpl_event_consumer_transaction
{
virtual int trx_start(const rpl_event_transaction_start *) { return 0; }
virtual int trx_commit(const rpl_event_transaction_commit *) { return 0; }
virtual int trx_rollback(const rpl_event_transaction_rollback *) { return 0; }
};
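/*
  Example only: a minimal consumer of transaction events. It records the
  global transaction id of the last commit it has seen, e.g. to expose the
  current replication position. Registration would use the
  rpl_event_transaction_register() function declared further down.
*/
struct trx_position_tracker : public rpl_event_consumer_transaction
{
  rpl_event_transaction_commit::global_transaction_id last_committed;
  bool have_position;

  trx_position_tracker() : have_position(false) { }

  virtual int trx_commit(const rpl_event_transaction_commit *ev)
  {
    const rpl_event_transaction_commit::global_transaction_id *gtid=
      ev->get_global_transaction_id();
    if (gtid)
    {
      last_committed= *gtid;
      have_position= true;
    }
    return 0;
  }
};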
/*
Consuming statement-based events.
The statement event generator is stacked on top of the transaction event
generator, so we can receive those events as well.
*/
struct rpl_event_consumer_statement : public rpl_event_consumer_transaction
{
virtual int stmt_start(const rpl_event_statement_start *) { return 0; }
virtual int stmt_end(const rpl_event_statement_end *) { return 0; }
virtual int stmt_query(const rpl_event_statement_query *) { return 0; }
/* Data for a file used in LOAD DATA [LOCAL] INFILE. */
virtual int stmt_load_data_block(const rpl_event_statement_load_data_block *)
{ return 0; }
/*
These are specific kinds of statements; if specified they override
stmt_query() for the corresponding event.
*/
virtual int stmt_load_query(const rpl_event_statement_load_query *ev)
{ return stmt_query(ev); }
};
/*
Consuming row-based events.
The row event generator is stacked on top of the statement event generator.
*/
struct rpl_event_consumer_row : public rpl_event_consumer_statement
{
virtual int row_write(const rpl_event_row_write *) { return 0; }
virtual int row_update(const rpl_event_row_update *) { return 0; }
virtual int row_delete(const rpl_event_row_delete *) { return 0; }
};
/*
Registration functions.
ToDo: Make a way to de-register.
ToDo: Find a good way for a plugin (eg. PBXT) to publish a generator
registration method.
*/
int rpl_event_transaction_register(const rpl_event_consumer_transaction *cbs);
int rpl_event_statement_register(const rpl_event_consumer_statement *cbs);
int rpl_event_row_register(const rpl_event_consumer_row *cbs);
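/*
  Example only: putting the pieces together in a plugin. A row-level consumer
  that counts how many tables are touched by row events; because
  rpl_event_consumer_row inherits from the statement and transaction consumer
  interfaces, the same object could also override those callbacks.
*/
struct row_audit_consumer : public rpl_event_consumer_row
{
  ulonglong tables_touched;

  row_audit_consumer() : tables_touched(0) { }

  virtual int row_write(const rpl_event_row_write *ev)
  {
    rpl_event_row_base::table_iterator it= ev->get_modified_tables();
    while (it.get_next() != NULL)
      tables_touched++;
    return 0;
  }
};

static row_audit_consumer row_audit;

/* Called from plugin init; de-registration is still an open ToDo above. */
static int init_row_audit()
{
  return rpl_event_row_register(&row_audit);
}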
ESTIMATED WORK TIME
ESTIMATED COMPLETION DATE
-----------------------------------------------------------------------
WorkLog (v4.0.0)
1
0

[Maria-developers] WL#107 Updated (by Sergei): New replication APIs
by worklog-noreply@askmonty.org 29 Jun '10
by worklog-noreply@askmonty.org 29 Jun '10
29 Jun '10
-----------------------------------------------------------------------
WORKLOG TASK
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
TASK...........: New replication APIs
CREATION DATE..: Mon, 15 Mar 2010, 13:55
SUPERVISOR.....: Knielsen
IMPLEMENTOR....: Knielsen
COPIES TO......: Sergei
CATEGORY.......: Server-Sprint
TASK ID........: 107 (http://askmonty.org/worklog/?tid=107)
VERSION........: Server-9.x
STATUS.........: In-Progress
PRIORITY.......: 60
WORKED HOURS...: 69
ESTIMATE.......: 0 (hours remain)
ORIG. ESTIMATE.: 0
PROGRESS NOTES:
-=-=(Sergei - Tue, 29 Jun 2010, 13:50)=-=-
Status updated.
--- /tmp/wklog.107.old.31164 2010-06-29 13:50:15.000000000 +0000
+++ /tmp/wklog.107.new.31164 2010-06-29 13:50:15.000000000 +0000
@@ -1 +1 @@
-Un-Assigned
+In-Progress
-=-=(Knielsen - Thu, 24 Jun 2010, 12:04)=-=-
Dependency created: 107 now depends on 120
-=-=(Knielsen - Mon, 21 Jun 2010, 08:36)=-=-
Research and design thoughts.
-=-=(Knielsen - Mon, 07 Jun 2010, 12:11)=-=-
High Level Description modified.
--- /tmp/wklog.107.old.31097 2010-06-07 12:11:57.000000000 +0000
+++ /tmp/wklog.107.new.31097 2010-06-07 12:11:57.000000000 +0000
@@ -7,3 +7,6 @@
https://lists.launchpad.net/maria-developers/msg01998.html
+Wiki page for the project:
+
+ http://askmonty.org/wiki/ReplicationProject
-=-=(Knielsen - Mon, 29 Mar 2010, 07:33)=-=-
Research and design discussions: Galera, 2pc/XA, group commit, multi-engine transactions.
-=-=(Knielsen - Wed, 24 Mar 2010, 10:39)=-=-
Design discussions
-=-=(Knielsen - Mon, 15 Mar 2010, 14:28)=-=-
Research into the problem, and discussions on phone/mailing list
-=-=(Guest - Mon, 15 Mar 2010, 14:18)=-=-
High-Level Specification modified.
--- /tmp/wklog.107.old.9086 2010-03-15 14:18:18.000000000 +0000
+++ /tmp/wklog.107.new.9086 2010-03-15 14:18:18.000000000 +0000
@@ -1 +1,43 @@
+Current ideas/status after discussions on the mailing list:
+
+ - Implement a set of plugin APIs and use them to move all of the existing
+ MySQL replication into a (set of) plugins.
+
+ - Design the APIs so that they can support full MySQL replication, but also
+ so that they do not hardcode assumptions about how this replication
+ implementation is done, and so that they will be suitable for other types of
+ replication (Tungsten, Galera, parallel replication, ...).
+
+ - APIs need to include the concept of a global transaction ID. Need to
+ determine the extent to which the semantics of such ID will be defined
+ by the API, and to what extent it will be defined by the plugin
+ implementations.
+
+ - APIs should properly support reliable crash-recovery with decent
+ performance (eg. not require multiple mandatory fsync()s per commit, and
+ not make group commit impossible).
+
+ - Would be nice if the API provided facilities for implementing good
+ consistency checking support (mainly checking master tables against slave
+ tables is hard here I think, but also applying wrong binlog data and
+ individual event checksums).
+
+
+Steps to make this more concrete:
+
+ - Investigate the current MySQL replication, and list all of the places where
+ a plugin implementation will need to connect/hook into the MySQL server.
+ * handler::{write,update,delete}_row()
+ * Statement execution
+ * Transaction start/commit
+ * Table open
+ * Query safe/not safe for statement-based replication
+ * Statement-based logging details (user variables, random seed, etc.)
+ * ...
+
+ - Use this list to make an initial sketch of the set of APIs we need.
+
+ - Use the list to determine the feasibility of this project and the level of
+ detail in the API needed to support a full replication implementation as a
+ plugin.
-=-=(Sergei - Mon, 15 Mar 2010, 14:13)=-=-
Observers changed: Sergei
DESCRIPTION:
This is a top-level task for the project of designing a new set of replication
APIs for MariaDB.
This task is for the initial discussion of what to do and where to focus.
The project is started in this email thread:
https://lists.launchpad.net/maria-developers/msg01998.html
Wiki page for the project:
http://askmonty.org/wiki/ReplicationProject
HIGH-LEVEL SPECIFICATION:
Current ideas/status after discussions on the mailing list:
- Implement a set of plugin APIs and use them to move all of the existing
MySQL replication into a (set of) plugins.
- Design the APIs so that they can support full MySQL replication, but also
so that they do not hardcode assumptions about how this replication
implementation is done, and so that they will be suitable for other types of
replication (Tungsten, Galera, parallel replication, ...).
- APIs need to include the concept of a global transaction ID. Need to
determine the extent to which the semantics of such ID will be defined
by the API, and to what extent it will be defined by the plugin
implementations.
- APIs should properly support reliable crash-recovery with decent
performance (eg. not require multiple mandatory fsync()s per commit, and
not make group commit impossible).
- Would be nice if the API provided facilities for implementing good
consistency checking support (mainly checking master tables against slave
tables is hard here I think, but also applying wrong binlog data and
individual event checksums).
Steps to make this more concrete:
- Investigate the current MySQL replication, and list all of the places where
a plugin implementation will need to connect/hook into the MySQL server.
* handler::{write,update,delete}_row()
* Statement execution
* Transaction start/commit
* Table open
* Query safe/not safe for statement-based replication
* Statement-based logging details (user variables, random seed, etc.)
* ...
- Use this list to make an initial sketch of the set of APIs we need.
- Use the list to determine the feasibility of this project and the level of
detail in the API needed to support a full replication implementation as a
plugin.
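As a rough illustration of what one of these hook points could look like, the
row-level hooks might reduce to something like the following. The names are
invented for this example; this is not an existing interface, just a sketch of
the kind of surface the investigation above would have to enumerate:

/* Hypothetical observer called from handler::ha_write_row() and friends after
   a successful engine operation; registered replication plugins would receive
   the row data from here. */
class replication_row_observer
{
public:
  virtual int after_write_row(THD *thd, TABLE *table, const uchar *record)= 0;
  virtual int after_update_row(THD *thd, TABLE *table,
                               const uchar *before, const uchar *after)= 0;
  virtual int after_delete_row(THD *thd, TABLE *table, const uchar *record)= 0;
  virtual ~replication_row_observer() { }
};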
ESTIMATED WORK TIME
ESTIMATED COMPLETION DATE
-----------------------------------------------------------------------
WorkLog (v4.0.0)
1
0

This patch installs all the files that were missing from the installer
package. Now, the installer has the same set of files as the zip file.
Diff'ed against the current 5.1 tree.
Bo Thorsen.
Monty Program AB.
--
MariaDB: MySQL replacement
Community developed. Feature enhanced. Backward compatible.
2
2

[Maria-developers] bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (igor:2779)
by Igor Babaev 28 Jun '10
by Igor Babaev 28 Jun '10
28 Jun '10
#At lp:maria based on revid:knielsen@knielsen-hq.org-20091130132430-edrwle5zh6udx9rp
2779 Igor Babaev 2010-06-28
Optimization that checks for expressions whether they are always null.
modified:
mysql-test/r/func_in.result
sql/item.h
sql/item_cmpfunc.cc
sql/item_cmpfunc.h
sql/item_func.cc
sql/item_func.h
sql/item_sum.h
sql/sql_select.cc
sql/sql_udf.h
=== modified file 'mysql-test/r/func_in.result'
--- a/mysql-test/r/func_in.result 2009-10-05 05:27:36 +0000
+++ b/mysql-test/r/func_in.result 2010-06-29 00:24:26 +0000
@@ -642,10 +642,10 @@ id select_type table type possible_keys
1 SIMPLE t1 range c_int c_int 4 NULL 3 Using where
EXPLAIN SELECT * FROM t1 WHERE c_int IN (NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_int IN (NULL, NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_decimal IN (1, 2, 3);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c_decimal c_decimal 3 NULL 3 Using where
@@ -654,10 +654,10 @@ id select_type table type possible_keys
1 SIMPLE t1 range c_decimal c_decimal 3 NULL 3 Using where
EXPLAIN SELECT * FROM t1 WHERE c_decimal IN (NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_decimal IN (NULL, NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_float IN (1, 2, 3);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c_float c_float 4 NULL 3 Using where
@@ -666,10 +666,10 @@ id select_type table type possible_keys
1 SIMPLE t1 range c_float c_float 4 NULL 3 Using where
EXPLAIN SELECT * FROM t1 WHERE c_float IN (NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_float IN (NULL, NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_bit IN (1, 2, 3);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c_bit c_bit 2 NULL 3 Using where
@@ -678,10 +678,10 @@ id select_type table type possible_keys
1 SIMPLE t1 range c_bit c_bit 2 NULL 3 Using where
EXPLAIN SELECT * FROM t1 WHERE c_bit IN (NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_bit IN (NULL, NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_date
IN ('2009-09-01', '2009-09-02', '2009-09-03');
id select_type table type possible_keys key key_len ref rows Extra
@@ -692,10 +692,10 @@ id select_type table type possible_keys
1 SIMPLE t1 range c_date c_date 3 NULL 3 Using where
EXPLAIN SELECT * FROM t1 WHERE c_date IN (NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_date IN (NULL, NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_datetime
IN ('2009-09-01 00:00:01', '2009-09-02 00:00:01', '2009-09-03 00:00:01');
id select_type table type possible_keys key key_len ref rows Extra
@@ -706,10 +706,10 @@ id select_type table type possible_keys
1 SIMPLE t1 range c_datetime c_datetime 8 NULL 3 Using where
EXPLAIN SELECT * FROM t1 WHERE c_datetime IN (NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_datetime IN (NULL, NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_timestamp
IN ('2009-09-01 00:00:01', '2009-09-01 00:00:02', '2009-09-01 00:00:03');
id select_type table type possible_keys key key_len ref rows Extra
@@ -720,10 +720,10 @@ id select_type table type possible_keys
1 SIMPLE t1 range c_timestamp c_timestamp 4 NULL 3 Using where
EXPLAIN SELECT * FROM t1 WHERE c_timestamp IN (NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_timestamp IN (NULL, NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_year IN (1, 2, 3);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c_year c_year 1 NULL 3 Using where
@@ -732,10 +732,10 @@ id select_type table type possible_keys
1 SIMPLE t1 range c_year c_year 1 NULL 3 Using where
EXPLAIN SELECT * FROM t1 WHERE c_year IN (NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_year IN (NULL, NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_char IN ('1', '2', '3');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c_char c_char 10 NULL 3 Using where
@@ -744,10 +744,10 @@ id select_type table type possible_keys
1 SIMPLE t1 range c_char c_char 10 NULL 3 Using where
EXPLAIN SELECT * FROM t1 WHERE c_char IN (NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN SELECT * FROM t1 WHERE c_char IN (NULL, NULL);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
DROP TABLE t1;
#
End of 5.1 tests
=== modified file 'sql/item.h'
--- a/sql/item.h 2009-11-16 20:49:51 +0000
+++ b/sql/item.h 2010-06-29 00:24:26 +0000
@@ -774,6 +774,7 @@ public:
will not change until next fix_fields) and its value is known.
*/
virtual bool const_item() const { return used_tables() == 0; }
+ virtual bool is_always_null() const { return 0; }
/*
Returns true if this is constant but its value may be not known yet.
(Can be used for parameters of prep. stmts or of stored procedures.)
@@ -1563,6 +1564,7 @@ public:
enum Item_result result_type () const { return STRING_RESULT; }
enum_field_types field_type() const { return MYSQL_TYPE_NULL; }
bool basic_const_item() const { return 1; }
+ bool is_always_null() const { return 1; }
Item *clone_item() { return new Item_null(name); }
bool is_null() { return 1; }
=== modified file 'sql/item_cmpfunc.cc'
--- a/sql/item_cmpfunc.cc 2009-11-16 20:49:51 +0000
+++ b/sql/item_cmpfunc.cc 2010-06-29 00:24:26 +0000
@@ -1983,6 +1983,19 @@ bool Item_func_between::fix_fields(THD *
(args[1]->not_null_tables() &
args[2]->not_null_tables()));
+ if (negated)
+ {
+ always_null_cache= 1;
+ if (!args[0]->is_always_null())
+ always_null_cache= args[1]->is_always_null() &&
+ args[2]->is_always_null();
+ }
+ else
+ {
+ always_null_cache= args[0]->is_always_null() ||
+ args[1]->is_always_null() ||
+ args[2]->is_always_null();
+ }
return 0;
}
@@ -3545,6 +3558,23 @@ Item_func_in::fix_fields(THD *thd, Item
for (arg= args + 1, arg_end= args + arg_count; arg != arg_end; arg++)
not_null_tables_cache&= (*arg)->not_null_tables();
not_null_tables_cache|= (*args)->not_null_tables();
+ if (negated)
+ {
+ always_null_cache= 0;
+ for (arg= args, arg_end= args + arg_count;
+ !always_null_cache && arg != arg_end; arg++)
+ always_null_cache= (*arg)->is_always_null();
+ }
+ else
+ {
+ always_null_cache= 1;
+ if (!(*args)->is_always_null())
+ {
+ for (arg= args + 1, arg_end= args + arg_count;
+ always_null_cache && arg != arg_end; arg++)
+ always_null_cache= (*arg)->is_always_null();
+ }
+ }
return 0;
}
=== modified file 'sql/item_cmpfunc.h'
--- a/sql/item_cmpfunc.h 2009-11-16 20:49:51 +0000
+++ b/sql/item_cmpfunc.h 2010-06-29 00:24:26 +0000
@@ -239,6 +239,7 @@ public:
longlong val_int();
void cleanup();
const char *func_name() const { return "<in_optimizer>"; }
+ bool is_null_preserving() const { return 0; }
Item_cache **get_cache() { return &cache; }
void keep_top_level_cache();
};
@@ -438,6 +439,7 @@ public:
longlong val_int();
enum Functype functype() const { return NOT_ALL_FUNC; }
const char *func_name() const { return "<not>"; }
+ bool is_null_preserving() const { return 0; }
virtual void print(String *str, enum_query_type query_type);
void set_sum_test(Item_sum_hybrid *item) { test_sum_item= item; };
void set_sub_test(Item_maxmin_subselect *item) { test_sub_item= item; };
@@ -453,6 +455,7 @@ public:
Item_func_nop_all(Item *a) :Item_func_not_all(a) {}
longlong val_int();
const char *func_name() const { return "<nop>"; }
+ bool is_null_preserving() const { return 0; }
Item *neg_transformer(THD *thd);
};
@@ -480,6 +483,7 @@ public:
enum Functype rev_functype() const { return EQUAL_FUNC; }
cond_result eq_cmp_result() const { return COND_TRUE; }
const char *func_name() const { return "<=>"; }
+ bool is_null_preserving() const { return 0; }
Item *neg_transformer(THD *thd) { return 0; }
};
@@ -597,6 +601,7 @@ public:
optimize_type select_optimize() const { return OPTIMIZE_KEY; }
enum Functype functype() const { return BETWEEN; }
const char *func_name() const { return "between"; }
+ bool is_null_preserving() const { return !negated; }
bool fix_fields(THD *, Item **);
void fix_length_and_dec();
virtual void print(String *str, enum_query_type query_type);
@@ -663,6 +668,7 @@ public:
const char *func_name() const { return "coalesce"; }
table_map not_null_tables() const { return 0; }
enum_field_types field_type() const { return cached_field_type; }
+ bool is_null_preserving() const { return 0; }
};
@@ -720,6 +726,7 @@ public:
void fix_length_and_dec();
uint decimal_precision() const { return args[0]->decimal_precision(); }
const char *func_name() const { return "nullif"; }
+ bool is_null_preserving() const { return 0; }
virtual inline void print(String *str, enum_query_type query_type)
{
@@ -1152,6 +1159,7 @@ public:
void fix_length_and_dec();
uint decimal_precision() const;
table_map not_null_tables() const { return 0; }
+ bool is_null_preserving() const { return 0; }
enum Item_result result_type () const { return cached_result_type; }
enum_field_types field_type() const { return cached_field_type; }
const char *func_name() const { return "case"; }
@@ -1225,6 +1233,7 @@ public:
virtual void print(String *str, enum_query_type query_type);
enum Functype functype() const { return IN_FUNC; }
const char *func_name() const { return " IN "; }
+ bool is_null_preserving() { return arg_count == 2 || negated; }
bool nulls_in_row();
bool is_bool_func() { return 1; }
CHARSET_INFO *compare_collation() { return cmp_collation.collation; }
@@ -1275,6 +1284,7 @@ public:
update_used_tables();
}
const char *func_name() const { return "isnull"; }
+ bool is_null_preserving() const { return 0; }
/* Optimize case of not_null_column IS NULL */
virtual void update_used_tables()
{
@@ -1340,6 +1350,7 @@ public:
decimals=0; max_length=1; maybe_null=0;
}
const char *func_name() const { return "isnotnull"; }
+ bool is_null_preserving() const { return 0; }
optimize_type select_optimize() const { return OPTIMIZE_NULL; }
table_map not_null_tables() const
{ return abort_on_null ? not_null_tables_cache : 0; }
@@ -1465,6 +1476,7 @@ public:
enum Type type() const { return COND_ITEM; }
List<Item>* argument_list() { return &list; }
+ bool is_null_preserving() const { return 0; }
table_map used_tables() const;
void update_used_tables();
virtual void print(String *str, enum_query_type query_type);
=== modified file 'sql/item_func.cc'
--- a/sql/item_func.cc 2009-11-16 20:49:51 +0000
+++ b/sql/item_func.cc 2010-06-29 00:24:26 +0000
@@ -156,6 +156,8 @@ Item_func::fix_fields(THD *thd, Item **r
used_tables_cache= not_null_tables_cache= 0;
const_item_cache=1;
+ always_null_cache= 0;
+ bool maybe_always_null= is_null_preserving();
if (check_stack_overrun(thd, STACK_MIN_SIZE, buff))
return TRUE; // Fatal error if flag is set!
@@ -193,6 +195,8 @@ Item_func::fix_fields(THD *thd, Item **r
not_null_tables_cache|= item->not_null_tables();
const_item_cache&= item->const_item();
with_subselect|= item->with_subselect;
+ if (maybe_always_null && !always_null_cache)
+ always_null_cache= item->is_always_null();
}
}
fix_length_and_dec();
@@ -202,7 +206,7 @@ Item_func::fix_fields(THD *thd, Item **r
return FALSE;
}
-
+
bool Item_func::walk(Item_processor processor, bool walk_subquery,
uchar *argument)
{
@@ -2863,6 +2867,7 @@ udf_handler::fix_fields(THD *thd, Item_r
func->maybe_null=0;
used_tables_cache=0;
const_item_cache=1;
+ always_null_cache= 0;
if ((f_args.arg_count=arg_count))
{
=== modified file 'sql/item_func.h'
--- a/sql/item_func.h 2009-11-16 20:49:51 +0000
+++ b/sql/item_func.h 2010-06-29 00:24:26 +0000
@@ -40,6 +40,7 @@ public:
uint arg_count;
table_map used_tables_cache, not_null_tables_cache;
bool const_item_cache;
+ bool always_null_cache;
enum Functype { UNKNOWN_FUNC,EQ_FUNC,EQUAL_FUNC,NE_FUNC,LT_FUNC,LE_FUNC,
GE_FUNC,GT_FUNC,FT_FUNC,
LIKE_FUNC,ISNULL_FUNC,ISNOTNULL_FUNC,
@@ -135,7 +136,9 @@ public:
instead.
*/
virtual const char *func_name() const= 0;
+ virtual bool is_null_preserving() const { return TRUE; }
virtual bool const_item() const { return const_item_cache; }
+ bool is_always_null() const { return always_null_cache; }
inline Item **arguments() const { return args; }
void set_arguments(List<Item> &list);
inline uint argument_count() const { return arg_count; }
@@ -983,6 +986,7 @@ public:
Item_func_last_insert_id(Item *a) :Item_int_func(a) {}
longlong val_int();
const char *func_name() const { return "last_insert_id"; }
+ bool is_null_preserving() const { return 0; }
void fix_length_and_dec()
{
if (arg_count)
@@ -1034,6 +1038,7 @@ public:
Item_udf_func(udf_func *udf_arg, List<Item> &list)
:Item_func(list), udf(udf_arg) {}
const char *func_name() const { return udf.name(); }
+ bool is_null_preserving() const { return 0; }
enum Functype functype() const { return UDF_FUNC; }
bool fix_fields(THD *thd, Item **ref)
{
@@ -1041,6 +1046,7 @@ public:
bool res= udf.fix_fields(thd, this, arg_count, args);
used_tables_cache= udf.used_tables_cache;
const_item_cache= udf.const_item_cache;
+ always_null_cache= udf.always_null_cache;
fixed= 1;
return res;
}
@@ -1352,6 +1358,7 @@ public:
virtual void print(String *str, enum_query_type query_type);
void print_as_stmt(String *str, enum_query_type query_type);
const char *func_name() const { return "set_user_var"; }
+ bool is_null_preserving() const { return 0; }
int save_in_field(Field *field, bool no_conversions,
bool can_use_result_field);
int save_in_field(Field *field, bool no_conversions)
@@ -1468,6 +1475,7 @@ public:
String* val_str(String*);
/* TODO: fix to support views */
const char *func_name() const { return "get_system_var"; }
+ bool is_null_preserving() const { return 0; }
/**
Indicates whether this system variable is written to the binlog or not.
@@ -1628,6 +1636,8 @@ public:
const char *func_name() const;
+ bool is_null_preserving() const { return 0; }
+
enum enum_field_types field_type() const;
Field *tmp_table_field(TABLE *t_arg);
=== modified file 'sql/item_sum.h'
--- a/sql/item_sum.h 2009-09-15 10:46:35 +0000
+++ b/sql/item_sum.h 2010-06-29 00:24:26 +0000
@@ -287,6 +287,7 @@ public:
Item_sum(THD *thd, Item_sum *item);
enum Type type() const { return SUM_FUNC_ITEM; }
virtual enum Sumfunctype sum_func () const=0;
+ bool is_null_preserving() const { return 0; }
/*
This method is similar to add(), but it is called when the current
=== modified file 'sql/sql_select.cc'
--- a/sql/sql_select.cc 2009-11-27 13:20:59 +0000
+++ b/sql/sql_select.cc 2010-06-29 00:24:26 +0000
@@ -7886,6 +7886,10 @@ static COND *build_equal_items_for_cond(
}
else if (cond->type() == Item::FUNC_ITEM)
{
+ Item *new_item;
+ if ((Item_func *)cond->is_always_null() && (new_item= new Item_null()))
+ return new_item;
+
List<Item> eq_list;
/*
If an equality predicate forms the whole and level,
=== modified file 'sql/sql_udf.h'
--- a/sql/sql_udf.h 2007-07-06 12:18:49 +0000
+++ b/sql/sql_udf.h 2010-06-29 00:24:26 +0000
@@ -63,6 +63,7 @@ class udf_handler :public Sql_alloc
public:
table_map used_tables_cache;
bool const_item_cache;
+ bool always_null_cache;
bool not_original;
udf_handler(udf_func *udf_arg) :u_d(udf_arg), buffers(0), error(0),
is_null(0), initialized(0), not_original(0)
1
0

[Maria-developers] bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (igor:2747)
by Igor Babaev 28 Jun '10
by Igor Babaev 28 Jun '10
28 Jun '10
#At lp:maria based on revid:monty@askmonty.org-20091014080956-d6xr2v3glk4v53sg
2747 Igor Babaev 2010-06-28
Partitioned key cache (mwl#85) for maria-5.1. Saved for possible
future request.
modified:
include/keycache.h
mysql-test/r/information_schema.result
mysql-test/r/information_schema_all_engines.result
mysql-test/r/key_cache.result
mysql-test/t/key_cache.test
mysys/mf_keycache.c
sql/handler.cc
sql/handler.h
sql/mysqld.cc
sql/set_var.cc
sql/set_var.h
sql/sql_show.cc
sql/sql_test.cc
sql/table.h
storage/myisam/mi_check.c
storage/myisam/mi_close.c
storage/myisam/mi_delete_all.c
storage/myisam/mi_extra.c
storage/myisam/mi_keycache.c
storage/myisam/mi_locking.c
storage/myisam/mi_page.c
storage/myisam/mi_panic.c
storage/myisam/mi_preload.c
storage/myisam/mi_test1.c
storage/myisam/mi_test2.c
storage/myisam/mi_test3.c
storage/myisam/myisam_ftdump.c
storage/myisam/myisamchk.c
storage/myisam/myisamdef.h
storage/myisam/myisamlog.c
=== modified file 'include/keycache.h'
--- a/include/keycache.h 2007-12-16 15:03:44 +0000
+++ b/include/keycache.h 2010-06-29 00:10:53 +0000
@@ -19,96 +19,121 @@
#define _keycache_h
C_MODE_START
-/* declare structures that is used by st_key_cache */
-struct st_block_link;
-typedef struct st_block_link BLOCK_LINK;
-struct st_keycache_page;
-typedef struct st_keycache_page KEYCACHE_PAGE;
-struct st_hash_link;
-typedef struct st_hash_link HASH_LINK;
-/* info about requests in a waiting queue */
-typedef struct st_keycache_wqueue
+/*
+  Currently the default key cache is created as non-partitioned at
+  the start of the server unless the server is started with the
+  parameter --key-cache-partitions set to a value greater than 0
+*/
+
+#define DEFAULT_KEY_CACHE_PARTITIONS 0
+
+/*
+  MAX_KEY_CACHE_PARTITIONS cannot be greater than the number of bits in
+  MYISAM_SHARE::dirty_part_map.
+  Currently MYISAM_SHARE::dirty_part_map is a ulonglong, i.e. 64 bits
+*/
+
+#define MAX_KEY_CACHE_PARTITIONS 64
+
+
+/* The structure to get statistical data about a key cache */
+
+typedef struct st_key_cache_statistics
+{
+ ulonglong mem_size; /* memory for cache buffers/auxiliary structures */
+  ulonglong block_size;       /* size of each buffer in the key cache        */
+ ulonglong blocks_used; /* maximum number of used blocks/buffers */
+ ulonglong blocks_unused; /* number of currently unused blocks */
+ ulonglong blocks_changed; /* number of currently dirty blocks */
+ ulonglong read_requests; /* number of read requests (read hits) */
+ ulonglong reads; /* number of actual reads from files into buffers */
+ ulonglong write_requests; /* number of write requests (write hits) */
+ ulonglong writes; /* number of actual writes from buffers into files */
+} KEY_CACHE_STATISTICS;
+
+/* The type of a key cache object */
+typedef enum key_cache_type
{
- struct st_my_thread_var *last_thread; /* circular list of waiting threads */
-} KEYCACHE_WQUEUE;
+ SIMPLE_KEY_CACHE,
+ PARTITIONED_KEY_CACHE
+} KEY_CACHE_TYPE;
-#define CHANGED_BLOCKS_HASH 128 /* must be power of 2 */
/*
- The key cache structure
- It also contains read-only statistics parameters.
+ An object of the type KEY_CACHE_FUNCS contains pointers to all functions
+ from the key cache interface.
+ Currently a key cache can be of two types: simple and partitioned.
+  For each of them a separate static structure of the type KEY_CACHE_FUNCS is
+  defined. These structures contain the pointers to the implementations of
+ the interface functions used by simple key caches and partitioned key
+ caches respectively. Pointers to these structures are assigned to key cache
+ objects at the time of their creation.
*/
+typedef struct st_key_cache_funcs
+{
+ int (*init) (void *, uint key_cache_block_size,
+ size_t use_mem, uint division_limit, uint age_threshold);
+ int (*resize) (void *, uint key_cache_block_size,
+ size_t use_mem, uint division_limit, uint age_threshold);
+ void (*change_param) (void *keycache_cb,
+ uint division_limit, uint age_threshold);
+ uchar* (*read) (void *keycache_cb,
+ File file, my_off_t filepos, int level,
+ uchar *buff, uint length,
+ uint block_length, int return_buffer);
+ int (*insert) (void *keycache_cb,
+ File file, my_off_t filepos, int level,
+ uchar *buff, uint length);
+ int (*write) (void *keycache_cb,
+ File file, void *file_extra,
+ my_off_t filepos, int level,
+ uchar *buff, uint length,
+ uint block_length, int force_write);
+ int (*flush) (void *keycache_cb,
+ int file, void *file_extra,
+ enum flush_type type);
+ int (*reset_counters) (const char *name, void *keycache_cb);
+ void (*end) (void *keycache_cb, my_bool cleanup);
+ void (*get_stats) (void *keycache_cb, uint partition_no,
+ KEY_CACHE_STATISTICS *key_cache_stats);
+ ulonglong (*get_stat_val) (void *keycache_cb, uint var_no);
+} KEY_CACHE_FUNCS;
+
+
typedef struct st_key_cache
{
- my_bool key_cache_inited;
- my_bool in_resize; /* true during resize operation */
- my_bool resize_in_flush; /* true during flush of resize operation */
+ KEY_CACHE_TYPE key_cache_type; /* type of the key cache used for debugging */
+ void *keycache_cb; /* control block of the used key cache */
+ KEY_CACHE_FUNCS *interface_funcs; /* interface functions of the key cache */
+  ulonglong param_buff_size;  /* size of the memory allocated for the cache  */
+ ulong param_block_size; /* size of the blocks in the key cache */
+ ulong param_division_limit; /* min. percentage of warm blocks */
+ ulong param_age_threshold; /* determines when hot block is downgraded */
+ ulong param_partitions; /* number of the key cache partitions */
+ my_bool key_cache_inited; /* <=> key cache has been created */
my_bool can_be_used; /* usage of cache for read/write is allowed */
- size_t key_cache_mem_size; /* specified size of the cache memory */
- uint key_cache_block_size; /* size of the page buffer of a cache block */
- ulong min_warm_blocks; /* min number of warm blocks; */
- ulong age_threshold; /* age threshold for hot blocks */
- ulonglong keycache_time; /* total number of block link operations */
- uint hash_entries; /* max number of entries in the hash table */
- int hash_links; /* max number of hash links */
- int hash_links_used; /* number of hash links currently used */
- int disk_blocks; /* max number of blocks in the cache */
- ulong blocks_used; /* maximum number of concurrently used blocks */
- ulong blocks_unused; /* number of currently unused blocks */
- ulong blocks_changed; /* number of currently dirty blocks */
- ulong warm_blocks; /* number of blocks in warm sub-chain */
- ulong cnt_for_resize_op; /* counter to block resize operation */
- long blocks_available; /* number of blocks available in the LRU chain */
- HASH_LINK **hash_root; /* arr. of entries into hash table buckets */
- HASH_LINK *hash_link_root; /* memory for hash table links */
- HASH_LINK *free_hash_list; /* list of free hash links */
- BLOCK_LINK *free_block_list; /* list of free blocks */
- BLOCK_LINK *block_root; /* memory for block links */
- uchar HUGE_PTR *block_mem; /* memory for block buffers */
- BLOCK_LINK *used_last; /* ptr to the last block of the LRU chain */
- BLOCK_LINK *used_ins; /* ptr to the insertion block in LRU chain */
- pthread_mutex_t cache_lock; /* to lock access to the cache structure */
- KEYCACHE_WQUEUE resize_queue; /* threads waiting during resize operation */
- /*
- Waiting for a zero resize count. Using a queue for symmetry though
- only one thread can wait here.
- */
- KEYCACHE_WQUEUE waiting_for_resize_cnt;
- KEYCACHE_WQUEUE waiting_for_hash_link; /* waiting for a free hash link */
- KEYCACHE_WQUEUE waiting_for_block; /* requests waiting for a free block */
- BLOCK_LINK *changed_blocks[CHANGED_BLOCKS_HASH]; /* hash for dirty file bl.*/
- BLOCK_LINK *file_blocks[CHANGED_BLOCKS_HASH]; /* hash for other file bl.*/
-
- /*
- The following variables are and variables used to hold parameters for
- initializing the key cache.
- */
-
- ulonglong param_buff_size; /* size the memory allocated for the cache */
- ulong param_block_size; /* size of the blocks in the key cache */
- ulong param_division_limit; /* min. percentage of warm blocks */
- ulong param_age_threshold; /* determines when hot block is downgraded */
-
- /* Statistics variables. These are reset in reset_key_cache_counters(). */
- ulong global_blocks_changed; /* number of currently dirty blocks */
+ my_bool in_init; /* Set to 1 in MySQL during init/resize */
+ uint partitions; /* actual number of partitions */
+ size_t key_cache_mem_size; /* specified size of the cache memory */
+ ulong blocks_used; /* maximum number of concurrently used blocks */
+ ulong blocks_unused; /* number of currently unused blocks */
+ ulong global_blocks_changed; /* number of currently dirty blocks */
ulonglong global_cache_w_requests;/* number of write requests (write hits) */
ulonglong global_cache_write; /* number of writes from cache to files */
ulonglong global_cache_r_requests;/* number of read requests (read hits) */
ulonglong global_cache_read; /* number of reads from files to cache */
-
- int blocks; /* max number of blocks in the cache */
- my_bool in_init; /* Set to 1 in MySQL during init/resize */
} KEY_CACHE;
+
/* The default key cache */
extern KEY_CACHE dflt_key_cache_var, *dflt_key_cache;
extern int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
size_t use_mem, uint division_limit,
- uint age_threshold);
+ uint age_threshold, uint partitions);
extern int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
size_t use_mem, uint division_limit,
uint age_threshold);
@@ -122,12 +147,18 @@ extern int key_cache_insert(KEY_CACHE *k
File file, my_off_t filepos, int level,
uchar *buff, uint length);
extern int key_cache_write(KEY_CACHE *keycache,
- File file, my_off_t filepos, int level,
+ File file, void *file_extra,
+ my_off_t filepos, int level,
uchar *buff, uint length,
- uint block_length,int force_write);
+ uint block_length, int force_write);
extern int flush_key_blocks(KEY_CACHE *keycache,
- int file, enum flush_type type);
+ int file, void *file_extra,
+ enum flush_type type);
extern void end_key_cache(KEY_CACHE *keycache, my_bool cleanup);
+extern void get_key_cache_statistics(KEY_CACHE *keycache,
+ uint partition_no,
+ KEY_CACHE_STATISTICS *key_cache_stats);
+extern ulonglong get_key_cache_stat_value(KEY_CACHE *keycache, uint var_no);
/* Functions to handle multiple key caches */
extern my_bool multi_keycache_init(void);
@@ -140,5 +171,11 @@ extern void multi_key_cache_change(KEY_C
KEY_CACHE *new_data);
extern int reset_key_cache_counters(const char *name,
KEY_CACHE *key_cache);
+extern int repartition_key_cache(KEY_CACHE *keycache,
+ uint key_cache_block_size,
+ size_t use_mem,
+ uint division_limit,
+ uint age_threshold,
+ uint partitions);
C_MODE_END
#endif /* _keycache_h */
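With this header change the KEY_CACHE structure becomes a thin front end: every operation goes through interface_funcs to either the simple or the partitioned implementation. The following is a minimal standalone sketch of that dispatch pattern; the toy_* names are invented for the illustration and are not part of the patch.

#include <stdio.h>

/* Toy counterpart of KEY_CACHE_FUNCS: one slot per interface function. */
typedef struct toy_cache_funcs
{
  int (*init)(void *cb, unsigned block_size);
  void (*end)(void *cb);
} TOY_CACHE_FUNCS;

/* Toy counterpart of KEY_CACHE: a front end that only dispatches. */
typedef struct toy_key_cache
{
  void *cb;                  /* control block of the chosen implementation */
  TOY_CACHE_FUNCS *funcs;    /* function table assigned at creation time   */
} TOY_KEY_CACHE;

static int simple_init(void *cb, unsigned block_size)
{ (void) cb; printf("simple cache, block size %u\n", block_size); return 1; }
static void simple_end(void *cb) { (void) cb; }

static int part_init(void *cb, unsigned block_size)
{ (void) cb; printf("partitioned cache, block size %u\n", block_size); return 1; }
static void part_end(void *cb) { (void) cb; }

/* One static function table per cache type, as the new comment describes. */
static TOY_CACHE_FUNCS simple_funcs= { simple_init, simple_end };
static TOY_CACHE_FUNCS part_funcs=   { part_init,   part_end };

/* Callers never branch on the cache type; they just go through funcs. */
static int toy_init_key_cache(TOY_KEY_CACHE *kc, unsigned block_size)
{
  return kc->funcs->init(kc->cb, block_size);
}

int main(void)
{
  TOY_KEY_CACHE simple= { 0, &simple_funcs };
  TOY_KEY_CACHE part=   { 0, &part_funcs };
  toy_init_key_cache(&simple, 1024);
  toy_init_key_cache(&part, 2048);
  simple.funcs->end(simple.cb);
  part.funcs->end(part.cb);
  return 0;
}

The same front-end call reaches a different implementation depending only on which table was installed when the cache object was created.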
=== modified file 'mysql-test/r/information_schema.result'
--- a/mysql-test/r/information_schema.result 2009-09-29 20:19:43 +0000
+++ b/mysql-test/r/information_schema.result 2010-06-29 00:10:53 +0000
@@ -67,6 +67,7 @@ INNODB_LOCK_WAITS
INNODB_RSEG
INNODB_TABLE_STATS
INNODB_TRX
+KEY_CACHES
KEY_COLUMN_USAGE
PARTITIONS
PLUGINS
=== modified file 'mysql-test/r/information_schema_all_engines.result'
--- a/mysql-test/r/information_schema_all_engines.result 2009-08-03 20:09:53 +0000
+++ b/mysql-test/r/information_schema_all_engines.result 2010-06-29 00:10:53 +0000
@@ -11,6 +11,7 @@ EVENTS
FILES
GLOBAL_STATUS
GLOBAL_VARIABLES
+KEY_CACHES
KEY_COLUMN_USAGE
PARTITIONS
PLUGINS
@@ -69,6 +70,7 @@ EVENTS EVENT_SCHEMA
FILES TABLE_SCHEMA
GLOBAL_STATUS VARIABLE_NAME
GLOBAL_VARIABLES VARIABLE_NAME
+KEY_CACHES KEY_CACHE_NAME
KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
PARTITIONS TABLE_SCHEMA
PLUGINS PLUGIN_NAME
@@ -127,6 +129,7 @@ EVENTS EVENT_SCHEMA
FILES TABLE_SCHEMA
GLOBAL_STATUS VARIABLE_NAME
GLOBAL_VARIABLES VARIABLE_NAME
+KEY_CACHES KEY_CACHE_NAME
KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
PARTITIONS TABLE_SCHEMA
PLUGINS PLUGIN_NAME
@@ -204,6 +207,7 @@ INNODB_LOCK_WAITS information_schema.INN
INNODB_RSEG information_schema.INNODB_RSEG 1
INNODB_TABLE_STATS information_schema.INNODB_TABLE_STATS 1
INNODB_TRX information_schema.INNODB_TRX 1
+KEY_CACHES information_schema.KEY_CACHES 1
KEY_COLUMN_USAGE information_schema.KEY_COLUMN_USAGE 1
PARTITIONS information_schema.PARTITIONS 1
PBXT_STATISTICS information_schema.PBXT_STATISTICS 1
@@ -238,6 +242,7 @@ Database: information_schema
| FILES |
| GLOBAL_STATUS |
| GLOBAL_VARIABLES |
+| KEY_CACHES |
| KEY_COLUMN_USAGE |
| PARTITIONS |
| PLUGINS |
@@ -286,6 +291,7 @@ Database: INFORMATION_SCHEMA
| FILES |
| GLOBAL_STATUS |
| GLOBAL_VARIABLES |
+| KEY_CACHES |
| KEY_COLUMN_USAGE |
| PARTITIONS |
| PLUGINS |
@@ -328,5 +334,5 @@ Wildcard: inf_rmation_schema
+--------------------+
SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name<>'ndb_binlog_index' AND table_name<>'ndb_apply_status' GROUP BY TABLE_SCHEMA;
table_schema count(*)
-information_schema 43
+information_schema 44
mysql 22
=== modified file 'mysql-test/r/key_cache.result'
--- a/mysql-test/r/key_cache.result 2009-03-16 19:54:50 +0000
+++ b/mysql-test/r/key_cache.result 2010-06-29 00:10:53 +0000
@@ -1,5 +1,7 @@
drop table if exists t1, t2, t3;
-SET @save_key_buffer=@@key_buffer_size;
+SET @save_key_buffer_size=@@key_buffer_size;
+SET @save_key_cache_block_size=@@key_cache_block_size;
+SET @save_key_cache_partitions=@@key_cache_partitions;
SELECT @@key_buffer_size, @@small.key_buffer_size;
@@key_buffer_size @@small.key_buffer_size
2097152 131072
@@ -37,7 +39,7 @@ SELECT @@small.key_buffer_size;
SELECT @@medium.key_buffer_size;
@@medium.key_buffer_size
0
-SET @@global.key_buffer_size=@save_key_buffer;
+SET @@global.key_buffer_size=@save_key_buffer_size;
SELECT @@default.key_buffer_size;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'default.key_buffer_size' at line 1
SELECT @@skr.storage_engine="test";
@@ -366,3 +368,537 @@ Variable_name Value
key_cache_block_size 1536
SET GLOBAL key_cache_block_size= @bug28478_key_cache_block_size;
DROP TABLE t1;
+set global key_buffer_size=@save_key_buffer_size;
+set global key_cache_block_size=@save_key_cache_block_size;
+select @@key_buffer_size;
+@@key_buffer_size
+2097152
+select @@key_cache_block_size;
+@@key_cache_block_size
+1024
+select @@key_cache_partitions;
+@@key_cache_partitions
+0
+create table t1 (
+p int not null auto_increment primary key,
+a char(10));
+create table t2 (
+p int not null auto_increment primary key,
+i int, a char(10), key k1(i), key k2(a));
+select @@key_cache_partitions;
+@@key_cache_partitions
+0
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default NULL NULL 2097152 1024 0 # 0 0 0 0 0
+small NULL NULL 1048576 1024 1 # 0 1 0 2 1
+insert into t1 values (1, 'qqqq'), (2, 'yyyy');
+insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
+(3, 1, 'yyyy'), (4, 3, 'zzzz');
+select * from t1;
+p a
+1 qqqq
+2 yyyy
+select * from t2;
+p i a
+1 1 qqqq
+2 1 pppp
+3 1 yyyy
+4 3 zzzz
+update t1 set p=3 where p=1;
+update t2 set i=2 where i=1;
+show status like 'key_%';
+Variable_name Value
+Key_blocks_not_flushed 0
+Key_blocks_unused KEY_BLOCKS_UNUSED
+Key_blocks_used 4
+Key_read_requests 22
+Key_reads 0
+Key_write_requests 26
+Key_writes 6
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default NULL NULL 2097152 1024 4 # 0 22 0 26 6
+small NULL NULL 1048576 1024 1 # 0 1 0 2 1
+delete from t2 where a='zzzz';
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default NULL NULL 2097152 1024 4 # 0 29 0 32 9
+small NULL NULL 1048576 1024 1 # 0 1 0 2 1
+delete from t1;
+delete from t2;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default NULL NULL 2097152 1024 4 # 0 29 0 32 9
+small NULL NULL 1048576 1024 1 # 0 1 0 2 1
+set global key_cache_partitions=2;
+select @@key_cache_partitions;
+@@key_cache_partitions
+2
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 1048576 1024 0 # 0 0 0 0 0
+default 2 2 1048576 1024 0 # 0 0 0 0 0
+default 2 NULL 2097152 1024 0 # 0 0 0 0 0
+small NULL NULL 1048576 1024 1 # 0 1 0 2 1
+insert into t1 values (1, 'qqqq'), (2, 'yyyy');
+insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
+(3, 1, 'yyyy'), (4, 3, 'zzzz');
+select * from t1;
+p a
+1 qqqq
+2 yyyy
+select * from t2;
+p i a
+1 1 qqqq
+2 1 pppp
+3 1 yyyy
+4 3 zzzz
+update t1 set p=3 where p=1;
+update t2 set i=2 where i=1;
+show status like 'key_%';
+Variable_name Value
+Key_blocks_not_flushed 0
+Key_blocks_unused KEY_BLOCKS_UNUSED
+Key_blocks_used 4
+Key_read_requests 22
+Key_reads 0
+Key_write_requests 26
+Key_writes 6
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 1048576 1024 3 # 0 10 0 13 4
+default 2 2 1048576 1024 1 # 0 12 0 13 2
+default 2 NULL 2097152 1024 4 # 0 22 0 26 6
+small NULL NULL 1048576 1024 1 # 0 1 0 2 1
+delete from t1;
+delete from t2;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 1048576 1024 3 # 0 10 0 13 4
+default 2 2 1048576 1024 1 # 0 12 0 13 2
+default 2 NULL 2097152 1024 4 # 0 22 0 26 6
+small NULL NULL 1048576 1024 1 # 0 1 0 2 1
+set global key_cache_partitions=1;
+select @@key_cache_partitions;
+@@key_cache_partitions
+1
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 1 1 2097152 1024 0 # 0 0 0 0 0
+default 1 NULL 2097152 1024 0 # 0 0 0 0 0
+small NULL NULL 1048576 1024 1 # 0 1 0 2 1
+insert into t1 values (1, 'qqqq'), (2, 'yyyy');
+insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
+(3, 1, 'yyyy'), (4, 3, 'zzzz');
+select * from t1;
+p a
+1 qqqq
+2 yyyy
+select * from t2;
+p i a
+1 1 qqqq
+2 1 pppp
+3 1 yyyy
+4 3 zzzz
+update t1 set p=3 where p=1;
+update t2 set i=2 where i=1;
+show status like 'key_%';
+Variable_name Value
+Key_blocks_not_flushed 0
+Key_blocks_unused KEY_BLOCKS_UNUSED
+Key_blocks_used 4
+Key_read_requests 22
+Key_reads 0
+Key_write_requests 26
+Key_writes 6
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 1 1 2097152 1024 4 # 0 22 0 26 6
+default 1 NULL 2097152 1024 4 # 0 22 0 26 6
+small NULL NULL 1048576 1024 1 # 0 1 0 2 1
+delete from t1;
+delete from t2;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 1 1 2097152 1024 4 # 0 22 0 26 6
+default 1 NULL 2097152 1024 4 # 0 22 0 26 6
+small NULL NULL 1048576 1024 1 # 0 1 0 2 1
+flush tables;
+flush status;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 1 1 2097152 1024 4 # 0 0 0 0 0
+default 1 NULL 2097152 1024 4 # 0 0 0 0 0
+small NULL NULL 1048576 1024 1 # 0 0 0 0 0
+set global key_buffer_size=32*1024;
+select @@key_buffer_size;
+@@key_buffer_size
+32768
+set global key_cache_partitions=2;
+select @@key_cache_partitions;
+@@key_cache_partitions
+2
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 0 # 0 0 0 0 0
+default 2 2 16384 1024 0 # 0 0 0 0 0
+default 2 NULL 32768 1024 0 # 0 0 0 0 0
+small NULL NULL 1048576 1024 1 # 0 0 0 0 0
+insert into t1 values (1, 'qqqq'), (2, 'yyyy');
+insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
+(3, 1, 'yyyy'), (4, 3, 'zzzz');
+select * from t1;
+p a
+1 qqqq
+2 yyyy
+select * from t2;
+p i a
+1 1 qqqq
+2 1 pppp
+3 1 yyyy
+4 3 zzzz
+update t1 set p=3 where p=1;
+update t2 set i=2 where i=1;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 1 # 0 12 0 13 2
+default 2 2 16384 1024 3 # 0 10 0 13 4
+default 2 NULL 32768 1024 4 # 0 22 0 26 6
+small NULL NULL 1048576 1024 1 # 0 0 0 0 0
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 1951 # 1976 43
+default 2 2 16384 1024 # # 0 4782 # 1708 60
+default 2 NULL 32768 1024 # # 0 6733 # 3684 103
+small NULL NULL 1048576 1024 # # 0 0 # 0 0
+select * from t1 where p between 1010 and 1020 ;
+p a
+select * from t2 where p between 1010 and 1020 ;
+p i a
+1010 2 pppp
+1011 2 yyyy
+1012 3 zzzz
+1013 2 qqqq
+1014 2 pppp
+1015 2 yyyy
+1016 3 zzzz
+1017 2 qqqq
+1018 2 pppp
+1019 2 yyyy
+1020 3 zzzz
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 1954 # 1976 43
+default 2 2 16384 1024 # # 0 4796 # 1708 60
+default 2 NULL 32768 1024 # # 0 6750 # 3684 103
+small NULL NULL 1048576 1024 # # 0 0 # 0 0
+flush tables;
+flush status;
+update t1 set a='zzzz' where a='qqqq';
+update t2 set i=1 where i=2;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 940 10 939 10
+default 2 2 16384 1024 # # 0 2136 8 613 8
+default 2 NULL 32768 1024 # # 0 3076 18 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+set global keycache1.key_buffer_size=256*1024;
+select @@keycache1.key_buffer_size;
+@@keycache1.key_buffer_size
+262144
+set global keycache1.key_cache_partitions=7;
+select @@keycache1.key_cache_partitions;
+@@keycache1.key_cache_partitions
+7
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 940 10 939 10
+default 2 2 16384 1024 # # 0 2136 8 613 8
+default 2 NULL 32768 1024 # # 0 3076 18 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache1 7 1 37449 2048 # # 0 0 0 0 0
+keycache1 7 2 37449 2048 # # 0 0 0 0 0
+keycache1 7 3 37449 2048 # # 0 0 0 0 0
+keycache1 7 4 37449 2048 # # 0 0 0 0 0
+keycache1 7 5 37449 2048 # # 0 0 0 0 0
+keycache1 7 6 37449 2048 # # 0 0 0 0 0
+keycache1 7 7 37449 2048 # # 0 0 0 0 0
+keycache1 7 NULL 262143 2048 # # 0 0 0 0 0
+select * from information_schema.key_caches where key_cache_name like "key%";
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+keycache1 7 1 37449 2048 0 # 0 0 0 0 0
+keycache1 7 2 37449 2048 0 # 0 0 0 0 0
+keycache1 7 3 37449 2048 0 # 0 0 0 0 0
+keycache1 7 4 37449 2048 0 # 0 0 0 0 0
+keycache1 7 5 37449 2048 0 # 0 0 0 0 0
+keycache1 7 6 37449 2048 0 # 0 0 0 0 0
+keycache1 7 7 37449 2048 0 # 0 0 0 0 0
+keycache1 7 NULL 262143 2048 0 # 0 0 0 0 0
+cache index t1 key (`primary`) in keycache1;
+Table Op Msg_type Msg_text
+test.t1 assign_to_keycache status OK
+explain select p from t1 where p between 1010 and 1020;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where; Using index
+select p from t1 where p between 1010 and 1020;
+p
+explain select i from t2 where p between 1010 and 1020;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 28 Using where
+select i from t2 where p between 1010 and 1020;
+i
+1
+1
+3
+1
+1
+1
+3
+1
+1
+1
+3
+explain select count(*) from t1, t2 where t1.p = t2.i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index k1 k1 5 NULL 1024 Using index
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.i 1 Using index
+select count(*) from t1, t2 where t1.p = t2.i;
+count(*)
+256
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache1 7 1 37449 2048 # # 0 2 1 0 0
+keycache1 7 2 37449 2048 # # 0 7 1 0 0
+keycache1 7 3 37449 2048 # # 0 0 0 0 0
+keycache1 7 4 37449 2048 # # 0 5 1 0 0
+keycache1 7 5 37449 2048 # # 0 0 0 0 0
+keycache1 7 6 37449 2048 # # 0 0 0 0 0
+keycache1 7 7 37449 2048 # # 0 0 0 0 0
+keycache1 7 NULL 262143 2048 # # 0 14 3 0 0
+select * from information_schema.key_caches where key_cache_name like "key%";
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+keycache1 7 1 37449 2048 1 # 0 2 1 0 0
+keycache1 7 2 37449 2048 1 # 0 7 1 0 0
+keycache1 7 3 37449 2048 0 # 0 0 0 0 0
+keycache1 7 4 37449 2048 1 # 0 5 1 0 0
+keycache1 7 5 37449 2048 0 # 0 0 0 0 0
+keycache1 7 6 37449 2048 0 # 0 0 0 0 0
+keycache1 7 7 37449 2048 0 # 0 0 0 0 0
+keycache1 7 NULL 262143 2048 3 # 0 14 3 0 0
+cache index t2 in keycache1;
+Table Op Msg_type Msg_text
+test.t2 assign_to_keycache status OK
+update t2 set p=p+3000, i=2 where a='qqqq';
+select * from information_schema.key_caches where key_cache_name like "key%";
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+keycache1 7 1 37449 2048 3 # 0 44 3 43 2
+keycache1 7 2 37449 2048 4 # 0 61 4 51 1
+keycache1 7 3 37449 2048 4 # 0 177 4 176 3
+keycache1 7 4 37449 2048 4 # 0 122 4 119 3
+keycache1 7 5 37449 2048 4 # 0 840 4 335 4
+keycache1 7 6 37449 2048 3 # 0 627 3 133 3
+keycache1 7 7 37449 2048 3 # 0 211 3 214 3
+keycache1 7 NULL 262143 2048 25 # 0 2082 25 1071 19
+set global keycache2.key_buffer_size=1024*1024;
+cache index t2 in keycache2;
+Table Op Msg_type Msg_text
+test.t2 assign_to_keycache status OK
+insert into t2 values (2000, 3, 'yyyy');
+select * from information_schema.key_caches where key_cache_name like "keycache2";
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+keycache2 NULL NULL 1048576 1024 0 # 0 0 0 0 0
+select * from information_schema.key_caches where key_cache_name like "key%";
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+keycache1 7 1 37449 2048 3 # 0 44 3 43 2
+keycache1 7 2 37449 2048 4 # 0 61 4 51 1
+keycache1 7 3 37449 2048 4 # 0 177 4 176 3
+keycache1 7 4 37449 2048 4 # 0 122 4 119 3
+keycache1 7 5 37449 2048 4 # 0 840 4 335 4
+keycache1 7 6 37449 2048 3 # 0 627 3 133 3
+keycache1 7 7 37449 2048 3 # 0 211 3 214 3
+keycache1 7 NULL 262143 2048 25 # 0 2082 25 1071 19
+keycache2 NULL NULL 1048576 1024 0 # 0 0 0 0 0
+cache index t2 in keycache1;
+Table Op Msg_type Msg_text
+test.t2 assign_to_keycache status OK
+update t2 set p=p+5000 where a='zzzz';
+select * from t2 where p between 1010 and 1020;
+p i a
+1010 1 pppp
+1011 1 yyyy
+1014 1 pppp
+1015 1 yyyy
+1018 1 pppp
+1019 1 yyyy
+explain select p from t2 where p between 1010 and 1020;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 7 Using where; Using index
+select p from t2 where p between 1010 and 1020;
+p
+1010
+1011
+1014
+1015
+1018
+1019
+explain select i from t2 where a='yyyy' and i=3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ref k1,k2 k1 5 const 188 Using where
+select i from t2 where a='yyyy' and i=3;
+i
+3
+explain select a from t2 where a='yyyy' and i=3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ref k1,k2 k1 5 const 188 Using where
+select a from t2 where a='yyyy' and i=3 ;
+a
+yyyy
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache1 7 1 37449 2048 # # 0 85 6 68 3
+keycache1 7 2 37449 2048 # # 0 122 6 102 2
+keycache1 7 3 37449 2048 # # 0 271 8 254 6
+keycache1 7 4 37449 2048 # # 0 179 6 170 4
+keycache1 7 5 37449 2048 # # 0 1445 7 416 6
+keycache1 7 6 37449 2048 # # 0 863 6 345 5
+keycache1 7 7 37449 2048 # # 0 236 4 239 4
+keycache1 7 NULL 262143 2048 # # 0 3201 43 1594 30
+keycache2 NULL NULL 1048576 1024 # # 0 0 0 0 0
+set global keycache1.key_cache_block_size=2*1024;
+insert into t2 values (7000, 3, 'yyyy');
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache1 7 1 37449 2048 # # 0 1 1 1 1
+keycache1 7 2 37449 2048 # # 0 1 1 0 0
+keycache1 7 3 37449 2048 # # 0 0 0 0 0
+keycache1 7 4 37449 2048 # # 0 1 1 1 1
+keycache1 7 5 37449 2048 # # 0 1 1 0 0
+keycache1 7 6 37449 2048 # # 0 2 2 1 1
+keycache1 7 7 37449 2048 # # 0 0 0 0 0
+keycache1 7 NULL 262143 2048 # # 0 6 6 3 3
+keycache2 NULL NULL 1048576 1024 # # 0 0 0 0 0
+set global keycache1.key_cache_block_size=8*1024;
+insert into t2 values (8000, 3, 'yyyy');
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache1 3 1 87381 8192 # # 0 1 1 1 1
+keycache1 3 2 87381 8192 # # 0 3 2 1 1
+keycache1 3 3 87381 8192 # # 0 2 2 1 1
+keycache1 3 NULL 262143 8192 # # 0 6 5 3 3
+keycache2 NULL NULL 1048576 1024 # # 0 0 0 0 0
+set global keycache1.key_buffer_size=64*1024;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache2 NULL NULL 1048576 1024 # # 0 0 0 0 0
+set global keycache1.key_cache_block_size=2*1024;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache1 3 1 21845 2048 # # 0 0 0 0 0
+keycache1 3 2 21845 2048 # # 0 0 0 0 0
+keycache1 3 3 21845 2048 # # 0 0 0 0 0
+keycache1 3 NULL 65535 2048 # # 0 0 0 0 0
+keycache2 NULL NULL 1048576 1024 # # 0 0 0 0 0
+set global keycache1.key_cache_block_size=8*1024;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache2 NULL NULL 1048576 1024 # # 0 0 0 0 0
+set global keycache1.key_buffer_size=0;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache2 NULL NULL 1048576 1024 # # 0 0 0 0 0
+set global keycache1.key_cache_block_size=8*1024;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache2 NULL NULL 1048576 1024 # # 0 0 0 0 0
+set global keycache1.key_buffer_size=0;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache2 NULL NULL 1048576 1024 # # 0 0 0 0 0
+set global keycache1.key_buffer_size=128*1024;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache1 1 1 131072 8192 # # 0 0 0 0 0
+keycache1 1 NULL 131072 8192 # # 0 0 0 0 0
+keycache2 NULL NULL 1048576 1024 # # 0 0 0 0 0
+set global keycache1.key_cache_block_size=1024;
+select * from information_schema.key_caches;
+KEY_CACHE_NAME PARTITIONS PARTITION_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
+default 2 1 16384 1024 # # 0 966 12 939 10
+default 2 2 16384 1024 # # 0 2206 12 613 8
+default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+small NULL NULL 1048576 1024 # # 0 0 0 0 0
+keycache1 7 1 18724 1024 # # 0 0 0 0 0
+keycache1 7 2 18724 1024 # # 0 0 0 0 0
+keycache1 7 3 18724 1024 # # 0 0 0 0 0
+keycache1 7 4 18724 1024 # # 0 0 0 0 0
+keycache1 7 5 18724 1024 # # 0 0 0 0 0
+keycache1 7 6 18724 1024 # # 0 0 0 0 0
+keycache1 7 7 18724 1024 # # 0 0 0 0 0
+keycache1 7 NULL 131068 1024 # # 0 0 0 0 0
+keycache2 NULL NULL 1048576 1024 # # 0 0 0 0 0
+drop table t1,t2;
+set global keycache1.key_buffer_size=0;
+set global keycache2.key_buffer_size=0;
+set global key_buffer_size=@save_key_buffer_size;
+set global key_cache_partitions=@save_key_cache_partitions;
=== modified file 'mysql-test/t/key_cache.test'
--- a/mysql-test/t/key_cache.test 2008-03-27 16:43:17 +0000
+++ b/mysql-test/t/key_cache.test 2010-06-29 00:10:53 +0000
@@ -1,11 +1,13 @@
#
-# Test of multiple key caches
+# Test of multiple key caches, simple and partitioned
#
--disable_warnings
drop table if exists t1, t2, t3;
--enable_warnings
-SET @save_key_buffer=@@key_buffer_size;
+SET @save_key_buffer_size=@@key_buffer_size;
+SET @save_key_cache_block_size=@@key_cache_block_size;
+SET @save_key_cache_partitions=@@key_cache_partitions;
SELECT @@key_buffer_size, @@small.key_buffer_size;
@@ -33,7 +35,7 @@ SELECT @@`default`.key_buffer_size;
SELECT @@small.key_buffer_size;
SELECT @@medium.key_buffer_size;
-SET @@global.key_buffer_size=@save_key_buffer;
+SET @@global.key_buffer_size=@save_key_buffer_size;
#
# Errors
@@ -247,3 +249,263 @@ SET GLOBAL key_cache_block_size= @bug284
DROP TABLE t1;
# End of 4.1 tests
+
+#
+# Test cases for partitioned key caches
+#
+
+# Test usage of the KEY_CACHE table from information schema
+# for a simple key cache
+
+set global key_buffer_size=@save_key_buffer_size;
+set global key_cache_block_size=@save_key_cache_block_size;
+select @@key_buffer_size;
+select @@key_cache_block_size;
+select @@key_cache_partitions;
+
+create table t1 (
+ p int not null auto_increment primary key,
+ a char(10));
+create table t2 (
+ p int not null auto_increment primary key,
+ i int, a char(10), key k1(i), key k2(a));
+
+select @@key_cache_partitions;
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+insert into t1 values (1, 'qqqq'), (2, 'yyyy');
+insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
+ (3, 1, 'yyyy'), (4, 3, 'zzzz');
+select * from t1;
+select * from t2;
+update t1 set p=3 where p=1;
+update t2 set i=2 where i=1;
+
+--replace_result 1808 KEY_BLOCKS_UNUSED 1670 KEY_BLOCKS_UNUSED
+show status like 'key_%';
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+delete from t2 where a='zzzz';
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+delete from t1;
+delete from t2;
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+# For the key cache with 2 partitions execute the same sequence of
+# statements as for the simple cache above.
+# The statistical information on the number of i/o requests and
+# the number of actual i/o operations is expected to be the same.
+
+set global key_cache_partitions=2;
+select @@key_cache_partitions;
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+insert into t1 values (1, 'qqqq'), (2, 'yyyy');
+insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
+ (3, 1, 'yyyy'), (4, 3, 'zzzz');
+select * from t1;
+select * from t2;
+update t1 set p=3 where p=1;
+update t2 set i=2 where i=1;
+
+--replace_result 1808 KEY_BLOCKS_UNUSED 1670 KEY_BLOCKS_UNUSED
+show status like 'key_%';
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+delete from t1;
+delete from t2;
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+# Check that we can work with one partition with the same results
+
+set global key_cache_partitions=1;
+select @@key_cache_partitions;
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+insert into t1 values (1, 'qqqq'), (2, 'yyyy');
+insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
+ (3, 1, 'yyyy'), (4, 3, 'zzzz');
+select * from t1;
+select * from t2;
+update t1 set p=3 where p=1;
+update t2 set i=2 where i=1;
+
+--replace_result 1808 KEY_BLOCKS_UNUSED 1670 KEY_BLOCKS_UNUSED
+show status like 'key_%';
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+delete from t1;
+delete from t2;
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+flush tables; flush status;
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+# Switch back to 2 partitions
+
+set global key_buffer_size=32*1024;
+select @@key_buffer_size;
+set global key_cache_partitions=2;
+select @@key_cache_partitions;
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+insert into t1 values (1, 'qqqq'), (2, 'yyyy');
+insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
+ (3, 1, 'yyyy'), (4, 3, 'zzzz');
+select * from t1;
+select * from t2;
+update t1 set p=3 where p=1;
+update t2 set i=2 where i=1;
+
+--replace_column 7 #
+select * from information_schema.key_caches;
+
+# Add more rows to tables t1 and t2
+
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+insert into t1(a) select a from t1;
+
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+insert into t2(i,a) select i,a from t2;
+
+--replace_column 6 # 7 # 10 #
+select * from information_schema.key_caches;
+
+select * from t1 where p between 1010 and 1020 ;
+select * from t2 where p between 1010 and 1020 ;
+--replace_column 6 # 7 # 10 #
+select * from information_schema.key_caches;
+
+flush tables; flush status;
+update t1 set a='zzzz' where a='qqqq';
+update t2 set i=1 where i=2;
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+# Now test how we can work with 7 partitions
+
+set global keycache1.key_buffer_size=256*1024;
+select @@keycache1.key_buffer_size;
+set global keycache1.key_cache_partitions=7;
+select @@keycache1.key_cache_partitions;
+
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+--replace_column 7 #
+select * from information_schema.key_caches where key_cache_name like "key%";
+
+cache index t1 key (`primary`) in keycache1;
+
+explain select p from t1 where p between 1010 and 1020;
+select p from t1 where p between 1010 and 1020;
+explain select i from t2 where p between 1010 and 1020;
+select i from t2 where p between 1010 and 1020;
+explain select count(*) from t1, t2 where t1.p = t2.i;
+select count(*) from t1, t2 where t1.p = t2.i;
+
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+--replace_column 7 #
+select * from information_schema.key_caches where key_cache_name like "key%";
+
+cache index t2 in keycache1;
+update t2 set p=p+3000, i=2 where a='qqqq';
+--replace_column 7 #
+select * from information_schema.key_caches where key_cache_name like "key%";
+
+set global keycache2.key_buffer_size=1024*1024;
+cache index t2 in keycache2;
+insert into t2 values (2000, 3, 'yyyy');
+--replace_column 7 #
+select * from information_schema.key_caches where key_cache_name like "keycache2";
+--replace_column 7 #
+select * from information_schema.key_caches where key_cache_name like "key%";
+
+cache index t2 in keycache1;
+update t2 set p=p+5000 where a='zzzz';
+select * from t2 where p between 1010 and 1020;
+explain select p from t2 where p between 1010 and 1020;
+select p from t2 where p between 1010 and 1020;
+explain select i from t2 where a='yyyy' and i=3;
+select i from t2 where a='yyyy' and i=3;
+explain select a from t2 where a='yyyy' and i=3;
+select a from t2 where a='yyyy' and i=3 ;
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+set global keycache1.key_cache_block_size=2*1024;
+insert into t2 values (7000, 3, 'yyyy');
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+set global keycache1.key_cache_block_size=8*1024;
+insert into t2 values (8000, 3, 'yyyy');
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+set global keycache1.key_buffer_size=64*1024;
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+set global keycache1.key_cache_block_size=2*1024;
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+set global keycache1.key_cache_block_size=8*1024;
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+set global keycache1.key_buffer_size=0;
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+set global keycache1.key_cache_block_size=8*1024;
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+set global keycache1.key_buffer_size=0;
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+set global keycache1.key_buffer_size=128*1024;
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+set global keycache1.key_cache_block_size=1024;
+--replace_column 6 # 7 #
+select * from information_schema.key_caches;
+
+drop table t1,t2;
+
+set global keycache1.key_buffer_size=0;
+set global keycache2.key_buffer_size=0;
+
+set global key_buffer_size=@save_key_buffer_size;
+set global key_cache_partitions=@save_key_cache_partitions;
+
+#End of 5.1 tests
=== modified file 'mysys/mf_keycache.c'
--- a/mysys/mf_keycache.c 2009-09-07 20:50:10 +0000
+++ b/mysys/mf_keycache.c 2010-06-29 00:10:53 +0000
@@ -13,6 +13,35 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/******************************************************************************
+ The file contains the following modules:
+
+ Simple Key Cache Module
+
+ Partitioned Key Cache Module
+
+ Key Cache Interface Module
+
+******************************************************************************/
+
+#include "mysys_priv.h"
+#include "mysys_err.h"
+#include <keycache.h>
+#include "my_static.h"
+#include <m_string.h>
+#include <my_bit.h>
+#include <errno.h>
+#include <stdarg.h>
+
+/******************************************************************************
+ Simple Key Cache Module
+
+ The module contains implementations of all key cache interface functions
+ employed by partitioned key caches.
+  employed by simple (non-partitioned) key caches.
+******************************************************************************/
+
/*
These functions handle keyblock cacheing for ISAM and MyISAM tables.
@@ -101,14 +130,77 @@
I/O finished.
*/
-#include "mysys_priv.h"
-#include "mysys_err.h"
-#include <keycache.h>
-#include "my_static.h"
-#include <m_string.h>
-#include <my_bit.h>
-#include <errno.h>
-#include <stdarg.h>
+/* declare structures that are used by st_key_cache */
+
+struct st_block_link;
+typedef struct st_block_link BLOCK_LINK;
+struct st_keycache_page;
+typedef struct st_keycache_page KEYCACHE_PAGE;
+struct st_hash_link;
+typedef struct st_hash_link HASH_LINK;
+
+/* info about requests in a waiting queue */
+typedef struct st_keycache_wqueue
+{
+ struct st_my_thread_var *last_thread; /* circular list of waiting threads */
+} KEYCACHE_WQUEUE;
+
+#define CHANGED_BLOCKS_HASH 128 /* must be power of 2 */
+
+/* Control block for a simple (non-partitioned) key cache */
+
+typedef struct st_s_key_cache_cb
+{
+ my_bool key_cache_inited; /* <=> control block is allocated */
+ my_bool in_resize; /* true during resize operation */
+ my_bool resize_in_flush; /* true during flush of resize operation */
+ my_bool can_be_used; /* usage of cache for read/write is allowed */
+ size_t key_cache_mem_size; /* specified size of the cache memory */
+ uint key_cache_block_size; /* size of the page buffer of a cache block */
+ ulong min_warm_blocks; /* min number of warm blocks; */
+ ulong age_threshold; /* age threshold for hot blocks */
+ ulonglong keycache_time; /* total number of block link operations */
+ uint hash_entries; /* max number of entries in the hash table */
+ int hash_links; /* max number of hash links */
+ int hash_links_used; /* number of hash links currently used */
+ int disk_blocks; /* max number of blocks in the cache */
+ ulong blocks_used; /* maximum number of concurrently used blocks */
+ ulong blocks_unused; /* number of currently unused blocks */
+ ulong blocks_changed; /* number of currently dirty blocks */
+ ulong warm_blocks; /* number of blocks in warm sub-chain */
+ ulong cnt_for_resize_op; /* counter to block resize operation */
+ long blocks_available; /* number of blocks available in the LRU chain */
+ HASH_LINK **hash_root; /* arr. of entries into hash table buckets */
+ HASH_LINK *hash_link_root; /* memory for hash table links */
+ HASH_LINK *free_hash_list; /* list of free hash links */
+ BLOCK_LINK *free_block_list; /* list of free blocks */
+ BLOCK_LINK *block_root; /* memory for block links */
+ uchar HUGE_PTR *block_mem; /* memory for block buffers */
+ BLOCK_LINK *used_last; /* ptr to the last block of the LRU chain */
+ BLOCK_LINK *used_ins; /* ptr to the insertion block in LRU chain */
+ pthread_mutex_t cache_lock; /* to lock access to the cache structure */
+ KEYCACHE_WQUEUE resize_queue; /* threads waiting during resize operation */
+ /*
+ Waiting for a zero resize count. Using a queue for symmetry though
+ only one thread can wait here.
+ */
+ KEYCACHE_WQUEUE waiting_for_resize_cnt;
+ KEYCACHE_WQUEUE waiting_for_hash_link; /* waiting for a free hash link */
+ KEYCACHE_WQUEUE waiting_for_block; /* requests waiting for a free block */
+ BLOCK_LINK *changed_blocks[CHANGED_BLOCKS_HASH]; /* hash for dirty file bl.*/
+ BLOCK_LINK *file_blocks[CHANGED_BLOCKS_HASH]; /* hash for other file bl.*/
+
+ /* Statistics variables. These are reset in reset_key_cache_counters(). */
+ ulong global_blocks_changed; /* number of currently dirty blocks */
+ ulonglong global_cache_w_requests;/* number of write requests (write hits) */
+ ulonglong global_cache_write; /* number of writes from cache to files */
+ ulonglong global_cache_r_requests;/* number of read requests (read hits) */
+ ulonglong global_cache_read; /* number of reads from files to cache */
+
+ int blocks; /* max number of blocks in the cache */
+ uint hash_factor; /* factor used to calculate hash function */
+ my_bool in_init; /* Set to 1 in MySQL during init/resize */
+} S_KEY_CACHE_CB;
/*
Some compilation flags have been added specifically for this module
@@ -220,7 +312,12 @@ KEY_CACHE *dflt_key_cache= &dflt_key_cac
#define FLUSH_CACHE 2000 /* sort this many blocks at once */
-static int flush_all_key_blocks(KEY_CACHE *keycache);
+static int flush_all_key_blocks(S_KEY_CACHE_CB *keycache);
+/*
+static void s_change_key_cache_param(void *keycache_cb, uint division_limit,
+ uint age_threshold);
+*/
+static void s_end_key_cache(void *keycache_cb, my_bool cleanup);
#ifdef THREAD
static void wait_on_queue(KEYCACHE_WQUEUE *wqueue,
pthread_mutex_t *mutex);
@@ -229,15 +326,16 @@ static void release_whole_queue(KEYCACHE
#define wait_on_queue(wqueue, mutex) do {} while (0)
#define release_whole_queue(wqueue) do {} while (0)
#endif
-static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block);
+static void free_block(S_KEY_CACHE_CB *keycache, BLOCK_LINK *block);
#if !defined(DBUG_OFF)
-static void test_key_cache(KEY_CACHE *keycache,
+static void test_key_cache(S_KEY_CACHE_CB *keycache,
const char *where, my_bool lock);
#endif
-
+#define KEYCACHE_BASE_EXPR(f, pos) \
+ ((ulong) ((pos) / keycache->key_cache_block_size) + (ulong) (f))
#define KEYCACHE_HASH(f, pos) \
-(((ulong) ((pos) / keycache->key_cache_block_size) + \
- (ulong) (f)) & (keycache->hash_entries-1))
+ ((KEYCACHE_BASE_EXPR(f, pos) / keycache->hash_factor) & \
+ (keycache->hash_entries-1))
#define FILE_HASH(f) ((uint) (f) & (CHANGED_BLOCKS_HASH-1))
#define DEFAULT_KEYCACHE_DEBUG_LOG "keycache_debug.log"
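The patched KEYCACHE_HASH above divides the old hash expression by the new hash_factor field (initialized to 1 in s_init_key_cache) before masking with hash_entries-1. A small standalone re-statement of the computation follows; the parameter values in main() are made up for illustration only.

#include <stdio.h>

/* Standalone re-statement of the patched KEYCACHE_HASH computation. */
static unsigned keycache_hash(int file, unsigned long long pos,
                              unsigned block_size, unsigned hash_factor,
                              unsigned hash_entries /* power of two */)
{
  unsigned long base= (unsigned long) (pos / block_size) + (unsigned long) file;
  return (unsigned) ((base / hash_factor) & (hash_entries - 1));
}

int main(void)
{
  /* 1024-byte blocks, 512 hash buckets, hash_factor 1 as set by init */
  printf("%u\n", keycache_hash(3, 0,        1024, 1, 512));  /* bucket 3 */
  printf("%u\n", keycache_hash(3, 4 * 1024, 1024, 1, 512));  /* bucket 7 */
  /* a larger hash_factor maps neighbouring blocks to the same bucket */
  printf("%u\n", keycache_hash(3, 4 * 1024, 1024, 2, 512));  /* bucket 3 */
  return 0;
}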
@@ -333,9 +431,10 @@ static int keycache_pthread_cond_signal(
#define inline /* disabled inline for easier debugging */
static int fail_block(BLOCK_LINK *block);
static int fail_hlink(HASH_LINK *hlink);
-static int cache_empty(KEY_CACHE *keycache);
+static int cache_empty(S_KEY_CACHE_CB *keycache);
#endif
+
static inline uint next_power(uint value)
{
return (uint) my_round_up_to_next_power((uint32) value) << 1;
@@ -343,19 +442,32 @@ static inline uint next_power(uint value
/*
- Initialize a key cache
+ Initialize a simple key cache
SYNOPSIS
- init_key_cache()
- keycache pointer to a key cache data structure
- key_cache_block_size size of blocks to keep cached data
- use_mem total memory to use for the key cache
- division_limit division limit (may be zero)
- age_threshold age threshold (may be zero)
+ s_init_key_cache()
+ keycache_cb pointer to the control block of a simple key cache
+ key_cache_block_size size of blocks to keep cached data
+      use_mem                memory to use for the key cache buffers/structures
+ division_limit division limit (may be zero)
+ age_threshold age threshold (may be zero)
+
+ DESCRIPTION
+ This function is the implementation of the init_key_cache interface
+ function that is employed by simple (non-partitioned) key caches.
+ The function builds a simple key cache and initializes the control block
+ structure of the type S_KEY_CACHE_CB that is used for this key cache.
+ The parameter keycache_cb is supposed to point to this structure.
+ The parameter key_cache_block_size specifies the size of the blocks in
+    the key cache to be built. The parameters division_limit and age_threshold
+ determine the initial values of those characteristics of the key cache
+ that are used for midpoint insertion strategy. The parameter use_mem
+ specifies the total amount of memory to be allocated for key cache blocks
+ and auxiliary structures.
RETURN VALUE
number of blocks in the key cache, if successful,
- 0 - otherwise.
+ <= 0 - otherwise.
NOTES.
if keycache->key_cache_inited != 0 we assume that the key cache
@@ -367,10 +479,12 @@ static inline uint next_power(uint value
*/
-int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
- size_t use_mem, uint division_limit,
- uint age_threshold)
+static
+int s_init_key_cache(void *keycache_cb, uint key_cache_block_size,
+ size_t use_mem, uint division_limit,
+ uint age_threshold)
{
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
ulong blocks, hash_links;
size_t length;
int error;
@@ -384,12 +498,15 @@ int init_key_cache(KEY_CACHE *keycache,
DBUG_RETURN(0);
}
+ keycache->blocks_used= keycache->blocks_unused= 0;
+ keycache->global_blocks_changed= 0;
keycache->global_cache_w_requests= keycache->global_cache_r_requests= 0;
keycache->global_cache_read= keycache->global_cache_write= 0;
keycache->disk_blocks= -1;
if (! keycache->key_cache_inited)
{
keycache->key_cache_inited= 1;
+ keycache->hash_factor= 1;
/*
Initialize these variables once only.
Their value must survive re-initialization during resizing.
@@ -531,51 +648,43 @@ err:
/*
- Resize a key cache
+ Prepare for resizing a simple key cache
SYNOPSIS
- resize_key_cache()
- keycache pointer to a key cache data structure
- key_cache_block_size size of blocks to keep cached data
- use_mem total memory to use for the new key cache
- division_limit new division limit (if not zero)
- age_threshold new age threshold (if not zero)
+ s_prepare_resize_key_cache()
+ keycache_cb pointer to the control block of a simple key cache
+ with_resize_queue <=> resize queue is used
+ release_lock <=> release the key cache lock before return
- RETURN VALUE
- number of blocks in the key cache, if successful,
- 0 - otherwise.
+ DESCRIPTION
+    This function flushes all dirty pages from a simple key cache and then
+    destroys the key cache by calling s_end_key_cache. The function
+    considers the parameter keycache_cb as a pointer to the control block
+    structure of the type S_KEY_CACHE_CB for this key cache.
+    The parameter with_resize_queue determines whether the resize queue is
+    involved (MySQL server never uses this queue). The parameter release_lock
+    says whether the key cache lock must be released before return from
+    the function.
- NOTES.
- The function first compares the memory size and the block size parameters
- with the key cache values.
+ RETURN VALUE
+ 0 - on success,
+ 1 - otherwise.
- If they differ the function free the the memory allocated for the
- old key cache blocks by calling the end_key_cache function and
- then rebuilds the key cache with new blocks by calling
- init_key_cache.
+ NOTES
+    This function is called by s_resize_key_cache and p_resize_key_cache,
+    which resize simple and partitioned key caches respectively.
- The function starts the operation only when all other threads
- performing operations with the key cache let her to proceed
- (when cnt_for_resize=0).
*/
-int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
- size_t use_mem, uint division_limit,
- uint age_threshold)
+static
+int s_prepare_resize_key_cache(void *keycache_cb,
+ my_bool with_resize_queue,
+ my_bool release_lock)
{
- int blocks;
- DBUG_ENTER("resize_key_cache");
-
- if (!keycache->key_cache_inited)
- DBUG_RETURN(keycache->disk_blocks);
-
- if(key_cache_block_size == keycache->key_cache_block_size &&
- use_mem == keycache->key_cache_mem_size)
- {
- change_key_cache_param(keycache, division_limit, age_threshold);
- DBUG_RETURN(keycache->disk_blocks);
- }
-
+ int res= 0;
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
+ DBUG_ENTER("s_prepare_resize_key_cache");
+
keycache_pthread_mutex_lock(&keycache->cache_lock);
#ifdef THREAD
@@ -585,7 +694,7 @@ int resize_key_cache(KEY_CACHE *keycache
one resizer only. In set_var.cc keycache->in_init is used to block
multiple attempts.
*/
- while (keycache->in_resize)
+ while (with_resize_queue && keycache->in_resize)
{
/* purecov: begin inspected */
wait_on_queue(&keycache->resize_queue, &keycache->cache_lock);
@@ -610,8 +719,8 @@ int resize_key_cache(KEY_CACHE *keycache
{
/* TODO: if this happens, we should write a warning in the log file ! */
keycache->resize_in_flush= 0;
- blocks= 0;
keycache->can_be_used= 0;
+ res= 1;
goto finish;
}
DBUG_ASSERT(cache_empty(keycache));
@@ -637,29 +746,145 @@ int resize_key_cache(KEY_CACHE *keycache
#else
KEYCACHE_DBUG_ASSERT(keycache->cnt_for_resize_op == 0);
#endif
-
- /*
- Free old cache structures, allocate new structures, and initialize
- them. Note that the cache_lock mutex and the resize_queue are left
- untouched. We do not lose the cache_lock and will release it only at
- the end of this function.
- */
- end_key_cache(keycache, 0); /* Don't free mutex */
- /* The following will work even if use_mem is 0 */
- blocks= init_key_cache(keycache, key_cache_block_size, use_mem,
- division_limit, age_threshold);
+
+ s_end_key_cache(keycache_cb, 0);
finish:
+ if (release_lock)
+ keycache_pthread_mutex_unlock(&keycache->cache_lock);
+ DBUG_RETURN(res);
+}
+
+
+/*
+ Finalize resizing a simple key cache
+
+ SYNOPSIS
+ s_finish_resize_key_cache()
+ keycache_cb pointer to the control block of a simple key cache
+ with_resize_queue <=> resize queue is used
+ acquire_lock <=> acquire the key cache lock at start
+
+ DESCRIPTION
+ This function performs finalizing actions for the operation of
+ resizing a simple key cache. The function considers the parameter
+ keycache_cb as a pointer to the control block structure of the type
+ S_KEY_CACHE_CB for this key cache. The function sets the flag
+ in_resize in this structure to FALSE.
+ The parameter with_resize_queue determines whether the resize queue
+ is involved (MySQL server never uses this queue).
+ The parameter acquire_lock says whether the key cache lock must be
+ acquired at the start of the function.
+
+ RETURN VALUE
+ none
+
+ NOTES
+ This function is called by s_resize_key_cache and p_resize_key_cache
+ that resize simple and partitioned key caches respectively.
+
+*/
+
+static
+void s_finish_resize_key_cache(void *keycache_cb,
+ my_bool with_resize_queue,
+ my_bool acquire_lock)
+{
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
+ DBUG_ENTER("s_finish_resize_key_cache");
+
+ if (acquire_lock)
+ keycache_pthread_mutex_lock(&keycache->cache_lock);
+
/*
Mark the resize finished. This allows other threads to start a
resize or to request new cache blocks.
*/
keycache->in_resize= 0;
-
- /* Signal waiting threads. */
- release_whole_queue(&keycache->resize_queue);
+
+ if (with_resize_queue)
+ {
+ /* Signal waiting threads. */
+ release_whole_queue(&keycache->resize_queue);
+ }
keycache_pthread_mutex_unlock(&keycache->cache_lock);
+
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Resize a simple key cache
+
+ SYNOPSIS
+ s_resize_key_cache()
+ keycache_cb pointer to the control block of a simple key cache
+ key_cache_block_size size of blocks to keep cached data
+ use_mem memory to use for the key cache buffers/structures
+ division_limit new division limit (if not zero)
+ age_threshold new age threshold (if not zero)
+
+ DESCRIPTION
+ This function is the implementation of the resize_key_cache interface
+ function that is employed by simple (non-partitioned) key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type S_KEY_CACHE_CB for the simple key
+ cache to be resized.
+ The parameter key_cache_block_size specifies the new size of the blocks in
+ the key cache. The parameters division_limit and age_threshold
+ determine the new initial values of those characteristics of the key cache
+ that are used for midpoint insertion strategy. The parameter use_mem
+ specifies the total amount of memory to be allocated for key cache blocks
+ and auxiliary structures in the new key cache.
+
+ RETURN VALUE
+ number of blocks in the key cache, if successful,
+ 0 - otherwise.
+
+ NOTES.
+ The function first calls the function s_prepare_resize_key_cache
+ to flush all dirty blocks from the key cache and to free the memory used
+ for key cache blocks and auxiliary structures. After this the
+ function builds a new key cache with new parameters.
+
+ This implementation doesn't block the calls and executions of other
+ functions from the key cache interface. However it assumes that the
+ calls of s_resize_key_cache itself are serialized.
+
+ The function starts the operation only when all other threads
+ performing operations with the key cache allow it to proceed
+ (when cnt_for_resize=0).
+
+*/
+
+static
+int s_resize_key_cache(void *keycache_cb, uint key_cache_block_size,
+ size_t use_mem, uint division_limit,
+ uint age_threshold)
+{
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
+ int blocks= 0;
+ DBUG_ENTER("s_resize_key_cache");
+
+ if (!keycache->key_cache_inited)
+ DBUG_RETURN(keycache->disk_blocks);
+
+ /*
+ Note that the cache_lock mutex and the resize_queue are left untouched.
+ We do not lose the cache_lock and will release it only at the end of
+ this function.
+ */
+ if (s_prepare_resize_key_cache(keycache_cb, 1, 0))
+ goto finish;
+
+ /* The following will work even if use_mem is 0 */
+ blocks= s_init_key_cache(keycache, key_cache_block_size, use_mem,
+ division_limit, age_threshold);
+
+finish:
+ s_finish_resize_key_cache(keycache_cb, 1, 0);
+
DBUG_RETURN(blocks);
}
@@ -667,7 +892,7 @@ finish:
/*
Increment counter blocking resize key cache operation
*/
-static inline void inc_counter_for_resize_op(KEY_CACHE *keycache)
+static inline void inc_counter_for_resize_op(S_KEY_CACHE_CB *keycache)
{
keycache->cnt_for_resize_op++;
}
@@ -677,35 +902,49 @@ static inline void inc_counter_for_resiz
Decrement counter blocking resize key cache operation;
Signal the operation to proceed when counter becomes equal zero
*/
-static inline void dec_counter_for_resize_op(KEY_CACHE *keycache)
+static inline void dec_counter_for_resize_op(S_KEY_CACHE_CB *keycache)
{
if (!--keycache->cnt_for_resize_op)
release_whole_queue(&keycache->waiting_for_resize_cnt);
}
+
/*
- Change the key cache parameters
+ Change key cache parameters of a simple key cache
SYNOPSIS
- change_key_cache_param()
- keycache pointer to a key cache data structure
- division_limit new division limit (if not zero)
- age_threshold new age threshold (if not zero)
+ s_change_key_cache_param()
+ keycache_cb pointer to the control block of a simple key cache
+ division_limit new division limit (if not zero)
+ age_threshold new age threshold (if not zero)
+
+ DESCRIPTION
+ This function is the implementation of the change_key_cache_param interface
+ function that is employed by simple (non-partitioned) key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type S_KEY_CACHE_CB for the simple key
+ cache where new values of the division limit and the age threshold used
+ for midpoint insertion strategy are to be set. The parameters
+ division_limit and age_threshold provide these new values.
RETURN VALUE
none
NOTES.
- Presently the function resets the key cache parameters
- concerning midpoint insertion strategy - division_limit and
- age_threshold.
+ Presently the function resets the key cache parameters concerning
+ midpoint insertion strategy - division_limit and age_threshold.
+ This function changes some parameters of a given key cache without
+ reformatting it. The function does not touch the contents of the key
+ cache blocks.
+
*/
-void change_key_cache_param(KEY_CACHE *keycache, uint division_limit,
- uint age_threshold)
+static
+void s_change_key_cache_param(void *keycache_cb, uint division_limit,
+ uint age_threshold)
{
- DBUG_ENTER("change_key_cache_param");
-
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
+ DBUG_ENTER("s_change_key_cache_param");
keycache_pthread_mutex_lock(&keycache->cache_lock);
if (division_limit)
keycache->min_warm_blocks= (keycache->disk_blocks *
@@ -719,20 +958,32 @@ void change_key_cache_param(KEY_CACHE *k
/*
- Remove key_cache from memory
+ Destroy a simple key cache
SYNOPSIS
- end_key_cache()
- keycache key cache handle
- cleanup Complete free (Free also mutex for key cache)
+ s_end_key_cache()
+ keycache_cb pointer to the control block of a simple key cache
+ cleanup <=> complete free (free also mutex for key cache)
+
+ DESCRIPTION
+ This function is the implementation of the end_key_cache interface
+ function that is employed by simple (non-partitioned) key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type S_KEY_CACHE_CB for the simple key
+ cache to be destroyed.
+ The function frees the memory allocated for the key cache blocks and
+ auxiliary structures. If the value of the parameter cleanup is TRUE
+ then even the key cache mutex is freed.
RETURN VALUE
none
*/
-void end_key_cache(KEY_CACHE *keycache, my_bool cleanup)
+static
+void s_end_key_cache(void *keycache_cb, my_bool cleanup)
{
- DBUG_ENTER("end_key_cache");
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
+ DBUG_ENTER("s_end_key_cache");
DBUG_PRINT("enter", ("key_cache: 0x%lx", (long) keycache));
if (!keycache->key_cache_inited)
@@ -760,7 +1011,14 @@ void end_key_cache(KEY_CACHE *keycache,
(ulong) keycache->global_cache_r_requests,
(ulong) keycache->global_cache_read));
- if (cleanup)
+ /*
+ Reset these values to be able to detect a disabled key cache.
+ See Bug#44068 (RESTORE can disable the MyISAM Key Cache).
+ */
+ keycache->blocks_used= 0;
+ keycache->blocks_unused= 0;
+
+ if (cleanup)
{
pthread_mutex_destroy(&keycache->cache_lock);
keycache->key_cache_inited= keycache->can_be_used= 0;
@@ -1016,7 +1274,7 @@ static inline void link_changed(BLOCK_LI
void
*/
-static void link_to_file_list(KEY_CACHE *keycache,
+static void link_to_file_list(S_KEY_CACHE_CB *keycache,
BLOCK_LINK *block, int file,
my_bool unlink_block)
{
@@ -1057,7 +1315,7 @@ static void link_to_file_list(KEY_CACHE
void
*/
-static void link_to_changed_list(KEY_CACHE *keycache,
+static void link_to_changed_list(S_KEY_CACHE_CB *keycache,
BLOCK_LINK *block)
{
DBUG_ASSERT(block->status & BLOCK_IN_USE);
@@ -1112,7 +1370,7 @@ static void link_to_changed_list(KEY_CAC
not linked in the LRU ring.
*/
-static void link_block(KEY_CACHE *keycache, BLOCK_LINK *block, my_bool hot,
+static void link_block(S_KEY_CACHE_CB *keycache, BLOCK_LINK *block, my_bool hot,
my_bool at_end)
{
BLOCK_LINK *ins;
@@ -1233,7 +1491,7 @@ static void link_block(KEY_CACHE *keycac
See NOTES for link_block
*/
-static void unlink_block(KEY_CACHE *keycache, BLOCK_LINK *block)
+static void unlink_block(S_KEY_CACHE_CB *keycache, BLOCK_LINK *block)
{
DBUG_ASSERT((block->status & ~BLOCK_CHANGED) == (BLOCK_READ | BLOCK_IN_USE));
DBUG_ASSERT(block->hash_link); /*backptr to block NULL from free_block()*/
@@ -1291,7 +1549,7 @@ static void unlink_block(KEY_CACHE *keyc
RETURN
void
*/
-static void reg_requests(KEY_CACHE *keycache, BLOCK_LINK *block, int count)
+static void reg_requests(S_KEY_CACHE_CB *keycache, BLOCK_LINK *block, int count)
{
DBUG_ASSERT(block->status & BLOCK_IN_USE);
DBUG_ASSERT(block->hash_link);
@@ -1334,7 +1592,7 @@ static void reg_requests(KEY_CACHE *keyc
not linked in the LRU ring.
*/
-static void unreg_request(KEY_CACHE *keycache,
+static void unreg_request(S_KEY_CACHE_CB *keycache,
BLOCK_LINK *block, int at_end)
{
DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
@@ -1343,7 +1601,11 @@ static void unreg_request(KEY_CACHE *key
DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
DBUG_ASSERT(!block->next_used);
DBUG_ASSERT(!block->prev_used);
- if (! --block->requests)
+ /*
+ Unregister the request, but do not link erroneous blocks into the
+ LRU ring.
+ */
+ if (!--block->requests && !(block->status & BLOCK_ERROR))
{
my_bool hot;
if (block->hits_left)
@@ -1419,7 +1681,7 @@ static void remove_reader(BLOCK_LINK *bl
signals on its termination
*/
-static void wait_for_readers(KEY_CACHE *keycache,
+static void wait_for_readers(S_KEY_CACHE_CB *keycache,
BLOCK_LINK *block)
{
#ifdef THREAD
@@ -1469,7 +1731,7 @@ static inline void link_hash(HASH_LINK *
Remove a hash link from the hash table
*/
-static void unlink_hash(KEY_CACHE *keycache, HASH_LINK *hash_link)
+static void unlink_hash(S_KEY_CACHE_CB *keycache, HASH_LINK *hash_link)
{
KEYCACHE_DBUG_PRINT("unlink_hash", ("fd: %u pos_ %lu #requests=%u",
(uint) hash_link->file,(ulong) hash_link->diskpos, hash_link->requests));
@@ -1525,7 +1787,7 @@ static void unlink_hash(KEY_CACHE *keyca
Get the hash link for a page
*/
-static HASH_LINK *get_hash_link(KEY_CACHE *keycache,
+static HASH_LINK *get_hash_link(S_KEY_CACHE_CB *keycache,
int file, my_off_t filepos)
{
reg1 HASH_LINK *hash_link, **start;
@@ -1646,7 +1908,7 @@ restart:
waits until first of this operations links any block back.
*/
-static BLOCK_LINK *find_key_block(KEY_CACHE *keycache,
+static BLOCK_LINK *find_key_block(S_KEY_CACHE_CB *keycache,
File file, my_off_t filepos,
int init_hits_left,
int wrmode, int *page_st)
@@ -1716,6 +1978,7 @@ restart:
- block assigned but not yet read from file (invalid data).
*/
+#ifdef THREAD
if (keycache->in_resize)
{
/* This is a request during a resize operation */
@@ -1957,6 +2220,9 @@ restart:
}
DBUG_RETURN(0);
}
+#else /* THREAD */
+ DBUG_ASSERT(!keycache->in_resize);
+#endif
if (page_status == PAGE_READ &&
(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
@@ -2210,9 +2476,9 @@ restart:
thread might change the block->hash_link value
*/
error= my_pwrite(block->hash_link->file,
- block->buffer+block->offset,
+ block->buffer + block->offset,
block->length - block->offset,
- block->hash_link->diskpos+ block->offset,
+ block->hash_link->diskpos + block->offset,
MYF(MY_NABP | MY_WAIT_IF_FULL));
keycache_pthread_mutex_lock(&keycache->cache_lock);
@@ -2402,7 +2668,7 @@ restart:
portion is less than read_length, but not less than min_length.
*/
-static void read_block(KEY_CACHE *keycache,
+static void read_block(S_KEY_CACHE_CB *keycache,
BLOCK_LINK *block, uint read_length,
uint min_length, my_bool primary)
{
@@ -2490,43 +2756,62 @@ static void read_block(KEY_CACHE *keycac
/*
- Read a block of data from a cached file into a buffer;
+ Read a block of data from a simple key cache into a buffer
SYNOPSIS
- key_cache_read()
- keycache pointer to a key cache data structure
- file handler for the file for the block of data to be read
- filepos position of the block of data in the file
- level determines the weight of the data
- buff buffer to where the data must be placed
- length length of the buffer
- block_length length of the block in the key cache buffer
- return_buffer return pointer to the key cache buffer with the data
+ s_key_cache_read()
+ keycache_cb pointer to the control block of a simple key cache
+ file handler for the file for the block of data to be read
+ filepos position of the block of data in the file
+ level determines the weight of the data
+ buff buffer to where the data must be placed
+ length length of the buffer
+ block_length length of the read data from a key cache block
+ return_buffer return pointer to the key cache buffer with the data
+ DESCRIPTION
+ This function is the implementation of the key_cache_read interface
+ function that is employed by simple (non-partitioned) key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type S_KEY_CACHE_CB for a simple key
+ cache.
+ In a general case the function reads a block of data from the key cache
+ into the buffer buff of the size specified by the parameter length. The
+ beginning of the block of data to be read is specified by the parameters
+ file and filepos. The length of the read data is the same as the length
+ of the buffer. The data is read into the buffer in key_cache_block_size
+ increments. If the next portion of the data is not found in any key cache
+ block, first it is read from file into the key cache.
+ If the parameter return_buffer is not ignored and its value is TRUE, and
+ the data to be read of the specified size block_length can be read from one
+ key cache buffer, then the function returns a pointer to the data in the
+ key cache buffer.
+ The function takes into account the parameters block_length and return_buffer
+ only in a single-threaded environment.
+ The parameter 'level' is used only by the midpoint insertion strategy
+ when the data or its portion cannot be found in the key cache.
+
RETURN VALUE
- Returns address from where the data is placed if sucessful, 0 - otherwise.
+ Returns address from where the data is placed if successful, 0 - otherwise.
- NOTES.
- The function ensures that a block of data of size length from file
- positioned at filepos is in the buffers for some key cache blocks.
- Then the function either copies the data into the buffer buff, or,
- if return_buffer is TRUE, it just returns the pointer to the key cache
- buffer with the data.
+ NOTES
Filepos must be a multiple of 'block_length', but it doesn't
have to be a multiple of key_cache_block_size;
+
*/
-uchar *key_cache_read(KEY_CACHE *keycache,
- File file, my_off_t filepos, int level,
- uchar *buff, uint length,
- uint block_length __attribute__((unused)),
- int return_buffer __attribute__((unused)))
+uchar *s_key_cache_read(void *keycache_cb,
+ File file, my_off_t filepos, int level,
+ uchar *buff, uint length,
+ uint block_length __attribute__((unused)),
+ int return_buffer __attribute__((unused)))
{
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
my_bool locked_and_incremented= FALSE;
int error=0;
uchar *start= buff;
- DBUG_ENTER("key_cache_read");
+ DBUG_ENTER("s_key_cache_read");
DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u",
(uint) file, (ulong) filepos, length));
@@ -2536,7 +2821,6 @@ uchar *key_cache_read(KEY_CACHE *keycach
reg1 BLOCK_LINK *block;
uint read_length;
uint offset;
- uint status;
int page_st;
/*
@@ -2570,10 +2854,12 @@ uchar *key_cache_read(KEY_CACHE *keycach
/* Read data in key_cache_block_size increments */
do
{
- /* Cache could be disabled in a later iteration. */
-
+ /* Cache could be disabled in a later iteration. */
if (!keycache->can_be_used)
- goto no_key_cache;
+ {
+ KEYCACHE_DBUG_PRINT("key_cache_read", ("keycache cannot be used"));
+ goto no_key_cache;
+ }
/* Start reading at the beginning of the cache block. */
filepos-= offset;
/* Do not read beyond the end of the cache block. */
@@ -2634,7 +2920,7 @@ uchar *key_cache_read(KEY_CACHE *keycach
}
/* block status may have added BLOCK_ERROR in the above 'if'. */
- if (!((status= block->status) & BLOCK_ERROR))
+ if (!(block->status & BLOCK_ERROR))
{
#ifndef THREAD
if (! return_buffer)
@@ -2660,14 +2946,22 @@ uchar *key_cache_read(KEY_CACHE *keycach
remove_reader(block);
- /*
- Link the block into the LRU ring if it's the last submitted
- request for the block. This enables eviction for the block.
- */
- unreg_request(keycache, block, 1);
+ /* Error injection for coverage testing. */
+ DBUG_EXECUTE_IF("key_cache_read_block_error",
+ block->status|= BLOCK_ERROR;);
- if (status & BLOCK_ERROR)
+ /* Do not link erroneous blocks into the LRU ring, but free them. */
+ if (!(block->status & BLOCK_ERROR))
+ {
+ /*
+ Link the block into the LRU ring if it's the last submitted
+ request for the block. This enables eviction for the block.
+ */
+ unreg_request(keycache, block, 1);
+ }
+ else
{
+ free_block(keycache, block);
error= 1;
break;
}
@@ -2677,7 +2971,7 @@ uchar *key_cache_read(KEY_CACHE *keycach
if (return_buffer)
DBUG_RETURN(block->buffer);
#endif
- next_block:
+ next_block:
buff+= read_length;
filepos+= read_length+offset;
offset= 0;
@@ -2685,6 +2979,7 @@ uchar *key_cache_read(KEY_CACHE *keycach
} while ((length-= read_length));
goto end;
}
+ KEYCACHE_DBUG_PRINT("key_cache_read", ("keycache not initialized"));
no_key_cache:
/* Key cache is not used */
@@ -2705,34 +3000,55 @@ end:
dec_counter_for_resize_op(keycache);
keycache_pthread_mutex_unlock(&keycache->cache_lock);
}
+ DBUG_PRINT("exit", ("error: %d", error ));
DBUG_RETURN(error ? (uchar*) 0 : start);
}
/*
- Insert a block of file data from a buffer into key cache
+ Insert a block of file data from a buffer into a simple key cache
SYNOPSIS
- key_cache_insert()
- keycache pointer to a key cache data structure
+ s_key_cache_insert()
+ keycache_cb pointer to the control block of a simple key cache
file handler for the file to insert data from
filepos position of the block of data in the file to insert
level determines the weight of the data
buff buffer to read data from
length length of the data in the buffer
- NOTES
- This is used by MyISAM to move all blocks from a index file to the key
- cache
-
+ DESCRIPTION
+ This function is the implementation of the key_cache_insert interface
+ function that is employed by simple (non-partitioned) key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type S_KEY_CACHE_CB for a simple key
+ cache.
+ The function writes a block of file data from a buffer into the key cache.
+ The buffer is specified with the parameters buff and length - the pointer
+ to the beginning of the buffer and its size respectively. It's assumed
+ the buffer contains the data from 'file' taken from the position
+ filepos. The data is copied from the buffer in key_cache_block_size
+ increments.
+ The parameter level is used to set one characteristic for the key buffers
+ loaded with the data from buff. The characteristic is used only by the
+ midpoint insertion strategy.
+
RETURN VALUE
0 if a success, 1 - otherwise.
+
+ NOTES
+ The function is used by MyISAM to move all blocks from an index file to
+ the key cache. It can be performed in parallel with reading the file data
+ from the key buffers by other threads.
+
*/
-int key_cache_insert(KEY_CACHE *keycache,
- File file, my_off_t filepos, int level,
- uchar *buff, uint length)
+static
+int s_key_cache_insert(void *keycache_cb,
+ File file, my_off_t filepos, int level,
+ uchar *buff, uint length)
{
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
int error= 0;
DBUG_ENTER("key_cache_insert");
DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u",
@@ -2916,16 +3232,25 @@ int key_cache_insert(KEY_CACHE *keycache
remove_reader(block);
- /*
- Link the block into the LRU ring if it's the last submitted
- request for the block. This enables eviction for the block.
- */
- unreg_request(keycache, block, 1);
-
- error= (block->status & BLOCK_ERROR);
+ /* Error injection for coverage testing. */
+ DBUG_EXECUTE_IF("key_cache_insert_block_error",
+ block->status|= BLOCK_ERROR; errno=EIO;);
- if (error)
+ /* Do not link erroneous blocks into the LRU ring, but free them. */
+ if (!(block->status & BLOCK_ERROR))
+ {
+ /*
+ Link the block into the LRU ring if it's the last submitted
+ request for the block. This enables eviction for the block.
+ */
+ unreg_request(keycache, block, 1);
+ }
+ else
+ {
+ free_block(keycache, block);
+ error= 1;
break;
+ }
buff+= read_length;
filepos+= read_length+offset;
@@ -2943,43 +3268,65 @@ int key_cache_insert(KEY_CACHE *keycache
/*
- Write a buffer into a cached file.
+ Write a buffer into a simple key cache
SYNOPSIS
- key_cache_write()
- keycache pointer to a key cache data structure
- file handler for the file to write data to
- filepos position in the file to write data to
- level determines the weight of the data
- buff buffer with the data
- length length of the buffer
- dont_write if is 0 then all dirty pages involved in writing
- should have been flushed from key cache
+ s_key_cache_write()
+ keycache_cb pointer to the control block of a simple key cache
+ file handler for the file to write data to
+ file_extra maps of key cache partitions containing
+ dirty pages from file
+ filepos position in the file to write data to
+ level determines the weight of the data
+ buff buffer with the data
+ length length of the buffer
+ dont_write if is 0 then all dirty pages involved in writing
+ should have been flushed from key cache
+ DESCRIPTION
+ This function is the implementation of the key_cache_write interface
+ function that is employed by simple (non-partitioned) key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type S_KEY_CACHE_CB for a simple key
+ cache.
+ In a general case the function copies data from a buffer into the key
+ cache. The buffer is specified with the parameters buff and length -
+ the pointer to the beginning of the buffer and its size respectively.
+ It's assumed the buffer contains the data to be written into 'file'
+ starting from the position filepos. The data is copied from the buffer
+ in key_cache_block_size increments.
+ If the value of the parameter dont_write is FALSE then the function
+ also writes the data into file.
+ The parameter level is used to set one characteristic for the key buffers
+ filled with the data from buff. The characteristic is employed only by
+ the midpoint insertion strategy.
+ The parameter file_extra currently makes sense only for simple key caches
+ that are elements of a partitioned key cache. It provides a pointer to the
+ shared bitmap of the partitions that may contain dirty pages for the file.
+ This bitmap is used to optimize the function p_flush_key_blocks.
+
RETURN VALUE
0 if a success, 1 - otherwise.
- NOTES.
- The function copies the data of size length from buff into buffers
- for key cache blocks that are assigned to contain the portion of
- the file starting with position filepos.
- It ensures that this data is flushed to the file if dont_write is FALSE.
- Filepos must be a multiple of 'block_length', but it doesn't
- have to be a multiple of key_cache_block_size;
+ NOTES
+ This implementation exploits the fact that the function is called only
+ when a thread has got an exclusive lock for the key file.
- dont_write is always TRUE in the server (info->lock_type is never F_UNLCK).
*/
-int key_cache_write(KEY_CACHE *keycache,
- File file, my_off_t filepos, int level,
- uchar *buff, uint length,
- uint block_length __attribute__((unused)),
- int dont_write)
+static
+int s_key_cache_write(void *keycache_cb,
+ File file, void *file_extra __attribute__((unused)),
+ my_off_t filepos, int level,
+ uchar *buff, uint length,
+ uint block_length __attribute__((unused)),
+ int dont_write)
{
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
my_bool locked_and_incremented= FALSE;
int error=0;
- DBUG_ENTER("key_cache_write");
+ DBUG_ENTER("s_key_cache_write");
DBUG_PRINT("enter",
("fd: %u pos: %lu length: %u block_length: %u"
" key_block_length: %u",
@@ -3206,14 +3553,24 @@ int key_cache_write(KEY_CACHE *keycache,
*/
remove_reader(block);
- /*
- Link the block into the LRU ring if it's the last submitted
- request for the block. This enables eviction for the block.
- */
- unreg_request(keycache, block, 1);
+ /* Error injection for coverage testing. */
+ DBUG_EXECUTE_IF("key_cache_write_block_error",
+ block->status|= BLOCK_ERROR;);
- if (block->status & BLOCK_ERROR)
+ /* Do not link erroneous blocks into the LRU ring, but free them. */
+ if (!(block->status & BLOCK_ERROR))
+ {
+ /*
+ Link the block into the LRU ring if it's the last submitted
+ request for the block. This enables eviction for the block.
+ */
+ unreg_request(keycache, block, 1);
+ }
+ else
{
+ /* Pretend a "clean" block to avoid complications. */
+ block->status&= ~(BLOCK_CHANGED);
+ free_block(keycache, block);
error= 1;
break;
}
@@ -3284,12 +3641,13 @@ end:
Block must have a request registered on it.
*/
-static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block)
+static void free_block(S_KEY_CACHE_CB *keycache, BLOCK_LINK *block)
{
KEYCACHE_THREAD_TRACE("free block");
KEYCACHE_DBUG_PRINT("free_block",
- ("block %u to be freed, hash_link %p",
- BLOCK_NUMBER(block), block->hash_link));
+ ("block %u to be freed, hash_link %p status: %u",
+ BLOCK_NUMBER(block), block->hash_link,
+ block->status));
/*
Assert that the block is not free already. And that it is in a clean
state. Note that the block might just be assigned to a hash_link and
@@ -3371,10 +3729,14 @@ static void free_block(KEY_CACHE *keycac
if (block->status & BLOCK_IN_EVICTION)
return;
- /* Here the block must be in the LRU ring. Unlink it again. */
- DBUG_ASSERT(block->next_used && block->prev_used &&
- *block->prev_used == block);
- unlink_block(keycache, block);
+ /* Error blocks are not put into the LRU ring. */
+ if (!(block->status & BLOCK_ERROR))
+ {
+ /* Here the block must be in the LRU ring. Unlink it again. */
+ DBUG_ASSERT(block->next_used && block->prev_used &&
+ *block->prev_used == block);
+ unlink_block(keycache, block);
+ }
if (block->temperature == BLOCK_WARM)
keycache->warm_blocks--;
block->temperature= BLOCK_COLD;
@@ -3419,7 +3781,7 @@ static int cmp_sec_link(BLOCK_LINK **a,
free used blocks if requested
*/
-static int flush_cached_blocks(KEY_CACHE *keycache,
+static int flush_cached_blocks(S_KEY_CACHE_CB *keycache,
File file, BLOCK_LINK **cache,
BLOCK_LINK **end,
enum flush_type type)
@@ -3463,10 +3825,9 @@ static int flush_cached_blocks(KEY_CACHE
(BLOCK_READ | BLOCK_IN_FLUSH | BLOCK_CHANGED | BLOCK_IN_USE));
block->status|= BLOCK_IN_FLUSHWRITE;
keycache_pthread_mutex_unlock(&keycache->cache_lock);
- error= my_pwrite(file,
- block->buffer+block->offset,
+ error= my_pwrite(file, block->buffer + block->offset,
block->length - block->offset,
- block->hash_link->diskpos+ block->offset,
+ block->hash_link->diskpos + block->offset,
MYF(MY_NABP | MY_WAIT_IF_FULL));
keycache_pthread_mutex_lock(&keycache->cache_lock);
keycache->global_cache_write++;
@@ -3527,7 +3888,7 @@ static int flush_cached_blocks(KEY_CACHE
/*
- flush all key blocks for a file to disk, but don't do any mutex locks.
+ Flush all key blocks for a file to disk, but don't do any mutex locks
SYNOPSIS
flush_key_blocks_int()
@@ -3549,7 +3910,7 @@ static int flush_cached_blocks(KEY_CACHE
1 error
*/
-static int flush_key_blocks_int(KEY_CACHE *keycache,
+static int flush_key_blocks_int(S_KEY_CACHE_CB *keycache,
File file, enum flush_type type)
{
BLOCK_LINK *cache_buff[FLUSH_CACHE],**cache;
@@ -3986,23 +4347,49 @@ err:
/*
- Flush all blocks for a file to disk
+ Flush all blocks for a file from key buffers of a simple key cache
SYNOPSIS
- flush_key_blocks()
- keycache pointer to a key cache data structure
- file handler for the file to flush to
- flush_type type of the flush
+ s_flush_key_blocks()
+ keycache_cb pointer to the control block of a simple key cache
+ file handler for the file to flush to
+ file_extra maps of key cache partitions containing
+ dirty pages from file (not used)
+ flush_type type of the flush operation
+ DESCRIPTION
+ This function is the implementation of the flush_key_blocks interface
+ function that is employed by simple (non-partitioned) key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type S_KEY_CACHE_CB for a simple key
+ cache.
+ In a general case the function flushes the data from all dirty key
+ buffers related to the file 'file' into this file. The function does
+ exactly this if the value of the parameter type is FLUSH_KEEP. If the
+ value of this parameter is FLUSH_RELEASE, the function additionally
+ releases the key buffers containing data from 'file' for new usage.
+ If the value of the parameter type is FLUSH_IGNORE_CHANGED the function
+ just releases the key buffers containing data from 'file'.
+ The parameter file_extra currently is not used by this function.
+
RETURN
0 ok
1 error
+
+ NOTES
+ This implementation exploits the fact that the function is called only
+ when a thread has got an exclusive lock for the key file.
+
*/
-int flush_key_blocks(KEY_CACHE *keycache,
- File file, enum flush_type type)
+static
+int s_flush_key_blocks(void *keycache_cb,
+ File file,
+ void *file_extra __attribute__((unused)),
+ enum flush_type type)
{
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
int res= 0;
DBUG_ENTER("flush_key_blocks");
DBUG_PRINT("enter", ("keycache: 0x%lx", (long) keycache));
@@ -4055,7 +4442,7 @@ int flush_key_blocks(KEY_CACHE *keycache
!= 0 Error
*/
-static int flush_all_key_blocks(KEY_CACHE *keycache)
+static int flush_all_key_blocks(S_KEY_CACHE_CB *keycache)
{
BLOCK_LINK *block;
uint total_found;
@@ -4158,37 +4545,45 @@ static int flush_all_key_blocks(KEY_CACH
/*
- Reset the counters of a key cache.
+ Reset the counters of a simple key cache
SYNOPSIS
- reset_key_cache_counters()
- name the name of a key cache
- key_cache pointer to the key kache to be reset
+ s_reset_key_cache_counters()
+ name the name of a key cache
+ keycache_cb pointer to the control block of a simple key cache
DESCRIPTION
- This procedure is used by process_key_caches() to reset the counters of all
- currently used key caches, both the default one and the named ones.
+ This function is the implementation of the reset_key_cache_counters
+ interface function that is employed by simple (non-partitioned) key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type S_KEY_CACHE_CB for a simple key cache.
+ This function resets the values of all statistical counters for the key
+ cache to 0.
+ The parameter name is currently not used.
RETURN
0 on success (always because it can't fail)
+
*/
-int reset_key_cache_counters(const char *name __attribute__((unused)),
- KEY_CACHE *key_cache)
+static
+int s_reset_key_cache_counters(const char *name __attribute__((unused)),
+ void *keycache_cb)
{
- DBUG_ENTER("reset_key_cache_counters");
- if (!key_cache->key_cache_inited)
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
+ DBUG_ENTER("s_reset_key_cache_counters");
+ if (!keycache->key_cache_inited)
{
DBUG_PRINT("info", ("Key cache %s not initialized.", name));
DBUG_RETURN(0);
}
DBUG_PRINT("info", ("Resetting counters for key cache %s.", name));
- key_cache->global_blocks_changed= 0; /* Key_blocks_not_flushed */
- key_cache->global_cache_r_requests= 0; /* Key_read_requests */
- key_cache->global_cache_read= 0; /* Key_reads */
- key_cache->global_cache_w_requests= 0; /* Key_write_requests */
- key_cache->global_cache_write= 0; /* Key_writes */
+ keycache->global_blocks_changed= 0; /* Key_blocks_not_flushed */
+ keycache->global_cache_r_requests= 0; /* Key_read_requests */
+ keycache->global_cache_read= 0; /* Key_reads */
+ keycache->global_cache_w_requests= 0; /* Key_write_requests */
+ keycache->global_cache_write= 0; /* Key_writes */
DBUG_RETURN(0);
}
@@ -4197,7 +4592,7 @@ int reset_key_cache_counters(const char
/*
Test if disk-cache is ok
*/
-static void test_key_cache(KEY_CACHE *keycache __attribute__((unused)),
+static void test_key_cache(S_KEY_CACHE_CB *keycache __attribute__((unused)),
const char *where __attribute__((unused)),
my_bool lock __attribute__((unused)))
{
@@ -4211,7 +4606,7 @@ static void test_key_cache(KEY_CACHE *ke
#define MAX_QUEUE_LEN 100
-static void keycache_dump(KEY_CACHE *keycache)
+static void keycache_dump(S_KEY_CACHE_CB *keycache)
{
FILE *keycache_dump_file=fopen(KEYCACHE_DUMP_FILE, "w");
struct st_my_thread_var *last;
@@ -4404,8 +4799,8 @@ static void keycache_debug_print(const c
va_start(args,fmt);
if (keycache_debug_log)
{
- VOID(vfprintf(keycache_debug_log, fmt, args));
- VOID(fputc('\n',keycache_debug_log));
+ (void) vfprintf(keycache_debug_log, fmt, args);
+ (void) fputc('\n',keycache_debug_log);
}
va_end(args);
}
@@ -4451,7 +4846,7 @@ static int fail_hlink(HASH_LINK *hlink)
return 0; /* Let the assert fail. */
}
-static int cache_empty(KEY_CACHE *keycache)
+static int cache_empty(S_KEY_CACHE_CB *keycache)
{
int errcnt= 0;
int idx;
@@ -4489,3 +4884,1675 @@ static int cache_empty(KEY_CACHE *keycac
}
#endif
+
+/*
+ Get statistics for a simple key cache
+
+ SYNOPSIS
+ get_key_cache_statistics()
+ keycache_cb pointer to the control block of a simple key cache
+ partition_no partition number (not used)
+ key_cache_stats OUT pointer to the structure for the returned statistics
+
+ DESCRIPTION
+ This function is the implementation of the get_key_cache_statistics
+ interface function that is employed by simple (non-partitioned) key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type S_KEY_CACHE_CB for a simple key cache.
+ This function returns the statistical data for the key cache.
+ The parameter partition_no is not used by this function.
+
+ RETURN
+ none
+
+*/
+
+static
+void s_get_key_cache_statistics(void *keycache_cb,
+ uint partition_no __attribute__((unused)),
+ KEY_CACHE_STATISTICS *key_cache_stats)
+{
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
+ DBUG_ENTER("s_get_key_cache_statistics");
+
+ key_cache_stats->mem_size= (longlong) keycache->key_cache_mem_size;
+ key_cache_stats->block_size= (longlong) keycache->key_cache_block_size;
+ key_cache_stats->blocks_used= keycache->blocks_used;
+ key_cache_stats->blocks_unused= keycache->blocks_unused;
+ key_cache_stats->blocks_changed= keycache->global_blocks_changed;
+ key_cache_stats->read_requests= keycache->global_cache_r_requests;
+ key_cache_stats->reads= keycache->global_cache_read;
+ key_cache_stats->write_requests= keycache->global_cache_w_requests;
+ key_cache_stats->writes= keycache->global_cache_write;
+ DBUG_VOID_RETURN;
+}
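As a quick illustration of how a caller might consume the statistics filled in above, here is a minimal sketch (separate from the patch; the name demo_key_cache_miss_ratio is invented) that derives a miss ratio from the two read counters assigned by s_get_key_cache_statistics:

  static double demo_key_cache_miss_ratio(KEY_CACHE_STATISTICS *stats)
  {
    /* 'reads' counts physical reads, 'read_requests' counts all lookups */
    if (!stats->read_requests)
      return 0.0;
    return (double) stats->reads / (double) stats->read_requests;
  }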
+
+
+static size_t s_key_cache_stat_var_offsets[]=
+{
+ offsetof(S_KEY_CACHE_CB, blocks_used),
+ offsetof(S_KEY_CACHE_CB, blocks_unused),
+ offsetof(S_KEY_CACHE_CB, global_blocks_changed),
+ offsetof(S_KEY_CACHE_CB, global_cache_w_requests),
+ offsetof(S_KEY_CACHE_CB, global_cache_write),
+ offsetof(S_KEY_CACHE_CB, global_cache_r_requests),
+ offsetof(S_KEY_CACHE_CB, global_cache_read)
+};
+
+
+/*
+ Get the value of a statistical variable for a simple key cache
+
+ SYNOPSIS
+ s_get_key_cache_stat_value()
+ keycache_cb pointer to the control block of a simple key cache
+ var_no the ordered number of a statistical variable
+
+ DESCRIPTION
+ This function is the implementation of the s_get_key_cache_stat_value
+ interface function that is employed by simple (non-partitioned) key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type S_KEY_CACHE_CB for a simple key cache.
+ This function returns the value of the statistical variable var_no
+ for this key cache. The variables are numbered starting from 0 to 6.
+
+ RETURN
+ The value of the specified statistical variable
+
+*/
+
+static
+ulonglong s_get_key_cache_stat_value(void *keycache_cb, uint var_no)
+{
+ S_KEY_CACHE_CB *keycache= (S_KEY_CACHE_CB *) keycache_cb;
+ size_t var_ofs= s_key_cache_stat_var_offsets[var_no];
+ ulonglong res= 0;
+ DBUG_ENTER("s_get_key_cache_stat_value");
+
+ if (var_no < 3)
+ res= (ulonglong) (*(long *) ((char *) keycache + var_ofs));
+ else
+ res= *(ulonglong *) ((char *) keycache + var_ofs);
+
+ DBUG_RETURN(res);
+}
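The offset-table lookup used above is a generic technique; the following self-contained sketch (separate from the patch, with an invented struct and invented field names) shows the same pattern with standard offsetof:

  #include <stddef.h>

  struct demo_stats
  {
    long blocks_used;                  /* counters kept as long            */
    unsigned long long cache_read;     /* counters kept as 64-bit integers */
  };

  static const size_t demo_offsets[]=
  {
    offsetof(struct demo_stats, blocks_used),
    offsetof(struct demo_stats, cache_read)
  };

  static unsigned long long demo_stat_value(struct demo_stats *s, unsigned var_no)
  {
    char *base= (char *) s;
    /* variable 0 is stored as long, variable 1 as unsigned long long */
    if (var_no == 0)
      return (unsigned long long) *(long *) (base + demo_offsets[0]);
    return *(unsigned long long *) (base + demo_offsets[1]);
  }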
+
+
+/*
+ The array of pointers to the key cache interface functions used for simple
+ key caches. Any simple key cache object, including those incorporated into
+ partitioned key caches, uses this array.
+
+ The current implementation of these functions would allow them to be called
+ from the MySQL server code directly, but we don't do that.
+*/
+
+static KEY_CACHE_FUNCS s_key_cache_funcs =
+{
+ s_init_key_cache,
+ s_resize_key_cache,
+ s_change_key_cache_param,
+ s_key_cache_read,
+ s_key_cache_insert,
+ s_key_cache_write,
+ s_flush_key_blocks,
+ s_reset_key_cache_counters,
+ s_end_key_cache,
+ s_get_key_cache_statistics,
+ s_get_key_cache_stat_value
+};
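To make the role of this table concrete, the sketch below (separate from the patch; resize_func and demo_resize are invented names) shows how a caller that keeps only an opaque control-block pointer plus a matching function pointer can drive either a simple or a partitioned cache with the same call:

  typedef int (*resize_func)(void *keycache_cb, uint key_cache_block_size,
                             size_t use_mem, uint division_limit,
                             uint age_threshold);

  static int demo_resize(void *cb, resize_func resize)
  {
    /* cb may point to an S_KEY_CACHE_CB or a P_KEY_CACHE_CB;
       resize would then be s_resize_key_cache or p_resize_key_cache. */
    return resize(cb, 1024, (size_t) 8*1024*1024, 0, 0);
  }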
+
+
+/******************************************************************************
+ Partitioned Key Cache Module
+
+ The module contains implementations of all key cache interface functions
+ employed by partitioned key caches.
+
+ A partitioned key cache is a collection of structures for simple key caches
+ called key cache partitions. Any page from a file can be placed into a buffer
+ of only one partition. The number of the partition is calculated from
+ the file number and the position of the page in the file, and it's always the
+ same for the page. The function that maps pages into partitions takes care
+ of even distribution of pages among partitions.
+
+ Partitioned key caches mitigate one of the major problems of a simple key
+ cache: thread contention for the key cache lock (mutex). Every call of a
+ key cache interface function must acquire this lock. So threads compete for
+ this lock even in the case when they have acquired shared locks for the file
+ and the pages they want to read are already in the key cache buffers.
+ When working with a partitioned key cache any key cache interface function
+ that needs only one page has to acquire the key cache lock only for the
+ partition the page is ascribed to. This reduces the chances that threads
+ compete for the same key cache lock. Unfortunately, if we use a partitioned
+ key cache with N partitions for B-tree indexes we can't say that the chances
+ become N times smaller. The fact is that any index lookup operation requires
+ reading the root page which, for any given index, is always ascribed to the
+ same partition. Resolving this problem would require more sophisticated
+ mechanisms for working with root pages.
+
+ Currently the number of partitions in a partitioned key cache is limited
+ to 64. We could increase this limit, but then we would also have to increase
+ accordingly the size of the bitmap dirty_part_map in the MYISAM_SHARE
+ structure.
+
+******************************************************************************/
+
+/* Control block for a partitioned key cache */
+
+typedef struct st_p_key_cache_cb
+{
+ my_bool key_cache_inited; /*<=> control block is allocated */
+ S_KEY_CACHE_CB **partition_array; /* array of the key cache partitions */
+ uint partitions; /* number of partitions in the key cache */
+ size_t key_cache_mem_size; /* specified size of the cache memory */
+ uint key_cache_block_size; /* size of the page buffer of a cache block */
+} P_KEY_CACHE_CB;
+
+static
+void p_end_key_cache(void *keycache_cb, my_bool cleanup);
+
+/*
+ Determine the partition to which the index block to read is ascribed
+
+ SYNOPSIS
+ get_key_cache_partition()
+ keycache pointer to the control block of a partitioned key cache
+ file handler for the file for the block of data to be read
+ filepos position of the block of data in the file
+
+ DESCRIPTION
+ The function determines the number of the partition in whose buffer the
+ block from 'file' at the position filepos has to be placed for reading.
+ The function returns the control block of the simple key cache for this
+ partition to the caller.
+
+ RETURN VALUE
+ The pointer to the control block of the partition to which the specified
+ file block is ascribed.
+*/
+
+static
+S_KEY_CACHE_CB *get_key_cache_partition(P_KEY_CACHE_CB *keycache,
+ File file, my_off_t filepos)
+{
+ uint i= KEYCACHE_BASE_EXPR( file, filepos) % keycache->partitions;
+ return keycache->partition_array[i];
+}
+
+
+/*
+ Determine the partition to which the index block to write is ascribed
+
+ SYNOPSIS
+ get_key_cache_partition_for_write()
+ keycache pointer to the control block of a partitioned key cache
+ file handler for the file for the block of data to be read
+ filepos position of the block of data in the file
+ dirty_part_map pointer to the bitmap of dirty partitions for the file
+
+ DESCRIPTION
+ The function determines the number of the partition in whose buffer the
+ block from 'file' at the position filepos has to be placed for writing and
+ marks the partition as dirty in the dirty_part_map bitmap.
+ The function returns the control block of the simple key cache for this
+ partition to the caller.
+
+ RETURN VALUE
+ The pointer to the control block of the partition to which the specified
+ file block is ascribed.
+*/
+
+static
+S_KEY_CACHE_CB *get_key_cache_partition_for_write(P_KEY_CACHE_CB *keycache,
+ File file, my_off_t filepos,
+ ulonglong* dirty_part_map)
+{
+ uint i= KEYCACHE_BASE_EXPR( file, filepos) % keycache->partitions;
+ *dirty_part_map|= ((ulonglong) 1) << i;
+ return keycache->partition_array[i];
+}
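Here is a small sketch (separate from the patch; demo_flush_dirty_partitions is an invented name) of how a flush routine such as p_flush_key_blocks could consult this bitmap to visit only the partitions that may hold dirty pages for a file; the shift is kept at ulonglong width because the map has 64 bits:

  static void demo_flush_dirty_partitions(P_KEY_CACHE_CB *keycache,
                                          ulonglong dirty_part_map)
  {
    uint i;
    for (i= 0; i < keycache->partitions && dirty_part_map; i++)
    {
      if (dirty_part_map & (((ulonglong) 1) << i))
      {
        /* flush partition i here, e.g. via s_flush_key_blocks(...) */
        dirty_part_map&= ~(((ulonglong) 1) << i);
      }
    }
  }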
+
+
+/*
+ Initialize a partitioned key cache
+
+ SYNOPSIS
+ p_init_key_cache()
+ keycache_cb pointer to the control block of a partitioned key cache
+ key_cache_block_size size of blocks to keep cached data
+ use_mem total memory to use for all key cache partitions
+ division_limit division limit (may be zero)
+ age_threshold age threshold (may be zero)
+
+ DESCRIPTION
+ This function is the implementation of the init_key_cache interface function
+ that is employed by partitioned key caches.
+ The function builds and initializes an array of simple key caches, and then
+ initializes the control block structure of the type P_KEY_CACHE_CB that is
+ used for a partitioned key cache. The parameter keycache_cb is supposed to
+ point to this structure. The number of partitions in the partitioned key
+ cache to be built must be passed through the field 'partitions' of this
+ structure. The parameter key_cache_block_size specifies the size of the
+ blocks in the simple key caches to be built. The parameters
+ division_limit and age_threshold determine the initial values of those
+ characteristics of the simple key caches that are used for midpoint
+ insertion strategy. The parameter use_mem specifies the total amount of
+ memory to be allocated for the key cache blocks in all simple key caches
+ and for all auxiliary structures.
+
+ RETURN VALUE
+ total number of blocks in key cache partitions, if successful,
+ <= 0 - otherwise.
+
+ NOTES
+ If keycache->key_cache_inited != 0 then we assume that the memory for
+ the array of partitions has already been allocated.
+
+ It's assumed that no two threads call this function simultaneously
+ referring to the same key cache handle.
+*/
+
+static
+int p_init_key_cache(void *keycache_cb, uint key_cache_block_size,
+ size_t use_mem, uint division_limit,
+ uint age_threshold)
+{
+ int i;
+ size_t mem_per_cache;
+ int cnt;
+ S_KEY_CACHE_CB *partition;
+ S_KEY_CACHE_CB **partition_ptr;
+ P_KEY_CACHE_CB *keycache= (P_KEY_CACHE_CB *) keycache_cb;
+ uint partitions= keycache->partitions;
+ int blocks= -1;
+ DBUG_ENTER("p_init_key_cache");
+
+ keycache->key_cache_block_size = key_cache_block_size;
+
+ if (keycache->key_cache_inited)
+ partition_ptr= keycache->partition_array;
+ else
+ {
+ if(!(partition_ptr=
+ (S_KEY_CACHE_CB **) my_malloc(sizeof(S_KEY_CACHE_CB *) * partitions,
+ MYF(0))))
+ DBUG_RETURN(blocks);
+ keycache->partition_array= partition_ptr;
+ }
+
+ mem_per_cache = use_mem / partitions;
+
+ for (i= 0; i < (int) partitions; i++)
+ {
+ my_bool key_cache_inited= keycache->key_cache_inited;
+ if (key_cache_inited)
+ partition= *partition_ptr;
+ else
+ {
+ if (!(partition= (S_KEY_CACHE_CB *) my_malloc(sizeof(S_KEY_CACHE_CB),
+ MYF(0))))
+ continue;
+ partition->key_cache_inited= 0;
+ }
+
+ if ((cnt= s_init_key_cache(partition,
+ key_cache_block_size, mem_per_cache,
+ division_limit, age_threshold)) <= 0)
+ {
+ s_end_key_cache(partition, 1);
+ my_free((uchar *) partition, MYF(0));
+ partition= 0;
+ if (key_cache_inited)
+ {
+ memmove(partition_ptr, partition_ptr+1,
+ sizeof(partition_ptr)*(partitions-i-1));
+ }
+ if (i == 0)
+ {
+ i--;
+ partitions--;
+ if (partitions)
+ mem_per_cache = use_mem / partitions;
+ }
+ continue;
+ }
+
+ if (blocks < 0)
+ blocks= 0;
+ blocks+= cnt;
+ *partition_ptr++= partition;
+ }
+
+ keycache->partitions= partitions= partition_ptr-keycache->partition_array;
+ keycache->key_cache_mem_size= mem_per_cache * partitions;
+ for (i= 0; i < (int) partitions; i++)
+ keycache->partition_array[i]->hash_factor= partitions;
+
+ keycache->key_cache_inited= 1;
+
+ DBUG_RETURN(blocks);
+}
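Because mem_per_cache is computed with integer division, the recorded key_cache_mem_size can come out slightly smaller than the requested use_mem. A minimal sketch of that arithmetic (separate from the patch; the numbers are just an example):

  static size_t demo_partitioned_mem(size_t use_mem, uint partitions)
  {
    /* With use_mem= 67108864 (64MB) and partitions= 3:
       mem_per_cache= 22369621, so the recorded total is
       22369621 * 3 = 67108863, one byte below the request. */
    size_t mem_per_cache= use_mem / partitions;
    return mem_per_cache * partitions;  /* what key_cache_mem_size records */
  }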
+
+
+/*
+ Resize a partitioned key cache
+
+ SYNOPSIS
+ p_resize_key_cache()
+ keycache_cb pointer to the control block of a partitioned key cache
+ key_cache_block_size size of blocks to keep cached data
+ use_mem total memory to use for the new key cache
+ division_limit new division limit (if not zero)
+ age_threshold new age threshold (if not zero)
+
+ DESCRIPTION
+ This function is the implementation of the resize_key_cache interface
+ function that is employed by partitioned key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type P_KEY_CACHE_CB for the partitioned
+ key cache to be resized.
+ The parameter key_cache_block_size specifies the new size of the blocks in
+ the simple key caches that comprise the partitioned key cache.
+ The parameters division_limit and age_threshold determine the new initial
+ values of those characteristics of the simple key cache that are used for
+ midpoint insertion strategy. The parameter use_mem specifies the total
+ amount of memory to be allocated for the key cache blocks in all new
+ simple key caches and for all auxiliary structures.
+
+ RETURN VALUE
+ number of blocks in the key cache, if successful,
+ 0 - otherwise.
+
+ NOTES.
+ The function first calls s_prepare_resize_key_cache for each simple
+ key cache effectively flushing all dirty pages from it and destroying
+ the key cache. Then p_init_key_cache is called. This call builds the
+ new array of simple key caches containing the same number of
+ elements as the old one. After this the function calls the function
+ s_finish_resize_key_cache for each simple key cache from this array.
+
+ This implementation doesn't block the calls and executions of other
+ functions from the key cache interface. However it assumes that the
+ calls of p_resize_key_cache itself are serialized.
+
+*/
+
+static
+int p_resize_key_cache(void *keycache_cb, uint key_cache_block_size,
+ size_t use_mem, uint division_limit,
+ uint age_threshold)
+{
+ uint i;
+ P_KEY_CACHE_CB *keycache= (P_KEY_CACHE_CB *) keycache_cb;
+ uint partitions= keycache->partitions;
+ my_bool cleanup= use_mem == 0;
+ int blocks= -1;
+ int err= 0;
+ DBUG_ENTER("p_resize_key_cache");
+ if (use_mem == 0)
+ {
+ p_end_key_cache(keycache_cb, 0);
+ DBUG_RETURN(blocks);
+ }
+ for (i= 0; i < partitions; i++)
+ {
+ err|= s_prepare_resize_key_cache(keycache->partition_array[i], 0, 1);
+ }
+ if (!err && use_mem)
+ blocks= p_init_key_cache(keycache_cb, key_cache_block_size, use_mem,
+ division_limit, age_threshold);
+ if (blocks > 0 && !cleanup)
+ {
+ for (i= 0; i < partitions; i++)
+ {
+ s_finish_resize_key_cache(keycache->partition_array[i], 0, 1);
+ }
+ }
+ DBUG_RETURN(blocks);
+}
+
+
+/*
+ Change key cache parameters of a partitioned key cache
+
+ SYNOPSIS
+ p_change_key_cache_param()
+ keycache_cb pointer to the control block of a partitioned key cache
+ division_limit new division limit (if not zero)
+ age_threshold new age threshold (if not zero)
+
+ DESCRIPTION
+ This function is the implementation of the change_key_cache_param interface
+ function that is employed by partitioned key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type P_KEY_CACHE_CB for the partitioned key
+ cache where new values of the division limit and the age threshold used
+ for midpoint insertion strategy are to be set. The parameters
+ division_limit and age_threshold provide these new values.
+
+ RETURN VALUE
+ none
+
+ NOTES
+ The function just calls s_change_key_cache_param for each element from the
+ array of simple caches that comprise the partitioned key cache.
+
+*/
+
+static
+void p_change_key_cache_param(void *keycache_cb, uint division_limit,
+ uint age_threshold)
+{
+ uint i;
+ P_KEY_CACHE_CB *keycache= (P_KEY_CACHE_CB *) keycache_cb;
+ uint partitions= keycache->partitions;
+ DBUG_ENTER("p_change_key_cache_param");
+ for (i= 0; i < partitions; i++)
+ {
+ s_change_key_cache_param(keycache->partition_array[i], division_limit,
+ age_threshold);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Destroy a partitioned key cache
+
+ SYNOPSIS
+ p_end_key_cache()
+ keycache_cb pointer to the control block of a partitioned key cache
+ cleanup <=> complete free (free also control block structures
+ for all simple key caches)
+
+ DESCRIPTION
+ This function is the implementation of the end_key_cache interface
+ function that is employed by partitioned key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type P_KEY_CACHE_CB for the partitioned
+ key cache to be destroyed.
+ The function frees the memory allocated for the cache blocks and
+ auxiliary structures used by simple key caches that comprise the
+ partitioned key cache. If the value of the parameter cleanup is TRUE
+ then even the memory used for control blocks of the simple key caches
+ and the array of pointers to them are freed.
+
+ RETURN VALUE
+ none
+
+*/
+
+static
+void p_end_key_cache(void *keycache_cb, my_bool cleanup)
+{
+ uint i;
+ P_KEY_CACHE_CB *keycache= (P_KEY_CACHE_CB *) keycache_cb;
+ uint partitions= keycache->partitions;
+ DBUG_ENTER("p_end_key_cache");
+ DBUG_PRINT("enter", ("key_cache: 0x%lx", (long) keycache));
+
+ for (i= 0; i < partitions; i++)
+ {
+ s_end_key_cache(keycache->partition_array[i], cleanup);
+ }
+ if (cleanup) {
+ for (i= 0; i < partitions; i++)
+ my_free((uchar*) keycache->partition_array[i], MYF(0));
+ my_free((uchar*) keycache->partition_array, MYF(0));
+ keycache->key_cache_inited= 0;
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Read a block of data from a partitioned key cache into a buffer
+
+ SYNOPSIS
+
+ p_key_cache_read()
+ keycache_cb pointer to the control block of a partitioned key cache
+ file handler for the file for the block of data to be read
+ filepos position of the block of data in the file
+ level determines the weight of the data
+ buff buffer to where the data must be placed
+ length length of the buffer
+ block_length length of the read data from a key cache block
+ return_buffer return pointer to the key cache buffer with the data
+
+ DESCRIPTION
+ This function is the implementation of the key_cache_read interface
+ function that is employed by partitioned key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type P_KEY_CACHE_CB for a partitioned
+ key cache.
+ In a general case the function reads a block of data from the key cache
+ into the buffer buff of the size specified by the parameter length. The
+ beginning of the block of data to be read is specified by the parameters
+ file and filepos. The length of the read data is the same as the length
+ of the buffer. The data is read into the buffer in key_cache_block_size
+ increments. To read each portion the function first finds out in what
+ partition of the key cache this portion(page) is to be saved, and calls
+ s_key_cache_read with the pointer to the corresponding simple key cache as
+ its first parameter.
+ If the parameter return_buffer is not ignored and its value is TRUE, and
+ the data to be read of the specified size block_length can be read from one
+ key cache buffer, then the function returns a pointer to the data in the
+ key cache buffer.
+ The function takes into account the parameters block_length and return_buffer
+ only in a single-threaded environment.
+ The parameter 'level' is used only by the midpoint insertion strategy
+ when the data or its portion cannot be found in the key cache.
+
+ RETURN VALUE
+ Returns address from where the data is placed if successful, 0 - otherwise.
+
+*/
+
+static
+uchar *p_key_cache_read(void *keycache_cb,
+ File file, my_off_t filepos, int level,
+ uchar *buff, uint length,
+ uint block_length __attribute__((unused)),
+ int return_buffer __attribute__((unused)))
+{
+ uint r_length;
+ P_KEY_CACHE_CB *keycache= (P_KEY_CACHE_CB *) keycache_cb;
+ uint offset= (uint) (filepos % keycache->key_cache_block_size);
+ uchar *start= buff;
+ DBUG_ENTER("p_key_cache_read");
+ DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u",
+ (uint) file, (ulong) filepos, length));
+
+#ifndef THREAD
+ if (block_length > keycache->key_cache_block_size || offset)
+ return_buffer=0;
+#endif
+
+ /* Read data in key_cache_block_size increments */
+ do
+ {
+ S_KEY_CACHE_CB *partition= get_key_cache_partition(keycache,
+ file, filepos);
+ uchar *ret_buff= 0;
+ r_length= length;
+ set_if_smaller(r_length, keycache->key_cache_block_size - offset);
+ ret_buff= s_key_cache_read((void *) partition,
+ file, filepos, level,
+ buff, r_length,
+ block_length, return_buffer);
+ if (ret_buff == 0)
+ DBUG_RETURN(0);
+#ifndef THREAD
+ /* This is only true if we were able to read everything in one block */
+ if (return_buffer)
+ DBUG_RETURN(ret_buff);
+#endif
+ filepos+= r_length;
+ buff+= r_length;
+ offset= 0;
+ } while ((length-= r_length));
+
+ DBUG_RETURN(start);
+}
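The splitting loop above can be traced with a small standalone sketch (separate from the patch; demo_read_split is an invented name): the first portion is trimmed to the block boundary, every following portion starts block-aligned, and each portion is directed to the partition that owns its block.

  static void demo_read_split(uint block_size, my_off_t filepos, uint length)
  {
    uint r_length;
    uint offset= (uint) (filepos % block_size);
    do
    {
      r_length= length;
      set_if_smaller(r_length, block_size - offset);
      /* p_key_cache_read calls s_key_cache_read here on the partition
         returned by get_key_cache_partition(keycache, file, filepos) */
      filepos+= r_length;
      offset= 0;
    } while ((length-= r_length));
  }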
+
+
+/*
+ Insert a block of file data from a buffer into a partitioned key cache
+
+ SYNOPSIS
+ p_key_cache_insert()
+ keycache_cb pointer to the control block of a partitioned key cache
+ file handler for the file to insert data from
+ filepos position of the block of data in the file to insert
+ level determines the weight of the data
+ buff buffer to read data from
+ length length of the data in the buffer
+
+ DESCRIPTION
+ This function is the implementation of the key_cache_insert interface
+ function that is employed by partitioned key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type P_KEY_CACHE_CB for a partitioned key
+ cache.
+ The function writes a block of file data from a buffer into the key cache.
+ The buffer is specified with the parameters buff and length - the pointer
+ to the beginning of the buffer and its size respectively. It's assumed
+ that the buffer contains the data from 'file' allocated from the position
+ filepos. The data is copied from the buffer in key_cache_block_size
+ increments. For every portion of data the function finds out in what simple
+ key cache from the array of partitions the data must be stored, and after
+ this calls s_key_cache_insert to copy the data into a key buffer of this
+ simple key cache.
+ The parameter level is used to set one characteristic for the key buffers
+ loaded with the data from buff. The characteristic is used only by the
+ midpoint insertion strategy.
+
+ RETURN VALUE
+ 0 if successful, 1 - otherwise.
+
+ NOTES
+ The function is used by MyISAM to move all blocks from an index file to
+ the key cache. This can be done in parallel with other threads reading the
+ file data from the key buffers.
+
+*/
+
+static
+int p_key_cache_insert(void *keycache_cb,
+ File file, my_off_t filepos, int level,
+ uchar *buff, uint length)
+{
+ uint w_length;
+ P_KEY_CACHE_CB *keycache= (P_KEY_CACHE_CB *) keycache_cb;
+ uint offset= (uint) (filepos % keycache->key_cache_block_size);
+ DBUG_ENTER("p_key_cache_insert");
+ DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u",
+ (uint) file,(ulong) filepos, length));
+
+
+ /* Write data in key_cache_block_size increments */
+ do
+ {
+ S_KEY_CACHE_CB *partition= get_key_cache_partition(keycache,
+ file, filepos);
+ w_length= length;
+ set_if_smaller(w_length, keycache->key_cache_block_size - offset);
+ if (s_key_cache_insert((void *) partition,
+ file, filepos, level,
+ buff, w_length))
+ DBUG_RETURN(1);
+
+ filepos+= w_length;
+ buff+= w_length;
+ offset = 0;
+ } while ((length-= w_length));
+
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Write data from a buffer into a partitioned key cache
+
+ SYNOPSIS
+
+ p_key_cache_write()
+ keycache_cb pointer to the control block of a partitioned key cache
+ file handler for the file to write data to
+ filepos position in the file to write data to
+ level determines the weight of the data
+ buff buffer with the data
+ length length of the buffer
+ dont_write if is 0 then all dirty pages involved in writing
+ should have been flushed from key cache
+ file_extra map of key cache partitions containing
+ dirty pages from file
+
+ DESCRIPTION
+ This function is the implementation of the key_cache_write interface
+ function that is employed by partitioned key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type P_KEY_CACHE_CB for a partitioned
+ key cache.
+ In a general case the function copies data from a buffer into the key
+ cache. The buffer is specified with the parameters buff and length -
+ the pointer to the beginning of the buffer and its size respectively.
+ It's assumed the buffer contains the data to be written into 'file'
+ starting from the position filepos. The data is copied from the buffer
+ in key_cache_block_size increments. For every portion of data the
+ function finds out in what simple key cache from the array of partitions
+ the data must be stored, and after this calls s_key_cache_write to copy
+ the data into a key buffer of this simple key cache.
+ If the value of the parameter dont_write is FALSE then the function
+ also writes the data into file.
+ The parameter level is used to set one characteristic for the key buffers
+ filled with the data from buff. The characteristic is employed only by
+ the midpoint insertion strategy.
+ The parameter file_extra provides a pointer to the shared bitmap of
+ the partitions that may contain dirty pages for the file. This bitmap
+ is used to optimize the function p_flush_key_blocks.
+
+ RETURN VALUE
+ 0 if successful, 1 - otherwise.
+
+ NOTES
+ This implementation exploits the fact that the function is called only
+ when a thread has got an exclusive lock for the key file.
+
+*/
+
+static
+int p_key_cache_write(void *keycache_cb,
+ File file, void *file_extra,
+ my_off_t filepos, int level,
+ uchar *buff, uint length,
+ uint block_length __attribute__((unused)),
+ int dont_write)
+{
+ uint w_length;
+ ulonglong *part_map= (ulonglong *) file_extra;
+ P_KEY_CACHE_CB *keycache= (P_KEY_CACHE_CB *) keycache_cb;
+ uint offset= (uint) (filepos % keycache->key_cache_block_size);
+ DBUG_ENTER("p_key_cache_write");
+ DBUG_PRINT("enter",
+ ("fd: %u pos: %lu length: %u block_length: %u"
+ " key_block_length: %u",
+ (uint) file, (ulong) filepos, length, block_length,
+ keycache ? keycache->key_cache_block_size : 0));
+
+
+ /* Write data in key_cache_block_size increments */
+ do
+ {
+ S_KEY_CACHE_CB *partition= get_key_cache_partition_for_write(keycache,
+ file, filepos,
+ part_map);
+ w_length= length;
+ set_if_smaller(w_length, keycache->key_cache_block_size - offset);
+ if (s_key_cache_write(partition,
+ file, 0, filepos, level,
+ buff, w_length, block_length,
+ dont_write))
+ DBUG_RETURN(1);
+
+ filepos+= w_length;
+ buff+= w_length;
+ offset= 0;
+ } while ((length-= w_length));
+
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Flush all blocks for a file from key buffers of a partitioned key cache
+
+ SYNOPSIS
+
+ p_flush_key_blocks()
+ keycache_cb pointer to the control block of a partitioned key cache
+ file handler for the file to flush to
+ file_extra map of key cache partitions containing
+ dirty pages from file
+ flush_type type of the flush operation
+
+ DESCRIPTION
+ This function is the implementation of the flush_key_blocks interface
+ function that is employed by partitioned key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type P_KEY_CACHE_CB for a partitioned
+ key cache.
+ In a general case the function flushes the data from all dirty key
+ buffers related to the file 'file' into this file. The function does
+ exactly this if the value of the parameter type is FLUSH_KEEP. If the
+ value of this parameter is FLUSH_RELEASE, the function additionally
+ releases the key buffers containing data from 'file' for new usage.
+ If the value of the parameter type is FLUSH_IGNORE_CHANGED the function
+ just releases the key buffers containing data from 'file'.
+ The function performs the operation by calling s_flush_key_blocks
+ for the elements of the array of the simple key caches that comprise
+ the partitioned key_cache. If the value of the parameter type is
+ FLUSH_KEEP s_flush_key_blocks is called only for the partitions with
+ possibly dirty pages marked in the bitmap pointed to by the parameter
+ file_extra.
+
+ RETURN
+ 0 ok
+ 1 error
+
+ NOTES
+ This implementation exploits the fact that the function is called only
+ when a thread has got an exclusive lock for the key file.
+
+*/
+
+static
+int p_flush_key_blocks(void *keycache_cb,
+ File file, void *file_extra,
+ enum flush_type type)
+{
+ uint i;
+ P_KEY_CACHE_CB *keycache= (P_KEY_CACHE_CB *) keycache_cb;
+ uint partitions= keycache->partitions;
+ int err= 0;
+ ulonglong *dirty_part_map= (ulonglong *) file_extra;
+ DBUG_ENTER("p_flush_key_blocks");
+ DBUG_PRINT("enter", ("keycache: 0x%lx", (long) keycache));
+
+ for (i= 0; i < partitions; i++)
+ {
+ S_KEY_CACHE_CB *partition= keycache->partition_array[i];
+ if ((type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE) &&
+ !((*dirty_part_map) & (((ulonglong) 1) << i)))
+ continue;
+ err+= test(s_flush_key_blocks(partition, file, 0, type));
+ }
+ *dirty_part_map= 0;
+
+ if (err > 0)
+ err= 1;
+
+ DBUG_RETURN(err);
+}
+
+
+/*
+ Reset the counters of a partitioned key cache
+
+ SYNOPSIS
+ p_reset_key_cache_counters()
+ name the name of a key cache
+ keycache_cb pointer to the control block of a partitioned key cache
+
+ DESCRIPTION
+ This function is the implementation of the reset_key_cache_counters
+ interface function that is employed by partitioned key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type P_KEY_CACHE_CB for a partitioned
+ key cache.
+ This function resets the values of the statistical counters of the simple
+ key caches comprising the partitioned key cache to 0. It does this by calling
+ s_reset_key_cache_counters for each key cache partition.
+ The parameter name is currently not used.
+
+ RETURN
+ 0 on success (always because it can't fail)
+
+*/
+
+static
+int p_reset_key_cache_counters(const char *name __attribute__((unused)),
+ void *keycache_cb)
+{
+ uint i;
+ P_KEY_CACHE_CB *keycache= (P_KEY_CACHE_CB *) keycache_cb;
+ uint partitions= keycache->partitions;
+ DBUG_ENTER("p_reset_key_cache_counters");
+
+ for (i = 0; i < partitions; i++)
+ {
+ s_reset_key_cache_counters(name, keycache->partition_array[i]);
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Get statistics for a partitioned key cache
+
+ SYNOPSIS
+ p_get_key_cache_statistics()
+ keycache_cb pointer to the control block of a partitioned key cache
+ partition_no partition number to get statistics for
+ key_cache_stats OUT pointer to the structure for the returned statistics
+
+ DESCRIPTION
+ This function is the implementation of the get_key_cache_statistics
+ interface function that is employed by partitioned key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type P_KEY_CACHE_CB for a partitioned
+ key cache.
+ If the value of the parameter partition_no is equal to 0 then aggregated
+ statistics for all partitions are returned in the fields of the
+ structure key_cache_stats of the type KEY_CACHE_STATISTICS. Otherwise
+ the function returns data for the partition number partition_no of the
+ key cache in the structure key_cache_stat. (Here partitions are numbered
+ starting from 1.)
+
+ RETURN
+ none
+
+*/
+
+static
+void p_get_key_cache_statistics(void *keycache_cb, uint partition_no,
+ KEY_CACHE_STATISTICS *key_cache_stats)
+{
+ uint i;
+ S_KEY_CACHE_CB *partition;
+ P_KEY_CACHE_CB *keycache= (P_KEY_CACHE_CB *) keycache_cb;
+ uint partitions= keycache->partitions;
+ DBUG_ENTER("p_get_key_cache_statistics_");
+
+ if (partition_no != 0)
+ {
+ partition= keycache->partition_array[partition_no-1];
+ s_get_key_cache_statistics((void *) partition, 0, key_cache_stats);
+ DBUG_VOID_RETURN;
+ }
+ key_cache_stats->mem_size= (longlong) keycache->key_cache_mem_size;
+ key_cache_stats->block_size= (longlong) keycache->key_cache_block_size;
+ for (i = 0; i < partitions; i++)
+ {
+ partition= keycache->partition_array[i];
+ key_cache_stats->blocks_used+= partition->blocks_used;
+ key_cache_stats->blocks_unused+= partition->blocks_unused;
+ key_cache_stats->blocks_changed+= partition->global_blocks_changed;
+ key_cache_stats->read_requests+= partition->global_cache_r_requests;
+ key_cache_stats->reads+= partition->global_cache_read;
+ key_cache_stats->write_requests+= partition->global_cache_w_requests;
+ key_cache_stats->writes+= partition->global_cache_write;
+ }
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Get the value of a statistical variable for a partitioned key cache
+
+ SYNOPSIS
+ p_get_key_cache_stat_value()
+ keycache_cb pointer to the control block of a partitioned key cache
+ var_no the ordered number of a statistical variable
+
+ DESCRIPTION
+ This function is the implementation of the get_key_cache_stat_value
+ interface function that is employed by partitioned key caches.
+ The function considers the parameter keycache_cb as a pointer to the
+ control block structure of the type P_KEY_CACHE_CB for a partitioned
+ key cache.
+ This function returns the value of the statistical variable var_no
+ for this key cache. The variables are numbered starting from 0 to 6.
+ The returned value is calculated as the sum of the values of the
+ statistical variable with number var_no for all simple key caches that
+ comprise the partitioned key cache.
+
+ RETURN
+ The value of the specified statistical variable
+
+*/
+
+static
+ulonglong p_get_key_cache_stat_value(void *keycache_cb, uint var_no)
+{
+ uint i;
+ P_KEY_CACHE_CB *keycache= (P_KEY_CACHE_CB *) keycache_cb;
+ uint partitions= keycache->partitions;
+ size_t var_ofs= s_key_cache_stat_var_offsets[var_no];
+ ulonglong res= 0;
+ DBUG_ENTER("p_get_key_cache_stat_value");
+
+ if (var_no < 3)
+ {
+ for (i = 0; i < partitions; i++)
+ {
+ S_KEY_CACHE_CB *partition= keycache->partition_array[i];
+ res+= (ulonglong) (*(long *) ((char *) partition + var_ofs));
+ }
+ }
+ else
+ {
+ for (i = 0; i < partitions; i++)
+ {
+ S_KEY_CACHE_CB *partition= keycache->partition_array[i];
+ res+= *(ulonglong *) ((char *) partition + var_ofs);
+ }
+ }
+ DBUG_RETURN(res);
+}
+
+
+/*
+ The array of pointers to the key cache interface functions used by
+ partitioned key caches. Any partitioned key cache object uses this array.
+
+ The current implementation of these functions does not allow them to be
+ called from the MySQL server code directly. The key cache interface
+ wrappers must be used for this purpose.
+*/
+
+static KEY_CACHE_FUNCS p_key_cache_funcs =
+{
+ p_init_key_cache,
+ p_resize_key_cache,
+ p_change_key_cache_param,
+ p_key_cache_read,
+ p_key_cache_insert,
+ p_key_cache_write,
+ p_flush_key_blocks,
+ p_reset_key_cache_counters,
+ p_end_key_cache,
+ p_get_key_cache_statistics,
+ p_get_key_cache_stat_value
+};
+
+
+/******************************************************************************
+ Key Cache Interface Module
+
+ The module contains wrappers for all key cache interface functions.
+
+ Currently there are key caches of two types: simple key caches and
+ partitioned key caches. Each type (class) has its own implementation of the
+ basic key cache operations used by the MyISAM storage engine. The pointers
+ to the implementation functions are stored in two static structures of the
+ type KEY_CACHE_FUNCS: s_key_cache_funcs - for simple key caches, and
+ p_key_cache_funcs - for partitioned key caches. When a key cache object is
+ created the constructor procedure init_key_cache places a pointer to the
+ corresponding table into one of its fields. The procedure also initializes
+ a control block for the key cache object and saves the pointer to this
+ block in another field of the key cache object.
+ When a key cache wrapper function is invoked for a key cache object to
+ perform a basic key cache operation it looks into the interface table
+ associated with the key cache object and calls the corresponding
+ implementation of the operation. It passes the saved key cache control
+ block to this implementation. If, for some reason, the control block
+ has not been fully initialized yet, the wrapper function either does
+ nothing or, in the case of a read/write operation, performs the operation
+ directly through the system i/o functions.
+
+ As we can see, the model with which the key cache interface is supported
+ is quite conventional for interfaces in general.
+
+******************************************************************************/
+
+
+/*
+ Initialize a key cache
+
+ SYNOPSIS
+ init_key_cache()
+ keycache pointer to the key cache to be initialized
+ key_cache_block_size size of blocks to keep cached data
+ use_mem total memory to use for cache buffers/structures
+ division_limit division limit (may be zero)
+ age_threshold age threshold (may be zero)
+ partitions number of partitions in the key cache
+
+ DESCRIPTION
+ The function creates a control block structure for a key cache and
+ places the pointer to this block in the structure keycache.
+ If the value of the parameter 'partitions' is 0 then a simple key cache
+ is created. Otherwise a partitioned key cache with the specified number
+ of partitions is created.
+ The parameter key_cache_block_size specifies the size of the blocks in
+ the key cache to be created. The parameters division_limit and
+ age_threshold determine the initial values of those characteristics of
+ the key cache that are used for midpoint insertion strategy. The parameter
+ use_mem specifies the total amount of memory to be allocated for the
+ key cache buffers and for all auxiliary structures.
+
+ RETURN VALUE
+ total number of blocks in key cache partitions, if successful,
+ <= 0 - otherwise.
+
+ NOTES
+ If keycache->key_cache_inited != 0 we assume that the memory
+ for the control block of the key cache has already been allocated.
+
+ It's assumed that no two threads call this function simultaneously
+ referring to the same key cache handle.
+
+*/
+
+int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
+ size_t use_mem, uint division_limit,
+ uint age_threshold, uint partitions)
+{
+ void *keycache_cb;
+ int blocks;
+ if (keycache->key_cache_inited)
+ keycache_cb= keycache->keycache_cb;
+ else
+ {
+ if (partitions == 0)
+ {
+ if (!(keycache_cb= (void *) my_malloc(sizeof(S_KEY_CACHE_CB), MYF(0))))
+ return 0;
+ ((S_KEY_CACHE_CB *) keycache_cb)->key_cache_inited= 0;
+ keycache->key_cache_type= SIMPLE_KEY_CACHE;
+ keycache->interface_funcs= &s_key_cache_funcs;
+ }
+ else
+ {
+ if (!(keycache_cb= (void *) my_malloc(sizeof(P_KEY_CACHE_CB), MYF(0))))
+ return 0;
+ ((P_KEY_CACHE_CB *) keycache_cb)->key_cache_inited= 0;
+ keycache->key_cache_type= PARTITIONED_KEY_CACHE;
+ keycache->interface_funcs= &p_key_cache_funcs;
+ }
+ keycache->keycache_cb= keycache_cb;
+ keycache->key_cache_inited= 1;
+ }
+
+ if (partitions != 0)
+ {
+ ((P_KEY_CACHE_CB *) keycache_cb)->partitions= partitions;
+ }
+ keycache->can_be_used= 0;
+ blocks= keycache->interface_funcs->init(keycache_cb, key_cache_block_size,
+ use_mem, division_limit,
+ age_threshold);
+ keycache->partitions= partitions ?
+ ((P_KEY_CACHE_CB *) keycache_cb)->partitions : 0;
+ DBUG_ASSERT(partitions <= MAX_KEY_CACHE_PARTITIONS);
+ if (blocks > 0)
+ keycache->can_be_used= 1;
+ return blocks;
+}
+
+
+/*
+ Resize a key cache
+
+ SYNOPSIS
+ resize_key_cache()
+ keycache pointer to the key cache to be resized
+ key_cache_block_size size of blocks to keep cached data
+ use_mem total memory to use for the new key cache
+ division_limit new division limit (if not zero)
+ age_threshold new age threshold (if not zero)
+
+ DESCRIPTION
+ The function operates over the key cache keycache.
+ The parameter key_cache_block_size specifies the new size of the block
+ buffers in the key cache. The parameters division_limit and age_threshold
+ determine the new initial values of those characteristics of the key cache
+ that are used for midpoint insertion strategy. The parameter use_mem
+ specifies the total amount of memory to be allocated for the key cache
+ buffers and for all auxiliary structures.
+
+ RETURN VALUE
+ number of blocks in the key cache, if successful,
+ 0 - otherwise.
+
+ NOTES
+ The function does not block the calls and executions of other functions
+ from the key cache interface. However it assumes that the calls of
+ resize_key_cache itself are serialized.
+
+ Currently the function is called when the values of the variables
+ key_buffer_size and/or key_cache_block_size are being reset for
+ the key cache keycache.
+
+*/
+
+int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
+ size_t use_mem, uint division_limit, uint age_threshold)
+{
+ int blocks= -1;
+ if (keycache->key_cache_inited)
+ {
+ if ((uint) keycache->param_partitions != keycache->partitions && use_mem)
+ blocks= repartition_key_cache (keycache,
+ key_cache_block_size, use_mem,
+ division_limit, age_threshold,
+ (uint) keycache->param_partitions);
+ else
+ {
+ blocks= keycache->interface_funcs->resize(keycache->keycache_cb,
+ key_cache_block_size,
+ use_mem, division_limit,
+ age_threshold);
+
+ if (keycache->partitions)
+ keycache->partitions=
+ ((P_KEY_CACHE_CB *)(keycache->keycache_cb))->partitions;
+ }
+ if (blocks <= 0)
+ keycache->can_be_used= 0;
+ }
+ return blocks;
+}
+
+
+/*
+ Change key cache parameters of a key cache
+
+ SYNOPSIS
+ change_key_cache_param()
+ keycache pointer to the key cache to change parameters for
+ division_limit new division limit (if not zero)
+ age_threshold new age threshold (if not zero)
+
+ DESCRIPTION
+ The function sets new values of the division limit and the age threshold
+ used when the key cache keycache employs the midpoint insertion strategy.
+ The parameters division_limit and age_threshold provide these new values.
+
+ RETURN VALUE
+ none
+
+ NOTES
+ Currently the function is called when the values of the variables
+ key_cache_division_limit and/or key_cache_age_threshold are being reset
+ for the key cache keycache.
+
+*/
+
+void change_key_cache_param(KEY_CACHE *keycache, uint division_limit,
+ uint age_threshold)
+{
+ if (keycache->key_cache_inited)
+ {
+
+ keycache->interface_funcs->change_param(keycache->keycache_cb,
+ division_limit,
+ age_threshold);
+ }
+}
+
+
+/*
+ Destroy a key cache
+
+ SYNOPSIS
+ end_key_cache()
+ keycache pointer to the key cache to be destroyed
+ cleanup <=> complete free
+
+ DESCRIPTION
+ The function frees the memory allocated for the cache blocks and
+ auxiliary structures used by the key cache keycache. If the value
+ of the parameter cleanup is TRUE then all resources used by the key
+ cache are to be freed.
+
+ RETURN VALUE
+ none
+*/
+
+void end_key_cache(KEY_CACHE *keycache, my_bool cleanup)
+{
+ if (keycache->key_cache_inited)
+ {
+ keycache->interface_funcs->end(keycache->keycache_cb, cleanup);
+ if (cleanup)
+ {
+ if (keycache->keycache_cb)
+ {
+ my_free((uchar *) keycache->keycache_cb, MYF(0));
+ keycache->keycache_cb= 0;
+ }
+ keycache->key_cache_inited= 0;
+ }
+ keycache->can_be_used= 0;
+ }
+}
+
+
+/*
+ Read a block of data from a key cache into a buffer
+
+ SYNOPSIS
+
+ key_cache_read()
+ keycache pointer to the key cache to read data from
+ file handler for the file for the block of data to be read
+ filepos position of the block of data in the file
+ level determines the weight of the data
+ buff buffer to where the data must be placed
+ length length of the buffer
+ block_length length of the data read from a key cache block
+ return_buffer return pointer to the key cache buffer with the data
+
+ DESCRIPTION
+ The function operates over buffers of the key cache keycache.
+ In a general case the function reads a block of data from the key cache
+ into the buffer buff of the size specified by the parameter length. The
+ beginning of the block of data to be read is specified by the parameters
+ file and filepos. The length of the read data is the same as the length
+ of the buffer.
+ If the parameter return_buffer is not ignored and its value is TRUE, and
+ the data to be read of the specified size block_length can be read from one
+ key cache buffer, then the function returns a pointer to the data in the
+ key cache buffer.
+ The parameter 'level' is used only by the midpoint insertion strategy
+ when the data or its portion cannot be found in the key cache.
+ The function reads data into the buffer directly from file if the control
+ block of the key cache has not been initialized yet.
+
+ RETURN VALUE
+ Returns the address where the data was placed if successful, 0 - otherwise.
+
+ NOTES
+ Filepos must be a multiple of 'block_length', but it doesn't
+ have to be a multiple of key_cache_block_size.
+*/
+
+uchar *key_cache_read(KEY_CACHE *keycache,
+ File file, my_off_t filepos, int level,
+ uchar *buff, uint length,
+ uint block_length, int return_buffer)
+{
+ if (keycache->key_cache_inited && keycache->can_be_used)
+ return keycache->interface_funcs->read(keycache->keycache_cb,
+ file, filepos, level,
+ buff, length,
+ block_length, return_buffer);
+
+ /* We can't use mutex here as the key cache may not be initialized */
+ keycache->global_cache_r_requests++;
+ keycache->global_cache_read++;
+
+ if (my_pread(file, (uchar*) buff, length, filepos, MYF(MY_NABP)))
+ return (uchar *) 0;
+
+ return buff;
+}
+
+
+/*
+ Insert a block of file data from a buffer into a key cache
+
+ SYNOPSIS
+ key_cache_insert()
+ keycache pointer to the key cache to insert data into
+ file handler for the file to insert data from
+ filepos position of the block of data in the file to insert
+ level determines the weight of the data
+ buff buffer to read data from
+ length length of the data in the buffer
+
+ DESCRIPTION
+ The function operates over buffers of the key cache keycache.
+ The function writes a block of file data from a buffer into the key cache.
+ The buffer is specified with the parameters buff and length - the pointer
+ to the beginning of the buffer and its size respectively. It's assumed
+ that the buffer contains the data from 'file' allocated from the position
+ filepos.
+ The parameter level is used to set one characteristic for the key buffers
+ loaded with the data from buff. The characteristic is used only by the
+ midpoint insertion strategy.
+
+ RETURN VALUE
+ 0 if successful, 1 - otherwise.
+
+ NOTES
+ The function is used by MyISAM to move all blocks from an index file to
+ the key cache.
+ It is assumed that it may be performed in parallel with reading the file
+ data from the key buffers by other threads.
+
+*/
+
+int key_cache_insert(KEY_CACHE *keycache,
+ File file, my_off_t filepos, int level,
+ uchar *buff, uint length)
+{
+ if (keycache->key_cache_inited && keycache->can_be_used)
+ return keycache->interface_funcs->insert(keycache->keycache_cb,
+ file, filepos, level,
+ buff, length);
+ return 0;
+}
+
+
+/*
+ Write data from a buffer into a key cache
+
+ SYNOPSIS
+
+ key_cache_write()
+ keycache pointer to the key cache to write data to
+ file handler for the file to write data to
+ filepos position in the file to write data to
+ level determines the weight of the data
+ buff buffer with the data
+ length length of the buffer
+ dont_write if is 0 then all dirty pages involved in writing
+ should have been flushed from key cache
+ file_extra pointer to optional file attributes
+
+ DESCRIPTION
+ The function operates over buffers of the key cache keycache.
+ In a general case the function writes data from a buffer into the key
+ cache. The buffer is specified with the parameters buff and length -
+ the pointer to the beginning of the buffer and its size respectively.
+ It's assumed the buffer contains the data to be written into 'file'
+ starting from the position filepos.
+ If the value of the parameter dont_write is FALSE then the function
+ also writes the data into file.
+ The parameter level is used to set one characteristic for the key buffers
+ filled with the data from buff. The characteristic is employed only by
+ the midpoint insertion strategy.
+ The parameter file_extra may point to additional file attributes used
+ for optimization or other purposes.
+ The function writes data from the buffer directly into file if the control
+ block of the key cache has not been initialized yet.
+
+ RETURN VALUE
+ 0 if successful, 1 - otherwise.
+
+ NOTES
+ This implementation may exploit the fact that the function is called only
+ when a thread has got an exclusive lock for the key file.
+
+*/
+
+int key_cache_write(KEY_CACHE *keycache,
+ File file, void *file_extra,
+ my_off_t filepos, int level,
+ uchar *buff, uint length,
+ uint block_length, int force_write)
+{
+ if (keycache->key_cache_inited && keycache->can_be_used)
+ return keycache->interface_funcs->write(keycache->keycache_cb,
+ file, file_extra,
+ filepos, level,
+ buff, length,
+ block_length, force_write);
+
+ /* We can't use mutex here as the key cache may not be initialized */
+ keycache->global_cache_w_requests++;
+ keycache->global_cache_write++;
+ if (my_pwrite(file, buff, length, filepos, MYF(MY_NABP | MY_WAIT_IF_FULL)))
+ return 1;
+
+ return 0;
+}
+
+
+/*
+ Flush all blocks for a file from key buffers of a key cache
+
+ SYNOPSIS
+
+ flush_key_blocks()
+ keycache pointer to the key cache whose blocks are to be flushed
+ file handler for the file to flush to
+ file_extra map of key cache partitions with dirty pages (used for partitioned key caches)
+ flush_type type of the flush operation
+
+ DESCRIPTION
+ The function operates over buffers of the key cache keycache.
+ In a general case the function flushes the data from all dirty key
+ buffers related to the file 'file' into this file. The function does
+ exactly this if the value of the parameter type is FLUSH_KEEP. If the
+ value of this parameter is FLUSH_RELEASE, the function additionally
+ releases the key buffers containing data from 'file' for new usage.
+ If the value of the parameter type is FLUSH_IGNORE_CHANGED the function
+ just releases the key buffers containing data from 'file'.
+ If the value of the parameter type is FLUSH_KEEP the function may use
+ the value of the parameter file_extra pointing to possibly dirty
+ partitions to optimize the operation for partitioned key caches.
+
+ RETURN
+ 0 ok
+ 1 error
+
+ NOTES
+ Any implementation of the function may exploit the fact that the function
+ is called only when a thread has got an exclusive lock for the key file.
+
+*/
+
+int flush_key_blocks(KEY_CACHE *keycache,
+ int file, void *file_extra,
+ enum flush_type type)
+{
+ if (keycache->key_cache_inited)
+ return keycache->interface_funcs->flush(keycache->keycache_cb,
+ file, file_extra, type);
+ return 0;
+}
+
+
+/*
+ Reset the counters of a key cache
+
+ SYNOPSIS
+ reset_key_cache_counters()
+ name the name of a key cache (unused)
+ keycache pointer to the key cache for which to reset counters
+
+ DESCRIPTION
+ This function resets the values of the statistical counters for the key
+ cache keycache.
+ The parameter name is currently not used.
+
+ RETURN
+ 0 on success (always because it can't fail)
+
+ NOTES
+ This procedure is used by process_key_caches() to reset the counters of all
+ currently used key caches, both the default one and the named ones.
+
+*/
+
+int reset_key_cache_counters(const char *name __attribute__((unused)),
+ KEY_CACHE *keycache)
+{
+ if (keycache->key_cache_inited)
+ {
+
+ return keycache->interface_funcs->reset_counters(name,
+ keycache->keycache_cb);
+ }
+ return 0;
+}
+
+
+/*
+ Get statistics for a key cache
+
+ SYNOPSIS
+ get_key_cache_statistics()
+ keycache pointer to the key cache to get statistics for
+ partition_no partition number to get statistics for
+ key_cache_stats OUT pointer to the structure for the returned statistics
+
+ DESCRIPTION
+ If the value of the parameter partition_no is equal to 0 then statistics
+ for the whole key cache keycache (aggregated statistics) are returned in the
+ fields of the structure key_cache_stats of the type KEY_CACHE_STATISTICS.
+ Otherwise the value of the parameter partition_no makes sense only for
+ a partitioned key cache. In this case the function returns statistics
+ for the partition with the specified number partition_no.
+
+ RETURN
+ none
+
+*/
+
+void get_key_cache_statistics(KEY_CACHE *keycache, uint partition_no,
+ KEY_CACHE_STATISTICS *key_cache_stats)
+{
+ bzero(key_cache_stats, sizeof(KEY_CACHE_STATISTICS));
+ if (keycache->key_cache_inited)
+ {
+ keycache->interface_funcs->get_stats(keycache->keycache_cb,
+ partition_no, key_cache_stats);
+ }
+}
+
+
+/*
+ Get the value of a statistical variable for a key cache
+
+ SYNOPSIS
+ get_key_cache_stat_value()
+ keycache pointer to the key cache to get statistics for
+ var_no the ordered number of a statistical variable
+
+ DESCRIPTION
+ This function returns the value of the statistical variable var_no for
+ the key cache keycache. The variables are numbered starting from 0 to 6.
+
+ RETURN
+ The value of the specified statistical variable.
+
+ NOTES
+ Currently for any key cache the function can return values for the
+ following 7 statistical variables:
+
+ Name Number
+
+ blocks_used 0
+ blocks_unused 1
+ blocks_changed 2
+ read_requests 3
+ reads 4
+ write_requests 5
+ writes 6
+
+*/
+
+ulonglong get_key_cache_stat_value(KEY_CACHE *keycache, uint var_no)
+{
+ if (keycache->key_cache_inited)
+ {
+ return keycache->interface_funcs->get_stat_val(keycache->keycache_cb,
+ var_no);
+ }
+ else
+ return 0;
+}
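
[Editor's illustration, not part of the patch.] A minimal sketch of how the
two statistics calls above can be used together from server code. It assumes
an already initialized dflt_key_cache; the variable number 4 ("reads") comes
from the table in the NOTES of get_key_cache_stat_value:

    KEY_CACHE_STATISTICS stats;
    ulonglong reads;

    /* aggregated statistics for the whole (possibly partitioned) key cache */
    get_key_cache_statistics(dflt_key_cache, 0, &stats);

    /* statistics for the first partition; partitions are numbered from 1 */
    if (dflt_key_cache->partitions)
      get_key_cache_statistics(dflt_key_cache, 1, &stats);

    /* one counter summed over all partitions: 4 == "reads" */
    reads= get_key_cache_stat_value(dflt_key_cache, 4);
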
+
+
+/*
+ Repartition a key cache
+
+ SYNOPSIS
+ repartition_key_cache()
+ keycache pointer to the key cache to be repartitioned
+ key_cache_block_size size of blocks to keep cached data
+ use_mem total memory to use for the new key cache
+ division_limit new division limit (if not zero)
+ age_threshold new age threshold (if not zero)
+ partitions new number of partitions in the key cache
+
+ DESCRIPTION
+ The function operates over the key cache keycache.
+ The parameter partitions specifies the number of partitions in the key
+ cache after repartitioning. If the value of this parameter is 0 then
+ a simple key cache must be created instead of the old one.
+ The parameter key_cache_block_size specifies the new size of the block
+ buffers in the key cache. The parameters division_limit and age_threshold
+ determine the new initial values of those characteristics of the key cache
+ that are used for midpoint insertion strategy. The parameter use_mem
+ specifies the total amount of memory to be allocated for the new key
+ cache buffers and for all auxiliary structures.
+
+ RETURN VALUE
+ number of blocks in the key cache, if successful,
+ 0 - otherwise.
+
+ NOTES
+ The function does not block the calls and executions of other functions
+ from the key cache interface. However it assumes that the calls of
+ repartition_key_cache itself are serialized.
+
+ Currently the function is called when the value of the variable
+ key_cache_partitions is being reset for the key cache keycache.
+
+*/
+
+int repartition_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
+ size_t use_mem, uint division_limit,
+ uint age_threshold, uint partitions)
+{
+ int blocks= -1;
+ if (keycache->key_cache_inited)
+ {
+ keycache->interface_funcs->resize(keycache->keycache_cb,
+ key_cache_block_size, 0,
+ division_limit, age_threshold);
+ end_key_cache(keycache, 1);
+ blocks= init_key_cache(keycache, key_cache_block_size, use_mem,
+ division_limit, age_threshold, partitions);
+ }
+ return blocks;
+}
+
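[Editor's illustration, not part of the patch.] A sketch of the full life
cycle of the interface above with the new 'partitions' argument, in the
spirit of the mi_test programs changed further down. The file descriptor fd,
the sizes and the partition count are invented for the example, and the
declarations are assumed to come from keycache.h:

    KEY_CACHE cache;
    ulonglong dirty_part_map= 0;
    uchar page[1024];

    bzero(&cache, sizeof(cache));
    /* a partitioned key cache: 8 partitions, 1K blocks, 16M of memory */
    if (init_key_cache(&cache, 1024, 16*1024*1024, 0, 0, 8) <= 0)
      return 1;

    /* read the first block of the index file through the cache (level 0) */
    if (!key_cache_read(&cache, fd, 0, 0, page, sizeof(page),
                        sizeof(page), 0))
      return 1;

    /* write it back through the cache; the last flag is passed through as
       dont_write (see p_key_cache_write above), so the page stays dirty in
       the cache, and the bitmap passed as file_extra records which
       partitions now hold dirty pages for this file */
    if (key_cache_write(&cache, fd, &dirty_part_map, 0, 0,
                        page, sizeof(page), sizeof(page), 1))
      return 1;

    /* flush the dirty blocks of the file and release its buffers */
    flush_key_blocks(&cache, fd, &dirty_part_map, FLUSH_RELEASE);

    end_key_cache(&cache, 1);          /* free all resources */
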
=== modified file 'sql/handler.cc'
--- a/sql/handler.cc 2009-09-09 21:06:57 +0000
+++ b/sql/handler.cc 2010-06-29 00:10:53 +0000
@@ -3691,11 +3691,13 @@ int ha_init_key_cache(const char *name,
uint tmp_block_size= (uint) key_cache->param_block_size;
uint division_limit= key_cache->param_division_limit;
uint age_threshold= key_cache->param_age_threshold;
+ uint partitions= key_cache->param_partitions;
pthread_mutex_unlock(&LOCK_global_system_variables);
DBUG_RETURN(!init_key_cache(key_cache,
tmp_block_size,
tmp_buff_size,
- division_limit, age_threshold));
+ division_limit, age_threshold,
+ partitions));
}
DBUG_RETURN(0);
}
@@ -3725,10 +3727,12 @@ int ha_resize_key_cache(KEY_CACHE *key_c
/**
- Change parameters for key cache (like size)
+ Change parameters for key cache (like division_limit)
*/
int ha_change_key_cache_param(KEY_CACHE *key_cache)
{
+ DBUG_ENTER("ha_change_key_cache_param");
+
if (key_cache->key_cache_inited)
{
pthread_mutex_lock(&LOCK_global_system_variables);
@@ -3737,9 +3741,35 @@ int ha_change_key_cache_param(KEY_CACHE
pthread_mutex_unlock(&LOCK_global_system_variables);
change_key_cache_param(key_cache, division_limit, age_threshold);
}
- return 0;
+ DBUG_RETURN(0);
}
+
+/**
+ Repartition key cache
+*/
+int ha_repartition_key_cache(KEY_CACHE *key_cache)
+{
+ DBUG_ENTER("ha_repartition_key_cache");
+
+ if (key_cache->key_cache_inited)
+ {
+ pthread_mutex_lock(&LOCK_global_system_variables);
+ size_t tmp_buff_size= (size_t) key_cache->param_buff_size;
+ long tmp_block_size= (long) key_cache->param_block_size;
+ uint division_limit= key_cache->param_division_limit;
+ uint age_threshold= key_cache->param_age_threshold;
+ uint partitions= key_cache->param_partitions;
+ pthread_mutex_unlock(&LOCK_global_system_variables);
+ DBUG_RETURN(!repartition_key_cache(key_cache, tmp_block_size,
+ tmp_buff_size,
+ division_limit, age_threshold,
+ partitions));
+ }
+ DBUG_RETURN(0);
+}
+
+
/**
Free memory allocated by a key cache.
*/
=== modified file 'sql/handler.h'
--- a/sql/handler.h 2009-09-07 20:50:10 +0000
+++ b/sql/handler.h 2010-06-29 00:10:53 +0000
@@ -2026,6 +2026,7 @@ int ha_table_exists_in_engine(THD* thd,
extern "C" int ha_init_key_cache(const char *name, KEY_CACHE *key_cache);
int ha_resize_key_cache(KEY_CACHE *key_cache);
int ha_change_key_cache_param(KEY_CACHE *key_cache);
+int ha_repartition_key_cache(KEY_CACHE *key_cache);
int ha_change_key_cache(KEY_CACHE *old_key_cache, KEY_CACHE *new_key_cache);
int ha_end_key_cache(KEY_CACHE *key_cache);
=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc 2009-10-07 13:07:10 +0000
+++ b/sql/mysqld.cc 2010-06-29 00:10:53 +0000
@@ -5713,6 +5713,7 @@ enum options_mysqld
OPT_INTERACTIVE_TIMEOUT, OPT_JOIN_BUFF_SIZE,
OPT_KEY_BUFFER_SIZE, OPT_KEY_CACHE_BLOCK_SIZE,
OPT_KEY_CACHE_DIVISION_LIMIT, OPT_KEY_CACHE_AGE_THRESHOLD,
+ OPT_KEY_CACHE_PARTITIONS,
OPT_LONG_QUERY_TIME,
OPT_LOWER_CASE_TABLE_NAMES, OPT_MAX_ALLOWED_PACKET,
OPT_MAX_BINLOG_CACHE_SIZE, OPT_MAX_BINLOG_SIZE,
@@ -6789,6 +6790,12 @@ log and this option does nothing anymore
(uchar**) 0,
0, (GET_ULONG | GET_ASK_ADDR) , REQUIRED_ARG, 100,
1, 100, 0, 1, 0},
+ {"key_cache_partitions", OPT_KEY_CACHE_PARTITIONS,
+ "The number of partitions in key cache",
+ (uchar**) &dflt_key_cache_var.param_partitions,
+ (uchar**) 0,
+ 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, DEFAULT_KEY_CACHE_PARTITIONS,
+ 0, MAX_KEY_CACHE_PARTITIONS, 0, 1, 0},
{"log-slow-filter", OPT_LOG_SLOW_FILTER,
"Log only the queries that followed certain execution plan. Multiple flags allowed in a comma-separated string. [admin, filesort, filesort_on_disk, full_join, full_scan, query_cache, query_cache_miss, tmp_table, tmp_table_on_disk]. Sets log-slow-admin-command to ON",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, QPLAN_ALWAYS_SET, 0, 0},
@@ -8664,6 +8671,7 @@ mysql_getopt_value(const char *keyname,
case OPT_KEY_CACHE_BLOCK_SIZE:
case OPT_KEY_CACHE_DIVISION_LIMIT:
case OPT_KEY_CACHE_AGE_THRESHOLD:
+ case OPT_KEY_CACHE_PARTITIONS:
{
KEY_CACHE *key_cache;
if (!(key_cache= get_or_create_key_cache(keyname, key_length)))
@@ -8681,6 +8689,8 @@ mysql_getopt_value(const char *keyname,
return (uchar**) &key_cache->param_division_limit;
case OPT_KEY_CACHE_AGE_THRESHOLD:
return (uchar**) &key_cache->param_age_threshold;
+ case OPT_KEY_CACHE_PARTITIONS:
+ return (uchar**) &key_cache->param_partitions;
}
}
}
=== modified file 'sql/set_var.cc'
--- a/sql/set_var.cc 2009-09-15 10:46:35 +0000
+++ b/sql/set_var.cc 2010-06-29 00:10:53 +0000
@@ -314,15 +314,18 @@ static sys_var_thd_ulong sys_interactive
static sys_var_thd_ulong sys_join_buffer_size(&vars, "join_buffer_size",
&SV::join_buff_size);
static sys_var_key_buffer_size sys_key_buffer_size(&vars, "key_buffer_size");
-static sys_var_key_cache_long sys_key_cache_block_size(&vars, "key_cache_block_size",
- offsetof(KEY_CACHE,
- param_block_size));
-static sys_var_key_cache_long sys_key_cache_division_limit(&vars, "key_cache_division_limit",
- offsetof(KEY_CACHE,
- param_division_limit));
-static sys_var_key_cache_long sys_key_cache_age_threshold(&vars, "key_cache_age_threshold",
- offsetof(KEY_CACHE,
- param_age_threshold));
+static sys_var_key_cache_long sys_key_cache_block_size(&vars,
+ "key_cache_block_size",
+ offsetof(KEY_CACHE,param_block_size));
+static sys_var_key_cache_long sys_key_cache_division_limit(&vars,
+ "key_cache_division_limit",
+ offsetof(KEY_CACHE, param_division_limit));
+static sys_var_key_cache_long sys_key_cache_age_threshold(&vars,
+ "key_cache_age_threshold",
+ offsetof(KEY_CACHE, param_age_threshold));
+static sys_var_key_cache_long sys_key_cache_partitions(&vars,
+ "key_cache_partitions",
+ offsetof(KEY_CACHE, param_partitions));
static sys_var_const sys_language(&vars, "language",
OPT_GLOBAL, SHOW_CHAR,
(uchar*) language);
@@ -2528,7 +2531,21 @@ bool sys_var_key_cache_long::update(THD
pthread_mutex_unlock(&LOCK_global_system_variables);
- error= (bool) (ha_resize_key_cache(key_cache));
+ switch (offset) {
+
+ case offsetof(KEY_CACHE, param_block_size):
+ error= (bool) (ha_resize_key_cache(key_cache));
+ break;
+
+ case offsetof(KEY_CACHE, param_division_limit):
+ case offsetof(KEY_CACHE, param_age_threshold):
+ error= (bool) (ha_change_key_cache_param(key_cache));
+ break;
+
+ case offsetof(KEY_CACHE, param_partitions):
+ error= (bool) (ha_repartition_key_cache(key_cache));
+ break;
+ }
pthread_mutex_lock(&LOCK_global_system_variables);
key_cache->in_init= 0;
@@ -4131,6 +4148,7 @@ static KEY_CACHE *create_key_cache(const
key_cache->param_block_size= dflt_key_cache_var.param_block_size;
key_cache->param_division_limit= dflt_key_cache_var.param_division_limit;
key_cache->param_age_threshold= dflt_key_cache_var.param_age_threshold;
+ key_cache->param_partitions= dflt_key_cache_var.param_partitions;
}
}
DBUG_RETURN(key_cache);
=== modified file 'sql/set_var.h'
--- a/sql/set_var.h 2009-09-15 10:46:35 +0000
+++ b/sql/set_var.h 2010-06-29 00:10:53 +0000
@@ -1411,6 +1411,7 @@ public:
my_free((uchar*) name, MYF(0));
}
friend bool process_key_caches(process_key_cache_t func);
+ friend int fill_key_cache_tables(THD *thd, TABLE_LIST *tables, COND *cond);
friend void delete_elements(I_List<NAMED_LIST> *list,
void (*free_element)(const char*, uchar*));
};
=== modified file 'sql/sql_show.cc'
--- a/sql/sql_show.cc 2009-09-23 11:03:47 +0000
+++ b/sql/sql_show.cc 2010-06-29 00:10:53 +0000
@@ -2106,6 +2106,31 @@ inline void make_upper(char *buf)
*buf= my_toupper(system_charset_info, *buf);
}
+
+static void update_key_cache_stat_var(KEY_CACHE *key_cache, size_t ofs)
+{
+ uint var_no;
+ switch (ofs) {
+ case offsetof(KEY_CACHE, blocks_used):
+ case offsetof(KEY_CACHE, blocks_unused):
+ case offsetof(KEY_CACHE, global_blocks_changed):
+ var_no= (ofs-offsetof(KEY_CACHE, blocks_used))/sizeof(ulong);
+ *(ulong *)((char *) key_cache + ofs)=
+ (ulong) get_key_cache_stat_value(key_cache, var_no);
+ break;
+ case offsetof(KEY_CACHE, global_cache_r_requests):
+ case offsetof(KEY_CACHE, global_cache_read):
+ case offsetof(KEY_CACHE, global_cache_w_requests):
+ case offsetof(KEY_CACHE, global_cache_write):
+ var_no= 3+(ofs-offsetof(KEY_CACHE, global_cache_w_requests))/
+ sizeof(ulonglong);
+ *(ulonglong *)((char *) key_cache + ofs)=
+ get_key_cache_stat_value(key_cache, var_no);
+ break;
+ }
+}
+
+
static bool show_status_array(THD *thd, const char *wild,
SHOW_VAR *variables,
enum enum_var_type value_type,
@@ -2238,10 +2263,12 @@ static bool show_status_array(THD *thd,
break;
}
case SHOW_KEY_CACHE_LONG:
+ update_key_cache_stat_var(dflt_key_cache, (size_t) value);
value= (char*) dflt_key_cache + (ulong)value;
end= int10_to_str(*(long*) value, buff, 10);
break;
case SHOW_KEY_CACHE_LONGLONG:
+ update_key_cache_stat_var(dflt_key_cache, (size_t) value);
value= (char*) dflt_key_cache + (ulong)value;
end= longlong10_to_str(*(longlong*) value, buff, 10);
break;
@@ -6095,6 +6122,90 @@ int fill_schema_files(THD *thd, TABLE_LI
}
+static
+int store_key_cache_table_record(THD *thd, TABLE *table,
+ const char *name, uint name_length,
+ KEY_CACHE *key_cache,
+ uint partitions, uint partition_no)
+{
+ KEY_CACHE_STATISTICS key_cache_stats;
+ uint err;
+ DBUG_ENTER("store_key_cache_table_record");
+
+ get_key_cache_statistics(key_cache, partition_no, &key_cache_stats);
+
+ if (key_cache_stats.mem_size == 0)
+ DBUG_RETURN(0);
+
+ restore_record(table, s->default_values);
+ table->field[0]->store(name, name_length, system_charset_info);
+ if (partitions == 0)
+ table->field[1]->set_null();
+ else
+ {
+ table->field[1]->set_notnull();
+ table->field[1]->store((long) partitions, TRUE);
+ }
+
+ if (partition_no == 0)
+ table->field[2]->set_null();
+ else
+ {
+ table->field[2]->set_notnull();
+ table->field[2]->store((long) partition_no, TRUE);
+ }
+ table->field[3]->store(key_cache_stats.mem_size, TRUE);
+ table->field[4]->store(key_cache_stats.block_size, TRUE);
+ table->field[5]->store(key_cache_stats.blocks_used, TRUE);
+ table->field[6]->store(key_cache_stats.blocks_unused, TRUE);
+ table->field[7]->store(key_cache_stats.blocks_changed, TRUE);
+ table->field[8]->store(key_cache_stats.read_requests, TRUE);
+ table->field[9]->store(key_cache_stats.reads, TRUE);
+ table->field[10]->store(key_cache_stats.write_requests, TRUE);
+ table->field[11]->store(key_cache_stats.writes, TRUE);
+
+ err= schema_table_store_record(thd, table);
+ DBUG_RETURN(err);
+}
+
+
+int fill_key_cache_tables(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ TABLE *table= tables->table;
+ I_List_iterator<NAMED_LIST> it(key_caches);
+ NAMED_LIST *element;
+ DBUG_ENTER("fill_key_cache_tables");
+
+ while ((element= it++))
+ {
+ KEY_CACHE *key_cache= (KEY_CACHE *) element->data;
+
+ if (!key_cache->key_cache_inited)
+ continue;
+
+ uint partitions= key_cache->partitions;
+ DBUG_ASSERT(partitions <= MAX_KEY_CACHE_PARTITIONS);
+
+ if (partitions)
+ {
+ for (uint i= 0; i < partitions; i++)
+ {
+ if (store_key_cache_table_record(thd, table,
+ element->name, element->name_length,
+ key_cache, partitions, i+1))
+ DBUG_RETURN(1);
+ }
+ }
+
+ if (store_key_cache_table_record(thd, table,
+ element->name, element->name_length,
+ key_cache, partitions, 0))
+ DBUG_RETURN(1);
+ }
+ DBUG_RETURN(0);
+}
+
+
ST_FIELD_INFO schema_fields_info[]=
{
{"CATALOG_NAME", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
@@ -6672,6 +6783,35 @@ ST_FIELD_INFO referential_constraints_fi
};
+ST_FIELD_INFO keycache_fields_info[]=
+{
+ {"KEY_CACHE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
+ {"PARTITIONS", 3, MYSQL_TYPE_LONG, 0,
+ (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED) , 0, SKIP_OPEN_TABLE},
+ {"PARTITION_NUMBER", 3, MYSQL_TYPE_LONG, 0,
+ (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, SKIP_OPEN_TABLE},
+ {"FULL_SIZE", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
+ (MY_I_S_UNSIGNED), 0, SKIP_OPEN_TABLE},
+ {"BLOCK_SIZE", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
+ (MY_I_S_UNSIGNED), 0, SKIP_OPEN_TABLE },
+ {"USED_BLOCKS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
+ (MY_I_S_UNSIGNED), "Key_blocks_used", SKIP_OPEN_TABLE},
+ {"UNUSED_BLOCKS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
+ (MY_I_S_UNSIGNED), "Key_blocks_unused", SKIP_OPEN_TABLE},
+ {"DIRTY_BLOCKS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
+ (MY_I_S_UNSIGNED), "Key_blocks_not_flushed", SKIP_OPEN_TABLE},
+ {"READ_REQUESTS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
+ (MY_I_S_UNSIGNED), "Key_read_requests", SKIP_OPEN_TABLE},
+ {"READS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
+ (MY_I_S_UNSIGNED), "Key_reads", SKIP_OPEN_TABLE},
+ {"WRITE_REQUESTS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
+ (MY_I_S_UNSIGNED), "Key_write_requests", SKIP_OPEN_TABLE},
+ {"WRITES", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0,
+ (MY_I_S_UNSIGNED), "Key_writes", SKIP_OPEN_TABLE},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
+};
+
+
/*
Description of ST_FIELD_INFO in table.h
@@ -6707,6 +6847,8 @@ ST_SCHEMA_TABLE schema_tables[]=
fill_status, make_old_format, 0, 0, -1, 0, 0},
{"GLOBAL_VARIABLES", variables_fields_info, create_schema_table,
fill_variables, make_old_format, 0, 0, -1, 0, 0},
+ {"KEY_CACHES", keycache_fields_info, create_schema_table,
+ fill_key_cache_tables, make_old_format, 0, -1,-1, 0, 0},
{"KEY_COLUMN_USAGE", key_column_usage_fields_info, create_schema_table,
get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0,
OPEN_TABLE_ONLY},
=== modified file 'sql/sql_test.cc'
--- a/sql/sql_test.cc 2009-09-07 20:50:10 +0000
+++ b/sql/sql_test.cc 2010-06-29 00:10:53 +0000
@@ -435,7 +435,8 @@ static int print_key_cache_status(const
Buffer_size: %10lu\n\
Block_size: %10lu\n\
Division_limit: %10lu\n\
-Age_limit: %10lu\n\
+Age_threshold: %10lu\n\
+Partitions: %10lu\n\
blocks used: %10lu\n\
not flushed: %10lu\n\
w_requests: %10s\n\
@@ -445,6 +446,7 @@ reads: %10s\n\n",
name,
(ulong) key_cache->param_buff_size, key_cache->param_block_size,
key_cache->param_division_limit, key_cache->param_age_threshold,
+ key_cache->param_partitions,
key_cache->blocks_used,key_cache->global_blocks_changed,
llstr(key_cache->global_cache_w_requests,llbuff1),
llstr(key_cache->global_cache_write,llbuff2),
=== modified file 'sql/table.h'
--- a/sql/table.h 2009-09-15 10:46:35 +0000
+++ b/sql/table.h 2010-06-29 00:10:53 +0000
@@ -887,6 +887,7 @@ enum enum_schema_tables
SCH_FILES,
SCH_GLOBAL_STATUS,
SCH_GLOBAL_VARIABLES,
+ SCH_KEY_CACHES,
SCH_KEY_COLUMN_USAGE,
SCH_OPEN_TABLES,
SCH_PARTITIONS,
=== modified file 'storage/myisam/mi_check.c'
--- a/storage/myisam/mi_check.c 2009-06-29 21:03:30 +0000
+++ b/storage/myisam/mi_check.c 2010-06-29 00:10:53 +0000
@@ -334,7 +334,8 @@ int chk_size(HA_CHECK *param, register M
/* The following is needed if called externally (not from myisamchk) */
flush_key_blocks(info->s->key_cache,
- info->s->kfile, FLUSH_FORCE_WRITE);
+ info->s->kfile, &info->s->dirty_part_map,
+ FLUSH_FORCE_WRITE);
size= my_seek(info->s->kfile, 0L, MY_SEEK_END, MYF(MY_THREADSAFE));
if ((skr=(my_off_t) info->state->key_file_length) != size)
@@ -1477,6 +1478,7 @@ static int mi_drop_all_indexes(HA_CHECK
*/
DBUG_PRINT("repair", ("all disabled are empty: create missing"));
error= flush_key_blocks(share->key_cache, share->kfile,
+ &share->dirty_part_map,
FLUSH_FORCE_WRITE);
goto end;
}
@@ -1491,6 +1493,7 @@ static int mi_drop_all_indexes(HA_CHECK
/* Remove all key blocks of this index file from key cache. */
if ((error= flush_key_blocks(share->key_cache, share->kfile,
+ &share->dirty_part_map,
FLUSH_IGNORE_CHANGED)))
goto end; /* purecov: inspected */
@@ -1550,7 +1553,7 @@ int mi_repair(HA_CHECK *param, register
if (!param->using_global_keycache)
VOID(init_key_cache(dflt_key_cache, param->key_cache_block_size,
- param->use_buffers, 0, 0));
+ (size_t) param->use_buffers, 0, 0, 0));
if (init_io_cache(¶m->read_cache,info->dfile,
(uint) param->read_buffer_length,
@@ -1763,7 +1766,8 @@ err:
VOID(end_io_cache(¶m->read_cache));
info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
VOID(end_io_cache(&info->rec_cache));
- got_error|=flush_blocks(param, share->key_cache, share->kfile);
+ got_error|=flush_blocks(param, share->key_cache, share->kfile,
+ &share->dirty_part_map);
if (!got_error && param->testflag & T_UNPACK)
{
share->state.header.options[0]&= (uchar) ~HA_OPTION_COMPRESS_RECORD;
@@ -1909,9 +1913,10 @@ void lock_memory(HA_CHECK *param __attri
/* Flush all changed blocks to disk */
-int flush_blocks(HA_CHECK *param, KEY_CACHE *key_cache, File file)
+int flush_blocks(HA_CHECK *param, KEY_CACHE *key_cache, File file,
+ ulonglong *dirty_part_map)
{
- if (flush_key_blocks(key_cache, file, FLUSH_RELEASE))
+ if (flush_key_blocks(key_cache, file, dirty_part_map, FLUSH_RELEASE))
{
mi_check_print_error(param,"%d when trying to write bufferts",my_errno);
return(1);
@@ -1978,7 +1983,8 @@ int mi_sort_index(HA_CHECK *param, regis
}
/* Flush key cache for this file if we are calling this outside myisamchk */
- flush_key_blocks(share->key_cache,share->kfile, FLUSH_IGNORE_CHANGED);
+ flush_key_blocks(share->key_cache, share->kfile, &share->dirty_part_map,
+ FLUSH_IGNORE_CHANGED);
share->state.version=(ulong) time((time_t*) 0);
old_state= share->state; /* save state if not stored */
@@ -2537,7 +2543,8 @@ int mi_repair_by_sort(HA_CHECK *param, r
memcpy( &share->state.state, info->state, sizeof(*info->state));
err:
- got_error|= flush_blocks(param, share->key_cache, share->kfile);
+ got_error|= flush_blocks(param, share->key_cache, share->kfile,
+ &share->dirty_part_map);
VOID(end_io_cache(&info->rec_cache));
if (!got_error)
{
@@ -3059,7 +3066,8 @@ int mi_repair_parallel(HA_CHECK *param,
memcpy(&share->state.state, info->state, sizeof(*info->state));
err:
- got_error|= flush_blocks(param, share->key_cache, share->kfile);
+ got_error|= flush_blocks(param, share->key_cache, share->kfile,
+ &share->dirty_part_map);
/*
Destroy the write cache. The master thread did already detach from
the share by remove_io_thread() or it was not yet started (if the
=== modified file 'storage/myisam/mi_close.c'
--- a/storage/myisam/mi_close.c 2009-09-07 20:50:10 +0000
+++ b/storage/myisam/mi_close.c 2010-06-29 00:10:53 +0000
@@ -64,6 +64,7 @@ int mi_close(register MI_INFO *info)
if (share->kfile >= 0) abort(););
if (share->kfile >= 0 &&
flush_key_blocks(share->key_cache, share->kfile,
+ &share->dirty_part_map,
share->temporary ? FLUSH_IGNORE_CHANGED :
FLUSH_RELEASE))
error=my_errno;
=== modified file 'storage/myisam/mi_delete_all.c'
--- a/storage/myisam/mi_delete_all.c 2008-04-28 16:24:05 +0000
+++ b/storage/myisam/mi_delete_all.c 2010-06-29 00:10:53 +0000
@@ -52,7 +52,8 @@ int mi_delete_all_rows(MI_INFO *info)
If we are using delayed keys or if the user has done changes to the tables
since it was locked then there may be key blocks in the key cache
*/
- flush_key_blocks(share->key_cache, share->kfile, FLUSH_IGNORE_CHANGED);
+ flush_key_blocks(share->key_cache, share->kfile, &share->dirty_part_map,
+ FLUSH_IGNORE_CHANGED);
#ifdef HAVE_MMAP
if (share->file_map)
_mi_unmap_file(info);
=== modified file 'storage/myisam/mi_extra.c'
--- a/storage/myisam/mi_extra.c 2009-10-06 06:13:56 +0000
+++ b/storage/myisam/mi_extra.c 2010-06-29 00:10:53 +0000
@@ -263,6 +263,7 @@ int mi_extra(MI_INFO *info, enum ha_extr
pthread_mutex_lock(&share->intern_lock);
/* Flush pages that we don't need anymore */
if (flush_key_blocks(share->key_cache, share->kfile,
+ &share->dirty_part_map,
(function == HA_EXTRA_PREPARE_FOR_DROP ?
FLUSH_IGNORE_CHANGED : FLUSH_RELEASE)))
{
@@ -321,7 +322,8 @@ int mi_extra(MI_INFO *info, enum ha_extr
break;
case HA_EXTRA_FLUSH:
if (!share->temporary)
- flush_key_blocks(share->key_cache, share->kfile, FLUSH_KEEP);
+ flush_key_blocks(share->key_cache, share->kfile, &share->dirty_part_map,
+ FLUSH_KEEP);
#ifdef HAVE_PWRITE
_mi_decrement_open_count(info);
#endif
=== modified file 'storage/myisam/mi_keycache.c'
--- a/storage/myisam/mi_keycache.c 2008-03-29 15:56:33 +0000
+++ b/storage/myisam/mi_keycache.c 2010-06-29 00:10:53 +0000
@@ -75,7 +75,8 @@ int mi_assign_to_key_cache(MI_INFO *info
in the old key cache.
*/
- if (flush_key_blocks(share->key_cache, share->kfile, FLUSH_RELEASE))
+ if (flush_key_blocks(share->key_cache, share->kfile, &share->dirty_part_map,
+ FLUSH_RELEASE))
{
error= my_errno;
mi_print_error(info->s, HA_ERR_CRASHED);
@@ -90,7 +91,8 @@ int mi_assign_to_key_cache(MI_INFO *info
(This can never fail as there is never any not written data in the
new key cache)
*/
- (void) flush_key_blocks(key_cache, share->kfile, FLUSH_RELEASE);
+ (void) flush_key_blocks(key_cache, share->kfile, &share->dirty_part_map,
+ FLUSH_RELEASE);
/*
ensure that setting the key cache and changing the multi_key_cache
@@ -102,6 +104,7 @@ int mi_assign_to_key_cache(MI_INFO *info
This should be seen at the lastes for the next call to an myisam function.
*/
share->key_cache= key_cache;
+ share->dirty_part_map= 0;
/* store the key cache in the global hash structure for future opens */
if (multi_key_cache_set((uchar*) share->unique_file_name,
=== modified file 'storage/myisam/mi_locking.c'
--- a/storage/myisam/mi_locking.c 2009-10-06 06:57:22 +0000
+++ b/storage/myisam/mi_locking.c 2010-06-29 00:10:53 +0000
@@ -68,7 +68,9 @@ int mi_lock_database(MI_INFO *info, int
--share->tot_locks;
if (info->lock_type == F_WRLCK && !share->w_locks &&
!share->delay_key_write && flush_key_blocks(share->key_cache,
- share->kfile,FLUSH_KEEP))
+ share->kfile,
+ &share->dirty_part_map,
+ FLUSH_KEEP))
{
error=my_errno;
mi_print_error(info->s, HA_ERR_CRASHED);
@@ -513,7 +515,8 @@ int _mi_test_if_changed(register MI_INFO
{ /* Keyfile has changed */
DBUG_PRINT("info",("index file changed"));
if (share->state.process != share->this_process)
- VOID(flush_key_blocks(share->key_cache, share->kfile, FLUSH_RELEASE));
+ VOID(flush_key_blocks(share->key_cache, share->kfile,
+ &share->dirty_part_map, FLUSH_RELEASE));
share->last_process=share->state.process;
info->last_unique= share->state.unique;
info->last_loop= share->state.update_count;
=== modified file 'storage/myisam/mi_page.c'
--- a/storage/myisam/mi_page.c 2009-05-06 12:03:24 +0000
+++ b/storage/myisam/mi_page.c 2010-06-29 00:10:53 +0000
@@ -94,10 +94,11 @@ int _mi_write_keypage(register MI_INFO *
}
#endif
DBUG_RETURN((key_cache_write(info->s->key_cache,
- info->s->kfile,page, level, (uchar*) buff,length,
- (uint) keyinfo->block_length,
- (int) ((info->lock_type != F_UNLCK) ||
- info->s->delay_key_write))));
+ info->s->kfile, &info->s->dirty_part_map,
+ page, level, (uchar*) buff, length,
+ (uint) keyinfo->block_length,
+ (int) ((info->lock_type != F_UNLCK) ||
+ info->s->delay_key_write))));
} /* mi_write_keypage */
@@ -116,7 +117,8 @@ int _mi_dispose(register MI_INFO *info,
mi_sizestore(buff,old_link);
info->s->state.changed|= STATE_NOT_SORTED_PAGES;
DBUG_RETURN(key_cache_write(info->s->key_cache,
- info->s->kfile, pos , level, buff,
+ info->s->kfile, &info->s->dirty_part_map,
+ pos , level, buff,
sizeof(buff),
(uint) keyinfo->block_length,
(int) (info->lock_type != F_UNLCK)));
=== modified file 'storage/myisam/mi_panic.c'
--- a/storage/myisam/mi_panic.c 2006-12-31 00:32:21 +0000
+++ b/storage/myisam/mi_panic.c 2010-06-29 00:10:53 +0000
@@ -47,7 +47,8 @@ int mi_panic(enum ha_panic_function flag
if (info->s->options & HA_OPTION_READ_ONLY_DATA)
break;
#endif
- if (flush_key_blocks(info->s->key_cache, info->s->kfile, FLUSH_RELEASE))
+ if (flush_key_blocks(info->s->key_cache, info->s->kfile,
+ &info->s->dirty_part_map, FLUSH_RELEASE))
error=my_errno;
if (info->opt_flag & WRITE_CACHE_USED)
if (flush_io_cache(&info->rec_cache))
=== modified file 'storage/myisam/mi_preload.c'
--- a/storage/myisam/mi_preload.c 2007-05-24 12:26:10 +0000
+++ b/storage/myisam/mi_preload.c 2010-06-29 00:10:53 +0000
@@ -65,7 +65,7 @@ int mi_preload(MI_INFO *info, ulonglong
}
}
else
- block_length= share->key_cache->key_cache_block_size;
+ block_length= share->key_cache->param_block_size;
length= info->preload_buff_size/block_length * block_length;
set_if_bigger(length, block_length);
@@ -73,7 +73,8 @@ int mi_preload(MI_INFO *info, ulonglong
if (!(buff= (uchar *) my_malloc(length, MYF(MY_WME))))
DBUG_RETURN(my_errno= HA_ERR_OUT_OF_MEM);
- if (flush_key_blocks(share->key_cache,share->kfile, FLUSH_RELEASE))
+ if (flush_key_blocks(share->key_cache, share->kfile, &share->dirty_part_map,
+ FLUSH_RELEASE))
goto err;
do
=== modified file 'storage/myisam/mi_test1.c'
--- a/storage/myisam/mi_test1.c 2008-04-28 16:24:05 +0000
+++ b/storage/myisam/mi_test1.c 2010-06-29 00:10:53 +0000
@@ -49,7 +49,8 @@ int main(int argc,char *argv[])
MY_INIT(argv[0]);
my_init();
if (key_cacheing)
- init_key_cache(dflt_key_cache,KEY_CACHE_BLOCK_SIZE,IO_SIZE*16,0,0);
+ init_key_cache(dflt_key_cache,KEY_CACHE_BLOCK_SIZE,IO_SIZE*16,0,0,
+ DEFAULT_KEY_CACHE_PARTITIONS);
get_options(argc,argv);
exit(run_test("test1"));
=== modified file 'storage/myisam/mi_test2.c'
--- a/storage/myisam/mi_test2.c 2008-04-28 16:24:05 +0000
+++ b/storage/myisam/mi_test2.c 2010-06-29 00:10:53 +0000
@@ -215,7 +215,8 @@ int main(int argc, char *argv[])
if (!silent)
printf("- Writing key:s\n");
if (key_cacheing)
- init_key_cache(dflt_key_cache,key_cache_block_size,key_cache_size,0,0);
+ init_key_cache(dflt_key_cache,key_cache_block_size,key_cache_size,0,0,
+ DEFAULT_KEY_CACHE_PARTITIONS);
if (do_locking)
mi_lock_database(file,F_WRLCK);
if (write_cacheing)
=== modified file 'storage/myisam/mi_test3.c'
--- a/storage/myisam/mi_test3.c 2008-04-28 16:24:05 +0000
+++ b/storage/myisam/mi_test3.c 2010-06-29 00:10:53 +0000
@@ -177,7 +177,8 @@ void start_test(int id)
exit(1);
}
if (key_cacheing && rnd(2) == 0)
- init_key_cache(dflt_key_cache, KEY_CACHE_BLOCK_SIZE, 65536L, 0, 0);
+ init_key_cache(dflt_key_cache, KEY_CACHE_BLOCK_SIZE, 65536L, 0, 0,
+ DEFAULT_KEY_CACHE_PARTITIONS);
printf("Process %d, pid: %d\n",id,getpid()); fflush(stdout);
for (error=i=0 ; i < tests && !error; i++)
=== modified file 'storage/myisam/myisam_ftdump.c'
--- a/storage/myisam/myisam_ftdump.c 2007-05-10 09:59:39 +0000
+++ b/storage/myisam/myisam_ftdump.c 2010-06-29 00:10:53 +0000
@@ -83,7 +83,7 @@ int main(int argc,char *argv[])
usage();
}
- init_key_cache(dflt_key_cache,MI_KEY_BLOCK_LENGTH,USE_BUFFER_INIT, 0, 0);
+ init_key_cache(dflt_key_cache,MI_KEY_BLOCK_LENGTH,USE_BUFFER_INIT, 0, 0, 0);
if (!(info=mi_open(argv[0], O_RDONLY,
HA_OPEN_ABORT_IF_LOCKED|HA_OPEN_FROM_SQL_LAYER)))
=== modified file 'storage/myisam/myisamchk.c'
--- a/storage/myisam/myisamchk.c 2009-09-19 21:21:29 +0000
+++ b/storage/myisam/myisamchk.c 2010-06-29 00:10:53 +0000
@@ -1102,7 +1102,7 @@ static int myisamchk(HA_CHECK *param, ch
{
if (param->testflag & (T_EXTEND | T_MEDIUM))
VOID(init_key_cache(dflt_key_cache,opt_key_cache_block_size,
- (size_t) param->use_buffers, 0, 0));
+ (size_t) param->use_buffers, 0, 0, 0));
VOID(init_io_cache(&param->read_cache,datafile,
(uint) param->read_buffer_length,
READ_CACHE,
@@ -1116,7 +1116,8 @@ static int myisamchk(HA_CHECK *param, ch
HA_OPTION_COMPRESS_RECORD)) ||
(param->testflag & (T_EXTEND | T_MEDIUM)))
error|=chk_data_link(param, info, test(param->testflag & T_EXTEND));
- error|=flush_blocks(param, share->key_cache, share->kfile);
+ error|=flush_blocks(param, share->key_cache, share->kfile,
+ &share->dirty_part_map);
VOID(end_io_cache(&param->read_cache));
}
if (!error)
@@ -1526,7 +1527,7 @@ static int mi_sort_records(HA_CHECK *par
DBUG_RETURN(0); /* Nothing to do */
init_key_cache(dflt_key_cache, opt_key_cache_block_size,
- (size_t) param->use_buffers, 0, 0);
+ (size_t) param->use_buffers, 0, 0, 0);
if (init_io_cache(&info->rec_cache,-1,(uint) param->write_buffer_length,
WRITE_CACHE,share->pack.header_length,1,
MYF(MY_WME | MY_WAIT_IF_FULL)))
@@ -1641,8 +1642,8 @@ err:
my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR));
sort_info.buff=0;
share->state.sortkey=sort_key;
- DBUG_RETURN(flush_blocks(param, share->key_cache, share->kfile) |
- got_error);
+ DBUG_RETURN(flush_blocks(param, share->key_cache, share->kfile,
+ &share->dirty_part_map) | got_error);
} /* sort_records */
=== modified file 'storage/myisam/myisamdef.h'
--- a/storage/myisam/myisamdef.h 2009-10-06 06:57:22 +0000
+++ b/storage/myisam/myisamdef.h 2010-06-29 00:10:53 +0000
@@ -174,6 +174,8 @@ typedef struct st_mi_isam_share
*index_file_name;
uchar *file_map; /* mem-map of file if possible */
KEY_CACHE *key_cache; /* ref to the current key cache */
+ /* To mark the key cache partitions containing dirty pages for this file */
+ ulonglong dirty_part_map;
MI_DECODE_TREE *decode_trees;
uint16 *decode_tables;
/* Function to use for a row checksum. */
@@ -732,7 +734,8 @@ void mi_check_print_info _VARARGS((HA_CH
#ifdef THREAD
pthread_handler_t thr_find_all_keys(void *arg);
#endif
-int flush_blocks(HA_CHECK *param, KEY_CACHE *key_cache, File file);
+int flush_blocks(HA_CHECK *param, KEY_CACHE *key_cache, File file,
+ ulonglong *dirty_part_map);
#ifdef __cplusplus
}
#endif
=== modified file 'storage/myisam/myisamlog.c'
--- a/storage/myisam/myisamlog.c 2008-02-18 22:35:17 +0000
+++ b/storage/myisam/myisamlog.c 2010-06-29 00:10:53 +0000
@@ -333,7 +333,7 @@ static int examine_log(char * file_name,
init_tree(&tree,0,0,sizeof(file_info),(qsort_cmp2) file_info_compare,1,
(tree_element_free) file_info_free, NULL);
VOID(init_key_cache(dflt_key_cache,KEY_CACHE_BLOCK_SIZE,KEY_CACHE_SIZE,
- 0, 0));
+ 0, 0, 0));
files_open=0; access_time=0;
while (access_time++ != number_of_commands &&