[Commits] 056b6fe1d59: MDEV-17297: stats.records=0 for a table of Archive engine when it has rows, when we run ANALYZE command
revision-id: 056b6fe1d59b515a6380e50783b3c4ad0f93959f (mariadb-10.1.38-110-g056b6fe1d59)
parent(s): 0bb924e18c338fa2dc901041aad09998b399efc1
author: Sergei Petrunia
committer: Sergei Petrunia
timestamp: 2019-04-18 23:12:43 +0300
message:

MDEV-17297: stats.records=0 for a table of Archive engine when it has rows, when we run ANALYZE command

Archive storage engine assumed that any query that attempts to read from
the table will call ha_archive::info() beforehand. ha_archive would flush
un-written data in that call (this would make it visible for the reads).

Break this assumption. Flush the data when the table is opened for reading.
This way, one can do multiple write statements without causing a flush, but
as soon as we might need the data, we flush it.

---
 mysql-test/suite/archive/archive_eits.result | 24 +++++++++++++
 mysql-test/suite/archive/archive_eits.test   | 32 ++++++++++++++++++
 storage/archive/ha_archive.cc                | 50 ++++++++++++++++++----------
 storage/archive/ha_archive.h                 |  3 ++
 4 files changed, 92 insertions(+), 17 deletions(-)

diff --git a/mysql-test/suite/archive/archive_eits.result b/mysql-test/suite/archive/archive_eits.result
new file mode 100644
index 00000000000..e077c2e4954
--- /dev/null
+++ b/mysql-test/suite/archive/archive_eits.result
@@ -0,0 +1,24 @@
+drop table if exists t1;
+#
+# MDEV-17297: stats.records=0 for a table of Archive engine when it has rows, when we run ANALYZE command
+#
+CREATE TABLE t1 (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POINT)engine=archive;
+INSERT INTO t1 VALUES
+(101, PointFromText('POINT(10 10)')),
+(102, PointFromText('POINT(20 10)')),
+(103, PointFromText('POINT(20 20)')),
+(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
+set @tmp1= @@optimizer_use_condition_selectivity;
+set @tmp2= @@use_stat_tables;
+set optimizer_use_condition_selectivity=4;
+set use_stat_tables=PREFERABLY;
+ANALYZE TABLE t1;
+Table	Op	Msg_type	Msg_text
+test.t1	analyze	status	Engine-independent statistics collected
+test.t1	analyze	note	The storage engine for the table doesn't support analyze
+select * from mysql.table_stats where table_name='t1' and db_name=database();
+db_name	table_name	cardinality
+test	t1	4
+drop table t1;
+set optimizer_use_condition_selectivity=@tmp1;
+set use_stat_tables=@tmp2;
diff --git a/mysql-test/suite/archive/archive_eits.test b/mysql-test/suite/archive/archive_eits.test
new file mode 100644
index 00000000000..04c4ccdb709
--- /dev/null
+++ b/mysql-test/suite/archive/archive_eits.test
@@ -0,0 +1,32 @@
+-- source include/have_archive.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+--echo #
+--echo # MDEV-17297: stats.records=0 for a table of Archive engine when it has rows, when we run ANALYZE command
+--echo #
+
+CREATE TABLE t1 (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POINT)engine=archive;
+INSERT INTO t1 VALUES
+(101, PointFromText('POINT(10 10)')),
+(102, PointFromText('POINT(20 10)')),
+(103, PointFromText('POINT(20 20)')),
+(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
+
+set @tmp1= @@optimizer_use_condition_selectivity;
+set @tmp2= @@use_stat_tables;
+
+set optimizer_use_condition_selectivity=4;
+set use_stat_tables=PREFERABLY;
+ANALYZE TABLE t1;
+
+select * from mysql.table_stats where table_name='t1' and db_name=database();
+
+drop table t1;
+
+set optimizer_use_condition_selectivity=@tmp1;
+set use_stat_tables=@tmp2;
+
+
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index d70757e8142..e985f75d646 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -1650,7 +1650,6 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
   DBUG_VOID_RETURN;
 }
 
-
 /*
   Hints for optimizer, see ha_tina for more information
 */
@@ -1658,22 +1657,7 @@ int ha_archive::info(uint flag)
 {
   DBUG_ENTER("ha_archive::info");
 
-  mysql_mutex_lock(&share->mutex);
-  if (share->dirty)
-  {
-    DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
-    DBUG_ASSERT(share->archive_write_open);
-    azflush(&(share->archive_write), Z_SYNC_FLUSH);
-    share->dirty= FALSE;
-  }
-
-  /*
-    This should be an accurate number now, though bulk and delayed inserts can
-    cause the number to be inaccurate.
-  */
-  stats.records= share->rows_recorded;
-  mysql_mutex_unlock(&share->mutex);
-
+  flush_and_clear_pending_writes();
   stats.deleted= 0;
 
   DBUG_PRINT("ha_archive", ("Stats rows is %d\n", (int)stats.records));
@@ -1716,6 +1700,38 @@ int ha_archive::info(uint flag)
 }
 
 
+int ha_archive::external_lock(THD *thd, int lock_type)
+{
+  if (lock_type == F_RDLCK)
+  {
+    // We are going to read from the table. Flush any pending writes that we
+    // may have
+    flush_and_clear_pending_writes();
+  }
+  return 0;
+}
+
+
+void ha_archive::flush_and_clear_pending_writes()
+{
+  mysql_mutex_lock(&share->mutex);
+  if (share->dirty)
+  {
+    DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
+    DBUG_ASSERT(share->archive_write_open);
+    azflush(&(share->archive_write), Z_SYNC_FLUSH);
+    share->dirty= FALSE;
+  }
+
+  /*
+    This should be an accurate number now, though bulk and delayed inserts can
+    cause the number to be inaccurate.
+  */
+  stats.records= share->rows_recorded;
+  mysql_mutex_unlock(&share->mutex);
+}
+
+
 /*
   This method tells us that a bulk insert operation is about to occur. We set
   a flag which will keep write_row from saying that its data is dirty. This in
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index 56ff566db8c..a74374a340f 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -169,5 +169,8 @@ class ha_archive: public handler
   int unpack_row(azio_stream *file_to_read, uchar *record);
   unsigned int pack_row(uchar *record, azio_stream *writer);
   bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
+  int external_lock(THD *thd, int lock_type);
+private:
+  void flush_and_clear_pending_writes();
 };
 
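
For context, the user-visible behaviour the new test exercises boils down to the
sequence below. This is an illustrative sketch only, condensed from
archive_eits.test above; it assumes the Archive engine and engine-independent
statistics are available, and the exact table_stats output is as recorded in the
.result file.

  -- Insert rows and collect engine-independent statistics in the same session.
  -- Before this fix the rows could still be buffered by the Archive writer when
  -- ANALYZE read the table, so stats.records came back as 0 (MDEV-17297).
  CREATE TABLE t1 (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POINT) ENGINE=ARCHIVE;
  INSERT INTO t1 VALUES
    (101, PointFromText('POINT(10 10)')),
    (102, PointFromText('POINT(20 10)')),
    (103, PointFromText('POINT(20 20)')),
    (104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));

  SET use_stat_tables=PREFERABLY;
  ANALYZE TABLE t1;  -- with the fix, the read lock flushes pending writes first

  -- Now records cardinality 4 for test.t1 instead of 0.
  SELECT * FROM mysql.table_stats WHERE table_name='t1' AND db_name=database();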