[Commits] b0fe082b365: Merge remote-tracking branch 'origin/5.5-galera' into 10.0-galera
revision-id: b0fe082b365d989fcf905e5c40c3fe60fd756858 (mariadb-galera-10.0.36-3-gb0fe082b365)
parent(s): 93ff64ebd7a7b2a534acc3ee8bf14cbfd8658d0f 2ee9343c873ad31c2dd0d2175dec2ef3b48ca5ba
author: Jan Lindström
committer: Jan Lindström
timestamp: 2018-10-30 13:22:52 +0200
message: Merge remote-tracking branch 'origin/5.5-galera' into 10.0-galera

 .gitignore | 1 +
 CMakeLists.txt | 2 +-
 client/CMakeLists.txt | 1 +
 client/mysql.cc | 6 +-
 cmake/libutils.cmake | 12 +-
 cmake/os/Windows.cmake | 5 +-
 cmake/plugin.cmake | 3 +-
 cmake/wsrep.cmake | 2 +-
 cmake/zlib.cmake | 18 +-
 libmysqld/CMakeLists.txt | 2 +-
 .../rpl_tests/rpl_lower_case_table_names.test | 141 ++
 mysql-test/lib/My/Config.pm | 6 +-
 mysql-test/mysql-test-run.pl | 24 +-
 mysql-test/r/alter_table.result | 50 +
 mysql-test/r/alter_table.test | 1908 ++++++++++++++++
 mysql-test/r/derived_opt.result | 35 +-
 mysql-test/r/derived_view.result | 6 +-
 mysql-test/r/func_isnull.result | 20 +
 mysql-test/r/func_time.result | 161 ++
 mysql-test/r/grant.result | 29 +
 mysql-test/r/grant.test | 2291 ++++++++++++++++++++
 mysql-test/r/innodb_mrr_cpk.result | 2 +-
 mysql-test/r/join.result | 6 +-
 mysql-test/r/lowercase_fs_off.result | 62 +
 mysql-test/r/lowercase_fs_off.test | 124 ++
 mysql-test/r/mysqld--help.result | 7 +
 mysql-test/r/sp-security.result | 23 +
 mysql-test/r/sp-security.test | 1069 +++++++++
 mysql-test/r/sp.result | 17 +
 mysql-test/r/subselect_mat.result | 6 +-
 mysql-test/r/subselect_sj_mat.result | 6 +-
 mysql-test/r/type_float.result | 36 +
 mysql-test/r/type_float.test | 417 ++++
 mysql-test/suite/heap/heap_btree.result | 4 +
 mysql-test/suite/heap/heap_btree.test | 9 +
 mysql-test/suite/maria/fulltext2.result | 86 +
 mysql-test/suite/maria/fulltext2.test | 77 +
 mysql-test/suite/maria/maria.result | 4 +
 mysql-test/suite/maria/maria.test | 10 +
 mysql-test/suite/parts/r/update_and_cache.result | 7 +
 mysql-test/suite/parts/t/update_and_cache.test | 12 +
 mysql-test/suite/rpl/r/rpl_15919.result | 16 +
 .../rpl/r/rpl_lcase_tblnames_rewrite_db.result | 33 +
 .../suite/rpl/r/rpl_row_lcase_tblnames.result | 47 +
 mysql-test/suite/rpl/r/rpl_row_spatial.result | 14 +
 .../suite/rpl/r/rpl_stm_lcase_tblnames.result | 44 +
 mysql-test/suite/rpl/t/rpl_15919-master.opt | 1 +
 mysql-test/suite/rpl/t/rpl_15919-slave.opt | 1 +
 mysql-test/suite/rpl/t/rpl_15919.test | 16 +
 .../rpl/t/rpl_lcase_tblnames_rewrite_db-slave.opt | 1 +
 .../suite/rpl/t/rpl_lcase_tblnames_rewrite_db.test | 60 +
 .../suite/rpl/t/rpl_row_lcase_tblnames-slave.opt | 1 +
 mysql-test/suite/rpl/t/rpl_row_lcase_tblnames.test | 12 +
 mysql-test/suite/rpl/t/rpl_row_spatial.test | 17 +
 .../suite/rpl/t/rpl_stm_lcase_tblnames-slave.opt | 1 +
 mysql-test/suite/rpl/t/rpl_stm_lcase_tblnames.test | 12 +
 mysql-test/suite/sys_vars/r/all_vars.result | 1 +
 mysql-test/t/alter_table.test | 84 +-
 mysql-test/t/derived_opt.test | 38 +
 mysql-test/t/func_isnull.test | 16 +
 mysql-test/t/func_time.test | 53 +
 mysql-test/t/grant.test | 59 +-
 mysql-test/t/join.test | 3 +-
 mysql-test/t/lowercase_fs_off.test | 12 +-
 mysql-test/t/sp-security.test | 19 +-
 mysql-test/t/sp.test | 21 +
 mysql-test/t/type_float.test | 42 +-
 mysys/CMakeLists.txt | 1 +
 mysys/mf_iocache2.c | 4 +-
 mysys/my_alloc.c | 3 +-
 res | 22 +
 scripts/mysql_system_tables_fix.sql | 11 +-
 sql/CMakeLists.txt | 4 +-
 sql/field.cc | 3 +-
 sql/field_conv.cc | 2 +-
 sql/item.cc | 195 +-
 sql/item.h | 92 +-
 sql/item_cmpfunc.cc | 13 +
 sql/item_cmpfunc.h | 1 +
 sql/item_func.cc | 5 +-
 sql/item_func.h | 10 +-
 sql/item_sum.cc | 6 +-
 sql/item_sum.h | 19 +-
 sql/item_timefunc.cc | 29 +-
 sql/key.cc | 3 +-
 sql/log_event.cc | 70 +-
 sql/mysqld.cc | 22 +-
 sql/protocol.cc | 8 +-
 sql/sql_acl.cc | 4 +-
 sql/sql_base.cc | 14 +-
 sql/sql_delete.cc | 1 +
 sql/sql_error.h | 7 +-
 sql/sql_lex.cc | 2 +-
 sql/sql_list.h | 5 +-
 sql/sql_select.cc | 8 +
 sql/sql_show.cc | 2 +-
 sql/sql_time.cc | 2 +-
 sql/sql_type_int.h | 44 +
 sql/sql_update.cc | 2 +-
 sql/sys_vars.cc | 13 +
 sql/table.h | 10 +
 sql/wsrep_mysqld.cc | 1 +
 sql/wsrep_mysqld.h | 2 +-
 sql/wsrep_mysqld_c.h | 26 +
 storage/heap/hp_create.c | 22 +-
 storage/innobase/handler/ha_innodb.cc | 64 +-
 storage/innobase/handler/ha_innodb.h | 6 +-
 storage/innobase/row/row0ins.cc | 49 +-
 storage/maria/ma_blockrec.c | 18 +-
 storage/maria/ma_check.c | 9 +-
 storage/maria/ma_ft_boolean_search.c | 2 +-
 storage/maria/ma_write.c | 15 +-
 storage/xtradb/.clang-format | 111 +
 storage/xtradb/handler/ha_innodb.cc | 64 +-
 storage/xtradb/handler/ha_innodb.h | 6 +-
 storage/xtradb/row/row0ins.cc | 49 +-
 support-files/mysql.server.sh | 31 +-
 zlib/CMakeLists.txt | 155 +-
 zlib/ChangeLog | 666 +++++-
 zlib/FAQ | 267 ++-
 zlib/INDEX | 41 +-
 zlib/README | 94 +-
 zlib/README.MySQL | 16 -
 zlib/adler32.c | 113 +-
 zlib/algorithm.txt | 209 --
 zlib/amiga/Makefile.pup | 69 +
 zlib/amiga/Makefile.sas | 68 +
 zlib/compress.c | 45 +-
 zlib/crc32.c | 149 +-
 zlib/crc32.h | 2 +-
 zlib/deflate.c | 1269 +++++++----
 zlib/deflate.h | 78 +-
 zlib/gzclose.c | 25 +
 zlib/gzguts.h | 218 ++
 zlib/gzio.c | 1031 ---------
 zlib/gzlib.c | 637 ++++++
 zlib/gzread.c | 654 ++++++
 zlib/gzwrite.c | 665 ++++++
 zlib/infback.c | 107 +-
 zlib/inffast.c | 159 +-
 zlib/inffast.h | 4 +-
 zlib/inffixed.h | 6 +-
 zlib/inflate.c | 507 +++--
 zlib/inflate.h | 38 +-
 zlib/inftrees.c | 109 +-
 zlib/inftrees.h | 27 +-
 zlib/make_vms.com | 867 ++++++++
 zlib/msdos/Makefile.bor | 115 +
 zlib/msdos/Makefile.dj2 | 104 +
 zlib/msdos/Makefile.emx | 69 +
 zlib/msdos/Makefile.msc | 112 +
 zlib/msdos/Makefile.tc | 100 +
 zlib/nintendods/README | 5 +
 zlib/old/Makefile.emx | 69 +
 zlib/old/Makefile.riscos | 151 ++
 zlib/old/README | 3 +
 zlib/old/descrip.mms | 48 +
 zlib/old/os2/Makefile.os2 | 136 ++
 zlib/old/os2/zlib.def | 51 +
 zlib/old/visual-basic.txt | 160 ++
 zlib/os400/README400 | 48 +
 zlib/os400/bndsrc | 119 +
 zlib/os400/make.sh | 366 ++++
 zlib/os400/zlib.inc | 527 +++++
 zlib/qnx/package.qpg | 141 ++
 zlib/test/example.c | 602 +++++
 zlib/test/infcover.c | 671 ++++++
 zlib/test/minigzip.c | 651 ++++++
 zlib/treebuild.xml | 116 +
 zlib/trees.c | 246 +--
 zlib/trees.h | 4 +-
 zlib/uncompr.c | 100 +-
 zlib/watcom/watcom_f.mak | 43 +
 zlib/watcom/watcom_l.mak | 43 +
 zlib/win32/DLL_FAQ.txt | 397 ++++
 zlib/win32/Makefile.bor | 110 +
 zlib/win32/Makefile.gcc | 182 ++
 zlib/win32/Makefile.msc | 163 ++
 zlib/win32/README-WIN32.txt | 103 +
 zlib/win32/VisualC.txt | 3 +
 zlib/win32/zlib.def | 94 +
 zlib/win32/zlib1.rc | 40 +
 zlib/zconf.h.cmakein | 536 +++++
 zlib/{zconf.h => zconf.h.in} | 324 ++-
 zlib/zlib.3 | 122 +-
 zlib/zlib.h | 1639 +++++++++-----
 zlib/zlib.pc.cmakein | 13 +
 zlib/zlib.pc.in | 13 +
 zlib/zlib2ansi | 152 ++
 zlib/zutil.c | 101 +-
 zlib/zutil.h | 192 +-
 191 files changed, 21729 insertions(+), 3993 deletions(-)

diff --cc .gitignore
index 2b25ccc6fdc,673af24783b..8c9b4acec5e
--- a/.gitignore
+++ b/.gitignore
@@@ -230,238 -237,7 +230,239 @@@ storage/mroonga/vendor/groonga/src/grns
 storage/mroonga/vendor/groonga/src/groonga
 storage/mroonga/vendor/groonga/src/groonga-benchmark
 storage/mroonga/vendor/groonga/src/suggest/groonga-suggest-create-dataset
+storage/mroonga/mysql-test/mroonga/storage/r/information_schema_plugins.result
+storage/mroonga/mysql-test/mroonga/storage/r/variable_version.result + zlib/zconf.h +# C and C++ + +# Compiled Object files +*.slo +*.o +*.ko +*.obj +*.elf +*.exp +*.dep +*.idb + +# Precompiled Headers +*.gch +*.pch + +# Compiled Static libraries +*.lib +*.a +*.la +*.lai +*.lo + +# Compiled Dynamic libraries +*.so +*.so.* +*.dylib +*.dll + +# Executables +*.exe +*.out +*.app +*.i*86 +*.x86_64 +*.hex + + +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. + +# User-specific files +*.suo +*.user +*.userosscache +*.sln.docstates +*.sln + +*.vcproj +*.vcproj.* +*.vcproj.*.* +*.vcproj.*.*.* +*.vcxproj +*.vcxproj.* +*.vcxproj.*.* +*.vcxproj.*.*.* + +# Build results +[Dd]ebug/ +[Dd]ebugPublic/ +[Rr]elease/ +[Rr]eleases/ +x64/ +x86/ +build/ +bld/ +[Bb]in/ +[Oo]bj/ + +# Roslyn cache directories +*.ide/ + +# MSTest test Results +[Tt]est[Rr]esult*/ +[Bb]uild[Ll]og.* + +#NUNIT +*.VisualState.xml +TestResult.xml + +# Build Results of an ATL Project +[Dd]ebugPS/ +[Rr]eleasePS/ +dlldata.c + +*_i.c +*_p.c +*_i.h +*.ilk +*.meta +*.pdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp +*.tmp_proj +*.log +*.vspscc +*.vssscc +.builds +*.pidb +*.svclog +*.scc + +# Chutzpah Test files +_Chutzpah* + +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opensdf +*.sdf +*.cachefile + +# Visual Studio profiler +*.psess +*.vsp +*.vspx + +# TFS 2012 Local Workspace +$tf/ + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ +*.[Rr]e[Ss]harper +*.DotSettings.user + +# JustCode is a .NET coding addin-in +.JustCode + +# TeamCity is a build add-in +_TeamCity* + +# DotCover is a Code Coverage Tool +*.dotCover + +# NCrunch +_NCrunch_* +.*crunch*.local.xml + +# MightyMoose +*.mm.* +AutoTest.Net/ + +# Web workbench (sass) +.sass-cache/ + +# Installshield output folder +[Ee]xpress/ + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish/ + +# Publish Web Output +*.[Pp]ublish.xml +*.azurePubxml +# TODO: Comment the next line if you want to checkin your web deploy settings +# but database connection strings (with potential passwords) will be unencrypted +*.pubxml +*.publishproj + +# NuGet Packages +*.nupkg +# The packages folder can be ignored because of Package Restore +**/packages/* +# except build/, which is used as an MSBuild target. +!**/packages/build/ +# If using the old MSBuild-Integrated Package Restore, uncomment this: +#!**/packages/repositories.config + +# Windows Azure Build Output +csx/ +*.build.csdef + +# Windows Store app package directory +AppPackages/ + +# Others +# sql/ +*.Cache +ClientBin/ +[Ss]tyle[Cc]op.* +~$* +*~ +*.dbmdl +*.dbproj.schemaview +*.pfx +*.publishsettings +node_modules/ + +# RIA/Silverlight projects +Generated_Code/ + +# Backup & report files from converting an old project file +# to a newer Visual Studio version. 
Backup files are not needed, +# because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML +UpgradeLog*.htm + +# SQL Server files +*.mdf +*.ldf + +# Business Intelligence projects +*.rdl.data +*.bim.layout +*.bim_*.settings + +# Microsoft Fakes +FakesAssemblies/ # macOS garbage .DS_Store diff --cc mysql-test/mysql-test-run.pl index d5758abb03e,7c65e8f68b8..63801357dd5 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@@ -137,8 -137,8 +137,6 @@@ my $opt_start_dirty my $opt_start_exit; my $start_only; --our @global_suppressions; -- END { if ( defined $opt_tmpdir_pid and $opt_tmpdir_pid == $$ ) { @@@ -190,8 -188,9 +188,6 @@@ my @DEFAULT_SUITES= qw sys_vars- unit- vcol- -- wsrep- -- galera- ); my $opt_suites; @@@ -360,10 -357,8 +356,9 @@@ my $source_dist= -d "../sql" my $opt_max_save_core= env_or_val(MTR_MAX_SAVE_CORE => 5); my $opt_max_save_datadir= env_or_val(MTR_MAX_SAVE_DATADIR => 20); my $opt_max_test_fail= env_or_val(MTR_MAX_TEST_FAIL => 10); +my $opt_core_on_failure= 0; my $opt_parallel= $ENV{MTR_PARALLEL} || 1; - my $opt_port_group_size = $ENV{MTR_PORT_GROUP_SIZE} || 20; # lock file to stop tests my $opt_stop_file= $ENV{MTR_STOP_FILE}; @@@ -1469,7 -1461,7 +1463,7 @@@ sub command_line_setup foreach my $fs (@tmpfs_locations) { - if ( -d $fs && ! -l $fs ) - if ( -d $fs && -w $fs ) ++ if ( -d $fs && ! -l $fs && -w $fs ) { my $template= "var_${opt_build_thread}_XXXX"; $opt_mem= tempdir( $template, DIR => $fs, CLEANUP => 0); @@@ -3190,6 -3158,6 +3184,7 @@@ sub ndbcluster_start ($) return 0; } ++ sub mysql_server_start($) { my ($mysqld, $tinfo) = @_; @@@ -4817,7 -4749,7 +4812,6 @@@ sub extract_warning_lines ($$) # Perl code. my @antipatterns = ( -- @global_suppressions, qr/error .*connecting to master/, qr/Plugin 'ndbcluster' will be forced to shutdown/, qr/InnoDB: Error: in ALTER TABLE `test`.`t[12]`/, diff --cc mysql-test/r/alter_table.result index c6e3c7e31d9,56bb5fe0020..b0c5664c92a --- a/mysql-test/r/alter_table.result +++ b/mysql-test/r/alter_table.result @@@ -1402,849 -1392,53 +1402,899 @@@ t1 CREATE TABLE `t1` `consultant_id` bigint(20) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8 DROP TABLE t1; + # + # BUG#27788685: NO WARNING WHEN TRUNCATING A STRING WITH DATA LOSS + # + SET GLOBAL max_allowed_packet=17825792; + CREATE TABLE t1 (t1_fld1 TEXT); + CREATE TABLE t2 (t2_fld1 MEDIUMTEXT); + CREATE TABLE t3 (t3_fld1 LONGTEXT); + INSERT INTO t1 VALUES (REPEAT('a',300)); + INSERT INTO t2 VALUES (REPEAT('b',65680)); + INSERT INTO t3 VALUES (REPEAT('c',16777300)); + SELECT LENGTH(t1_fld1) FROM t1; + LENGTH(t1_fld1) + 300 + SELECT LENGTH(t2_fld1) FROM t2; + LENGTH(t2_fld1) + 65680 + SELECT LENGTH(t3_fld1) FROM t3; + LENGTH(t3_fld1) + 16777300 + # With strict mode + SET SQL_MODE='STRICT_ALL_TABLES'; + ALTER TABLE t1 CHANGE `t1_fld1` `my_t1_fld1` TINYTEXT; + ERROR 22001: Data too long for column 'my_t1_fld1' at row 1 + ALTER TABLE t2 CHANGE `t2_fld1` `my_t2_fld1` TEXT; + ERROR 22001: Data too long for column 'my_t2_fld1' at row 1 + ALTER TABLE t3 CHANGE `t3_fld1` `my_t3_fld1` MEDIUMTEXT; + ERROR 22001: Data too long for column 'my_t3_fld1' at row 1 + # With non-strict mode + SET SQL_MODE=''; + ALTER TABLE t1 CHANGE `t1_fld1` `my_t1_fld1` TINYTEXT; + Warnings: + Warning 1265 Data truncated for column 'my_t1_fld1' at row 1 + ALTER TABLE t2 CHANGE `t2_fld1` `my_t2_fld1` TEXT; + Warnings: + Warning 1265 Data truncated for column 'my_t2_fld1' at row 1 + ALTER TABLE t3 CHANGE `t3_fld1` `my_t3_fld1` MEDIUMTEXT; + Warnings: + Warning 1265 Data truncated for column 
'my_t3_fld1' at row 1 + SELECT LENGTH(my_t1_fld1) FROM t1; + LENGTH(my_t1_fld1) + 255 + SELECT LENGTH(my_t2_fld1) FROM t2; + LENGTH(my_t2_fld1) + 65535 + SELECT LENGTH(my_t3_fld1) FROM t3; + LENGTH(my_t3_fld1) + 16777215 + DROP TABLE t1, t2, t3; + SET SQL_MODE=default; + SET GLOBAL max_allowed_packet=default; +CREATE TABLE t1 ( +id INT(11) NOT NULL, +x_param INT(11) DEFAULT NULL, +PRIMARY KEY (id) +) ENGINE=MYISAM; +ALTER TABLE t1 ADD COLUMN IF NOT EXISTS id INT, +ADD COLUMN IF NOT EXISTS lol INT AFTER id; +Warnings: +Note 1060 Duplicate column name 'id' +ALTER TABLE t1 ADD COLUMN IF NOT EXISTS lol INT AFTER id; +Warnings: +Note 1060 Duplicate column name 'lol' +ALTER TABLE t1 DROP COLUMN IF EXISTS lol; +ALTER TABLE t1 DROP COLUMN IF EXISTS lol; +Warnings: +Note 1091 Can't DROP 'lol'; check that column/key exists +ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); +ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); +Warnings: +Note 1061 Duplicate key name 'x_param' +ALTER TABLE t1 MODIFY IF EXISTS lol INT; +Warnings: +Note 1054 Unknown column 'lol' in 't1' +DROP INDEX IF EXISTS x_param ON t1; +DROP INDEX IF EXISTS x_param ON t1; +Warnings: +Note 1091 Can't DROP 'x_param'; check that column/key exists +CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); +CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); +Warnings: +Note 1061 Duplicate key name 'x_param1' +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) NOT NULL, + `x_param` int(11) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `x_param1` (`x_param`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 ( +id INT(11) NOT NULL, +x_param INT(11) DEFAULT NULL, +PRIMARY KEY (id) +) ENGINE=INNODB; +CREATE TABLE t2 ( +id INT(11) NOT NULL) ENGINE=INNODB; +ALTER TABLE t1 ADD COLUMN IF NOT EXISTS id INT, +ADD COLUMN IF NOT EXISTS lol INT AFTER id; +Warnings: +Note 1060 Duplicate column name 'id' +ALTER TABLE t1 ADD COLUMN IF NOT EXISTS lol INT AFTER id; +Warnings: +Note 1060 Duplicate column name 'lol' +ALTER TABLE t1 DROP COLUMN IF EXISTS lol; +ALTER TABLE t1 DROP COLUMN IF EXISTS lol; +Warnings: +Note 1091 Can't DROP 'lol'; check that column/key exists +ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); +ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); +Warnings: +Note 1061 Duplicate key name 'x_param' +ALTER TABLE t1 MODIFY IF EXISTS lol INT; +Warnings: +Note 1054 Unknown column 'lol' in 't1' +DROP INDEX IF EXISTS x_param ON t1; +DROP INDEX IF EXISTS x_param ON t1; +Warnings: +Note 1091 Can't DROP 'x_param'; check that column/key exists +CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); +CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); +Warnings: +Note 1061 Duplicate key name 'x_param1' +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) NOT NULL, + `x_param` int(11) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `x_param1` (`x_param`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +ALTER TABLE t2 ADD FOREIGN KEY IF NOT EXISTS fk(id) REFERENCES t1(id); +ALTER TABLE t2 ADD FOREIGN KEY IF NOT EXISTS fk(id) REFERENCES t1(id); +Warnings: +Note 1061 Duplicate key name 'fk' +ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS fk; +ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS fk; +Warnings: +Note 1091 Can't DROP 'fk'; check that column/key exists +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `id` int(11) NOT NULL, + KEY `fk` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +ALTER TABLE t2 ADD FOREIGN KEY (id) REFERENCES t1(id); +ALTER TABLE t2 ADD FOREIGN 
KEY IF NOT EXISTS t2_ibfk_1(id) REFERENCES t1(id); +Warnings: +Note 1061 Duplicate key name 't2_ibfk_1' +ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS t2_ibfk_1; +ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS t2_ibfk_1; +Warnings: +Note 1091 Can't DROP 't2_ibfk_1'; check that column/key exists +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `id` int(11) NOT NULL, + KEY `id` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t2; +CREATE TABLE t2 ( +id INT(11) NOT NULL); +ALTER TABLE t2 ADD COLUMN a INT, ADD COLUMN IF NOT EXISTS a INT; +Warnings: +Note 1060 Duplicate column name 'a' +ALTER TABLE t2 ADD KEY k_id(id), ADD KEY IF NOT EXISTS k_id(id); +Warnings: +Note 1061 Duplicate key name 'k_id' +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `id` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + KEY `k_id` (`id`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE t2 DROP KEY k_id, DROP KEY IF EXISTS k_id; +Warnings: +Note 1091 Can't DROP 'k_id'; check that column/key exists +ALTER TABLE t2 DROP COLUMN a, DROP COLUMN IF EXISTS a; +Warnings: +Note 1091 Can't DROP 'a'; check that column/key exists +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `id` int(11) NOT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t2; +DROP TABLE t1; +CREATE TABLE t1 ( +`transaction_id` int(11) NOT NULL DEFAULT '0', +KEY `transaction_id` (`transaction_id`)); +ALTER TABLE t1 DROP KEY IF EXISTS transaction_id, ADD PRIMARY KEY IF NOT EXISTS (transaction_id); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `transaction_id` int(11) NOT NULL DEFAULT '0', + PRIMARY KEY (`transaction_id`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +# Bug#11748057 (formerly known as 34972): ALTER TABLE statement doesn't +# identify correct column name. +# +CREATE TABLE t1 (c1 int unsigned , c2 char(100) not null default ''); +ALTER TABLE t1 ADD c3 char(16) NOT NULL DEFAULT '' AFTER c2, +MODIFY c2 char(100) NOT NULL DEFAULT '' AFTER c1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` int(10) unsigned DEFAULT NULL, + `c2` char(100) NOT NULL DEFAULT '', + `c3` char(16) NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +# +# WL#5534 Online ALTER, Phase 1 +# +# Single thread tests. +# See innodb_mysql_sync.test for multi thread tests. +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(a INT PRIMARY KEY, b INT) engine=InnoDB; +CREATE TABLE m1(a INT PRIMARY KEY, b INT) engine=MyISAM; +INSERT INTO t1 VALUES (1,1), (2,2); +INSERT INTO m1 VALUES (1,1), (2,2); +# +# 1: Test ALGORITHM keyword +# +# --enable_info allows us to see how many rows were updated +# by ALTER TABLE. in-place will show 0 rows, while copy > 0. +ALTER TABLE t1 ADD INDEX i1(b); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= DEFAULT; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i2`. This is deprecated and will be disallowed in a future release. +ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= COPY; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i3`. This is deprecated and will be disallowed in a future release. +ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i4`. This is deprecated and will be disallowed in a future release. 
+ALTER TABLE t1 ADD INDEX i5(b), ALGORITHM= INVALID; +ERROR HY000: Unknown ALGORITHM 'INVALID' +ALTER TABLE m1 ENABLE KEYS; +affected rows: 0 +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= DEFAULT; +affected rows: 0 +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE; +affected rows: 0 +ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4; +# +# 2: Test ALGORITHM + old_alter_table +# +SET SESSION old_alter_table= 1; +affected rows: 0 +ALTER TABLE t1 ADD INDEX i1(b); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= DEFAULT; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i2`. This is deprecated and will be disallowed in a future release. +ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= COPY; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i3`. This is deprecated and will be disallowed in a future release. +ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i4`. This is deprecated and will be disallowed in a future release. +SET SESSION old_alter_table= 0; +affected rows: 0 +ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4; +# +# 3: Test unsupported in-place operation +# +ALTER TABLE t1 ADD COLUMN (c1 INT); +ALTER TABLE t1 ADD COLUMN (c2 INT), ALGORITHM= DEFAULT; +ALTER TABLE t1 ADD COLUMN (c3 INT), ALGORITHM= COPY; +ALTER TABLE t1 ADD COLUMN (c4 INT), ALGORITHM= INPLACE; +ALTER TABLE t1 DROP COLUMN c1, DROP COLUMN c2, DROP COLUMN c3, DROP COLUMN c4; +# +# 4: Test LOCK keyword +# +ALTER TABLE t1 ADD INDEX i1(b), LOCK= DEFAULT; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE t1 ADD INDEX i2(b), LOCK= NONE; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i2`. This is deprecated and will be disallowed in a future release. +ALTER TABLE t1 ADD INDEX i3(b), LOCK= SHARED; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i3`. This is deprecated and will be disallowed in a future release. +ALTER TABLE t1 ADD INDEX i4(b), LOCK= EXCLUSIVE; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i4`. This is deprecated and will be disallowed in a future release. +ALTER TABLE t1 ADD INDEX i5(b), LOCK= INVALID; +ERROR HY000: Unknown LOCK type 'INVALID' +ALTER TABLE m1 ENABLE KEYS, LOCK= DEFAULT; +ALTER TABLE m1 ENABLE KEYS, LOCK= NONE; +ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE. +ALTER TABLE m1 ENABLE KEYS, LOCK= SHARED; +ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE. +ALTER TABLE m1 ENABLE KEYS, LOCK= EXCLUSIVE; +ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4; +# +# 5: Test ALGORITHM + LOCK +# +ALTER TABLE t1 ADD INDEX i1(b), ALGORITHM= INPLACE, LOCK= NONE; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= INPLACE, LOCK= SHARED; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i2`. This is deprecated and will be disallowed in a future release. 
+ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= INPLACE, LOCK= EXCLUSIVE; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i3`. This is deprecated and will be disallowed in a future release. +ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= COPY, LOCK= NONE; +ERROR 0A000: LOCK=NONE is not supported. Reason: COPY algorithm requires a lock. Try LOCK=SHARED. +ALTER TABLE t1 ADD INDEX i5(b), ALGORITHM= COPY, LOCK= SHARED; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i5`. This is deprecated and will be disallowed in a future release. +ALTER TABLE t1 ADD INDEX i6(b), ALGORITHM= COPY, LOCK= EXCLUSIVE; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 1 +Warnings: +Note 1831 Duplicate index `i6`. This is deprecated and will be disallowed in a future release. +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= NONE; +ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE. +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= SHARED; +ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE. +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= EXCLUSIVE; +affected rows: 0 +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= NONE; +ERROR 0A000: LOCK=NONE is not supported. Reason: COPY algorithm requires a lock. Try LOCK=SHARED. +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= SHARED; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= EXCLUSIVE; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +DROP TABLE t1, m1; +# +# 6: Possible deadlock involving thr_lock.c +# +CREATE TABLE t1(a INT PRIMARY KEY, b INT); +INSERT INTO t1 VALUES (1,1), (2,2); +START TRANSACTION; +INSERT INTO t1 VALUES (3,3); +# Connection con1 +# Sending: +ALTER TABLE t1 DISABLE KEYS; +# Connection default +# Waiting until ALTER TABLE is blocked. +UPDATE t1 SET b = 4; +COMMIT; +# Connection con1 +# Reaping: ALTER TABLE t1 DISABLE KEYS +# Connection default +DROP TABLE t1; +# +# 7: Which operations require copy and which can be done in-place? +# +# Test which ALTER TABLE operations are done in-place and +# which operations are done using temporary table copy. +# +# --enable_info allows us to see how many rows were updated +# by ALTER TABLE. in-place will show 0 rows, while copy > 0. 
+# +DROP TABLE IF EXISTS ti1, ti2, ti3, tm1, tm2, tm3; +# Single operation tests +CREATE TABLE ti1(a INT NOT NULL, b INT, c INT) engine=InnoDB; +CREATE TABLE tm1(a INT NOT NULL, b INT, c INT) engine=MyISAM; +CREATE TABLE ti2(a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT) engine=InnoDB; +CREATE TABLE tm2(a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT) engine=MyISAM; +INSERT INTO ti1 VALUES (1,1,1), (2,2,2); +INSERT INTO ti2 VALUES (1,1,1), (2,2,2); +INSERT INTO tm1 VALUES (1,1,1), (2,2,2); +INSERT INTO tm2 VALUES (1,1,1), (2,2,2); +ALTER TABLE ti1; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ADD COLUMN d VARCHAR(200); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ADD COLUMN d VARCHAR(200); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ADD COLUMN d2 VARCHAR(200); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ADD COLUMN d2 VARCHAR(200); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ADD COLUMN e ENUM('a', 'b') FIRST; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ADD COLUMN e ENUM('a', 'b') FIRST; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ADD COLUMN f INT AFTER a; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ADD COLUMN f INT AFTER a; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ADD INDEX ii1(b); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ADD INDEX im1(b); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ADD UNIQUE INDEX ii2 (c); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ADD UNIQUE INDEX im2 (c); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ADD FULLTEXT INDEX ii3 (d); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 1 +Warnings: +Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID +ALTER TABLE tm1 ADD FULLTEXT INDEX im3 (d); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ADD FULLTEXT INDEX ii4 (d2); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ADD FULLTEXT INDEX im4 (d2); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ADD PRIMARY KEY(a), ALGORITHM=INPLACE; +ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: InnoDB presently supports one FULLTEXT index creation at a time. Try ALGORITHM=COPY. 
+ALTER TABLE ti1 ADD PRIMARY KEY(a); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ADD PRIMARY KEY(a); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 DROP INDEX ii3; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 DROP INDEX im3; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 DROP COLUMN d2; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 DROP COLUMN d2; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ADD CONSTRAINT fi1 FOREIGN KEY (b) REFERENCES ti2(a); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ADD CONSTRAINT fm1 FOREIGN KEY (b) REFERENCES tm2(a); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ALTER COLUMN b SET DEFAULT 1; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ALTER COLUMN b SET DEFAULT 1; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 ALTER COLUMN b DROP DEFAULT; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ALTER COLUMN b DROP DEFAULT; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 CHANGE COLUMN f g INT; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 CHANGE COLUMN f g INT; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 CHANGE COLUMN g h VARCHAR(20); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 CHANGE COLUMN g h VARCHAR(20); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 MODIFY COLUMN e ENUM('a', 'b', 'c'); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 MODIFY COLUMN e ENUM('a', 'b', 'c'); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 MODIFY COLUMN e INT; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 MODIFY COLUMN e INT; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 MODIFY COLUMN e INT AFTER h; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 MODIFY COLUMN e INT AFTER h; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 MODIFY COLUMN e INT FIRST; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 MODIFY COLUMN e INT FIRST; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +SET @orig_sql_mode = @@sql_mode; +SET @@sql_mode = 'STRICT_TRANS_TABLES'; +ALTER TABLE ti1 MODIFY COLUMN c INT NOT NULL; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +SET @@sql_mode = @orig_sql_mode; +ALTER TABLE tm1 MODIFY COLUMN c INT NOT NULL; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 MODIFY COLUMN c INT NULL; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 MODIFY COLUMN c INT NULL; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 MODIFY COLUMN h VARCHAR(30); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 MODIFY COLUMN h VARCHAR(30); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 MODIFY COLUMN h VARCHAR(30) AFTER d; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 MODIFY COLUMN h VARCHAR(30) AFTER d; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER 
TABLE ti1 DROP COLUMN h; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 DROP COLUMN h; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 DROP INDEX ii2; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 DROP INDEX im2; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 DROP PRIMARY KEY; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 DROP PRIMARY KEY; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 DROP FOREIGN KEY fi1; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 DROP FOREIGN KEY fm1; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 RENAME TO ti3; +affected rows: 0 +ALTER TABLE tm1 RENAME TO tm3; +affected rows: 0 +ALTER TABLE ti3 RENAME TO ti1; +affected rows: 0 +ALTER TABLE tm3 RENAME TO tm1; +affected rows: 0 +ALTER TABLE ti1 ORDER BY b; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 ORDER BY b; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 CONVERT TO CHARACTER SET utf16; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 CONVERT TO CHARACTER SET utf16; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 DEFAULT CHARACTER SET utf8; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 DEFAULT CHARACTER SET utf8; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 FORCE; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 FORCE; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 AUTO_INCREMENT 3; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 AUTO_INCREMENT 3; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 AVG_ROW_LENGTH 10; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 AVG_ROW_LENGTH 10; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 CHECKSUM 1; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 CHECKSUM 1; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 COMMENT 'test'; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 COMMENT 'test'; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 MAX_ROWS 100; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 MAX_ROWS 100; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 MIN_ROWS 1; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 MIN_ROWS 1; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE ti1 PACK_KEYS 1; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE tm1 PACK_KEYS 1; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +DROP TABLE ti1, ti2, tm1, tm2; +# Tests of >1 operation (InnoDB) +CREATE TABLE ti1(a INT PRIMARY KEY AUTO_INCREMENT, b INT) engine=InnoDB; +INSERT INTO ti1(b) VALUES (1), (2); +ALTER TABLE ti1 RENAME TO ti3, ADD INDEX ii1(b); +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +ALTER TABLE ti3 DROP INDEX ii1, AUTO_INCREMENT 5; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +INSERT INTO ti3(b) VALUES (5); +ALTER TABLE ti3 ADD INDEX ii1(b), AUTO_INCREMENT 
7; +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 +INSERT INTO ti3(b) VALUES (7); +SELECT * FROM ti3; +a b +1 1 +2 2 +5 5 +7 7 +DROP TABLE ti3; +# +# 8: Scenario in which ALTER TABLE was returning an unwarranted +# ER_ILLEGAL_HA error at some point during work on this WL. +# +CREATE TABLE tm1(i INT DEFAULT 1) engine=MyISAM; +ALTER TABLE tm1 ADD INDEX ii1(i), ALTER COLUMN i DROP DEFAULT; +DROP TABLE tm1; +create table if not exists t1 (i int); +alter table t1 add key (i); +alter table t1 add key if not exists (i); +Warnings: +Note 1061 Duplicate key name 'i' +DROP TABLE t1; +create table t1 (a int); +alter table t1 change column if exists a b bigint; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `b` bigint(20) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +create table t1 (i int); +alter table t1 add unique index if not exists idx(i); +alter table t1 add unique index if not exists idx(i); +Warnings: +Note 1061 Duplicate key name 'idx' +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) DEFAULT NULL, + UNIQUE KEY `idx` (`i`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 ( +`event_id` bigint(20) unsigned NOT NULL DEFAULT '0', +`market_id` bigint(20) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (`event_id`,`market_id`) +); +ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS event_id (event_id,market_id); +Warnings: +Note 1061 Multiple primary key defined +DROP TABLE t1; +# +# MDEV-11126 Crash while altering persistent virtual column +# +CREATE TABLE `tab1` ( +`id` bigint(20) NOT NULL AUTO_INCREMENT, +`field2` set('option1','option2','option3','option4') NOT NULL, +`field3` set('option1','option2','option3','option4','option5') NOT NULL, +`field4` set('option1','option2','option3','option4') NOT NULL, +`field5` varchar(32) NOT NULL, +`field6` varchar(32) NOT NULL, +`field7` varchar(32) NOT NULL, +`field8` varchar(32) NOT NULL, +`field9` int(11) NOT NULL DEFAULT '1', +`field10` varchar(16) NOT NULL, +`field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1', +`v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT, +PRIMARY KEY (`id`) +) DEFAULT CHARSET=latin1; +ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128); +SHOW CREATE TABLE `tab1`; +Table Create Table +tab1 CREATE TABLE `tab1` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `field2` set('option1','option2','option3','option4') NOT NULL, + `field3` set('option1','option2','option3','option4','option5') NOT NULL, + `field4` set('option1','option2','option3','option4') NOT NULL, + `field5` varchar(32) NOT NULL, + `field6` varchar(32) NOT NULL, + `field7` varchar(32) NOT NULL, + `field8` varchar(32) NOT NULL, + `field9` int(11) NOT NULL DEFAULT '1', + `field10` varchar(16) NOT NULL, + `field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1', + `v_col` varchar(128) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT; +SHOW CREATE TABLE `tab1`; +Table Create Table +tab1 CREATE TABLE `tab1` ( + `id` bigint(20) 
NOT NULL AUTO_INCREMENT, + `field2` set('option1','option2','option3','option4') NOT NULL, + `field3` set('option1','option2','option3','option4','option5') NOT NULL, + `field4` set('option1','option2','option3','option4') NOT NULL, + `field5` varchar(32) NOT NULL, + `field6` varchar(32) NOT NULL, + `field7` varchar(32) NOT NULL, + `field8` varchar(32) NOT NULL, + `field9` int(11) NOT NULL DEFAULT '1', + `field10` varchar(16) NOT NULL, + `field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1', + `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT, + PRIMARY KEY (`id`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE `tab1`; +# +# MDEV-11548 Reproducible server crash after the 2nd ALTER TABLE ADD FOREIGN KEY IF NOT EXISTS +# +CREATE TABLE t1 (id INT UNSIGNED NOT NULL PRIMARY KEY); +CREATE TABLE t2 (id1 INT UNSIGNED NOT NULL); +ALTER TABLE t2 +ADD FOREIGN KEY IF NOT EXISTS (id1) +REFERENCES t1 (id); +ALTER TABLE t2 +ADD FOREIGN KEY IF NOT EXISTS (id1) +REFERENCES t1 (id); +Warnings: +Note 1061 Duplicate key name 'id1' +DROP TABLE t2; +DROP TABLE t1; +# +# MDEV-6390 CONVERT TO CHARACTER SET utf8 doesn't change DEFAULT CHARSET. +# +CREATE TABLE t1 (id int(11) NOT NULL, a int(11) NOT NULL, b int(11)) +ENGINE=InnoDB DEFAULT CHARSET=latin1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) NOT NULL, + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 CONVERT TO CHARACTER SET utf8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) NOT NULL, + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8 +DROP TABLE t1; +# +# +# MDEV-15308 +# Assertion `ha_alter_info->alter_info->drop_list.elements > 0' failed +# in ha_innodb::prepare_inplace_alter_table +# +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP COLUMN b; +Warnings: +Note 1091 Can't DROP 'fk'; check that column/key exists +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP INDEX IF EXISTS fk, DROP COLUMN b; +Warnings: +Note 1091 Can't DROP 'fk'; check that column/key exists +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, KEY(c)) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP COLUMN c; +Warnings: +Note 1091 Can't DROP 'fk'; check that column/key exists +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, KEY c1(c)) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP INDEX c1; +Warnings: +Note 1091 Can't DROP 'fk'; check that column/key exists +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP INDEX IF EXISTS fk, DROP COLUMN IF 
EXISTS c; +Warnings: +Note 1091 Can't DROP 'fk'; check that column/key exists +Note 1091 Can't DROP 'c'; check that column/key exists +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +# +# MDEV-14668 ADD PRIMARY KEY IF NOT EXISTS on composite key +# +CREATE TABLE t1 ( +`ID` BIGINT(20) NOT NULL, +`RANK` MEDIUMINT(4) NOT NULL, +`CHECK_POINT` BIGINT(20) NOT NULL, +UNIQUE INDEX `HORIZON_UIDX01` (`ID`, `RANK`) +) ENGINE=InnoDB; +ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `ID` bigint(20) NOT NULL, + `RANK` mediumint(4) NOT NULL, + `CHECK_POINT` bigint(20) NOT NULL, + PRIMARY KEY (`ID`,`CHECK_POINT`), + UNIQUE KEY `HORIZON_UIDX01` (`ID`,`RANK`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`); +Warnings: +Note 1061 Multiple primary key defined +DROP TABLE t1; diff --cc mysql-test/r/alter_table.test index 00000000000,00000000000..aea1a880a81 new file mode 100644 --- /dev/null +++ b/mysql-test/r/alter_table.test @@@ -1,0 -1,0 +1,1908 @@@ ++if (`select plugin_auth_version < "5.6.26" from information_schema.plugins where plugin_name='innodb'`) ++{ ++ --skip Not fixed in XtraDB below 5.6.26 ++} ++--source include/have_innodb.inc ++# ++# Test of alter table ++# ++--disable_warnings ++drop table if exists t1,t2; ++drop database if exists mysqltest; ++--enable_warnings ++ ++create table t1 ( ++col1 int not null auto_increment primary key, ++col2 varchar(30) not null, ++col3 varchar (20) not null, ++col4 varchar(4) not null, ++col5 enum('PENDING', 'ACTIVE', 'DISABLED') not null, ++col6 int not null, to_be_deleted int); ++insert into t1 values (2,4,3,5,"PENDING",1,7); ++alter table t1 ++add column col4_5 varchar(20) not null after col4, ++add column col7 varchar(30) not null after col5, ++add column col8 datetime not null, drop column to_be_deleted, ++change column col2 fourth varchar(30) not null after col3, ++modify column col6 int not null first; ++select * from t1; ++drop table t1; ++ ++create table t1 (bandID MEDIUMINT UNSIGNED NOT NULL PRIMARY KEY, payoutID SMALLINT UNSIGNED NOT NULL); ++insert into t1 (bandID,payoutID) VALUES (1,6),(2,6),(3,4),(4,9),(5,10),(6,1),(7,12),(8,12); ++alter table t1 add column new_col int, order by payoutid,bandid; ++select * from t1; ++alter table t1 order by bandid,payoutid; ++select * from t1; ++drop table t1; ++ ++# Check that pack_keys and dynamic length rows are not forced. ++ ++CREATE TABLE t1 ( ++GROUP_ID int(10) unsigned DEFAULT '0' NOT NULL, ++LANG_ID smallint(5) unsigned DEFAULT '0' NOT NULL, ++NAME varchar(80) DEFAULT '' NOT NULL, ++PRIMARY KEY (GROUP_ID,LANG_ID), ++KEY NAME (NAME)); ++#show table status like "t1"; ++ALTER TABLE t1 CHANGE NAME NAME CHAR(80) not null; ++--replace_column 8 # ++SHOW FULL COLUMNS FROM t1; ++DROP TABLE t1; ++ ++# ++# Test of ALTER TABLE ... 
ORDER BY ++# ++ ++create table t1 (n int); ++insert into t1 values(9),(3),(12),(10); ++alter table t1 order by n; ++select * from t1; ++drop table t1; ++ ++CREATE TABLE t1 ( ++ id int(11) unsigned NOT NULL default '0', ++ category_id tinyint(4) unsigned NOT NULL default '0', ++ type_id tinyint(4) unsigned NOT NULL default '0', ++ body text NOT NULL, ++ user_id int(11) unsigned NOT NULL default '0', ++ status enum('new','old') NOT NULL default 'new', ++ PRIMARY KEY (id) ++) ENGINE=MyISAM; ++ ++ALTER TABLE t1 ORDER BY t1.id, t1.status, t1.type_id, t1.user_id, t1.body; ++DROP TABLE t1; ++ ++# ++# The following combination found a hang-bug in MyISAM ++# ++ ++CREATE TABLE t1 (AnamneseId int(10) unsigned NOT NULL auto_increment,B BLOB,PRIMARY KEY (AnamneseId)) engine=myisam; ++insert into t1 values (null,"hello"); ++LOCK TABLES t1 WRITE; ++ALTER TABLE t1 ADD Column new_col int not null; ++UNLOCK TABLES; ++OPTIMIZE TABLE t1; ++DROP TABLE t1; ++ ++# ++# Drop and add an auto_increment column ++# ++ ++create table t1 (i int unsigned not null auto_increment primary key); ++insert into t1 values (null),(null),(null),(null); ++alter table t1 drop i,add i int unsigned not null auto_increment, drop primary key, add primary key (i); ++select * from t1; ++drop table t1; ++ ++# ++# Bug #2628: 'alter table t1 rename mysqltest.t1' silently drops mysqltest.t1 ++# if it exists ++# ++create table t1 (name char(15)); ++insert into t1 (name) values ("current"); ++create database mysqltest; ++create table mysqltest.t1 (name char(15)); ++insert into mysqltest.t1 (name) values ("mysqltest"); ++select * from t1; ++select * from mysqltest.t1; ++--error ER_TABLE_EXISTS_ERROR ++alter table t1 rename mysqltest.t1; ++select * from t1; ++select * from mysqltest.t1; ++drop table t1; ++drop database mysqltest; ++ ++# ++# ALTER TABLE ... 
ENABLE/DISABLE KEYS ++ ++create table t1 (n1 int not null, n2 int, n3 int, n4 float, ++ unique(n1), ++ key (n1, n2, n3, n4), ++ key (n2, n3, n4, n1), ++ key (n3, n4, n1, n2), ++ key (n4, n1, n2, n3) ); ++alter table t1 disable keys; ++show keys from t1; ++#let $1=10000; ++let $1=10; ++--disable_query_log ++begin; ++while ($1) ++{ ++ eval insert into t1 values($1,RAND()*1000,RAND()*1000,RAND()); ++ dec $1; ++} ++commit; ++--enable_query_log ++alter table t1 enable keys; ++show keys from t1; ++drop table t1; ++ ++# ++# Alter table and rename ++# ++ ++create table t1 (i int unsigned not null auto_increment primary key); ++alter table t1 rename t2; ++alter table t2 rename t1, add c char(10) comment "no comment"; ++show columns from t1; ++drop table t1; ++ ++# implicit analyze ++ ++create table t1 (a int, b int); ++let $1=100; ++--disable_query_log ++begin; ++while ($1) ++{ ++ eval insert into t1 values(1,$1), (2,$1), (3, $1); ++ dec $1; ++} ++commit; ++--enable_query_log ++alter table t1 add unique (a,b), add key (b); ++show keys from t1; ++analyze table t1; ++show keys from t1; ++drop table t1; ++ ++# ++# Test of ALTER TABLE DELAYED ++# ++ ++CREATE TABLE t1 (i int(10), index(i) ) ENGINE=MyISAM; ++ALTER TABLE t1 DISABLE KEYS; ++INSERT DELAYED INTO t1 VALUES(1),(2),(3); ++ALTER TABLE t1 ENABLE KEYS; ++drop table t1; ++ ++# ++# Test ALTER TABLE ENABLE/DISABLE keys when things are locked ++# ++ ++CREATE TABLE t1 ( ++ Host varchar(16) binary NOT NULL default '', ++ User varchar(16) binary NOT NULL default '', ++ PRIMARY KEY (Host,User) ++) ENGINE=MyISAM; ++ ++ALTER TABLE t1 DISABLE KEYS; ++LOCK TABLES t1 WRITE; ++INSERT INTO t1 VALUES ('localhost','root'),('localhost',''),('games','monty'); ++SHOW INDEX FROM t1; ++ALTER TABLE t1 ENABLE KEYS; ++UNLOCK TABLES; ++CHECK TABLES t1; ++DROP TABLE t1; ++ ++# ++# Test with two keys ++# ++ ++CREATE TABLE t1 ( ++ Host varchar(16) binary NOT NULL default '', ++ User varchar(16) binary NOT NULL default '', ++ PRIMARY KEY (Host,User), ++ KEY (Host) ++) ENGINE=MyISAM; ++ ++ALTER TABLE t1 DISABLE KEYS; ++SHOW INDEX FROM t1; ++LOCK TABLES t1 WRITE; ++INSERT INTO t1 VALUES ('localhost','root'),('localhost',''); ++SHOW INDEX FROM t1; ++ALTER TABLE t1 ENABLE KEYS; ++SHOW INDEX FROM t1; ++UNLOCK TABLES; ++CHECK TABLES t1; ++ ++# Test RENAME with LOCK TABLES ++LOCK TABLES t1 WRITE; ++ALTER TABLE t1 RENAME t2; ++UNLOCK TABLES; ++select * from t2; ++DROP TABLE t2; ++ ++# ++# Test disable keys with locking ++# ++CREATE TABLE t1 ( ++ Host varchar(16) binary NOT NULL default '', ++ User varchar(16) binary NOT NULL default '', ++ PRIMARY KEY (Host,User), ++ KEY (Host) ++) ENGINE=MyISAM; ++ ++LOCK TABLES t1 WRITE; ++ALTER TABLE t1 DISABLE KEYS; ++SHOW INDEX FROM t1; ++DROP TABLE t1; ++ ++# ++# BUG#4717 - check for valid table names ++# ++create table t1 (a int); ++--error ER_WRONG_TABLE_NAME ++alter table t1 rename to ``; ++--error ER_WRONG_TABLE_NAME ++rename table t1 to ``; ++drop table t1; ++ ++# ++# BUG#6236 - ALTER TABLE MODIFY should set implicit NOT NULL on PK columns ++# ++drop table if exists t1, t2; ++create table t1 ( a varchar(10) not null primary key ) engine=myisam; ++create table t2 ( a varchar(10) not null primary key ) engine=merge union=(t1); ++flush tables; ++alter table t1 modify a varchar(10); ++show create table t2; ++flush tables; ++alter table t1 modify a varchar(10) not null; ++show create table t2; ++drop table if exists t1, t2; ++ ++# The following is also part of bug #6236 (CREATE TABLE didn't properly count ++# not null columns for primary keys) 
++ ++create table t1 (a int, b int, c int, d int, e int, f int, g int, h int,i int, primary key (a,b,c,d,e,f,g,i,h)) engine=MyISAM; ++insert into t1 (a) values(1); ++--replace_column 7 X 8 X 9 X 10 X 11 X 12 X 13 X 14 X ++show table status like 't1'; ++alter table t1 modify a int; ++--replace_column 7 X 8 X 9 X 10 X 11 X 12 X 13 X 14 X ++show table status like 't1'; ++drop table t1; ++create table t1 (a int not null, b int not null, c int not null, d int not null, e int not null, f int not null, g int not null, h int not null,i int not null, primary key (a,b,c,d,e,f,g,i,h)) engine=MyISAM; ++insert into t1 (a) values(1); ++--replace_column 7 X 8 X 9 X 10 X 11 X 12 X 13 X 14 X ++show table status like 't1'; ++drop table t1; ++ ++# ++# Test that data get converted when character set is changed ++# Test that data doesn't get converted when src or dst is BINARY/BLOB ++# ++set names koi8r; ++create table t1 (a char(10) character set koi8r); ++insert into t1 values ('����'); ++select a,hex(a) from t1; ++alter table t1 change a a char(10) character set cp1251; ++select a,hex(a) from t1; ++alter table t1 change a a binary(4); ++select a,hex(a) from t1; ++alter table t1 change a a char(10) character set cp1251; ++select a,hex(a) from t1; ++alter table t1 change a a char(10) character set koi8r; ++select a,hex(a) from t1; ++alter table t1 change a a varchar(10) character set cp1251; ++select a,hex(a) from t1; ++alter table t1 change a a char(10) character set koi8r; ++select a,hex(a) from t1; ++alter table t1 change a a text character set cp1251; ++select a,hex(a) from t1; ++alter table t1 change a a char(10) character set koi8r; ++select a,hex(a) from t1; ++delete from t1; ++ ++# ++# Test ALTER TABLE .. CHARACTER SET .. ++# ++show create table t1; ++alter table t1 DEFAULT CHARACTER SET latin1; ++show create table t1; ++alter table t1 CONVERT TO CHARACTER SET latin1; ++show create table t1; ++alter table t1 DEFAULT CHARACTER SET cp1251; ++show create table t1; ++ ++drop table t1; ++ ++# ++# Bug#2821 ++# Test that table CHARACTER SET does not affect blobs ++# ++create table t1 (myblob longblob,mytext longtext) ++default charset latin1 collate latin1_general_cs; ++show create table t1; ++alter table t1 character set latin2; ++show create table t1; ++drop table t1; ++ ++# ++# Bug 2361 (Don't drop UNIQUE with DROP PRIMARY KEY) ++# ++ ++CREATE TABLE t1 (a int PRIMARY KEY, b INT UNIQUE); ++ALTER TABLE t1 DROP PRIMARY KEY; ++SHOW CREATE TABLE t1; ++--error ER_CANT_DROP_FIELD_OR_KEY ++ALTER TABLE t1 DROP PRIMARY KEY; ++DROP TABLE t1; ++ ++# BUG#3899 ++create table t1 (a int, b int, key(a)); ++insert into t1 values (1,1), (2,2); ++--error ER_CANT_DROP_FIELD_OR_KEY ++alter table t1 drop key no_such_key; ++alter table t1 drop key a; ++drop table t1; ++ ++# ++# BUG 12207 alter table ... discard table space on MyISAM table causes ERROR 2013 (HY000) ++# ++# Some platforms (Mac OS X, Windows) will send the error message using small letters. ++CREATE TABLE T12207(a int) ENGINE=MYISAM; ++--replace_result t12207 T12207 ++--error ER_ILLEGAL_HA ++ALTER TABLE T12207 DISCARD TABLESPACE; ++DROP TABLE T12207; ++ ++# ++# Bug #6479 ALTER TABLE ... changing charset fails for TEXT columns ++# ++# The column's character set was changed but the actual data was not ++# modified. In other words, the values were reinterpreted ++# as UTF8 instead of being converted. 
++create table t1 (a text) character set koi8r; ++insert into t1 values (_koi8r'����'); ++select hex(a) from t1; ++alter table t1 convert to character set cp1251; ++select hex(a) from t1; ++drop table t1; ++ ++# ++# Test for bug #7884 "Able to add invalid unique index on TIMESTAMP prefix" ++# MySQL should not think that packed field with non-zero decimals is ++# geometry field and allow to create prefix index which is ++# shorter than packed field length. ++# ++create table t1 ( a timestamp ); ++--error ER_WRONG_SUB_KEY ++alter table t1 add unique ( a(1) ); ++drop table t1; ++ ++# ++# Bug #24395: ALTER TABLE DISABLE KEYS doesn't work when modifying the table ++# ++# This problem happens if the data change is compatible. ++# Changing to the same type is compatible for example. ++# ++--disable_warnings ++drop table if exists t1; ++--enable_warnings ++create table t1 (a int, key(a)); ++show indexes from t1; ++--echo "this used not to disable the index" ++alter table t1 modify a int, disable keys; ++show indexes from t1; ++ ++alter table t1 enable keys; ++show indexes from t1; ++ ++alter table t1 modify a bigint, disable keys; ++show indexes from t1; ++ ++alter table t1 enable keys; ++show indexes from t1; ++ ++alter table t1 add b char(10), disable keys; ++show indexes from t1; ++ ++alter table t1 add c decimal(10,2), enable keys; ++show indexes from t1; ++ ++--echo "this however did" ++alter table t1 disable keys; ++show indexes from t1; ++ ++desc t1; ++ ++alter table t1 add d decimal(15,5); ++--echo "The key should still be disabled" ++show indexes from t1; ++ ++drop table t1; ++ ++--echo "Now will test with one unique index" ++create table t1(a int, b char(10), unique(a)); ++show indexes from t1; ++alter table t1 disable keys; ++show indexes from t1; ++alter table t1 enable keys; ++ ++--echo "If no copy on noop change, this won't touch the data file" ++--echo "Unique index, no change" ++alter table t1 modify a int, disable keys; ++show indexes from t1; ++ ++--echo "Change the type implying data copy" ++--echo "Unique index, no change" ++alter table t1 modify a bigint, disable keys; ++show indexes from t1; ++ ++alter table t1 modify a bigint; ++show indexes from t1; ++ ++alter table t1 modify a int; ++show indexes from t1; ++ ++drop table t1; ++ ++--echo "Now will test with one unique and one non-unique index" ++create table t1(a int, b char(10), unique(a), key(b)); ++show indexes from t1; ++alter table t1 disable keys; ++show indexes from t1; ++alter table t1 enable keys; ++ ++ ++--echo "If no copy on noop change, this won't touch the data file" ++--echo "The non-unique index will be disabled" ++alter table t1 modify a int, disable keys; ++show indexes from t1; ++alter table t1 enable keys; ++show indexes from t1; ++ ++--echo "Change the type implying data copy" ++--echo "The non-unique index will be disabled" ++alter table t1 modify a bigint, disable keys; ++show indexes from t1; ++ ++--echo "Change again the type, but leave the indexes as_is" ++alter table t1 modify a int; ++show indexes from t1; ++--echo "Try the same. When data is no copied on similar tables, this is noop" ++alter table t1 modify a int; ++show indexes from t1; ++ ++drop table t1; ++ ++ ++# ++# Bug#11493 - Alter table rename to default database does not work without ++# db name qualifying ++# ++create database mysqltest; ++create table t1 (c1 int); ++# Move table to other database. ++alter table t1 rename mysqltest.t1; ++# Assure that it has moved. ++--error ER_BAD_TABLE_ERROR ++drop table t1; ++# Move table back. 
++alter table mysqltest.t1 rename t1; ++# Assure that it is back. ++drop table t1; ++# Now test for correct message if no database is selected. ++# Create t1 in 'test'. ++create table t1 (c1 int); ++# Change to other db. ++use mysqltest; ++# Drop the current db. This de-selects any db. ++drop database mysqltest; ++# Now test for correct message. ++--error ER_NO_DB_ERROR ++alter table test.t1 rename t1; ++# Check that explicit qualifying works even with no selected db. ++alter table test.t1 rename test.t1; ++# Go back to standard 'test' db. ++use test; ++drop table t1; ++ ++# ++# BUG#23404 - ROW_FORMAT=FIXED option is lost is an index is added to the ++# table ++# ++CREATE TABLE t1(a INT) ROW_FORMAT=FIXED; ++CREATE INDEX i1 ON t1(a); ++SHOW CREATE TABLE t1; ++DROP INDEX i1 ON t1; ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++# ++# Bug#24219 - ALTER TABLE ... RENAME TO ... , DISABLE KEYS leads to crash ++# ++--disable_warnings ++DROP TABLE IF EXISTS bug24219; ++DROP TABLE IF EXISTS bug24219_2; ++--enable_warnings ++ ++CREATE TABLE bug24219 (a INT, INDEX(a)); ++ ++SHOW INDEX FROM bug24219; ++ ++ALTER TABLE bug24219 RENAME TO bug24219_2, DISABLE KEYS; ++ ++SHOW INDEX FROM bug24219_2; ++ ++DROP TABLE bug24219_2; ++ ++# ++# Bug#24562 (ALTER TABLE ... ORDER BY ... with complex expression asserts) ++# ++ ++--disable_warnings ++drop table if exists table_24562; ++--enable_warnings ++ ++create table table_24562( ++ section int, ++ subsection int, ++ title varchar(50)); ++ ++insert into table_24562 values ++(1, 0, "Introduction"), ++(1, 1, "Authors"), ++(1, 2, "Acknowledgements"), ++(2, 0, "Basics"), ++(2, 1, "Syntax"), ++(2, 2, "Client"), ++(2, 3, "Server"), ++(3, 0, "Intermediate"), ++(3, 1, "Complex queries"), ++(3, 2, "Stored Procedures"), ++(3, 3, "Stored Functions"), ++(4, 0, "Advanced"), ++(4, 1, "Replication"), ++(4, 2, "Load balancing"), ++(4, 3, "High availability"), ++(5, 0, "Conclusion"); ++ ++select * from table_24562; ++ ++alter table table_24562 add column reviewer varchar(20), ++order by title; ++ ++select * from table_24562; ++ ++update table_24562 set reviewer="Me" where section=2; ++update table_24562 set reviewer="You" where section=3; ++ ++alter table table_24562 ++order by section ASC, subsection DESC; ++ ++select * from table_24562; ++ ++alter table table_24562 ++order by table_24562.subsection ASC, table_24562.section DESC; ++ ++select * from table_24562; ++ ++--error ER_PARSE_ERROR ++alter table table_24562 order by 12; ++--error ER_PARSE_ERROR ++alter table table_24562 order by (section + 12); ++--error ER_PARSE_ERROR ++alter table table_24562 order by length(title); ++--error ER_PARSE_ERROR ++alter table table_24562 order by (select 12 from dual); ++ ++--error ER_BAD_FIELD_ERROR ++alter table table_24562 order by no_such_col; ++ ++drop table table_24562; ++ ++# End of 4.1 tests ++ ++# ++# Bug #14693 (ALTER SET DEFAULT doesn't work) ++# ++ ++create table t1 (mycol int(10) not null); ++alter table t1 alter column mycol set default 0; ++desc t1; ++drop table t1; ++ ++# ++# Bug#25262 Auto Increment lost when changing Engine type ++# ++ ++create table t1(id int(8) primary key auto_increment) engine=heap; ++ ++insert into t1 values (null); ++insert into t1 values (null); ++ ++select * from t1; ++ ++# Set auto increment to 50 ++alter table t1 auto_increment = 50; ++ ++# Alter to myisam ++alter table t1 engine = myisam; ++ ++# This insert should get id 50 ++insert into t1 values (null); ++select * from t1; ++ ++# Alter to heap again ++alter table t1 engine = heap; ++insert into t1 
values (null); ++select * from t1; ++ ++drop table t1; ++ ++# ++# Bug#27507: Wrong DATETIME value was allowed by ALTER TABLE in the ++# NO_ZERO_DATE mode. ++# ++set @orig_sql_mode = @@sql_mode; ++set sql_mode="no_zero_date"; ++create table t1(f1 int); ++alter table t1 add column f2 datetime not null, add column f21 date not null; ++insert into t1 values(1,'2000-01-01','2000-01-01'); ++--error 1292 ++alter table t1 add column f3 datetime not null; ++--error 1292 ++alter table t1 add column f3 date not null; ++--error 1292 ++alter table t1 add column f4 datetime not null default '2002-02-02', ++ add column f41 date not null; ++alter table t1 add column f4 datetime not null default '2002-02-02', ++ add column f41 date not null default '2002-02-02'; ++select * from t1; ++drop table t1; ++set sql_mode= @orig_sql_mode; ++ ++# ++# Some additional tests for new, faster alter table. Note that most of the ++# whole alter table code is being tested all around the test suite already. ++# ++ ++create table t1 (v varchar(32)); ++insert into t1 values ('def'),('abc'),('hij'),('3r4f'); ++select * from t1; ++# Fast alter, no copy performed ++alter table t1 change v v2 varchar(32); ++select * from t1; ++# Fast alter, no copy performed ++alter table t1 change v2 v varchar(64); ++select * from t1; ++update t1 set v = 'lmn' where v = 'hij'; ++select * from t1; ++# Regular alter table ++alter table t1 add i int auto_increment not null primary key first; ++select * from t1; ++update t1 set i=5 where i=3; ++select * from t1; ++alter table t1 change i i bigint; ++select * from t1; ++alter table t1 add unique key (i, v); ++select * from t1 where i between 2 and 4 and v in ('def','3r4f','lmn'); ++drop table t1; ++ ++# ++# Bug#6073 "ALTER table minor glich": ALTER TABLE complains that an index ++# without # prefix is not allowed for TEXT columns, while index ++# is defined with prefix. ++# ++create table t1 (t varchar(255) default null, key t (t(80))) ++engine=myisam default charset=latin1; ++alter table t1 change t t text; ++drop table t1; ++ ++# ++# Bug #26794: Adding an index with a prefix on a SPATIAL type breaks ALTER ++# TABLE ++# ++CREATE TABLE t1 (a varchar(500)); ++ ++ALTER TABLE t1 ADD b GEOMETRY NOT NULL, ADD SPATIAL INDEX(b); ++SHOW CREATE TABLE t1; ++ALTER TABLE t1 ADD KEY(b(50)); ++SHOW CREATE TABLE t1; ++ ++ALTER TABLE t1 ADD c POINT; ++SHOW CREATE TABLE t1; ++ ++--error ER_WRONG_SUB_KEY ++CREATE TABLE t2 (a INT, KEY (a(20))); ++ ++ALTER TABLE t1 ADD d INT; ++--error ER_WRONG_SUB_KEY ++ALTER TABLE t1 ADD KEY (d(20)); ++ ++# the 5.1 part of the test ++--error ER_WRONG_SUB_KEY ++ALTER TABLE t1 ADD e GEOMETRY NOT NULL, ADD SPATIAL KEY (e(30)); ++ ++DROP TABLE t1; ++ ++# ++# Bug#18038 MySQL server corrupts binary columns data ++# ++ ++CREATE TABLE t1 (s CHAR(8) BINARY); ++INSERT INTO t1 VALUES ('test'); ++SELECT LENGTH(s) FROM t1; ++ALTER TABLE t1 MODIFY s CHAR(10) BINARY; ++SELECT LENGTH(s) FROM t1; ++DROP TABLE t1; ++ ++CREATE TABLE t1 (s BINARY(8)); ++INSERT INTO t1 VALUES ('test'); ++SELECT LENGTH(s) FROM t1; ++SELECT HEX(s) FROM t1; ++ALTER TABLE t1 MODIFY s BINARY(10); ++SELECT HEX(s) FROM t1; ++SELECT LENGTH(s) FROM t1; ++DROP TABLE t1; ++ ++# ++# Bug#19386: Multiple alter causes crashed table ++# The trailing column would get corrupted data, or server could not even read ++# it. 
++# ++ ++CREATE TABLE t1 (v VARCHAR(3), b INT); ++INSERT INTO t1 VALUES ('abc', 5); ++SELECT * FROM t1; ++ALTER TABLE t1 MODIFY COLUMN v VARCHAR(4); ++SELECT * FROM t1; ++DROP TABLE t1; ++ ++ ++# ++# Bug#31291 ALTER TABLE CONVERT TO CHARACTER SET does not change some data types ++# ++create table t1 (a tinytext character set latin1); ++alter table t1 convert to character set utf8; ++show create table t1; ++drop table t1; ++create table t1 (a mediumtext character set latin1); ++alter table t1 convert to character set utf8; ++show create table t1; ++drop table t1; ++ ++--echo End of 5.0 tests ++ ++# ++# Extended test coverage for ALTER TABLE behaviour under LOCK TABLES ++# It should be consistent across all platforms and for all engines ++# (Before 5.1 this was not true as behavior was different between ++# Unix/Windows and transactional/non-transactional tables). ++# See also innodb_mysql.test ++# ++--disable_warnings ++drop table if exists t1, t2, t3; ++--enable_warnings ++create table t1 (i int); ++create table t3 (j int); ++insert into t1 values (); ++insert into t3 values (); ++# Table which is altered under LOCK TABLES it should stay in list of locked ++# tables and be available after alter takes place unless ALTER contains RENAME ++# clause. We should see the new definition of table, of course. ++lock table t1 write, t3 read; ++# Example of so-called 'fast' ALTER TABLE ++alter table t1 modify i int default 1; ++insert into t1 values (); ++select * from t1; ++# And now full-blown ALTER TABLE ++alter table t1 change i c char(10) default "Two"; ++insert into t1 values (); ++select * from t1; ++# If table is renamed then it should be removed from the list ++# of locked tables. 'Fast' ALTER TABLE with RENAME clause: ++alter table t1 modify c char(10) default "Three", rename to t2; ++--error ER_TABLE_NOT_LOCKED ++select * from t1; ++--error ER_TABLE_NOT_LOCKED ++select * from t2; ++select * from t3; ++unlock tables; ++insert into t2 values (); ++select * from t2; ++lock table t2 write, t3 read; ++# Full ALTER TABLE with RENAME ++alter table t2 change c vc varchar(100) default "Four", rename to t1; ++--error ER_TABLE_NOT_LOCKED ++select * from t1; ++--error ER_TABLE_NOT_LOCKED ++select * from t2; ++select * from t3; ++unlock tables; ++insert into t1 values (); ++select * from t1; ++drop tables t1, t3; ++ ++ ++# ++# Bug#18775 - Temporary table from alter table visible to other threads ++# ++# Check if special characters work and duplicates are detected. ++--disable_warnings ++DROP TABLE IF EXISTS `t+1`, `t+2`; ++--enable_warnings ++CREATE TABLE `t+1` (c1 INT); ++ALTER TABLE `t+1` RENAME `t+2`; ++CREATE TABLE `t+1` (c1 INT); ++--error ER_TABLE_EXISTS_ERROR ++ALTER TABLE `t+1` RENAME `t+2`; ++DROP TABLE `t+1`, `t+2`; ++# ++# Same for temporary tables though these names do not become file names. ++CREATE TEMPORARY TABLE `tt+1` (c1 INT); ++ALTER TABLE `tt+1` RENAME `tt+2`; ++CREATE TEMPORARY TABLE `tt+1` (c1 INT); ++--error ER_TABLE_EXISTS_ERROR ++ALTER TABLE `tt+1` RENAME `tt+2`; ++SHOW CREATE TABLE `tt+1`; ++SHOW CREATE TABLE `tt+2`; ++DROP TABLE `tt+1`, `tt+2`; ++# ++# Check if special characters as in tmp_file_prefix work. 
++CREATE TABLE `#sql1` (c1 INT); ++CREATE TABLE `@0023sql2` (c1 INT); ++SHOW TABLES; ++RENAME TABLE `#sql1` TO `@0023sql1`; ++RENAME TABLE `@0023sql2` TO `#sql2`; ++SHOW TABLES; ++ALTER TABLE `@0023sql1` RENAME `#sql-1`; ++ALTER TABLE `#sql2` RENAME `@0023sql-2`; ++SHOW TABLES; ++INSERT INTO `#sql-1` VALUES (1); ++INSERT INTO `@0023sql-2` VALUES (2); ++DROP TABLE `#sql-1`, `@0023sql-2`; ++# ++# Same for temporary tables though these names do not become file names. ++CREATE TEMPORARY TABLE `#sql1` (c1 INT); ++CREATE TEMPORARY TABLE `@0023sql2` (c1 INT); ++SHOW TABLES; ++ALTER TABLE `#sql1` RENAME `@0023sql1`; ++ALTER TABLE `@0023sql2` RENAME `#sql2`; ++SHOW TABLES; ++INSERT INTO `#sql2` VALUES (1); ++INSERT INTO `@0023sql1` VALUES (2); ++SHOW CREATE TABLE `#sql2`; ++SHOW CREATE TABLE `@0023sql1`; ++DROP TABLE `#sql2`, `@0023sql1`; ++ ++# ++# Bug #22369: Alter table rename combined with other alterations causes lost tables ++# ++# This problem happens if the data change is compatible. ++# Changing to the same type is compatible for example. ++# ++--disable_warnings ++DROP TABLE IF EXISTS t1; ++DROP TABLE IF EXISTS t2; ++--enable_warnings ++CREATE TABLE t1 ( ++ int_field INTEGER UNSIGNED NOT NULL, ++ char_field CHAR(10), ++ INDEX(`int_field`) ++); ++ ++DESCRIBE t1; ++ ++SHOW INDEXES FROM t1; ++ ++INSERT INTO t1 VALUES (1, "edno"), (1, "edno"), (2, "dve"), (3, "tri"), (5, "pet"); ++--echo "Non-copy data change - new frm, but old data and index files" ++ALTER TABLE t1 ++ CHANGE int_field unsigned_int_field INTEGER UNSIGNED NOT NULL, ++ RENAME t2; ++ ++--error ER_NO_SUCH_TABLE ++SELECT * FROM t1 ORDER BY int_field; ++SELECT * FROM t2 ORDER BY unsigned_int_field; ++DESCRIBE t2; ++DESCRIBE t2; ++ALTER TABLE t2 MODIFY unsigned_int_field BIGINT UNSIGNED NOT NULL; ++DESCRIBE t2; ++ ++DROP TABLE t2; ++ ++# ++# Bug#28427: Columns were renamed instead of moving by ALTER TABLE. ++# ++CREATE TABLE t1 (f1 INT, f2 INT, f3 INT); ++INSERT INTO t1 VALUES (1, 2, NULL); ++SELECT * FROM t1; ++ALTER TABLE t1 MODIFY COLUMN f3 INT AFTER f1; ++SELECT * FROM t1; ++ALTER TABLE t1 MODIFY COLUMN f3 INT AFTER f2; ++SELECT * FROM t1; ++DROP TABLE t1; ++ ++# ++# BUG#29957 - alter_table.test fails ++# ++create table t1 (c char(10) default "Two"); ++lock table t1 write; ++insert into t1 values (); ++alter table t1 modify c char(10) default "Three"; ++unlock tables; ++select * from t1; ++check table t1; ++drop table t1; ++ ++# ++# Bug#33873: Fast ALTER TABLE doesn't work with multibyte character sets ++# ++ ++--disable_warnings ++DROP TABLE IF EXISTS t1; ++--enable_warnings ++CREATE TABLE t1 (id int, c int) character set latin1; ++INSERT INTO t1 VALUES (1,1); ++--enable_info ++ALTER TABLE t1 CHANGE c d int; ++ALTER TABLE t1 CHANGE d c int; ++ALTER TABLE t1 MODIFY c VARCHAR(10); ++ALTER TABLE t1 CHANGE c d varchar(10); ++ALTER TABLE t1 CHANGE d c varchar(10); ++--disable_info ++DROP TABLE t1; ++ ++--disable_warnings ++DROP TABLE IF EXISTS t1; ++--enable_warnings ++CREATE TABLE t1 (id int, c int) character set utf8; ++INSERT INTO t1 VALUES (1,1); ++--enable_info ++ALTER TABLE t1 CHANGE c d int; ++ALTER TABLE t1 CHANGE d c int; ++ALTER TABLE t1 MODIFY c VARCHAR(10); ++ALTER TABLE t1 CHANGE c d varchar(10); ++ALTER TABLE t1 CHANGE d c varchar(10); ++--disable_info ++DROP TABLE t1; ++ ++# ++# Bug#39372 "Smart" ALTER TABLE not so smart after all. 
++# ++create table t1(f1 int not null, f2 int not null, key (f1), key (f2)); ++let $count= 50; ++--disable_query_log ++begin; ++while ($count) ++{ ++ EVAL insert into t1 values (1,1),(1,1),(1,1),(1,1),(1,1); ++ EVAL insert into t1 values (2,2),(2,2),(2,2),(2,2),(2,2); ++ dec $count ; ++} ++commit; ++--enable_query_log ++ ++select index_length into @unpaked_keys_size from ++information_schema.tables where table_name='t1'; ++alter table t1 pack_keys=1; ++select index_length into @paked_keys_size from ++information_schema.tables where table_name='t1'; ++select (@unpaked_keys_size > @paked_keys_size); ++ ++select max_data_length into @orig_max_data_length from ++information_schema.tables where table_name='t1'; ++alter table t1 max_rows=100; ++select max_data_length into @changed_max_data_length from ++information_schema.tables where table_name='t1'; ++select (@orig_max_data_length > @changed_max_data_length); ++ ++drop table t1; ++ ++# ++# Bug #23113: Different behavior on altering ENUM fields between 5.0 and 5.1 ++# ++CREATE TABLE t1(a INT AUTO_INCREMENT PRIMARY KEY, ++ b ENUM('a', 'b', 'c') NOT NULL); ++INSERT INTO t1 (b) VALUES ('a'), ('c'), ('b'), ('b'), ('a'); ++ALTER TABLE t1 MODIFY b ENUM('a', 'z', 'b', 'c') NOT NULL; ++SELECT * FROM t1; ++DROP TABLE t1; ++ ++# ++# Test for ALTER column DROP DEFAULT ++# ++ ++SET @save_sql_mode=@@sql_mode; ++SET sql_mode=strict_all_tables; ++ ++CREATE TABLE t1 (a int NOT NULL default 42); ++INSERT INTO t1 values (); ++SELECT * FROM t1; ++ALTER TABLE t1 ALTER COLUMN a DROP DEFAULT; ++--error 1364 ++INSERT INTO t1 values (); ++INSERT INTO t1 (a) VALUES (11); ++SELECT * FROM t1 ORDER BY a; ++DROP TABLE t1; ++SET @@sql_mode=@save_sql_mode; ++--echo # ++--echo # Bug#45567: Fast ALTER TABLE broken for enum and set ++--echo # ++ ++--disable_warnings ++DROP TABLE IF EXISTS t1; ++--enable_warnings ++ ++CREATE TABLE t1 (a ENUM('a1','a2')); ++INSERT INTO t1 VALUES ('a1'),('a2'); ++--enable_info ++--echo # No copy: No modification ++ALTER TABLE t1 MODIFY COLUMN a ENUM('a1','a2'); ++--echo # No copy: Add new enumeration to the end ++ALTER TABLE t1 MODIFY COLUMN a ENUM('a1','a2','a3'); ++--echo # Copy: Modify and add new to the end ++ALTER TABLE t1 MODIFY COLUMN a ENUM('a1','a2','xx','a5'); ++--echo # Copy: Remove from the end ++ALTER TABLE t1 MODIFY COLUMN a ENUM('a1','a2','xx'); ++--echo # Copy: Add new enumeration ++ALTER TABLE t1 MODIFY COLUMN a ENUM('a1','a2','a0','xx'); ++--echo # No copy: Add new enumerations to the end ++ALTER TABLE t1 MODIFY COLUMN a ENUM('a1','a2','a0','xx','a5','a6'); ++--disable_info ++DROP TABLE t1; ++ ++CREATE TABLE t1 (a SET('a1','a2')); ++INSERT INTO t1 VALUES ('a1'),('a2'); ++--enable_info ++--echo # No copy: No modification ++ALTER TABLE t1 MODIFY COLUMN a SET('a1','a2'); ++--echo # No copy: Add new to the end ++ALTER TABLE t1 MODIFY COLUMN a SET('a1','a2','a3'); ++--echo # Copy: Modify and add new to the end ++ALTER TABLE t1 MODIFY COLUMN a SET('a1','a2','xx','a5'); ++--echo # Copy: Remove from the end ++ALTER TABLE t1 MODIFY COLUMN a SET('a1','a2','xx'); ++--echo # Copy: Add new member ++ALTER TABLE t1 MODIFY COLUMN a SET('a1','a2','a0','xx'); ++--echo # No copy: Add new to the end ++ALTER TABLE t1 MODIFY COLUMN a SET('a1','a2','a0','xx','a5','a6'); ++--echo # Copy: Numerical incrase (pack lenght) ++ALTER TABLE t1 MODIFY COLUMN a SET('a1','a2','a0','xx','a5','a6','a7','a8','a9','a10'); ++--disable_info ++DROP TABLE t1; ++ ++# ++# Bug#43508: Renaming timestamp or date column triggers table copy ++# ++ ++CREATE TABLE t1 (f1 TIMESTAMP 
NULL DEFAULT NULL, ++ f2 INT(11) DEFAULT NULL) ENGINE=MYISAM DEFAULT CHARSET=utf8; ++ ++INSERT INTO t1 VALUES (NULL, NULL), ("2009-10-09 11:46:19", 2); ++ ++--echo this should affect no rows as there is no real change ++--enable_info ++ALTER TABLE t1 CHANGE COLUMN f1 f1_no_real_change TIMESTAMP NULL DEFAULT NULL; ++--disable_info ++DROP TABLE t1; ++ ++ ++--echo # ++--echo # Bug #31145: ALTER TABLE DROP COLUMN, ADD COLUMN crashes (linux) ++--echo # or freezes (win) the server ++--echo # ++ ++CREATE TABLE t1 (a TEXT, id INT, b INT); ++ALTER TABLE t1 DROP COLUMN a, ADD COLUMN c TEXT FIRST; ++ ++DROP TABLE t1; ++ ++ ++--echo # ++--echo # Test for bug #12652385 - "61493: REORDERING COLUMNS TO POSITION ++--echo # FIRST CAN CAUSE DATA TO BE CORRUPTED". ++--echo # ++--disable_warnings ++drop table if exists t1; ++--enable_warnings ++--echo # Use MyISAM engine as the fact that InnoDB doesn't support ++--echo # in-place ALTER TABLE in cases when columns are being renamed ++--echo # hides some bugs. ++create table t1 (i int, j int) engine=myisam; ++insert into t1 value (1, 2); ++--echo # First, test for original problem described in the bug report. ++select * from t1; ++--echo # Change of column order by the below ALTER TABLE statement should ++--echo # affect both column names and column contents. ++alter table t1 modify column j int first; ++select * from t1; ++--echo # Now test for similar problem with the same root. ++--echo # The below ALTER TABLE should change not only the name but ++--echo # also the value for the last column of the table. ++alter table t1 drop column i, add column k int default 0; ++select * from t1; ++--echo # Clean-up. ++drop table t1; ++ ++ ++--echo End of 5.1 tests ++ ++# ++# Bug #31031 ALTER TABLE regression in 5.0 ++# ++# The ALTER TABLE operation failed with ++# ERROR 1089 (HY000): Incorrect sub part key; ... ++# ++CREATE TABLE t1(c CHAR(10), ++ i INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY); ++INSERT INTO t1 VALUES('a',2),('b',4),('c',6); ++ALTER TABLE t1 ++ DROP i, ++ ADD i INT UNSIGNED NOT NULL AUTO_INCREMENT, ++ AUTO_INCREMENT = 1; ++DROP TABLE t1; ++ ++ ++# ++# Bug#50542 5.5.x doesn't check length of key prefixes: ++# corruption and crash results ++# ++# This case is related to Bug#31031 (above) ++# A statement where the index key is larger/wider than ++# the column type, should cause an error ++# ++--error ER_WRONG_SUB_KEY ++CREATE TABLE t1 (a CHAR(1), PRIMARY KEY (a(255))); ++ ++# Test other variants of creating indices ++CREATE TABLE t1 (a CHAR(1)); ++# ALTER TABLE ++--error ER_WRONG_SUB_KEY ++ALTER TABLE t1 ADD PRIMARY KEY (a(20)); ++--error ER_WRONG_SUB_KEY ++ALTER TABLE t1 ADD KEY (a(20)); ++# CREATE INDEX ++--error ER_WRONG_SUB_KEY ++CREATE UNIQUE INDEX i1 ON t1 (a(20)); ++--error ER_WRONG_SUB_KEY ++CREATE INDEX i2 ON t1 (a(20)); ++# cleanup ++DROP TABLE t1; ++ ++ ++# ++# Bug #45052 ALTER TABLE ADD COLUMN crashes server with multiple foreign key columns ++# The alter table fails if 2 or more new fields added and ++# also added a key with these fields ++# ++CREATE TABLE t1 (id int); ++INSERT INTO t1 VALUES (1), (2); ++ALTER TABLE t1 ADD COLUMN (f1 INT), ADD COLUMN (f2 INT), ADD KEY f2k(f2); ++DROP TABLE t1; ++ ++ ++--echo # ++--echo # Test for bug #53820 "ALTER a MEDIUMINT column table causes full ++--echo # table copy". 
++--echo # ++--disable_warnings ++DROP TABLE IF EXISTS t1; ++--enable_warnings ++CREATE TABLE t1 (a INT, b MEDIUMINT); ++INSERT INTO t1 VALUES (1, 1), (2, 2); ++--echo # The below ALTER should not copy table and so no rows should ++--echo # be shown as affected. ++--enable_info ++ALTER TABLE t1 CHANGE a id INT; ++--disable_info ++DROP TABLE t1; ++ ++ ++--echo # ++--echo # Bug#11754461 CANNOT ALTER TABLE WHEN KEY PREFIX TOO LONG ++--echo # ++ ++--disable_warnings ++DROP DATABASE IF EXISTS db1; ++--enable_warnings ++ ++CREATE DATABASE db1 CHARACTER SET utf8; ++CREATE TABLE db1.t1 (bar TINYTEXT, KEY (bar(100))); ++ALTER TABLE db1.t1 ADD baz INT; ++ ++DROP DATABASE db1; ++ ++ ++--echo # Additional coverage for refactoring which is made as part ++--echo # of fix for bug #27480 "Extend CREATE TEMPORARY TABLES privilege ++--echo # to allow temp table operations". ++--echo # ++--echo # At some point the below test case failed on assertion. ++ ++--disable_warnings ++DROP TABLE IF EXISTS t1; ++--enable_warnings ++ ++CREATE TEMPORARY TABLE t1 (i int) ENGINE=MyISAM; ++ ++--error ER_ILLEGAL_HA ++ALTER TABLE t1 DISCARD TABLESPACE; ++ ++DROP TABLE t1; ++ ++ ++--echo # ++--echo # Bug#11938039 RE-EXECUTION OF FRM-ONLY ALTER TABLE WITH RENAME ++--echo # CLAUSE FAILS OR ABORTS SERVER. ++--echo # ++--disable_warnings ++drop table if exists t1; ++--enable_warnings ++create table t1 (a int); ++prepare stmt1 from 'alter table t1 alter column a set default 1, rename to t2'; ++execute stmt1; ++rename table t2 to t1; ++--echo # The below statement should succeed and not emit error or abort server. ++execute stmt1; ++deallocate prepare stmt1; ++drop table t2; ++ ++--echo # ++--echo # MDEV-8960 Can't refer the same column twice in one ALTER TABLE ++--echo # ++ ++CREATE TABLE t1 ( ++ `a` int(11) DEFAULT NULL ++) DEFAULT CHARSET=utf8; ++ ++ALTER TABLE t1 ADD COLUMN `consultant_id` integer NOT NULL, ++ALTER COLUMN `consultant_id` DROP DEFAULT; ++ ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++CREATE TABLE t1 ( ++ `a` int(11) DEFAULT NULL ++) DEFAULT CHARSET=utf8; ++ ++ALTER TABLE t1 ADD COLUMN `consultant_id` integer NOT NULL, ++ALTER COLUMN `consultant_id` SET DEFAULT 2; ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++CREATE TABLE t1 ( ++ `a` int(11) DEFAULT NULL ++) DEFAULT CHARSET=utf8; ++ ++ALTER TABLE t1 ADD COLUMN `consultant_id` integer NOT NULL DEFAULT 2, ++ALTER COLUMN `consultant_id` DROP DEFAULT; ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++CREATE TABLE t1 ( ++ `a` int(11) DEFAULT NULL ++) DEFAULT CHARSET=utf8; ++ ++ALTER TABLE t1 ADD COLUMN `consultant_id` integer NOT NULL DEFAULT 2, ++ALTER COLUMN `consultant_id` DROP DEFAULT, ++MODIFY COLUMN `consultant_id` BIGINT; ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++--echo # ++--echo # BUG#27788685: NO WARNING WHEN TRUNCATING A STRING WITH DATA LOSS ++--echo # ++ ++SET GLOBAL max_allowed_packet=17825792; ++ ++--connect(con1, localhost, root,,) ++CREATE TABLE t1 (t1_fld1 TEXT); ++CREATE TABLE t2 (t2_fld1 MEDIUMTEXT); ++CREATE TABLE t3 (t3_fld1 LONGTEXT); ++ ++INSERT INTO t1 VALUES (REPEAT('a',300)); ++INSERT INTO t2 VALUES (REPEAT('b',65680)); ++INSERT INTO t3 VALUES (REPEAT('c',16777300)); ++ ++SELECT LENGTH(t1_fld1) FROM t1; ++SELECT LENGTH(t2_fld1) FROM t2; ++SELECT LENGTH(t3_fld1) FROM t3; ++ ++--echo # With strict mode ++SET SQL_MODE='STRICT_ALL_TABLES'; ++ ++--error ER_DATA_TOO_LONG ++ALTER TABLE t1 CHANGE `t1_fld1` `my_t1_fld1` TINYTEXT; ++--error ER_DATA_TOO_LONG ++ALTER TABLE t2 CHANGE `t2_fld1` `my_t2_fld1` TEXT; ++--error ER_DATA_TOO_LONG ++ALTER TABLE 
t3 CHANGE `t3_fld1` `my_t3_fld1` MEDIUMTEXT; ++ ++--echo # With non-strict mode ++SET SQL_MODE=''; ++ ++ALTER TABLE t1 CHANGE `t1_fld1` `my_t1_fld1` TINYTEXT; ++ALTER TABLE t2 CHANGE `t2_fld1` `my_t2_fld1` TEXT; ++ALTER TABLE t3 CHANGE `t3_fld1` `my_t3_fld1` MEDIUMTEXT; ++ ++SELECT LENGTH(my_t1_fld1) FROM t1; ++SELECT LENGTH(my_t2_fld1) FROM t2; ++SELECT LENGTH(my_t3_fld1) FROM t3; ++ ++# Cleanup ++--disconnect con1 ++--source include/wait_until_disconnected.inc ++ ++--connection default ++DROP TABLE t1, t2, t3; ++ ++SET SQL_MODE=default; ++SET GLOBAL max_allowed_packet=default; ++ ++# ++# Test of ALTER TABLE IF [NOT] EXISTS ++# ++ ++CREATE TABLE t1 ( ++ id INT(11) NOT NULL, ++ x_param INT(11) DEFAULT NULL, ++ PRIMARY KEY (id) ++) ENGINE=MYISAM; ++ ++ALTER TABLE t1 ADD COLUMN IF NOT EXISTS id INT, ++ ADD COLUMN IF NOT EXISTS lol INT AFTER id; ++ALTER TABLE t1 ADD COLUMN IF NOT EXISTS lol INT AFTER id; ++ALTER TABLE t1 DROP COLUMN IF EXISTS lol; ++ALTER TABLE t1 DROP COLUMN IF EXISTS lol; ++ ++ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); ++ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); ++ALTER TABLE t1 MODIFY IF EXISTS lol INT; ++ ++DROP INDEX IF EXISTS x_param ON t1; ++DROP INDEX IF EXISTS x_param ON t1; ++CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); ++CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++CREATE TABLE t1 ( ++ id INT(11) NOT NULL, ++ x_param INT(11) DEFAULT NULL, ++ PRIMARY KEY (id) ++) ENGINE=INNODB; ++ ++CREATE TABLE t2 ( ++ id INT(11) NOT NULL) ENGINE=INNODB; ++ ++ALTER TABLE t1 ADD COLUMN IF NOT EXISTS id INT, ++ ADD COLUMN IF NOT EXISTS lol INT AFTER id; ++ALTER TABLE t1 ADD COLUMN IF NOT EXISTS lol INT AFTER id; ++ALTER TABLE t1 DROP COLUMN IF EXISTS lol; ++ALTER TABLE t1 DROP COLUMN IF EXISTS lol; ++ ++ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); ++ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); ++ALTER TABLE t1 MODIFY IF EXISTS lol INT; ++ ++DROP INDEX IF EXISTS x_param ON t1; ++DROP INDEX IF EXISTS x_param ON t1; ++CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); ++CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); ++SHOW CREATE TABLE t1; ++ ++ALTER TABLE t2 ADD FOREIGN KEY IF NOT EXISTS fk(id) REFERENCES t1(id); ++ALTER TABLE t2 ADD FOREIGN KEY IF NOT EXISTS fk(id) REFERENCES t1(id); ++ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS fk; ++ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS fk; ++SHOW CREATE TABLE t2; ++ALTER TABLE t2 ADD FOREIGN KEY (id) REFERENCES t1(id); ++ALTER TABLE t2 ADD FOREIGN KEY IF NOT EXISTS t2_ibfk_1(id) REFERENCES t1(id); ++ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS t2_ibfk_1; ++ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS t2_ibfk_1; ++SHOW CREATE TABLE t2; ++ ++DROP TABLE t2; ++CREATE TABLE t2 ( ++ id INT(11) NOT NULL); ++ALTER TABLE t2 ADD COLUMN a INT, ADD COLUMN IF NOT EXISTS a INT; ++ALTER TABLE t2 ADD KEY k_id(id), ADD KEY IF NOT EXISTS k_id(id); ++SHOW CREATE TABLE t2; ++ALTER TABLE t2 DROP KEY k_id, DROP KEY IF EXISTS k_id; ++ALTER TABLE t2 DROP COLUMN a, DROP COLUMN IF EXISTS a; ++SHOW CREATE TABLE t2; ++ ++DROP TABLE t2; ++DROP TABLE t1; ++ ++CREATE TABLE t1 ( ++ `transaction_id` int(11) NOT NULL DEFAULT '0', ++ KEY `transaction_id` (`transaction_id`)); ++ALTER TABLE t1 DROP KEY IF EXISTS transaction_id, ADD PRIMARY KEY IF NOT EXISTS (transaction_id); ++SHOW CREATE TABLE t1; ++ ++DROP TABLE t1; ++ ++--echo # Bug#11748057 (formerly known as 34972): ALTER TABLE statement doesn't ++--echo # identify correct column name. 
++--echo # ++ ++CREATE TABLE t1 (c1 int unsigned , c2 char(100) not null default ''); ++ALTER TABLE t1 ADD c3 char(16) NOT NULL DEFAULT '' AFTER c2, ++ MODIFY c2 char(100) NOT NULL DEFAULT '' AFTER c1; ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++--echo # ++--echo # WL#5534 Online ALTER, Phase 1 ++--echo # ++ ++--echo # Single thread tests. ++--echo # See innodb_mysql_sync.test for multi thread tests. ++ ++--disable_warnings ++DROP TABLE IF EXISTS t1; ++--enable_warnings ++ ++CREATE TABLE t1(a INT PRIMARY KEY, b INT) engine=InnoDB; ++CREATE TABLE m1(a INT PRIMARY KEY, b INT) engine=MyISAM; ++INSERT INTO t1 VALUES (1,1), (2,2); ++INSERT INTO m1 VALUES (1,1), (2,2); ++ ++--echo # ++--echo # 1: Test ALGORITHM keyword ++--echo # ++ ++--echo # --enable_info allows us to see how many rows were updated ++--echo # by ALTER TABLE. in-place will show 0 rows, while copy > 0. ++ ++--enable_info ++ALTER TABLE t1 ADD INDEX i1(b); ++ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= DEFAULT; ++ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= COPY; ++ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE; ++--error ER_UNKNOWN_ALTER_ALGORITHM ++ALTER TABLE t1 ADD INDEX i5(b), ALGORITHM= INVALID; ++ ++ALTER TABLE m1 ENABLE KEYS; ++ALTER TABLE m1 ENABLE KEYS, ALGORITHM= DEFAULT; ++ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY; ++ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE; ++--disable_info ++ ++ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4; ++ ++--echo # ++--echo # 2: Test ALGORITHM + old_alter_table ++--echo # ++ ++--enable_info ++SET SESSION old_alter_table= 1; ++ALTER TABLE t1 ADD INDEX i1(b); ++ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= DEFAULT; ++ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= COPY; ++ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE; ++SET SESSION old_alter_table= 0; ++--disable_info ++ ++ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4; ++ ++--echo # ++--echo # 3: Test unsupported in-place operation ++--echo # ++ ++ALTER TABLE t1 ADD COLUMN (c1 INT); ++ALTER TABLE t1 ADD COLUMN (c2 INT), ALGORITHM= DEFAULT; ++ALTER TABLE t1 ADD COLUMN (c3 INT), ALGORITHM= COPY; ++ALTER TABLE t1 ADD COLUMN (c4 INT), ALGORITHM= INPLACE; ++ ++ALTER TABLE t1 DROP COLUMN c1, DROP COLUMN c2, DROP COLUMN c3, DROP COLUMN c4; ++ ++--echo # ++--echo # 4: Test LOCK keyword ++--echo # ++ ++--enable_info ++ALTER TABLE t1 ADD INDEX i1(b), LOCK= DEFAULT; ++ALTER TABLE t1 ADD INDEX i2(b), LOCK= NONE; ++ALTER TABLE t1 ADD INDEX i3(b), LOCK= SHARED; ++ALTER TABLE t1 ADD INDEX i4(b), LOCK= EXCLUSIVE; ++--error ER_UNKNOWN_ALTER_LOCK ++ALTER TABLE t1 ADD INDEX i5(b), LOCK= INVALID; ++--disable_info ++ ++ALTER TABLE m1 ENABLE KEYS, LOCK= DEFAULT; ++--error ER_ALTER_OPERATION_NOT_SUPPORTED ++ALTER TABLE m1 ENABLE KEYS, LOCK= NONE; ++--error ER_ALTER_OPERATION_NOT_SUPPORTED ++ALTER TABLE m1 ENABLE KEYS, LOCK= SHARED; ++ALTER TABLE m1 ENABLE KEYS, LOCK= EXCLUSIVE; ++ ++ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4; ++ ++--echo # ++--echo # 5: Test ALGORITHM + LOCK ++--echo # ++ ++--enable_info ++ALTER TABLE t1 ADD INDEX i1(b), ALGORITHM= INPLACE, LOCK= NONE; ++ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= INPLACE, LOCK= SHARED; ++ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= INPLACE, LOCK= EXCLUSIVE; ++--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON ++ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= COPY, LOCK= NONE; ++ALTER TABLE t1 ADD INDEX i5(b), ALGORITHM= COPY, LOCK= SHARED; ++ALTER TABLE t1 ADD INDEX i6(b), ALGORITHM= COPY, LOCK= EXCLUSIVE; ++ ++--error 
ER_ALTER_OPERATION_NOT_SUPPORTED ++ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= NONE; ++--error ER_ALTER_OPERATION_NOT_SUPPORTED ++ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= SHARED; ++ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= EXCLUSIVE; ++--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON ++ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= NONE; ++# This works because the lock will be SNW for the copy phase. ++# It will still require exclusive lock for actually enabling keys. ++ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= SHARED; ++ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= EXCLUSIVE; ++--disable_info ++ ++DROP TABLE t1, m1; ++ ++--echo # ++--echo # 6: Possible deadlock involving thr_lock.c ++--echo # ++ ++CREATE TABLE t1(a INT PRIMARY KEY, b INT); ++INSERT INTO t1 VALUES (1,1), (2,2); ++ ++START TRANSACTION; ++INSERT INTO t1 VALUES (3,3); ++ ++--echo # Connection con1 ++connect (con1, localhost, root); ++--echo # Sending: ++--send ALTER TABLE t1 DISABLE KEYS ++ ++--echo # Connection default ++connection default; ++--echo # Waiting until ALTER TABLE is blocked. ++let $wait_condition= ++ SELECT COUNT(*) = 1 FROM information_schema.processlist ++ WHERE state = "Waiting for table metadata lock" AND ++ info = "ALTER TABLE t1 DISABLE KEYS"; ++--source include/wait_condition.inc ++UPDATE t1 SET b = 4; ++COMMIT; ++ ++--echo # Connection con1 ++connection con1; ++--echo # Reaping: ALTER TABLE t1 DISABLE KEYS ++--reap ++disconnect con1; ++--source include/wait_until_disconnected.inc ++ ++--echo # Connection default ++connection default; ++DROP TABLE t1; ++ ++--echo # ++--echo # 7: Which operations require copy and which can be done in-place? ++--echo # ++--echo # Test which ALTER TABLE operations are done in-place and ++--echo # which operations are done using temporary table copy. ++--echo # ++--echo # --enable_info allows us to see how many rows were updated ++--echo # by ALTER TABLE. in-place will show 0 rows, while copy > 0. 
++--echo # ++ ++--disable_warnings ++DROP TABLE IF EXISTS ti1, ti2, ti3, tm1, tm2, tm3; ++--enable_warnings ++ ++--echo # Single operation tests ++ ++CREATE TABLE ti1(a INT NOT NULL, b INT, c INT) engine=InnoDB; ++CREATE TABLE tm1(a INT NOT NULL, b INT, c INT) engine=MyISAM; ++CREATE TABLE ti2(a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT) engine=InnoDB; ++CREATE TABLE tm2(a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT) engine=MyISAM; ++INSERT INTO ti1 VALUES (1,1,1), (2,2,2); ++INSERT INTO ti2 VALUES (1,1,1), (2,2,2); ++INSERT INTO tm1 VALUES (1,1,1), (2,2,2); ++INSERT INTO tm2 VALUES (1,1,1), (2,2,2); ++ ++--enable_info ++ALTER TABLE ti1; ++ALTER TABLE tm1; ++ ++ALTER TABLE ti1 ADD COLUMN d VARCHAR(200); ++ALTER TABLE tm1 ADD COLUMN d VARCHAR(200); ++ALTER TABLE ti1 ADD COLUMN d2 VARCHAR(200); ++ALTER TABLE tm1 ADD COLUMN d2 VARCHAR(200); ++ALTER TABLE ti1 ADD COLUMN e ENUM('a', 'b') FIRST; ++ALTER TABLE tm1 ADD COLUMN e ENUM('a', 'b') FIRST; ++ALTER TABLE ti1 ADD COLUMN f INT AFTER a; ++ALTER TABLE tm1 ADD COLUMN f INT AFTER a; ++ ++ALTER TABLE ti1 ADD INDEX ii1(b); ++ALTER TABLE tm1 ADD INDEX im1(b); ++ALTER TABLE ti1 ADD UNIQUE INDEX ii2 (c); ++ALTER TABLE tm1 ADD UNIQUE INDEX im2 (c); ++ALTER TABLE ti1 ADD FULLTEXT INDEX ii3 (d); ++ALTER TABLE tm1 ADD FULLTEXT INDEX im3 (d); ++ALTER TABLE ti1 ADD FULLTEXT INDEX ii4 (d2); ++ALTER TABLE tm1 ADD FULLTEXT INDEX im4 (d2); ++ ++# Bug#14140038 INCONSISTENT HANDLING OF FULLTEXT INDEXES IN ALTER TABLE ++--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON ++ALTER TABLE ti1 ADD PRIMARY KEY(a), ALGORITHM=INPLACE; ++ALTER TABLE ti1 ADD PRIMARY KEY(a); ++ALTER TABLE tm1 ADD PRIMARY KEY(a); ++ ++ALTER TABLE ti1 DROP INDEX ii3; ++ALTER TABLE tm1 DROP INDEX im3; ++ ++ALTER TABLE ti1 DROP COLUMN d2; ++ALTER TABLE tm1 DROP COLUMN d2; ++ ++ALTER TABLE ti1 ADD CONSTRAINT fi1 FOREIGN KEY (b) REFERENCES ti2(a); ++ALTER TABLE tm1 ADD CONSTRAINT fm1 FOREIGN KEY (b) REFERENCES tm2(a); ++ ++ALTER TABLE ti1 ALTER COLUMN b SET DEFAULT 1; ++ALTER TABLE tm1 ALTER COLUMN b SET DEFAULT 1; ++ALTER TABLE ti1 ALTER COLUMN b DROP DEFAULT; ++ALTER TABLE tm1 ALTER COLUMN b DROP DEFAULT; ++ ++# This will set both ALTER_COLUMN_NAME and COLUMN_DEFAULT_VALUE ++ALTER TABLE ti1 CHANGE COLUMN f g INT; ++ALTER TABLE tm1 CHANGE COLUMN f g INT; ++ALTER TABLE ti1 CHANGE COLUMN g h VARCHAR(20); ++ALTER TABLE tm1 CHANGE COLUMN g h VARCHAR(20); ++ALTER TABLE ti1 MODIFY COLUMN e ENUM('a', 'b', 'c'); ++ALTER TABLE tm1 MODIFY COLUMN e ENUM('a', 'b', 'c'); ++ALTER TABLE ti1 MODIFY COLUMN e INT; ++ALTER TABLE tm1 MODIFY COLUMN e INT; ++# This will set both ALTER_COLUMN_ORDER and COLUMN_DEFAULT_VALUE ++ALTER TABLE ti1 MODIFY COLUMN e INT AFTER h; ++ALTER TABLE tm1 MODIFY COLUMN e INT AFTER h; ++ALTER TABLE ti1 MODIFY COLUMN e INT FIRST; ++ALTER TABLE tm1 MODIFY COLUMN e INT FIRST; ++# This will set both ALTER_COLUMN_NOT_NULLABLE and COLUMN_DEFAULT_VALUE ++--disable_info ++# NULL -> NOT NULL only allowed INPLACE if strict sql_mode is on. 
++SET @orig_sql_mode = @@sql_mode; ++SET @@sql_mode = 'STRICT_TRANS_TABLES'; ++--enable_info ++ALTER TABLE ti1 MODIFY COLUMN c INT NOT NULL; ++--disable_info ++SET @@sql_mode = @orig_sql_mode; ++--enable_info ++ALTER TABLE tm1 MODIFY COLUMN c INT NOT NULL; ++# This will set both ALTER_COLUMN_NULLABLE and COLUMN_DEFAULT_VALUE ++ALTER TABLE ti1 MODIFY COLUMN c INT NULL; ++ALTER TABLE tm1 MODIFY COLUMN c INT NULL; ++# This will set both ALTER_COLUMN_EQUAL_PACK_LENGTH and COLUMN_DEFAULT_VALUE ++ALTER TABLE ti1 MODIFY COLUMN h VARCHAR(30); ++ALTER TABLE tm1 MODIFY COLUMN h VARCHAR(30); ++ALTER TABLE ti1 MODIFY COLUMN h VARCHAR(30) AFTER d; ++ALTER TABLE tm1 MODIFY COLUMN h VARCHAR(30) AFTER d; ++ ++ALTER TABLE ti1 DROP COLUMN h; ++ALTER TABLE tm1 DROP COLUMN h; ++ ++ALTER TABLE ti1 DROP INDEX ii2; ++ALTER TABLE tm1 DROP INDEX im2; ++ALTER TABLE ti1 DROP PRIMARY KEY; ++ALTER TABLE tm1 DROP PRIMARY KEY; ++ ++ALTER TABLE ti1 DROP FOREIGN KEY fi1; ++ALTER TABLE tm1 DROP FOREIGN KEY fm1; ++ ++ALTER TABLE ti1 RENAME TO ti3; ++ALTER TABLE tm1 RENAME TO tm3; ++ALTER TABLE ti3 RENAME TO ti1; ++ALTER TABLE tm3 RENAME TO tm1; ++ ++ALTER TABLE ti1 ORDER BY b; ++ALTER TABLE tm1 ORDER BY b; ++ ++ALTER TABLE ti1 CONVERT TO CHARACTER SET utf16; ++ALTER TABLE tm1 CONVERT TO CHARACTER SET utf16; ++ALTER TABLE ti1 DEFAULT CHARACTER SET utf8; ++ALTER TABLE tm1 DEFAULT CHARACTER SET utf8; ++ ++ALTER TABLE ti1 FORCE; ++ALTER TABLE tm1 FORCE; ++ ++ALTER TABLE ti1 AUTO_INCREMENT 3; ++ALTER TABLE tm1 AUTO_INCREMENT 3; ++ALTER TABLE ti1 AVG_ROW_LENGTH 10; ++ALTER TABLE tm1 AVG_ROW_LENGTH 10; ++ALTER TABLE ti1 CHECKSUM 1; ++ALTER TABLE tm1 CHECKSUM 1; ++ALTER TABLE ti1 COMMENT 'test'; ++ALTER TABLE tm1 COMMENT 'test'; ++ALTER TABLE ti1 MAX_ROWS 100; ++ALTER TABLE tm1 MAX_ROWS 100; ++ALTER TABLE ti1 MIN_ROWS 1; ++ALTER TABLE tm1 MIN_ROWS 1; ++ALTER TABLE ti1 PACK_KEYS 1; ++ALTER TABLE tm1 PACK_KEYS 1; ++ ++--disable_info ++DROP TABLE ti1, ti2, tm1, tm2; ++ ++--echo # Tests of >1 operation (InnoDB) ++ ++CREATE TABLE ti1(a INT PRIMARY KEY AUTO_INCREMENT, b INT) engine=InnoDB; ++INSERT INTO ti1(b) VALUES (1), (2); ++ ++--enable_info ++ALTER TABLE ti1 RENAME TO ti3, ADD INDEX ii1(b); ++ ++ALTER TABLE ti3 DROP INDEX ii1, AUTO_INCREMENT 5; ++--disable_info ++INSERT INTO ti3(b) VALUES (5); ++--enable_info ++ALTER TABLE ti3 ADD INDEX ii1(b), AUTO_INCREMENT 7; ++--disable_info ++INSERT INTO ti3(b) VALUES (7); ++SELECT * FROM ti3; ++ ++DROP TABLE ti3; ++ ++--echo # ++--echo # 8: Scenario in which ALTER TABLE was returning an unwarranted ++--echo # ER_ILLEGAL_HA error at some point during work on this WL. ++--echo # ++ ++CREATE TABLE tm1(i INT DEFAULT 1) engine=MyISAM; ++ALTER TABLE tm1 ADD INDEX ii1(i), ALTER COLUMN i DROP DEFAULT; ++DROP TABLE tm1; ++ ++# ++# MDEV-4435 Server crashes in my_strcasecmp_utf8 on ADD KEY IF NOT EXISTS with implicit name when the key exists. ++# ++create table if not exists t1 (i int); ++alter table t1 add key (i); ++alter table t1 add key if not exists (i); ++DROP TABLE t1; ++ ++# ++# MDEV-4436 CHANGE COLUMN IF EXISTS does not work and throws wrong warning. ++# ++create table t1 (a int); ++alter table t1 change column if exists a b bigint; ++show create table t1; ++DROP TABLE t1; ++ ++# ++# MDEV-4437 ALTER TABLE .. ADD UNIQUE INDEX IF NOT EXISTS causes syntax error. 
++# ++ ++create table t1 (i int); ++alter table t1 add unique index if not exists idx(i); ++alter table t1 add unique index if not exists idx(i); ++show create table t1; ++DROP TABLE t1; ++ ++# ++# MDEV-8358 ADD PRIMARY KEY IF NOT EXISTS -> ERROR 1068 (42000): Multiple primary key ++# ++ ++CREATE TABLE t1 ( ++ `event_id` bigint(20) unsigned NOT NULL DEFAULT '0', ++ `market_id` bigint(20) unsigned NOT NULL DEFAULT '0', ++ PRIMARY KEY (`event_id`,`market_id`) ++ ); ++ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS event_id (event_id,market_id); ++DROP TABLE t1; ++ ++--echo # ++--echo # MDEV-11126 Crash while altering persistent virtual column ++--echo # ++ ++CREATE TABLE `tab1` ( ++ `id` bigint(20) NOT NULL AUTO_INCREMENT, ++ `field2` set('option1','option2','option3','option4') NOT NULL, ++ `field3` set('option1','option2','option3','option4','option5') NOT NULL, ++ `field4` set('option1','option2','option3','option4') NOT NULL, ++ `field5` varchar(32) NOT NULL, ++ `field6` varchar(32) NOT NULL, ++ `field7` varchar(32) NOT NULL, ++ `field8` varchar(32) NOT NULL, ++ `field9` int(11) NOT NULL DEFAULT '1', ++ `field10` varchar(16) NOT NULL, ++ `field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1', ++ `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT, ++ PRIMARY KEY (`id`) ++) DEFAULT CHARSET=latin1; ++ ++ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128); ++SHOW CREATE TABLE `tab1`; ++ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT; ++SHOW CREATE TABLE `tab1`; ++DROP TABLE `tab1`; ++ ++--echo # ++--echo # MDEV-11548 Reproducible server crash after the 2nd ALTER TABLE ADD FOREIGN KEY IF NOT EXISTS ++--echo # ++ ++CREATE TABLE t1 (id INT UNSIGNED NOT NULL PRIMARY KEY); ++CREATE TABLE t2 (id1 INT UNSIGNED NOT NULL); ++ ++ALTER TABLE t2 ++ADD FOREIGN KEY IF NOT EXISTS (id1) ++ REFERENCES t1 (id); ++ ++ALTER TABLE t2 ++ADD FOREIGN KEY IF NOT EXISTS (id1) ++REFERENCES t1 (id); ++ ++DROP TABLE t2; ++DROP TABLE t1; ++ ++ ++--echo # ++--echo # MDEV-6390 CONVERT TO CHARACTER SET utf8 doesn't change DEFAULT CHARSET. 
++--echo # ++ ++CREATE TABLE t1 (id int(11) NOT NULL, a int(11) NOT NULL, b int(11)) ++ ENGINE=InnoDB DEFAULT CHARSET=latin1; ++SHOW CREATE TABLE t1; ++ALTER TABLE t1 CONVERT TO CHARACTER SET utf8; ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++--echo # ++--echo # ++--echo # MDEV-15308 ++--echo # Assertion `ha_alter_info->alter_info->drop_list.elements > 0' failed ++--echo # in ha_innodb::prepare_inplace_alter_table ++--echo # ++ ++CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; ++ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP COLUMN b; ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; ++ALTER TABLE t1 DROP INDEX IF EXISTS fk, DROP COLUMN b; ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++CREATE TABLE t1 (a INT, b INT, c INT, KEY(c)) ENGINE=InnoDB; ++ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP COLUMN c; ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++CREATE TABLE t1 (a INT, b INT, c INT, KEY c1(c)) ENGINE=InnoDB; ++ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP INDEX c1; ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; ++ALTER TABLE t1 DROP INDEX IF EXISTS fk, DROP COLUMN IF EXISTS c; ++SHOW CREATE TABLE t1; ++DROP TABLE t1; ++ ++--echo # ++--echo # MDEV-14668 ADD PRIMARY KEY IF NOT EXISTS on composite key ++--echo # ++CREATE TABLE t1 ( ++ `ID` BIGINT(20) NOT NULL, ++ `RANK` MEDIUMINT(4) NOT NULL, ++ `CHECK_POINT` BIGINT(20) NOT NULL, ++ UNIQUE INDEX `HORIZON_UIDX01` (`ID`, `RANK`) ++ ) ENGINE=InnoDB; ++ ++ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`); ++SHOW CREATE TABLE t1; ++ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`); ++DROP TABLE t1; diff --cc mysql-test/r/grant.result index c3a1db86c37,60f2eb90e2d..cfd442d58c8 --- a/mysql-test/r/grant.result +++ b/mysql-test/r/grant.result @@@ -2569,29 -2581,24 +2569,58 @@@ foo@localhost foo@127.0.0. 
# Clean-up DROP USER foo@'127.0.0.1'; # End of Bug#12766319 + create user foo@localhost; + create database foodb; + grant create routine on foodb.* to foo@localhost; + create procedure fooproc() select 'i am fooproc'; + show grants; + Grants for foo@localhost + GRANT USAGE ON *.* TO 'foo'@'localhost' + GRANT CREATE ROUTINE ON `foodb`.* TO 'foo'@'localhost' + GRANT EXECUTE, ALTER ROUTINE ON PROCEDURE `test`.`fooproc` TO 'foo'@'localhost' + rename table mysql.procs_priv to mysql.procs_priv1; + flush privileges; -ERROR 42S02: Table 'mysql.procs_priv' doesn't exist + show grants for foo@localhost; + Grants for foo@localhost + GRANT USAGE ON *.* TO 'foo'@'localhost' + GRANT CREATE ROUTINE ON `foodb`.* TO 'foo'@'localhost' -GRANT EXECUTE, ALTER ROUTINE ON PROCEDURE `test`.`fooproc` TO 'foo'@'localhost' + rename table mysql.procs_priv1 to mysql.procs_priv; ++show grants for foo@localhost; ++Grants for foo@localhost ++GRANT USAGE ON *.* TO 'foo'@'localhost' ++GRANT CREATE ROUTINE ON `foodb`.* TO 'foo'@'localhost' ++flush privileges; ++show grants for foo@localhost; ++Grants for foo@localhost ++GRANT USAGE ON *.* TO 'foo'@'localhost' ++GRANT CREATE ROUTINE ON `foodb`.* TO 'foo'@'localhost' ++GRANT EXECUTE, ALTER ROUTINE ON PROCEDURE `test`.`fooproc` TO 'foo'@'localhost' + drop user foo@localhost; + drop procedure fooproc; + drop database foodb; +# +# Bug#11756966 - 48958: STORED PROCEDURES CAN BE LEVERAGED TO BYPASS +# DATABASE SECURITY +# +DROP DATABASE IF EXISTS secret; +DROP DATABASE IF EXISTS no_such_db; +CREATE DATABASE secret; +GRANT USAGE ON *.* TO untrusted@localhost; +# Connection con1 +SHOW GRANTS; +Grants for untrusted@localhost +GRANT USAGE ON *.* TO 'untrusted'@'localhost' +SHOW DATABASES; +Database +information_schema +test +# Both statements below should fail with the same error. +# They used to give different errors, thereby +# hinting that the secret database exists. 
+CREATE PROCEDURE no_such_db.foo() BEGIN END; +ERROR 42000: Access denied for user 'untrusted'@'localhost' to database 'no_such_db' +CREATE PROCEDURE secret.peek_at_secret() BEGIN END; +ERROR 42000: Access denied for user 'untrusted'@'localhost' to database 'secret' +# Connection default +DROP USER untrusted@localhost; +DROP DATABASE secret; diff --cc mysql-test/r/grant.test index 00000000000,00000000000..f2dfb01cc39 new file mode 100644 --- /dev/null +++ b/mysql-test/r/grant.test @@@ -1,0 -1,0 +1,2291 @@@ ++# Test of GRANT commands ++ ++# Grant tests not performed with embedded server ++-- source include/not_embedded.inc ++ ++# Save the initial number of concurrent sessions ++--source include/count_sessions.inc ++ ++SET @old_log_bin_trust_function_creators= @@global.log_bin_trust_function_creators; ++SET GLOBAL log_bin_trust_function_creators = 1; ++ ++# Cleanup ++--disable_warnings ++drop table if exists t1; ++drop database if exists mysqltest; ++--enable_warnings ++ ++connect (master,localhost,root,,); ++connection master; ++SET NAMES binary; ++ ++# ++# Test that SSL options works properly ++# ++ ++delete from mysql.user where user='mysqltest_1'; ++delete from mysql.db where user='mysqltest_1'; ++flush privileges; ++grant select on mysqltest.* to mysqltest_1@localhost require cipher "EDH-RSA-DES-CBC3-SHA"; ++show grants for mysqltest_1@localhost; ++grant delete on mysqltest.* to mysqltest_1@localhost; ++query_vertical select * from mysql.user where user="mysqltest_1"; ++show grants for mysqltest_1@localhost; ++revoke delete on mysqltest.* from mysqltest_1@localhost; ++show grants for mysqltest_1@localhost; ++grant select on mysqltest.* to mysqltest_1@localhost require NONE; ++show grants for mysqltest_1@localhost; ++grant USAGE on mysqltest.* to mysqltest_1@localhost require cipher "EDH-RSA-DES-CBC3-SHA" AND SUBJECT "testsubject" ISSUER "Monty Program Ab"; ++show grants for mysqltest_1@localhost; ++revoke all privileges on mysqltest.* from mysqltest_1@localhost; ++show grants for mysqltest_1@localhost; ++delete from mysql.user where user='mysqltest_1'; ++flush privileges; ++ ++# ++# Test of GRANTS specifying user limits ++# ++delete from mysql.user where user='mysqltest_1'; ++flush privileges; ++grant usage on *.* to mysqltest_1@localhost with max_queries_per_hour 10; ++query_vertical select * from mysql.user where user="mysqltest_1"; ++show grants for mysqltest_1@localhost; ++grant usage on *.* to mysqltest_1@localhost with max_updates_per_hour 20 max_connections_per_hour 30; ++query_vertical select * from mysql.user where user="mysqltest_1"; ++show grants for mysqltest_1@localhost; ++# This is just to double check that one won't ignore results of selects ++flush privileges; ++show grants for mysqltest_1@localhost; ++delete from mysql.user where user='mysqltest_1'; ++flush privileges; ++ ++# ++# Test that the new db privileges are stored/retrieved correctly ++# ++ ++grant CREATE TEMPORARY TABLES, LOCK TABLES on mysqltest.* to mysqltest_1@localhost; ++show grants for mysqltest_1@localhost; ++flush privileges; ++show grants for mysqltest_1@localhost; ++revoke CREATE TEMPORARY TABLES on mysqltest.* from mysqltest_1@localhost; ++show grants for mysqltest_1@localhost; ++grant ALL PRIVILEGES on mysqltest.* to mysqltest_1@localhost with GRANT OPTION; ++flush privileges; ++show grants for mysqltest_1@localhost; ++revoke LOCK TABLES, ALTER on mysqltest.* from mysqltest_1@localhost; ++show grants for mysqltest_1@localhost; ++revoke all privileges on mysqltest.* from mysqltest_1@localhost; 
++delete from mysql.user where user='mysqltest_1'; ++flush privileges; ++grant usage on test.* to mysqltest_1@localhost with grant option; ++show grants for mysqltest_1@localhost; ++delete from mysql.user where user='mysqltest_1'; ++delete from mysql.db where user='mysqltest_1'; ++delete from mysql.tables_priv where user='mysqltest_1'; ++delete from mysql.columns_priv where user='mysqltest_1'; ++flush privileges; ++--error ER_NONEXISTING_GRANT ++show grants for mysqltest_1@localhost; ++ ++# ++# Test what happens when you have same table and colum level grants ++# ++ ++create table t1 (a int); ++GRANT select,update,insert on t1 to mysqltest_1@localhost; ++GRANT select (a), update (a),insert(a), references(a) on t1 to mysqltest_1@localhost; ++show grants for mysqltest_1@localhost; ++select table_priv,column_priv from mysql.tables_priv where user="mysqltest_1"; ++REVOKE select (a), update on t1 from mysqltest_1@localhost; ++show grants for mysqltest_1@localhost; ++REVOKE select,update,insert,insert (a) on t1 from mysqltest_1@localhost; ++show grants for mysqltest_1@localhost; ++GRANT select,references on t1 to mysqltest_1@localhost; ++select table_priv,column_priv from mysql.tables_priv where user="mysqltest_1"; ++grant all on test.* to mysqltest_3@localhost with grant option; ++revoke all on test.* from mysqltest_3@localhost; ++show grants for mysqltest_3@localhost; ++revoke grant option on test.* from mysqltest_3@localhost; ++show grants for mysqltest_3@localhost; ++grant all on test.t1 to mysqltest_2@localhost with grant option; ++revoke all on test.t1 from mysqltest_2@localhost; ++show grants for mysqltest_2@localhost; ++revoke grant option on test.t1 from mysqltest_2@localhost; ++show grants for mysqltest_2@localhost; ++delete from mysql.user where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; ++delete from mysql.db where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; ++delete from mysql.tables_priv where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; ++delete from mysql.columns_priv where user='mysqltest_1' or user="mysqltest_2" or user="mysqltest_3"; ++flush privileges; ++drop table t1; ++ ++# ++# Test some error conditions ++# ++--error ER_WRONG_USAGE ++GRANT FILE on mysqltest.* to mysqltest_1@localhost; ++select 1; # To test that the previous command didn't cause problems ++ ++# ++# Bug#4898 User privileges depending on ORDER BY Settings of table db ++# ++insert into mysql.user (host, user) values ('localhost', 'test11'); ++insert into mysql.db (host, db, user, select_priv) values ++('localhost', 'a%', 'test11', 'Y'), ('localhost', 'ab%', 'test11', 'Y'); ++alter table mysql.db order by db asc; ++flush privileges; ++show grants for test11@localhost; ++alter table mysql.db order by db desc; ++flush privileges; ++show grants for test11@localhost; ++delete from mysql.user where user='test11'; ++delete from mysql.db where user='test11'; ++ ++# ++# Bug#6123 GRANT USAGE inserts useless Db row ++# ++create database mysqltest1; ++grant usage on mysqltest1.* to test6123 identified by 'magic123'; ++select host,db,user,select_priv,insert_priv from mysql.db where db="mysqltest1"; ++delete from mysql.user where user='test6123'; ++drop database mysqltest1; ++ ++# ++# Test for 'drop user', 'revoke privileges, grant' ++# ++ ++create table t1 (a int); ++grant ALL PRIVILEGES on *.* to drop_user2@localhost with GRANT OPTION; ++show grants for drop_user2@localhost; ++revoke all privileges, grant option from drop_user2@localhost; ++drop user 
drop_user2@localhost; ++ ++grant ALL PRIVILEGES on *.* to drop_user@localhost with GRANT OPTION; ++grant ALL PRIVILEGES on test.* to drop_user@localhost with GRANT OPTION; ++grant select(a) on test.t1 to drop_user@localhost; ++show grants for drop_user@localhost; ++ ++# ++# Bug#3086 SHOW GRANTS doesn't follow ANSI_QUOTES ++# ++set sql_mode=ansi_quotes; ++show grants for drop_user@localhost; ++set sql_mode=default; ++ ++set sql_quote_show_create=0; ++show grants for drop_user@localhost; ++set sql_mode="ansi_quotes"; ++show grants for drop_user@localhost; ++set sql_quote_show_create=1; ++show grants for drop_user@localhost; ++set sql_mode=""; ++show grants for drop_user@localhost; ++ ++revoke all privileges, grant option from drop_user@localhost; ++show grants for drop_user@localhost; ++drop user drop_user@localhost; ++--error ER_REVOKE_GRANTS ++revoke all privileges, grant option from drop_user@localhost; ++ ++grant select(a) on test.t1 to drop_user1@localhost; ++grant select on test.t1 to drop_user2@localhost; ++grant select on test.* to drop_user3@localhost; ++grant select on *.* to drop_user4@localhost; ++# Drop user now implicitly revokes all privileges. ++drop user drop_user1@localhost, drop_user2@localhost, drop_user3@localhost, ++drop_user4@localhost; ++--error ER_REVOKE_GRANTS ++revoke all privileges, grant option from drop_user1@localhost, drop_user2@localhost, ++drop_user3@localhost, drop_user4@localhost; ++--error ER_CANNOT_USER ++drop user drop_user1@localhost, drop_user2@localhost, drop_user3@localhost, ++drop_user4@localhost; ++drop table t1; ++grant usage on *.* to mysqltest_1@localhost identified by "password"; ++grant select, update, insert on test.* to mysqltest_1@localhost; ++show grants for mysqltest_1@localhost; ++drop user mysqltest_1@localhost; ++ ++# ++# Bug#3403 Wrong encoding in SHOW GRANTS output ++# ++SET NAMES koi8r; ++CREATE DATABASE ��; ++USE ��; ++CREATE TABLE ��� (��� INT); ++ ++GRANT SELECT ON ��.* TO ����@localhost; ++SHOW GRANTS FOR ����@localhost; ++REVOKE SELECT ON ��.* FROM ����@localhost; ++ ++GRANT SELECT ON ��.��� TO ����@localhost; ++SHOW GRANTS FOR ����@localhost; ++REVOKE SELECT ON ��.��� FROM ����@localhost; ++ ++GRANT SELECT (���) ON ��.��� TO ����@localhost; ++SHOW GRANTS FOR ����@localhost; ++REVOKE SELECT (���) ON ��.��� FROM ����@localhost; ++ ++# Revoke does not drop user. Leave a clean user table for the next tests. 
++DROP USER ����@localhost; ++ ++DROP DATABASE ��; ++SET NAMES latin1; ++ ++# ++# Bug#5831 REVOKE ALL PRIVILEGES, GRANT OPTION does not revoke everything ++# ++USE test; ++CREATE TABLE t1 (a int ); ++CREATE TABLE t2 LIKE t1; ++CREATE TABLE t3 LIKE t1; ++CREATE TABLE t4 LIKE t1; ++CREATE TABLE t5 LIKE t1; ++CREATE TABLE t6 LIKE t1; ++CREATE TABLE t7 LIKE t1; ++CREATE TABLE t8 LIKE t1; ++CREATE TABLE t9 LIKE t1; ++CREATE TABLE t10 LIKE t1; ++CREATE DATABASE testdb1; ++CREATE DATABASE testdb2; ++CREATE DATABASE testdb3; ++CREATE DATABASE testdb4; ++CREATE DATABASE testdb5; ++CREATE DATABASE testdb6; ++CREATE DATABASE testdb7; ++CREATE DATABASE testdb8; ++CREATE DATABASE testdb9; ++CREATE DATABASE testdb10; ++GRANT ALL ON testdb1.* TO testuser@localhost; ++GRANT ALL ON testdb2.* TO testuser@localhost; ++GRANT ALL ON testdb3.* TO testuser@localhost; ++GRANT ALL ON testdb4.* TO testuser@localhost; ++GRANT ALL ON testdb5.* TO testuser@localhost; ++GRANT ALL ON testdb6.* TO testuser@localhost; ++GRANT ALL ON testdb7.* TO testuser@localhost; ++GRANT ALL ON testdb8.* TO testuser@localhost; ++GRANT ALL ON testdb9.* TO testuser@localhost; ++GRANT ALL ON testdb10.* TO testuser@localhost; ++GRANT SELECT ON test.t1 TO testuser@localhost; ++GRANT SELECT ON test.t2 TO testuser@localhost; ++GRANT SELECT ON test.t3 TO testuser@localhost; ++GRANT SELECT ON test.t4 TO testuser@localhost; ++GRANT SELECT ON test.t5 TO testuser@localhost; ++GRANT SELECT ON test.t6 TO testuser@localhost; ++GRANT SELECT ON test.t7 TO testuser@localhost; ++GRANT SELECT ON test.t8 TO testuser@localhost; ++GRANT SELECT ON test.t9 TO testuser@localhost; ++GRANT SELECT ON test.t10 TO testuser@localhost; ++GRANT SELECT (a) ON test.t1 TO testuser@localhost; ++GRANT SELECT (a) ON test.t2 TO testuser@localhost; ++GRANT SELECT (a) ON test.t3 TO testuser@localhost; ++GRANT SELECT (a) ON test.t4 TO testuser@localhost; ++GRANT SELECT (a) ON test.t5 TO testuser@localhost; ++GRANT SELECT (a) ON test.t6 TO testuser@localhost; ++GRANT SELECT (a) ON test.t7 TO testuser@localhost; ++GRANT SELECT (a) ON test.t8 TO testuser@localhost; ++GRANT SELECT (a) ON test.t9 TO testuser@localhost; ++GRANT SELECT (a) ON test.t10 TO testuser@localhost; ++REVOKE ALL PRIVILEGES, GRANT OPTION FROM testuser@localhost; ++SHOW GRANTS FOR testuser@localhost; ++DROP USER testuser@localhost; ++DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; ++DROP DATABASE testdb1; ++DROP DATABASE testdb2; ++DROP DATABASE testdb3; ++DROP DATABASE testdb4; ++DROP DATABASE testdb5; ++DROP DATABASE testdb6; ++DROP DATABASE testdb7; ++DROP DATABASE testdb8; ++DROP DATABASE testdb9; ++DROP DATABASE testdb10; ++ ++# ++# Bug#6932 a problem with 'revoke ALL PRIVILEGES' ++# ++ ++create table t1(a int, b int, c int, d int); ++grant insert(b), insert(c), insert(d), insert(a) on t1 to grant_user@localhost; ++show grants for grant_user@localhost; ++select Host,Db,User,Table_name,Column_name,Column_priv from mysql.columns_priv order by Column_name; ++revoke ALL PRIVILEGES on t1 from grant_user@localhost; ++show grants for grant_user@localhost; ++select Host,Db,User,Table_name,Column_name,Column_priv from mysql.columns_priv; ++drop user grant_user@localhost; ++drop table t1; ++ ++# ++# Bug#7391 Cross-database multi-table UPDATE security problem ++# ++create database mysqltest_1; ++create database mysqltest_2; ++create table mysqltest_1.t1 select 1 a, 2 q; ++create table mysqltest_1.t2 select 1 b, 2 r; ++create table mysqltest_2.t1 select 1 c, 2 s; ++create table mysqltest_2.t2 select 1 d, 2 t; ++ ++# test the 
column privileges ++grant update (a) on mysqltest_1.t1 to mysqltest_3@localhost; ++grant select (b) on mysqltest_1.t2 to mysqltest_3@localhost; ++grant select (c) on mysqltest_2.t1 to mysqltest_3@localhost; ++grant update (d) on mysqltest_2.t2 to mysqltest_3@localhost; ++connect (conn1,localhost,mysqltest_3,,); ++connection conn1; ++SELECT * FROM INFORMATION_SCHEMA.COLUMN_PRIVILEGES ++ WHERE GRANTEE = '''mysqltest_3''@''localhost''' ++ ORDER BY TABLE_NAME,COLUMN_NAME,PRIVILEGE_TYPE; ++SELECT * FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES ++ WHERE GRANTEE = '''mysqltest_3''@''localhost''' ++ ORDER BY TABLE_NAME,PRIVILEGE_TYPE; ++SELECT * from INFORMATION_SCHEMA.SCHEMA_PRIVILEGES ++ WHERE GRANTEE = '''mysqltest_3''@''localhost''' ++ ORDER BY TABLE_SCHEMA,PRIVILEGE_TYPE; ++SELECT * from INFORMATION_SCHEMA.USER_PRIVILEGES ++ WHERE GRANTEE = '''mysqltest_3''@''localhost''' ++ ORDER BY TABLE_CATALOG,PRIVILEGE_TYPE; ++--error ER_COLUMNACCESS_DENIED_ERROR ++update mysqltest_1.t1, mysqltest_1.t2 set q=10 where b=1; ++--error ER_COLUMNACCESS_DENIED_ERROR ++update mysqltest_1.t2, mysqltest_2.t2 set d=20 where d=1; ++--error ER_TABLEACCESS_DENIED_ERROR ++update mysqltest_1.t1, mysqltest_2.t2 set d=20 where d=1; ++--error ER_TABLEACCESS_DENIED_ERROR ++update mysqltest_2.t1, mysqltest_1.t2 set c=20 where b=1; ++--error ER_COLUMNACCESS_DENIED_ERROR ++update mysqltest_2.t1, mysqltest_2.t2 set d=10 where s=2; ++# the following two should work ++update mysqltest_1.t1, mysqltest_2.t2 set a=10,d=10; ++update mysqltest_1.t1, mysqltest_2.t1 set a=20 where c=20; ++connection master; ++select t1.*,t2.* from mysqltest_1.t1,mysqltest_1.t2; ++select t1.*,t2.* from mysqltest_2.t1,mysqltest_2.t2; ++revoke all on mysqltest_1.t1 from mysqltest_3@localhost; ++revoke all on mysqltest_1.t2 from mysqltest_3@localhost; ++revoke all on mysqltest_2.t1 from mysqltest_3@localhost; ++revoke all on mysqltest_2.t2 from mysqltest_3@localhost; ++ ++# test the db/table level privileges ++grant all on mysqltest_2.* to mysqltest_3@localhost; ++grant select on *.* to mysqltest_3@localhost; ++# Next grant is needed to trigger bug#7391. Do not optimize! ++grant select on mysqltest_2.t1 to mysqltest_3@localhost; ++flush privileges; ++disconnect conn1; ++connect (conn2,localhost,mysqltest_3,,); ++connection conn2; ++use mysqltest_1; ++update mysqltest_2.t1, mysqltest_2.t2 set c=500,d=600; ++# the following failed before, should fail now. ++--error ER_TABLEACCESS_DENIED_ERROR ++update mysqltest_1.t1, mysqltest_1.t2 set a=100,b=200; ++use mysqltest_2; ++# the following used to succeed, it must fail now. 
++--error ER_TABLEACCESS_DENIED_ERROR ++update mysqltest_1.t1, mysqltest_1.t2 set a=100,b=200; ++--error ER_TABLEACCESS_DENIED_ERROR ++update mysqltest_2.t1, mysqltest_1.t2 set c=100,b=200; ++--error ER_TABLEACCESS_DENIED_ERROR ++update mysqltest_1.t1, mysqltest_2.t2 set a=100,d=200; ++# lets see the result ++connection master; ++select t1.*,t2.* from mysqltest_1.t1,mysqltest_1.t2; ++select t1.*,t2.* from mysqltest_2.t1,mysqltest_2.t2; ++ ++delete from mysql.user where user='mysqltest_3'; ++delete from mysql.db where user="mysqltest_3"; ++delete from mysql.tables_priv where user="mysqltest_3"; ++delete from mysql.columns_priv where user="mysqltest_3"; ++flush privileges; ++drop database mysqltest_1; ++drop database mysqltest_2; ++disconnect conn2; ++ ++# ++# just SHOW PRIVILEGES test ++# ++SHOW PRIVILEGES; ++ ++# ++# Rights for renaming test (Bug#3270) ++# ++connect (root,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK); ++connection root; ++--disable_warnings ++create database mysqltest; ++--enable_warnings ++create table mysqltest.t1 (a int,b int,c int); ++grant all on mysqltest.t1 to mysqltest_1@localhost; ++connect (user1,localhost,mysqltest_1,,mysqltest,$MASTER_MYPORT,$MASTER_MYSOCK); ++connection user1; ++-- error ER_TABLEACCESS_DENIED_ERROR ++alter table t1 rename t2; ++disconnect user1; ++connection root; ++revoke all privileges on mysqltest.t1 from mysqltest_1@localhost; ++delete from mysql.user where user=_binary'mysqltest_1'; ++drop database mysqltest; ++connection default; ++disconnect root; ++ ++# ++# check all new table privileges ++# ++CREATE USER dummy@localhost; ++CREATE DATABASE mysqltest; ++CREATE TABLE mysqltest.dummytable (dummyfield INT); ++CREATE VIEW mysqltest.dummyview AS SELECT dummyfield FROM mysqltest.dummytable; ++GRANT ALL PRIVILEGES ON mysqltest.dummytable TO dummy@localhost; ++GRANT ALL PRIVILEGES ON mysqltest.dummyview TO dummy@localhost; ++SHOW GRANTS FOR dummy@localhost; ++use INFORMATION_SCHEMA; ++SELECT TABLE_SCHEMA, TABLE_NAME, GROUP_CONCAT(PRIVILEGE_TYPE ORDER BY ++PRIVILEGE_TYPE SEPARATOR ', ') AS PRIVILEGES FROM TABLE_PRIVILEGES WHERE GRANTEE ++= '\'dummy\'@\'localhost\'' GROUP BY TABLE_SCHEMA, TABLE_NAME; ++FLUSH PRIVILEGES; ++SHOW GRANTS FOR dummy@localhost; ++SELECT TABLE_SCHEMA, TABLE_NAME, GROUP_CONCAT(PRIVILEGE_TYPE ORDER BY ++PRIVILEGE_TYPE SEPARATOR ', ') AS PRIVILEGES FROM TABLE_PRIVILEGES WHERE GRANTEE ++= '\'dummy\'@\'localhost\'' GROUP BY TABLE_SCHEMA, TABLE_NAME; ++SHOW FIELDS FROM mysql.tables_priv; ++use test; ++REVOKE ALL PRIVILEGES, GRANT OPTION FROM dummy@localhost; ++DROP USER dummy@localhost; ++DROP DATABASE mysqltest; ++# check view only privileges ++CREATE USER dummy@localhost; ++CREATE DATABASE mysqltest; ++CREATE TABLE mysqltest.dummytable (dummyfield INT); ++CREATE VIEW mysqltest.dummyview AS SELECT dummyfield FROM mysqltest.dummytable; ++GRANT CREATE VIEW ON mysqltest.dummytable TO dummy@localhost; ++GRANT CREATE VIEW ON mysqltest.dummyview TO dummy@localhost; ++SHOW GRANTS FOR dummy@localhost; ++use INFORMATION_SCHEMA; ++SELECT TABLE_SCHEMA, TABLE_NAME, GROUP_CONCAT(PRIVILEGE_TYPE ORDER BY ++PRIVILEGE_TYPE SEPARATOR ', ') AS PRIVILEGES FROM TABLE_PRIVILEGES WHERE GRANTEE ++= '\'dummy\'@\'localhost\'' GROUP BY TABLE_SCHEMA, TABLE_NAME; ++FLUSH PRIVILEGES; ++SHOW GRANTS FOR dummy@localhost; ++SELECT TABLE_SCHEMA, TABLE_NAME, GROUP_CONCAT(PRIVILEGE_TYPE ORDER BY ++PRIVILEGE_TYPE SEPARATOR ', ') AS PRIVILEGES FROM TABLE_PRIVILEGES WHERE GRANTEE ++= '\'dummy\'@\'localhost\'' GROUP BY TABLE_SCHEMA, TABLE_NAME; ++use test; 
++REVOKE ALL PRIVILEGES, GRANT OPTION FROM dummy@localhost; ++DROP USER dummy@localhost; ++DROP DATABASE mysqltest; ++CREATE USER dummy@localhost; ++CREATE DATABASE mysqltest; ++CREATE TABLE mysqltest.dummytable (dummyfield INT); ++CREATE VIEW mysqltest.dummyview AS SELECT dummyfield FROM mysqltest.dummytable; ++GRANT SHOW VIEW ON mysqltest.dummytable TO dummy@localhost; ++GRANT SHOW VIEW ON mysqltest.dummyview TO dummy@localhost; ++SHOW GRANTS FOR dummy@localhost; ++use INFORMATION_SCHEMA; ++SELECT TABLE_SCHEMA, TABLE_NAME, GROUP_CONCAT(PRIVILEGE_TYPE ORDER BY ++PRIVILEGE_TYPE SEPARATOR ', ') AS PRIVILEGES FROM TABLE_PRIVILEGES WHERE GRANTEE ++= '\'dummy\'@\'localhost\'' GROUP BY TABLE_SCHEMA, TABLE_NAME; ++FLUSH PRIVILEGES; ++SHOW GRANTS FOR dummy@localhost; ++SELECT TABLE_SCHEMA, TABLE_NAME, GROUP_CONCAT(PRIVILEGE_TYPE ORDER BY ++PRIVILEGE_TYPE SEPARATOR ', ') AS PRIVILEGES FROM TABLE_PRIVILEGES WHERE GRANTEE ++= '\'dummy\'@\'localhost\'' GROUP BY TABLE_SCHEMA, TABLE_NAME; ++use test; ++REVOKE ALL PRIVILEGES, GRANT OPTION FROM dummy@localhost; ++DROP USER dummy@localhost; ++DROP DATABASE mysqltest; ++# ++# Bug#11330 Entry in tables_priv with host = '' causes crash ++# ++connection default; ++use mysql; ++insert into tables_priv values ('','test_db','mysqltest_1','test_table','test_grantor',CURRENT_TIMESTAMP,'Select','Select'); ++flush privileges; ++delete from tables_priv where host = '' and user = 'mysqltest_1'; ++flush privileges; ++use test; ++ ++# ++# Bug#10892 user variables not auto cast for comparisons ++# Check that we don't get illegal mix of collations ++# ++set @user123="non-existent"; ++select * from mysql.db where user=@user123; ++ ++set names koi8r; ++create database ��; ++grant select on ��.* to root@localhost; ++select hex(Db) from mysql.db where Db='��'; ++show grants for root@localhost; ++flush privileges; ++show grants for root@localhost; ++drop database ��; ++revoke all privileges on ��.* from root@localhost; ++show grants for root@localhost; ++set names latin1; ++ ++# ++# Bug#15598 Server crashes in specific case during setting new password ++# - Caused by a user with host '' ++# ++create user mysqltest_7@; ++set password for mysqltest_7@ = password('systpass'); ++show grants for mysqltest_7@; ++drop user mysqltest_7@; ++--error ER_NONEXISTING_GRANT ++show grants for mysqltest_7@; ++ ++# ++# Bug#14385 GRANT and mapping to correct user account problems ++# ++create database mysqltest; ++use mysqltest; ++create table t1(f1 int); ++GRANT DELETE ON mysqltest.t1 TO mysqltest1@'%'; ++GRANT SELECT ON mysqltest.t1 TO mysqltest1@'192.%'; ++show grants for mysqltest1@'192.%'; ++show grants for mysqltest1@'%'; ++delete from mysql.user where user='mysqltest1'; ++delete from mysql.db where user='mysqltest1'; ++delete from mysql.tables_priv where user='mysqltest1'; ++flush privileges; ++drop database mysqltest; ++ ++# ++# Bug#27515 DROP previlege is not required for RENAME TABLE ++# ++connection master; ++create database db27515; ++use db27515; ++create table t1 (a int); ++grant alter on db27515.t1 to user27515@localhost; ++grant insert, create on db27515.t2 to user27515@localhost; ++ ++connect (conn27515, localhost, user27515, , db27515); ++connection conn27515; ++--error ER_TABLEACCESS_DENIED_ERROR ++rename table t1 to t2; ++disconnect conn27515; ++ ++connection master; ++revoke all privileges, grant option from user27515@localhost; ++drop user user27515@localhost; ++drop database db27515; ++ ++--echo End of 4.1 tests ++ ++# ++# Bug#16297 In memory grant tables not flushed when 
users's hostname is "" ++# ++use test; ++create table t1 (a int); ++ ++# Backup anonymous users and remove them. (They get in the way of ++# the one we test with here otherwise.) ++create table t2 as select * from mysql.user where user=''; ++delete from mysql.user where user=''; ++flush privileges; ++ ++# Create some users with different hostnames ++create user mysqltest_8@''; ++create user mysqltest_8@host8; ++ ++# Try to create them again ++--error ER_CANNOT_USER ++create user mysqltest_8@''; ++--error ER_CANNOT_USER ++create user mysqltest_8; ++--error ER_CANNOT_USER ++create user mysqltest_8@host8; ++ ++select user, QUOTE(host) from mysql.user where user="mysqltest_8"; ++ ++--echo Schema privileges ++grant select on mysqltest.* to mysqltest_8@''; ++show grants for mysqltest_8@''; ++grant select on mysqltest.* to mysqltest_8@; ++show grants for mysqltest_8@; ++grant select on mysqltest.* to mysqltest_8; ++show grants for mysqltest_8; ++select * from information_schema.schema_privileges ++where grantee like "'mysqltest_8'%"; ++connect (conn3,localhost,mysqltest_8,,); ++select * from t1; ++disconnect conn3; ++connection master; ++revoke select on mysqltest.* from mysqltest_8@''; ++show grants for mysqltest_8@''; ++show grants for mysqltest_8; ++select * from information_schema.schema_privileges ++where grantee like "'mysqltest_8'%"; ++flush privileges; ++show grants for mysqltest_8@''; ++show grants for mysqltest_8@; ++grant select on mysqltest.* to mysqltest_8@''; ++flush privileges; ++show grants for mysqltest_8@; ++revoke select on mysqltest.* from mysqltest_8@''; ++flush privileges; ++ ++--echo Column privileges ++grant update (a) on t1 to mysqltest_8@''; ++grant update (a) on t1 to mysqltest_8; ++show grants for mysqltest_8@''; ++show grants for mysqltest_8; ++flush privileges; ++show grants for mysqltest_8@''; ++show grants for mysqltest_8; ++select * from information_schema.column_privileges; ++connect (conn4,localhost,mysqltest_8,,); ++select * from t1; ++disconnect conn4; ++connection master; ++revoke update (a) on t1 from mysqltest_8@''; ++show grants for mysqltest_8@''; ++show grants for mysqltest_8; ++select * from information_schema.column_privileges; ++flush privileges; ++show grants for mysqltest_8@''; ++show grants for mysqltest_8; ++ ++--echo Table privileges ++grant update on t1 to mysqltest_8@''; ++grant update on t1 to mysqltest_8; ++show grants for mysqltest_8@''; ++show grants for mysqltest_8; ++flush privileges; ++show grants for mysqltest_8@''; ++show grants for mysqltest_8; ++select * from information_schema.table_privileges; ++connect (conn5,localhost,mysqltest_8,,); ++select * from t1; ++disconnect conn5; ++connection master; ++revoke update on t1 from mysqltest_8@''; ++show grants for mysqltest_8@''; ++show grants for mysqltest_8; ++select * from information_schema.table_privileges; ++flush privileges; ++show grants for mysqltest_8@''; ++show grants for mysqltest_8; ++ ++--echo "DROP USER" should clear privileges ++grant all privileges on mysqltest.* to mysqltest_8@''; ++grant select on mysqltest.* to mysqltest_8@''; ++grant update on t1 to mysqltest_8@''; ++grant update (a) on t1 to mysqltest_8@''; ++grant all privileges on mysqltest.* to mysqltest_8; ++show grants for mysqltest_8@''; ++show grants for mysqltest_8; ++select * from information_schema.user_privileges ++where grantee like "'mysqltest_8'%"; ++connect (conn5,localhost,mysqltest_8,,); ++select * from t1; ++disconnect conn5; ++connection master; ++flush privileges; ++show grants for mysqltest_8@''; 
++show grants for mysqltest_8; ++drop user mysqltest_8@''; ++--error ER_NONEXISTING_GRANT ++show grants for mysqltest_8@''; ++--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT ++--error ER_ACCESS_DENIED_ERROR ++connect (conn6,localhost,mysqltest_8,,); ++connection master; ++--error ER_NONEXISTING_GRANT ++show grants for mysqltest_8; ++drop user mysqltest_8@host8; ++--error ER_NONEXISTING_GRANT ++show grants for mysqltest_8@host8; ++ ++# Restore the anonymous users. ++insert into mysql.user select * from t2; ++flush privileges; ++drop table t2; ++drop table t1; ++ ++# ++# Bug#20214 Incorrect error when user calls SHOW CREATE VIEW on non ++# privileged view ++# ++ ++connection master; ++ ++CREATE DATABASE mysqltest3; ++USE mysqltest3; ++ ++CREATE TABLE t_nn (c1 INT); ++CREATE VIEW v_nn AS SELECT * FROM t_nn; ++ ++CREATE DATABASE mysqltest2; ++USE mysqltest2; ++ ++CREATE TABLE t_nn (c1 INT); ++CREATE VIEW v_nn AS SELECT * FROM t_nn; ++CREATE VIEW v_yn AS SELECT * FROM t_nn; ++CREATE VIEW v_gy AS SELECT * FROM t_nn; ++CREATE VIEW v_ny AS SELECT * FROM t_nn; ++CREATE VIEW v_yy AS SELECT * FROM t_nn WHERE c1=55; ++ ++GRANT SHOW VIEW ON mysqltest2.v_ny TO 'mysqltest_1'@'localhost' IDENTIFIED BY 'mysqltest_1'; ++GRANT SELECT ON mysqltest2.v_yn TO 'mysqltest_1'@'localhost' IDENTIFIED BY 'mysqltest_1'; ++GRANT SELECT ON mysqltest2.* TO 'mysqltest_1'@'localhost' IDENTIFIED BY 'mysqltest_1'; ++GRANT SHOW VIEW,SELECT ON mysqltest2.v_yy TO 'mysqltest_1'@'localhost' IDENTIFIED BY 'mysqltest_1'; ++ ++connect (mysqltest_1, localhost, mysqltest_1, mysqltest_1,); ++ ++# fail because of missing SHOW VIEW (have generic SELECT) ++--error ER_TABLEACCESS_DENIED_ERROR ++SHOW CREATE VIEW mysqltest2.v_nn; ++--error ER_TABLEACCESS_DENIED_ERROR ++SHOW CREATE TABLE mysqltest2.v_nn; ++ ++# fail because of missing SHOW VIEW ++--error ER_TABLEACCESS_DENIED_ERROR ++SHOW CREATE VIEW mysqltest2.v_yn; ++--error ER_TABLEACCESS_DENIED_ERROR ++SHOW CREATE TABLE mysqltest2.v_yn; ++ ++# succeed (despite of missing SELECT, having SHOW VIEW bails us out) ++SHOW CREATE TABLE mysqltest2.v_ny; ++ ++# succeed (despite of missing SELECT, having SHOW VIEW bails us out) ++SHOW CREATE VIEW mysqltest2.v_ny; ++ ++# fail because of missing (specific or generic) SELECT ++--error ER_TABLEACCESS_DENIED_ERROR ++SHOW CREATE TABLE mysqltest3.t_nn; ++ ++# fail because of missing (specific or generic) SELECT (not because it's not a view!) ++--error ER_TABLEACCESS_DENIED_ERROR ++SHOW CREATE VIEW mysqltest3.t_nn; ++ ++# fail because of missing missing (specific or generic) SELECT (and SHOW VIEW) ++--error ER_TABLEACCESS_DENIED_ERROR ++SHOW CREATE VIEW mysqltest3.v_nn; ++--error ER_TABLEACCESS_DENIED_ERROR ++SHOW CREATE TABLE mysqltest3.v_nn; ++ ++# succeed thanks to generic SELECT ++SHOW CREATE TABLE mysqltest2.t_nn; ++ ++# fail because it's not a view! (have generic SELECT though) ++--error ER_WRONG_OBJECT ++SHOW CREATE VIEW mysqltest2.t_nn; ++ ++# succeed, have SELECT and SHOW VIEW ++SHOW CREATE VIEW mysqltest2.v_yy; ++ ++# succeed, have SELECT and SHOW VIEW ++SHOW CREATE TABLE mysqltest2.v_yy; ++ ++# clean-up ++connection master; ++ ++# succeed, we're root ++SHOW CREATE TABLE mysqltest2.v_nn; ++SHOW CREATE VIEW mysqltest2.v_nn; ++ ++SHOW CREATE TABLE mysqltest2.t_nn; ++ ++# fail because it's not a view! 
++--error ER_WRONG_OBJECT ++SHOW CREATE VIEW mysqltest2.t_nn; ++ ++DROP VIEW mysqltest2.v_nn; ++DROP VIEW mysqltest2.v_yn; ++DROP VIEW mysqltest2.v_ny; ++DROP VIEW mysqltest2.v_yy; ++DROP TABLE mysqltest2.t_nn; ++DROP DATABASE mysqltest2; ++DROP VIEW mysqltest3.v_nn; ++DROP TABLE mysqltest3.t_nn; ++DROP DATABASE mysqltest3; ++disconnect mysqltest_1; ++REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'mysqltest_1'@'localhost'; ++DROP USER 'mysqltest_1'@'localhost'; ++ ++# restore the original database ++USE test; ++connection default; ++disconnect master; ++ ++ ++# ++# Bug#10668 CREATE USER does not enforce username length limit ++# ++--error ER_WRONG_STRING_LENGTH ++create user longer_than_80_456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789; ++ ++# ++# Test for Bug#16899 Possible buffer overflow in handling of DEFINER-clause. ++# ++# These checks are intended to ensure that appropriate errors are risen when ++# illegal user name or hostname is specified in user-clause of GRANT/REVOKE ++# statements. ++# ++ ++# ++# Bug#22369 Alter table rename combined with other alterations causes lost tables ++# ++CREATE DATABASE mysqltest1; ++CREATE TABLE mysqltest1.t1 ( ++ int_field INTEGER UNSIGNED NOT NULL, ++ char_field CHAR(10), ++ INDEX(`int_field`) ++); ++CREATE TABLE mysqltest1.t2 (int_field INT); ++ ++--echo "Now check that we require equivalent grants for " ++--echo "RENAME TABLE and ALTER TABLE" ++CREATE USER mysqltest_1@localhost; ++GRANT SELECT ON mysqltest1.t1 TO mysqltest_1@localhost; ++ ++connect (conn42,localhost,mysqltest_1,,mysqltest1); ++SELECT USER(); ++SHOW GRANTS; ++--error ER_TABLEACCESS_DENIED_ERROR ++RENAME TABLE t1 TO t2; ++--error ER_TABLEACCESS_DENIED_ERROR ++ALTER TABLE t1 RENAME TO t2; ++--disconnect conn42 ++--connection default ++GRANT DROP ON mysqltest1.t1 TO mysqltest_1@localhost; ++ ++connect (conn42,localhost,mysqltest_1,,mysqltest1); ++--error ER_TABLEACCESS_DENIED_ERROR ++RENAME TABLE t1 TO t2; ++--error ER_TABLEACCESS_DENIED_ERROR ++ALTER TABLE t1 RENAME TO t2; ++--disconnect conn42 ++--connection default ++GRANT ALTER ON mysqltest1.t1 TO mysqltest_1@localhost; ++ ++connect (conn42,localhost,mysqltest_1,,mysqltest1); ++SHOW GRANTS; ++--error ER_TABLEACCESS_DENIED_ERROR ++RENAME TABLE t1 TO t2; ++--error ER_TABLEACCESS_DENIED_ERROR ++ALTER TABLE t1 RENAME TO t2; ++--disconnect conn42 ++--connection default ++GRANT INSERT, CREATE ON mysqltest1.t1 TO mysqltest_1@localhost; ++connect (conn42,localhost,mysqltest_1,,mysqltest1); ++SHOW GRANTS; ++--error ER_TABLEACCESS_DENIED_ERROR ++--disconnect conn42 ++--connection default ++GRANT INSERT, SELECT, CREATE, ALTER, DROP ON mysqltest1.t2 TO mysqltest_1@localhost; ++DROP TABLE mysqltest1.t2; ++ ++connect (conn42,localhost,mysqltest_1,,mysqltest1); ++SHOW GRANTS; ++RENAME TABLE t1 TO t2; ++RENAME TABLE t2 TO 
t1; ++ALTER TABLE t1 RENAME TO t2; ++ALTER TABLE t2 RENAME TO t1; ++--disconnect conn42 ++--connection default ++REVOKE DROP, INSERT ON mysqltest1.t1 FROM mysqltest_1@localhost; ++REVOKE DROP, INSERT ON mysqltest1.t2 FROM mysqltest_1@localhost; ++ ++connect (conn42,localhost,mysqltest_1,,mysqltest1); ++SHOW GRANTS; ++--error ER_TABLEACCESS_DENIED_ERROR ++RENAME TABLE t1 TO t2; ++--error ER_TABLEACCESS_DENIED_ERROR ++ALTER TABLE t1 RENAME TO t2; ++--disconnect conn42 ++--connection default ++ ++DROP USER mysqltest_1@localhost; ++DROP DATABASE mysqltest1; ++USE test; ++ ++# Working with database-level privileges. ++ ++--error ER_WRONG_STRING_LENGTH ++GRANT CREATE ON mysqltest.* TO longer_than_80_456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789@localhost; ++ ++--error ER_WRONG_STRING_LENGTH ++GRANT CREATE ON mysqltest.* TO some_user_name@1234567890abcdefghij1234567890abcdefghij1234567890abcdefghijQWERTY; ++ ++--error ER_WRONG_STRING_LENGTH ++REVOKE CREATE ON mysqltest.* FROM longer_than_80_456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789@localhost; ++ ++--error ER_WRONG_STRING_LENGTH ++REVOKE CREATE ON mysqltest.* FROM some_user_name@1234567890abcdefghij1234567890abcdefghij1234567890abcdefghijQWERTY; ++ ++# Working with table-level privileges. 
++ ++--error ER_WRONG_STRING_LENGTH ++GRANT CREATE ON t1 TO longer_than_80_456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789@localhost; ++ ++--error ER_WRONG_STRING_LENGTH ++GRANT CREATE ON t1 TO some_user_name@1234567890abcdefghij1234567890abcdefghij1234567890abcdefghijQWERTY; ++ ++--error ER_WRONG_STRING_LENGTH ++REVOKE CREATE ON t1 FROM longer_than_80_456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789@localhost; ++ ++--error ER_WRONG_STRING_LENGTH ++REVOKE CREATE ON t1 FROM some_user_name@1234567890abcdefghij1234567890abcdefghij1234567890abcdefghijQWERTY; ++ ++# Working with routine-level privileges. 
++ ++--error ER_WRONG_STRING_LENGTH ++GRANT EXECUTE ON PROCEDURE p1 TO longer_than_80_456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789@localhost; ++ ++--error ER_WRONG_STRING_LENGTH ++GRANT EXECUTE ON PROCEDURE p1 TO some_user_name@1234567890abcdefghij1234567890abcdefghij1234567890abcdefghijQWERTY; ++ ++--error ER_WRONG_STRING_LENGTH ++REVOKE EXECUTE ON PROCEDURE p1 FROM longer_than_80_456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789@localhost; ++ ++--error ER_WRONG_STRING_LENGTH ++REVOKE EXECUTE ON PROCEDURE t1 FROM some_user_name@1234567890abcdefghij1234567890abcdefghij1234567890abcdefghijQWERTY; ++ ++ ++# ++# Bug#23556 TRUNCATE TABLE still maps to DELETE ++# ++CREATE USER bug23556@localhost; ++CREATE DATABASE bug23556; ++GRANT SELECT ON bug23556.* TO bug23556@localhost; ++connect (bug23556,localhost,bug23556,,bug23556); ++ ++connection default; ++USE bug23556; ++CREATE TABLE t1 (a INT PRIMARY KEY); INSERT INTO t1 VALUES (1),(2),(3),(4),(5); ++GRANT DELETE ON t1 TO bug23556@localhost; ++ ++connection bug23556; ++USE bug23556; ++--error ER_TABLEACCESS_DENIED_ERROR ++TRUNCATE t1; ++ ++connection default; ++USE bug23556; ++REVOKE DELETE ON t1 FROM bug23556@localhost; ++GRANT DROP ON t1 TO bug23556@localhost; ++ ++connection bug23556; ++USE bug23556; ++TRUNCATE t1; ++ ++connection default; ++USE bug23556; ++DROP TABLE t1; ++USE test; ++DROP DATABASE bug23556; ++DROP USER bug23556@localhost; ++connection default; ++disconnect bug23556; ++ ++ ++# ++# Bug#6774 Replication fails with Wrong usage of DB GRANT and GLOBAL PRIVILEGES ++# ++# Check if GRANT ... ON * ... fails when no database is selected ++connect (con1, localhost, root,,*NO-ONE*); ++connection con1; ++--error ER_NO_DB_ERROR ++GRANT PROCESS ON * TO user@localhost; ++disconnect con1; ++connection default; ++ ++ ++# ++# Bug#9504 Stored procedures: execute privilege doesn't make 'use database' ++# okay. ++# ++ ++# Prepare. 
++ ++--disable_warnings ++DROP DATABASE IF EXISTS mysqltest1; ++DROP DATABASE IF EXISTS mysqltest2; ++DROP DATABASE IF EXISTS mysqltest3; ++DROP DATABASE IF EXISTS mysqltest4; ++--enable_warnings ++ ++CREATE DATABASE mysqltest1; ++CREATE DATABASE mysqltest2; ++CREATE DATABASE mysqltest3; ++CREATE DATABASE mysqltest4; ++ ++CREATE PROCEDURE mysqltest1.p_def() SQL SECURITY DEFINER ++ SELECT 1; ++ ++CREATE PROCEDURE mysqltest2.p_inv() SQL SECURITY INVOKER ++ SELECT 1; ++ ++CREATE FUNCTION mysqltest3.f_def() RETURNS INT SQL SECURITY DEFINER ++ RETURN 1; ++ ++CREATE FUNCTION mysqltest4.f_inv() RETURNS INT SQL SECURITY INVOKER ++ RETURN 1; ++ ++GRANT EXECUTE ON PROCEDURE mysqltest1.p_def TO mysqltest_1@localhost; ++GRANT EXECUTE ON PROCEDURE mysqltest2.p_inv TO mysqltest_1@localhost; ++GRANT EXECUTE ON FUNCTION mysqltest3.f_def TO mysqltest_1@localhost; ++GRANT EXECUTE ON FUNCTION mysqltest4.f_inv TO mysqltest_1@localhost; ++ ++GRANT ALL PRIVILEGES ON test.* TO mysqltest_1@localhost; ++ ++# Test. ++ ++--connect (bug9504_con1,localhost,mysqltest_1,,) ++--echo ++--echo ---> connection: bug9504_con1 ++ ++# - Check that we can switch to the db; ++ ++use mysqltest1; ++ ++use mysqltest2; ++ ++use mysqltest3; ++ ++use mysqltest4; ++ ++# - Check that we can call stored routines; ++ ++use test; ++ ++CALL mysqltest1.p_def(); ++ ++CALL mysqltest2.p_inv(); ++ ++SELECT mysqltest3.f_def(); ++ ++SELECT mysqltest4.f_inv(); ++ ++# Cleanup. ++ ++--connection default ++--echo ++--echo ---> connection: default ++ ++--disconnect bug9504_con1 ++ ++DROP DATABASE mysqltest1; ++DROP DATABASE mysqltest2; ++DROP DATABASE mysqltest3; ++DROP DATABASE mysqltest4; ++ ++DROP USER mysqltest_1@localhost; ++ ++ ++# ++# Bug#27337 Privileges are not restored properly. ++# ++# Actually, the patch for this bugs fixes two problems. So, here are two test ++# cases. ++ ++# Test case 1: privileges are not restored properly after calling a stored ++# routine defined with SQL SECURITY INVOKER clause. ++ ++# Prepare. ++ ++--disable_warnings ++DROP DATABASE IF EXISTS mysqltest1; ++DROP DATABASE IF EXISTS mysqltest2; ++--enable_warnings ++ ++CREATE DATABASE mysqltest1; ++CREATE DATABASE mysqltest2; ++ ++GRANT ALL PRIVILEGES ON mysqltest1.* TO mysqltest_1@localhost; ++GRANT SELECT ON mysqltest2.* TO mysqltest_1@localhost; ++ ++CREATE PROCEDURE mysqltest1.p1() SQL SECURITY INVOKER ++ SELECT 1; ++ ++# Test. ++ ++--connect (bug27337_con1,localhost,mysqltest_1,,mysqltest2) ++--echo ++--echo ---> connection: bug27337_con1 ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++CREATE TABLE t1(c INT); ++ ++CALL mysqltest1.p1(); ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++CREATE TABLE t1(c INT); ++ ++--disconnect bug27337_con1 ++ ++--connect (bug27337_con2,localhost,mysqltest_1,,mysqltest2) ++--echo ++--echo ---> connection: bug27337_con2 ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++CREATE TABLE t1(c INT); ++ ++SHOW TABLES; ++ ++# Cleanup. ++ ++--connection default ++--echo ++--echo ---> connection: default ++ ++--disconnect bug27337_con2 ++ ++DROP DATABASE mysqltest1; ++DROP DATABASE mysqltest2; ++ ++DROP USER mysqltest_1@localhost; ++ ++# Test case 2: privileges are not checked properly for prepared statements. ++ ++# Prepare. 
++ ++--disable_warnings ++DROP DATABASE IF EXISTS mysqltest1; ++DROP DATABASE IF EXISTS mysqltest2; ++--enable_warnings ++ ++CREATE DATABASE mysqltest1; ++CREATE DATABASE mysqltest2; ++ ++CREATE TABLE mysqltest1.t1(c INT); ++CREATE TABLE mysqltest2.t2(c INT); ++ ++GRANT SELECT ON mysqltest1.t1 TO mysqltest_1@localhost; ++GRANT SELECT ON mysqltest2.t2 TO mysqltest_2@localhost; ++ ++# Test. ++ ++--connect (bug27337_con1,localhost,mysqltest_1,,mysqltest1) ++--echo ++--echo ---> connection: bug27337_con1 ++ ++SHOW TABLES FROM mysqltest1; ++ ++PREPARE stmt1 FROM 'SHOW TABLES FROM mysqltest1'; ++ ++EXECUTE stmt1; ++ ++--connect (bug27337_con2,localhost,mysqltest_2,,mysqltest2) ++--echo ++--echo ---> connection: bug27337_con2 ++ ++SHOW COLUMNS FROM mysqltest2.t2; ++ ++PREPARE stmt2 FROM 'SHOW COLUMNS FROM mysqltest2.t2'; ++ ++EXECUTE stmt2; ++ ++--connection default ++--echo ++--echo ---> connection: default ++ ++REVOKE SELECT ON mysqltest1.t1 FROM mysqltest_1@localhost; ++REVOKE SELECT ON mysqltest2.t2 FROM mysqltest_2@localhost; ++ ++--connection bug27337_con1 ++--echo ++--echo ---> connection: bug27337_con1 ++ ++--error ER_DBACCESS_DENIED_ERROR ++SHOW TABLES FROM mysqltest1; ++ ++--error ER_DBACCESS_DENIED_ERROR ++EXECUTE stmt1; ++ ++--connection bug27337_con2 ++--echo ++--echo ---> connection: bug27337_con2 ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++SHOW COLUMNS FROM mysqltest2.t2; ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++EXECUTE stmt2; ++ ++# Cleanup. ++ ++--connection default ++--echo ++--echo ---> connection: default ++ ++--disconnect bug27337_con1 ++--disconnect bug27337_con2 ++ ++DROP DATABASE mysqltest1; ++DROP DATABASE mysqltest2; ++ ++DROP USER mysqltest_1@localhost; ++DROP USER mysqltest_2@localhost; ++ ++# ++# Bug#27878 Unchecked privileges on a view referring to a table from another ++# database. 
++# ++USE test; ++CREATE TABLE t1 (f1 int, f2 int); ++INSERT INTO t1 VALUES(1,1), (2,2); ++CREATE DATABASE db27878; ++GRANT UPDATE(f1) ON t1 TO 'mysqltest_1'@'localhost'; ++GRANT SELECT ON `test`.* TO 'mysqltest_1'@'localhost'; ++GRANT ALL ON db27878.* TO 'mysqltest_1'@'localhost'; ++USE db27878; ++CREATE SQL SECURITY INVOKER VIEW db27878.v1 AS SELECT * FROM test.t1; ++connect (user1,localhost,mysqltest_1,,test); ++connection user1; ++USE db27878; ++--error 1356 ++UPDATE v1 SET f2 = 4; ++SELECT * FROM test.t1; ++disconnect user1; ++connection default; ++REVOKE UPDATE (f1) ON `test`.`t1` FROM 'mysqltest_1'@'localhost'; ++REVOKE SELECT ON `test`.* FROM 'mysqltest_1'@'localhost'; ++REVOKE ALL ON db27878.* FROM 'mysqltest_1'@'localhost'; ++DROP USER mysqltest_1@localhost; ++DROP DATABASE db27878; ++USE test; ++DROP TABLE t1; ++ ++--echo # ++--echo # Bug#33275 Server crash when creating temporary table mysql.user ++--echo # ++CREATE TEMPORARY TABLE mysql.user (id INT); ++FLUSH PRIVILEGES; ++DROP TABLE mysql.user; ++ ++ ++# ++# Bug#33201 Crash occurs when granting update privilege on one column of a view ++# ++drop table if exists test; ++drop function if exists test_function; ++drop view if exists v1; ++create table test (col1 varchar(30)); ++delimiter |; ++create function test_function() returns varchar(30) ++begin ++ declare tmp varchar(30); ++ select col1 from test limit 1 into tmp; ++ return '1'; ++end| ++delimiter ;| ++create view v1 as select test.* from test where test.col1=test_function(); ++grant update (col1) on v1 to 'greg'@'localhost'; ++drop user 'greg'@'localhost'; ++drop view v1; ++drop table test; ++drop function test_function; ++ ++# ++# Bug#41456 SET PASSWORD hates CURRENT_USER() ++# ++SELECT CURRENT_USER(); ++SET PASSWORD FOR CURRENT_USER() = PASSWORD("admin"); ++SET PASSWORD FOR CURRENT_USER() = PASSWORD(""); ++ ++# ++# Bug#57952: privilege change is not taken into account by EXECUTE. ++# ++ ++--echo ++--echo # Bug#57952 ++--echo ++ ++--disable_warnings ++DROP DATABASE IF EXISTS mysqltest1; ++DROP DATABASE IF EXISTS mysqltest2; ++--enable_warnings ++ ++CREATE DATABASE mysqltest1; ++CREATE DATABASE mysqltest2; ++ ++use mysqltest1; ++CREATE TABLE t1(a INT, b INT); ++INSERT INTO t1 VALUES (1, 1); ++ ++CREATE TABLE t2(a INT); ++INSERT INTO t2 VALUES (2); ++ ++CREATE TABLE mysqltest2.t3(a INT); ++INSERT INTO mysqltest2.t3 VALUES (4); ++ ++CREATE USER testuser@localhost; ++GRANT CREATE ROUTINE, EXECUTE ON mysqltest1.* TO testuser@localhost; ++GRANT SELECT(b) ON t1 TO testuser@localhost; ++GRANT SELECT ON t2 TO testuser@localhost; ++GRANT SELECT ON mysqltest2.* TO testuser@localhost; ++ ++--echo ++--echo # Connection: bug57952_con1 (testuser@localhost, db: mysqltest1) ++--connect (bug57952_con1,localhost,testuser,,mysqltest1) ++PREPARE s1 FROM 'SELECT b FROM t1'; ++PREPARE s2 FROM 'SELECT a FROM t2'; ++PREPARE s3 FROM 'SHOW TABLES FROM mysqltest2'; ++ ++CREATE PROCEDURE p1() SELECT b FROM t1; ++CREATE PROCEDURE p2() SELECT a FROM t2; ++CREATE PROCEDURE p3() SHOW TABLES FROM mysqltest2; ++ ++CALL p1; ++CALL p2; ++CALL p3; ++ ++--echo ++--echo # Connection: default ++--connection default ++REVOKE SELECT ON t1 FROM testuser@localhost; ++GRANT SELECT(a) ON t1 TO testuser@localhost; ++REVOKE SELECT ON t2 FROM testuser@localhost; ++REVOKE SELECT ON mysqltest2.* FROM testuser@localhost; ++ ++--echo ++--echo # Connection: bug57952_con1 (testuser@localhost, db: mysqltest1) ++--connection bug57952_con1 ++--echo # - Check column-level privileges... 
++--error ER_COLUMNACCESS_DENIED_ERROR ++EXECUTE s1; ++ ++--error ER_COLUMNACCESS_DENIED_ERROR ++SELECT b FROM t1; ++ ++--error ER_COLUMNACCESS_DENIED_ERROR ++EXECUTE s1; ++ ++--error ER_COLUMNACCESS_DENIED_ERROR ++CALL p1; ++ ++--echo # - Check table-level privileges... ++--error ER_TABLEACCESS_DENIED_ERROR ++SELECT a FROM t2; ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++EXECUTE s2; ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++CALL p2; ++ ++--echo # - Check database-level privileges... ++--error ER_DBACCESS_DENIED_ERROR ++SHOW TABLES FROM mysqltest2; ++ ++--error ER_DBACCESS_DENIED_ERROR ++EXECUTE s3; ++ ++--error ER_DBACCESS_DENIED_ERROR ++CALL p3; ++ ++--echo ++--echo # Connection: default ++--connection default ++--disconnect bug57952_con1 ++DROP DATABASE mysqltest1; ++DROP DATABASE mysqltest2; ++DROP USER testuser@localhost; ++use test; ++--echo ++ ++ ++--echo # ++--echo # Test for bug #36544 "DROP USER does not remove stored function ++--echo # privileges". ++--echo # ++create database mysqltest1; ++create function mysqltest1.f1() returns int return 0; ++create procedure mysqltest1.p1() begin end; ++--echo # ++--echo # 1) Check that DROP USER properly removes privileges on both ++--echo # stored procedures and functions. ++--echo # ++create user mysqluser1@localhost; ++grant execute on function mysqltest1.f1 to mysqluser1@localhost; ++grant execute on procedure mysqltest1.p1 to mysqluser1@localhost; ++ ++--echo # Quick test that granted privileges are properly reflected ++--echo # in privilege tables and in in-memory structures. ++show grants for mysqluser1@localhost; ++select db, routine_name, routine_type, proc_priv from mysql.procs_priv where user='mysqluser1' and host='localhost'; ++--echo # ++--echo # Create connection 'bug_36544_con1' as 'mysqluser1@localhost'. ++--connect (bug36544_con1,localhost,mysqluser1,,) ++call mysqltest1.p1(); ++select mysqltest1.f1(); ++ ++--echo # ++--echo # Switch to connection 'default'. ++--connection default ++drop user mysqluser1@localhost; ++ ++--echo # ++--echo # Test that dropping of user is properly reflected in ++--echo # both privilege tables and in in-memory structures. ++--echo # ++--echo # Switch to connection 'bug36544_con1'. ++--connection bug36544_con1 ++--echo # The connection cold be alive but should not be able to ++--echo # access to any of the stored routines. ++--error ER_PROCACCESS_DENIED_ERROR ++call mysqltest1.p1(); ++--error ER_PROCACCESS_DENIED_ERROR ++select mysqltest1.f1(); ++--disconnect bug36544_con1 ++ ++--echo # ++--echo # Switch to connection 'default'. ++--connection default ++--echo # ++--echo # Now create user with the same name and check that he ++--echo # has not inherited privileges. ++create user mysqluser1@localhost; ++show grants for mysqluser1@localhost; ++select db, routine_name, routine_type, proc_priv from mysql.procs_priv where user='mysqluser1' and host='localhost'; ++--echo # ++--echo # Create connection 'bug_36544_con2' as 'mysqluser1@localhost'. ++--connect (bug36544_con2,localhost,mysqluser1,,) ++--echo # Newly created user should not be able to access any of the routines. ++--error ER_PROCACCESS_DENIED_ERROR ++call mysqltest1.p1(); ++--error ER_PROCACCESS_DENIED_ERROR ++select mysqltest1.f1(); ++--echo # ++--echo # Switch to connection 'default'. ++--connection default ++ ++--echo # ++--echo # 2) Check that RENAME USER properly updates privileges on both ++--echo # stored procedures and functions. 
++--echo # ++grant execute on function mysqltest1.f1 to mysqluser1@localhost; ++grant execute on procedure mysqltest1.p1 to mysqluser1@localhost; ++--echo # ++--echo # Create one more user to make in-memory hashes non-trivial. ++--echo # User names 'mysqluser11' and 'mysqluser10' were selected ++--echo # to trigger bug discovered during code inspection. ++create user mysqluser11@localhost; ++grant execute on function mysqltest1.f1 to mysqluser11@localhost; ++grant execute on procedure mysqltest1.p1 to mysqluser11@localhost; ++--echo # Also create a couple of tables to test for another bug ++--echo # discovered during code inspection (again table names were ++--echo # chosen especially to trigger the bug). ++create table mysqltest1.t11 (i int); ++create table mysqltest1.t22 (i int); ++grant select on mysqltest1.t22 to mysqluser1@localhost; ++grant select on mysqltest1.t11 to mysqluser1@localhost; ++ ++--echo # Quick test that granted privileges are properly reflected ++--echo # in privilege tables and in in-memory structures. ++show grants for mysqluser1@localhost; ++select db, routine_name, routine_type, proc_priv from mysql.procs_priv where user='mysqluser1' and host='localhost'; ++select db, table_name, table_priv from mysql.tables_priv where user='mysqluser1' and host='localhost'; ++--echo # ++--echo # Switch to connection 'bug36544_con2'. ++--connection bug36544_con2 ++call mysqltest1.p1(); ++select mysqltest1.f1(); ++select * from mysqltest1.t11; ++select * from mysqltest1.t22; ++ ++--echo # ++--echo # Switch to connection 'default'. ++--connection default ++rename user mysqluser1@localhost to mysqluser10@localhost; ++ ++--echo # ++--echo # Test that there are no privileges left for mysqluser1. ++--echo # ++--echo # Switch to connection 'bug36544_con2'. ++--connection bug36544_con2 ++--echo # The connection cold be alive but should not be able to ++--echo # access to any of the stored routines or tables. ++--error ER_PROCACCESS_DENIED_ERROR ++call mysqltest1.p1(); ++--error ER_PROCACCESS_DENIED_ERROR ++select mysqltest1.f1(); ++--error ER_TABLEACCESS_DENIED_ERROR ++select * from mysqltest1.t11; ++--error ER_TABLEACCESS_DENIED_ERROR ++select * from mysqltest1.t22; ++--disconnect bug36544_con2 ++ ++--echo # ++--echo # Switch to connection 'default'. ++--connection default ++--echo # ++--echo # Now create user with the old name and check that he ++--echo # has not inherited privileges. ++create user mysqluser1@localhost; ++show grants for mysqluser1@localhost; ++select db, routine_name, routine_type, proc_priv from mysql.procs_priv where user='mysqluser1' and host='localhost'; ++select db, table_name, table_priv from mysql.tables_priv where user='mysqluser1' and host='localhost'; ++--echo # ++--echo # Create connection 'bug_36544_con3' as 'mysqluser1@localhost'. ++--connect (bug36544_con3,localhost,mysqluser1,,) ++--echo # Newly created user should not be able to access to any of the ++--echo # stored routines or tables. ++--error ER_PROCACCESS_DENIED_ERROR ++call mysqltest1.p1(); ++--error ER_PROCACCESS_DENIED_ERROR ++select mysqltest1.f1(); ++--error ER_TABLEACCESS_DENIED_ERROR ++select * from mysqltest1.t11; ++--error ER_TABLEACCESS_DENIED_ERROR ++select * from mysqltest1.t22; ++--disconnect bug36544_con3 ++ ++--echo # ++--echo # Switch to connection 'default'. ++--connection default ++--echo # ++--echo # Now check that privileges became associated with a new user ++--echo # name - mysqluser10. 
++--echo # ++show grants for mysqluser10@localhost; ++select db, routine_name, routine_type, proc_priv from mysql.procs_priv where user='mysqluser10' and host='localhost'; ++select db, table_name, table_priv from mysql.tables_priv where user='mysqluser10' and host='localhost'; ++--echo # ++--echo # Create connection 'bug_36544_con4' as 'mysqluser10@localhost'. ++--connect (bug36544_con4,localhost,mysqluser10,,) ++call mysqltest1.p1(); ++select mysqltest1.f1(); ++select * from mysqltest1.t11; ++select * from mysqltest1.t22; ++--disconnect bug36544_con4 ++ ++--echo # ++--echo # Switch to connection 'default'. ++--connection default ++--echo # ++--echo # Clean-up. ++drop user mysqluser1@localhost; ++drop user mysqluser10@localhost; ++drop user mysqluser11@localhost; ++drop database mysqltest1; ++ ++ ++--echo End of 5.0 tests ++set names utf8; ++--error ER_WRONG_STRING_LENGTH ++grant select on test.* to очень_длинный_юзер890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890@localhost; ++set names default; ++ ++# ++# Bug#20901 CREATE privilege is enough to insert into a table ++# ++ ++create database mysqltest; ++use mysqltest; ++ ++grant create on mysqltest.* to mysqltest@localhost; ++create table t1 (i INT); ++ ++connect (user1,localhost,mysqltest,,mysqltest); ++connection user1; ++# show we don't have INSERT ++--error ER_TABLEACCESS_DENIED_ERROR ++insert into t1 values (1); ++# show we have CREATE ++create table t2 (i INT); ++create table t4 (i INT); ++ ++connection default; ++grant select, insert on mysqltest.t2 to mysqltest@localhost; ++grant insert on mysqltest.t4 to mysqltest@localhost; ++# to specify ACLs for non-existent objects, must explictly |CREATE ++grant create, insert on mysqltest.t5 to mysqltest@localhost; ++grant create, insert on mysqltest.t6 to mysqltest@localhost; ++flush privileges; ++ ++connection user1; ++insert into t2 values (1); ++ ++ ++# CREATE IF NOT EXISTS...SELECT, t1 exists, no INSERT, must fail ++--error ER_TABLEACCESS_DENIED_ERROR ++create table if not exists t1 select * from t2; ++ ++# CREATE IF NOT EXISTS...SELECT, no t3 yet, no INSERT, must fail ++--error ER_TABLEACCESS_DENIED_ERROR ++create table if not exists t3 select * from t2; ++ ++# CREATE IF NOT EXISTS...SELECT, t4 exists, have INSERT, must succeed ++create table if not exists t4 select * from t2; ++ ++# CREATE IF NOT EXISTS...SELECT, no t5 yet, have INSERT, must succeed ++create table if not exists t5 select * from t2; ++ ++ ++# CREATE...SELECT, no t6 yet, have INSERT, must succeed ++create table t6 select * from t2; ++ ++# CREATE...SELECT, no t7 yet, no INSERT, must fail ++--error ER_TABLEACCESS_DENIED_ERROR ++create table t7 select * from t2; ++ ++# CREATE...SELECT, t4 exists, have INSERT, must still fail (exists) ++--error 1050 ++create table t4 select * from t2; ++ ++# 
CREATE...SELECT, t1 exists, no INSERT, must fail ++--error ER_TABLEACCESS_DENIED_ERROR ++create table t1 select * from t2; ++ ++ ++connection default; ++drop table t1,t2,t4,t5,t6; ++ ++revoke create on mysqltest.* from mysqltest@localhost; ++revoke select, insert on mysqltest.t2 from mysqltest@localhost; ++revoke insert on mysqltest.t4 from mysqltest@localhost; ++revoke create, insert on mysqltest.t5 from mysqltest@localhost; ++revoke create, insert on mysqltest.t6 from mysqltest@localhost; ++drop user mysqltest@localhost; ++ ++disconnect user1; ++drop database mysqltest; ++use test; ++ ++ ++# ++# Bug#16470 crash on grant if old grant tables ++# ++ ++call mtr.add_suppression("Can't open and lock privilege tables"); ++ ++--echo FLUSH PRIVILEGES without procs_priv table. ++RENAME TABLE mysql.procs_priv TO mysql.procs_gone; ++FLUSH PRIVILEGES; ++--echo Assigning privileges without procs_priv table. ++CREATE DATABASE mysqltest1; ++CREATE PROCEDURE mysqltest1.test() SQL SECURITY DEFINER ++ SELECT 1; ++CREATE FUNCTION mysqltest1.test() RETURNS INT RETURN 1; ++--error ER_NO_SUCH_TABLE ++GRANT EXECUTE ON FUNCTION mysqltest1.test TO mysqltest_1@localhost; ++GRANT ALL PRIVILEGES ON test.* TO mysqltest_1@localhost; ++CALL mysqltest1.test(); ++DROP DATABASE mysqltest1; ++RENAME TABLE mysql.procs_gone TO mysql.procs_priv; ++DROP USER mysqltest_1@localhost; ++FLUSH PRIVILEGES; ++ ++ ++# ++# Bug#33464 DROP FUNCTION caused a crash. ++# ++CREATE DATABASE dbbug33464; ++CREATE USER 'userbug33464'@'localhost'; ++ ++GRANT CREATE ROUTINE ON dbbug33464.* TO 'userbug33464'@'localhost'; ++ ++--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK ++connect (connbug33464, localhost, userbug33464, , dbbug33464); ++--source suite/funcs_1/include/show_connection.inc ++ ++delimiter //; ++CREATE PROCEDURE sp3(v1 char(20)) ++BEGIN ++ SELECT * from dbbug33464.t6 where t6.f2= 'xyz'; ++END// ++delimiter ;// ++ ++delimiter //; ++CREATE FUNCTION fn1() returns char(50) SQL SECURITY INVOKER ++BEGIN ++ return 1; ++END// ++delimiter ;// ++ ++delimiter //; ++CREATE FUNCTION fn2() returns char(50) SQL SECURITY DEFINER ++BEGIN ++ return 2; ++END// ++delimiter ;// ++ ++disconnect connbug33464; ++ ++# cleanup ++connection default; ++USE dbbug33464; ++--source suite/funcs_1/include/show_connection.inc ++ ++SELECT fn1(); ++SELECT fn2(); ++ ++--error 0, ER_CANNOT_USER ++DROP USER 'userbug33464'@'localhost'; ++ ++DROP FUNCTION fn1; ++DROP FUNCTION fn2; ++DROP PROCEDURE sp3; ++ ++--error 0, ER_CANNOT_USER ++DROP USER 'userbug33464'@'localhost'; ++ ++USE test; ++DROP DATABASE dbbug33464; ++ ++ ++SET @@global.log_bin_trust_function_creators= @old_log_bin_trust_function_creators; ++ ++# ++# Bug#44658 Create procedure makes server crash when user does not have ALL privilege ++# ++CREATE USER user1; ++CREATE USER user2; ++GRANT CREATE ON db1.* TO 'user1'@'localhost'; ++GRANT CREATE ROUTINE ON db1.* TO 'user1'@'localhost'; ++GRANT CREATE ON db1.* TO 'user2'@'%'; ++GRANT CREATE ROUTINE ON db1.* TO 'user2'@'%'; ++FLUSH PRIVILEGES; ++SHOW GRANTS FOR 'user1'@'localhost'; ++connect (con1,localhost,user1,,); ++--echo ** Connect as user1 and create a procedure. ++--echo ** The creation will imply implicitly assigned ++--echo ** EXECUTE and ALTER ROUTINE privileges to ++--echo ** the current user user1@localhost. 
++SELECT @@GLOBAL.sql_mode; ++SELECT @@SESSION.sql_mode; ++CREATE DATABASE db1; ++DELIMITER ||; ++CREATE PROCEDURE db1.proc1(p1 INT) ++ BEGIN ++ SET @x = 0; ++ REPEAT SET @x = @x + 1; UNTIL @x > p1 END REPEAT; ++ END ;|| ++DELIMITER ;|| ++ ++connect (con2,localhost,user2,,); ++--echo ** Connect as user2 and create a procedure. ++--echo ** Implicitly assignment of privileges will ++--echo ** fail because the user2@localhost is an ++--echo ** unknown user. ++DELIMITER ||; ++CREATE PROCEDURE db1.proc2(p1 INT) ++ BEGIN ++ SET @x = 0; ++ REPEAT SET @x = @x + 1; UNTIL @x > p1 END REPEAT; ++ END ;|| ++DELIMITER ;|| ++ ++connection default; ++SHOW GRANTS FOR 'user1'@'localhost'; ++SHOW GRANTS FOR 'user2'; ++disconnect con1; ++disconnect con2; ++DROP PROCEDURE db1.proc1; ++DROP PROCEDURE db1.proc2; ++REVOKE ALL ON db1.* FROM 'user1'@'localhost'; ++REVOKE ALL ON db1.* FROM 'user2'@'%'; ++DROP USER 'user1'; ++DROP USER 'user1'@'localhost'; ++DROP USER 'user2'; ++DROP DATABASE db1; ++ ++ ++--echo # ++--echo # Bug #25863 No database selected error, but documentation ++--echo # says * for global allowed ++--echo # ++ ++connect(conn1,localhost,root,,*NO-ONE*); ++ ++--error ER_NO_DB_ERROR ++GRANT ALL ON * TO mysqltest_1; ++ ++GRANT ALL ON *.* TO mysqltest_1; ++SHOW GRANTS FOR mysqltest_1; ++DROP USER mysqltest_1; ++ ++USE test; ++ ++GRANT ALL ON * TO mysqltest_1; ++SHOW GRANTS FOR mysqltest_1; ++DROP USER mysqltest_1; ++ ++GRANT ALL ON *.* TO mysqltest_1; ++SHOW GRANTS FOR mysqltest_1; ++DROP USER mysqltest_1; ++ ++connection default; ++disconnect conn1; ++ ++ ++# ++# Bug #53371: COM_FIELD_LIST can be abused to bypass table level grants. ++# ++ ++CREATE DATABASE db1; ++CREATE DATABASE db2; ++GRANT SELECT ON db1.* to 'testbug'@localhost; ++USE db2; ++CREATE TABLE t1 (a INT); ++USE test; ++connect (con1,localhost,testbug,,db1); ++--error ER_NO_SUCH_TABLE ++SELECT * FROM `../db2/tb2`; ++--error ER_TABLEACCESS_DENIED_ERROR ++SELECT * FROM `../db2`.tb2; ++--error ER_WRONG_TABLE_NAME ++SELECT * FROM `#mysql50#/../db2/tb2`; ++connection default; ++disconnect con1; ++DROP USER 'testbug'@localhost; ++DROP TABLE db2.t1; ++DROP DATABASE db1; ++DROP DATABASE db2; ++ ++--echo # ++--echo # Bug #36742 ++--echo # ++grant usage on Foo.* to myuser@Localhost identified by 'foo'; ++grant select on Foo.* to myuser@localhost; ++select host,user from mysql.user where User='myuser'; ++revoke select on Foo.* from myuser@localhost; ++delete from mysql.user where User='myuser'; ++flush privileges; ++ ++--echo ######################################################################### ++--echo # ++--echo # Bug#38347: ALTER ROUTINE privilege allows SHOW CREATE TABLE. ++--echo # ++--echo ######################################################################### ++ ++--echo ++--echo # -- ++--echo # -- Prepare the environment. ++--echo # -- ++ ++DELETE FROM mysql.user WHERE User LIKE 'mysqltest_%'; ++DELETE FROM mysql.db WHERE User LIKE 'mysqltest_%'; ++DELETE FROM mysql.tables_priv WHERE User LIKE 'mysqltest_%'; ++DELETE FROM mysql.columns_priv WHERE User LIKE 'mysqltest_%'; ++FLUSH PRIVILEGES; ++ ++--disable_warnings ++DROP DATABASE IF EXISTS mysqltest_db1; ++--enable_warnings ++ ++CREATE DATABASE mysqltest_db1; ++ ++CREATE TABLE mysqltest_db1.t1(a INT); ++ ++--echo ++--echo # -- ++--echo # -- Check that global privileges don't allow SHOW CREATE TABLE. 
++--echo # -- ++ ++GRANT EVENT ON mysqltest_db1.* TO mysqltest_u1@localhost; ++GRANT CREATE TEMPORARY TABLES ON mysqltest_db1.* TO mysqltest_u1@localhost; ++GRANT LOCK TABLES ON mysqltest_db1.* TO mysqltest_u1@localhost; ++GRANT ALTER ROUTINE ON mysqltest_db1.* TO mysqltest_u1@localhost; ++GRANT CREATE ROUTINE ON mysqltest_db1.* TO mysqltest_u1@localhost; ++GRANT EXECUTE ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++GRANT FILE ON *.* TO mysqltest_u1@localhost; ++GRANT CREATE USER ON *.* TO mysqltest_u1@localhost; ++GRANT PROCESS ON *.* TO mysqltest_u1@localhost; ++GRANT RELOAD ON *.* TO mysqltest_u1@localhost; ++GRANT REPLICATION CLIENT ON *.* TO mysqltest_u1@localhost; ++GRANT REPLICATION SLAVE ON *.* TO mysqltest_u1@localhost; ++GRANT SHOW DATABASES ON *.* TO mysqltest_u1@localhost; ++GRANT SHUTDOWN ON *.* TO mysqltest_u1@localhost; ++GRANT USAGE ON *.* TO mysqltest_u1@localhost; ++ ++--echo ++SHOW GRANTS FOR mysqltest_u1@localhost; ++ ++--echo ++--echo # connection: con1 (mysqltest_u1@mysqltest_db1) ++--connect (con1,localhost,mysqltest_u1,,mysqltest_db1) ++--connection con1 ++ ++--echo ++--error ER_TABLEACCESS_DENIED_ERROR ++SHOW CREATE TABLE t1; ++ ++--echo ++--echo # connection: default ++--connection default ++ ++--disconnect con1 ++ ++--echo ++REVOKE ALL PRIVILEGES, GRANT OPTION FROM mysqltest_u1@localhost; ++SHOW GRANTS FOR mysqltest_u1@localhost; ++ ++--echo ++--echo # -- ++--echo # -- Check that global SELECT allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT SELECT ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that global INSERT allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT INSERT ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that global UPDATE allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT UPDATE ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that global DELETE allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT DELETE ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that global CREATE allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT CREATE ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that global DROP allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT DROP ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that global ALTER allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT ALTER ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that global INDEX allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT INDEX ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that global REFERENCES allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT REFERENCES ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that global GRANT OPTION allows SHOW CREATE TABLE. 
++--echo # -- ++ ++--echo ++GRANT GRANT OPTION ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that global CREATE VIEW allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT CREATE VIEW ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that global SHOW VIEW allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT SHOW VIEW ON mysqltest_db1.* TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level SELECT allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT SELECT ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level INSERT allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT INSERT ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level UPDATE allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT UPDATE ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level DELETE allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT DELETE ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level CREATE allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT CREATE ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level DROP allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT DROP ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level ALTER allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT ALTER ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level INDEX allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT INDEX ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level REFERENCES allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT REFERENCES ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level GRANT OPTION allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT GRANT OPTION ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level CREATE VIEW allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT CREATE VIEW ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Check that table-level SHOW VIEW allows SHOW CREATE TABLE. ++--echo # -- ++ ++--echo ++GRANT SHOW VIEW ON mysqltest_db1.t1 TO mysqltest_u1@localhost; ++ ++--source include/bug38347.inc ++ ++--echo ++--echo # -- ++--echo # -- Cleanup. ++--echo # -- ++ ++--echo ++DROP DATABASE mysqltest_db1; ++ ++DROP USER mysqltest_u1@localhost; ++ ++--echo ++--echo # End of Bug#38347. ++--echo ++ ++ ++--echo # ++--echo # BUG#11759114 - '51401: GRANT TREATS NONEXISTENT FUNCTIONS/PRIVILEGES ++--echo # DIFFERENTLY'. 
++--echo # ++--disable_warnings ++drop database if exists mysqltest_db1; ++--enable_warnings ++create database mysqltest_db1; ++create user mysqltest_u1; ++--echo # Both GRANT statements below should fail with the same error. ++--error ER_SP_DOES_NOT_EXIST ++grant execute on function mysqltest_db1.f1 to mysqltest_u1; ++--error ER_SP_DOES_NOT_EXIST ++grant execute on procedure mysqltest_db1.p1 to mysqltest_u1; ++--echo # Let us show that GRANT behaviour for routines is consistent ++--echo # with GRANT behaviour for tables. Attempt to grant privilege ++--echo # on non-existent table also results in an error. ++--error ER_NO_SUCH_TABLE ++grant select on mysqltest_db1.t1 to mysqltest_u1; ++show grants for mysqltest_u1; ++drop database mysqltest_db1; ++drop user mysqltest_u1; ++ ++ ++--echo # ++--echo # Bug#12766319 - 61865: RENAME USER DOES NOT WORK CORRECTLY - ++--echo # REQUIRES FLUSH PRIVILEGES ++--echo # ++ ++CREATE USER foo@'127.0.0.1'; ++GRANT ALL ON *.* TO foo@'127.0.0.1'; ++ ++--echo # First attempt, should connect successfully ++connect (conn1, '127.0.0.1', foo,,test); ++SELECT user(), current_user(); ++ ++--echo # Rename the user ++RENAME USER foo@'127.0.0.1' to foo@'127.0.0.0/255.0.0.0'; ++ ++--echo # Second attempt, should connect successfully as its valid mask ++--echo # This was failing without fix ++connect (conn2, '127.0.0.1', foo,,test); ++SELECT user(), current_user(); ++ ++--echo # Rename the user back to original ++RENAME USER foo@'127.0.0.0/255.0.0.0' to foo@'127.0.0.1'; ++ ++--echo # Third attempt, should connect successfully ++connect (conn3, '127.0.0.1', foo,,test); ++SELECT user(), current_user(); ++ ++--echo # Clean-up ++connection default; ++disconnect conn1; ++disconnect conn2; ++disconnect conn3; ++DROP USER foo@'127.0.0.1'; ++ ++--echo # End of Bug#12766319 ++ ++# ++# Bug#27230925: HANDLE_FATAL_SIGNAL (SIG=11) IN SHOW_ROUTINE_GRANTS ++# ++create user foo@localhost; ++create database foodb; ++grant create routine on foodb.* to foo@localhost; ++connect con1,localhost,foo; ++create procedure fooproc() select 'i am fooproc'; ++show grants; ++disconnect con1; ++connection default; ++rename table mysql.procs_priv to mysql.procs_priv1; ++flush privileges; ++show grants for foo@localhost; ++rename table mysql.procs_priv1 to mysql.procs_priv; ++show grants for foo@localhost; ++flush privileges; ++show grants for foo@localhost; ++drop user foo@localhost; ++drop procedure fooproc; ++drop database foodb; ++ ++ ++--echo # ++--echo # Bug#11756966 - 48958: STORED PROCEDURES CAN BE LEVERAGED TO BYPASS ++--echo # DATABASE SECURITY ++--echo # ++ ++--disable_warnings ++DROP DATABASE IF EXISTS secret; ++DROP DATABASE IF EXISTS no_such_db; ++--enable_warnings ++ ++CREATE DATABASE secret; ++GRANT USAGE ON *.* TO untrusted@localhost; ++ ++--echo # Connection con1 ++connect (con1, localhost, untrusted); ++SHOW GRANTS; ++SHOW DATABASES; ++ ++--echo # Both statements below should fail with the same error. ++--echo # They used to give different errors, thereby ++--echo # hinting that the secret database exists. 
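# Editor's note: illustrative sketch, not part of this patch, using a
# hypothetical account name. The Bug#12766319 test above relies on host values
# of the form 'network/netmask': after the rename, foo@'127.0.0.0/255.0.0.0'
# matches any client whose IP address ANDed with 255.0.0.0 equals 127.0.0.0,
# so a connection from 127.0.0.1 still resolves to that account without
# needing FLUSH PRIVILEGES.
CREATE USER demo@'10.0.0.0/255.255.255.0';
SELECT user, host FROM mysql.user WHERE user = 'demo';
DROP USER demo@'10.0.0.0/255.255.255.0';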
++--error ER_DBACCESS_DENIED_ERROR ++CREATE PROCEDURE no_such_db.foo() BEGIN END; ++--error ER_DBACCESS_DENIED_ERROR ++CREATE PROCEDURE secret.peek_at_secret() BEGIN END; ++ ++--echo # Connection default ++--connection default ++disconnect con1; ++DROP USER untrusted@localhost; ++DROP DATABASE secret; ++ ++# Wait till we reached the initial number of concurrent sessions ++--source include/wait_until_count_sessions.inc diff --cc mysql-test/r/lowercase_fs_off.result index d7e1b8c9e5b,362661d8911..785596c7d2f --- a/mysql-test/r/lowercase_fs_off.result +++ b/mysql-test/r/lowercase_fs_off.result @@@ -63,8 -63,44 +63,70 @@@ CREATE TRIGGER t1_bi BEFORE INSERT ON t RENAME TABLE t1 TO T1; ALTER TABLE T1 RENAME t1; DROP TABLE t1; + create database TEST; + create procedure TEST.pr() begin end; + create procedure test.pr() begin end; -Phase 1/4: Fixing views -Phase 2/4: Fixing table and database names -Phase 3/4: Checking and upgrading tables ++Phase 1/6: Checking and upgrading mysql database + Processing databases -information_schema -TEST -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK + mysql ++mysql.column_stats OK + mysql.columns_priv OK + mysql.db OK + mysql.event OK + mysql.func OK ++mysql.gtid_slave_pos OK + mysql.help_category OK + mysql.help_keyword OK + mysql.help_relation OK + mysql.help_topic OK + mysql.host OK -mysql.ndb_binlog_index OK ++mysql.index_stats OK ++mysql.innodb_index_stats ++Error : Unknown storage engine 'InnoDB' ++error : Corrupt ++mysql.innodb_table_stats ++Error : Unknown storage engine 'InnoDB' ++error : Corrupt + mysql.plugin OK + mysql.proc OK + mysql.procs_priv OK + mysql.proxies_priv OK ++mysql.roles_mapping OK + mysql.servers OK ++mysql.table_stats OK + mysql.tables_priv OK + mysql.time_zone OK + mysql.time_zone_leap_second OK + mysql.time_zone_name OK + mysql.time_zone_transition OK + mysql.time_zone_transition_type OK + mysql.user OK ++ ++Repairing tables ++mysql.innodb_index_stats ++Error : Unknown storage engine 'InnoDB' ++error : Corrupt ++mysql.innodb_table_stats ++Error : Unknown storage engine 'InnoDB' ++error : Corrupt ++Phase 2/6: Fixing views ++Phase 3/6: Running 'mysql_fix_privilege_tables' ++Phase 4/6: Fixing table and database names ++Phase 5/6: Checking and upgrading tables ++Processing databases ++TEST ++information_schema ++mtr ++mtr.global_suppressions OK ++mtr.test_suppressions OK + performance_schema + test -Phase 4/4: Running 'mysql_fix_privilege_tables' ++Phase 6/6: Running 'FLUSH PRIVILEGES' + OK + drop procedure test.pr; + drop database TEST; +create table t1 (a int); +create trigger t1_bi before insert on t1 for each row set new.a= 1; +show triggers like '%T1%'; +Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation +drop table t1; diff --cc mysql-test/r/lowercase_fs_off.test index 00000000000,00000000000..f4df5e8188d new file mode 100644 --- /dev/null +++ b/mysql-test/r/lowercase_fs_off.test @@@ -1,0 -1,0 +1,124 @@@ ++# ++# Specific tests for case sensitive file systems ++# i.e. 
lower_case_filesystem=OFF ++# ++-- source include/have_case_sensitive_file_system.inc ++-- source include/not_embedded.inc ++ ++connect (master,localhost,root,,); ++connection master; ++create database d1; ++grant all on d1.* to 'sample'@'localhost' identified by 'password'; ++flush privileges; ++ ++connect (sample,localhost,sample,password,d1); ++connection sample; ++select database(); ++--error ER_DBACCESS_DENIED_ERROR ++create database d2; ++--error ER_DBACCESS_DENIED_ERROR ++create database D1; ++disconnect sample; ++--source include/wait_until_disconnected.inc ++ ++connection master; ++drop user 'sample'@'localhost'; ++drop database if exists d1; ++disconnect master; ++--source include/wait_until_disconnected.inc ++connection default; ++ ++# End of 4.1 tests ++ ++# ++# Bug#41049 does syntax "grant" case insensitive? ++# ++CREATE DATABASE d1; ++USE d1; ++CREATE TABLE T1(f1 INT); ++CREATE TABLE t1(f1 INT); ++GRANT SELECT ON T1 to user_1@localhost; ++ ++connect (con1,localhost,user_1,,d1); ++--error ER_TABLEACCESS_DENIED_ERROR ++select * from t1; ++select * from T1; ++connection default; ++GRANT SELECT ON t1 to user_1@localhost; ++connection con1; ++select * from information_schema.table_privileges; ++connection default; ++disconnect con1; ++ ++REVOKE ALL PRIVILEGES, GRANT OPTION FROM user_1@localhost; ++DROP USER user_1@localhost; ++DROP DATABASE d1; ++USE test; ++ ++CREATE DATABASE db1; ++USE db1; ++CREATE PROCEDURE p1() BEGIN END; ++CREATE FUNCTION f1(i INT) RETURNS INT RETURN i+1; ++ ++GRANT USAGE ON db1.* to user_1@localhost; ++GRANT EXECUTE ON PROCEDURE db1.P1 to user_1@localhost; ++GRANT EXECUTE ON FUNCTION db1.f1 to user_1@localhost; ++GRANT UPDATE ON db1.* to USER_1@localhost; ++ ++connect (con1,localhost,user_1,,db1); ++call p1(); ++call P1(); ++select f1(1); ++connect (con2,localhost,USER_1,,db1); ++--error ER_PROCACCESS_DENIED_ERROR ++call p1(); ++--error ER_PROCACCESS_DENIED_ERROR ++call P1(); ++--error ER_PROCACCESS_DENIED_ERROR ++select f1(1); ++ ++connection default; ++disconnect con1; ++disconnect con2; ++ ++REVOKE ALL PRIVILEGES, GRANT OPTION FROM user_1@localhost; ++REVOKE ALL PRIVILEGES, GRANT OPTION FROM USER_1@localhost; ++DROP FUNCTION f1; ++DROP PROCEDURE p1; ++DROP USER user_1@localhost; ++DROP USER USER_1@localhost; ++DROP DATABASE db1; ++use test; ++ ++# End of 5.0 tests ++ ++ ++--echo # ++--echo # Extra test coverage for Bug#56595 RENAME TABLE causes assert on OS X ++--echo # ++ ++CREATE TABLE t1(a INT); ++CREATE TRIGGER t1_bi BEFORE INSERT ON t1 FOR EACH ROW SET new.a= 1; ++RENAME TABLE t1 TO T1; ++ALTER TABLE T1 RENAME t1; ++DROP TABLE t1; ++ ++# ++# MDEV-13912 mysql_upgrade: case (in)sensitivity for stored procedures ++# ++create database TEST; ++create procedure TEST.pr() begin end; ++create procedure test.pr() begin end; ++--exec $MYSQL_UPGRADE --force 2>&1 ++drop procedure test.pr; ++drop database TEST; ++ ++# End of 5.5 tests ++ ++# ++# MDEV-9014 SHOW TRIGGERS not case sensitive ++# ++create table t1 (a int); ++create trigger t1_bi before insert on t1 for each row set new.a= 1; ++show triggers like '%T1%'; ++drop table t1; diff --cc mysql-test/r/sp-security.result index e8c3fbff0e3,39237b7f322..4125762e622 --- a/mysql-test/r/sp-security.result +++ b/mysql-test/r/sp-security.result @@@ -617,33 -617,24 +617,56 @@@ SELECT 1 latin1 latin1_swedish_ci latin # Connection default DROP USER user2@localhost; DROP DATABASE db1; + create user foo@local_ost; -create user foo@`local\_ost` identified by 'nevermore'; ++create user foo@`local\_ost`; ++update mysql.user 
set plugin='foobar' where host='local\\_ost'; ++flush privileges; + create database foodb; + grant create routine on foodb.* to foo@local_ost; + select user(), current_user(); + user() current_user() + foo@localhost foo@local_ost + show grants; + Grants for foo@local_ost + GRANT USAGE ON *.* TO 'foo'@'local_ost' + GRANT CREATE ROUTINE ON `foodb`.* TO 'foo'@'local_ost' + create procedure fooproc() select 'i am fooproc'; + show grants; + Grants for foo@local_ost + GRANT USAGE ON *.* TO 'foo'@'local_ost' + GRANT CREATE ROUTINE ON `foodb`.* TO 'foo'@'local_ost' + GRANT EXECUTE, ALTER ROUTINE ON PROCEDURE `test`.`fooproc` TO 'foo'@'local_ost' + drop user foo@local_ost; + drop user foo@`local\_ost`; + drop procedure fooproc; + drop database foodb; +# +# Test for bug#12602983 - User without privilege on routine can discover +# its existence by executing "select non_existing_func();" or by +# "call non_existing_proc()"; +# +drop database if exists mysqltest_db; +create database mysqltest_db; +create function mysqltest_db.f1() returns int return 0; +create procedure mysqltest_db.p1() begin end; +# Create user with no privileges on mysqltest_db database. +create user bug12602983_user@localhost; +# Connect as user 'bug12602983_user@localhost' +# Attempt to execute routine on which user doesn't have privileges +# should result in the same 'access denied' error whether +# routine exists or not. +select mysqltest_db.f_does_not_exist(); +ERROR 42000: execute command denied to user 'bug12602983_user'@'localhost' for routine 'mysqltest_db.f_does_not_exist' +call mysqltest_db.p_does_not_exist(); +ERROR 42000: execute command denied to user 'bug12602983_user'@'localhost' for routine 'mysqltest_db.p_does_not_exist' +select mysqltest_db.f1(); +ERROR 42000: execute command denied to user 'bug12602983_user'@'localhost' for routine 'mysqltest_db.f1' +call mysqltest_db.p1(); +ERROR 42000: execute command denied to user 'bug12602983_user'@'localhost' for routine 'mysqltest_db.p1' +create view bug12602983_v1 as select mysqltest_db.f_does_not_exist(); +ERROR 42000: execute command denied to user 'bug12602983_user'@'localhost' for routine 'mysqltest_db.f_does_not_exist' +create view bug12602983_v1 as select mysqltest_db.f1(); +ERROR 42000: execute command denied to user 'bug12602983_user'@'localhost' for routine 'mysqltest_db.f1' +# Connection 'default'. 
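# Editor's note: illustrative sketch, not part of this patch, with hypothetical
# account names. In the host part of an account, an unescaped '_' (like '%')
# is a pattern wildcard, which is why foo@local_ost above matches a client
# connecting from 'localhost': user() reports foo@localhost while
# current_user() reports the matched account foo@local_ost. Writing the host
# as `local\_ost` escapes the wildcard and makes it a literal host name.
CREATE USER demo@'local_ost';     -- matches e.g. a host named 'localhost'
CREATE USER demo2@'local\_ost';   -- matches only the literal host 'local_ost'
DROP USER demo@'local_ost', demo2@'local\_ost';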
+drop user bug12602983_user@localhost; +drop database mysqltest_db; diff --cc mysql-test/r/sp-security.test index 00000000000,00000000000..53dc4f8c7ac new file mode 100644 --- /dev/null +++ b/mysql-test/r/sp-security.test @@@ -1,0 -1,0 +1,1069 @@@ ++# ++# Testing SQL SECURITY of stored procedures ++# ++ ++# Can't test with embedded server that doesn't support grants ++-- source include/not_embedded.inc ++ ++# Save the initial number of concurrent sessions ++--source include/count_sessions.inc ++ ++connect (con1root,localhost,root,,); ++ ++connection con1root; ++use test; ++ ++# Create user user1 with no particular access rights ++grant usage on *.* to user1@localhost; ++flush privileges; ++ ++--disable_warnings ++drop table if exists t1; ++drop database if exists db1_secret; ++--enable_warnings ++# Create our secret database ++create database db1_secret; ++ ++# Can create a procedure in other db ++create procedure db1_secret.dummy() begin end; ++drop procedure db1_secret.dummy; ++ ++use db1_secret; ++ ++create table t1 ( u varchar(64), i int ); ++insert into t1 values('test', 0); ++ ++# A test procedure and function ++create procedure stamp(i int) ++ insert into db1_secret.t1 values (user(), i); ++--replace_column 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' ++show procedure status like 'stamp'; ++ ++delimiter |; ++create function db() returns varchar(64) ++begin ++ declare v varchar(64); ++ ++ select u into v from t1 limit 1; ++ ++ return v; ++end| ++delimiter ;| ++--replace_column 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' ++show function status like 'db'; ++ ++# root can, of course ++call stamp(1); ++select * from t1; ++select db(); ++ ++grant execute on procedure db1_secret.stamp to user1@'%'; ++grant execute on function db1_secret.db to user1@'%'; ++grant execute on procedure db1_secret.stamp to ''@'%'; ++grant execute on function db1_secret.db to ''@'%'; ++ ++connect (con2user1,localhost,user1,,); ++connect (con3anon,localhost,anon,,); ++ ++ ++# ++# User1 can ++# ++connection con2user1; ++ ++# This should work... ++call db1_secret.stamp(2); ++select db1_secret.db(); ++ ++# ...but not this ++--error ER_TABLEACCESS_DENIED_ERROR ++select * from db1_secret.t1; ++ ++# ...and not this ++--error ER_DBACCESS_DENIED_ERROR ++create procedure db1_secret.dummy() begin end; ++--error ER_PROCACCESS_DENIED_ERROR ++drop procedure db1_secret.dummy; ++--error ER_PROCACCESS_DENIED_ERROR ++drop procedure db1_secret.stamp; ++--error ER_PROCACCESS_DENIED_ERROR ++drop function db1_secret.db; ++ ++ ++# ++# Anonymous can ++# ++connection con3anon; ++ ++# This should work... 
++call db1_secret.stamp(3); ++select db1_secret.db(); ++ ++# ...but not this ++--error ER_TABLEACCESS_DENIED_ERROR ++select * from db1_secret.t1; ++ ++# ...and not this ++--error ER_DBACCESS_DENIED_ERROR ++create procedure db1_secret.dummy() begin end; ++--error ER_PROCACCESS_DENIED_ERROR ++drop procedure db1_secret.dummy; ++--error ER_PROCACCESS_DENIED_ERROR ++drop procedure db1_secret.stamp; ++--error ER_PROCACCESS_DENIED_ERROR ++drop function db1_secret.db; ++ ++ ++# ++# Check it out ++# ++connection con1root; ++select * from t1; ++ ++# ++# Change to invoker's rights ++# ++alter procedure stamp sql security invoker; ++--replace_column 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' ++show procedure status like 'stamp'; ++ ++alter function db sql security invoker; ++--replace_column 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' ++show function status like 'db'; ++ ++# root still can ++call stamp(4); ++select * from t1; ++select db(); ++ ++# ++# User1 cannot ++# ++connection con2user1; ++ ++# This should not work ++--error ER_TABLEACCESS_DENIED_ERROR ++call db1_secret.stamp(5); ++--error ER_TABLEACCESS_DENIED_ERROR ++select db1_secret.db(); ++ ++# ++# Anonymous cannot ++# ++connection con3anon; ++ ++# This should not work ++--error ER_TABLEACCESS_DENIED_ERROR ++call db1_secret.stamp(6); ++--error ER_TABLEACCESS_DENIED_ERROR ++select db1_secret.db(); ++ ++# ++# Bug#2777 Stored procedure doesn't observe definer's rights ++# ++ ++connection con1root; ++--disable_warnings ++drop database if exists db2; ++--enable_warnings ++create database db2; ++ ++use db2; ++ ++create table t2 (s1 int); ++insert into t2 values (0); ++ ++grant usage on db2.* to user1@localhost; ++grant select on db2.* to user1@localhost; ++grant usage on db2.* to user2@localhost; ++grant select,insert,update,delete,create routine on db2.* to user2@localhost; ++grant create routine on db2.* to user1@localhost; ++flush privileges; ++ ++connection con2user1; ++use db2; ++ ++create procedure p () insert into t2 values (1); ++ ++# Check that this doesn't work. ++--error ER_TABLEACCESS_DENIED_ERROR ++call p(); ++ ++connect (con4user2,localhost,user2,,); ++ ++connection con4user2; ++use db2; ++ ++# This should not work, since p is executed with definer's (user1's) rights. 
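# Editor's note (illustrative aside, not part of this patch): p was created by
# user1 without an explicit SQL SECURITY clause, so it defaults to SQL SECURITY
# DEFINER and executes with the privileges of its definer rather than those of
# the caller. The security type and definer of a routine can be inspected with:
#   SELECT routine_name, security_type, definer
#     FROM information_schema.routines WHERE routine_schema = 'db2';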
++--error ER_PROCACCESS_DENIED_ERROR ++call p(); ++select * from t2; ++ ++create procedure q () insert into t2 values (2); ++ ++call q(); ++select * from t2; ++ ++connection con1root; ++grant usage on procedure db2.q to user2@localhost with grant option; ++ ++connection con4user2; ++grant execute on procedure db2.q to user1@localhost; ++ ++connection con2user1; ++use db2; ++ ++# This should work ++call q(); ++select * from t2; ++ ++# ++# Bug#6030 Stored procedure has no appropriate DROP privilege ++# (or ALTER for that matter) ++ ++# still connection con2user1 in db2 ++ ++# This should work: ++alter procedure p modifies sql data; ++drop procedure p; ++ ++# This should NOT work ++--error ER_PROCACCESS_DENIED_ERROR ++alter procedure q modifies sql data; ++--error ER_PROCACCESS_DENIED_ERROR ++drop procedure q; ++ ++connection con1root; ++use db2; ++# But root always can ++alter procedure q modifies sql data; ++drop procedure q; ++ ++ ++# Clean up ++#Still connection con1root; ++disconnect con2user1; ++disconnect con3anon; ++disconnect con4user2; ++use test; ++select type,db,name from mysql.proc where db like 'db%'; ++drop database db1_secret; ++drop database db2; ++# Make sure the routines are gone ++select type,db,name from mysql.proc where db like 'db%'; ++# Get rid of the users ++delete from mysql.user where user='user1' or user='user2'; ++delete from mysql.user where user='' and host='%'; ++# And any routine privileges ++delete from mysql.procs_priv where user='user1' or user='user2'; ++# Delete the grants to user ''@'%' that was created above ++delete from mysql.procs_priv where user='' and host='%'; ++delete from mysql.db where user='user2'; ++flush privileges; ++# ++# Test the new security acls ++# ++grant usage on *.* to usera@localhost; ++grant usage on *.* to userb@localhost; ++grant usage on *.* to userc@localhost; ++create database sptest; ++create table t1 ( u varchar(64), i int ); ++create procedure sptest.p1(i int) insert into test.t1 values (user(), i); ++grant insert on t1 to usera@localhost; ++grant execute on procedure sptest.p1 to usera@localhost; ++show grants for usera@localhost; ++grant execute on procedure sptest.p1 to userc@localhost with grant option; ++show grants for userc@localhost; ++ ++connect (con2usera,localhost,usera,,); ++connect (con3userb,localhost,userb,,); ++connect (con4userc,localhost,userc,,); ++ ++connection con2usera; ++call sptest.p1(1); ++--error ER_PROCACCESS_DENIED_ERROR ++grant execute on procedure sptest.p1 to userb@localhost; ++--error ER_PROCACCESS_DENIED_ERROR ++drop procedure sptest.p1; ++ ++connection con3userb; ++--error ER_PROCACCESS_DENIED_ERROR ++call sptest.p1(2); ++--error ER_PROCACCESS_DENIED_ERROR ++grant execute on procedure sptest.p1 to userb@localhost; ++--error ER_PROCACCESS_DENIED_ERROR ++drop procedure sptest.p1; ++ ++connection con4userc; ++call sptest.p1(3); ++grant execute on procedure sptest.p1 to userb@localhost; ++--error ER_PROCACCESS_DENIED_ERROR ++drop procedure sptest.p1; ++ ++connection con3userb; ++call sptest.p1(4); ++--error ER_PROCACCESS_DENIED_ERROR ++grant execute on procedure sptest.p1 to userb@localhost; ++--error ER_PROCACCESS_DENIED_ERROR ++drop procedure sptest.p1; ++ ++connection con1root; ++select * from t1; ++ ++grant all privileges on procedure sptest.p1 to userc@localhost; ++show grants for userc@localhost; ++show grants for userb@localhost; ++ ++connection con4userc; ++revoke all privileges on procedure sptest.p1 from userb@localhost; ++ ++connection con1root; ++show grants for userb@localhost; ++ 
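# Editor's note: illustrative sketch, not part of this patch, with hypothetical
# names. It condenses the routine-level ACL rules exercised above: EXECUTE
# allows CALL, ALTER ROUTINE allows ALTER/DROP of the routine, and WITH GRANT
# OPTION allows passing those privileges on to other accounts.
CREATE DATABASE demo_db;
CREATE PROCEDURE demo_db.p() SELECT 1;
CREATE USER demo_exec@localhost, demo_admin@localhost;
GRANT EXECUTE ON PROCEDURE demo_db.p TO demo_exec@localhost;
GRANT EXECUTE, ALTER ROUTINE ON PROCEDURE demo_db.p
  TO demo_admin@localhost WITH GRANT OPTION;
SHOW GRANTS FOR demo_admin@localhost;
DROP DATABASE demo_db;
DROP USER demo_exec@localhost, demo_admin@localhost;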
++#cleanup ++disconnect con4userc; ++disconnect con3userb; ++disconnect con2usera; ++use test; ++drop database sptest; ++delete from mysql.user where user='usera' or user='userb' or user='userc'; ++delete from mysql.procs_priv where user='usera' or user='userb' or user='userc'; ++delete from mysql.tables_priv where user='usera'; ++flush privileges; ++drop table t1; ++ ++# ++# Bug#9503 reseting correct parameters of thread after error in SP function ++# ++connect (root,localhost,root,,test); ++connection root; ++ ++--disable_warnings ++drop function if exists bug_9503; ++--enable_warnings ++delimiter //; ++create database mysqltest// ++use mysqltest// ++create table t1 (s1 int)// ++grant select on t1 to user1@localhost// ++create function bug_9503 () returns int sql security invoker begin declare v int; ++select min(s1) into v from t1; return v; end// ++delimiter ;// ++ ++connect (user1,localhost,user1,,test); ++connection user1; ++use mysqltest; ++-- error ER_PROCACCESS_DENIED_ERROR ++select bug_9503(); ++ ++connection root; ++grant execute on function bug_9503 to user1@localhost; ++ ++connection user1; ++do 1; ++use test; ++ ++disconnect user1; ++connection root; ++REVOKE ALL PRIVILEGES, GRANT OPTION FROM user1@localhost; ++drop function bug_9503; ++use test; ++drop database mysqltest; ++connection default; ++disconnect root; ++ ++# ++# correct value from current_user() in function run from "security definer" ++# (Bug#7291 Stored procedures: wrong CURRENT_USER value) ++# ++connection con1root; ++use test; ++ ++select current_user(); ++select user(); ++create procedure bug7291_0 () sql security invoker select current_user(), user(); ++create procedure bug7291_1 () sql security definer call bug7291_0(); ++create procedure bug7291_2 () sql security invoker call bug7291_0(); ++grant execute on procedure bug7291_0 to user1@localhost; ++grant execute on procedure bug7291_1 to user1@localhost; ++grant execute on procedure bug7291_2 to user1@localhost; ++ ++connect (user1,localhost,user1,,); ++connection user1; ++ ++call bug7291_2(); ++call bug7291_1(); ++ ++connection con1root; ++drop procedure bug7291_1; ++drop procedure bug7291_2; ++drop procedure bug7291_0; ++disconnect user1; ++REVOKE ALL PRIVILEGES, GRANT OPTION FROM user1@localhost; ++drop user user1@localhost; ++ ++# ++# Bug#12318 Wrong error message when accessing an inaccessible stored ++# procedure in another database when the current database is ++# information_schema. 
++# ++ ++--disable_warnings ++drop database if exists mysqltest_1; ++--enable_warnings ++ ++create database mysqltest_1; ++delimiter //; ++create procedure mysqltest_1.p1() ++begin ++ select 1 from dual; ++end// ++delimiter ;// ++ ++grant usage on *.* to mysqltest_1@localhost; ++ ++connect (n1,localhost,mysqltest_1,,information_schema,$MASTER_MYPORT,$MASTER_MYSOCK); ++connection n1; ++--error ER_PROCACCESS_DENIED_ERROR ++call mysqltest_1.p1(); ++disconnect n1; ++# Test also without a current database ++connect (n2,localhost,mysqltest_1,,*NO-ONE*,$MASTER_MYPORT,$MASTER_MYSOCK); ++connection n2; ++--error ER_PROCACCESS_DENIED_ERROR ++call mysqltest_1.p1(); ++disconnect n2; ++ ++connection default; ++ ++drop procedure mysqltest_1.p1; ++drop database mysqltest_1; ++ ++revoke usage on *.* from mysqltest_1@localhost; ++drop user mysqltest_1@localhost; ++ ++# ++# Bug#12812 create view calling a function works without execute right ++# on function ++delimiter |; ++--disable_warnings ++drop function if exists bug12812| ++--enable_warnings ++create function bug12812() returns char(2) ++begin ++ return 'ok'; ++end; ++create user user_bug12812@localhost IDENTIFIED BY 'ABC'| ++--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK ++connect (test_user_12812,localhost,user_bug12812,ABC,test)| ++--error ER_PROCACCESS_DENIED_ERROR ++SELECT test.bug12812()| ++--error ER_PROCACCESS_DENIED_ERROR ++CREATE VIEW v1 AS SELECT test.bug12812()| ++# Cleanup ++connection default| ++disconnect test_user_12812| ++DROP USER user_bug12812@localhost| ++drop function bug12812| ++delimiter ;| ++ ++ ++# ++# Bug#14834 Server denies to execute Stored Procedure ++# ++# The problem here was with '_' in the database name. ++# ++create database db_bug14834; ++ ++create user user1_bug14834@localhost identified by ''; ++# The exact name of the database (no wildcard) ++grant all on `db\_bug14834`.* to user1_bug14834@localhost; ++ ++create user user2_bug14834@localhost identified by ''; ++# The exact name of the database (no wildcard) ++grant all on `db\_bug14834`.* to user2_bug14834@localhost; ++ ++create user user3_bug14834@localhost identified by ''; ++# Wildcards in the database name ++grant all on `db__ug14834`.* to user3_bug14834@localhost; ++ ++connect (user1_bug14834,localhost,user1_bug14834,,db_bug14834); ++# Create the procedure and check that we can call it ++create procedure p_bug14834() select user(), current_user(); ++call p_bug14834(); ++ ++connect (user2_bug14834,localhost,user2_bug14834,,db_bug14834); ++# This didn't work before ++call p_bug14834(); ++ ++connect (user3_bug14834,localhost,user3_bug14834,,db_bug14834); ++# Should also work ++call p_bug14834(); ++ ++# Cleanup ++connection default; ++disconnect user1_bug14834; ++disconnect user2_bug14834; ++disconnect user3_bug14834; ++drop user user1_bug14834@localhost; ++drop user user2_bug14834@localhost; ++drop user user3_bug14834@localhost; ++drop database db_bug14834; ++ ++ ++# ++# Bug#14533 'desc tbl' in stored procedure causes error ++# ER_TABLEACCESS_DENIED_ERROR ++# ++create database db_bug14533; ++use db_bug14533; ++create table t1 (id int); ++create user user_bug14533@localhost identified by ''; ++ ++create procedure bug14533_1() ++ sql security definer ++ desc db_bug14533.t1; ++ ++create procedure bug14533_2() ++ sql security definer ++ select * from db_bug14533.t1; ++ ++grant execute on procedure db_bug14533.bug14533_1 to user_bug14533@localhost; ++grant execute on procedure db_bug14533.bug14533_2 to user_bug14533@localhost; ++ ++connect 
(user_bug14533,localhost,user_bug14533,,test); ++ ++# These should work ++call db_bug14533.bug14533_1(); ++call db_bug14533.bug14533_2(); ++ ++# For reference, these should not work ++--error ER_TABLEACCESS_DENIED_ERROR ++desc db_bug14533.t1; ++--error ER_TABLEACCESS_DENIED_ERROR ++select * from db_bug14533.t1; ++ ++# Cleanup ++connection default; ++disconnect user_bug14533; ++drop user user_bug14533@localhost; ++drop database db_bug14533; ++ ++ ++# ++# WL#2897 Complete definer support in the stored routines. ++# ++# The following cases are tested: ++# 1. check that if DEFINER-clause is not explicitly specified, stored routines ++# are created with CURRENT_USER privileges; ++# 2. check that if DEFINER-clause specifies non-current user, SUPER privilege ++# is required to create a stored routine; ++# 3. check that if DEFINER-clause specifies non-existent user, a warning is ++# emitted. ++# 4. check that SHOW CREATE PROCEDURE | FUNCTION works correctly; ++# ++# The following cases are tested in other test suites: ++# - check that mysqldump dumps new attribute correctly; ++# - check that slave replicates CREATE-statements with explicitly specified ++# DEFINER correctly. ++# ++ ++# Setup the environment. ++ ++--echo ++--echo ---> connection: root ++--connection con1root ++ ++--disable_warnings ++DROP DATABASE IF EXISTS mysqltest; ++--enable_warnings ++ ++CREATE DATABASE mysqltest; ++ ++CREATE USER mysqltest_1@localhost; ++GRANT ALL PRIVILEGES ON mysqltest.* TO mysqltest_1@localhost; ++ ++CREATE USER mysqltest_2@localhost; ++GRANT SUPER ON *.* TO mysqltest_2@localhost; ++GRANT ALL PRIVILEGES ON mysqltest.* TO mysqltest_2@localhost; ++ ++--connect (mysqltest_2_con,localhost,mysqltest_2,,mysqltest) ++--connect (mysqltest_1_con,localhost,mysqltest_1,,mysqltest) ++ ++# test case (1). ++ ++--echo ++--echo ---> connection: mysqltest_2_con ++--connection mysqltest_2_con ++ ++USE mysqltest; ++ ++CREATE PROCEDURE wl2897_p1() SELECT 1; ++ ++CREATE FUNCTION wl2897_f1() RETURNS INT RETURN 1; ++ ++# test case (2). ++ ++--echo ++--echo ---> connection: mysqltest_1_con ++--connection mysqltest_1_con ++ ++USE mysqltest; ++ ++--error ER_SPECIFIC_ACCESS_DENIED_ERROR ++CREATE DEFINER=root@localhost PROCEDURE wl2897_p2() SELECT 2; ++ ++--error ER_SPECIFIC_ACCESS_DENIED_ERROR ++CREATE DEFINER=root@localhost FUNCTION wl2897_f2() RETURNS INT RETURN 2; ++ ++# test case (3). ++ ++--echo ++--echo ---> connection: mysqltest_2_con ++--connection mysqltest_2_con ++ ++use mysqltest; ++ ++CREATE DEFINER='a @ b @ c'@localhost PROCEDURE wl2897_p3() SELECT 3; ++ ++CREATE DEFINER='a @ b @ c'@localhost FUNCTION wl2897_f3() RETURNS INT RETURN 3; ++ ++# test case (4). ++ ++--echo ++--echo ---> connection: con1root ++--connection con1root ++ ++USE mysqltest; ++ ++SHOW CREATE PROCEDURE wl2897_p1; ++SHOW CREATE PROCEDURE wl2897_p3; ++ ++SHOW CREATE FUNCTION wl2897_f1; ++SHOW CREATE FUNCTION wl2897_f3; ++ ++# Cleanup. ++ ++DROP USER mysqltest_1@localhost; ++DROP USER mysqltest_2@localhost; ++ ++DROP DATABASE mysqltest; ++ ++--disconnect mysqltest_1_con ++--disconnect mysqltest_2_con ++ ++ ++# ++# Bug#13198 SP executes if definer does not exist ++# ++ ++# Prepare environment. 
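# Editor's note: illustrative sketch, not part of this patch, run as a
# privileged account with hypothetical names. It restates the DEFINER rules
# from the WL#2897 block above, which the Bug#13198 test below also relies on:
# without a DEFINER clause the routine gets DEFINER = CURRENT_USER; naming a
# different account requires the SUPER privilege; naming a non-existent
# account succeeds but raises a warning, and calling a routine whose definer
# no longer exists fails at CALL time.
CREATE DATABASE demo_db;
CREATE PROCEDURE demo_db.p1() SELECT 1;    -- definer = current user
CREATE DEFINER='no_such_user'@'localhost' PROCEDURE demo_db.p2() SELECT 2;  -- warning expected
SHOW CREATE PROCEDURE demo_db.p1;
DROP DATABASE demo_db;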
++ ++--echo ++--echo ---> connection: root ++--connection con1root ++ ++--disable_warnings ++DROP DATABASE IF EXISTS mysqltest; ++--enable_warnings ++ ++CREATE DATABASE mysqltest; ++ ++CREATE USER mysqltest_1@localhost; ++GRANT ALL PRIVILEGES ON mysqltest.* TO mysqltest_1@localhost; ++ ++CREATE USER mysqltest_2@localhost; ++GRANT ALL PRIVILEGES ON mysqltest.* TO mysqltest_2@localhost; ++ ++--connect (mysqltest_1_con,localhost,mysqltest_1,,mysqltest) ++--connect (mysqltest_2_con,localhost,mysqltest_2,,mysqltest) ++ ++# Create a procedure/function under u1. ++ ++--echo ++--echo ---> connection: mysqltest_1_con ++--connection mysqltest_1_con ++ ++USE mysqltest; ++ ++CREATE PROCEDURE bug13198_p1() ++ SELECT 1; ++ ++CREATE FUNCTION bug13198_f1() RETURNS INT ++ RETURN 1; ++ ++CALL bug13198_p1(); ++ ++SELECT bug13198_f1(); ++ ++# Check that u2 can call the procedure/function. ++ ++--echo ++--echo ---> connection: mysqltest_2_con ++--connection mysqltest_2_con ++ ++USE mysqltest; ++ ++CALL bug13198_p1(); ++ ++SELECT bug13198_f1(); ++ ++# Drop user u1 (definer of the object); ++ ++--echo ++--echo ---> connection: root ++--connection con1root ++ ++--disconnect mysqltest_1_con ++ ++DROP USER mysqltest_1@localhost; ++ ++# Check that u2 can not call the procedure/function. ++ ++--echo ++--echo ---> connection: mysqltest_2_con ++--connection mysqltest_2_con ++ ++USE mysqltest; ++ ++--error ER_NO_SUCH_USER ++CALL bug13198_p1(); ++ ++--error ER_NO_SUCH_USER ++SELECT bug13198_f1(); ++ ++# Cleanup. ++ ++--echo ++--echo ---> connection: root ++--connection con1root ++ ++--disconnect mysqltest_2_con ++ ++DROP USER mysqltest_2@localhost; ++ ++DROP DATABASE mysqltest; ++ ++# ++# Bug#19857 When a user with CREATE ROUTINE priv creates a routine, ++# it results in NULL p/w ++# ++ ++# Can't test with embedded server that doesn't support grants ++ ++GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow'; ++GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO ++user19857@localhost; ++SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; ++ ++--connect (mysqltest_2_con,localhost,user19857,meow,test) ++--echo ++--echo ---> connection: mysqltest_2_con ++--connection mysqltest_2_con ++ ++USE test; ++ ++DELIMITER //; ++ CREATE PROCEDURE sp19857() DETERMINISTIC ++ BEGIN ++ DECLARE a INT; ++ SET a=1; ++ SELECT a; ++ END // ++DELIMITER ;// ++ ++SHOW CREATE PROCEDURE test.sp19857; ++ ++--disconnect mysqltest_2_con ++--connect (mysqltest_2_con,localhost,user19857,meow,test) ++--connection mysqltest_2_con ++ ++DROP PROCEDURE IF EXISTS test.sp19857; ++ ++--echo ++--echo ---> connection: root ++--connection con1root ++ ++--disconnect mysqltest_2_con ++ ++SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; ++ ++DROP USER user19857@localhost; ++ ++--disconnect con1root ++--connection default ++use test; ++ ++# ++# Bug#18630 Arguments of suid routine calculated in wrong security context ++# ++# Arguments of suid routines were calculated in definer's security ++# context instead of caller's context thus creating security hole. 
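# Editor's note (illustrative aside, not part of this patch): the point of the
# Bug#18630 test below is that expressions passed as arguments to a SQL
# SECURITY DEFINER routine, e.g. CALL p_suid(f_evil()), must be evaluated with
# the caller's privileges. If they were evaluated in the definer's context, an
# unprivileged caller could smuggle reads of protected tables into the
# argument expression, which is exactly what f_evil() attempts with t1.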
++# ++--disable_warnings ++DROP TABLE IF EXISTS t1; ++DROP VIEW IF EXISTS v1; ++DROP FUNCTION IF EXISTS f_suid; ++DROP PROCEDURE IF EXISTS p_suid; ++DROP FUNCTION IF EXISTS f_evil; ++--enable_warnings ++DELETE FROM mysql.user WHERE user LIKE 'mysqltest\_%'; ++DELETE FROM mysql.db WHERE user LIKE 'mysqltest\_%'; ++DELETE FROM mysql.tables_priv WHERE user LIKE 'mysqltest\_%'; ++DELETE FROM mysql.columns_priv WHERE user LIKE 'mysqltest\_%'; ++FLUSH PRIVILEGES; ++ ++CREATE TABLE t1 (i INT); ++CREATE FUNCTION f_suid(i INT) RETURNS INT SQL SECURITY DEFINER RETURN 0; ++CREATE PROCEDURE p_suid(IN i INT) SQL SECURITY DEFINER SET @c:= 0; ++ ++CREATE USER mysqltest_u1@localhost; ++# Thanks to this grant statement privileges of anonymous users on ++# 'test' database are not applicable for mysqltest_u1@localhost. ++GRANT EXECUTE ON test.* TO mysqltest_u1@localhost; ++ ++delimiter |; ++CREATE DEFINER=mysqltest_u1@localhost FUNCTION f_evil () RETURNS INT ++ SQL SECURITY INVOKER ++BEGIN ++ SET @a:= CURRENT_USER(); ++ SET @b:= (SELECT COUNT(*) FROM t1); ++ RETURN @b; ++END| ++delimiter ;| ++ ++CREATE SQL SECURITY INVOKER VIEW v1 AS SELECT f_evil(); ++ ++connect (conn1, localhost, mysqltest_u1,,); ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++SELECT COUNT(*) FROM t1; ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++SELECT f_evil(); ++SELECT @a, @b; ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++SELECT f_suid(f_evil()); ++SELECT @a, @b; ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++CALL p_suid(f_evil()); ++SELECT @a, @b; ++ ++--error ER_TABLEACCESS_DENIED_ERROR ++SELECT * FROM v1; ++SELECT @a, @b; ++ ++disconnect conn1; ++connection default; ++ ++DROP VIEW v1; ++DROP FUNCTION f_evil; ++DROP USER mysqltest_u1@localhost; ++DROP PROCEDURE p_suid; ++DROP FUNCTION f_suid; ++DROP TABLE t1; ++ ++--echo # ++--echo # Bug #48872 : Privileges for stored functions ignored if function name ++--echo # is mixed case ++--echo # ++ ++CREATE DATABASE B48872; ++USE B48872; ++CREATE TABLE `TestTab` (id INT); ++INSERT INTO `TestTab` VALUES (1),(2); ++CREATE FUNCTION `f_Test`() RETURNS INT RETURN 123; ++CREATE FUNCTION `f_Test_denied`() RETURNS INT RETURN 123; ++CREATE USER 'tester'; ++CREATE USER 'Tester'; ++GRANT SELECT ON TABLE `TestTab` TO 'tester'; ++GRANT EXECUTE ON FUNCTION `f_Test` TO 'tester'; ++GRANT EXECUTE ON FUNCTION `f_Test_denied` TO 'Tester'; ++ ++SELECT f_Test(); ++SELECT * FROM TestTab; ++ ++CONNECT (con_tester,localhost,tester,,B48872); ++CONNECT (con_tester_denied,localhost,Tester,,B48872); ++CONNECTION con_tester; ++ ++SELECT * FROM TestTab; ++SELECT `f_Test`(); ++SELECT `F_TEST`(); ++SELECT f_Test(); ++SELECT F_TEST(); ++ ++CONNECTION con_tester_denied; ++ ++--disable_result_log ++--error ER_TABLEACCESS_DENIED_ERROR ++SELECT * FROM TestTab; ++--error ER_PROCACCESS_DENIED_ERROR ++SELECT `f_Test`(); ++--error ER_PROCACCESS_DENIED_ERROR ++SELECT `F_TEST`(); ++--error ER_PROCACCESS_DENIED_ERROR ++SELECT f_Test(); ++--error ER_PROCACCESS_DENIED_ERROR ++SELECT F_TEST(); ++--enable_result_log ++SELECT `f_Test_denied`(); ++SELECT `F_TEST_DENIED`(); ++ ++CONNECTION default; ++DISCONNECT con_tester; ++DISCONNECT con_tester_denied; ++DROP TABLE `TestTab`; ++DROP FUNCTION `f_Test`; ++DROP FUNCTION `f_Test_denied`; ++ ++USE test; ++DROP USER 'tester'; ++DROP USER 'Tester'; ++DROP DATABASE B48872; ++ ++--echo End of 5.0 tests. ++ ++ ++--echo # ++--echo # Test for bug#57061 "User without privilege on routine can discover ++--echo # its existence." 
++--echo # ++--disable_warnings ++drop database if exists mysqltest_db; ++--enable_warnings ++create database mysqltest_db; ++--echo # Create user with no privileges on mysqltest_db database. ++create user bug57061_user@localhost; ++create function mysqltest_db.f1() returns int return 0; ++create procedure mysqltest_db.p1() begin end; ++--echo # Connect as user 'bug57061_user@localhost' ++connect (conn1, localhost, bug57061_user,,); ++--echo # Attempt to drop routine on which user doesn't have privileges ++--echo # should result in the same 'access denied' type of error whether ++--echo # routine exists or not. ++--error ER_PROCACCESS_DENIED_ERROR ++drop function if exists mysqltest_db.f_does_not_exist; ++--error ER_PROCACCESS_DENIED_ERROR ++drop procedure if exists mysqltest_db.p_does_not_exist; ++--error ER_PROCACCESS_DENIED_ERROR ++drop function if exists mysqltest_db.f1; ++--error ER_PROCACCESS_DENIED_ERROR ++drop procedure if exists mysqltest_db.p1; ++--echo # Connection 'default'. ++connection default; ++disconnect conn1; ++drop user bug57061_user@localhost; ++drop database mysqltest_db; ++ ++ ++--echo # ++--echo # Bug#11882603 SELECT_ACL ON ANY COLUMN IN MYSQL.PROC ALLOWS TO SEE ++--echo # DEFINITION OF ANY ROUTINE. ++--echo # ++ ++--disable_warnings ++DROP DATABASE IF EXISTS db1; ++--enable_warnings ++ ++CREATE DATABASE db1; ++CREATE PROCEDURE db1.p1() SELECT 1; ++CREATE USER user2@localhost IDENTIFIED BY ''; ++GRANT SELECT(db) ON mysql.proc TO user2@localhost; ++ ++--echo # Connection con2 as user2 ++connect (con2, localhost, user2); ++--echo # The statement below before disclosed info from body_utf8 column. ++--error ER_SP_DOES_NOT_EXIST ++SHOW CREATE PROCEDURE db1.p1; ++ ++--echo # Check that SHOW works with SELECT grant on whole table ++--echo # Connection default ++connection default; ++GRANT SELECT ON mysql.proc TO user2@localhost; ++ ++--echo # Connection con2 ++connection con2; ++--echo # This should work ++SHOW CREATE PROCEDURE db1.p1; ++ ++--echo # Connection default ++connection default; ++disconnect con2; ++DROP USER user2@localhost; ++DROP DATABASE db1; ++ ++# ++# Bug#27407480: AUTOMATIC_SP_PRIVILEGES REQUIRES NEED THE INSERT PRIVILEGES FOR MYSQL.USER TABLE ++# ++create user foo@local_ost; ++# ++# Create a user with an authentification plugin 'foobar'. ++# Instead of using a normal "CREATE USER <user> IDENTIFIED VIA <plugin>" ++# we do CREATE (without VIA) followed by UPDATE and FLUSH. ++# This is to avoid installing a real plugin and thus avoid the test dependency. ++# We won't login under this user in the below test, so this is fine. 
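# Editor's note (illustrative aside, not part of this patch): Bug#27407480
# concerns the automatic routine grant controlled by the global variable
# automatic_sp_privileges. With it ON (the default), creating a routine
# implicitly grants its creator EXECUTE and ALTER ROUTINE on that routine,
# which is the extra line that appears in SHOW GRANTS after the CREATE
# PROCEDURE below. The current setting can be checked with:
#   SELECT @@global.automatic_sp_privileges;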
++# ++create user foo@`local\_ost`; ++update mysql.user set plugin='foobar' where host='local\\_ost'; ++flush privileges; ++create database foodb; ++grant create routine on foodb.* to foo@local_ost; ++connect con1,localhost,foo; ++select user(), current_user(); ++show grants; ++create procedure fooproc() select 'i am fooproc'; ++show grants; ++disconnect con1; ++connection default; ++drop user foo@local_ost; ++drop user foo@`local\_ost`; ++drop procedure fooproc; ++drop database foodb; ++ ++--echo # ++--echo # Test for bug#12602983 - User without privilege on routine can discover ++--echo # its existence by executing "select non_existing_func();" or by ++--echo # "call non_existing_proc()"; ++--echo # ++--disable_warnings ++drop database if exists mysqltest_db; ++--enable_warnings ++create database mysqltest_db; ++create function mysqltest_db.f1() returns int return 0; ++create procedure mysqltest_db.p1() begin end; ++ ++--echo # Create user with no privileges on mysqltest_db database. ++create user bug12602983_user@localhost; ++ ++--echo # Connect as user 'bug12602983_user@localhost' ++connect (conn1, localhost, bug12602983_user,,); ++ ++--echo # Attempt to execute routine on which user doesn't have privileges ++--echo # should result in the same 'access denied' error whether ++--echo # routine exists or not. ++--error ER_PROCACCESS_DENIED_ERROR ++select mysqltest_db.f_does_not_exist(); ++--error ER_PROCACCESS_DENIED_ERROR ++call mysqltest_db.p_does_not_exist(); ++ ++--error ER_PROCACCESS_DENIED_ERROR ++select mysqltest_db.f1(); ++--error ER_PROCACCESS_DENIED_ERROR ++call mysqltest_db.p1(); ++ ++--error ER_PROCACCESS_DENIED_ERROR ++create view bug12602983_v1 as select mysqltest_db.f_does_not_exist(); ++--error ER_PROCACCESS_DENIED_ERROR ++create view bug12602983_v1 as select mysqltest_db.f1(); ++ ++--echo # Connection 'default'. 
++connection default; ++disconnect conn1; ++drop user bug12602983_user@localhost; ++drop database mysqltest_db; ++ ++# Wait till all disconnects are completed ++--source include/wait_until_count_sessions.inc diff --cc mysql-test/r/sp.result index 2cb1b701e2d,4535056242a..058ef69c548 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@@ -7898,95 -8055,21 +7898,112 @@@ SET S.CLOSE_YN = ' where 1=1; drop function if exists f1; drop table t1,t2; + # + # MDEV-16957: Server crashes in Field_iterator_natural_join::next + # upon 2nd execution of SP + # + CREATE TABLE t1 (a INT, b VARCHAR(32)); + CREATE PROCEDURE sp() SELECT * FROM t1 AS t1x JOIN t1 AS t1y USING (c); + CALL sp; + ERROR 42S22: Unknown column 'c' in 'from clause' + CALL sp; + ERROR 42S22: Unknown column 'c' in 'from clause' + CALL sp; + ERROR 42S22: Unknown column 'c' in 'from clause' + alter table t1 add column c int; + CALL sp; + c a b a b + DROP PROCEDURE sp; + DROP TABLE t1; # End of 5.5 test +# +# MDEV-7040: Crash in field_conv, memcpy_field_possible, part#2 +# +create table t1 ( +col1 bigint(20), +col2 char(1), +col3 char(2) +); +insert into t1 values (1,'a','a'), (2,'b','b'); +create table t2 as select * from t1; +create table t3 as select * from t1; +create table t4 as select * from t1; +create table t5 as select * from t1; +create table t6 as select * from t1; +flush tables; +CREATE PROCEDURE p1() +begin +DECLARE _var1 bigint(20) UNSIGNED; +DECLARE _var2 CHAR(1) DEFAULT NULL; +DECLARE _var3 CHAR(1) DEFAULT NULL; +DECLARE _done BOOLEAN DEFAULT 0; +declare cur1 cursor for +select col1, col2, col3 +from t1 +where +col1 in (select t2.col1 from t2 where t2.col2=t1.col2) or +col2 in (select t3.col3 from t3 where t3.col3=t1.col2) ; +DECLARE CONTINUE HANDLER FOR NOT FOUND SET _done = 1; +OPEN cur1; +set _var1 = (select _var1 from t4 limit 1); +set _var1 = (select _var1 from t5 limit 1); +set _var1 = (select _var1 from t6 limit 1); +label1: +LOOP +SET _done = 0; +FETCH cur1 INTO _var1, _var2, _var3; +IF _done THEN +LEAVE label1; +END IF; +END LOOP label1; +CLOSE cur1; +end| +set @tmp_toc= @@table_open_cache; +set @tmp_tdc= @@table_definition_cache; +set global table_open_cache=1; +set global table_definition_cache=1; +Warnings: +Warning 1292 Truncated incorrect table_definition_cache value: '1' +call p1(); +set global table_open_cache= @tmp_toc; +set global table_definition_cache= @tmp_tdc; +drop procedure p1; +drop table t1,t2,t3,t4,t5,t6; +# +# MDEV-11935: Queries in stored procedures with and +# EXISTS(SELECT * FROM VIEW) crashes and closes hte conneciton. 
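# Editor's note (illustrative aside, not part of this patch): in the MDEV-7040
# test above, table_open_cache and table_definition_cache are lowered to 1,
# presumably to force the tables used inside p1() to be evicted and reopened
# while the cursor is active, which is the condition that exposed the crash.
# The previous values are saved in @tmp_toc/@tmp_tdc and restored afterwards.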
+# +CREATE TABLE ANY_TABLE ( +ENTITY_UID BIGINT NOT NULL +); +CREATE TABLE SECURITY_PATH( +origid BIGINT UNSIGNED NOT NULL, +destid BIGINT UNSIGNED NOT NULL, +KEY (destid) +); +CREATE VIEW ENTITY_ACCESS ( +ENTITY_UID, +OWNER_UID +) AS +SELECT SP1.origid, +SP2.destid +FROM SECURITY_PATH SP1 +JOIN SECURITY_PATH SP2 ON SP1.destid = SP2.origid +; +CREATE PROCEDURE SP_EXAMPLE_SELECT () +BEGIN +SELECT * +FROM ANY_TABLE AT1 +WHERE EXISTS ( SELECT * +FROM ENTITY_ACCESS EA +WHERE AT1.ENTITY_UID = EA.ENTITY_UID +AND EA.OWNER_UID IS NULL ); +END +// +CALL SP_EXAMPLE_SELECT (); +ENTITY_UID +CALL SP_EXAMPLE_SELECT (); +ENTITY_UID +drop procedure SP_EXAMPLE_SELECT; +drop view ENTITY_ACCESS; +drop table ANY_TABLE, SECURITY_PATH; diff --cc mysql-test/r/type_float.result index 57e2660750b,f8574167de3..f71d4c13a0d --- a/mysql-test/r/type_float.result +++ b/mysql-test/r/type_float.result @@@ -448,87 -448,38 +448,123 @@@ select format(truncate('1.7976931348623 foo 0 # + # MDEV-17249 MAKETIME(-1e50,0,0) returns a wrong result + # + SELECT LEFT('a',EXP(50)); + LEFT('a',EXP(50)) + a + SELECT LEFT('a', COALESCE(1e30)); + LEFT('a', COALESCE(1e30)) + a + CREATE TABLE t1 (a FLOAT); + INSERT INTO t1 VALUES (1e30); + SELECT LEFT('a',a), LEFT('a',1e30) FROM t1; + LEFT('a',a) LEFT('a',1e30) + a a + DROP TABLE t1; + PREPARE stmt FROM 'SELECT LEFT(111,?)'; + SET @a=1e30; + EXECUTE stmt USING @a; + LEFT(111,?) + 111 + DEALLOCATE PREPARE stmt; + CREATE TABLE t1 (a INT); + INSERT INTO t1 VALUES (1),(2),(3); + SELECT LEFT('a',(SELECT 1e30 FROM t1 LIMIT 1)); + LEFT('a',(SELECT 1e30 FROM t1 LIMIT 1)) + a + DROP TABLE t1; + CREATE TABLE t1 (a DOUBLE); + INSERT INTO t1 VALUES (1e30),(0); + SELECT LEFT('a', SUM(a)) FROM t1; + LEFT('a', SUM(a)) + a + SELECT LEFT('a', AVG(a)) FROM t1; + LEFT('a', AVG(a)) + a + DROP TABLE t1; ++# +# Bug #13500371 63704: CONVERSION OF '1.' 
TO A NUMBER GIVES ERROR 1265 +# (WARN_DATA_TRUNCATED) +# +CREATE TABLE t1 (f FLOAT); +INSERT INTO t1 VALUES ('1.'); +INSERT INTO t1 VALUES ('2.0.'); +Warnings: +Warning 1265 Data truncated for column 'f' at row 1 +INSERT INTO t1 VALUES ('.'); +Warnings: +Warning 1265 Data truncated for column 'f' at row 1 +SELECT * FROM t1 ORDER BY f; +f +0 +1 +2 +DROP TABLE t1; +# +# Start of 10.0 tests +# +# +# MDEV-6950 Bad results with joins comparing DATE/DATETIME and INT/DECIMAL/DOUBLE/ENUM/VARCHAR columns +# +CREATE TABLE t1 (a DATETIME PRIMARY KEY); +INSERT INTO t1 VALUES ('1999-01-01 00:00:00'); +CREATE TABLE t2 (a DOUBLE); +INSERT INTO t2 VALUES (19990101000000); +INSERT INTO t2 VALUES (990101000000); +SELECT t1.* FROM t1,t2 WHERE t1.a=t2.a; +a +1999-01-01 00:00:00 +1999-01-01 00:00:00 +SELECT t1.* FROM t1 LEFT JOIN t2 ON t1.a=t2.a; +a +1999-01-01 00:00:00 +1999-01-01 00:00:00 +ALTER TABLE t2 ADD PRIMARY KEY(a); +SELECT t1.* FROM t1,t2 WHERE t1.a=t2.a; +a +1999-01-01 00:00:00 +1999-01-01 00:00:00 +SELECT t1.* FROM t1 LEFT JOIN t2 ON t1.a=t2.a; +a +1999-01-01 00:00:00 +1999-01-01 00:00:00 +# t2 should NOT be eliminated +EXPLAIN SELECT t1.* FROM t1 LEFT JOIN t2 ON t1.a=t2.a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 system NULL NULL NULL NULL 1 +1 SIMPLE t2 index PRIMARY PRIMARY 8 NULL 2 Using where; Using index +DROP TABLE t1,t2; +# +# MDEV-6971 Bad results with joins comparing TIME and DOUBLE/DECIMAL columns +# +CREATE TABLE t1 (a TIME(6) PRIMARY KEY); +INSERT INTO t1 VALUES ('10:20:30'); +CREATE TABLE t2 (a DOUBLE); +INSERT INTO t2 VALUES (102030),(102030.000000001); +SELECT t1.* FROM t1 JOIN t2 USING(a); +a +10:20:30.000000 +10:20:30.000000 +SELECT t1.* FROM t1 LEFT JOIN t2 USING(a); +a +10:20:30.000000 +10:20:30.000000 +ALTER TABLE t2 ADD PRIMARY KEY(a); +SELECT t1.* FROM t1 JOIN t2 USING(a); +a +10:20:30.000000 +10:20:30.000000 +SELECT t1.* FROM t1 LEFT JOIN t2 USING(a); +a +10:20:30.000000 +10:20:30.000000 +# t2 should NOT be elimitated +EXPLAIN SELECT t1.* FROM t1 LEFT JOIN t2 USING(a); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 system NULL NULL NULL NULL 1 +1 SIMPLE t2 index PRIMARY PRIMARY 8 NULL 2 Using where; Using index +DROP TABLE t1,t2; +# +# End of 10.0 tests +# diff --cc mysql-test/r/type_float.test index 00000000000,00000000000..5dfb4a75bb3 new file mode 100644 --- /dev/null +++ b/mysql-test/r/type_float.test @@@ -1,0 -1,0 +1,417 @@@ ++# Description ++# ----------- ++# Numeric floating point. 
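# Editor's note: illustrative sketch, not part of this patch, using a
# hypothetical table name. Several tests in this file rely on how FLOAT(p)
# is mapped: a precision up to 24 is stored as single precision, 25 to 53 as
# double precision, and anything above 53 is rejected with error 1063
# (ER_WRONG_FIELD_SPEC), as the float(54) case later in this file shows.
CREATE TABLE demo_float (f24 FLOAT(24), f52 FLOAT(52));
SHOW CREATE TABLE demo_float;   -- f24 is shown as float, f52 as double
DROP TABLE demo_float;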
++ ++--disable_warnings ++drop table if exists t1,t2; ++--enable_warnings ++ ++SELECT 10,10.0,10.,.1e+2,100.0e-1; ++SELECT 6e-16, -6e-16, --6e-16, -6e-16+1.000000; ++SELECT 1e1,1.e1,1.0e1,1e+1,1.e+1,1.0e+1,1e-1,1.e-1,1.0e-1; ++SELECT 0.001e+1,0.001e-1, -0.001e+01,-0.001e-01; ++SELECT 123.23E+02,-123.23E-02,"123.23E+02"+0.0,"-123.23E-02"+0.0; ++SELECT 2147483647E+02,21474836.47E+06; ++ ++create table t1 (f1 float(24),f2 float(52)); ++# We mask out Privileges column because it differs for embedded server ++--replace_column 8 # ++show full columns from t1; ++insert into t1 values(10,10),(1e+5,1e+5),(1234567890,1234567890),(1e+10,1e+10),(1e+15,1e+15),(1e+20,1e+20),(1e+50,1e+50),(1e+150,1e+150); ++insert into t1 values(-10,-10),(1e-5,1e-5),(1e-10,1e-10),(1e-15,1e-15),(1e-20,1e-20),(1e-50,1e-50),(1e-150,1e-150); ++select * from t1; ++drop table t1; ++ ++create table t1 (datum double); ++insert into t1 values (0.5),(1.0),(1.5),(2.0),(2.5); ++select * from t1; ++select * from t1 where datum < 1.5; ++select * from t1 where datum > 1.5; ++select * from t1 where datum = 1.5; ++drop table t1; ++ ++create table t1 (a decimal(7,3) not null, key (a)); ++insert into t1 values ("0"),("-0.00"),("-0.01"),("-0.002"),("1"); ++select a from t1 order by a; ++select min(a) from t1; ++drop table t1; ++ ++# ++# BUG#3612, BUG#4393, BUG#4356, BUG#4394 ++# ++ ++create table t1 (c1 double, c2 varchar(20)); ++insert t1 values (121,"16"); ++select c1 + c1 * (c2 / 100) as col from t1; ++create table t2 select c1 + c1 * (c2 / 100) as col1, round(c1, 5) as col2, round(c1, 35) as col3, sqrt(c1*1e-15) col4 from t1; ++# Floats are a bit different in PS ++--disable_ps_protocol ++select * from t2; ++--enable_ps_protocol ++show create table t2; ++drop table t1,t2; ++ ++# Bug #1022: When a table contains a 'float' field, ++# and one of the functions MAX, MIN, or AVG is used on that field, ++# the system crashes. 
++ ++create table t1 (a float); ++insert into t1 values (1); ++select max(a),min(a),avg(a) from t1; ++drop table t1; ++ ++# ++# FLOAT/DOUBLE/DECIMAL handling ++# ++ ++create table t1 (f float, f2 float(24), f3 float(6,2), d double, d2 float(53), d3 double(10,3), de decimal, de2 decimal(6), de3 decimal(5,2), n numeric, n2 numeric(8), n3 numeric(7,6)); ++# We mask out Privileges column because it differs for embedded server ++--replace_column 8 # ++show full columns from t1; ++drop table t1; ++ ++create table t1 (a decimal(7,3) not null, key (a)); ++insert into t1 values ("0"),("-0.00"),("-0.01"),("-0.002"),("1"); ++select a from t1 order by a; ++select min(a) from t1; ++drop table t1; ++ ++--error 1425 ++create table t1 (a float(200,100), b double(200,100)); ++ ++# ++# float in a char(1) field ++# ++create table t1 (c20 char); ++insert into t1 values (5000.0); ++insert into t1 values (0.5e4); ++drop table t1; ++ ++# Errors ++ ++--error 1063 ++create table t1 (f float(54)); # Should give an error ++--disable_warnings ++drop table if exists t1; ++--enable_warnings ++ ++# Don't allow 'double unsigned' to be set to a negative value (Bug #7700) ++create table t1 (d1 double, d2 double unsigned); ++insert into t1 set d1 = -1.0; ++update t1 set d2 = d1; ++select * from t1; ++drop table t1; ++ ++# Ensure that maximum values as the result of number of decimals ++# being specified in table schema are enforced (Bug #7361) ++create table t1 (f float(4,3)); ++insert into t1 values (-11.0),(-11),("-11"),(11.0),(11),("11"); ++select * from t1; ++drop table if exists t1; ++create table t1 (f double(4,3)); ++insert into t1 values (-11.0),(-11),("-11"),(11.0),(11),("11"); ++select * from t1; ++drop table if exists t1; ++ ++# Check conversion of floats to character field (Bug #7774) ++create table t1 (c char(20)); ++insert into t1 values (5e-28); ++select * from t1; ++drop table t1; ++create table t1 (c char(6)); ++insert into t1 values (2e5),(2e6),(2e-4),(2e-5); ++select * from t1; ++drop table t1; ++ ++# ++# Test of comparison of integer with float-in-range (Bug #7840) ++# This is needed because some ODBC applications (like Foxpro) uses ++# floats for everything. 
++# ++ ++CREATE TABLE t1 ( ++ reckey int unsigned NOT NULL, ++ recdesc varchar(50) NOT NULL, ++ PRIMARY KEY (reckey) ++) ENGINE=MyISAM DEFAULT CHARSET=latin1; ++ ++INSERT INTO t1 VALUES (108, 'Has 108 as key'); ++INSERT INTO t1 VALUES (109, 'Has 109 as key'); ++select * from t1 where reckey=108; ++select * from t1 where reckey=1.08E2; ++select * from t1 where reckey=109; ++select * from t1 where reckey=1.09E2; ++drop table t1; ++ ++# ++# Bug #13372 (decimal union) ++# ++create table t1 (d double(10,1)); ++create table t2 (d double(10,9)); ++insert into t1 values ("100000000.0"); ++insert into t2 values ("1.23456780"); ++create table t3 select * from t2 union select * from t1; ++select * from t3; ++show create table t3; ++drop table t1, t2, t3; ++ ++ ++# ++# Bug #9855 (inconsistent column type for create select ++# ++create table t1 select 105213674794682365.00 + 0.0 x; ++show warnings; ++desc t1; ++drop table t1; ++ ++create table t1 select 0.0 x; ++desc t1; ++create table t2 select 105213674794682365.00 y; ++desc t2; ++create table t3 select x+y a from t1,t2; ++show warnings; ++desc t3; ++drop table t1,t2,t3; ++ ++# ++# Bug #22129: A small double precision number becomes zero ++# ++# check if underflows are detected correctly ++select 1e-308, 1.00000001e-300, 100000000e-300; ++ ++# check if overflows are detected correctly ++select 10e307; ++ ++# ++# Bug #19690: ORDER BY eliminates rows from the result ++# ++create table t1(a int, b double(8, 2)); ++insert into t1 values ++(1, 28.50), (1, 121.85), (1, 157.23), (1, 1351.00), (1, -1965.35), (1, 81.75), ++(1, 217.08), (1, 7.94), (4, 96.07), (4, 6404.65), (4, -6500.72), (2, 100.00), ++(5, 5.00), (5, -2104.80), (5, 2033.80), (5, 0.07), (5, 65.93), ++(3, -4986.24), (3, 5.00), (3, 4857.34), (3, 123.74), (3, 0.16), ++(6, -1695.31), (6, 1003.77), (6, 499.72), (6, 191.82); ++explain select sum(b) s from t1 group by a; ++select sum(b) s from t1 group by a; ++select sum(b) s from t1 group by a having s <> 0; ++select sum(b) s from t1 group by a having s <> 0 order by s; ++select sum(b) s from t1 group by a having s <=> 0; ++select sum(b) s from t1 group by a having s <=> 0 order by s; ++alter table t1 add key (a, b); ++explain select sum(b) s from t1 group by a; ++select sum(b) s from t1 group by a; ++select sum(b) s from t1 group by a having s <> 0; ++select sum(b) s from t1 group by a having s <> 0 order by s; ++select sum(b) s from t1 group by a having s <=> 0; ++select sum(b) s from t1 group by a having s <=> 0 order by s; ++drop table t1; ++ ++--echo End of 4.1 tests ++ ++# ++# bug #12694 (float(m,d) specifications) ++# ++ ++--error 1427 ++create table t1 (s1 float(0,2)); ++--error 1427 ++create table t1 (s1 float(1,2)); ++ ++# ++# MySQL Bugs: #11589: mysqltest --ps-protocol, strange output, float/double/real with zerofill ++# ++ ++CREATE TABLE t1 ( ++ f1 real zerofill, ++ f2 double zerofill, ++ f3 float zerofill); ++INSERT INTO t1 VALUES ( 0.314152e+1, 0.314152e+1, 0.314152e+1); ++ ++let $my_stmt= select f1, f2, f3 FROM t1; ++eval PREPARE stmt1 FROM '$my_stmt'; ++select f1, f2, f3 FROM t1; ++eval $my_stmt; ++EXECUTE stmt1; ++ ++DROP TABLE t1; ++# Bug #28121 "INSERT or UPDATE into DOUBLE(200,0) field being truncated to 31 digits" ++# ++ ++create table t1 (f1 double(200, 0)); ++insert into t1 values (1e199), (-1e199); ++insert into t1 values (1e200), (-1e200); ++insert into t1 values (2e200), (-2e200); ++select f1 + 0e0 from t1; ++drop table t1; ++ ++create table t1 (f1 float(30, 0)); ++insert into t1 values (1e29), (-1e29); ++insert into t1 
values (1e30), (-1e30); ++insert into t1 values (2e30), (-2e30); ++select f1 + 0e0 from t1; ++drop table t1; ++ ++# ++# Bug #12860 "Difference in zero padding of exponent between Unix and Windows" ++# ++ ++create table t1 (c char(6)); ++insert into t1 values (2e6),(2e-5); ++select * from t1; ++drop table t1; ++ ++# ++# Bug #21497 "DOUBLE truncated to unusable value" ++# ++ ++CREATE TABLE d1 (d DOUBLE); ++INSERT INTO d1 VALUES (1.7976931348623157E+308); ++SELECT * FROM d1; ++--error ER_ILLEGAL_VALUE_FOR_TYPE ++INSERT INTO d1 VALUES (1.79769313486232e+308); ++SELECT * FROM d1; ++DROP TABLE d1; ++ ++# ++# Bug #26788 "mysqld (debug) aborts when inserting specific numbers into char ++# fields" ++# ++ ++create table t1 (a char(20)); ++insert into t1 values (1.225e-05); ++select a+0 from t1; ++drop table t1; ++ ++# ++# Bug #27483: Casting 'scientific notation type' to 'unsigned bigint' fails on ++# windows. ++# ++ ++create table t1(d double, u bigint unsigned); ++ ++insert into t1(d) values (9.22337203685479e18), ++ (1.84e19); ++ ++update t1 set u = d; ++select u from t1; ++ ++drop table t1; ++ ++# ++# Bug #21205: Different number of digits for float/doble/real in --ps-protocol ++# ++ ++CREATE TABLE t1 (f1 DOUBLE); ++INSERT INTO t1 VALUES(-1.79769313486231e+308); ++SELECT f1 FROM t1; ++DROP TABLE t1; ++ ++--echo # ++--echo # Bug#12406055 BUFFER OVERFLOW OF VARIABLE 'BUFF' IN STRING::SET_REAL ++--echo # ++ ++--echo # Ignoring output from misc. float operations ++--disable_result_log ++ ++let $nine_65= ++99999999999999999999999999999999999999999999999999999999999999999; ++ ++select format(-1.7976931348623157E+307,256) as foo; ++select least(-1.1111111111111111111111111, ++ - group_concat(1.7976931348623157E+308)) as foo; ++eval select concat((truncate((-1.7976931348623157E+307),(0x1e))), ++ ($nine_65)) into @a; ++--enable_result_log ++ ++--echo End of 5.0 tests ++ ++--echo # ++--echo # Bug#12368853 FORMAT() CRASHES WITH LARGE NUMBERS AFTER TRUNCATE... ++--echo # ++ ++select format(truncate('1.7976931348623157E+308',-12),1,'fr_BE') as foo; ++ ++ ++--echo # ++--echo # MDEV-17249 MAKETIME(-1e50,0,0) returns a wrong result ++--echo # ++ ++SELECT LEFT('a',EXP(50)); ++SELECT LEFT('a', COALESCE(1e30)); ++ ++CREATE TABLE t1 (a FLOAT); ++INSERT INTO t1 VALUES (1e30); ++SELECT LEFT('a',a), LEFT('a',1e30) FROM t1; ++DROP TABLE t1; ++ ++PREPARE stmt FROM 'SELECT LEFT(111,?)'; ++SET @a=1e30; ++EXECUTE stmt USING @a; ++DEALLOCATE PREPARE stmt; ++ ++CREATE TABLE t1 (a INT); ++INSERT INTO t1 VALUES (1),(2),(3); ++SELECT LEFT('a',(SELECT 1e30 FROM t1 LIMIT 1)); ++DROP TABLE t1; ++ ++CREATE TABLE t1 (a DOUBLE); ++INSERT INTO t1 VALUES (1e30),(0); ++SELECT LEFT('a', SUM(a)) FROM t1; ++SELECT LEFT('a', AVG(a)) FROM t1; ++DROP TABLE t1; ++ ++ ++--echo # ++--echo # Bug #13500371 63704: CONVERSION OF '1.' 
TO A NUMBER GIVES ERROR 1265 ++--echo # (WARN_DATA_TRUNCATED) ++--echo # ++ ++CREATE TABLE t1 (f FLOAT); ++INSERT INTO t1 VALUES ('1.'); ++INSERT INTO t1 VALUES ('2.0.'); ++INSERT INTO t1 VALUES ('.'); ++SELECT * FROM t1 ORDER BY f; ++DROP TABLE t1; ++ ++ ++--echo # ++--echo # Start of 10.0 tests ++--echo # ++ ++--echo # ++--echo # MDEV-6950 Bad results with joins comparing DATE/DATETIME and INT/DECIMAL/DOUBLE/ENUM/VARCHAR columns ++--echo # ++CREATE TABLE t1 (a DATETIME PRIMARY KEY); ++INSERT INTO t1 VALUES ('1999-01-01 00:00:00'); ++CREATE TABLE t2 (a DOUBLE); ++INSERT INTO t2 VALUES (19990101000000); ++INSERT INTO t2 VALUES (990101000000); ++SELECT t1.* FROM t1,t2 WHERE t1.a=t2.a; ++SELECT t1.* FROM t1 LEFT JOIN t2 ON t1.a=t2.a; ++ALTER TABLE t2 ADD PRIMARY KEY(a); ++SELECT t1.* FROM t1,t2 WHERE t1.a=t2.a; ++SELECT t1.* FROM t1 LEFT JOIN t2 ON t1.a=t2.a; ++--echo # t2 should NOT be eliminated ++EXPLAIN SELECT t1.* FROM t1 LEFT JOIN t2 ON t1.a=t2.a; ++DROP TABLE t1,t2; ++ ++--echo # ++--echo # MDEV-6971 Bad results with joins comparing TIME and DOUBLE/DECIMAL columns ++--echo # ++CREATE TABLE t1 (a TIME(6) PRIMARY KEY); ++INSERT INTO t1 VALUES ('10:20:30'); ++CREATE TABLE t2 (a DOUBLE); ++INSERT INTO t2 VALUES (102030),(102030.000000001); ++SELECT t1.* FROM t1 JOIN t2 USING(a); ++SELECT t1.* FROM t1 LEFT JOIN t2 USING(a); ++ALTER TABLE t2 ADD PRIMARY KEY(a); ++SELECT t1.* FROM t1 JOIN t2 USING(a); ++SELECT t1.* FROM t1 LEFT JOIN t2 USING(a); ++--echo # t2 should NOT be elimitated ++EXPLAIN SELECT t1.* FROM t1 LEFT JOIN t2 USING(a); ++DROP TABLE t1,t2; ++ ++--echo # ++--echo # End of 10.0 tests ++--echo # diff --cc mysql-test/t/alter_table.test index 28d8c5bf5e9,28dfbaa27ec..9703b9a1455 --- a/mysql-test/t/alter_table.test +++ b/mysql-test/t/alter_table.test @@@ -287,7 -282,7 +287,7 @@@ drop table t1 # set names koi8r; create table t1 (a char(10) character set koi8r); --insert into t1 values ('����'); ++insert into t1 values ('����'); select a,hex(a) from t1; alter table t1 change a a char(10) character set cp1251; select a,hex(a) from t1; @@@ -367,7 -362,7 +367,7 @@@ DROP TABLE T12207 # modified. In other words, the values were reinterpreted # as UTF8 instead of being converted. 
create table t1 (a text) character set koi8r; --insert into t1 values (_koi8r'����'); ++insert into t1 values (_koi8r'����'); select hex(a) from t1; alter table t1 convert to character set cp1251; select hex(a) from t1; @@@ -1296,564 -1273,52 +1296,564 @@@ MODIFY COLUMN `consultant_id` BIGINT SHOW CREATE TABLE t1; DROP TABLE t1; - # - # Test of ALTER TABLE IF [NOT] EXISTS - # + --echo # + --echo # BUG#27788685: NO WARNING WHEN TRUNCATING A STRING WITH DATA LOSS + --echo # - CREATE TABLE t1 ( - id INT(11) NOT NULL, - x_param INT(11) DEFAULT NULL, - PRIMARY KEY (id) - ) ENGINE=MYISAM; + SET GLOBAL max_allowed_packet=17825792; - ALTER TABLE t1 ADD COLUMN IF NOT EXISTS id INT, - ADD COLUMN IF NOT EXISTS lol INT AFTER id; - ALTER TABLE t1 ADD COLUMN IF NOT EXISTS lol INT AFTER id; - ALTER TABLE t1 DROP COLUMN IF EXISTS lol; - ALTER TABLE t1 DROP COLUMN IF EXISTS lol; + --connect(con1, localhost, root,,) + CREATE TABLE t1 (t1_fld1 TEXT); + CREATE TABLE t2 (t2_fld1 MEDIUMTEXT); + CREATE TABLE t3 (t3_fld1 LONGTEXT); - ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); - ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); - ALTER TABLE t1 MODIFY IF EXISTS lol INT; + INSERT INTO t1 VALUES (REPEAT('a',300)); + INSERT INTO t2 VALUES (REPEAT('b',65680)); + INSERT INTO t3 VALUES (REPEAT('c',16777300)); - DROP INDEX IF EXISTS x_param ON t1; - DROP INDEX IF EXISTS x_param ON t1; - CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); - CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); - SHOW CREATE TABLE t1; - DROP TABLE t1; + SELECT LENGTH(t1_fld1) FROM t1; + SELECT LENGTH(t2_fld1) FROM t2; + SELECT LENGTH(t3_fld1) FROM t3; - CREATE TABLE t1 ( - id INT(11) NOT NULL, - x_param INT(11) DEFAULT NULL, - PRIMARY KEY (id) - ) ENGINE=INNODB; + --echo # With strict mode + SET SQL_MODE='STRICT_ALL_TABLES'; - CREATE TABLE t2 ( - id INT(11) NOT NULL) ENGINE=INNODB; + --error ER_DATA_TOO_LONG + ALTER TABLE t1 CHANGE `t1_fld1` `my_t1_fld1` TINYTEXT; + --error ER_DATA_TOO_LONG + ALTER TABLE t2 CHANGE `t2_fld1` `my_t2_fld1` TEXT; + --error ER_DATA_TOO_LONG + ALTER TABLE t3 CHANGE `t3_fld1` `my_t3_fld1` MEDIUMTEXT; - ALTER TABLE t1 ADD COLUMN IF NOT EXISTS id INT, - ADD COLUMN IF NOT EXISTS lol INT AFTER id; - ALTER TABLE t1 ADD COLUMN IF NOT EXISTS lol INT AFTER id; - ALTER TABLE t1 DROP COLUMN IF EXISTS lol; - ALTER TABLE t1 DROP COLUMN IF EXISTS lol; + --echo # With non-strict mode + SET SQL_MODE=''; - ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); - ALTER TABLE t1 ADD KEY IF NOT EXISTS x_param(x_param); - ALTER TABLE t1 MODIFY IF EXISTS lol INT; + ALTER TABLE t1 CHANGE `t1_fld1` `my_t1_fld1` TINYTEXT; + ALTER TABLE t2 CHANGE `t2_fld1` `my_t2_fld1` TEXT; + ALTER TABLE t3 CHANGE `t3_fld1` `my_t3_fld1` MEDIUMTEXT; - DROP INDEX IF EXISTS x_param ON t1; - DROP INDEX IF EXISTS x_param ON t1; - CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); + SELECT LENGTH(my_t1_fld1) FROM t1; + SELECT LENGTH(my_t2_fld1) FROM t2; + SELECT LENGTH(my_t3_fld1) FROM t3; + + # Cleanup + --disconnect con1 + --source include/wait_until_disconnected.inc + + --connection default + DROP TABLE t1, t2, t3; + + SET SQL_MODE=default; + SET GLOBAL max_allowed_packet=default; +CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param); +SHOW CREATE TABLE t1; + +ALTER TABLE t2 ADD FOREIGN KEY IF NOT EXISTS fk(id) REFERENCES t1(id); +ALTER TABLE t2 ADD FOREIGN KEY IF NOT EXISTS fk(id) REFERENCES t1(id); +ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS fk; +ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS fk; +SHOW CREATE TABLE t2; +ALTER TABLE t2 ADD FOREIGN 
KEY (id) REFERENCES t1(id); +ALTER TABLE t2 ADD FOREIGN KEY IF NOT EXISTS t2_ibfk_1(id) REFERENCES t1(id); +ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS t2_ibfk_1; +ALTER TABLE t2 DROP FOREIGN KEY IF EXISTS t2_ibfk_1; +SHOW CREATE TABLE t2; + +DROP TABLE t2; +CREATE TABLE t2 ( + id INT(11) NOT NULL); +ALTER TABLE t2 ADD COLUMN a INT, ADD COLUMN IF NOT EXISTS a INT; +ALTER TABLE t2 ADD KEY k_id(id), ADD KEY IF NOT EXISTS k_id(id); +SHOW CREATE TABLE t2; +ALTER TABLE t2 DROP KEY k_id, DROP KEY IF EXISTS k_id; +ALTER TABLE t2 DROP COLUMN a, DROP COLUMN IF EXISTS a; +SHOW CREATE TABLE t2; + +DROP TABLE t2; +DROP TABLE t1; + +CREATE TABLE t1 ( + `transaction_id` int(11) NOT NULL DEFAULT '0', + KEY `transaction_id` (`transaction_id`)); +ALTER TABLE t1 DROP KEY IF EXISTS transaction_id, ADD PRIMARY KEY IF NOT EXISTS (transaction_id); +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +--echo # Bug#11748057 (formerly known as 34972): ALTER TABLE statement doesn't +--echo # identify correct column name. +--echo # + +CREATE TABLE t1 (c1 int unsigned , c2 char(100) not null default ''); +ALTER TABLE t1 ADD c3 char(16) NOT NULL DEFAULT '' AFTER c2, + MODIFY c2 char(100) NOT NULL DEFAULT '' AFTER c1; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +--echo # +--echo # WL#5534 Online ALTER, Phase 1 +--echo # + +--echo # Single thread tests. +--echo # See innodb_mysql_sync.test for multi thread tests. + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1(a INT PRIMARY KEY, b INT) engine=InnoDB; +CREATE TABLE m1(a INT PRIMARY KEY, b INT) engine=MyISAM; +INSERT INTO t1 VALUES (1,1), (2,2); +INSERT INTO m1 VALUES (1,1), (2,2); + +--echo # +--echo # 1: Test ALGORITHM keyword +--echo # + +--echo # --enable_info allows us to see how many rows were updated +--echo # by ALTER TABLE. in-place will show 0 rows, while copy > 0. 
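The same signal is visible in an ordinary client session: an ALTER executed in place reports "0 rows affected", while one that rebuilds the table via copy reports the number of rows copied. A rough sketch, using a throwaway table t_algo (not the t1/m1 tables of this test):

CREATE TABLE t_algo (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
INSERT INTO t_algo VALUES (1,1),(2,2);
ALTER TABLE t_algo ADD INDEX i_inplace(b), ALGORITHM=INPLACE;   # expect 0 rows affected
ALTER TABLE t_algo ADD INDEX i_copy(b), ALGORITHM=COPY;         # expect 2 rows affected (table copy)
DROP TABLE t_algo;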
+ +--enable_info +ALTER TABLE t1 ADD INDEX i1(b); +ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= DEFAULT; +ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= COPY; +ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE; +--error ER_UNKNOWN_ALTER_ALGORITHM +ALTER TABLE t1 ADD INDEX i5(b), ALGORITHM= INVALID; + +ALTER TABLE m1 ENABLE KEYS; +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= DEFAULT; +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY; +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE; +--disable_info + +ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4; + +--echo # +--echo # 2: Test ALGORITHM + old_alter_table +--echo # + +--enable_info +SET SESSION old_alter_table= 1; +ALTER TABLE t1 ADD INDEX i1(b); +ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= DEFAULT; +ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= COPY; +ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE; +SET SESSION old_alter_table= 0; +--disable_info + +ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4; + +--echo # +--echo # 3: Test unsupported in-place operation +--echo # + +ALTER TABLE t1 ADD COLUMN (c1 INT); +ALTER TABLE t1 ADD COLUMN (c2 INT), ALGORITHM= DEFAULT; +ALTER TABLE t1 ADD COLUMN (c3 INT), ALGORITHM= COPY; +ALTER TABLE t1 ADD COLUMN (c4 INT), ALGORITHM= INPLACE; + +ALTER TABLE t1 DROP COLUMN c1, DROP COLUMN c2, DROP COLUMN c3, DROP COLUMN c4; + +--echo # +--echo # 4: Test LOCK keyword +--echo # + +--enable_info +ALTER TABLE t1 ADD INDEX i1(b), LOCK= DEFAULT; +ALTER TABLE t1 ADD INDEX i2(b), LOCK= NONE; +ALTER TABLE t1 ADD INDEX i3(b), LOCK= SHARED; +ALTER TABLE t1 ADD INDEX i4(b), LOCK= EXCLUSIVE; +--error ER_UNKNOWN_ALTER_LOCK +ALTER TABLE t1 ADD INDEX i5(b), LOCK= INVALID; +--disable_info + +ALTER TABLE m1 ENABLE KEYS, LOCK= DEFAULT; +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER TABLE m1 ENABLE KEYS, LOCK= NONE; +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER TABLE m1 ENABLE KEYS, LOCK= SHARED; +ALTER TABLE m1 ENABLE KEYS, LOCK= EXCLUSIVE; + +ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4; + +--echo # +--echo # 5: Test ALGORITHM + LOCK +--echo # + +--enable_info +ALTER TABLE t1 ADD INDEX i1(b), ALGORITHM= INPLACE, LOCK= NONE; +ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= INPLACE, LOCK= SHARED; +ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= INPLACE, LOCK= EXCLUSIVE; +--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON +ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= COPY, LOCK= NONE; +ALTER TABLE t1 ADD INDEX i5(b), ALGORITHM= COPY, LOCK= SHARED; +ALTER TABLE t1 ADD INDEX i6(b), ALGORITHM= COPY, LOCK= EXCLUSIVE; + +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= NONE; +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= SHARED; +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= EXCLUSIVE; +--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= NONE; +# This works because the lock will be SNW for the copy phase. +# It will still require exclusive lock for actually enabling keys. 
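# (SNW here is the SHARED_NO_WRITE metadata lock: concurrent readers are still
#  allowed while the rows are copied, which is why LOCK= SHARED can be granted,
#  whereas LOCK= NONE cannot; the final step that enables the keys still has to
#  upgrade to an exclusive lock, as noted above.)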
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= SHARED; +ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= EXCLUSIVE; +--disable_info + +DROP TABLE t1, m1; + +--echo # +--echo # 6: Possible deadlock involving thr_lock.c +--echo # + +CREATE TABLE t1(a INT PRIMARY KEY, b INT); +INSERT INTO t1 VALUES (1,1), (2,2); + +START TRANSACTION; +INSERT INTO t1 VALUES (3,3); + +--echo # Connection con1 +connect (con1, localhost, root); +--echo # Sending: +--send ALTER TABLE t1 DISABLE KEYS + +--echo # Connection default +connection default; +--echo # Waiting until ALTER TABLE is blocked. +let $wait_condition= + SELECT COUNT(*) = 1 FROM information_schema.processlist + WHERE state = "Waiting for table metadata lock" AND + info = "ALTER TABLE t1 DISABLE KEYS"; +--source include/wait_condition.inc +UPDATE t1 SET b = 4; +COMMIT; + +--echo # Connection con1 +connection con1; +--echo # Reaping: ALTER TABLE t1 DISABLE KEYS +--reap +disconnect con1; +--source include/wait_until_disconnected.inc + +--echo # Connection default +connection default; +DROP TABLE t1; + +--echo # +--echo # 7: Which operations require copy and which can be done in-place? +--echo # +--echo # Test which ALTER TABLE operations are done in-place and +--echo # which operations are done using temporary table copy. +--echo # +--echo # --enable_info allows us to see how many rows were updated +--echo # by ALTER TABLE. in-place will show 0 rows, while copy > 0. +--echo # + +--disable_warnings +DROP TABLE IF EXISTS ti1, ti2, ti3, tm1, tm2, tm3; +--enable_warnings + +--echo # Single operation tests + +CREATE TABLE ti1(a INT NOT NULL, b INT, c INT) engine=InnoDB; +CREATE TABLE tm1(a INT NOT NULL, b INT, c INT) engine=MyISAM; +CREATE TABLE ti2(a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT) engine=InnoDB; +CREATE TABLE tm2(a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT) engine=MyISAM; +INSERT INTO ti1 VALUES (1,1,1), (2,2,2); +INSERT INTO ti2 VALUES (1,1,1), (2,2,2); +INSERT INTO tm1 VALUES (1,1,1), (2,2,2); +INSERT INTO tm2 VALUES (1,1,1), (2,2,2); + +--enable_info +ALTER TABLE ti1; +ALTER TABLE tm1; + +ALTER TABLE ti1 ADD COLUMN d VARCHAR(200); +ALTER TABLE tm1 ADD COLUMN d VARCHAR(200); +ALTER TABLE ti1 ADD COLUMN d2 VARCHAR(200); +ALTER TABLE tm1 ADD COLUMN d2 VARCHAR(200); +ALTER TABLE ti1 ADD COLUMN e ENUM('a', 'b') FIRST; +ALTER TABLE tm1 ADD COLUMN e ENUM('a', 'b') FIRST; +ALTER TABLE ti1 ADD COLUMN f INT AFTER a; +ALTER TABLE tm1 ADD COLUMN f INT AFTER a; + +ALTER TABLE ti1 ADD INDEX ii1(b); +ALTER TABLE tm1 ADD INDEX im1(b); +ALTER TABLE ti1 ADD UNIQUE INDEX ii2 (c); +ALTER TABLE tm1 ADD UNIQUE INDEX im2 (c); +ALTER TABLE ti1 ADD FULLTEXT INDEX ii3 (d); +ALTER TABLE tm1 ADD FULLTEXT INDEX im3 (d); +ALTER TABLE ti1 ADD FULLTEXT INDEX ii4 (d2); +ALTER TABLE tm1 ADD FULLTEXT INDEX im4 (d2); + +# Bug#14140038 INCONSISTENT HANDLING OF FULLTEXT INDEXES IN ALTER TABLE +--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON +ALTER TABLE ti1 ADD PRIMARY KEY(a), ALGORITHM=INPLACE; +ALTER TABLE ti1 ADD PRIMARY KEY(a); +ALTER TABLE tm1 ADD PRIMARY KEY(a); + +ALTER TABLE ti1 DROP INDEX ii3; +ALTER TABLE tm1 DROP INDEX im3; + +ALTER TABLE ti1 DROP COLUMN d2; +ALTER TABLE tm1 DROP COLUMN d2; + +ALTER TABLE ti1 ADD CONSTRAINT fi1 FOREIGN KEY (b) REFERENCES ti2(a); +ALTER TABLE tm1 ADD CONSTRAINT fm1 FOREIGN KEY (b) REFERENCES tm2(a); + +ALTER TABLE ti1 ALTER COLUMN b SET DEFAULT 1; +ALTER TABLE tm1 ALTER COLUMN b SET DEFAULT 1; +ALTER TABLE ti1 ALTER COLUMN b DROP DEFAULT; +ALTER TABLE tm1 ALTER COLUMN b DROP DEFAULT; + +# This will set both 
ALTER_COLUMN_NAME and COLUMN_DEFAULT_VALUE +ALTER TABLE ti1 CHANGE COLUMN f g INT; +ALTER TABLE tm1 CHANGE COLUMN f g INT; +ALTER TABLE ti1 CHANGE COLUMN g h VARCHAR(20); +ALTER TABLE tm1 CHANGE COLUMN g h VARCHAR(20); +ALTER TABLE ti1 MODIFY COLUMN e ENUM('a', 'b', 'c'); +ALTER TABLE tm1 MODIFY COLUMN e ENUM('a', 'b', 'c'); +ALTER TABLE ti1 MODIFY COLUMN e INT; +ALTER TABLE tm1 MODIFY COLUMN e INT; +# This will set both ALTER_COLUMN_ORDER and COLUMN_DEFAULT_VALUE +ALTER TABLE ti1 MODIFY COLUMN e INT AFTER h; +ALTER TABLE tm1 MODIFY COLUMN e INT AFTER h; +ALTER TABLE ti1 MODIFY COLUMN e INT FIRST; +ALTER TABLE tm1 MODIFY COLUMN e INT FIRST; +# This will set both ALTER_COLUMN_NOT_NULLABLE and COLUMN_DEFAULT_VALUE +--disable_info +# NULL -> NOT NULL only allowed INPLACE if strict sql_mode is on. +SET @orig_sql_mode = @@sql_mode; +SET @@sql_mode = 'STRICT_TRANS_TABLES'; +--enable_info +ALTER TABLE ti1 MODIFY COLUMN c INT NOT NULL; +--disable_info +SET @@sql_mode = @orig_sql_mode; +--enable_info +ALTER TABLE tm1 MODIFY COLUMN c INT NOT NULL; +# This will set both ALTER_COLUMN_NULLABLE and COLUMN_DEFAULT_VALUE +ALTER TABLE ti1 MODIFY COLUMN c INT NULL; +ALTER TABLE tm1 MODIFY COLUMN c INT NULL; +# This will set both ALTER_COLUMN_EQUAL_PACK_LENGTH and COLUMN_DEFAULT_VALUE +ALTER TABLE ti1 MODIFY COLUMN h VARCHAR(30); +ALTER TABLE tm1 MODIFY COLUMN h VARCHAR(30); +ALTER TABLE ti1 MODIFY COLUMN h VARCHAR(30) AFTER d; +ALTER TABLE tm1 MODIFY COLUMN h VARCHAR(30) AFTER d; + +ALTER TABLE ti1 DROP COLUMN h; +ALTER TABLE tm1 DROP COLUMN h; + +ALTER TABLE ti1 DROP INDEX ii2; +ALTER TABLE tm1 DROP INDEX im2; +ALTER TABLE ti1 DROP PRIMARY KEY; +ALTER TABLE tm1 DROP PRIMARY KEY; + +ALTER TABLE ti1 DROP FOREIGN KEY fi1; +ALTER TABLE tm1 DROP FOREIGN KEY fm1; + +ALTER TABLE ti1 RENAME TO ti3; +ALTER TABLE tm1 RENAME TO tm3; +ALTER TABLE ti3 RENAME TO ti1; +ALTER TABLE tm3 RENAME TO tm1; + +ALTER TABLE ti1 ORDER BY b; +ALTER TABLE tm1 ORDER BY b; + +ALTER TABLE ti1 CONVERT TO CHARACTER SET utf16; +ALTER TABLE tm1 CONVERT TO CHARACTER SET utf16; +ALTER TABLE ti1 DEFAULT CHARACTER SET utf8; +ALTER TABLE tm1 DEFAULT CHARACTER SET utf8; + +ALTER TABLE ti1 FORCE; +ALTER TABLE tm1 FORCE; + +ALTER TABLE ti1 AUTO_INCREMENT 3; +ALTER TABLE tm1 AUTO_INCREMENT 3; +ALTER TABLE ti1 AVG_ROW_LENGTH 10; +ALTER TABLE tm1 AVG_ROW_LENGTH 10; +ALTER TABLE ti1 CHECKSUM 1; +ALTER TABLE tm1 CHECKSUM 1; +ALTER TABLE ti1 COMMENT 'test'; +ALTER TABLE tm1 COMMENT 'test'; +ALTER TABLE ti1 MAX_ROWS 100; +ALTER TABLE tm1 MAX_ROWS 100; +ALTER TABLE ti1 MIN_ROWS 1; +ALTER TABLE tm1 MIN_ROWS 1; +ALTER TABLE ti1 PACK_KEYS 1; +ALTER TABLE tm1 PACK_KEYS 1; + +--disable_info +DROP TABLE ti1, ti2, tm1, tm2; + +--echo # Tests of >1 operation (InnoDB) + +CREATE TABLE ti1(a INT PRIMARY KEY AUTO_INCREMENT, b INT) engine=InnoDB; +INSERT INTO ti1(b) VALUES (1), (2); + +--enable_info +ALTER TABLE ti1 RENAME TO ti3, ADD INDEX ii1(b); + +ALTER TABLE ti3 DROP INDEX ii1, AUTO_INCREMENT 5; +--disable_info +INSERT INTO ti3(b) VALUES (5); +--enable_info +ALTER TABLE ti3 ADD INDEX ii1(b), AUTO_INCREMENT 7; +--disable_info +INSERT INTO ti3(b) VALUES (7); +SELECT * FROM ti3; + +DROP TABLE ti3; + +--echo # +--echo # 8: Scenario in which ALTER TABLE was returning an unwarranted +--echo # ER_ILLEGAL_HA error at some point during work on this WL. 
+--echo # + +CREATE TABLE tm1(i INT DEFAULT 1) engine=MyISAM; +ALTER TABLE tm1 ADD INDEX ii1(i), ALTER COLUMN i DROP DEFAULT; +DROP TABLE tm1; + +# +# MDEV-4435 Server crashes in my_strcasecmp_utf8 on ADD KEY IF NOT EXISTS with implicit name when the key exists. +# +create table if not exists t1 (i int); +alter table t1 add key (i); +alter table t1 add key if not exists (i); +DROP TABLE t1; + +# +# MDEV-4436 CHANGE COLUMN IF EXISTS does not work and throws wrong warning. +# +create table t1 (a int); +alter table t1 change column if exists a b bigint; +show create table t1; +DROP TABLE t1; + +# +# MDEV-4437 ALTER TABLE .. ADD UNIQUE INDEX IF NOT EXISTS causes syntax error. +# + +create table t1 (i int); +alter table t1 add unique index if not exists idx(i); +alter table t1 add unique index if not exists idx(i); +show create table t1; +DROP TABLE t1; + +# +# MDEV-8358 ADD PRIMARY KEY IF NOT EXISTS -> ERROR 1068 (42000): Multiple primary key +# + +CREATE TABLE t1 ( + `event_id` bigint(20) unsigned NOT NULL DEFAULT '0', + `market_id` bigint(20) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`event_id`,`market_id`) + ); +ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS event_id (event_id,market_id); +DROP TABLE t1; + +--echo # +--echo # MDEV-11126 Crash while altering persistent virtual column +--echo # + +CREATE TABLE `tab1` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `field2` set('option1','option2','option3','option4') NOT NULL, + `field3` set('option1','option2','option3','option4','option5') NOT NULL, + `field4` set('option1','option2','option3','option4') NOT NULL, + `field5` varchar(32) NOT NULL, + `field6` varchar(32) NOT NULL, + `field7` varchar(32) NOT NULL, + `field8` varchar(32) NOT NULL, + `field9` int(11) NOT NULL DEFAULT '1', + `field10` varchar(16) NOT NULL, + `field11` enum('option1','option2','option3') NOT NULL DEFAULT 'option1', + `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT, + PRIMARY KEY (`id`) +) DEFAULT CHARSET=latin1; + +ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128); +SHOW CREATE TABLE `tab1`; +ALTER TABLE `tab1` CHANGE COLUMN v_col `v_col` varchar(128) AS (IF(field11='option1',CONCAT_WS(":","field1",field2,field3,field4,field5,field6,field7,field8,field9,field10), CONCAT_WS(":","field1",field11,field2,field3,field4,field5,field6,field7,field8,field9,field10))) PERSISTENT; +SHOW CREATE TABLE `tab1`; +DROP TABLE `tab1`; + +--echo # +--echo # MDEV-11548 Reproducible server crash after the 2nd ALTER TABLE ADD FOREIGN KEY IF NOT EXISTS +--echo # + +CREATE TABLE t1 (id INT UNSIGNED NOT NULL PRIMARY KEY); +CREATE TABLE t2 (id1 INT UNSIGNED NOT NULL); + +ALTER TABLE t2 +ADD FOREIGN KEY IF NOT EXISTS (id1) + REFERENCES t1 (id); + +ALTER TABLE t2 +ADD FOREIGN KEY IF NOT EXISTS (id1) +REFERENCES t1 (id); + +DROP TABLE t2; +DROP TABLE t1; + + +--echo # +--echo # MDEV-6390 CONVERT TO CHARACTER SET utf8 doesn't change DEFAULT CHARSET. 
+--echo # + +CREATE TABLE t1 (id int(11) NOT NULL, a int(11) NOT NULL, b int(11)) + ENGINE=InnoDB DEFAULT CHARSET=latin1; +SHOW CREATE TABLE t1; +ALTER TABLE t1 CONVERT TO CHARACTER SET utf8; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +--echo # +--echo # +--echo # MDEV-15308 +--echo # Assertion `ha_alter_info->alter_info->drop_list.elements > 0' failed +--echo # in ha_innodb::prepare_inplace_alter_table +--echo # + +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP COLUMN b; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP INDEX IF EXISTS fk, DROP COLUMN b; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b INT, c INT, KEY(c)) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP COLUMN c; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b INT, c INT, KEY c1(c)) ENGINE=InnoDB; +ALTER TABLE t1 DROP FOREIGN KEY IF EXISTS fk, DROP INDEX c1; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB; +ALTER TABLE t1 DROP INDEX IF EXISTS fk, DROP COLUMN IF EXISTS c; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +--echo # +--echo # MDEV-14668 ADD PRIMARY KEY IF NOT EXISTS on composite key +--echo # +CREATE TABLE t1 ( + `ID` BIGINT(20) NOT NULL, + `RANK` MEDIUMINT(4) NOT NULL, + `CHECK_POINT` BIGINT(20) NOT NULL, + UNIQUE INDEX `HORIZON_UIDX01` (`ID`, `RANK`) + ) ENGINE=InnoDB; + +ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`); +SHOW CREATE TABLE t1; +ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`); +DROP TABLE t1; + diff --cc mysql-test/t/func_time.test index 1baded9fef7,4497a4d9668..cb8f35c3627 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@@ -1695,8 -1626,62 +1695,61 @@@ DO TO_DAYS(SEC_TO_TIME(TIME(CEILING(UUI --enable_warnings DO TO_DAYS(SEC_TO_TIME(MAKEDATE('',RAND(~(''))))); -SELECT TO_DAYS(SEC_TO_TIME(MAKEDATE(0,RAND(~0)))); SELECT SEC_TO_TIME(MAKEDATE(0,RAND(~0))); + # + # MDEV-16810 AddressSanitizer: stack-buffer-overflow in int10_to_str + # + SELECT PERIOD_DIFF(2018, AES_ENCRYPT('Rae Bareli', 'Rae Bareli')); + + + --echo # + --echo # MDEV-17249 MAKETIME(-1e50,0,0) returns a wrong result + --echo # + + --vertical_results + SELECT + MAKETIME(1e10,0,0), + MAKETIME(-1e10,0,0), + MAKETIME(1e50,0,0), + MAKETIME(-1e50,0,0), + MAKETIME(COALESCE(1e50),0,0), + MAKETIME(COALESCE(-1e50),0,0); + --horizontal_results + + CREATE TABLE t1 (a FLOAT); + INSERT INTO t1 VALUES (1e30),(-1e30); + SELECT MAKETIME(a,0,0) FROM t1; + DROP TABLE t1; + + --echo # + --echo # MDEV-17244 MAKETIME(900,0,0.111) returns a wrong result + --echo # + + SELECT MAKETIME(900,0,0); + SELECT MAKETIME(900,0,0.1); + SELECT MAKETIME(900,0,0.11); + SELECT MAKETIME(900,0,0.111); + SELECT MAKETIME(900,0,0.1111); + SELECT MAKETIME(900,0,0.11111); + SELECT MAKETIME(900,0,0.111111); + SELECT MAKETIME(900,0,0.1111111); + SELECT MAKETIME(900,0,0.11111111); + SELECT MAKETIME(900,0,0.111111111); + SELECT MAKETIME(900,0,EXP(1)); + + SELECT MAKETIME(-900,0,0); + SELECT MAKETIME(-900,0,0.1); + SELECT MAKETIME(-900,0,0.11); + SELECT MAKETIME(-900,0,0.111); + SELECT MAKETIME(-900,0,0.1111); + SELECT MAKETIME(-900,0,0.11111); + SELECT MAKETIME(-900,0,0.111111); + SELECT MAKETIME(-900,0,0.1111111); + SELECT MAKETIME(-900,0,0.11111111); + SELECT MAKETIME(-900,0,0.111111111); + SELECT MAKETIME(-900,0,EXP(1)); + --echo # --echo # End of 5.5 tests diff --cc mysql-test/t/grant.test index 
156a55e0466,220cc0a248c..feca35086f0 --- a/mysql-test/t/grant.test +++ b/mysql-test/t/grant.test @@@ -210,26 -210,26 +210,26 @@@ drop user mysqltest_1@localhost # Bug#3403 Wrong encoding in SHOW GRANTS output # SET NAMES koi8r; --CREATE DATABASE ��; --USE ��; --CREATE TABLE ��� (��� INT); ++CREATE DATABASE ��; ++USE ��; ++CREATE TABLE ��� (��� INT); --GRANT SELECT ON ��.* TO ����@localhost; --SHOW GRANTS FOR ����@localhost; --REVOKE SELECT ON ��.* FROM ����@localhost; ++GRANT SELECT ON ��.* TO ����@localhost; ++SHOW GRANTS FOR ����@localhost; ++REVOKE SELECT ON ��.* FROM ����@localhost; --GRANT SELECT ON ��.��� TO ����@localhost; --SHOW GRANTS FOR ����@localhost; --REVOKE SELECT ON ��.��� FROM ����@localhost; ++GRANT SELECT ON ��.��� TO ����@localhost; ++SHOW GRANTS FOR ����@localhost; ++REVOKE SELECT ON ��.��� FROM ����@localhost; --GRANT SELECT (���) ON ��.��� TO ����@localhost; --SHOW GRANTS FOR ����@localhost; --REVOKE SELECT (���) ON ��.��� FROM ����@localhost; ++GRANT SELECT (���) ON ��.��� TO ����@localhost; ++SHOW GRANTS FOR ����@localhost; ++REVOKE SELECT (���) ON ��.��� FROM ����@localhost; # Revoke does not drop user. Leave a clean user table for the next tests. --DROP USER ����@localhost; ++DROP USER ����@localhost; --DROP DATABASE ��; ++DROP DATABASE ��; SET NAMES latin1; # @@@ -512,14 -512,14 +512,14 @@@ set @user123="non-existent" select * from mysql.db where user=@user123; set names koi8r; --create database ��; --grant select on ��.* to root@localhost; --select hex(Db) from mysql.db where Db='��'; ++create database ��; ++grant select on ��.* to root@localhost; ++select hex(Db) from mysql.db where Db='��'; show grants for root@localhost; flush privileges; show grants for root@localhost; --drop database ��; --revoke all privileges on ��.* from root@localhost; ++drop database ��; ++revoke all privileges on ��.* from root@localhost; show grants for root@localhost; set names latin1; @@@ -2232,38 -2244,26 +2232,59 @@@ DROP USER foo@'127.0.0.1' --echo # End of Bug#12766319 + +--echo # +--echo # Bug#11756966 - 48958: STORED PROCEDURES CAN BE LEVERAGED TO BYPASS +--echo # DATABASE SECURITY +--echo # + +--disable_warnings +DROP DATABASE IF EXISTS secret; +DROP DATABASE IF EXISTS no_such_db; +--enable_warnings + +CREATE DATABASE secret; +GRANT USAGE ON *.* TO untrusted@localhost; + +--echo # Connection con1 +connect (con1, localhost, untrusted); +SHOW GRANTS; +SHOW DATABASES; + +--echo # Both statements below should fail with the same error. +--echo # They used to give different errors, thereby +--echo # hinting that the secret database exists. 
+--error ER_DBACCESS_DENIED_ERROR +CREATE PROCEDURE no_such_db.foo() BEGIN END; +--error ER_DBACCESS_DENIED_ERROR +CREATE PROCEDURE secret.peek_at_secret() BEGIN END; + +--echo # Connection default +--connection default +disconnect con1; +DROP USER untrusted@localhost; +DROP DATABASE secret; + + # + # Bug#27230925: HANDLE_FATAL_SIGNAL (SIG=11) IN SHOW_ROUTINE_GRANTS + # + create user foo@localhost; + create database foodb; + grant create routine on foodb.* to foo@localhost; + connect con1,localhost,foo; + create procedure fooproc() select 'i am fooproc'; + show grants; + disconnect con1; + connection default; + rename table mysql.procs_priv to mysql.procs_priv1; + error ER_NO_SUCH_TABLE; + flush privileges; + show grants for foo@localhost; + rename table mysql.procs_priv1 to mysql.procs_priv; + drop user foo@localhost; + drop procedure fooproc; + drop database foodb; + + # Wait till we reached the initial number of concurrent sessions --source include/wait_until_count_sessions.inc diff --cc mysql-test/t/sp-security.test index a2079e91440,3769bb3b4bf..6a596b4febb --- a/mysql-test/t/sp-security.test +++ b/mysql-test/t/sp-security.test @@@ -995,47 -995,24 +995,65 @@@ disconnect con2 DROP USER user2@localhost; DROP DATABASE db1; + # + # Bug#27407480: AUTOMATIC_SP_PRIVILEGES REQUIRES NEED THE INSERT PRIVILEGES FOR MYSQL.USER TABLE + # + create user foo@local_ost; + create user foo@`local\_ost` identified by 'nevermore'; + create database foodb; + grant create routine on foodb.* to foo@local_ost; + connect con1,localhost,foo; + select user(), current_user(); + show grants; + create procedure fooproc() select 'i am fooproc'; + show grants; + disconnect con1; + connection default; + drop user foo@local_ost; + drop user foo@`local\_ost`; + drop procedure fooproc; + drop database foodb; +--echo # +--echo # Test for bug#12602983 - User without privilege on routine can discover +--echo # its existence by executing "select non_existing_func();" or by +--echo # "call non_existing_proc()"; +--echo # +--disable_warnings +drop database if exists mysqltest_db; +--enable_warnings +create database mysqltest_db; +create function mysqltest_db.f1() returns int return 0; +create procedure mysqltest_db.p1() begin end; + +--echo # Create user with no privileges on mysqltest_db database. +create user bug12602983_user@localhost; + +--echo # Connect as user 'bug12602983_user@localhost' +connect (conn1, localhost, bug12602983_user,,); + +--echo # Attempt to execute routine on which user doesn't have privileges +--echo # should result in the same 'access denied' error whether +--echo # routine exists or not. +--error ER_PROCACCESS_DENIED_ERROR +select mysqltest_db.f_does_not_exist(); +--error ER_PROCACCESS_DENIED_ERROR +call mysqltest_db.p_does_not_exist(); + +--error ER_PROCACCESS_DENIED_ERROR +select mysqltest_db.f1(); +--error ER_PROCACCESS_DENIED_ERROR +call mysqltest_db.p1(); + +--error ER_PROCACCESS_DENIED_ERROR +create view bug12602983_v1 as select mysqltest_db.f_does_not_exist(); +--error ER_PROCACCESS_DENIED_ERROR +create view bug12602983_v1 as select mysqltest_db.f1(); + +--echo # Connection 'default'. 
+connection default; +disconnect conn1; +drop user bug12602983_user@localhost; +drop database mysqltest_db; # Wait till all disconnects are completed --source include/wait_until_count_sessions.inc diff --cc mysql-test/t/sp.test index c5d37d1017d,cb93cd31442..1b765f0eb2d --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@@ -9353,112 -9353,25 +9353,133 @@@ where 1=1 drop function if exists f1; drop table t1,t2; + --echo # + --echo # MDEV-16957: Server crashes in Field_iterator_natural_join::next + --echo # upon 2nd execution of SP + --echo # + + CREATE TABLE t1 (a INT, b VARCHAR(32)); + CREATE PROCEDURE sp() SELECT * FROM t1 AS t1x JOIN t1 AS t1y USING (c); + --error ER_BAD_FIELD_ERROR + CALL sp; + --error ER_BAD_FIELD_ERROR + CALL sp; + --error ER_BAD_FIELD_ERROR + CALL sp; + alter table t1 add column c int; + CALL sp; + + # Cleanup + DROP PROCEDURE sp; + DROP TABLE t1; + + --echo # End of 5.5 test + +--echo # +--echo # MDEV-7040: Crash in field_conv, memcpy_field_possible, part#2 +--echo # +create table t1 ( + col1 bigint(20), + col2 char(1), + col3 char(2) +); +insert into t1 values (1,'a','a'), (2,'b','b'); + +create table t2 as select * from t1; +create table t3 as select * from t1; +create table t4 as select * from t1; +create table t5 as select * from t1; +create table t6 as select * from t1; + +flush tables; + +DELIMITER |; + +CREATE PROCEDURE p1() +begin + DECLARE _var1 bigint(20) UNSIGNED; + DECLARE _var2 CHAR(1) DEFAULT NULL; + DECLARE _var3 CHAR(1) DEFAULT NULL; + + DECLARE _done BOOLEAN DEFAULT 0; + + declare cur1 cursor for + select col1, col2, col3 + from t1 + where + col1 in (select t2.col1 from t2 where t2.col2=t1.col2) or + col2 in (select t3.col3 from t3 where t3.col3=t1.col2) ; + + DECLARE CONTINUE HANDLER FOR NOT FOUND SET _done = 1; + + OPEN cur1; + + set _var1 = (select _var1 from t4 limit 1); + set _var1 = (select _var1 from t5 limit 1); + set _var1 = (select _var1 from t6 limit 1); +label1: + LOOP + SET _done = 0; + FETCH cur1 INTO _var1, _var2, _var3; + IF _done THEN + LEAVE label1; + END IF; + END LOOP label1; + CLOSE cur1; +end| +DELIMITER ;| + +set @tmp_toc= @@table_open_cache; +set @tmp_tdc= @@table_definition_cache; + +set global table_open_cache=1; +set global table_definition_cache=1; +call p1(); + +set global table_open_cache= @tmp_toc; +set global table_definition_cache= @tmp_tdc; +drop procedure p1; + +drop table t1,t2,t3,t4,t5,t6; + +--echo # +--echo # MDEV-11935: Queries in stored procedures with and +--echo # EXISTS(SELECT * FROM VIEW) crashes and closes hte conneciton. 
+--echo # + +CREATE TABLE ANY_TABLE ( + ENTITY_UID BIGINT NOT NULL +); +CREATE TABLE SECURITY_PATH( +origid BIGINT UNSIGNED NOT NULL, +destid BIGINT UNSIGNED NOT NULL, +KEY (destid) +); +CREATE VIEW ENTITY_ACCESS ( +ENTITY_UID, +OWNER_UID +) AS +SELECT SP1.origid, + SP2.destid +FROM SECURITY_PATH SP1 +JOIN SECURITY_PATH SP2 ON SP1.destid = SP2.origid +; +--delimiter // +CREATE PROCEDURE SP_EXAMPLE_SELECT () +BEGIN + SELECT * + FROM ANY_TABLE AT1 + WHERE EXISTS ( SELECT * + FROM ENTITY_ACCESS EA + WHERE AT1.ENTITY_UID = EA.ENTITY_UID + AND EA.OWNER_UID IS NULL ); +END +// +--delimiter ; +CALL SP_EXAMPLE_SELECT (); +CALL SP_EXAMPLE_SELECT (); + +drop procedure SP_EXAMPLE_SELECT; +drop view ENTITY_ACCESS; +drop table ANY_TABLE, SECURITY_PATH; diff --cc mysql-test/t/type_float.test index bb7a784553e,fa45b1041d8..297860f1e05 --- a/mysql-test/t/type_float.test +++ b/mysql-test/t/type_float.test @@@ -332,56 -332,31 +332,56 @@@ eval select concat((truncate((-1.797693 select format(truncate('1.7976931348623157E+308',-12),1,'fr_BE') as foo; + --echo # - --echo # Bug #13500371 63704: CONVERSION OF '1.' TO A NUMBER GIVES ERROR 1265 - --echo # (WARN_DATA_TRUNCATED) + --echo # MDEV-17249 MAKETIME(-1e50,0,0) returns a wrong result --echo # - CREATE TABLE t1 (f FLOAT); - INSERT INTO t1 VALUES ('1.'); - INSERT INTO t1 VALUES ('2.0.'); - INSERT INTO t1 VALUES ('.'); - SELECT * FROM t1 ORDER BY f; + SELECT LEFT('a',EXP(50)); + SELECT LEFT('a', COALESCE(1e30)); + + CREATE TABLE t1 (a FLOAT); + INSERT INTO t1 VALUES (1e30); + SELECT LEFT('a',a), LEFT('a',1e30) FROM t1; DROP TABLE t1; + PREPARE stmt FROM 'SELECT LEFT(111,?)'; + SET @a=1e30; + EXECUTE stmt USING @a; + DEALLOCATE PREPARE stmt; - --echo # - --echo # Start of 10.0 tests - --echo # + CREATE TABLE t1 (a INT); + INSERT INTO t1 VALUES (1),(2),(3); + SELECT LEFT('a',(SELECT 1e30 FROM t1 LIMIT 1)); + DROP TABLE t1; - --echo # - --echo # MDEV-6950 Bad results with joins comparing DATE/DATETIME and INT/DECIMAL/DOUBLE/ENUM/VARCHAR columns - --echo # - CREATE TABLE t1 (a DATETIME PRIMARY KEY); - INSERT INTO t1 VALUES ('1999-01-01 00:00:00'); - CREATE TABLE t2 (a DOUBLE); - INSERT INTO t2 VALUES (19990101000000); - INSERT INTO t2 VALUES (990101000000); - SELECT t1.* FROM t1,t2 WHERE t1.a=t2.a; - SELECT t1.* FROM t1 LEFT JOIN t2 ON t1.a=t2.a; - ALTER TABLE t2 ADD PRIMARY KEY(a); + CREATE TABLE t1 (a DOUBLE); + INSERT INTO t1 VALUES (1e30),(0); + SELECT LEFT('a', SUM(a)) FROM t1; + SELECT LEFT('a', AVG(a)) FROM t1; + DROP TABLE t1; +SELECT t1.* FROM t1,t2 WHERE t1.a=t2.a; +SELECT t1.* FROM t1 LEFT JOIN t2 ON t1.a=t2.a; +--echo # t2 should NOT be eliminated +EXPLAIN SELECT t1.* FROM t1 LEFT JOIN t2 ON t1.a=t2.a; +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-6971 Bad results with joins comparing TIME and DOUBLE/DECIMAL columns +--echo # +CREATE TABLE t1 (a TIME(6) PRIMARY KEY); +INSERT INTO t1 VALUES ('10:20:30'); +CREATE TABLE t2 (a DOUBLE); +INSERT INTO t2 VALUES (102030),(102030.000000001); +SELECT t1.* FROM t1 JOIN t2 USING(a); +SELECT t1.* FROM t1 LEFT JOIN t2 USING(a); +ALTER TABLE t2 ADD PRIMARY KEY(a); +SELECT t1.* FROM t1 JOIN t2 USING(a); +SELECT t1.* FROM t1 LEFT JOIN t2 USING(a); +--echo # t2 should NOT be elimitated +EXPLAIN SELECT t1.* FROM t1 LEFT JOIN t2 USING(a); +DROP TABLE t1,t2; + +--echo # +--echo # End of 10.0 tests +--echo # diff --cc mysys/mf_iocache2.c index 9e693209445,a9a237696d2..ca7723cfa09 --- a/mysys/mf_iocache2.c +++ b/mysys/mf_iocache2.c @@@ -533,4 -534,4 +534,3 @@@ process_flags err: return (size_t) -1; } -- diff --cc res index 
00000000000,00000000000..e95690277cc new file mode 100644 --- /dev/null +++ b/res @@@ -1,0 -1,0 +1,22 @@@ ++diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc ++index 6cbf6774dc7..7233fe6c745 100644 ++--- a/storage/innobase/handler/ha_innodb.cc +++++ b/storage/innobase/handler/ha_innodb.cc ++@@ -108,6 +108,7 @@ MYSQL_PLUGIN_IMPORT extern char mysql_unpacked_real_data_home[]; ++ #endif /* UNIV_DEBUG */ ++ #include "fts0priv.h" ++ #include "page0zip.h" +++#include "dict0priv.h" ++ ++ #define thd_get_trx_isolation(X) ((enum_tx_isolation)thd_tx_isolation(X)) ++ ++@@ -8598,7 +8599,8 @@ ha_innobase::delete_row( ++ wsrep_on(user_thd) && ++ !wsrep_thd_skip_append_keys(user_thd)) ++ { ++- if (wsrep_append_keys(user_thd, false, record, NULL)) { +++ if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, record, +++ NULL)) { ++ DBUG_PRINT("wsrep", ("delete fail")); ++ error = (dberr_t)HA_ERR_INTERNAL_ERROR; ++ goto wsrep_error; diff --cc scripts/mysql_system_tables_fix.sql index 194b1615c2b,dd792ce9f2f..9d4318ad101 --- a/scripts/mysql_system_tables_fix.sql +++ b/scripts/mysql_system_tables_fix.sql @@@ -453,14 -468,18 +453,11 @@@ ALTER TABLE proc MODIFY name char(64) D DEFAULT CHARACTER SET utf8; # Correct the character set and collation - ALTER TABLE proc CONVERT TO CHARACTER SET utf8; # Reset some fields after the conversion - ALTER TABLE proc MODIFY db - char(64) collate utf8_bin DEFAULT '' NOT NULL, - MODIFY definer - char(141) collate utf8_bin DEFAULT '' NOT NULL, - MODIFY comment - text collate utf8_bin NOT NULL; -SET @alter_statement = CONCAT(" + ALTER TABLE proc CONVERT TO CHARACTER SET utf8, - MODIFY db - char(64) binary DEFAULT '' NOT NULL, - MODIFY definer - char(", @definer_name_length, ") binary DEFAULT '' NOT NULL, - MODIFY comment - char(64) binary DEFAULT '' NOT NULL -"); -PREPARE alter_stmt FROM @alter_statement; -EXECUTE alter_stmt; ++ MODIFY db char(64) binary DEFAULT '' NOT NULL, ++ MODIFY definer char(141) binary DEFAULT '' NOT NULL, ++ MODIFY comment text binary NOT NULL; ALTER TABLE proc ADD character_set_client char(32) collate utf8_bin DEFAULT NULL diff --cc sql/item_timefunc.cc index c7a14049f37,2dc704f6873..cc600c8252a --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@@ -2835,13 -2765,14 +2825,14 @@@ bool Item_func_maketime::get_date(MYSQL } else { - ltime->hour= TIME_MAX_HOUR; - ltime->minute= TIME_MAX_MINUTE; - ltime->second= TIME_MAX_SECOND; + // use check_time_range() to set ltime to the max value depending on dec + int unused; + ltime->hour= TIME_MAX_HOUR + 1; + check_time_range(ltime, decimals, &unused); char buf[28]; - char *ptr= longlong10_to_str(hour, buf, args[0]->unsigned_flag ? 10 : -10); + char *ptr= longlong10_to_str(hour.value(), buf, hour.is_unsigned() ? 
10 : -10); int len = (int)(ptr - buf) + sprintf(ptr, ":%02u:%02u", (uint)minute, (uint)second); - make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, buf, len, MYSQL_TIMESTAMP_TIME, NullS); } diff --cc sql/key.cc index 414c3392cff,7e5a3309b10..f900a1b4527 --- a/sql/key.cc +++ b/sql/key.cc @@@ -147,8 -144,9 +147,9 @@@ void key_copy(uchar *to_key, uchar *fro key_part->key_part_flag & HA_VAR_LENGTH_PART) { key_length-= HA_KEY_BLOB_LENGTH; - length= min(key_length, key_part->length); + length= min<uint>(key_length, key_part->length); - uint bytes= key_part->field->get_key_image(to_key, length, Field::itRAW); + uint bytes= key_part->field->get_key_image(to_key, length, + key_info->flags & HA_SPATIAL ? Field::itMBR : Field::itRAW); if (with_zerofill && bytes < length) bzero((char*) to_key + bytes, length - bytes); to_key+= HA_KEY_BLOB_LENGTH; diff --cc sql/log_event.cc index e8c1115eafb,c65ae4744b9..da4e63f02e1 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@@ -263,6 -248,27 +263,27 @@@ static void inline slave_rows_error_rep } #endif + #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -static void set_thd_db(THD *thd,const char *db, uint32 db_len) ++static void set_thd_db(THD *thd, Rpl_filter *rpl_filter, ++ const char *db, uint32 db_len) + { + char lcase_db_buf[NAME_LEN +1]; + LEX_STRING new_db; + new_db.length= db_len; + if (lower_case_table_names == 1) + { + strmov(lcase_db_buf, db); + my_casedn_str(system_charset_info, lcase_db_buf); + new_db.str= lcase_db_buf; + } + else + new_db.str= (char*) db; + /* TODO WARNING this makes rewrite_db respect lower_case_table_names values + * for more info look MDEV-17446 */ - new_db.str= (char*) rpl_filter->get_rewrite_db(new_db.str, - &new_db.length); ++ new_db.str= (char*) rpl_filter->get_rewrite_db(new_db.str, &new_db.length); + thd->set_db(new_db.str, new_db.length); + } + #endif /* Cache that will automatically be written to a dedicated file on destruction. @@@ -4138,21 -3647,11 +4159,20 @@@ bool test_if_equal_repl_errors(int expe mismatch. This mismatch could be implemented with a new ER_ code, and to ignore it you would use --slave-skip-errors... */ -int Query_log_event::do_apply_event(Relay_log_info const *rli, - const char *query_arg, uint32 q_len_arg) +int Query_log_event::do_apply_event(rpl_group_info *rgi, + const char *query_arg, uint32 q_len_arg) { - LEX_STRING new_db; int expected_error,actual_error= 0; HA_CREATE_INFO db_options; + uint64 sub_id= 0; + rpl_gtid gtid; + Relay_log_info const *rli= rgi->rli; +#ifdef WITH_WSREP + Rpl_filter *rpl_filter= (rli->mi) ? rli->mi->rpl_filter: NULL; +#else + Rpl_filter *rpl_filter= rli->mi->rpl_filter; +#endif /* WITH_WSREP */ + bool current_stmt_is_commit; DBUG_ENTER("Query_log_event::do_apply_event"); /* @@@ -4177,9 -3676,7 +4197,7 @@@ goto end; } - new_db.length= db_len; - new_db.str= (char *) rpl_filter->get_rewrite_db(db, &new_db.length); - thd->set_db(new_db.str, new_db.length); /* allocates a copy of 'db' */ - set_thd_db(thd, db, db_len); ++ set_thd_db(thd, rpl_filter, db, db_len); /* Setting the character set and collation of the current database thd->db. @@@ -4348,46 -3851,30 +4366,62 @@@ else thd->variables.collation_database= thd->db_charset; + { + const CHARSET_INFO *cs= thd->charset(); + /* + We cannot ask for parsing a statement using a character set + without state_maps (parser internal data). 
+ */ + if (!cs->state_map) + { + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + ER_THD(thd, ER_SLAVE_FATAL_ERROR), + "character_set cannot be parsed"); + thd->is_slave_error= true; + goto end; + } + } + - thd->table_map_for_update= (table_map)table_map_for_update; - thd->set_invoker(&user, &host); /* - Flag if we need to rollback the statement transaction on - slave if it by chance succeeds. - If we expected a non-zero error code and get nothing and, - it is a concurrency issue or ignorable issue, effects - of the statement should be rolled back. + Record any GTID in the same transaction, so slave state is + transactionally consistent. + */ + if (current_stmt_is_commit) + { + thd->variables.option_bits&= ~OPTION_GTID_BEGIN; + if (rgi->gtid_pending) + { + sub_id= rgi->gtid_sub_id; + rgi->gtid_pending= false; + + gtid= rgi->current_gtid; + if (rpl_global_gtid_slave_state->record_gtid(thd, >id, sub_id, + true, false)) + { + int errcode= thd->get_stmt_da()->sql_errno(); + if (!is_parallel_retry_error(rgi, errcode)) + rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, + rgi->gtid_info(), + "Error during COMMIT: failed to update GTID state in " + "%s.%s: %d: %s", + "mysql", rpl_gtid_slave_state_table_name.str, + errcode, + thd->get_stmt_da()->message()); + sub_id= 0; + thd->is_slave_error= 1; + goto end; + } + } + } + + thd->table_map_for_update= (table_map)table_map_for_update; + thd->set_invoker(&user, &host); + /* + Flag if we need to rollback the statement transaction on + slave if it by chance succeeds. + If we expected a non-zero error code and get nothing and, + it is a concurrency issue or ignorable issue, effects + of the statement should be rolled back. */ if (expected_error && (ignored_error_code(expected_error) || @@@ -5921,18 -5464,13 +5955,15 @@@ void Load_log_event::set_fields(const c 1 Failure */ -int Load_log_event::do_apply_event(NET* net, Relay_log_info const *rli, +int Load_log_event::do_apply_event(NET* net, rpl_group_info *rgi, bool use_rli_only_for_errors) { - LEX_STRING new_db; + Relay_log_info const *rli= rgi->rli; + Rpl_filter *rpl_filter= rli->mi->rpl_filter; DBUG_ENTER("Load_log_event::do_apply_event"); - new_db.length= db_len; - new_db.str= (char *) rpl_filter->get_rewrite_db(db, &new_db.length); - thd->set_db(new_db.str, new_db.length); DBUG_ASSERT(thd->query() == 0); - set_thd_db(thd, db, db_len); ++ set_thd_db(thd, rpl_filter, db, db_len); thd->reset_query_inner(); // Should not be needed thd->is_slave_error= 0; clear_all_errors(thd, const_cast<Relay_log_info*>(rli)); @@@ -5974,9 -5522,11 +6005,11 @@@ { thd->set_time(when, when_sec_part); thd->set_query_id(next_query_id()); - thd->warning_info->opt_clear_warning_info(thd->query_id); + thd->get_stmt_da()->opt_clear_warning_info(thd->query_id); TABLE_LIST tables; + if (lower_case_table_names) + my_casedn_str(system_charset_info, (char *)table_name); tables.init_one_table(thd->strmake(thd->db, thd->db_length), thd->db_length, table_name, strlen(table_name), @@@ -7752,7 -6557,12 +7785,12 @@@ User_var_log_event(const char* buf, uin Old events will not have this extra byte, thence, we keep the flags set to UNDEF_F. 
*/ - uint bytes_read= ((val + val_len) - start); + uint bytes_read= ((val + val_len) - buf_start); + if (bytes_read > event_len) + { + error= true; + goto err; + } if ((data_written - bytes_read) > 0) { flags= (uint) *(buf + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + @@@ -11122,15 -9778,14 +11160,15 @@@ check_table_map(rpl_group_info *rgi, RP DBUG_RETURN(res); } -int Table_map_log_event::do_apply_event(Relay_log_info const *rli) +int Table_map_log_event::do_apply_event(rpl_group_info *rgi) { RPL_TABLE_LIST *table_list; - char *db_mem, *tname_mem; + char *db_mem, *tname_mem, *ptr; size_t dummy_len; void *memory; + Rpl_filter *filter; + Relay_log_info const *rli= rgi->rli; DBUG_ENTER("Table_map_log_event::do_apply_event(Relay_log_info*)"); - DBUG_ASSERT(rli->sql_thd == thd); /* Step the query id to mark what columns that are actually used. */ thd->set_query_id(next_query_id()); @@@ -11142,10 -9797,17 +11180,20 @@@ NullS))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); + strmov(db_mem, m_dbnam); + strmov(tname_mem, m_tblnam); + if (lower_case_table_names) + { + my_casedn_str(files_charset_info, (char*)tname_mem); + my_casedn_str(files_charset_info, (char*)db_mem); + } + + /* call from mysql_client_binlog_statement() will not set rli->mi */ + filter= rgi->thd->slave_thread ? rli->mi->rpl_filter : global_rpl_filter; - strmov(db_mem, filter->get_rewrite_db(m_dbnam, &dummy_len)); - strmov(tname_mem, m_tblnam); ++ + /* rewrite rules changed the database */ - if (((ptr= (char*) rpl_filter->get_rewrite_db(db_mem, &dummy_len)) != db_mem)) ++ if (((ptr= (char*) filter->get_rewrite_db(db_mem, &dummy_len)) != db_mem)) + strmov(db_mem, ptr); table_list->init_one_table(db_mem, strlen(db_mem), tname_mem, strlen(tname_mem), diff --cc sql/mysqld.cc index 3cc87b02a4d,229045d4493..ab58928273a --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@@ -7192,8 -6651,8 +7191,9 @@@ pthread_handler_t handle_connections_na /* Host is unknown */ thd->security_ctx->host= my_strdup(my_localhost, MYF(0)); create_new_thread(thd); + set_current_thd(0); } + LocalFree(saPipeSecurity.lpSecurityDescriptor); CloseHandle(connectOverlapped.hEvent); DBUG_LEAVE; decrement_handler_count(); diff --cc sql/sql_acl.cc index cbca413a93d,948a2e58c75..ecce9ad2b39 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@@ -9991,8 -7225,8 +9991,8 @@@ bool sp_grant_privileges(THD *thd, cons mysql_mutex_lock(&acl_cache->lock); - if ((au= find_user_wild(combo->host.str= (char *) sctx->priv_host, - combo->user.str))) - if ((au= find_acl_user(combo->host.str= (char *) sctx->priv_host, - combo->user.str, TRUE))) ++ if ((au= find_user_exact(combo->host.str= (char *) sctx->priv_host, ++ combo->user.str))) goto found_acl; mysql_mutex_unlock(&acl_cache->lock); diff --cc sql/sql_error.h index a993e9203c9,903ad65521d..da077627c88 --- a/sql/sql_error.h +++ b/sql/sql_error.h @@@ -18,14 -18,126 +18,15 @@@ #include "sql_list.h" /* Sql_alloc, MEM_ROOT */ #include "m_string.h" /* LEX_STRING */ + #include "sql_type_int.h" // Longlong_hybrid #include "sql_string.h" /* String */ +#include "sql_plist.h" /* I_P_List */ #include "mysql_com.h" /* MYSQL_ERRMSG_SIZE */ +#include "my_time.h" /* MYSQL_TIME */ +#include "decimal.h" class THD; - -/** - Stores status of the currently executed statement. - Cleared at the beginning of the statement, and then - can hold either OK, ERROR, or EOF status. - Can not be assigned twice per statement. -*/ - -class Diagnostics_area -{ -public: - enum enum_diagnostics_status - { - /** The area is cleared at start of a statement. 
*/ - DA_EMPTY= 0, - /** Set whenever one calls my_ok(). */ - DA_OK, - /** Set whenever one calls my_eof(). */ - DA_EOF, - /** Set whenever one calls my_error() or my_message(). */ - DA_ERROR, - /** Set in case of a custom response, such as one from COM_STMT_PREPARE. */ - DA_DISABLED - }; - /** True if status information is sent to the client. */ - bool is_sent; - /** Set to make set_error_status after set_{ok,eof}_status possible. */ - bool can_overwrite_status; - - void set_ok_status(THD *thd, ulonglong affected_rows_arg, - ulonglong last_insert_id_arg, - const char *message); - void set_eof_status(THD *thd); - void set_error_status(THD *thd, uint sql_errno_arg, const char *message_arg, - const char *sqlstate); - - void disable_status(); - - void reset_diagnostics_area(); - - bool is_set() const { return m_status != DA_EMPTY; } - bool is_error() const { return m_status == DA_ERROR; } - bool is_eof() const { return m_status == DA_EOF; } - bool is_ok() const { return m_status == DA_OK; } - bool is_disabled() const { return m_status == DA_DISABLED; } - enum_diagnostics_status status() const { return m_status; } - - const char *message() const - { DBUG_ASSERT(m_status == DA_ERROR || m_status == DA_OK); return m_message; } - - uint sql_errno() const - { DBUG_ASSERT(m_status == DA_ERROR); return m_sql_errno; } - - const char* get_sqlstate() const - { DBUG_ASSERT(m_status == DA_ERROR); return m_sqlstate; } - - ulonglong affected_rows() const - { DBUG_ASSERT(m_status == DA_OK); return m_affected_rows; } - - ulonglong last_insert_id() const - { DBUG_ASSERT(m_status == DA_OK); return m_last_insert_id; } - - uint statement_warn_count() const - { - DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF); - return m_statement_warn_count; - } - - /* Used to count any warnings pushed after calling set_ok_status(). */ - void increment_warning() - { - if (m_status != DA_EMPTY) - m_statement_warn_count++; - } - - Diagnostics_area() { reset_diagnostics_area(); } - -private: - /** Message buffer. Can be used by OK or ERROR status. */ - char m_message[MYSQL_ERRMSG_SIZE]; - /** - SQL error number. One of ER_ codes from share/errmsg.txt. - Set by set_error_status. - */ - uint m_sql_errno; - - char m_sqlstate[SQLSTATE_LENGTH+1]; - - /** - The number of rows affected by the last statement. This is - semantically close to thd->row_count_func, but has a different - life cycle. thd->row_count_func stores the value returned by - function ROW_COUNT() and is cleared only by statements that - update its value, such as INSERT, UPDATE, DELETE and few others. - This member is cleared at the beginning of the next statement. - - We could possibly merge the two, but life cycle of thd->row_count_func - can not be changed. - */ - ulonglong m_affected_rows; - /** - Similarly to the previous member, this is a replacement of - thd->first_successful_insert_id_in_prev_stmt, which is used - to implement LAST_INSERT_ID(). - */ - ulonglong m_last_insert_id; - /** - Number of warnings of this last statement. May differ from - the number of warnings returned by SHOW WARNINGS e.g. in case - the statement doesn't clear the warnings, and doesn't generate - them. 
- */ - uint m_statement_warn_count; - enum_diagnostics_status m_status; -}; +class my_decimal; /////////////////////////////////////////////////////////////////////////// diff --cc sql/wsrep_mysqld.h index 49845a8fafb,e2800c8c2a8..4c98b9a624b --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@@ -19,8 -19,8 +19,9 @@@ #include "mysqld.h" typedef struct st_mysql_show_var SHOW_VAR; #include <sql_priv.h> +//#include "rpl_gtid.h" #include "../wsrep/wsrep_api.h" + #include "wsrep_mysqld_c.h" #define WSREP_UNDEFINED_TRX_ID ULONGLONG_MAX diff --cc storage/innobase/handler/ha_innodb.cc index 7d1911889bd,e6689f68704..7233fe6c745 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@@ -94,27 -91,16 +94,28 @@@ MYSQL_PLUGIN_IMPORT extern char mysql_u #include "ha_prototypes.h" #include "ut0mem.h" #include "ibuf0ibuf.h" +#include "dict0dict.h" +#include "srv0mon.h" +#include "api0api.h" +#include "api0misc.h" +#include "pars0pars.h" +#include "fts0fts.h" +#include "fts0types.h" +#include "row0import.h" +#include "row0quiesce.h" +#ifdef UNIV_DEBUG +#include "trx0purge.h" +#endif /* UNIV_DEBUG */ +#include "fts0priv.h" +#include "page0zip.h" ++#include "dict0priv.h" -enum_tx_isolation thd_get_trx_isolation(const THD* thd); +#define thd_get_trx_isolation(X) ((enum_tx_isolation)thd_tx_isolation(X)) -#ifdef WITH_WSREP -#include "../storage/innobase/include/ut0byte.h" -#ifndef EXTRA_DEBUG - //#include "../storage/innobase/include/ut0byte.ic" -#endif /* EXTRA_DEBUG */ -#endif /* WITH_WSREP */ -} +#ifdef MYSQL_DYNAMIC_PLUGIN +#define tc_size 400 +#define tdc_size 400 +#endif #include "ha_innodb.h" #include "i_s.h" @@@ -127,11 -110,9 +128,11 @@@ # endif /* MYSQL_PLUGIN_IMPORT */ #ifdef WITH_WSREP - #include "dict0priv.h" + #include "../../../wsrep/wsrep_api.h" +#include "../storage/innobase/include/ut0byte.h" #include <wsrep_mysqld.h> -#include <my_md5.h> +#include <wsrep_md5.h> + extern my_bool wsrep_certify_nonPK; class binlog_trx_data; extern handlerton *binlog_hton; @@@ -5125,3873 -4805,839 +5126,3876 @@@ innobase_match_index_columns } /*******************************************************************//** -Stores a key value for a row to a buffer. -@return key value length as stored in buff */ -#ifdef WITH_WSREP -UNIV_INTERN -uint -wsrep_store_key_val_for_row( -/*===============================*/ - TABLE* table, - uint keynr, /*!< in: key number */ - char* buff, /*!< in/out: buffer for the key value (in MySQL - format) */ - uint buff_len,/*!< in: buffer length */ - const uchar* record, - ibool* key_is_null)/*!< out: full key was null */ +This function builds a translation table in INNOBASE_SHARE +structure for fast index location with mysql array number from its +table->key_info structure. This also provides the necessary translation +between the key order in mysql key_info and Innodb ib_table->indexes if +they are not fully matched with each other. +Note we do not have any mutex protecting the translation table +building based on the assumption that there is no concurrent +index creation/drop and DMLs that requires index lookup. All table +handle will be closed before the index creation/drop. 
+@return TRUE if index translation table built successfully */ +static +ibool +innobase_build_index_translation( +/*=============================*/ + const TABLE* table, /*!< in: table in MySQL data + dictionary */ + dict_table_t* ib_table,/*!< in: table in Innodb data + dictionary */ + INNOBASE_SHARE* share) /*!< in/out: share structure + where index translation table + will be constructed in. */ { - KEY* key_info = table->key_info + keynr; - KEY_PART_INFO* key_part = key_info->key_part; - KEY_PART_INFO* end = key_part + key_info->key_parts; - char* buff_start = buff; - enum_field_types mysql_type; - Field* field; - - DBUG_ENTER("store_key_val_for_row"); + ulint mysql_num_index; + ulint ib_num_index; + dict_index_t** index_mapping; + ibool ret = TRUE; + + DBUG_ENTER("innobase_build_index_translation"); + + mutex_enter(&dict_sys->mutex); + + mysql_num_index = table->s->keys; + ib_num_index = UT_LIST_GET_LEN(ib_table->indexes); + + index_mapping = share->idx_trans_tbl.index_mapping; + + /* If there exists inconsistency between MySQL and InnoDB dictionary + (metadata) information, the number of index defined in MySQL + could exceed that in InnoDB, do not build index translation + table in such case */ + if (UNIV_UNLIKELY(ib_num_index < mysql_num_index)) { + ret = FALSE; + goto func_exit; + } + + /* If index entry count is non-zero, nothing has + changed since last update, directly return TRUE */ + if (share->idx_trans_tbl.index_count) { + /* Index entry count should still match mysql_num_index */ + ut_a(share->idx_trans_tbl.index_count == mysql_num_index); + goto func_exit; + } + + /* The number of index increased, rebuild the mapping table */ + if (mysql_num_index > share->idx_trans_tbl.array_size) { + index_mapping = (dict_index_t**) my_realloc(index_mapping, + mysql_num_index * + sizeof(*index_mapping), + MYF(MY_ALLOW_ZERO_PTR)); + + if (!index_mapping) { + /* Report an error if index_mapping continues to be + NULL and mysql_num_index is a non-zero value */ + sql_print_error("InnoDB: fail to allocate memory for " + "index translation table. Number of " + "Index:%lu, array size:%lu", + mysql_num_index, + share->idx_trans_tbl.array_size); + ret = FALSE; + goto func_exit; + } + + share->idx_trans_tbl.array_size = mysql_num_index; + } + + /* For each index in the mysql key_info array, fetch its + corresponding InnoDB index pointer into index_mapping + array. */ + for (ulint count = 0; count < mysql_num_index; count++) { + + /* Fetch index pointers into index_mapping according to mysql + index sequence */ + index_mapping[count] = dict_table_get_index_on_name( + ib_table, table->key_info[count].name); + + if (!index_mapping[count]) { + sql_print_error("Cannot find index %s in InnoDB " + "index dictionary.", + table->key_info[count].name); + ret = FALSE; + goto func_exit; + } + + /* Double check fetched index has the same + column info as those in mysql key_info. */ + if (!innobase_match_index_columns(&table->key_info[count], + index_mapping[count])) { + sql_print_error("Found index %s whose column info " + "does not match that of MySQL.", + table->key_info[count].name); + ret = FALSE; + goto func_exit; + } + } + + /* Successfully built the translation table */ + share->idx_trans_tbl.index_count = mysql_num_index; + +func_exit: + if (!ret) { + /* Build translation table failed. 
*/ + my_free(index_mapping); + + share->idx_trans_tbl.array_size = 0; + share->idx_trans_tbl.index_count = 0; + index_mapping = NULL; + } + + share->idx_trans_tbl.index_mapping = index_mapping; + + mutex_exit(&dict_sys->mutex); + + DBUG_RETURN(ret); +} + +/*******************************************************************//** +This function uses index translation table to quickly locate the +requested index structure. +Note we do not have mutex protection for the index translatoin table +access, it is based on the assumption that there is no concurrent +translation table rebuild (fter create/drop index) and DMLs that +require index lookup. +@return dict_index_t structure for requested index. NULL if +fail to locate the index structure. */ +static +dict_index_t* +innobase_index_lookup( +/*==================*/ + INNOBASE_SHARE* share, /*!< in: share structure for index + translation table. */ + uint keynr) /*!< in: index number for the requested + index */ +{ + if (!share->idx_trans_tbl.index_mapping + || keynr >= share->idx_trans_tbl.index_count) { + return(NULL); + } + + return(share->idx_trans_tbl.index_mapping[keynr]); +} + +/************************************************************************ +Set the autoinc column max value. This should only be called once from +ha_innobase::open(). Therefore there's no need for a covering lock. */ +UNIV_INTERN +void +ha_innobase::innobase_initialize_autoinc() +/*======================================*/ +{ + ulonglong auto_inc; + const Field* field = table->found_next_number_field; + + if (field != NULL) { + auto_inc = innobase_get_int_col_max_value(field); + } else { + /* We have no idea what's been passed in to us as the + autoinc column. We set it to the 0, effectively disabling + updates to the table. */ + auto_inc = 0; + + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB: Unable to determine the AUTOINC " + "column name\n"); + } + + if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) { + /* If the recovery level is set so high that writes + are disabled we force the AUTOINC counter to 0 + value effectively disabling writes to the table. + Secondly, we avoid reading the table in case the read + results in failure due to a corrupted table/index. + + We will not return an error to the client, so that the + tables can be dumped with minimal hassle. If an error + were returned in this case, the first attempt to read + the table would fail and subsequent SELECTs would succeed. */ + auto_inc = 0; + } else if (field == NULL) { + /* This is a far more serious error, best to avoid + opening the table and return failure. */ + my_error(ER_AUTOINC_READ_FAILED, MYF(0)); + } else { + dict_index_t* index; + const char* col_name; + ib_uint64_t read_auto_inc; + ulint err; + + update_thd(ha_thd()); + + ut_a(prebuilt->trx == thd_to_trx(user_thd)); + + col_name = field->field_name; + index = innobase_get_index(table->s->next_number_index); + + /* Execute SELECT MAX(col_name) FROM TABLE; */ + err = row_search_max_autoinc(index, col_name, &read_auto_inc); + + switch (err) { + case DB_SUCCESS: { + ulonglong col_max_value; + + col_max_value = innobase_get_int_col_max_value(field); + + /* At the this stage we do not know the increment + nor the offset, so use a default increment of 1. 
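The initialization path here seeds the in-memory AUTOINC counter from SELECT MAX(col); the value handed to dict_table_autoinc_initialize() is that maximum advanced by the default increment of 1 and clamped to the column's maximum. A hedged model of that final step (the real innobase_next_autoinc() also takes an offset and a configurable increment):

    #include <cstdint>

    // Seed value after reading MAX(col): advance by 1, saturate at the
    // column type's ceiling so the counter cannot wrap.
    static uint64_t next_autoinc_after_max(uint64_t current_max, uint64_t col_max) {
      if (current_max >= col_max)
        return col_max;          // already at the type's maximum
      return current_max + 1;    // default increment 1, offset 0
    }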
*/ + + auto_inc = innobase_next_autoinc( + read_auto_inc, 1, 1, 0, col_max_value); + + break; + } + case DB_RECORD_NOT_FOUND: + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB: MySQL and InnoDB data " + "dictionaries are out of sync.\n" + "InnoDB: Unable to find the AUTOINC column " + "%s in the InnoDB table %s.\n" + "InnoDB: We set the next AUTOINC column " + "value to 0,\n" + "InnoDB: in effect disabling the AUTOINC " + "next value generation.\n" + "InnoDB: You can either set the next " + "AUTOINC value explicitly using ALTER TABLE\n" + "InnoDB: or fix the data dictionary by " + "recreating the table.\n", + col_name, index->table->name); + + /* This will disable the AUTOINC generation. */ + auto_inc = 0; + + /* We want the open to succeed, so that the user can + take corrective action. ie. reads should succeed but + updates should fail. */ + err = DB_SUCCESS; + break; + default: + /* row_search_max_autoinc() should only return + one of DB_SUCCESS or DB_RECORD_NOT_FOUND. */ + ut_error; + } + } + + dict_table_autoinc_initialize(prebuilt->table, auto_inc); +} + +/*****************************************************************//** +Creates and opens a handle to a table which already exists in an InnoDB +database. +@return 1 if error, 0 if success */ +UNIV_INTERN +int +ha_innobase::open( +/*==============*/ + const char* name, /*!< in: table name */ + int mode, /*!< in: not used */ + uint test_if_locked) /*!< in: not used */ +{ + dict_table_t* ib_table; + char norm_name[FN_REFLEN]; + THD* thd; + char* is_part = NULL; + ibool par_case_name_set = FALSE; + char par_case_name[FN_REFLEN]; + dict_err_ignore_t ignore_err = DICT_ERR_IGNORE_NONE; + + DBUG_ENTER("ha_innobase::open"); + + UT_NOT_USED(mode); + UT_NOT_USED(test_if_locked); + + thd = ha_thd(); + + /* Under some cases MySQL seems to call this function while + holding btr_search_latch. This breaks the latching order as + we acquire dict_sys->mutex below and leads to a deadlock. */ + if (thd != NULL) { + innobase_release_temporary_latches(ht, thd); + } + + normalize_table_name(norm_name, name); + + user_thd = NULL; + + if (!(share=get_share(name))) { + + DBUG_RETURN(1); + } + + /* Will be allocated if it is needed in ::update_row() */ + upd_buf = NULL; + upd_buf_size = 0; + + /* We look for pattern #P# to see if the table is partitioned + MySQL table. */ +#ifdef __WIN__ + is_part = strstr(norm_name, "#p#"); +#else + is_part = strstr(norm_name, "#P#"); +#endif /* __WIN__ */ + + /* Check whether FOREIGN_KEY_CHECKS is set to 0. If so, the table + can be opened even if some FK indexes are missing. If not, the table + can't be opened in the same situation */ + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { + ignore_err = DICT_ERR_IGNORE_FK_NOKEY; + } + + /* Get pointer to a table object in InnoDB dictionary cache */ + ib_table = dict_table_open_on_name(norm_name, FALSE, TRUE, ignore_err); + + if (ib_table + && ((!DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID) + && table->s->stored_fields != dict_table_get_n_user_cols(ib_table)) + || (DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID) + && (table->s->fields + != dict_table_get_n_user_cols(ib_table) - 1)))) { + ib_logf(IB_LOG_LEVEL_WARN, + "table %s contains %lu user defined columns " + "in InnoDB, but %lu columns in MySQL. 
Please " + "check INFORMATION_SCHEMA.INNODB_SYS_COLUMNS and " + REFMAN "innodb-troubleshooting.html " + "for how to resolve it", + norm_name, (ulong) dict_table_get_n_user_cols(ib_table), + (ulong) table->s->fields); + + /* Mark this table as corrupted, so the drop table + or force recovery can still use it, but not others. */ + ib_table->corrupted = true; + dict_table_close(ib_table, FALSE, FALSE); + ib_table = NULL; + is_part = NULL; + } + + if (NULL == ib_table) { + if (is_part) { + /* MySQL partition engine hard codes the file name + separator as "#P#". The text case is fixed even if + lower_case_table_names is set to 1 or 2. This is true + for sub-partition names as well. InnoDB always + normalises file names to lower case on Windows, this + can potentially cause problems when copying/moving + tables between platforms. + + 1) If boot against an installation from Windows + platform, then its partition table name could + be in lower case in system tables. So we will + need to check lower case name when load table. + + 2) If we boot an installation from other case + sensitive platform in Windows, we might need to + check the existence of table name without lower + case in the system table. */ + if (innobase_get_lower_case_table_names() == 1) { + + if (!par_case_name_set) { +#ifndef __WIN__ + /* Check for the table using lower + case name, including the partition + separator "P" */ + strcpy(par_case_name, norm_name); + innobase_casedn_str(par_case_name); +#else + /* On Windows platfrom, check + whether there exists table name in + system table whose name is + not being normalized to lower case */ + normalize_table_name_low( + par_case_name, name, FALSE); +#endif + par_case_name_set = TRUE; + } + + ib_table = dict_table_open_on_name( + par_case_name, FALSE, TRUE, + ignore_err); + } + + if (ib_table) { +#ifndef __WIN__ + sql_print_warning("Partition table %s opened " + "after converting to lower " + "case. The table may have " + "been moved from a case " + "in-sensitive file system. " + "Please recreate table in " + "the current file system\n", + norm_name); +#else + sql_print_warning("Partition table %s opened " + "after skipping the step to " + "lower case the table name. " + "The table may have been " + "moved from a case sensitive " + "file system. Please " + "recreate table in the " + "current file system\n", + norm_name); +#endif + goto table_opened; + } + } + + if (is_part) { + sql_print_error("Failed to open table %s.\n", + norm_name); + } + + ib_logf(IB_LOG_LEVEL_WARN, + "Cannot open table %s from the internal data " + "dictionary of InnoDB though the .frm file " + "for the table exists. See " + REFMAN "innodb-troubleshooting.html for how " + "you can resolve the problem.", norm_name); + + free_share(share); + my_errno = ENOENT; + + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); + } + +table_opened: + + innobase_copy_frm_flags_from_table_share(ib_table, table->s); + + dict_stats_init(ib_table); + + MONITOR_INC(MONITOR_TABLE_OPEN); + + bool no_tablespace; + + if (dict_table_is_discarded(ib_table)) { + + ib_senderrf(thd, + IB_LOG_LEVEL_WARN, ER_TABLESPACE_DISCARDED, + table->s->table_name.str); + + /* Allow an open because a proper DISCARD should have set + all the flags and index root page numbers to FIL_NULL that + should prevent any DML from running but it should allow DDL + operations. 
*/ + + no_tablespace = false; + + } else if (ib_table->ibd_file_missing) { + + ib_senderrf( + thd, IB_LOG_LEVEL_WARN, + ER_TABLESPACE_MISSING, norm_name); + + /* This means we have no idea what happened to the tablespace + file, best to play it safe. */ + + no_tablespace = true; + } else { + no_tablespace = false; + } + + if (!thd_tablespace_op(thd) && no_tablespace) { + free_share(share); + my_errno = ENOENT; + + dict_table_close(ib_table, FALSE, FALSE); + + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); + } + + prebuilt = row_create_prebuilt(ib_table, table->s->stored_rec_length); + + prebuilt->default_rec = table->s->default_values; + ut_ad(prebuilt->default_rec); + + /* Looks like MySQL-3.23 sometimes has primary key number != 0 */ + primary_key = table->s->primary_key; + key_used_on_scan = primary_key; + + if (!innobase_build_index_translation(table, ib_table, share)) { + sql_print_error("Build InnoDB index translation table for" + " Table %s failed", name); + } + + /* Allocate a buffer for a 'row reference'. A row reference is + a string of bytes of length ref_length which uniquely specifies + a row in our table. Note that MySQL may also compare two row + references for equality by doing a simple memcmp on the strings + of length ref_length! */ + + if (!row_table_got_default_clust_index(ib_table)) { + + prebuilt->clust_index_was_generated = FALSE; + + if (UNIV_UNLIKELY(primary_key >= MAX_KEY)) { + ib_table->dict_frm_mismatch = DICT_FRM_NO_PK; + + /* This mismatch could cause further problems + if not attended, bring this to the user's attention + by printing a warning in addition to log a message + in the errorlog */ + + ib_push_frm_error(thd, ib_table, table, 0, true); + + /* If primary_key >= MAX_KEY, its (primary_key) + value could be out of bound if continue to index + into key_info[] array. Find InnoDB primary index, + and assign its key_length to ref_length. + In addition, since MySQL indexes are sorted starting + with primary index, unique index etc., initialize + ref_length to the first index key length in + case we fail to find InnoDB cluster index. + + Please note, this will not resolve the primary + index mismatch problem, other side effects are + possible if users continue to use the table. + However, we allow this table to be opened so + that user can adopt necessary measures for the + mismatch while still being accessible to the table + date. */ + if (!table->key_info) { + ut_ad(!table->s->keys); + ref_length = 0; + } else { + ref_length = table->key_info[0].key_length; + } + + /* Find corresponding cluster index + key length in MySQL's key_info[] array */ + for (uint i = 0; i < table->s->keys; i++) { + dict_index_t* index; + index = innobase_get_index(i); + if (dict_index_is_clust(index)) { + ref_length = + table->key_info[i].key_length; + } + } + } else { + /* MySQL allocates the buffer for ref. + key_info->key_length includes space for all key + columns + one byte for each column that may be + NULL. ref_length must be as exact as possible to + save space, because all row reference buffers are + allocated based on ref_length. 
*/ + + ref_length = table->key_info[primary_key].key_length; + } + } else { + if (primary_key != MAX_KEY) { + + ib_table->dict_frm_mismatch = DICT_NO_PK_FRM_HAS; + + /* This mismatch could cause further problems + if not attended, bring this to the user attention + by printing a warning in addition to log a message + in the errorlog */ + ib_push_frm_error(thd, ib_table, table, 0, true); + } + + prebuilt->clust_index_was_generated = TRUE; + + ref_length = DATA_ROW_ID_LEN; + + /* If we automatically created the clustered index, then + MySQL does not know about it, and MySQL must NOT be aware + of the index used on scan, to make it avoid checking if we + update the column of the index. That is why we assert below + that key_used_on_scan is the undefined value MAX_KEY. + The column is the row id in the automatical generation case, + and it will never be updated anyway. */ + + if (key_used_on_scan != MAX_KEY) { + sql_print_warning( + "Table %s key_used_on_scan is %lu even " + "though there is no primary key inside " + "InnoDB.", name, (ulong) key_used_on_scan); + } + } + + /* Index block size in InnoDB: used by MySQL in query optimization */ + stats.block_size = UNIV_PAGE_SIZE; + + /* Init table lock structure */ + thr_lock_data_init(&share->lock,&lock,(void*) 0); + + if (prebuilt->table) { + /* We update the highest file format in the system table + space, if this table has higher file format setting. */ + + trx_sys_file_format_max_upgrade( + (const char**) &innobase_file_format_max, + dict_table_get_format(prebuilt->table)); + } + + /* Only if the table has an AUTOINC column. */ + if (prebuilt->table != NULL + && !prebuilt->table->ibd_file_missing + && table->found_next_number_field != NULL) { + dict_table_autoinc_lock(prebuilt->table); + + /* Since a table can already be "open" in InnoDB's internal + data dictionary, we only init the autoinc counter once, the + first time the table is loaded. We can safely reuse the + autoinc value from a previous MySQL open. */ + if (dict_table_autoinc_read(prebuilt->table) == 0) { + + innobase_initialize_autoinc(); + } + + dict_table_autoinc_unlock(prebuilt->table); + } + + info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); + + DBUG_RETURN(0); +} + +UNIV_INTERN +handler* +ha_innobase::clone( +/*===============*/ + const char* name, /*!< in: table name */ + MEM_ROOT* mem_root) /*!< in: memory context */ +{ + ha_innobase* new_handler; + + DBUG_ENTER("ha_innobase::clone"); + + new_handler = static_cast<ha_innobase*>(handler::clone(name, + mem_root)); + if (new_handler) { + DBUG_ASSERT(new_handler->prebuilt != NULL); + + new_handler->prebuilt->select_lock_type + = prebuilt->select_lock_type; + } + + DBUG_RETURN(new_handler); +} + +UNIV_INTERN +uint +ha_innobase::max_supported_key_part_length() const +/*==============================================*/ +{ + /* A table format specific index column length check will be performed + at ha_innobase::add_index() and row_create_index_for_mysql() */ + return(innobase_large_prefix + ? REC_VERSION_56_MAX_INDEX_COL_LEN + : REC_ANTELOPE_MAX_INDEX_COL_LEN - 1); +} + +/******************************************************************//** +Closes a handle to an InnoDB table. 
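max_supported_key_part_length() above switches between two compile-time limits; assuming the usual values behind REC_ANTELOPE_MAX_INDEX_COL_LEN and REC_VERSION_56_MAX_INDEX_COL_LEN, the reported index prefix limit is 767 or 3072 bytes:

    // Sketch with assumed numeric values for the two constants.
    static unsigned max_key_part_len(bool large_prefix_enabled) {
      return large_prefix_enabled ? 3072   // REC_VERSION_56_MAX_INDEX_COL_LEN
                                  : 767;   // REC_ANTELOPE_MAX_INDEX_COL_LEN - 1
    }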
+@return 0 */ +UNIV_INTERN +int +ha_innobase::close() +/*================*/ +{ + THD* thd; + + DBUG_ENTER("ha_innobase::close"); + + thd = ha_thd(); + if (thd != NULL) { + innobase_release_temporary_latches(ht, thd); + } + + row_prebuilt_free(prebuilt, FALSE); + + if (upd_buf != NULL) { + ut_ad(upd_buf_size != 0); + my_free(upd_buf); + upd_buf = NULL; + upd_buf_size = 0; + } + + free_share(share); + + MONITOR_INC(MONITOR_TABLE_CLOSE); + + /* Tell InnoDB server that there might be work for + utility threads: */ + + srv_active_wake_master_thread(); + + DBUG_RETURN(0); +} + +/* The following accessor functions should really be inside MySQL code! */ + +/**************************************************************//** +Gets field offset for a field in a table. +@return offset */ +static inline +uint +get_field_offset( +/*=============*/ + const TABLE* table, /*!< in: MySQL table object */ + const Field* field) /*!< in: MySQL field object */ +{ + return((uint) (field->ptr - table->record[0])); +} + +#ifdef WITH_WSREP +UNIV_INTERN +int +wsrep_innobase_mysql_sort( +/*===============*/ + /* out: str contains sort string */ + int mysql_type, /* in: MySQL type */ + uint charset_number, /* in: number of the charset */ + unsigned char* str, /* in: data field */ + unsigned int str_length, /* in: data field length, + not UNIV_SQL_NULL */ + unsigned int buf_length) /* in: total str buffer length */ + +{ + CHARSET_INFO* charset; + enum_field_types mysql_tp; + int ret_length = str_length; + + DBUG_ASSERT(str_length != UNIV_SQL_NULL); + + mysql_tp = (enum_field_types) mysql_type; + + switch (mysql_tp) { + + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_VARCHAR: + { + uchar tmp_str[REC_VERSION_56_MAX_INDEX_COL_LEN] = {'\0'}; + uint tmp_length = REC_VERSION_56_MAX_INDEX_COL_LEN; + + /* Use the charset number to pick the right charset struct for + the comparison. Since the MySQL function get_charset may be + slow before Bar removes the mutex operation there, we first + look at 2 common charsets directly. 
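The charset resolution described here is a fast path: the two charsets that cover most workloads are matched by number before falling back to the generic lookup that may take a mutex. A stand-alone sketch of the same shape, with placeholder charset numbers and a stand-in for get_charset():

    struct Charset { unsigned number; };

    // Placeholder numbers only; the real code compares against
    // default_charset_info->number and my_charset_latin1.number.
    static const Charset default_cs{33};
    static const Charset latin1_cs{8};

    static const Charset* resolve_charset(unsigned number,
                                          const Charset* (*slow_lookup)(unsigned)) {
      if (number == default_cs.number) return &default_cs;  // common case 1
      if (number == latin1_cs.number)  return &latin1_cs;   // common case 2
      return slow_lookup(number);   // may return nullptr; caller must abort
    }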
*/ + + if (charset_number == default_charset_info->number) { + charset = default_charset_info; + } else if (charset_number == my_charset_latin1.number) { + charset = &my_charset_latin1; + } else { + charset = get_charset(charset_number, MYF(MY_WME)); + + if (charset == NULL) { + sql_print_error("InnoDB needs charset %lu for doing " + "a comparison, but MySQL cannot " + "find that charset.", + (ulong) charset_number); + ut_a(0); + } + } + + ut_a(str_length <= tmp_length); + memcpy(tmp_str, str, str_length); + + tmp_length = charset->coll->strnxfrm(charset, str, str_length, + str_length, tmp_str, + tmp_length, 0); + DBUG_ASSERT(tmp_length <= str_length); + if (wsrep_protocol_version < 3) { + tmp_length = charset->coll->strnxfrm( + charset, str, str_length, + str_length, tmp_str, tmp_length, 0); + DBUG_ASSERT(tmp_length <= str_length); + } else { + /* strnxfrm will expand the destination string, + protocols < 3 truncated the sorted sring + protocols >= 3 gets full sorted sring + */ + tmp_length = charset->coll->strnxfrm( + charset, str, buf_length, + str_length, tmp_str, str_length, 0); + DBUG_ASSERT(tmp_length <= buf_length); + ret_length = tmp_length; + } + + break; + } + case MYSQL_TYPE_DECIMAL : + case MYSQL_TYPE_TINY : + case MYSQL_TYPE_SHORT : + case MYSQL_TYPE_LONG : + case MYSQL_TYPE_FLOAT : + case MYSQL_TYPE_DOUBLE : + case MYSQL_TYPE_NULL : + case MYSQL_TYPE_TIMESTAMP : + case MYSQL_TYPE_LONGLONG : + case MYSQL_TYPE_INT24 : + case MYSQL_TYPE_DATE : + case MYSQL_TYPE_TIME : + case MYSQL_TYPE_DATETIME : + case MYSQL_TYPE_YEAR : + case MYSQL_TYPE_NEWDATE : + case MYSQL_TYPE_NEWDECIMAL : + case MYSQL_TYPE_ENUM : + case MYSQL_TYPE_SET : + case MYSQL_TYPE_GEOMETRY : + break; + default: + break; + } + + return ret_length; +} +#endif /* WITH_WSREP */ + +/*************************************************************//** +InnoDB uses this function to compare two data fields for which the data type +is such that we must use MySQL code to compare them. NOTE that the prototype +of this function is in rem0cmp.cc in InnoDB source code! If you change this +function, remember to update the prototype there! +@return 1, 0, -1, if a is greater, equal, less than b, respectively */ +UNIV_INTERN +int +innobase_mysql_cmp( +/*===============*/ + int mysql_type, /*!< in: MySQL type */ + uint charset_number, /*!< in: number of the charset */ + const unsigned char* a, /*!< in: data field */ + unsigned int a_length, /*!< in: data field length, + not UNIV_SQL_NULL */ + const unsigned char* b, /*!< in: data field */ + unsigned int b_length) /*!< in: data field length, + not UNIV_SQL_NULL */ +{ + CHARSET_INFO* charset; + enum_field_types mysql_tp; + int ret; + + DBUG_ASSERT(a_length != UNIV_SQL_NULL); + DBUG_ASSERT(b_length != UNIV_SQL_NULL); + + mysql_tp = (enum_field_types) mysql_type; + + switch (mysql_tp) { + + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_VARCHAR: + /* Use the charset number to pick the right charset struct for + the comparison. Since the MySQL function get_charset may be + slow before Bar removes the mutex operation there, we first + look at 2 common charsets directly. 
*/ + + if (charset_number == default_charset_info->number) { + charset = default_charset_info; + } else if (charset_number == my_charset_latin1.number) { + charset = &my_charset_latin1; + } else { + charset = get_charset(charset_number, MYF(MY_WME)); + + if (charset == NULL) { + sql_print_error("InnoDB needs charset %lu for doing " + "a comparison, but MySQL cannot " + "find that charset.", + (ulong) charset_number); + ut_a(0); + } + } + + /* Starting from 4.1.3, we use strnncollsp() in comparisons of + non-latin1_swedish_ci strings. NOTE that the collation order + changes then: 'b\0\0...' is ordered BEFORE 'b ...'. Users + having indexes on such data need to rebuild their tables! */ + + ret = charset->coll->strnncollsp( + charset, a, a_length, b, b_length, 0); + + if (ret < 0) { + return(-1); + } else if (ret > 0) { + return(1); + } else { + return(0); + } + default: + ut_error; + } + + return(0); +} + + +/*************************************************************//** +Get the next token from the given string and store it in *token. */ +UNIV_INTERN +CHARSET_INFO* +innobase_get_fts_charset( +/*=====================*/ + int mysql_type, /*!< in: MySQL type */ + uint charset_number) /*!< in: number of the charset */ +{ + enum_field_types mysql_tp; + CHARSET_INFO* charset; + + mysql_tp = (enum_field_types) mysql_type; + + switch (mysql_tp) { + + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_VARCHAR: + /* Use the charset number to pick the right charset struct for + the comparison. Since the MySQL function get_charset may be + slow before Bar removes the mutex operation there, we first + look at 2 common charsets directly. */ + + if (charset_number == default_charset_info->number) { + charset = default_charset_info; + } else if (charset_number == my_charset_latin1.number) { + charset = &my_charset_latin1; + } else { + charset = get_charset(charset_number, MYF(MY_WME)); + + if (charset == NULL) { + sql_print_error("InnoDB needs charset %lu for doing " + "a comparison, but MySQL cannot " + "find that charset.", + (ulong) charset_number); + ut_a(0); + } + } + break; + default: + ut_error; + } + + return(charset); +} + +/*************************************************************//** +InnoDB uses this function to compare two data fields for which the data type +is such that we must use MySQL code to compare them. NOTE that the prototype +of this function is in rem0cmp.c in InnoDB source code! If you change this +function, remember to update the prototype there! +@return 1, 0, -1, if a is greater, equal, less than b, respectively */ +UNIV_INTERN +int +innobase_mysql_cmp_prefix( +/*======================*/ + int mysql_type, /*!< in: MySQL type */ + uint charset_number, /*!< in: number of the charset */ + const unsigned char* a, /*!< in: data field */ + unsigned int a_length, /*!< in: data field length, + not UNIV_SQL_NULL */ + const unsigned char* b, /*!< in: data field */ + unsigned int b_length) /*!< in: data field length, + not UNIV_SQL_NULL */ +{ + CHARSET_INFO* charset; + int result; + + charset = innobase_get_fts_charset(mysql_type, charset_number); + + result = ha_compare_text(charset, (uchar*) a, a_length, + (uchar*) b, b_length, 1, 0); + + return(result); +} +/******************************************************************//** +compare two character string according to their charset. 
*/ +UNIV_INTERN +int +innobase_fts_text_cmp( +/*==================*/ + const void* cs, /*!< in: Character set */ + const void* p1, /*!< in: key */ + const void* p2) /*!< in: node */ +{ + const CHARSET_INFO* charset = (const CHARSET_INFO*) cs; + const fts_string_t* s1 = (const fts_string_t*) p1; + const fts_string_t* s2 = (const fts_string_t*) p2; + + return(ha_compare_text( + charset, s1->f_str, static_cast<uint>(s1->f_len), + s2->f_str, static_cast<uint>(s2->f_len), 0, 0)); +} +/******************************************************************//** +compare two character string case insensitively according to their charset. */ +UNIV_INTERN +int +innobase_fts_text_case_cmp( +/*=======================*/ + const void* cs, /*!< in: Character set */ + const void* p1, /*!< in: key */ + const void* p2) /*!< in: node */ +{ + const CHARSET_INFO* charset = (const CHARSET_INFO*) cs; + const fts_string_t* s1 = (const fts_string_t*) p1; + const fts_string_t* s2 = (const fts_string_t*) p2; + ulint newlen; + + my_casedn_str(charset, (char*) s2->f_str); + + newlen = strlen((const char*) s2->f_str); + + return(ha_compare_text( + charset, s1->f_str, static_cast<uint>(s1->f_len), + s2->f_str, static_cast<uint>(newlen), 0, 0)); +} +/******************************************************************//** +Get the first character's code position for FTS index partition. */ +UNIV_INTERN +ulint +innobase_strnxfrm( +/*==============*/ + const CHARSET_INFO* + cs, /*!< in: Character set */ + const uchar* str, /*!< in: string */ + const ulint len) /*!< in: string length */ +{ + uchar mystr[2]; + ulint value; + + if (!str || len == 0) { + return(0); + } + + my_strnxfrm(cs, (uchar*) mystr, 2, str, len); + + value = mach_read_from_2(mystr); + + if (value > 255) { + value = value / 256; + } + + return(value); +} + +/******************************************************************//** +compare two character string according to their charset. */ +UNIV_INTERN +int +innobase_fts_text_cmp_prefix( +/*=========================*/ + const void* cs, /*!< in: Character set */ + const void* p1, /*!< in: prefix key */ + const void* p2) /*!< in: value to compare */ +{ + const CHARSET_INFO* charset = (const CHARSET_INFO*) cs; + const fts_string_t* s1 = (const fts_string_t*) p1; + const fts_string_t* s2 = (const fts_string_t*) p2; + int result; + + result = ha_compare_text( + charset, s2->f_str, static_cast<uint>(s2->f_len), + s1->f_str, static_cast<uint>(s1->f_len), 1, 0); + + /* We switched s1, s2 position in ha_compare_text. So we need + to negate the result */ + return(-result); +} + +/******************************************************************//** +Makes all characters in a string lower case. */ +UNIV_INTERN +size_t +innobase_fts_casedn_str( +/*====================*/ + CHARSET_INFO* cs, /*!< in: Character set */ + char* src, /*!< in: string to put in lower case */ + size_t src_len,/*!< in: input string length */ + char* dst, /*!< in: buffer for result string */ + size_t dst_len)/*!< in: buffer size */ +{ + if (cs->casedn_multiply == 1) { + memcpy(dst, src, src_len); + dst[src_len] = 0; + my_casedn_str(cs, dst); + + return(strlen(dst)); + } else { + return(cs->cset->casedn(cs, src, src_len, dst, dst_len)); + } +} + +#define true_word_char(c, ch) ((c) & (_MY_U | _MY_L | _MY_NMR) || (ch) == '_') + +#define misc_word_char(X) 0 + +/*************************************************************//** +Get the next token from the given string and store it in *token. 
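The tokenizer introduced here scans forward to the first word character and then collects word characters until a separator. An ASCII-only sketch of that loop (the real function is charset aware and steps one multi-byte character at a time via ctype()):

    #include <cctype>
    #include <cstddef>
    #include <string>

    // Returns the number of characters consumed from 'doc'; *token receives
    // the next word, or stays empty when only separators remain.
    static size_t next_token(const char* doc, const char* end, std::string* token) {
      const char* p = doc;
      auto is_word = [](char c) {
        return std::isalnum(static_cast<unsigned char>(c)) || c == '_';
      };
      while (p < end && !is_word(*p)) ++p;   // skip separators
      const char* start = p;
      while (p < end && is_word(*p)) ++p;    // collect the word
      token->assign(start, p);
      return static_cast<size_t>(p - doc);
    }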
+It is mostly copied from MyISAM's doc parsing function ft_simple_get_word() +@return length of string processed */ +UNIV_INTERN +ulint +innobase_mysql_fts_get_token( +/*=========================*/ + CHARSET_INFO* cs, /*!< in: Character set */ + const byte* start, /*!< in: start of text */ + const byte* end, /*!< in: one character past end of + text */ + fts_string_t* token, /*!< out: token's text */ + ulint* offset) /*!< out: offset to token, + measured as characters from + 'start' */ +{ + int mbl; + const uchar* doc = start; + + ut_a(cs); + + token->f_n_char = token->f_len = 0; + token->f_str = NULL; + + for (;;) { + + if (doc >= end) { + return(doc - start); + } + + int ctype; + + mbl = cs->cset->ctype( + cs, &ctype, doc, (const uchar*) end); + + if (true_word_char(ctype, *doc)) { + break; + } + + doc += mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1); + } + + ulint mwc = 0; + ulint length = 0; + + token->f_str = const_cast<byte*>(doc); + + while (doc < end) { + + int ctype; + + mbl = cs->cset->ctype( + cs, &ctype, (uchar*) doc, (uchar*) end); + if (true_word_char(ctype, *doc)) { + mwc = 0; + } else if (!misc_word_char(*doc) || mwc) { + break; + } else { + ++mwc; + } + + ++length; + + doc += mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1); + } + + token->f_len = (uint) (doc - token->f_str) - mwc; + token->f_n_char = length; + + return(doc - start); +} + +/**************************************************************//** +Converts a MySQL type to an InnoDB type. Note that this function returns +the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1 +VARCHAR and the new true VARCHAR in >= 5.0.3 by the 'prtype'. +@return DATA_BINARY, DATA_VARCHAR, ... */ +UNIV_INTERN +ulint +get_innobase_type_from_mysql_type( +/*==============================*/ + ulint* unsigned_flag, /*!< out: DATA_UNSIGNED if an + 'unsigned type'; + at least ENUM and SET, + and unsigned integer + types are 'unsigned types' */ + const void* f) /*!< in: MySQL Field */ +{ + const class Field* field = reinterpret_cast<const class Field*>(f); + + /* The following asserts try to check that the MySQL type code fits in + 8 bits: this is used in ibuf and also when DATA_NOT_NULL is ORed to + the type */ + + DBUG_ASSERT((ulint)MYSQL_TYPE_STRING < 256); + DBUG_ASSERT((ulint)MYSQL_TYPE_VAR_STRING < 256); + DBUG_ASSERT((ulint)MYSQL_TYPE_DOUBLE < 256); + DBUG_ASSERT((ulint)MYSQL_TYPE_FLOAT < 256); + DBUG_ASSERT((ulint)MYSQL_TYPE_DECIMAL < 256); + + if (field->flags & UNSIGNED_FLAG) { + + *unsigned_flag = DATA_UNSIGNED; + } else { + *unsigned_flag = 0; + } + + if (field->real_type() == MYSQL_TYPE_ENUM + || field->real_type() == MYSQL_TYPE_SET) { + + /* MySQL has field->type() a string type for these, but the + data is actually internally stored as an unsigned integer + code! 
*/ + + *unsigned_flag = DATA_UNSIGNED; /* MySQL has its own unsigned + flag set to zero, even though + internally this is an unsigned + integer type */ + return(DATA_INT); + } + + switch (field->type()) { + /* NOTE that we only allow string types in DATA_MYSQL and + DATA_VARMYSQL */ + case MYSQL_TYPE_VAR_STRING: /* old <= 4.1 VARCHAR */ + case MYSQL_TYPE_VARCHAR: /* new >= 5.0.3 true VARCHAR */ + if (field->binary()) { + return(DATA_BINARY); + } else if (field->charset() == &my_charset_latin1) { + return(DATA_VARCHAR); + } else { + return(DATA_VARMYSQL); + } + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_STRING: + if (field->binary()) { + return(DATA_FIXBINARY); + } else if (field->charset() == &my_charset_latin1) { + return(DATA_CHAR); + } else { + return(DATA_MYSQL); + } + case MYSQL_TYPE_NEWDECIMAL: + return(DATA_FIXBINARY); + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_TINY: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_INT24: + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_YEAR: + case MYSQL_TYPE_NEWDATE: + return(DATA_INT); + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_DATETIME: + if (field->key_type() == HA_KEYTYPE_BINARY) + return(DATA_FIXBINARY); + else + return(DATA_INT); + case MYSQL_TYPE_FLOAT: + return(DATA_FLOAT); + case MYSQL_TYPE_DOUBLE: + return(DATA_DOUBLE); + case MYSQL_TYPE_DECIMAL: + return(DATA_DECIMAL); + case MYSQL_TYPE_GEOMETRY: + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_LONG_BLOB: + return(DATA_BLOB); + case MYSQL_TYPE_NULL: + /* MySQL currently accepts "NULL" datatype, but will + reject such datatype in the next release. We will cope + with it and not trigger assertion failure in 5.1 */ + break; + default: + ut_error; + } + + return(0); +} + +/*******************************************************************//** +Writes an unsigned integer value < 64k to 2 bytes, in the little-endian +storage format. */ +static inline +void +innobase_write_to_2_little_endian( +/*==============================*/ + byte* buf, /*!< in: where to store */ + ulint val) /*!< in: value to write, must be < 64k */ +{ + ut_a(val < 256 * 256); + + buf[0] = (byte)(val & 0xFF); + buf[1] = (byte)(val / 256); +} + +/*******************************************************************//** +Reads an unsigned integer value < 64k from 2 bytes, in the little-endian +storage format. +@return value */ +static inline +uint +innobase_read_from_2_little_endian( +/*===============================*/ + const uchar* buf) /*!< in: from where to read */ +{ + return((uint) ((ulint)(buf[0]) + 256 * ((ulint)(buf[1])))); +} + +#ifdef WITH_WSREP +/*******************************************************************//** +Stores a key value for a row to a buffer. 
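The two helpers just above encode and decode an unsigned value below 64k in two little-endian bytes; a tiny self-contained round trip of the same encoding:

    #include <cassert>
    #include <cstdint>

    static void write2_le(uint8_t* buf, uint32_t val) {
      assert(val < 256u * 256u);                   // must fit in 16 bits
      buf[0] = static_cast<uint8_t>(val & 0xFF);   // low byte first
      buf[1] = static_cast<uint8_t>(val >> 8);     // then high byte
    }

    static uint32_t read2_le(const uint8_t* buf) {
      return buf[0] + 256u * buf[1];
    }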
+@return key value length as stored in buff */ +UNIV_INTERN +uint +wsrep_store_key_val_for_row( +/*===============================*/ + THD* thd, + TABLE* table, + uint keynr, /*!< in: key number */ + char* buff, /*!< in/out: buffer for the key value (in MySQL + format) */ + uint buff_len,/*!< in: buffer length */ + const uchar* record, + ibool* key_is_null)/*!< out: full key was null */ +{ + KEY* key_info = table->key_info + keynr; + KEY_PART_INFO* key_part = key_info->key_part; + KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts; + char* buff_start = buff; + enum_field_types mysql_type; + Field* field; + uint buff_space = buff_len; + + DBUG_ENTER("wsrep_store_key_val_for_row"); + + memset(buff, 0, buff_len); + *key_is_null = TRUE; + + for (; key_part != end; key_part++) { + + uchar sorted[REC_VERSION_56_MAX_INDEX_COL_LEN] = {'\0'}; + ibool part_is_null = FALSE; + + if (key_part->null_bit) { + if (buff_space > 0) { + if (record[key_part->null_offset] + & key_part->null_bit) { + *buff = 1; + part_is_null = TRUE; + } else { + *buff = 0; + } + buff++; + buff_space--; + } else { + fprintf (stderr, "WSREP: key truncated: %s\n", + wsrep_thd_query(thd)); + } + } + if (!part_is_null) *key_is_null = FALSE; + + field = key_part->field; + mysql_type = field->type(); + + if (mysql_type == MYSQL_TYPE_VARCHAR) { + /* >= 5.0.3 true VARCHAR */ + ulint lenlen; + ulint len; + const byte* data; + ulint key_len; + ulint true_len; + const CHARSET_INFO* cs; + int error=0; + + key_len = key_part->length; + + if (part_is_null) { + true_len = key_len + 2; + if (true_len > buff_space) { + fprintf (stderr, + "WSREP: key truncated: %s\n", + wsrep_thd_query(thd)); + true_len = buff_space; + } + buff += true_len; + buff_space -= true_len; + continue; + } + cs = field->charset(); + + lenlen = (ulint) + (((Field_varstring*)field)->length_bytes); + + data = row_mysql_read_true_varchar(&len, + (byte*) (record + + (ulint)get_field_offset(table, field)), + lenlen); + + true_len = len; + + /* For multi byte character sets we need to calculate + the true length of the key */ + + if (len > 0 && cs->mbmaxlen > 1) { + true_len = (ulint) cs->cset->well_formed_len(cs, + (const char *) data, + (const char *) data + len, + (uint) (key_len / + cs->mbmaxlen), + &error); + } + + /* In a column prefix index, we may need to truncate + the stored value: */ + + if (true_len > key_len) { + true_len = key_len; + } + + memcpy(sorted, data, true_len); + true_len = wsrep_innobase_mysql_sort( + mysql_type, cs->number, sorted, true_len, + REC_VERSION_56_MAX_INDEX_COL_LEN); + + if (wsrep_protocol_version > 1) { + /* Note that we always reserve the maximum possible + length of the true VARCHAR in the key value, though + only len first bytes after the 2 length bytes contain + actual data. The rest of the space was reset to zero + in the bzero() call above. */ + if (true_len > buff_space) { + fprintf (stderr, + "WSREP: key truncated: %s\n", + wsrep_thd_query(thd)); + true_len = buff_space; + } + memcpy(buff, sorted, true_len); + buff += true_len; + buff_space -= true_len; + } else { + buff += key_len; + } + } else if (mysql_type == MYSQL_TYPE_TINY_BLOB + || mysql_type == MYSQL_TYPE_MEDIUM_BLOB + || mysql_type == MYSQL_TYPE_BLOB + || mysql_type == MYSQL_TYPE_LONG_BLOB + /* MYSQL_TYPE_GEOMETRY data is treated + as BLOB data in innodb. 
*/ + || mysql_type == MYSQL_TYPE_GEOMETRY) { + + const CHARSET_INFO* cs; + ulint key_len; + ulint true_len; + int error=0; + ulint blob_len; + const byte* blob_data; + + ut_a(key_part->key_part_flag & HA_PART_KEY_SEG); + + key_len = key_part->length; + + if (part_is_null) { + true_len = key_len + 2; + if (true_len > buff_space) { + fprintf (stderr, + "WSREP: key truncated: %s\n", + wsrep_thd_query(thd)); + true_len = buff_space; + } + buff += true_len; + buff_space -= true_len; + + continue; + } + + cs = field->charset(); + + blob_data = row_mysql_read_blob_ref(&blob_len, + (byte*) (record + + (ulint)get_field_offset(table, field)), + (ulint) field->pack_length()); + + true_len = blob_len; + + ut_a(get_field_offset(table, field) + == key_part->offset); + + /* For multi byte character sets we need to calculate + the true length of the key */ + + if (blob_len > 0 && cs->mbmaxlen > 1) { + true_len = (ulint) cs->cset->well_formed_len(cs, + (const char *) blob_data, + (const char *) blob_data + + blob_len, + (uint) (key_len / + cs->mbmaxlen), + &error); + } + + /* All indexes on BLOB and TEXT are column prefix + indexes, and we may need to truncate the data to be + stored in the key value: */ + + if (true_len > key_len) { + true_len = key_len; + } + + memcpy(sorted, blob_data, true_len); + true_len = wsrep_innobase_mysql_sort( + mysql_type, cs->number, sorted, true_len, + REC_VERSION_56_MAX_INDEX_COL_LEN); + + + /* Note that we always reserve the maximum possible + length of the BLOB prefix in the key value. */ + if (wsrep_protocol_version > 1) { + if (true_len > buff_space) { + fprintf (stderr, + "WSREP: key truncated: %s\n", + wsrep_thd_query(thd)); + true_len = buff_space; + } + buff += true_len; + buff_space -= true_len; + } else { + buff += key_len; + } + memcpy(buff, sorted, true_len); + } else { + /* Here we handle all other data types except the + true VARCHAR, BLOB and TEXT. Note that the column + value we store may be also in a column prefix + index. */ + + const CHARSET_INFO* cs = NULL; + ulint true_len; + ulint key_len; + const uchar* src_start; + int error=0; + enum_field_types real_type; + + key_len = key_part->length; + + if (part_is_null) { + true_len = key_len; + if (true_len > buff_space) { + fprintf (stderr, + "WSREP: key truncated: %s\n", + wsrep_thd_query(thd)); + true_len = buff_space; + } + buff += true_len; + buff_space -= true_len; + + continue; + } + + src_start = record + key_part->offset; + real_type = field->real_type(); + true_len = key_len; + + /* Character set for the field is defined only + to fields whose type is string and real field + type is not enum or set. For these fields check + if character set is multi byte. 
*/ + + if (real_type != MYSQL_TYPE_ENUM + && real_type != MYSQL_TYPE_SET + && ( mysql_type == MYSQL_TYPE_VAR_STRING + || mysql_type == MYSQL_TYPE_STRING)) { + + cs = field->charset(); + + /* For multi byte character sets we need to + calculate the true length of the key */ + + if (key_len > 0 && cs->mbmaxlen > 1) { + + true_len = (ulint) + cs->cset->well_formed_len(cs, + (const char *)src_start, + (const char *)src_start + + key_len, + (uint) (key_len / + cs->mbmaxlen), + &error); + } + memcpy(sorted, src_start, true_len); + true_len = wsrep_innobase_mysql_sort( + mysql_type, cs->number, sorted, true_len, + REC_VERSION_56_MAX_INDEX_COL_LEN); + + if (true_len > buff_space) { + fprintf (stderr, + "WSREP: key truncated: %s\n", + wsrep_thd_query(thd)); + true_len = buff_space; + } + memcpy(buff, sorted, true_len); + } else { + memcpy(buff, src_start, true_len); + } + buff += true_len; + buff_space -= true_len; + } + } + + ut_a(buff <= buff_start + buff_len); + + DBUG_RETURN((uint)(buff - buff_start)); +} +#endif /* WITH_WSREP */ + +/*******************************************************************//** +Stores a key value for a row to a buffer. +@return key value length as stored in buff */ +UNIV_INTERN +uint +ha_innobase::store_key_val_for_row( +/*===============================*/ + uint keynr, /*!< in: key number */ + char* buff, /*!< in/out: buffer for the key value (in MySQL + format) */ + uint buff_len,/*!< in: buffer length */ + const uchar* record)/*!< in: row in MySQL format */ +{ + KEY* key_info = table->key_info + keynr; + KEY_PART_INFO* key_part = key_info->key_part; + KEY_PART_INFO* end = + key_part + key_info->user_defined_key_parts; + char* buff_start = buff; + enum_field_types mysql_type; + Field* field; + ibool is_null; + + DBUG_ENTER("store_key_val_for_row"); + + /* The format for storing a key field in MySQL is the following: + + 1. If the column can be NULL, then in the first byte we put 1 if the + field value is NULL, 0 otherwise. + + 2. If the column is of a BLOB type (it must be a column prefix field + in this case), then we put the length of the data in the field to the + next 2 bytes, in the little-endian format. If the field is SQL NULL, + then these 2 bytes are set to 0. Note that the length of data in the + field is <= column prefix length. + + 3. In a column prefix field, prefix_len next bytes are reserved for + data. In a normal field the max field length next bytes are reserved + for data. For a VARCHAR(n) the max field length is n. If the stored + value is the SQL NULL then these data bytes are set to 0. + + 4. We always use a 2 byte length for a true >= 5.0.3 VARCHAR. Note that + in the MySQL row format, the length is stored in 1 or 2 bytes, + depending on the maximum allowed length. But in the MySQL key value + format, the length always takes 2 bytes. + + We have to zero-fill the buffer so that MySQL is able to use a + simple memcmp to compare two key values to determine if they are + equal. MySQL does this to compare contents of two 'ref' values. 
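The key image described in this comment is deliberately memcmp-friendly: a NULL indicator byte when the column is nullable, a fixed 2-byte little-endian length for true VARCHARs and BLOB prefixes, and a zero-filled, fixed-width data area. A hedged, self-contained illustration of that layout for one nullable VARCHAR column:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Layout: [null byte][2-byte LE length][key_len data bytes, zero padded].
    static std::vector<uint8_t> varchar_key_image(const char* val, size_t key_len) {
      std::vector<uint8_t> buf(1 + 2 + key_len, 0);   // zero-fill up front
      if (val == nullptr) {                           // SQL NULL
        buf[0] = 1;
        return buf;                                   // length and data stay 0
      }
      size_t len = std::min(std::strlen(val), key_len);  // column prefix truncation
      buf[1] = static_cast<uint8_t>(len & 0xFF);
      buf[2] = static_cast<uint8_t>(len >> 8);
      std::memcpy(buf.data() + 3, val, len);
      return buf;
    }

Because every unused byte is zero, two such images of equal key_len can be compared with a plain memcmp, which is exactly what the comment says MySQL relies on for 'ref' values.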
*/ + + memset(buff, 0, buff_len); + + for (; key_part != end; key_part++) { + is_null = FALSE; + + if (key_part->null_bit) { + if (record[key_part->null_offset] + & key_part->null_bit) { + *buff = 1; + is_null = TRUE; + } else { + *buff = 0; + } + buff++; + } + + field = key_part->field; + mysql_type = field->type(); + + if (mysql_type == MYSQL_TYPE_VARCHAR) { + /* >= 5.0.3 true VARCHAR */ + ulint lenlen; + ulint len; + const byte* data; + ulint key_len; + ulint true_len; + const CHARSET_INFO* cs; + int error=0; + + key_len = key_part->length; + + if (is_null) { + buff += key_len + 2; + + continue; + } + cs = field->charset(); + + lenlen = (ulint) + (((Field_varstring*) field)->length_bytes); + + data = row_mysql_read_true_varchar(&len, + (byte*) (record + + (ulint) get_field_offset(table, field)), + lenlen); + + true_len = len; + + /* For multi byte character sets we need to calculate + the true length of the key */ + + if (len > 0 && cs->mbmaxlen > 1) { + true_len = (ulint) cs->cset->well_formed_len(cs, + (const char*) data, + (const char*) data + len, + (uint) (key_len / cs->mbmaxlen), + &error); + } + + /* In a column prefix index, we may need to truncate + the stored value: */ + + if (true_len > key_len) { + true_len = key_len; + } + + /* The length in a key value is always stored in 2 + bytes */ + + row_mysql_store_true_var_len((byte*) buff, true_len, 2); + buff += 2; + + memcpy(buff, data, true_len); + + /* Note that we always reserve the maximum possible + length of the true VARCHAR in the key value, though + only len first bytes after the 2 length bytes contain + actual data. The rest of the space was reset to zero + in the memset() call above. */ + + buff += key_len; + + } else if (mysql_type == MYSQL_TYPE_TINY_BLOB + || mysql_type == MYSQL_TYPE_MEDIUM_BLOB + || mysql_type == MYSQL_TYPE_BLOB + || mysql_type == MYSQL_TYPE_LONG_BLOB + /* MYSQL_TYPE_GEOMETRY data is treated + as BLOB data in innodb. */ + || mysql_type == MYSQL_TYPE_GEOMETRY) { + + const CHARSET_INFO* cs; + ulint key_len; + ulint true_len; + int error=0; + ulint blob_len; + const byte* blob_data; + + ut_a(key_part->key_part_flag & HA_PART_KEY_SEG); + + key_len = key_part->length; + + if (is_null) { + buff += key_len + 2; + + continue; + } + + cs = field->charset(); + + blob_data = row_mysql_read_blob_ref(&blob_len, + (byte*) (record + + (ulint) get_field_offset(table, field)), + (ulint) field->pack_length()); + + true_len = blob_len; + + ut_a(get_field_offset(table, field) + == key_part->offset); + + /* For multi byte character sets we need to calculate + the true length of the key */ + + if (blob_len > 0 && cs->mbmaxlen > 1) { + true_len = (ulint) cs->cset->well_formed_len(cs, + (const char*) blob_data, + (const char*) blob_data + + blob_len, + (uint) (key_len / cs->mbmaxlen), + &error); + } + + /* All indexes on BLOB and TEXT are column prefix + indexes, and we may need to truncate the data to be + stored in the key value: */ + + if (true_len > key_len) { + true_len = key_len; + } + + /* MySQL reserves 2 bytes for the length and the + storage of the number is little-endian */ + + innobase_write_to_2_little_endian( + (byte*) buff, true_len); + buff += 2; + + memcpy(buff, blob_data, true_len); + + /* Note that we always reserve the maximum possible + length of the BLOB prefix in the key value. */ + + buff += key_len; + } else { + /* Here we handle all other data types except the + true VARCHAR, BLOB and TEXT. Note that the column + value we store may be also in a column prefix + index. 
*/ + + const CHARSET_INFO* cs = NULL; + ulint true_len; + ulint key_len; + const uchar* src_start; + int error=0; + enum_field_types real_type; + + key_len = key_part->length; + + if (is_null) { + buff += key_len; + + continue; + } + + src_start = record + key_part->offset; + real_type = field->real_type(); + true_len = key_len; + + /* Character set for the field is defined only + to fields whose type is string and real field + type is not enum or set. For these fields check + if character set is multi byte. */ + + if (real_type != MYSQL_TYPE_ENUM + && real_type != MYSQL_TYPE_SET + && ( mysql_type == MYSQL_TYPE_VAR_STRING + || mysql_type == MYSQL_TYPE_STRING)) { + + cs = field->charset(); + + /* For multi byte character sets we need to + calculate the true length of the key */ + + if (key_len > 0 && cs->mbmaxlen > 1) { + + true_len = (ulint) + cs->cset->well_formed_len(cs, + (const char*) src_start, + (const char*) src_start + + key_len, + (uint) (key_len + / cs->mbmaxlen), + &error); + } + } + + memcpy(buff, src_start, true_len); + buff += true_len; + + /* Pad the unused space with spaces. */ + + if (true_len < key_len) { + ulint pad_len = key_len - true_len; + ut_a(cs != NULL); + ut_a(!(pad_len % cs->mbminlen)); + + cs->cset->fill(cs, buff, pad_len, + 0x20 /* space */); + buff += pad_len; + } + } + } + + ut_a(buff <= buff_start + buff_len); + + DBUG_RETURN((uint)(buff - buff_start)); +} + +/**************************************************************//** +Determines if a field is needed in a prebuilt struct 'template'. +@return field to use, or NULL if the field is not needed */ +static +const Field* +build_template_needs_field( +/*=======================*/ + ibool index_contains, /*!< in: + dict_index_contains_col_or_prefix( + index, i) */ + ibool read_just_key, /*!< in: TRUE when MySQL calls + ha_innobase::extra with the + argument HA_EXTRA_KEYREAD; it is enough + to read just columns defined in + the index (i.e., no read of the + clustered index record necessary) */ + ibool fetch_all_in_key, + /*!< in: true=fetch all fields in + the index */ + ibool fetch_primary_key_cols, + /*!< in: true=fetch the + primary key columns */ + dict_index_t* index, /*!< in: InnoDB index to use */ + const TABLE* table, /*!< in: MySQL table object */ + ulint i, /*!< in: field index in InnoDB table */ + ulint sql_idx) /*!< in: field index in SQL table */ +{ + const Field* field = table->field[sql_idx]; + + ut_ad(index_contains == dict_index_contains_col_or_prefix(index, i)); + + if (!index_contains) { + if (read_just_key) { + /* If this is a 'key read', we do not need + columns that are not in the key */ + + return(NULL); + } + } else if (fetch_all_in_key) { + /* This field is needed in the query */ + + return(field); + } + + if (bitmap_is_set(table->read_set, static_cast<uint>(sql_idx)) + || bitmap_is_set(table->write_set, static_cast<uint>(sql_idx))) { + /* This field is needed in the query */ + + return(field); + } + + if (fetch_primary_key_cols + && dict_table_col_in_clustered_key(index->table, i)) { + /* This field is needed in the query */ + + return(field); + } + + /* This field is not needed in the query, skip it */ + + return(NULL); +} + +/**************************************************************//** +Determines if a field is needed in a prebuilt struct 'template'. 
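build_template_needs_field() above reduces to a short decision: a column is materialised in the row template only if the key read, the read/write bitmaps, or the primary-key fetch mode can actually observe it. A boiled-down restatement of that decision, with flattened inputs:

    struct FieldCtx {
      bool index_contains_col;
      bool read_just_key;
      bool fetch_all_in_key;
      bool fetch_primary_key_cols;
      bool in_read_or_write_set;
      bool in_clustered_key;
    };

    static bool field_needed(const FieldCtx& c) {
      if (!c.index_contains_col && c.read_just_key)
        return false;                             // key read, column not in key
      if (c.index_contains_col && c.fetch_all_in_key)
        return true;                              // fetch every indexed column
      if (c.in_read_or_write_set)
        return true;                              // the query reads or writes it
      if (c.fetch_primary_key_cols && c.in_clustered_key)
        return true;                              // needed to build the PK
      return false;                               // skip this column
    }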
+@return whether the field is needed for index condition pushdown */ +inline +bool +build_template_needs_field_in_icp( +/*==============================*/ + const dict_index_t* index, /*!< in: InnoDB index */ + const row_prebuilt_t* prebuilt,/*!< in: row fetch template */ + bool contains,/*!< in: whether the index contains + column i */ + ulint i) /*!< in: column number */ +{ + ut_ad(contains == dict_index_contains_col_or_prefix(index, i)); + + return(index == prebuilt->index + ? contains + : dict_index_contains_col_or_prefix(prebuilt->index, i)); +} + +/**************************************************************//** +Adds a field to a prebuilt struct 'template'. +@return the field template */ +static +mysql_row_templ_t* +build_template_field( +/*=================*/ + row_prebuilt_t* prebuilt, /*!< in/out: template */ + dict_index_t* clust_index, /*!< in: InnoDB clustered index */ + dict_index_t* index, /*!< in: InnoDB index to use */ + TABLE* table, /*!< in: MySQL table object */ + const Field* field, /*!< in: field in MySQL table */ + ulint i) /*!< in: field index in InnoDB table */ +{ + mysql_row_templ_t* templ; + const dict_col_t* col; + + //ut_ad(field == table->field[i]); + ut_ad(clust_index->table == index->table); + + col = dict_table_get_nth_col(index->table, i); + + templ = prebuilt->mysql_template + prebuilt->n_template++; + UNIV_MEM_INVALID(templ, sizeof *templ); + templ->col_no = i; + templ->clust_rec_field_no = dict_col_get_clust_pos(col, clust_index); + + /* If clustered index record field is not found, lets print out + field names and all the rest to understand why field is not found. */ + if (templ->clust_rec_field_no == ULINT_UNDEFINED) { + const char* tb_col_name = dict_table_get_col_name(clust_index->table, i); + dict_field_t* field=NULL; + size_t size = 0; + + for(ulint j=0; j < clust_index->n_user_defined_cols; j++) { + dict_field_t* ifield = &(clust_index->fields[j]); + if (ifield && !memcmp(tb_col_name, ifield->name, + strlen(tb_col_name))) { + field = ifield; + break; + } + } + + ib_logf(IB_LOG_LEVEL_INFO, + "Looking for field %lu name %s from table %s", + i, + (tb_col_name ? tb_col_name : "NULL"), + clust_index->table->name); + + + for(ulint j=0; j < clust_index->n_user_defined_cols; j++) { + dict_field_t* ifield = &(clust_index->fields[j]); + ib_logf(IB_LOG_LEVEL_INFO, + "InnoDB Table %s field %lu name %s", + clust_index->table->name, + j, + (ifield ? ifield->name : "NULL")); + } + + for(ulint j=0; j < table->s->stored_fields; j++) { + ib_logf(IB_LOG_LEVEL_INFO, + "MySQL table %s field %lu name %s", + table->s->table_name.str, + j, + table->field[j]->field_name); + } + + ib_logf(IB_LOG_LEVEL_ERROR, + "Clustered record field for column %lu" + " not found table n_user_defined %d" + " index n_user_defined %d" + " InnoDB table %s field name %s" + " MySQL table %s field name %s n_fields %d" + " query %s", + i, + clust_index->n_user_defined_cols, + clust_index->table->n_cols - DATA_N_SYS_COLS, + clust_index->table->name, + (field ? field->name : "NULL"), + table->s->table_name.str, + (tb_col_name ? 
tb_col_name : "NULL"), + table->s->stored_fields, + innobase_get_stmt(current_thd, &size)); + + ut_a(templ->clust_rec_field_no != ULINT_UNDEFINED); + } + + if (dict_index_is_clust(index)) { + templ->rec_field_no = templ->clust_rec_field_no; + } else { + templ->rec_field_no = dict_index_get_nth_col_pos(index, i); + } + + if (field->real_maybe_null()) { + templ->mysql_null_byte_offset = + field->null_offset(); + + templ->mysql_null_bit_mask = (ulint) field->null_bit; + } else { + templ->mysql_null_bit_mask = 0; + } + + templ->mysql_col_offset = (ulint) get_field_offset(table, field); + + templ->mysql_col_len = (ulint) field->pack_length(); + templ->type = col->mtype; + templ->mysql_type = (ulint) field->type(); + + if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) { + templ->mysql_length_bytes = (ulint) + (((Field_varstring*) field)->length_bytes); + } + + templ->charset = dtype_get_charset_coll(col->prtype); + templ->mbminlen = dict_col_get_mbminlen(col); + templ->mbmaxlen = dict_col_get_mbmaxlen(col); + templ->is_unsigned = col->prtype & DATA_UNSIGNED; + + if (!dict_index_is_clust(index) + && templ->rec_field_no == ULINT_UNDEFINED) { + prebuilt->need_to_access_clustered = TRUE; + } + + if (prebuilt->mysql_prefix_len < templ->mysql_col_offset + + templ->mysql_col_len) { + prebuilt->mysql_prefix_len = templ->mysql_col_offset + + templ->mysql_col_len; + } + + if (templ->type == DATA_BLOB) { + prebuilt->templ_contains_blob = TRUE; + } + + return(templ); +} + +/**************************************************************//** +Builds a 'template' to the prebuilt struct. The template is used in fast +retrieval of just those column values MySQL needs in its processing. */ +UNIV_INTERN +void +ha_innobase::build_template( +/*========================*/ + bool whole_row) /*!< in: true=ROW_MYSQL_WHOLE_ROW, + false=ROW_MYSQL_REC_FIELDS */ +{ + dict_index_t* index; + dict_index_t* clust_index; + ulint n_stored_fields; + ibool fetch_all_in_key = FALSE; + ibool fetch_primary_key_cols = FALSE; + ulint i, sql_idx; + + if (prebuilt->select_lock_type == LOCK_X) { + /* We always retrieve the whole clustered index record if we + use exclusive row level locks, for example, if the read is + done in an UPDATE statement. */ + + whole_row = true; + } else if (!whole_row) { + if (prebuilt->hint_need_to_fetch_extra_cols + == ROW_RETRIEVE_ALL_COLS) { + + /* We know we must at least fetch all columns in the + key, or all columns in the table */ + + if (prebuilt->read_just_key) { + /* MySQL has instructed us that it is enough + to fetch the columns in the key; looks like + MySQL can set this flag also when there is + only a prefix of the column in the key: in + that case we retrieve the whole column from + the clustered index */ + + fetch_all_in_key = TRUE; + } else { + whole_row = true; + } + } else if (prebuilt->hint_need_to_fetch_extra_cols + == ROW_RETRIEVE_PRIMARY_KEY) { + /* We must at least fetch all primary key cols. Note + that if the clustered index was internally generated + by InnoDB on the row id (no primary key was + defined), then row_search_for_mysql() will always + retrieve the row id to a special buffer in the + prebuilt struct. */ + + fetch_primary_key_cols = TRUE; + } + } + + clust_index = dict_table_get_first_index(prebuilt->table); + + index = whole_row ? clust_index : prebuilt->index; + + prebuilt->need_to_access_clustered = (index == clust_index); + + /* Either prebuilt->index should be a secondary index, or it + should be the clustered index. 
*/ + ut_ad(dict_index_is_clust(index) == (index == clust_index)); + + /* Below we check column by column if we need to access + the clustered index. */ + + n_stored_fields= (ulint)table->s->stored_fields; /* number of stored columns */ + + if (!prebuilt->mysql_template) { + prebuilt->mysql_template = (mysql_row_templ_t*) + mem_alloc(n_stored_fields * sizeof(mysql_row_templ_t)); + } + + prebuilt->template_type = whole_row + ? ROW_MYSQL_WHOLE_ROW : ROW_MYSQL_REC_FIELDS; + prebuilt->null_bitmap_len = table->s->null_bytes; + + /* Prepare to build prebuilt->mysql_template[]. */ + prebuilt->templ_contains_blob = FALSE; + prebuilt->mysql_prefix_len = 0; + prebuilt->n_template = 0; + prebuilt->idx_cond_n_cols = 0; + + /* Note that in InnoDB, i is the column number in the table. + MySQL calls columns 'fields'. */ + + if (active_index != MAX_KEY && active_index == pushed_idx_cond_keyno) { + /* Push down an index condition or an end_range check. */ + for (i = 0, sql_idx = 0; i < n_stored_fields; i++, sql_idx++) { + + while (!table->field[sql_idx]->stored_in_db) { + sql_idx++; + } + + const ibool index_contains + = dict_index_contains_col_or_prefix(index, i); + + /* Test if an end_range or an index condition + refers to the field. Note that "index" and + "index_contains" may refer to the clustered index. + Index condition pushdown is relative to prebuilt->index + (the index that is being looked up first). */ + + /* When join_read_always_key() invokes this + code via handler::ha_index_init() and + ha_innobase::index_init(), end_range is not + yet initialized. Because of that, we must + always check for index_contains, instead of + the subset + field->part_of_key.is_set(active_index) + which would be acceptable if end_range==NULL. */ + if (build_template_needs_field_in_icp( + index, prebuilt, index_contains, i)) { + /* Needed in ICP */ + const Field* field; + mysql_row_templ_t* templ; + + if (whole_row) { + field = table->field[sql_idx]; + } else { + field = build_template_needs_field( + index_contains, + prebuilt->read_just_key, + fetch_all_in_key, + fetch_primary_key_cols, + index, table, i, sql_idx); + if (!field) { + continue; + } + } + + templ = build_template_field( + prebuilt, clust_index, index, + table, field, i); + prebuilt->idx_cond_n_cols++; + ut_ad(prebuilt->idx_cond_n_cols + == prebuilt->n_template); + + if (index == prebuilt->index) { + templ->icp_rec_field_no + = templ->rec_field_no; + } else { + templ->icp_rec_field_no + = dict_index_get_nth_col_pos( + prebuilt->index, i); + } + + if (dict_index_is_clust(prebuilt->index)) { + ut_ad(templ->icp_rec_field_no + != ULINT_UNDEFINED); + /* If the primary key includes + a column prefix, use it in + index condition pushdown, + because the condition is + evaluated before fetching any + off-page (externally stored) + columns. */ + if (templ->icp_rec_field_no + < prebuilt->index->n_uniq) { + /* This is a key column; + all set. */ + continue; + } + } else if (templ->icp_rec_field_no + != ULINT_UNDEFINED) { + continue; + } + + /* This is a column prefix index. + The column prefix can be used in + an end_range comparison. */ + + templ->icp_rec_field_no + = dict_index_get_nth_col_or_prefix_pos( + prebuilt->index, i, TRUE); + ut_ad(templ->icp_rec_field_no + != ULINT_UNDEFINED); + + /* Index condition pushdown can be used on + all columns of a secondary index, and on + the PRIMARY KEY columns. 
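
The two-pass loop in ha_innobase::build_template() above first adds the columns that the pushed-down index condition (or end_range check) can touch, so that they occupy the first idx_cond_n_cols slots of prebuilt->mysql_template, and only then adds the remaining columns the query needs. A minimal standalone sketch of that ordering; Column and Template here are invented placeholders, not the InnoDB structures:

#include <cstdio>
#include <vector>

// Invented stand-ins for the InnoDB/MySQL structures.
struct Column {
    int  no;             // column number in the table
    bool in_scan_index;  // column (or a prefix of it) is in the index being scanned
    bool needed;         // the query needs this column at all
};

struct Template {
    int  col_no;
    bool used_in_icp;    // true for the first idx_cond_n_cols entries
};

// Build the template in two passes, ICP columns first.
static std::vector<Template> build_template_sketch(const std::vector<Column>& cols)
{
    std::vector<Template> templ;

    // Pass 1: columns the pushed-down condition / end_range can refer to.
    for (const Column& c : cols) {
        if (c.in_scan_index) {
            templ.push_back({c.no, true});
        }
    }
    const std::size_t idx_cond_n_cols = templ.size();

    // Pass 2: the remaining columns the query still has to fetch.
    for (const Column& c : cols) {
        if (!c.in_scan_index && c.needed) {
            templ.push_back({c.no, false});
        }
    }

    std::printf("%zu ICP column(s), %zu column(s) total\n",
                idx_cond_n_cols, templ.size());
    return templ;
}

int main()
{
    build_template_sketch({{0, true, true}, {1, false, true}, {2, false, false}});
}
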
On the clustered + index, it must never be used on other than + PRIMARY KEY columns, because those columns + may be stored off-page, and we will not + fetch externally stored columns before + checking the index condition. */ + /* TODO: test the above with an assertion + like this. Note that index conditions are + currently pushed down as part of the + "optimizer phase" while end_range is done + as part of the execution phase. Therefore, + we were unable to use an accurate condition + for end_range in the "if" condition above, + and the following assertion would fail. + ut_ad(!dict_index_is_clust(prebuilt->index) + || templ->rec_field_no + < prebuilt->index->n_uniq); + */ + } + } + + ut_ad(prebuilt->idx_cond_n_cols > 0); + ut_ad(prebuilt->idx_cond_n_cols == prebuilt->n_template); + + /* Include the fields that are not needed in index condition + pushdown. */ + for (i = 0, sql_idx = 0; i < n_stored_fields; i++, sql_idx++) { + + while (!table->field[sql_idx]->stored_in_db) { + sql_idx++; + } + + const ibool index_contains + = dict_index_contains_col_or_prefix(index, i); + + if (!build_template_needs_field_in_icp( + index, prebuilt, index_contains, i)) { + /* Not needed in ICP */ + const Field* field; + + if (whole_row) { + field = table->field[sql_idx]; + } else { + field = build_template_needs_field( + index_contains, + prebuilt->read_just_key, + fetch_all_in_key, + fetch_primary_key_cols, + index, table, i, sql_idx); + if (!field) { + continue; + } + } + + build_template_field(prebuilt, + clust_index, index, + table, field, i); + } + } + + prebuilt->idx_cond = this; + } else { + /* No index condition pushdown */ + prebuilt->idx_cond = NULL; + + for (i = 0, sql_idx = 0; i < n_stored_fields; i++, sql_idx++) { + const Field* field; + + while (!table->field[sql_idx]->stored_in_db) { + sql_idx++; + } + + if (whole_row) { + field = table->field[sql_idx]; + } else { + field = build_template_needs_field( + dict_index_contains_col_or_prefix( + index, i), + prebuilt->read_just_key, + fetch_all_in_key, + fetch_primary_key_cols, + index, table, i, sql_idx); + if (!field) { + continue; + } + } + + build_template_field(prebuilt, clust_index, index, + table, field, i); + } + } + + if (index != clust_index && prebuilt->need_to_access_clustered) { + /* Change rec_field_no's to correspond to the clustered index + record */ + for (i = 0; i < prebuilt->n_template; i++) { + + mysql_row_templ_t* templ + = &prebuilt->mysql_template[i]; + + templ->rec_field_no = templ->clust_rec_field_no; + } + } +} + +/********************************************************************//** +This special handling is really to overcome the limitations of MySQL's +binlogging. We need to eliminate the non-determinism that will arise in +INSERT ... SELECT type of statements, since MySQL binlog only stores the +min value of the autoinc interval. Once that is fixed we can get rid of +the special lock handling. +@return DB_SUCCESS if all OK else error code */ +UNIV_INTERN +dberr_t +ha_innobase::innobase_lock_autoinc(void) +/*====================================*/ +{ + DBUG_ENTER("ha_innobase::innobase_lock_autoinc"); + dberr_t error = DB_SUCCESS; + + ut_ad(!srv_read_only_mode); + + switch (innobase_autoinc_lock_mode) { + case AUTOINC_NO_LOCKING: + /* Acquire only the AUTOINC mutex. 
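
The switch that starts above implements the three settings of innodb_autoinc_lock_mode. A simplified, self-contained model of the decision it makes for a simple INSERT; the enum values mirror the server's 0/1/2 settings, but the table state and return values are placeholders, not the real dict_table_t members:

#include <cstdio>

enum AutoincLockMode { AUTOINC_OLD_STYLE = 0, AUTOINC_NEW_STYLE = 1, AUTOINC_NO_LOCKING = 2 };

struct TableState {
    bool autoinc_lock_waited_or_granted;  // another trx holds or waits for the AUTO-INC lock
};

enum Decision { MUTEX_ONLY, TABLE_AUTOINC_LOCK };

// Decide how to protect the auto-increment counter for one insert.
static Decision lock_autoinc_sketch(AutoincLockMode mode, bool simple_insert,
                                    const TableState& table)
{
    switch (mode) {
    case AUTOINC_NO_LOCKING:
        return MUTEX_ONLY;                 // only the in-memory counter mutex
    case AUTOINC_NEW_STYLE:
        if (simple_insert && !table.autoinc_lock_waited_or_granted) {
            return MUTEX_ONLY;             // fast path for simple INSERT/REPLACE/RBR
        }
        // otherwise fall back to old-style locking
        /* fall through */
    case AUTOINC_OLD_STYLE:
    default:
        return TABLE_AUTOINC_LOCK;         // table-level AUTO-INC lock
    }
}

int main()
{
    TableState t{false};
    std::printf("%d\n", lock_autoinc_sketch(AUTOINC_NEW_STYLE, true, t)); // 0 == MUTEX_ONLY
}
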
*/ + dict_table_autoinc_lock(prebuilt->table); + break; + + case AUTOINC_NEW_STYLE_LOCKING: + /* For simple (single/multi) row INSERTs/REPLACEs and RBR + events, we fallback to the old style only if another + transaction has already acquired the AUTOINC lock on + behalf of a LOAD FILE or INSERT ... SELECT etc. type of + statement. */ + if (thd_sql_command(user_thd) == SQLCOM_INSERT + || thd_sql_command(user_thd) == SQLCOM_REPLACE + || thd_sql_command(user_thd) == SQLCOM_END // RBR event + ) { + dict_table_t* ib_table = prebuilt->table; + + /* Acquire the AUTOINC mutex. */ + dict_table_autoinc_lock(ib_table); + + /* We need to check that another transaction isn't + already holding the AUTOINC lock on the table. */ + if (ib_table->n_waiting_or_granted_auto_inc_locks) { + /* Release the mutex to avoid deadlocks and + fall back to old style locking. */ + dict_table_autoinc_unlock(ib_table); + } else { + /* Do not fall back to old style locking. */ + break; + } + } + /* Use old style locking. */ + /* fall through */ + case AUTOINC_OLD_STYLE_LOCKING: + DBUG_EXECUTE_IF("die_if_autoinc_old_lock_style_used", + ut_ad(0);); + error = row_lock_table_autoinc_for_mysql(prebuilt); + + if (error == DB_SUCCESS) { + + /* Acquire the AUTOINC mutex. */ + dict_table_autoinc_lock(prebuilt->table); + } + break; + + default: + ut_error; + } + + DBUG_RETURN(error); +} + +/********************************************************************//** +Reset the autoinc value in the table. +@return DB_SUCCESS if all went well else error code */ +UNIV_INTERN +dberr_t +ha_innobase::innobase_reset_autoinc( +/*================================*/ + ulonglong autoinc) /*!< in: value to store */ +{ + dberr_t error; + + error = innobase_lock_autoinc(); + + if (error == DB_SUCCESS) { + + dict_table_autoinc_initialize(prebuilt->table, autoinc); + + dict_table_autoinc_unlock(prebuilt->table); + } + + return(error); +} + +/********************************************************************//** +Store the autoinc value in the table. The autoinc value is only set if +it's greater than the existing autoinc value in the table. +@return DB_SUCCESS if all went well else error code */ +UNIV_INTERN +dberr_t +ha_innobase::innobase_set_max_autoinc( +/*==================================*/ + ulonglong auto_inc) /*!< in: value to store */ +{ + dberr_t error; + + error = innobase_lock_autoinc(); + + if (error == DB_SUCCESS) { + + dict_table_autoinc_update_if_greater(prebuilt->table, auto_inc); + + dict_table_autoinc_unlock(prebuilt->table); + } + + return(error); +} + +/********************************************************************//** +Stores a row in an InnoDB database, to the table specified in this +handle. 
+@return error code */ +UNIV_INTERN +int +ha_innobase::write_row( +/*===================*/ + uchar* record) /*!< in: a row in MySQL format */ +{ + dberr_t error; + int error_result= 0; + ibool auto_inc_used= FALSE; +#ifdef WITH_WSREP + ibool auto_inc_inserted= FALSE; /* if NULL was inserted */ +#endif + ulint sql_command; + trx_t* trx = thd_to_trx(user_thd); + + DBUG_ENTER("ha_innobase::write_row"); + + if (high_level_read_only) { + ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE); + DBUG_RETURN(HA_ERR_TABLE_READONLY); + } else if (prebuilt->trx != trx) { + sql_print_error("The transaction object for the table handle " + "is at %p, but for the current thread it is at " + "%p", + (const void*) prebuilt->trx, (const void*) trx); + + fputs("InnoDB: Dump of 200 bytes around prebuilt: ", stderr); + ut_print_buf(stderr, ((const byte*) prebuilt) - 100, 200); + fputs("\n" + "InnoDB: Dump of 200 bytes around ha_data: ", + stderr); + ut_print_buf(stderr, ((const byte*) trx) - 100, 200); + putc('\n', stderr); + ut_error; + } else if (!trx_is_started(trx)) { + ++trx->will_lock; + } + + ha_statistic_increment(&SSV::ha_write_count); + + sql_command = thd_sql_command(user_thd); + + if ((sql_command == SQLCOM_ALTER_TABLE + || sql_command == SQLCOM_OPTIMIZE + || sql_command == SQLCOM_CREATE_INDEX +#ifdef WITH_WSREP + || (wsrep_on(user_thd) && wsrep_load_data_splitting && + sql_command == SQLCOM_LOAD && + !thd_test_options( + user_thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) +#endif /* WITH_WSREP */ + || sql_command == SQLCOM_DROP_INDEX) + && num_write_row >= 10000) { +#ifdef WITH_WSREP + if (wsrep_on(user_thd) && sql_command == SQLCOM_LOAD) { + WSREP_DEBUG("forced trx split for LOAD: %s", + wsrep_thd_query(user_thd)); + } +#endif /* WITH_WSREP */ + /* ALTER TABLE is COMMITted at every 10000 copied rows. + The IX table lock for the original table has to be re-issued. + As this method will be called on a temporary table where the + contents of the original table is being copied to, it is + a bit tricky to determine the source table. The cursor + position in the source table need not be adjusted after the + intermediate COMMIT, since writes by other transactions are + being blocked by a MySQL table lock TL_WRITE_ALLOW_READ. */ + + dict_table_t* src_table; + enum lock_mode mode; + + num_write_row = 0; + + /* Commit the transaction. This will release the table + locks, so they have to be acquired again. */ + + /* Altering an InnoDB table */ + /* Get the source table. */ + src_table = lock_get_src_table( + prebuilt->trx, prebuilt->table, &mode); + if (!src_table) { +no_commit: + /* Unknown situation: do not commit */ + /* + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: ALTER TABLE is holding lock" + " on %lu tables!\n", + prebuilt->trx->mysql_n_tables_locked); + */ + ; + } else if (src_table == prebuilt->table) { +#ifdef WITH_WSREP + if (wsrep_on(user_thd) && + wsrep_load_data_splitting && + sql_command == SQLCOM_LOAD && + !thd_test_options(user_thd, + OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + { + switch (wsrep_run_wsrep_commit(user_thd, wsrep_hton, 1)) + { + case WSREP_TRX_OK: + break; + case WSREP_TRX_SIZE_EXCEEDED: + case WSREP_TRX_CERT_FAIL: + case WSREP_TRX_ERROR: + DBUG_RETURN(1); + } + + if (binlog_hton->commit(binlog_hton, user_thd, 1)) + DBUG_RETURN(1); + wsrep_post_commit(user_thd, TRUE); + } +#endif /* WITH_WSREP */ + /* Source table is not in InnoDB format: + no need to re-acquire locks on it. 
*/ + + /* Altering to InnoDB format */ + innobase_commit(ht, user_thd, 1); + /* Note that this transaction is still active. */ + trx_register_for_2pc(prebuilt->trx); + /* We will need an IX lock on the destination table. */ + prebuilt->sql_stat_start = TRUE; + } else { +#ifdef WITH_WSREP + if (wsrep_on(user_thd) && + wsrep_load_data_splitting && + sql_command == SQLCOM_LOAD && + !thd_test_options(user_thd, + OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + { + switch (wsrep_run_wsrep_commit(user_thd, wsrep_hton, 1)) + { + case WSREP_TRX_OK: + break; + case WSREP_TRX_SIZE_EXCEEDED: + case WSREP_TRX_CERT_FAIL: + case WSREP_TRX_ERROR: + DBUG_RETURN(1); + } + + if (binlog_hton->commit(binlog_hton, user_thd, 1)) + DBUG_RETURN(1); + wsrep_post_commit(user_thd, TRUE); + } +#endif /* WITH_WSREP */ + /* Ensure that there are no other table locks than + LOCK_IX and LOCK_AUTO_INC on the destination table. */ + + if (!lock_is_table_exclusive(prebuilt->table, + prebuilt->trx)) { + goto no_commit; + } + + /* Commit the transaction. This will release the table + locks, so they have to be acquired again. */ + innobase_commit(ht, user_thd, 1); + /* Note that this transaction is still active. */ + trx_register_for_2pc(prebuilt->trx); + /* Re-acquire the table lock on the source table. */ + row_lock_table_for_mysql(prebuilt, src_table, mode); + /* We will need an IX lock on the destination table. */ + prebuilt->sql_stat_start = TRUE; + } + } + + num_write_row++; + + /* This is the case where the table has an auto-increment column */ + if (table->next_number_field && record == table->record[0]) { + + /* Reset the error code before calling + innobase_get_auto_increment(). */ + prebuilt->autoinc_error = DB_SUCCESS; + +#ifdef WITH_WSREP + auto_inc_inserted= (table->next_number_field->val_int() == 0); +#endif + + if ((error_result = update_auto_increment())) { + /* We don't want to mask autoinc overflow errors. */ + + /* Handle the case where the AUTOINC sub-system + failed during initialization. */ + if (prebuilt->autoinc_error == DB_UNSUPPORTED) { + error_result = ER_AUTOINC_READ_FAILED; + /* Set the error message to report too. */ + my_error(ER_AUTOINC_READ_FAILED, MYF(0)); + goto func_exit; + } else if (prebuilt->autoinc_error != DB_SUCCESS) { + error = prebuilt->autoinc_error; + goto report_error; + } + + /* MySQL errors are passed straight back. */ + goto func_exit; + } + + auto_inc_used = TRUE; + } + + if (prebuilt->mysql_template == NULL + || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) { + + /* Build the template used in converting quickly between + the two database formats */ + + build_template(true); + } + + innobase_srv_conc_enter_innodb(prebuilt->trx); + + error = row_insert_for_mysql((byte*) record, prebuilt); + DEBUG_SYNC(user_thd, "ib_after_row_insert"); + + /* Handle duplicate key errors */ + if (auto_inc_used) { + ulonglong auto_inc; + ulonglong col_max_value; + + /* Note the number of rows processed for this statement, used + by get_auto_increment() to determine the number of AUTO-INC + values to reserve. This is only useful for a mult-value INSERT + and is a statement level counter.*/ + if (trx->n_autoinc_rows > 0) { + --trx->n_autoinc_rows; + } + + /* We need the upper limit of the col type to check for + whether we update the table autoinc counter or not. 
*/ + col_max_value = innobase_get_int_col_max_value( + table->next_number_field); + + /* Get the value that MySQL attempted to store in the table.*/ + auto_inc = table->next_number_field->val_uint(); + + switch (error) { + case DB_DUPLICATE_KEY: + + /* A REPLACE command and LOAD DATA INFILE REPLACE + handle a duplicate key error themselves, but we + must update the autoinc counter if we are performing + those statements. */ + + switch (sql_command) { + case SQLCOM_LOAD: + if (trx->duplicates) { + + goto set_max_autoinc; + } + break; + + case SQLCOM_REPLACE: + case SQLCOM_INSERT_SELECT: + case SQLCOM_REPLACE_SELECT: + goto set_max_autoinc; + +#ifdef WITH_WSREP + /* workaround for LP bug #355000, retrying the insert */ + case SQLCOM_INSERT: + + WSREP_DEBUG("DUPKEY error for autoinc\n" + "THD %ld, value %llu, off %llu inc %llu", + wsrep_thd_thread_id(current_thd), + auto_inc, + prebuilt->autoinc_offset, + prebuilt->autoinc_increment); + + if (wsrep_on(current_thd) && + auto_inc_inserted && + wsrep_drupal_282555_workaround && + wsrep_thd_retry_counter(current_thd) == 0 && + !thd_test_options(current_thd, + OPTION_NOT_AUTOCOMMIT | + OPTION_BEGIN)) { + WSREP_DEBUG( + "retrying insert: %s", + (*wsrep_thd_query(current_thd)) ? + wsrep_thd_query(current_thd) : + (char *)"void"); + error= DB_SUCCESS; + wsrep_thd_set_conflict_state( + current_thd, MUST_ABORT); + innobase_srv_conc_exit_innodb(prebuilt->trx); + /* jump straight to func exit over + * later wsrep hooks */ + goto func_exit; + } + break; +#endif /* WITH_WSREP */ + + default: + break; + } + + break; + + case DB_SUCCESS: + /* If the actual value inserted is greater than + the upper limit of the interval, then we try and + update the table upper limit. Note: last_value + will be 0 if get_auto_increment() was not called.*/ + + if (auto_inc >= prebuilt->autoinc_last_value) { +set_max_autoinc: + /* This should filter out the negative + values set explicitly by the user. 
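
The code that follows rounds the just-inserted value with innobase_next_autoinc() before pushing the table counter via innobase_set_max_autoinc(). The implementation of innobase_next_autoinc() is not part of this hunk; the sketch below is only one plausible version of the offset/increment arithmetic with saturation at the column maximum, not the server's exact function:

#include <cstdint>
#include <cstdio>

// Plausible sketch only: round `current` up to the next value of the sequence
// offset, offset+step, offset+2*step, ..., saturating at max_value.
static std::uint64_t next_autoinc_sketch(std::uint64_t current,
                                         std::uint64_t step,
                                         std::uint64_t offset,
                                         std::uint64_t max_value)
{
    if (current >= max_value) {
        return max_value;
    }
    if (offset > current) {
        return offset <= max_value ? offset : max_value;
    }

    std::uint64_t steps = (current - offset) / step + 1;  // next sequence point after current
    std::uint64_t next  = offset + steps * step;

    // Saturate on wrap-around or when the column range is exceeded.
    if (next < current || next > max_value) {
        return max_value;
    }
    return next;
}

int main()
{
    // auto_increment_offset = 5, auto_increment_increment = 10, TINYINT UNSIGNED column
    std::printf("%llu\n", (unsigned long long) next_autoinc_sketch(23, 10, 5, 255)); // 25
}
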
*/ + if (auto_inc <= col_max_value) { + ut_a(prebuilt->autoinc_increment > 0); + + ulonglong offset; + ulonglong increment; + dberr_t err; - bzero(buff, buff_len); - *key_is_null = TRUE; + offset = prebuilt->autoinc_offset; + increment = prebuilt->autoinc_increment; - for (; key_part != end; key_part++) { - uchar sorted[REC_VERSION_56_MAX_INDEX_COL_LEN] = {'\0'}; - ibool part_is_null = FALSE; + auto_inc = innobase_next_autoinc( + auto_inc, + 1, increment, offset, + col_max_value); - if (key_part->null_bit) { - if (record[key_part->null_offset] & - key_part->null_bit) { - *buff = 1; - part_is_null = TRUE; - } else { - *buff = 0; + err = innobase_set_max_autoinc( + auto_inc); + + if (err != DB_SUCCESS) { + error = err; + } + } } - buff++; + break; + default: + break; } - if (!part_is_null) *key_is_null = FALSE; + } - field = key_part->field; - mysql_type = field->type(); + innobase_srv_conc_exit_innodb(prebuilt->trx); - if (mysql_type == MYSQL_TYPE_VARCHAR) { - /* >= 5.0.3 true VARCHAR */ - ulint lenlen; - ulint len; - const byte* data; - ulint key_len; - ulint true_len; - CHARSET_INFO* cs; - int error=0; +report_error: + if (error == DB_TABLESPACE_DELETED) { + ib_senderrf( + trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_DISCARDED, + table->s->table_name.str); + } - key_len = key_part->length; + error_result = convert_error_code_to_mysql(error, + prebuilt->table->flags, + user_thd); - if (part_is_null) { - buff += key_len + 2; +#ifdef WITH_WSREP + if (!error_result + && wsrep_on(user_thd) + && wsrep_thd_exec_mode(user_thd) == LOCAL_STATE + && !wsrep_consistency_check(user_thd) + && !wsrep_thd_skip_append_keys(user_thd)) { - if (wsrep_append_keys(user_thd, false, record, NULL)) { ++ if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, record, ++ NULL)) { + DBUG_PRINT("wsrep", ("row key failed")); + error_result = HA_ERR_INTERNAL_ERROR; + goto wsrep_error; + } + } +wsrep_error: +#endif /* WITH_WSREP */ - continue; - } - cs = field->charset(); + if (error_result == HA_FTS_INVALID_DOCID) { + my_error(HA_FTS_INVALID_DOCID, MYF(0)); + } - lenlen = (ulint) - (((Field_varstring*)field)->length_bytes); +func_exit: + innobase_active_small(); - data = row_mysql_read_true_varchar(&len, - (byte*) (record - + (ulint)get_field_offset(table, field)), - lenlen); + DBUG_RETURN(error_result); +} - true_len = len; +/**********************************************************************//** +Checks which fields have changed in a row and stores information +of them to an update vector. 
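
calc_row_difference(), whose description ends just above, receives the complete old and new rows and has to work out which columns actually changed before building the update vector. A dependency-free sketch of that idea, with std::optional standing in for SQL NULL; nothing here is the real upd_t/upd_field_t layout:

#include <cstdio>
#include <optional>
#include <string>
#include <vector>

using Value = std::optional<std::string>;   // nullopt stands in for SQL NULL

struct UpdField {
    std::size_t field_no;
    Value       new_val;
};

// Compare old and new rows column by column and collect the changed ones.
static std::vector<UpdField> calc_row_difference_sketch(const std::vector<Value>& old_row,
                                                        const std::vector<Value>& new_row)
{
    std::vector<UpdField> uvect;
    for (std::size_t i = 0; i < old_row.size() && i < new_row.size(); i++) {
        if (old_row[i] != new_row[i]) {     // NULL vs NULL compares equal
            uvect.push_back({i, new_row[i]});
        }
    }
    return uvect;
}

int main()
{
    std::vector<Value> old_row = {"1", "abc", std::nullopt};
    std::vector<Value> new_row = {"1", "abd", std::nullopt};
    std::printf("%zu changed field(s)\n",
                calc_row_difference_sketch(old_row, new_row).size());  // 1
}
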
+@return DB_SUCCESS or error code */ +static +dberr_t +calc_row_difference( +/*================*/ + upd_t* uvect, /*!< in/out: update vector */ + uchar* old_row, /*!< in: old row in MySQL format */ + uchar* new_row, /*!< in: new row in MySQL format */ + TABLE* table, /*!< in: table in MySQL data + dictionary */ + uchar* upd_buff, /*!< in: buffer to use */ + ulint buff_len, /*!< in: buffer length */ + row_prebuilt_t* prebuilt, /*!< in: InnoDB prebuilt struct */ + THD* thd) /*!< in: user thread */ +{ + uchar* original_upd_buff = upd_buff; + Field* field; + enum_field_types field_mysql_type; + uint n_fields; + ulint o_len; + ulint n_len; + ulint col_pack_len; + const byte* new_mysql_row_col; + const byte* o_ptr; + const byte* n_ptr; + byte* buf; + upd_field_t* ufield; + ulint col_type; + ulint n_changed = 0; + dfield_t dfield; + dict_index_t* clust_index; + uint sql_idx, innodb_idx= 0; + ibool changes_fts_column = FALSE; + ibool changes_fts_doc_col = FALSE; + trx_t* trx = thd_to_trx(thd); + doc_id_t doc_id = FTS_NULL_DOC_ID; - /* For multi byte character sets we need to calculate - the true length of the key */ + ut_ad(!srv_read_only_mode); - if (len > 0 && cs->mbmaxlen > 1) { - true_len = (ulint) cs->cset->well_formed_len(cs, - (const char *) data, - (const char *) data + len, - (uint) (key_len / - cs->mbmaxlen), - &error); - } + n_fields = table->s->fields; + clust_index = dict_table_get_first_index(prebuilt->table); - /* In a column prefix index, we may need to truncate - the stored value: */ + /* We use upd_buff to convert changed fields */ + buf = (byte*) upd_buff; - if (true_len > key_len) { - true_len = key_len; - } + for (sql_idx = 0; sql_idx < n_fields; sql_idx++) { + field = table->field[sql_idx]; + if (!field->stored_in_db) + continue; - memcpy(sorted, data, true_len); - true_len = wsrep_innobase_mysql_sort( - mysql_type, cs->number, sorted, true_len, - REC_VERSION_56_MAX_INDEX_COL_LEN); + o_ptr = (const byte*) old_row + get_field_offset(table, field); + n_ptr = (const byte*) new_row + get_field_offset(table, field); - if (wsrep_protocol_version > 1) { - memcpy(buff, sorted, true_len); - /* Note that we always reserve the maximum possible - length of the true VARCHAR in the key value, though - only len first bytes after the 2 length bytes contain - actual data. The rest of the space was reset to zero - in the bzero() call above. */ - buff += true_len; - } else { - buff += key_len; - } - } else if (mysql_type == MYSQL_TYPE_TINY_BLOB - || mysql_type == MYSQL_TYPE_MEDIUM_BLOB - || mysql_type == MYSQL_TYPE_BLOB - || mysql_type == MYSQL_TYPE_LONG_BLOB - /* MYSQL_TYPE_GEOMETRY data is treated - as BLOB data in innodb. */ - || mysql_type == MYSQL_TYPE_GEOMETRY) { + /* Use new_mysql_row_col and col_pack_len save the values */ - CHARSET_INFO* cs; - ulint key_len; - ulint true_len; - int error=0; - ulint blob_len; - const byte* blob_data; + new_mysql_row_col = n_ptr; + col_pack_len = field->pack_length(); - ut_a(key_part->key_part_flag & HA_PART_KEY_SEG); + o_len = col_pack_len; + n_len = col_pack_len; - key_len = key_part->length; + /* We use o_ptr and n_ptr to dig up the actual data for + comparison. 
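
Before the comparison, the switch below has to unwrap the MySQL "true VARCHAR" on-row format: the real payload length sits in 1 or 2 leading little-endian bytes, followed by the data. row_mysql_read_true_varchar() itself is not shown in this hunk; the following is a small sketch of that unwrapping under that assumption:

#include <cstdint>
#include <cstdio>

// Return a pointer to the payload of a MySQL true VARCHAR field and store its
// length in *len. length_bytes is 1 or 2, as reported by Field_varstring.
static const std::uint8_t* read_true_varchar_sketch(const std::uint8_t* field_ptr,
                                                    unsigned length_bytes,
                                                    std::size_t* len)
{
    if (length_bytes == 1) {
        *len = field_ptr[0];
    } else {
        *len = field_ptr[0] | (std::size_t(field_ptr[1]) << 8);   // little-endian
    }
    return field_ptr + length_bytes;
}

int main()
{
    const std::uint8_t field[] = {3, 0, 'f', 'o', 'o'};   // VARCHAR stored with 2 length bytes
    std::size_t len;
    const std::uint8_t* data = read_true_varchar_sketch(field, 2, &len);
    std::printf("%zu %.3s\n", len, (const char*) data);   // 3 foo
}
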
*/ - if (part_is_null) { - buff += key_len + 2; + field_mysql_type = field->type(); - continue; - } + col_type = prebuilt->table->cols[innodb_idx].mtype; - cs = field->charset(); + switch (col_type) { - blob_data = row_mysql_read_blob_ref(&blob_len, - (byte*) (record - + (ulint)get_field_offset(table, field)), - (ulint) field->pack_length()); + case DATA_BLOB: + o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len); + n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len); - true_len = blob_len; + break; - ut_a(get_field_offset(table, field) - == key_part->offset); + case DATA_VARCHAR: + case DATA_BINARY: + case DATA_VARMYSQL: + if (field_mysql_type == MYSQL_TYPE_VARCHAR) { + /* This is a >= 5.0.3 type true VARCHAR where + the real payload data length is stored in + 1 or 2 bytes */ - /* For multi byte character sets we need to calculate - the true length of the key */ + o_ptr = row_mysql_read_true_varchar( + &o_len, o_ptr, + (ulint) + (((Field_varstring*) field)->length_bytes)); - if (blob_len > 0 && cs->mbmaxlen > 1) { - true_len = (ulint) cs->cset->well_formed_len(cs, - (const char *) blob_data, - (const char *) blob_data - + blob_len, - (uint) (key_len / - cs->mbmaxlen), - &error); + n_ptr = row_mysql_read_true_varchar( + &n_len, n_ptr, + (ulint) + (((Field_varstring*) field)->length_bytes)); } - /* All indexes on BLOB and TEXT are column prefix - indexes, and we may need to truncate the data to be - stored in the key value: */ + break; + default: + ; + } - if (true_len > key_len) { - true_len = key_len; + if (field_mysql_type == MYSQL_TYPE_LONGLONG + && prebuilt->table->fts + && innobase_strcasecmp( + field->field_name, FTS_DOC_ID_COL_NAME) == 0) { + doc_id = (doc_id_t) mach_read_from_n_little_endian( + n_ptr, 8); + if (doc_id == 0) { + return(DB_FTS_INVALID_DOCID); } + } - memcpy(sorted, blob_data, true_len); - true_len = wsrep_innobase_mysql_sort( - mysql_type, cs->number, sorted, true_len, - REC_VERSION_56_MAX_INDEX_COL_LEN); - memcpy(buff, sorted, true_len); + if (field->real_maybe_null()) { + if (field->is_null_in_record(old_row)) { + o_len = UNIV_SQL_NULL; + } - /* Note that we always reserve the maximum possible - length of the BLOB prefix in the key value. */ - if (wsrep_protocol_version > 1) { - buff += true_len; - } else { - buff += key_len; - } - } else { - /* Here we handle all other data types except the - true VARCHAR, BLOB and TEXT. Note that the column - value we store may be also in a column prefix - index. 
*/ + if (field->is_null_in_record(new_row)) { + n_len = UNIV_SQL_NULL; + } + } - CHARSET_INFO* cs; - ulint true_len; - ulint key_len; - const uchar* src_start; - int error=0; - enum_field_types real_type; + if (o_len != n_len || (o_len != 0 && o_len != UNIV_SQL_NULL + && 0 != memcmp(o_ptr, n_ptr, o_len))) { + /* The field has changed */ - key_len = key_part->length; + ufield = uvect->fields + n_changed; + UNIV_MEM_INVALID(ufield, sizeof *ufield); - if (part_is_null) { - buff += key_len; + /* Let us use a dummy dfield to make the conversion + from the MySQL column format to the InnoDB format */ - continue; + if (n_len != UNIV_SQL_NULL) { + dict_col_copy_type(prebuilt->table->cols + + innodb_idx, + dfield_get_type(&dfield)); + + buf = row_mysql_store_col_in_innobase_format( + &dfield, + (byte*) buf, + TRUE, + new_mysql_row_col, + col_pack_len, + dict_table_is_comp(prebuilt->table)); + dfield_copy(&ufield->new_val, &dfield); + } else { + dfield_set_null(&ufield->new_val); } - src_start = record + key_part->offset; - real_type = field->real_type(); - true_len = key_len; + ufield->exp = NULL; + ufield->orig_len = 0; + ufield->field_no = dict_col_get_clust_pos( + &prebuilt->table->cols[innodb_idx], clust_index); + n_changed++; - /* Character set for the field is defined only - to fields whose type is string and real field - type is not enum or set. For these fields check - if character set is multi byte. */ + /* If an FTS indexed column was changed by this + UPDATE then we need to inform the FTS sub-system. - if (real_type != MYSQL_TYPE_ENUM - && real_type != MYSQL_TYPE_SET - && ( mysql_type == MYSQL_TYPE_VAR_STRING - || mysql_type == MYSQL_TYPE_STRING)) { + NOTE: Currently we re-index all FTS indexed columns + even if only a subset of the FTS indexed columns + have been updated. That is the reason we are + checking only once here. Later we will need to + note which columns have been updated and do + selective processing. */ + if (prebuilt->table->fts != NULL) { + ulint offset; + dict_table_t* innodb_table; - cs = field->charset(); + innodb_table = prebuilt->table; - /* For multi byte character sets we need to - calculate the true length of the key */ + if (!changes_fts_column) { + offset = row_upd_changes_fts_column( + innodb_table, ufield); - if (key_len > 0 && cs->mbmaxlen > 1) { + if (offset != ULINT_UNDEFINED) { + changes_fts_column = TRUE; + } + } - true_len = (ulint) - cs->cset->well_formed_len(cs, - (const char *)src_start, - (const char *)src_start - + key_len, - (uint) (key_len / - cs->mbmaxlen), - &error); + if (!changes_fts_doc_col) { + changes_fts_doc_col = + row_upd_changes_doc_id( + innodb_table, ufield); } - memcpy(sorted, src_start, true_len); - true_len = wsrep_innobase_mysql_sort( - mysql_type, cs->number, sorted, true_len, - REC_VERSION_56_MAX_INDEX_COL_LEN); + } + } + if (field->stored_in_db) + innodb_idx++; + } + + /* If the update changes a column with an FTS index on it, we + then add an update column node with a new document id to the + other changes. We piggy back our changes on the normal UPDATE + to reduce processing and IO overhead. */ + if (!prebuilt->table->fts) { + trx->fts_next_doc_id = 0; + } else if (changes_fts_column || changes_fts_doc_col) { + dict_table_t* innodb_table = prebuilt->table; + + ufield = uvect->fields + n_changed; + + if (!DICT_TF2_FLAG_IS_SET( + innodb_table, DICT_TF2_FTS_HAS_DOC_ID)) { + + /* If Doc ID is managed by user, and if any + FTS indexed column has been updated, its corresponding + Doc ID must also be updated. 
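
The checks that follow enforce this for a user-managed FTS_DOC_ID column: the supplied Doc ID must not go backwards, and it must not jump ahead of the FTS cache's next Doc ID by FTS_DOC_ID_MAX_STEP or more. A standalone sketch of just that predicate; the 65536 used in the example is a placeholder, not necessarily the server's constant:

#include <cstdint>
#include <cstdio>

enum DocIdCheck { DOC_ID_OK, DOC_ID_TOO_SMALL, DOC_ID_TOO_BIG };

// next_doc_id is the next Doc ID the FTS cache would hand out; max_step is the
// largest allowed jump (FTS_DOC_ID_MAX_STEP in the server).
static DocIdCheck check_fts_doc_id_sketch(std::uint64_t doc_id,
                                          std::uint64_t next_doc_id,
                                          std::uint64_t max_step)
{
    if (doc_id < next_doc_id) {
        return DOC_ID_TOO_SMALL;     // Doc ID must monotonically increase
    }
    if (doc_id - next_doc_id >= max_step) {
        return DOC_ID_TOO_BIG;       // difference with the largest used Doc ID too big
    }
    return DOC_ID_OK;
}

int main()
{
    std::printf("%d\n", check_fts_doc_id_sketch(101, 100, 65536));  // 0 == DOC_ID_OK
}
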
Otherwise, return + error */ + if (changes_fts_column && !changes_fts_doc_col) { + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB: A new Doc ID" + " must be supplied while updating" + " FTS indexed columns.\n"); + return(DB_FTS_INVALID_DOCID); + } - memcpy(buff, sorted, true_len); - } else { - memcpy(buff, src_start, true_len); + /* Doc ID must monotonically increase */ + ut_ad(innodb_table->fts->cache); + if (doc_id < prebuilt->table->fts->cache->next_doc_id) { + fprintf(stderr, + "InnoDB: FTS Doc ID must be larger than" + " " IB_ID_FMT " for table", + innodb_table->fts->cache->next_doc_id + - 1); + ut_print_name(stderr, trx, + TRUE, innodb_table->name); + putc('\n', stderr); + + return(DB_FTS_INVALID_DOCID); + } else if ((doc_id + - prebuilt->table->fts->cache->next_doc_id) + >= FTS_DOC_ID_MAX_STEP) { + fprintf(stderr, + "InnoDB: Doc ID " UINT64PF " is too" + " big. Its difference with largest" + " Doc ID used " UINT64PF " cannot" + " exceed or equal to %d\n", + doc_id, + prebuilt->table->fts->cache->next_doc_id - 1, + FTS_DOC_ID_MAX_STEP); } - buff += true_len; - /* Pad the unused space with spaces. */ -#ifdef REMOVED - if (true_len < key_len) { - ulint pad_len = key_len - true_len; - ut_a(!(pad_len % cs->mbminlen)); + trx->fts_next_doc_id = doc_id; + } else { + /* If the Doc ID is a hidden column, it can't be + changed by user */ + ut_ad(!changes_fts_doc_col); - cs->cset->fill(cs, buff, pad_len, - 0x20 /* space */); - buff += pad_len; - } -#endif /* REMOVED */ + /* Doc ID column is hidden, a new Doc ID will be + generated by following fts_update_doc_id() call */ + trx->fts_next_doc_id = 0; } + + fts_update_doc_id( + innodb_table, ufield, &trx->fts_next_doc_id); + + ++n_changed; + } else { + /* We have a Doc ID column, but none of FTS indexed + columns are touched, nor the Doc ID column, so set + fts_next_doc_id to UINT64_UNDEFINED, which means do not + update the Doc ID column */ + trx->fts_next_doc_id = UINT64_UNDEFINED; } - ut_a(buff <= buff_start + buff_len); + uvect->n_fields = n_changed; + uvect->info_bits = 0; - DBUG_RETURN((uint)(buff - buff_start)); + ut_a(buf <= (byte*) original_upd_buff + buff_len); + + return(DB_SUCCESS); } -#endif /* WITH_WSREP */ -UNIV_INTERN -uint -ha_innobase::store_key_val_for_row( -/*===============================*/ - uint keynr, /*!< in: key number */ - char* buff, /*!< in/out: buffer for the key value (in MySQL - format) */ - uint buff_len,/*!< in: buffer length */ - const uchar* record)/*!< in: row in MySQL format */ + +#ifdef WITH_WSREP +static +int +wsrep_calc_row_hash( +/*================*/ + byte* digest, /*!< in/out: md5 sum */ + const uchar* row, /*!< in: row in MySQL format */ + TABLE* table, /*!< in: table in MySQL data + dictionary */ + row_prebuilt_t* prebuilt, /*!< in: InnoDB prebuilt struct */ + THD* thd) /*!< in: user thread */ { - KEY* key_info = table->key_info + keynr; - KEY_PART_INFO* key_part = key_info->key_part; - KEY_PART_INFO* end = key_part + key_info->key_parts; - char* buff_start = buff; - enum_field_types mysql_type; Field* field; - ibool is_null; + enum_field_types field_mysql_type; + uint n_fields; + ulint len; + const byte* ptr; + ulint col_type; + uint i; - DBUG_ENTER("store_key_val_for_row"); + void *ctx = wsrep_md5_init(); - /* The format for storing a key field in MySQL is the following: + n_fields = table->s->fields; - 1. If the column can be NULL, then in the first byte we put 1 if the - field value is NULL, 0 otherwise. + for (i = 0; i < n_fields; i++) { + byte null_byte=0; + byte true_byte=1; - 2. 
If the column is of a BLOB type (it must be a column prefix field - in this case), then we put the length of the data in the field to the - next 2 bytes, in the little-endian format. If the field is SQL NULL, - then these 2 bytes are set to 0. Note that the length of data in the - field is <= column prefix length. + field = table->field[i]; - 3. In a column prefix field, prefix_len next bytes are reserved for - data. In a normal field the max field length next bytes are reserved - for data. For a VARCHAR(n) the max field length is n. If the stored - value is the SQL NULL then these data bytes are set to 0. + ptr = (const byte*) row + get_field_offset(table, field); + len = field->pack_length(); - 4. We always use a 2 byte length for a true >= 5.0.3 VARCHAR. Note that - in the MySQL row format, the length is stored in 1 or 2 bytes, - depending on the maximum allowed length. But in the MySQL key value - format, the length always takes 2 bytes. + field_mysql_type = field->type(); - We have to zero-fill the buffer so that MySQL is able to use a - simple memcmp to compare two key values to determine if they are - equal. MySQL does this to compare contents of two 'ref' values. */ + col_type = prebuilt->table->cols[i].mtype; - bzero(buff, buff_len); + switch (col_type) { - for (; key_part != end; key_part++) { - is_null = FALSE; + case DATA_BLOB: + ptr = row_mysql_read_blob_ref(&len, ptr, len); + + break; + + case DATA_VARCHAR: + case DATA_BINARY: + case DATA_VARMYSQL: + if (field_mysql_type == MYSQL_TYPE_VARCHAR) { + /* This is a >= 5.0.3 type true VARCHAR where + the real payload data length is stored in + 1 or 2 bytes */ + + ptr = row_mysql_read_true_varchar( + &len, ptr, + (ulint) + (((Field_varstring*)field)->length_bytes)); - if (key_part->null_bit) { - if (record[key_part->null_offset] - & key_part->null_bit) { - *buff = 1; - is_null = TRUE; - } else { - *buff = 0; } - buff++; + + break; + default: + ; } + /* + if (field->null_ptr && + field_in_record_is_null(table, field, (char*) row)) { + */ - field = key_part->field; - mysql_type = field->type(); + if (field->is_null_in_record(row)) { + wsrep_md5_update(ctx, (char*)&null_byte, 1); + } else { + wsrep_md5_update(ctx, (char*)&true_byte, 1); + wsrep_md5_update(ctx, (char*)ptr, len); + } + } - if (mysql_type == MYSQL_TYPE_VARCHAR) { - /* >= 5.0.3 true VARCHAR */ - ulint lenlen; - ulint len; - const byte* data; - ulint key_len; - ulint true_len; - CHARSET_INFO* cs; - int error=0; + wsrep_compute_md5_hash((char*)digest, ctx); - key_len = key_part->length; + return(0); +} +#endif /* WITH_WSREP */ +/**********************************************************************//** +Updates a row given as a parameter to a new value. Note that we are given +whole rows, not just the fields which are updated: this incurs some +overhead for CPU when we check which fields are actually updated. +TODO: currently InnoDB does not prevent the 'Halloween problem': +in a searched update a single row can get updated several times +if its index columns are updated! 
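
wsrep_calc_row_hash() above digests every stored field of a row, feeding a marker byte (0 for SQL NULL, 1 otherwise) and then the field bytes into MD5. The sketch below keeps the same per-field walk but substitutes FNV-1a for MD5 so it stays dependency-free; it is an illustration of the structure only, not the digest Galera actually uses:

#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <vector>

static void fnv1a_update(std::uint64_t& h, const void* data, std::size_t len)
{
    const unsigned char* p = static_cast<const unsigned char*>(data);
    for (std::size_t i = 0; i < len; i++) {
        h ^= p[i];
        h *= 1099511628211ULL;          // FNV-1a 64-bit prime
    }
}

// Hash a row field by field: one marker byte per field, then the payload
// bytes for non-NULL fields.
static std::uint64_t row_hash_sketch(const std::vector<std::optional<std::string>>& row)
{
    std::uint64_t h = 14695981039346656037ULL;   // FNV-1a 64-bit offset basis
    for (const auto& field : row) {
        const unsigned char marker = field ? 1 : 0;
        fnv1a_update(h, &marker, 1);
        if (field) {
            fnv1a_update(h, field->data(), field->size());
        }
    }
    return h;
}

int main()
{
    std::printf("%016llx\n",
                (unsigned long long) row_hash_sketch({"42", std::nullopt, "abc"}));
}
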
+@return error number or 0 */ +UNIV_INTERN +int +ha_innobase::update_row( +/*====================*/ + const uchar* old_row, /*!< in: old row in MySQL format */ + uchar* new_row) /*!< in: new row in MySQL format */ +{ + upd_t* uvect; + dberr_t error; + trx_t* trx = thd_to_trx(user_thd); - if (is_null) { - buff += key_len + 2; + DBUG_ENTER("ha_innobase::update_row"); - continue; - } - cs = field->charset(); + ut_a(prebuilt->trx == trx); - lenlen = (ulint) - (((Field_varstring*)field)->length_bytes); + if (high_level_read_only) { + ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE); + DBUG_RETURN(HA_ERR_TABLE_READONLY); + } else if (!trx_is_started(trx)) { + ++trx->will_lock; + } - data = row_mysql_read_true_varchar(&len, - (byte*) (record - + (ulint)get_field_offset(table, field)), - lenlen); + if (upd_buf == NULL) { + ut_ad(upd_buf_size == 0); - true_len = len; + /* Create a buffer for packing the fields of a record. Why + table->stored_rec_length did not work here? Obviously, + because char fields when packed actually became 1 byte + longer, when we also stored the string length as the first + byte. */ - /* For multi byte character sets we need to calculate - the true length of the key */ + upd_buf_size = table->s->stored_rec_length + + table->s->max_key_length + MAX_REF_PARTS * 3; + upd_buf = (uchar*) my_malloc(upd_buf_size, MYF(MY_WME)); + if (upd_buf == NULL) { + upd_buf_size = 0; + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + } + } - if (len > 0 && cs->mbmaxlen > 1) { - true_len = (ulint) cs->cset->well_formed_len(cs, - (const char *) data, - (const char *) data + len, - (uint) (key_len / - cs->mbmaxlen), - &error); - } + ha_statistic_increment(&SSV::ha_update_count); - /* In a column prefix index, we may need to truncate - the stored value: */ + if (prebuilt->upd_node) { + uvect = prebuilt->upd_node->update; + } else { + uvect = row_get_prebuilt_update_vector(prebuilt); + } - if (true_len > key_len) { - true_len = key_len; - } + /* Build an update vector from the modified fields in the rows + (uses upd_buf of the handle) */ - /* The length in a key value is always stored in 2 - bytes */ + error = calc_row_difference(uvect, (uchar*) old_row, new_row, table, + upd_buf, upd_buf_size, prebuilt, user_thd); - row_mysql_store_true_var_len((byte*)buff, true_len, 2); - buff += 2; + if (error != DB_SUCCESS) { + goto func_exit; + } - memcpy(buff, data, true_len); + /* This is not a delete */ + prebuilt->upd_node->is_delete = FALSE; - /* Note that we always reserve the maximum possible - length of the true VARCHAR in the key value, though - only len first bytes after the 2 length bytes contain - actual data. The rest of the space was reset to zero - in the bzero() call above. */ + ut_a(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW); - buff += key_len; + innobase_srv_conc_enter_innodb(trx); + + error = row_update_for_mysql((byte*) old_row, prebuilt); + + /* We need to do some special AUTOINC handling for the following case: + + INSERT INTO t (c1,c2) VALUES(x,y) ON DUPLICATE KEY UPDATE ... - } else if (mysql_type == MYSQL_TYPE_TINY_BLOB - || mysql_type == MYSQL_TYPE_MEDIUM_BLOB - || mysql_type == MYSQL_TYPE_BLOB - || mysql_type == MYSQL_TYPE_LONG_BLOB - /* MYSQL_TYPE_GEOMETRY data is treated - as BLOB data in innodb. 
*/ - || mysql_type == MYSQL_TYPE_GEOMETRY) { + We need to use the AUTOINC counter that was actually used by + MySQL in the UPDATE statement, which can be different from the + value used in the INSERT statement.*/ - CHARSET_INFO* cs; - ulint key_len; - ulint true_len; - int error=0; - ulint blob_len; - const byte* blob_data; + if (error == DB_SUCCESS + && table->next_number_field + && new_row == table->record[0] + && thd_sql_command(user_thd) == SQLCOM_INSERT + && trx->duplicates) { - ut_a(key_part->key_part_flag & HA_PART_KEY_SEG); + ulonglong auto_inc; + ulonglong col_max_value; - key_len = key_part->length; + auto_inc = table->next_number_field->val_uint(); - if (is_null) { - buff += key_len + 2; + /* We need the upper limit of the col type to check for + whether we update the table autoinc counter or not. */ + col_max_value = innobase_get_int_col_max_value( + table->next_number_field); - continue; - } + if (auto_inc <= col_max_value && auto_inc != 0) { - cs = field->charset(); + ulonglong offset; + ulonglong increment; - blob_data = row_mysql_read_blob_ref(&blob_len, - (byte*) (record - + (ulint)get_field_offset(table, field)), - (ulint) field->pack_length()); + offset = prebuilt->autoinc_offset; + increment = prebuilt->autoinc_increment; - true_len = blob_len; + auto_inc = innobase_next_autoinc( + auto_inc, 1, increment, offset, col_max_value); - ut_a(get_field_offset(table, field) - == key_part->offset); + error = innobase_set_max_autoinc(auto_inc); + } + } - /* For multi byte character sets we need to calculate - the true length of the key */ + innobase_srv_conc_exit_innodb(trx); - if (blob_len > 0 && cs->mbmaxlen > 1) { - true_len = (ulint) cs->cset->well_formed_len(cs, - (const char *) blob_data, - (const char *) blob_data - + blob_len, - (uint) (key_len / - cs->mbmaxlen), - &error); - } +func_exit: + int err = convert_error_code_to_mysql(error, + prebuilt->table->flags, user_thd); - /* All indexes on BLOB and TEXT are column prefix - indexes, and we may need to truncate the data to be - stored in the key value: */ + /* If success and no columns were updated. */ + if (err == 0 && uvect->n_fields == 0) { - if (true_len > key_len) { - true_len = key_len; - } + /* This is the same as success, but instructs + MySQL that the row is not really updated and it + should not increase the count of updated rows. + This is fix for http://bugs.mysql.com/29157 */ + err = HA_ERR_RECORD_IS_THE_SAME; + } else if (err == HA_FTS_INVALID_DOCID) { + my_error(HA_FTS_INVALID_DOCID, MYF(0)); + } - /* MySQL reserves 2 bytes for the length and the - storage of the number is little-endian */ + /* Tell InnoDB server that there might be work for + utility threads: */ - innobase_write_to_2_little_endian( - (byte*)buff, true_len); - buff += 2; + innobase_active_small(); - memcpy(buff, blob_data, true_len); +#ifdef WITH_WSREP + if (error == DB_SUCCESS && + wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && + wsrep_on(user_thd) && + !wsrep_thd_skip_append_keys(user_thd)) + { + DBUG_PRINT("wsrep", ("update row key")); - if (wsrep_append_keys(user_thd, false, old_row, new_row)) { - /* Note that we always reserve the maximum possible - length of the BLOB prefix in the key value. 
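
The removed store_key_val_for_row() code around here follows the MySQL key-value layout described earlier in this hunk: an optional null-indicator byte, a fixed 2-byte little-endian length for VARCHAR/BLOB parts, the (possibly prefix-truncated) data, and zero fill up to the reserved key length so two key values can be compared with a plain memcmp(). A compact sketch of packing one nullable VARCHAR key part that way; the buffer layout is taken from that description, the function itself is invented:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Pack one nullable VARCHAR key part: [null byte][2-byte LE length][data][zero padding].
// key_len is the number of data bytes reserved for this part in the key buffer.
static std::size_t pack_varchar_key_part_sketch(std::uint8_t* buff,
                                                bool is_null,
                                                const char* data,
                                                std::size_t data_len,
                                                std::size_t key_len)
{
    std::uint8_t* p = buff;

    *p++ = is_null ? 1 : 0;                  // null-indicator byte

    std::memset(p, 0, 2 + key_len);          // pre-zero so memcmp() works on the key
    if (!is_null) {
        if (data_len > key_len) {
            data_len = key_len;              // column-prefix truncation
        }
        p[0] = static_cast<std::uint8_t>(data_len & 0xff);         // little-endian length
        p[1] = static_cast<std::uint8_t>((data_len >> 8) & 0xff);
        std::memcpy(p + 2, data, data_len);
    }
    p += 2 + key_len;                        // always reserve the full key length

    return static_cast<std::size_t>(p - buff);
}

int main()
{
    std::vector<std::uint8_t> buff(1 + 2 + 10);
    std::size_t used = pack_varchar_key_part_sketch(buff.data(), false, "abc", 3, 10);
    std::printf("used %zu bytes\n", used);   // 13
}
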
*/ ++ if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, old_row, ++ new_row)) { + WSREP_DEBUG("WSREP: UPDATE_ROW_KEY FAILED"); + DBUG_PRINT("wsrep", ("row key failed")); + err = HA_ERR_INTERNAL_ERROR; + goto wsrep_error; + } + } +wsrep_error: +#endif /* WITH_WSREP */ - buff += key_len; - } else { - /* Here we handle all other data types except the - true VARCHAR, BLOB and TEXT. Note that the column - value we store may be also in a column prefix - index. */ - CHARSET_INFO* cs; - ulint true_len; - ulint key_len; - const uchar* src_start; - int error=0; - enum_field_types real_type; + DBUG_RETURN(err); +} - key_len = key_part->length; +/**********************************************************************//** +Deletes a row given as the parameter. +@return error number or 0 */ +UNIV_INTERN +int +ha_innobase::delete_row( +/*====================*/ + const uchar* record) /*!< in: a row in MySQL format */ +{ + dberr_t error; + trx_t* trx = thd_to_trx(user_thd); - if (is_null) { - buff += key_len; + DBUG_ENTER("ha_innobase::delete_row"); - continue; - } + ut_a(prebuilt->trx == trx); - src_start = record + key_part->offset; - real_type = field->real_type(); - true_len = key_len; + if (high_level_read_only) { + ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE); + DBUG_RETURN(HA_ERR_TABLE_READONLY); + } else if (!trx_is_started(trx)) { + ++trx->will_lock; + } - /* Character set for the field is defined only - to fields whose type is string and real field - type is not enum or set. For these fields check - if character set is multi byte. */ + ha_statistic_increment(&SSV::ha_delete_count); - if (real_type != MYSQL_TYPE_ENUM - && real_type != MYSQL_TYPE_SET - && ( mysql_type == MYSQL_TYPE_VAR_STRING - || mysql_type == MYSQL_TYPE_STRING)) { + if (!prebuilt->upd_node) { + row_get_prebuilt_update_vector(prebuilt); + } - cs = field->charset(); + /* This is a delete */ - /* For multi byte character sets we need to - calculate the true length of the key */ + prebuilt->upd_node->is_delete = TRUE; - if (key_len > 0 && cs->mbmaxlen > 1) { + innobase_srv_conc_enter_innodb(trx); - true_len = (ulint) - cs->cset->well_formed_len(cs, - (const char *)src_start, - (const char *)src_start - + key_len, - (uint) (key_len / - cs->mbmaxlen), - &error); - } - } + error = row_update_for_mysql((byte*) record, prebuilt); - memcpy(buff, src_start, true_len); - buff += true_len; + innobase_srv_conc_exit_innodb(trx); - /* Pad the unused space with spaces. */ + /* Tell the InnoDB server that there might be work for + utility threads: */ - if (true_len < key_len) { - ulint pad_len = key_len - true_len; - ut_a(!(pad_len % cs->mbminlen)); + innobase_active_small(); - cs->cset->fill(cs, buff, pad_len, - 0x20 /* space */); - buff += pad_len; - } +#ifdef WITH_WSREP + if (error == DB_SUCCESS && + wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && + wsrep_on(user_thd) && + !wsrep_thd_skip_append_keys(user_thd)) + { - if (wsrep_append_keys(user_thd, false, record, NULL)) { ++ if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, record, ++ NULL)) { + DBUG_PRINT("wsrep", ("delete fail")); + error = (dberr_t)HA_ERR_INTERNAL_ERROR; + goto wsrep_error; } } - - ut_a(buff <= buff_start + buff_len); - - DBUG_RETURN((uint)(buff - buff_start)); +wsrep_error: +#endif + DBUG_RETURN(convert_error_code_to_mysql( + error, prebuilt->table->flags, user_thd)); } -/**************************************************************//** -Builds a 'template' to the prebuilt struct. 
The template is used in fast -retrieval of just those column values MySQL needs in its processing. */ -static +/**********************************************************************//** +Removes a new lock set on a row, if it was not read optimistically. This can +be called after a row has been read in the processing of an UPDATE or a DELETE +query, if the option innodb_locks_unsafe_for_binlog is set. */ +UNIV_INTERN void -build_template( -/*===========*/ - row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct */ - THD* thd, /*!< in: current user thread, used - only if templ_type is - ROW_MYSQL_REC_FIELDS */ - TABLE* table, /*!< in: MySQL table */ - uint templ_type) /*!< in: ROW_MYSQL_WHOLE_ROW or - ROW_MYSQL_REC_FIELDS */ +ha_innobase::unlock_row(void) +/*=========================*/ { - dict_index_t* index; - dict_index_t* clust_index; - mysql_row_templ_t* templ; - Field* field; - ulint n_fields; - ulint n_requested_fields = 0; - ibool fetch_all_in_key = FALSE; - ibool fetch_primary_key_cols = FALSE; - ulint i; - /* byte offset of the end of last requested column */ - ulint mysql_prefix_len = 0; + DBUG_ENTER("ha_innobase::unlock_row"); - if (prebuilt->select_lock_type == LOCK_X) { - /* We always retrieve the whole clustered index record if we - use exclusive row level locks, for example, if the read is - done in an UPDATE statement. */ + /* Consistent read does not take any locks, thus there is + nothing to unlock. */ - templ_type = ROW_MYSQL_WHOLE_ROW; + if (prebuilt->select_lock_type == LOCK_NONE) { + DBUG_VOID_RETURN; } - if (templ_type == ROW_MYSQL_REC_FIELDS) { - if (prebuilt->hint_need_to_fetch_extra_cols - == ROW_RETRIEVE_ALL_COLS) { + /* Ideally, this assert must be in the beginning of the function. + But there are some calls to this function from the SQL layer when the + transaction is in state TRX_STATE_NOT_STARTED. The check on + prebuilt->select_lock_type above gets around this issue. */ + ut_ad(trx_state_eq(prebuilt->trx, TRX_STATE_ACTIVE)); - /* We know we must at least fetch all columns in the - key, or all columns in the table */ + switch (prebuilt->row_read_type) { + case ROW_READ_WITH_LOCKS: + if (!srv_locks_unsafe_for_binlog + && prebuilt->trx->isolation_level + > TRX_ISO_READ_COMMITTED) { + break; + } + /* fall through */ + case ROW_READ_TRY_SEMI_CONSISTENT: + row_unlock_for_mysql(prebuilt, FALSE); + break; + case ROW_READ_DID_SEMI_CONSISTENT: + prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; + break; + } - if (prebuilt->read_just_key) { - /* MySQL has instructed us that it is enough - to fetch the columns in the key; looks like - MySQL can set this flag also when there is - only a prefix of the column in the key: in - that case we retrieve the whole column from - the clustered index */ + DBUG_VOID_RETURN; +} + +/* See handler.h and row0mysql.h for docs on this function. */ +UNIV_INTERN +bool +ha_innobase::was_semi_consistent_read(void) +/*=======================================*/ +{ + return(prebuilt->row_read_type == ROW_READ_DID_SEMI_CONSISTENT); +} + +/* See handler.h and row0mysql.h for docs on this function. */ +UNIV_INTERN +void +ha_innobase::try_semi_consistent_read(bool yes) +/*===========================================*/ +{ + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); - fetch_all_in_key = TRUE; - } else { - templ_type = ROW_MYSQL_WHOLE_ROW; - } - } else if (prebuilt->hint_need_to_fetch_extra_cols - == ROW_RETRIEVE_PRIMARY_KEY) { - /* We must at least fetch all primary key cols. 
Note - that if the clustered index was internally generated - by InnoDB on the row id (no primary key was - defined), then row_search_for_mysql() will always - retrieve the row id to a special buffer in the - prebuilt struct. */ + /* Row read type is set to semi consistent read if this was + requested by the MySQL and either innodb_locks_unsafe_for_binlog + option is used or this session is using READ COMMITTED isolation + level. */ - fetch_primary_key_cols = TRUE; - } + if (yes + && (srv_locks_unsafe_for_binlog + || prebuilt->trx->isolation_level <= TRX_ISO_READ_COMMITTED)) { + prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; + } else { + prebuilt->row_read_type = ROW_READ_WITH_LOCKS; } +} - clust_index = dict_table_get_first_index(prebuilt->table); +/******************************************************************//** +Initializes a handle to use an index. +@return 0 or error number */ +UNIV_INTERN +int +ha_innobase::index_init( +/*====================*/ + uint keynr, /*!< in: key (index) number */ + bool sorted) /*!< in: 1 if result MUST be sorted according to index */ +{ + DBUG_ENTER("index_init"); - if (templ_type == ROW_MYSQL_REC_FIELDS) { - index = prebuilt->index; - } else { - index = clust_index; - } + DBUG_RETURN(change_active_index(keynr)); +} - if (index == clust_index) { - prebuilt->need_to_access_clustered = TRUE; - } else { - prebuilt->need_to_access_clustered = FALSE; - /* Below we check column by column if we need to access - the clustered index */ +/******************************************************************//** +Currently does nothing. +@return 0 */ +UNIV_INTERN +int +ha_innobase::index_end(void) +/*========================*/ +{ + int error = 0; + DBUG_ENTER("index_end"); + active_index = MAX_KEY; + in_range_check_pushed_down = FALSE; + ds_mrr.dsmrr_close(); + DBUG_RETURN(error); +} + +/*********************************************************************//** +Converts a search mode flag understood by MySQL to a flag understood +by InnoDB. */ +static inline +ulint +convert_search_mode_to_innobase( +/*============================*/ + enum ha_rkey_function find_flag) +{ + switch (find_flag) { + case HA_READ_KEY_EXACT: + /* this does not require the index to be UNIQUE */ + return(PAGE_CUR_GE); + case HA_READ_KEY_OR_NEXT: + return(PAGE_CUR_GE); + case HA_READ_KEY_OR_PREV: + return(PAGE_CUR_LE); + case HA_READ_AFTER_KEY: + return(PAGE_CUR_G); + case HA_READ_BEFORE_KEY: + return(PAGE_CUR_L); + case HA_READ_PREFIX: + return(PAGE_CUR_GE); + case HA_READ_PREFIX_LAST: + return(PAGE_CUR_LE); + case HA_READ_PREFIX_LAST_OR_PREV: + return(PAGE_CUR_LE); + /* In MySQL-4.0 HA_READ_PREFIX and HA_READ_PREFIX_LAST always + pass a complete-field prefix of a key value as the search + tuple. I.e., it is not allowed that the last field would + just contain n first bytes of the full field value. + MySQL uses a 'padding' trick to convert LIKE 'abc%' + type queries so that it can use as a search tuple + a complete-field-prefix of a key value. Thus, the InnoDB + search mode PAGE_CUR_LE_OR_EXTENDS is never used. + TODO: when/if MySQL starts to use also partial-field + prefixes, we have to deal with stripping of spaces + and comparison of non-latin1 char type fields in + innobase_mysql_cmp() to get PAGE_CUR_LE_OR_EXTENDS to + work correctly. 
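
convert_search_mode_to_innobase() above maps the handler-level search flags onto InnoDB page-cursor positioning modes. The mapping shown in this hunk is restated below with small mirror enums; the real types are ha_rkey_function from my_base.h and the PAGE_CUR_* constants:

#include <cstdio>

// Mirror enums for illustration only.
enum RKey   { KEY_EXACT, KEY_OR_NEXT, KEY_OR_PREV, AFTER_KEY, BEFORE_KEY,
              PREFIX, PREFIX_LAST, PREFIX_LAST_OR_PREV, MBR_ANY };
enum Cursor { CUR_GE, CUR_LE, CUR_G, CUR_L, CUR_UNSUPP };

static Cursor convert_search_mode_sketch(RKey flag)
{
    switch (flag) {
    case KEY_EXACT:           return CUR_GE;   // does not require a UNIQUE index
    case KEY_OR_NEXT:         return CUR_GE;
    case KEY_OR_PREV:         return CUR_LE;
    case AFTER_KEY:           return CUR_G;
    case BEFORE_KEY:          return CUR_L;
    case PREFIX:              return CUR_GE;
    case PREFIX_LAST:         return CUR_LE;
    case PREFIX_LAST_OR_PREV: return CUR_LE;
    case MBR_ANY:             return CUR_UNSUPP;  // MBR (R-tree) searches unsupported here
    }
    return CUR_UNSUPP;
}

int main()
{
    std::printf("%d\n", convert_search_mode_sketch(AFTER_KEY));  // 2 == CUR_G
}
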
*/ + case HA_READ_MBR_CONTAIN: + case HA_READ_MBR_INTERSECT: + case HA_READ_MBR_WITHIN: + case HA_READ_MBR_DISJOINT: + case HA_READ_MBR_EQUAL: + return(PAGE_CUR_UNSUPP); + /* do not use "default:" in order to produce a gcc warning: + enumeration value '...' not handled in switch + (if -Wswitch or -Wall is used) */ } - n_fields = (ulint)table->s->fields; /* number of columns */ + my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "this functionality"); - if (!prebuilt->mysql_template) { - prebuilt->mysql_template = (mysql_row_templ_t*) - mem_alloc(n_fields * sizeof(mysql_row_templ_t)); - } + return(PAGE_CUR_UNSUPP); +} - prebuilt->template_type = templ_type; - prebuilt->null_bitmap_len = table->s->null_bytes; +/* + BACKGROUND INFO: HOW A SELECT SQL QUERY IS EXECUTED + --------------------------------------------------- +The following does not cover all the details, but explains how we determine +the start of a new SQL statement, and what is associated with it. - prebuilt->templ_contains_blob = FALSE; +For each table in the database the MySQL interpreter may have several +table handle instances in use, also in a single SQL query. For each table +handle instance there is an InnoDB 'prebuilt' struct which contains most +of the InnoDB data associated with this table handle instance. - /* Note that in InnoDB, i is the column number. MySQL calls columns - 'fields'. */ - for (i = 0; i < n_fields; i++) { - const dict_col_t* col = &index->table->cols[i]; - templ = prebuilt->mysql_template + n_requested_fields; - field = table->field[i]; + A) if the user has not explicitly set any MySQL table level locks: - if (UNIV_LIKELY(templ_type == ROW_MYSQL_REC_FIELDS)) { - /* Decide which columns we should fetch - and which we can skip. */ - register const ibool index_contains_field = - dict_index_contains_col_or_prefix(index, i); + 1) MySQL calls ::external_lock to set an 'intention' table level lock on +the table of the handle instance. There we set +prebuilt->sql_stat_start = TRUE. The flag sql_stat_start should be set +true if we are taking this table handle instance to use in a new SQL +statement issued by the user. We also increment trx->n_mysql_tables_in_use. - if (!index_contains_field && prebuilt->read_just_key) { - /* If this is a 'key read', we do not need - columns that are not in the key */ + 2) If prebuilt->sql_stat_start == TRUE we 'pre-compile' the MySQL search +instructions to prebuilt->template of the table handle instance in +::index_read. The template is used to save CPU time in large joins. - goto skip_field; - } + 3) In row_search_for_mysql, if prebuilt->sql_stat_start is true, we +allocate a new consistent read view for the trx if it does not yet have one, +or in the case of a locking read, set an InnoDB 'intention' table level +lock on the table. - if (index_contains_field && fetch_all_in_key) { - /* This field is needed in the query */ + 4) We do the SELECT. MySQL may repeatedly call ::index_read for the +same table handle instance, if it is a join. - goto include_field; - } + 5) When the SELECT ends, MySQL removes its intention table level locks +in ::external_lock. When trx->n_mysql_tables_in_use drops to zero, + (a) we execute a COMMIT there if the autocommit is on, + (b) we also release possible 'SQL statement level resources' InnoDB may +have for this SQL statement. The MySQL interpreter does NOT execute +autocommit for pure read transactions, though it should. That is why the +table handler in that case has to execute the COMMIT in ::external_lock. 
- if (bitmap_is_set(table->read_set, i) || - bitmap_is_set(table->write_set, i)) { - /* This field is needed in the query */ + B) If the user has explicitly set MySQL table level locks, then MySQL +does NOT call ::external_lock at the start of the statement. To determine +when we are at the start of a new SQL statement we at the start of +::index_read also compare the query id to the latest query id where the +table handle instance was used. If it has changed, we know we are at the +start of a new SQL statement. Since the query id can theoretically +overwrap, we use this test only as a secondary way of determining the +start of a new SQL statement. */ - goto include_field; - } - if (fetch_primary_key_cols - && dict_table_col_in_clustered_key( - index->table, i)) { - /* This field is needed in the query */ +/**********************************************************************//** +Positions an index cursor to the index specified in the handle. Fetches the +row if any. +@return 0, HA_ERR_KEY_NOT_FOUND, or error number */ +UNIV_INTERN +int +ha_innobase::index_read( +/*====================*/ + uchar* buf, /*!< in/out: buffer for the returned + row */ + const uchar* key_ptr, /*!< in: key value; if this is NULL + we position the cursor at the + start or end of index; this can + also contain an InnoDB row id, in + which case key_len is the InnoDB + row id length; the key value can + also be a prefix of a full key value, + and the last column can be a prefix + of a full column */ + uint key_len,/*!< in: key value length */ + enum ha_rkey_function find_flag)/*!< in: search flags from my_base.h */ +{ + ulint mode; + dict_index_t* index; + ulint match_mode = 0; + int error; + dberr_t ret; - goto include_field; - } + DBUG_ENTER("index_read"); + DEBUG_SYNC_C("ha_innobase_index_read_begin"); - /* This field is not needed in the query, skip it */ + ut_a(prebuilt->trx == thd_to_trx(user_thd)); + ut_ad(key_len != 0 || find_flag != HA_READ_KEY_EXACT); - goto skip_field; - } -include_field: - n_requested_fields++; + ha_statistic_increment(&SSV::ha_read_key_count); - templ->col_no = i; - templ->clust_rec_field_no = dict_col_get_clust_pos( - col, clust_index); - ut_ad(templ->clust_rec_field_no != ULINT_UNDEFINED); + index = prebuilt->index; - if (index == clust_index) { - templ->rec_field_no = templ->clust_rec_field_no; - } else { - templ->rec_field_no = dict_index_get_nth_col_pos( - index, i); - if (templ->rec_field_no == ULINT_UNDEFINED) { - prebuilt->need_to_access_clustered = TRUE; - } - } + if (UNIV_UNLIKELY(index == NULL) || dict_index_is_corrupted(index)) { + prebuilt->index_usable = FALSE; + DBUG_RETURN(HA_ERR_CRASHED); + } + if (UNIV_UNLIKELY(!prebuilt->index_usable)) { + DBUG_RETURN(dict_index_is_corrupted(index) + ? 
HA_ERR_INDEX_CORRUPT + : HA_ERR_TABLE_DEF_CHANGED); + } - if (field->null_ptr) { - templ->mysql_null_byte_offset = - (ulint) ((char*) field->null_ptr - - (char*) table->record[0]); + if (index->type & DICT_FTS) { + DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); + } - templ->mysql_null_bit_mask = (ulint) field->null_bit; - } else { - templ->mysql_null_bit_mask = 0; - } + /* Note that if the index for which the search template is built is not + necessarily prebuilt->index, but can also be the clustered index */ - templ->mysql_col_offset = (ulint) - get_field_offset(table, field); + if (prebuilt->sql_stat_start) { + build_template(false); + } - templ->mysql_col_len = (ulint) field->pack_length(); - if (mysql_prefix_len < templ->mysql_col_offset - + templ->mysql_col_len) { - mysql_prefix_len = templ->mysql_col_offset - + templ->mysql_col_len; - } - templ->type = col->mtype; - templ->mysql_type = (ulint)field->type(); + if (key_ptr) { + /* Convert the search key value to InnoDB format into + prebuilt->search_tuple */ - if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) { - templ->mysql_length_bytes = (ulint) - (((Field_varstring*)field)->length_bytes); - } + row_sel_convert_mysql_key_to_innobase( + prebuilt->search_tuple, + prebuilt->srch_key_val1, + prebuilt->srch_key_val_len, + index, + (byte*) key_ptr, + (ulint) key_len, + prebuilt->trx); + DBUG_ASSERT(prebuilt->search_tuple->n_fields > 0); + } else { + /* We position the cursor to the last or the first entry + in the index */ - templ->charset = dtype_get_charset_coll(col->prtype); - templ->mbminlen = dict_col_get_mbminlen(col); - templ->mbmaxlen = dict_col_get_mbmaxlen(col); - templ->is_unsigned = col->prtype & DATA_UNSIGNED; - if (templ->type == DATA_BLOB) { - prebuilt->templ_contains_blob = TRUE; - } -skip_field: - ; + dtuple_set_n_fields(prebuilt->search_tuple, 0); } - prebuilt->n_template = n_requested_fields; - prebuilt->mysql_prefix_len = mysql_prefix_len; + mode = convert_search_mode_to_innobase(find_flag); - if (index != clust_index && prebuilt->need_to_access_clustered) { - /* Change rec_field_no's to correspond to the clustered index - record */ - for (i = 0; i < n_requested_fields; i++) { - templ = prebuilt->mysql_template + i; + match_mode = 0; - templ->rec_field_no = templ->clust_rec_field_no; - } - } -} + if (find_flag == HA_READ_KEY_EXACT) { -/********************************************************************//** -This special handling is really to overcome the limitations of MySQL's -binlogging. We need to eliminate the non-determinism that will arise in -INSERT ... SELECT type of statements, since MySQL binlog only stores the -min value of the autoinc interval. Once that is fixed we can get rid of -the special lock handling. -@return DB_SUCCESS if all OK else error code */ -UNIV_INTERN -ulint -ha_innobase::innobase_lock_autoinc(void) -/*====================================*/ -{ - ulint error = DB_SUCCESS; + match_mode = ROW_SEL_EXACT; - switch (innobase_autoinc_lock_mode) { - case AUTOINC_NO_LOCKING: - /* Acquire only the AUTOINC mutex. */ - dict_table_autoinc_lock(prebuilt->table); - break; + } else if (find_flag == HA_READ_PREFIX + || find_flag == HA_READ_PREFIX_LAST) { + + match_mode = ROW_SEL_EXACT_PREFIX; + } - case AUTOINC_NEW_STYLE_LOCKING: - /* For simple (single/multi) row INSERTs/REPLACEs and RBR - events, we fallback to the old style only if another - transaction has already acquired the AUTOINC lock on - behalf of a LOAD FILE or INSERT ... SELECT etc. type of - statement. 
*/ - if (thd_sql_command(user_thd) == SQLCOM_INSERT - || thd_sql_command(user_thd) == SQLCOM_REPLACE - || thd_sql_command(user_thd) == SQLCOM_END // RBR event - ) { - dict_table_t* table = prebuilt->table; + last_match_mode = (uint) match_mode; - /* Acquire the AUTOINC mutex. */ - dict_table_autoinc_lock(table); + if (mode != PAGE_CUR_UNSUPP) { - /* We need to check that another transaction isn't - already holding the AUTOINC lock on the table. */ - if (table->n_waiting_or_granted_auto_inc_locks) { - /* Release the mutex to avoid deadlocks and - fall back to old style locking. */ - dict_table_autoinc_unlock(table); - } else { - /* Do not fall back to old style locking. */ - break; - } - } - /* fall through */ + innobase_srv_conc_enter_innodb(prebuilt->trx); - case AUTOINC_OLD_STYLE_LOCKING: - error = row_lock_table_autoinc_for_mysql(prebuilt); + ret = row_search_for_mysql((byte*) buf, mode, prebuilt, + match_mode, 0); - if (error == DB_SUCCESS) { + innobase_srv_conc_exit_innodb(prebuilt->trx); + } else { - /* Acquire the AUTOINC mutex. */ - dict_table_autoinc_lock(prebuilt->table); + ret = DB_UNSUPPORTED; + } + + switch (ret) { + case DB_SUCCESS: + error = 0; + table->status = 0; + if (prebuilt->table->is_system_db) { + srv_stats.n_system_rows_read.add( + (size_t) prebuilt->trx->id, 1); + } else { + srv_stats.n_rows_read.add( + (size_t) prebuilt->trx->id, 1); } break; + case DB_RECORD_NOT_FOUND: + error = HA_ERR_KEY_NOT_FOUND; + table->status = STATUS_NOT_FOUND; + break; + case DB_END_OF_INDEX: + error = HA_ERR_KEY_NOT_FOUND; + table->status = STATUS_NOT_FOUND; + break; + case DB_TABLESPACE_DELETED: + ib_senderrf( + prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_DISCARDED, + table->s->table_name.str); + + table->status = STATUS_NOT_FOUND; + error = HA_ERR_NO_SUCH_TABLE; + break; + case DB_TABLESPACE_NOT_FOUND: + + ib_senderrf( + prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_MISSING, MYF(0), + table->s->table_name.str); + + table->status = STATUS_NOT_FOUND; + error = HA_ERR_NO_SUCH_TABLE; + break; default: - ut_error; + error = convert_error_code_to_mysql( + ret, prebuilt->table->flags, user_thd); + + table->status = STATUS_NOT_FOUND; + break; } - return(ulong(error)); + DBUG_RETURN(error); } -/********************************************************************//** -Reset the autoinc value in the table. -@return DB_SUCCESS if all went well else error code */ +/*******************************************************************//** +The following functions works like index_read, but it find the last +row with the current key value or prefix. 
+@return 0, HA_ERR_KEY_NOT_FOUND, or an error code */ UNIV_INTERN -ulint -ha_innobase::innobase_reset_autoinc( -/*================================*/ - ulonglong autoinc) /*!< in: value to store */ +int +ha_innobase::index_read_last( +/*=========================*/ + uchar* buf, /*!< out: fetched row */ + const uchar* key_ptr,/*!< in: key value, or a prefix of a full + key value */ + uint key_len)/*!< in: length of the key val or prefix + in bytes */ { - ulint error; - - error = innobase_lock_autoinc(); - - if (error == DB_SUCCESS) { - - dict_table_autoinc_initialize(prebuilt->table, autoinc); - - dict_table_autoinc_unlock(prebuilt->table); - } - - return(ulong(error)); + return(index_read(buf, key_ptr, key_len, HA_READ_PREFIX_LAST)); } /********************************************************************//** @@@ -9438,1492 -6037,991 +9442,1512 @@@ ha_innobase::rnd_next } /**********************************************************************//** -Checks which fields have changed in a row and stores information -of them to an update vector. -@return error number or 0 */ -static +Fetches a row from the table based on a row reference. +@return 0, HA_ERR_KEY_NOT_FOUND, or error code */ +UNIV_INTERN int -calc_row_difference( -/*================*/ - upd_t* uvect, /*!< in/out: update vector */ - uchar* old_row, /*!< in: old row in MySQL format */ - uchar* new_row, /*!< in: new row in MySQL format */ - TABLE* table, /*!< in: table in MySQL data - dictionary */ - uchar* upd_buff, /*!< in: buffer to use */ - ulint buff_len, /*!< in: buffer length */ - row_prebuilt_t* prebuilt, /*!< in: InnoDB prebuilt struct */ - THD* thd) /*!< in: user thread */ +ha_innobase::rnd_pos( +/*=================*/ + uchar* buf, /*!< in/out: buffer for the row */ + uchar* pos) /*!< in: primary key value of the row in the + MySQL format, or the row id if the clustered + index was internally generated by InnoDB; the + length of data in pos has to be ref_length */ { - uchar* original_upd_buff = upd_buff; - Field* field; - enum_field_types field_mysql_type; - uint n_fields; - ulint o_len; - ulint n_len; - ulint col_pack_len; - const byte* new_mysql_row_col; - const byte* o_ptr; - const byte* n_ptr; - byte* buf; - upd_field_t* ufield; - ulint col_type; - ulint n_changed = 0; - dfield_t dfield; - dict_index_t* clust_index; - uint i; + int error; + DBUG_ENTER("rnd_pos"); + DBUG_DUMP("key", pos, ref_length); - n_fields = table->s->fields; - clust_index = dict_table_get_first_index(prebuilt->table); + ha_statistic_increment(&SSV::ha_read_rnd_count); - /* We use upd_buff to convert changed fields */ - buf = (byte*) upd_buff; + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); - for (i = 0; i < n_fields; i++) { - field = table->field[i]; + /* Note that we assume the length of the row reference is fixed + for the table, and it is == ref_length */ - o_ptr = (const byte*) old_row + get_field_offset(table, field); - n_ptr = (const byte*) new_row + get_field_offset(table, field); + error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT); - /* Use new_mysql_row_col and col_pack_len save the values */ + if (error) { + DBUG_PRINT("error", ("Got error: %d", error)); + } - new_mysql_row_col = n_ptr; - col_pack_len = field->pack_length(); + DBUG_RETURN(error); +} + +/**********************************************************************//** +Initialize FT index scan +@return 0 or error number */ +UNIV_INTERN +int +ha_innobase::ft_init() +/*==================*/ +{ + DBUG_ENTER("ft_init"); - o_len = col_pack_len; - n_len = col_pack_len; + trx_t* 
trx = check_trx_exists(ha_thd()); - /* We use o_ptr and n_ptr to dig up the actual data for - comparison. */ + /* FTS queries are not treated as autocommit non-locking selects. + This is because the FTS implementation can acquire locks behind + the scenes. This has not been verified but it is safer to treat + them as regular read only transactions for now. */ - field_mysql_type = field->type(); + if (!trx_is_started(trx)) { + ++trx->will_lock; + } - col_type = prebuilt->table->cols[i].mtype; + DBUG_RETURN(rnd_init(false)); +} - switch (col_type) { +/**********************************************************************//** +Initialize FT index scan +@return FT_INFO structure if successful or NULL */ +UNIV_INTERN +FT_INFO* +ha_innobase::ft_init_ext( +/*=====================*/ + uint flags, /* in: */ + uint keynr, /* in: */ + String* key) /* in: */ +{ + trx_t* trx; + dict_table_t* ft_table; + dberr_t error; + byte* query = (byte*) key->ptr(); + ulint query_len = key->length(); + const CHARSET_INFO* char_set = key->charset(); + NEW_FT_INFO* fts_hdl = NULL; + dict_index_t* index; + fts_result_t* result; + char buf_tmp[8192]; + ulint buf_tmp_used; + uint num_errors; + + if (fts_enable_diag_print) { + fprintf(stderr, "keynr=%u, '%.*s'\n", + keynr, (int) key->length(), (byte*) key->ptr()); + + if (flags & FT_BOOL) { + fprintf(stderr, "BOOL search\n"); + } else { + fprintf(stderr, "NL search\n"); + } + } - case DATA_BLOB: - o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len); - n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len); + /* FIXME: utf32 and utf16 are not compatible with some + string function used. So to convert them to uft8 before + proceed. */ + if (strcmp(char_set->csname, "utf32") == 0 + || strcmp(char_set->csname, "utf16") == 0) { + buf_tmp_used = innobase_convert_string( + buf_tmp, sizeof(buf_tmp) - 1, + &my_charset_utf8_general_ci, + query, query_len, (CHARSET_INFO*) char_set, + &num_errors); - break; + query = (byte*) buf_tmp; + query_len = buf_tmp_used; + query[query_len] = 0; + } - case DATA_VARCHAR: - case DATA_BINARY: - case DATA_VARMYSQL: - if (field_mysql_type == MYSQL_TYPE_VARCHAR) { - /* This is a >= 5.0.3 type true VARCHAR where - the real payload data length is stored in - 1 or 2 bytes */ + trx = prebuilt->trx; - o_ptr = row_mysql_read_true_varchar( - &o_len, o_ptr, - (ulint) - (((Field_varstring*)field)->length_bytes)); + /* FTS queries are not treated as autocommit non-locking selects. + This is because the FTS implementation can acquire locks behind + the scenes. This has not been verified but it is safer to treat + them as regular read only transactions for now. 
*/ - n_ptr = row_mysql_read_true_varchar( - &n_len, n_ptr, - (ulint) - (((Field_varstring*)field)->length_bytes)); - } + if (!trx_is_started(trx)) { + ++trx->will_lock; + } - break; - default: - ; - } + ft_table = prebuilt->table; - if (field->null_ptr) { - if (field_in_record_is_null(table, field, - (char*) old_row)) { - o_len = UNIV_SQL_NULL; - } + /* Table does not have an FTS index */ + if (!ft_table->fts || ib_vector_is_empty(ft_table->fts->indexes)) { + my_error(ER_TABLE_HAS_NO_FT, MYF(0)); + return(NULL); + } - if (field_in_record_is_null(table, field, - (char*) new_row)) { - n_len = UNIV_SQL_NULL; - } - } + /* If tablespace is discarded, we should return here */ + if (dict_table_is_discarded(ft_table)) { + my_error(ER_NO_SUCH_TABLE, MYF(0), table->s->db.str, + table->s->table_name.str); + return(NULL); + } - if (o_len != n_len || (o_len != UNIV_SQL_NULL && - 0 != memcmp(o_ptr, n_ptr, o_len))) { - /* The field has changed */ + if (keynr == NO_SUCH_KEY) { + /* FIXME: Investigate the NO_SUCH_KEY usage */ + index = (dict_index_t*) ib_vector_getp(ft_table->fts->indexes, 0); + } else { + index = innobase_get_index(keynr); + } - ufield = uvect->fields + n_changed; - UNIV_MEM_INVALID(ufield, sizeof *ufield); + if (!index || index->type != DICT_FTS) { + my_error(ER_TABLE_HAS_NO_FT, MYF(0)); + return(NULL); + } - /* Let us use a dummy dfield to make the conversion - from the MySQL column format to the InnoDB format */ + if (!(ft_table->fts->fts_status & ADDED_TABLE_SYNCED)) { + fts_init_index(ft_table, FALSE); - if (n_len != UNIV_SQL_NULL) { - dict_col_copy_type(prebuilt->table->cols + i, - dfield_get_type(&dfield)); + ft_table->fts->fts_status |= ADDED_TABLE_SYNCED; + } - buf = row_mysql_store_col_in_innobase_format( - &dfield, - (byte*)buf, - TRUE, - new_mysql_row_col, - col_pack_len, - dict_table_is_comp(prebuilt->table)); - dfield_copy(&ufield->new_val, &dfield); - } else { - dfield_set_null(&ufield->new_val); - } + error = fts_query(trx, index, flags, query, query_len, &result); - ufield->exp = NULL; - ufield->orig_len = 0; - ufield->field_no = dict_col_get_clust_pos( - &prebuilt->table->cols[i], clust_index); - n_changed++; - } + if (error != DB_SUCCESS) { + my_error(convert_error_code_to_mysql(error, 0, NULL), + MYF(0)); + return(NULL); } - uvect->n_fields = n_changed; - uvect->info_bits = 0; + /* Allocate FTS handler, and instantiate it before return */ + fts_hdl = static_cast<NEW_FT_INFO*>(my_malloc(sizeof(NEW_FT_INFO), + MYF(0))); - ut_a(buf <= (byte*)original_upd_buff + buff_len); + fts_hdl->please = const_cast<_ft_vft*>(&ft_vft_result); + fts_hdl->could_you = const_cast<_ft_vft_ext*>(&ft_vft_ext_result); + fts_hdl->ft_prebuilt = prebuilt; + fts_hdl->ft_result = result; - return(0); + /* FIXME: Re-evluate the condition when Bug 14469540 + is resolved */ + prebuilt->in_fts_query = true; + + return((FT_INFO*) fts_hdl); } -#ifdef WITH_WSREP -static -int -wsrep_calc_row_hash( -/*================*/ - byte* digest, /*!< in/out: md5 sum */ - const uchar* row, /*!< in: row in MySQL format */ - TABLE* table, /*!< in: table in MySQL data - dictionary */ - row_prebuilt_t* prebuilt, /*!< in: InnoDB prebuilt struct */ - THD* thd) /*!< in: user thread */ + +/*****************************************************************//** +Copy a cached MySQL row. +If requested, also avoids overwriting non-read columns. +@param[out] buf Row in MySQL format. +@param[in] cached_row Which row to copy. +@param[in] rec_len Record length. 
*/ +void +ha_innobase::copy_cached_row( + uchar* buf, + const uchar* cached_row, + uint rec_len) { - Field* field; - enum_field_types field_mysql_type; - uint n_fields; - ulint len; - const byte* ptr; - ulint col_type; - uint i; + if (prebuilt->keep_other_fields_on_keyread) { + row_sel_copy_cached_fields_for_mysql(buf, cached_row, + prebuilt); + } else { + memcpy(buf, cached_row, rec_len); + } +} - my_MD5Context ctx; - my_MD5Init (&ctx); - n_fields = table->s->fields; +/*****************************************************************//** +Set up search tuple for a query through FTS_DOC_ID_INDEX on +supplied Doc ID. This is used by MySQL to retrieve the documents +once the search result (Doc IDs) is available */ +static +void +innobase_fts_create_doc_id_key( +/*===========================*/ + dtuple_t* tuple, /* in/out: prebuilt->search_tuple */ + const dict_index_t* + index, /* in: index (FTS_DOC_ID_INDEX) */ + doc_id_t* doc_id) /* in/out: doc id to search, value + could be changed to storage format + used for search. */ +{ + doc_id_t temp_doc_id; + dfield_t* dfield = dtuple_get_nth_field(tuple, 0); - for (i = 0; i < n_fields; i++) { - byte null_byte=0; - byte true_byte=1; + ut_a(dict_index_get_n_unique(index) == 1); - field = table->field[i]; + dtuple_set_n_fields(tuple, index->n_fields); + dict_index_copy_types(tuple, index, index->n_fields); - ptr = (const byte*) row + get_field_offset(table, field); - len = field->pack_length(); +#ifdef UNIV_DEBUG + /* The unique Doc ID field should be an eight-bytes integer */ + dict_field_t* field = dict_index_get_nth_field(index, 0); + ut_a(field->col->mtype == DATA_INT); + ut_ad(sizeof(*doc_id) == field->fixed_len); + ut_ad(innobase_strcasecmp(index->name, FTS_DOC_ID_INDEX_NAME) == 0); +#endif /* UNIV_DEBUG */ - field_mysql_type = field->type(); + /* Convert to storage byte order */ + mach_write_to_8(reinterpret_cast<byte*>(&temp_doc_id), *doc_id); + *doc_id = temp_doc_id; + dfield_set_data(dfield, doc_id, sizeof(*doc_id)); - col_type = prebuilt->table->cols[i].mtype; + dtuple_set_n_fields_cmp(tuple, 1); - switch (col_type) { + for (ulint i = 1; i < index->n_fields; i++) { + dfield = dtuple_get_nth_field(tuple, i); + dfield_set_null(dfield); + } +} - case DATA_BLOB: - ptr = row_mysql_read_blob_ref(&len, ptr, len); +/**********************************************************************//** +Fetch next result from the FT result set +@return error code */ +UNIV_INTERN +int +ha_innobase::ft_read( +/*=================*/ + uchar* buf) /*!< in/out: buf contain result row */ +{ + fts_result_t* result; + int error; + row_prebuilt_t* ft_prebuilt; - break; + ft_prebuilt = ((NEW_FT_INFO*) ft_handler)->ft_prebuilt; - case DATA_VARCHAR: - case DATA_BINARY: - case DATA_VARMYSQL: - if (field_mysql_type == MYSQL_TYPE_VARCHAR) { - /* This is a >= 5.0.3 type true VARCHAR where - the real payload data length is stored in - 1 or 2 bytes */ + ut_a(ft_prebuilt == prebuilt); - ptr = row_mysql_read_true_varchar( - &len, ptr, - (ulint) - (((Field_varstring*)field)->length_bytes)); + result = ((NEW_FT_INFO*) ft_handler)->ft_result; - } + if (result->current == NULL) { + /* This is the case where the FTS query did not + contain and matching documents. */ + if (result->rankings_by_id != NULL) { + /* Now that we have the complete result, we + need to sort the document ids on their rank + calculation. 
*/ - break; - default: - ; - } + fts_query_sort_result_on_rank(result); - if (field->null_ptr && - field_in_record_is_null(table, field, (char*) row)) { - my_MD5Update (&ctx, &null_byte, 1); + result->current = const_cast<ib_rbt_node_t*>( + rbt_first(result->rankings_by_rank)); } else { - my_MD5Update (&ctx, &true_byte, 1); - my_MD5Update (&ctx, ptr, len); + ut_a(result->current == NULL); } + } else { + result->current = const_cast<ib_rbt_node_t*>( + rbt_next(result->rankings_by_rank, result->current)); } - my_MD5Final (digest, &ctx); - return(0); -} -#endif /* WITH_WSREP */ -/**********************************************************************//** -Updates a row given as a parameter to a new value. Note that we are given -whole rows, not just the fields which are updated: this incurs some -overhead for CPU when we check which fields are actually updated. -TODO: currently InnoDB does not prevent the 'Halloween problem': -in a searched update a single row can get updated several times -if its index columns are updated! -@return error number or 0 */ -UNIV_INTERN -int -ha_innobase::update_row( -/*====================*/ - const uchar* old_row, /*!< in: old row in MySQL format */ - uchar* new_row) /*!< in: new row in MySQL format */ -{ - upd_t* uvect; - int error = 0; - trx_t* trx = thd_to_trx(user_thd); +next_record: - DBUG_ENTER("ha_innobase::update_row"); + if (result->current != NULL) { + dict_index_t* index; + dtuple_t* tuple = prebuilt->search_tuple; + doc_id_t search_doc_id; - ut_a(prebuilt->trx == trx); + /* If we only need information from result we can return + without fetching the table row */ + if (ft_prebuilt->read_just_key) { + table->status= 0; + return(0); + } - if (upd_buf == NULL) { - ut_ad(upd_buf_size == 0); + index = dict_table_get_index_on_name( + prebuilt->table, FTS_DOC_ID_INDEX_NAME); - /* Create a buffer for packing the fields of a record. Why - table->reclength did not work here? Obviously, because char - fields when packed actually became 1 byte longer, when we also - stored the string length as the first byte. */ + /* Must find the index */ + ut_a(index); - upd_buf_size = table->s->reclength + table->s->max_key_length - + MAX_REF_PARTS * 3; - upd_buf = (uchar*) my_malloc(upd_buf_size, MYF(MY_WME)); - if (upd_buf == NULL) { - upd_buf_size = 0; - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - } - } + /* Switch to the FTS doc id index */ + prebuilt->index = index; - ha_statistic_increment(&SSV::ha_update_count); + fts_ranking_t* ranking = rbt_value( + fts_ranking_t, result->current); - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) - table->timestamp_field->set_time(); + search_doc_id = ranking->doc_id; - if (prebuilt->upd_node) { - uvect = prebuilt->upd_node->update; - } else { - uvect = row_get_prebuilt_update_vector(prebuilt); - } + /* We pass a pointer of search_doc_id because it will be + converted to storage byte order used in the search + tuple. 
*/ + innobase_fts_create_doc_id_key(tuple, index, &search_doc_id); - /* Build an update vector from the modified fields in the rows - (uses upd_buf of the handle) */ + innobase_srv_conc_enter_innodb(prebuilt->trx); - calc_row_difference(uvect, (uchar*) old_row, new_row, table, - upd_buf, upd_buf_size, prebuilt, user_thd); + dberr_t ret = row_search_for_mysql( + (byte*) buf, PAGE_CUR_GE, prebuilt, ROW_SEL_EXACT, 0); - /* This is not a delete */ - prebuilt->upd_node->is_delete = FALSE; + innobase_srv_conc_exit_innodb(prebuilt->trx); - ut_a(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW); + switch (ret) { + case DB_SUCCESS: + error = 0; + table->status = 0; + break; + case DB_RECORD_NOT_FOUND: + result->current = const_cast<ib_rbt_node_t*>( + rbt_next(result->rankings_by_rank, + result->current)); + + if (!result->current) { + /* exhaust the result set, should return + HA_ERR_END_OF_FILE just like + ha_innobase::general_fetch() and/or + ha_innobase::index_first() etc. */ + error = HA_ERR_END_OF_FILE; + table->status = STATUS_NOT_FOUND; + } else { + goto next_record; + } + break; + case DB_END_OF_INDEX: + error = HA_ERR_END_OF_FILE; + table->status = STATUS_NOT_FOUND; + break; + case DB_TABLESPACE_DELETED: - innodb_srv_conc_enter_innodb(trx); + ib_senderrf( + prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_DISCARDED, + table->s->table_name.str); - error = row_update_for_mysql((byte*) old_row, prebuilt); + table->status = STATUS_NOT_FOUND; + error = HA_ERR_NO_SUCH_TABLE; + break; + case DB_TABLESPACE_NOT_FOUND: - /* We need to do some special AUTOINC handling for the following case: + ib_senderrf( + prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_MISSING, + table->s->table_name.str); - INSERT INTO t (c1,c2) VALUES(x,y) ON DUPLICATE KEY UPDATE ... + table->status = STATUS_NOT_FOUND; + error = HA_ERR_NO_SUCH_TABLE; + break; + default: + error = convert_error_code_to_mysql( + ret, 0, user_thd); - We need to use the AUTOINC counter that was actually used by - MySQL in the UPDATE statement, which can be different from the - value used in the INSERT statement.*/ + table->status = STATUS_NOT_FOUND; + break; + } - if (error == DB_SUCCESS - && table->next_number_field - && new_row == table->record[0] - && thd_sql_command(user_thd) == SQLCOM_INSERT - && trx->duplicates) { + return(error); + } - ulonglong auto_inc; - ulonglong col_max_value; + return(HA_ERR_END_OF_FILE); +} + +/************************************************************************* +*/ + +void +ha_innobase::ft_end() +{ + fprintf(stderr, "ft_end()\n"); + + rnd_end(); +} +#ifdef WITH_WSREP +extern dict_index_t* +wsrep_dict_foreign_find_index( + dict_table_t* table, + const char** col_names, + const char** columns, + ulint n_cols, + dict_index_t* types_idx, + ibool check_charsets, + ulint check_null); - auto_inc = table->next_number_field->val_int(); ++inline ++const char* ++wsrep_key_type_to_str(wsrep_key_type type) ++{ ++ switch (type) { ++ case WSREP_KEY_SHARED: ++ return "shared"; ++ case WSREP_KEY_SEMI: ++ return "semi"; ++ case WSREP_KEY_EXCLUSIVE: ++ return "exclusive"; ++ }; ++ return "unknown"; ++} - extern dberr_t - /* We need the upper limit of the col type to check for - whether we update the table autoinc counter or not. 
*/ - col_max_value = innobase_get_int_col_max_value( - table->next_number_field); ++ulint +wsrep_append_foreign_key( +/*===========================*/ + trx_t* trx, /*!< in: trx */ + dict_foreign_t* foreign, /*!< in: foreign key constraint */ + const rec_t* rec, /*!<in: clustered index record */ + dict_index_t* index, /*!<in: clustered index */ + ibool referenced, /*!<in: is check for referenced table */ - ibool shared) /*!<in: is shared access */ ++ wsrep_key_type key_type) /*!< in: access type of this key ++ (shared, exclusive, semi...) */ +{ + ut_a(trx); + THD* thd = (THD*)trx->mysql_thd; + ulint rcode = DB_SUCCESS; + char cache_key[513] = {'\0'}; + int cache_key_len; + bool const copy = true; - if (auto_inc <= col_max_value && auto_inc != 0) { + if (!wsrep_on(trx->mysql_thd) || + wsrep_thd_exec_mode(thd) != LOCAL_STATE) + return DB_SUCCESS; - ulonglong offset; - ulonglong increment; + if (!thd || !foreign || + (!foreign->referenced_table && !foreign->foreign_table)) + { + WSREP_INFO("FK: %s missing in: %s", + (!thd) ? "thread" : + ((!foreign) ? "constraint" : + ((!foreign->referenced_table) ? + "referenced table" : "foreign table")), + (thd && wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void"); + return DB_ERROR; + } - offset = prebuilt->autoinc_offset; - increment = prebuilt->autoinc_increment; + if ( !((referenced) ? + foreign->referenced_table : foreign->foreign_table)) + { + WSREP_DEBUG("pulling %s table into cache", + (referenced) ? "referenced" : "foreign"); + mutex_enter(&(dict_sys->mutex)); + if (referenced) + { + foreign->referenced_table = + dict_table_get_low( + foreign->referenced_table_name_lookup); + if (foreign->referenced_table) + { + foreign->referenced_index = + wsrep_dict_foreign_find_index( + foreign->referenced_table, NULL, + foreign->referenced_col_names, + foreign->n_fields, + foreign->foreign_index, + TRUE, FALSE); + } + } + else + { + foreign->foreign_table = + dict_table_get_low( + foreign->foreign_table_name_lookup); + if (foreign->foreign_table) + { + foreign->foreign_index = + wsrep_dict_foreign_find_index( + foreign->foreign_table, NULL, + foreign->foreign_col_names, + foreign->n_fields, + foreign->referenced_index, + TRUE, FALSE); + } + } + mutex_exit(&(dict_sys->mutex)); + } - auto_inc = innobase_next_autoinc( - auto_inc, 1, increment, offset, col_max_value); + if ( !((referenced) ? + foreign->referenced_table : foreign->foreign_table)) + { + WSREP_WARN("FK: %s missing in query: %s", + (!foreign->referenced_table) ? + "referenced table" : "foreign table", + (wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void"); + return DB_ERROR; + } + byte key[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; + ulint len = WSREP_MAX_SUPPORTED_KEY_LENGTH; - error = innobase_set_max_autoinc(auto_inc); + dict_index_t *idx_target = (referenced) ? + foreign->referenced_index : index; + dict_index_t *idx = (referenced) ? 
+ UT_LIST_GET_FIRST(foreign->referenced_table->indexes) : + UT_LIST_GET_FIRST(foreign->foreign_table->indexes); + int i = 0; + while (idx != NULL && idx != idx_target) { + if (innobase_strcasecmp (idx->name, innobase_index_reserve_name) != 0) { + i++; } + idx = UT_LIST_GET_NEXT(indexes, idx); } + ut_a(idx); + key[0] = (char)i; - innodb_srv_conc_exit_innodb(trx); + rcode = wsrep_rec_get_foreign_key( + &key[1], &len, rec, index, idx, + wsrep_protocol_version > 1); + - error = convert_error_code_to_mysql(error, - prebuilt->table->flags, user_thd); + if (rcode != DB_SUCCESS) { + WSREP_ERROR( - "FK key set failed: %lu (%lu %lu), index: %s %s, %s", - rcode, referenced, shared, ++ "FK key set failed: %lu (%lu %s), index: %s %s, %s", ++ rcode, referenced, wsrep_key_type_to_str(key_type), + (index && index->name) ? index->name : + "void index", + (index && index->table_name) ? index->table_name : + "void table", + wsrep_thd_query(thd)); + return DB_ERROR; + } + - if (error == 0 /* success */ - && uvect->n_fields == 0 /* no columns were updated */) { + strncpy(cache_key, + (wsrep_protocol_version > 1) ? + ((referenced) ? + foreign->referenced_table->name : + foreign->foreign_table->name) : + foreign->foreign_table->name, sizeof(cache_key) - 1); + cache_key_len = strlen(cache_key); +#ifdef WSREP_DEBUG_PRINT + ulint j; + fprintf(stderr, "FK parent key, table: %s %s len: %lu ", + cache_key, (shared) ? "shared" : "exclusive", len+1); + for (j=0; j<len+1; j++) { + fprintf(stderr, " %hhX, ", key[j]); + } + fprintf(stderr, "\n"); +#endif + char *p = strchr(cache_key, '/'); + if (p) { + *p = '\0'; + } else { + WSREP_WARN("unexpected foreign key table %s %s", + foreign->referenced_table->name, + foreign->foreign_table->name); + } - /* This is the same as success, but instructs - MySQL that the row is not really updated and it - should not increase the count of updated rows. - This is fix for http://bugs.mysql.com/29157 */ - error = HA_ERR_RECORD_IS_THE_SAME; + wsrep_buf_t wkey_part[3]; + wsrep_key_t wkey = {wkey_part, 3}; + if (!wsrep_prepare_key_for_innodb( + (const uchar*)cache_key, + cache_key_len + 1, + (const uchar*)key, len+1, + wkey_part, + (size_t*)&wkey.key_parts_num)) { + WSREP_WARN("key prepare failed for cascaded FK: %s", + (wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void"); + return DB_ERROR; + } + rcode = (int)wsrep->append_key( + wsrep, + wsrep_ws_handle(thd, trx), + &wkey, + 1, - shared ? WSREP_KEY_SHARED : WSREP_KEY_EXCLUSIVE, ++ key_type, + copy); + if (rcode) { + DBUG_PRINT("wsrep", ("row key failed: %lu", rcode)); + WSREP_ERROR("Appending cascaded fk row key failed: %s, %lu", + (wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void", rcode); + return DB_ERROR; } - /* Tell InnoDB server that there might be work for - utility threads: */ + return DB_SUCCESS; +} - innobase_active_small(); +static int +wsrep_append_key( +/*==================*/ + THD *thd, + trx_t *trx, + TABLE_SHARE *table_share, + TABLE *table, + const char* key, + uint16_t key_len, - bool shared ++ wsrep_key_type key_type /*!< in: access type of this key ++ (shared, exclusive, semi...) */ +) +{ + DBUG_ENTER("wsrep_append_key"); + bool const copy = true; +#ifdef WSREP_DEBUG_PRINT + fprintf(stderr, "%s conn %ld, trx %llu, keylen %d, table %s\n Query: %s ", - (shared) ? 
"Shared" : "Exclusive", - wsrep_thd_thread_id(thd), (long long)trx->id, key_len, ++ wsrep_key_type_to_str(key_type), ++ wsrep_thd_thread_id(thd), trx->id, key_len, + table_share->table_name.str, wsrep_thd_query(thd)); + for (int i=0; i<key_len; i++) { + fprintf(stderr, "%hhX, ", key[i]); + } + fprintf(stderr, "\n"); +#endif + wsrep_buf_t wkey_part[3]; + wsrep_key_t wkey = {wkey_part, 3}; + if (!wsrep_prepare_key_for_innodb( + (const uchar*)table_share->table_cache_key.str, + table_share->table_cache_key.length, + (const uchar*)key, key_len, + wkey_part, + (size_t*)&wkey.key_parts_num)) { + WSREP_WARN("key prepare failed for: %s", + (wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void"); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } -#ifdef WITH_WSREP - if (!error && wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && - wsrep_on(user_thd)) { + int rcode = (int)wsrep->append_key( + wsrep, + wsrep_ws_handle(thd, trx), + &wkey, + 1, - shared ? WSREP_KEY_SHARED : WSREP_KEY_EXCLUSIVE, ++ key_type, + copy); + if (rcode) { + DBUG_PRINT("wsrep", ("row key failed: %d", rcode)); + WSREP_WARN("Appending row key failed: %s, %d", + (wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void", rcode); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + DBUG_RETURN(0); +} - DBUG_PRINT("wsrep", ("update row key")); +extern void compute_md5_hash(char *digest, const char *buf, int len); +#define MD5_HASH compute_md5_hash - if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, old_row, - new_row)) { - DBUG_PRINT("wsrep", ("row key failed")); - error = HA_ERR_INTERNAL_ERROR; - goto wsrep_error; +static bool +referenced_by_foreign_key2(dict_table_t* table, + dict_index_t* index) { + ut_ad(table != NULL); + ut_ad(index != NULL); + + const dict_foreign_set* fks = &table->referenced_set; + for (dict_foreign_set::const_iterator it = fks->begin(); + it != fks->end(); + ++it) + { + dict_foreign_t* foreign = *it; + if (foreign->referenced_index != index) { + continue; } + ut_ad(table == foreign->referenced_table); + return true; } -wsrep_error: -#endif - DBUG_RETURN(error); + return false; } -/**********************************************************************//** -Deletes a row given as the parameter. -@return error number or 0 */ -UNIV_INTERN int -ha_innobase::delete_row( -/*====================*/ - const uchar* record) /*!< in: a row in MySQL format */ +ha_innobase::wsrep_append_keys( +/*==================*/ + THD *thd, - bool shared, ++ wsrep_key_type key_type, /*!< in: access type of this key ++ (shared, exclusive, semi...) */ + const uchar* record0, /* in: row in MySQL format */ + const uchar* record1) /* in: row in MySQL format */ { - int error = 0; - trx_t* trx = thd_to_trx(user_thd); - - DBUG_ENTER("ha_innobase::delete_row"); - - ut_a(prebuilt->trx == trx); + int rcode; + DBUG_ENTER("wsrep_append_keys"); - ha_statistic_increment(&SSV::ha_delete_count); + bool key_appended = false; + trx_t *trx = thd_to_trx(thd); - if (!prebuilt->upd_node) { - row_get_prebuilt_update_vector(prebuilt); + if (table_share && table_share->tmp_table != NO_TMP_TABLE) { + WSREP_DEBUG("skipping tmp table DML: THD: %lu tmp: %d SQL: %s", + wsrep_thd_thread_id(thd), + table_share->tmp_table, + (wsrep_thd_query(thd)) ? 
+ wsrep_thd_query(thd) : "void"); + DBUG_RETURN(0); } - /* This is a delete */ + if (wsrep_protocol_version == 0) { + uint len; + char keyval[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; + char *key = &keyval[0]; + ibool is_null; - prebuilt->upd_node->is_delete = TRUE; + len = wsrep_store_key_val_for_row( + thd, table, 0, key, WSREP_MAX_SUPPORTED_KEY_LENGTH, + record0, &is_null); - innodb_srv_conc_enter_innodb(trx); + if (!is_null) { + rcode = wsrep_append_key( + thd, trx, table_share, table, keyval, - len, shared); ++ len, key_type); + if (rcode) DBUG_RETURN(rcode); + } + else + { + WSREP_DEBUG("NULL key skipped (proto 0): %s", + wsrep_thd_query(thd)); + } + } else { + ut_a(table->s->keys <= 256); + uint i; + bool hasPK= false; - error = row_update_for_mysql((byte*) record, prebuilt); + for (i=0; i<table->s->keys; ++i) { + KEY* key_info = table->key_info + i; + if (key_info->flags & HA_NOSAME) { + hasPK = true; + } + } - innodb_srv_conc_exit_innodb(trx); + for (i=0; i<table->s->keys; ++i) { + uint len; + char keyval0[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; + char keyval1[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; + char* key0 = &keyval0[1]; + char* key1 = &keyval1[1]; + KEY* key_info = table->key_info + i; + ibool is_null; - error = convert_error_code_to_mysql( - error, prebuilt->table->flags, user_thd); + dict_index_t* idx = innobase_get_index(i); + dict_table_t* tab = (idx) ? idx->table : NULL; - /* Tell the InnoDB server that there might be work for - utility threads: */ + keyval0[0] = (char)i; + keyval1[0] = (char)i; - innobase_active_small(); + if (!tab) { + WSREP_WARN("MySQL-InnoDB key mismatch %s %s", + table->s->table_name.str, + key_info->name); + } + /* !hasPK == table with no PK, must append all non-unique keys */ + if (!hasPK || key_info->flags & HA_NOSAME || + ((tab && + referenced_by_foreign_key2(tab, idx)) || + (!tab && referenced_by_foreign_key()))) { -#ifdef WITH_WSREP - if (!error && wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && - wsrep_on(user_thd)) { + len = wsrep_store_key_val_for_row( + thd, table, i, key0, + WSREP_MAX_SUPPORTED_KEY_LENGTH, + record0, &is_null); + if (!is_null) { + rcode = wsrep_append_key( + thd, trx, table_share, table, - keyval0, len+1, shared); ++ keyval0, len+1, key_type); + if (rcode) DBUG_RETURN(rcode); - if (key_info->flags & HA_NOSAME || shared) - if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, record, - NULL)) { - DBUG_PRINT("wsrep", ("delete fail")); - error = HA_ERR_INTERNAL_ERROR; - goto wsrep_error; ++ if (key_info->flags & HA_NOSAME || ++ key_type == WSREP_KEY_SHARED) + key_appended = true; + } + else + { + WSREP_DEBUG("NULL key skipped: %s", + wsrep_thd_query(thd)); + } + if (record1) { + len = wsrep_store_key_val_for_row( + thd, table, i, key1, + WSREP_MAX_SUPPORTED_KEY_LENGTH, + record1, &is_null); + if (!is_null && memcmp(key0, key1, len)) { + rcode = wsrep_append_key( + thd, trx, table_share, + table, - keyval1, len+1, shared); ++ keyval1, len+1, key_type); + if (rcode) DBUG_RETURN(rcode); + } + } + } } } -wsrep_error: -#endif - DBUG_RETURN(error); -} - -/**********************************************************************//** -Removes a new lock set on a row, if it was not read optimistically. This can -be called after a row has been read in the processing of an UPDATE or a DELETE -query, if the option innodb_locks_unsafe_for_binlog is set. 
*/ -UNIV_INTERN -void -ha_innobase::unlock_row(void) -/*=========================*/ -{ - DBUG_ENTER("ha_innobase::unlock_row"); - ut_ad(prebuilt->trx->conc_state == TRX_ACTIVE); - - /* Consistent read does not take any locks, thus there is - nothing to unlock. */ + /* if no PK, calculate hash of full row, to be the key value */ + if (!key_appended && wsrep_certify_nonPK) { + uchar digest[16]; + int rcode; - if (prebuilt->select_lock_type == LOCK_NONE) { - DBUG_VOID_RETURN; - } + wsrep_calc_row_hash(digest, record0, table, prebuilt, thd); + if ((rcode = wsrep_append_key(thd, trx, table_share, table, + (const char*) digest, 16, - shared))) { ++ key_type))) { + DBUG_RETURN(rcode); + } - switch (prebuilt->row_read_type) { - case ROW_READ_WITH_LOCKS: - if (!srv_locks_unsafe_for_binlog - && prebuilt->trx->isolation_level - > TRX_ISO_READ_COMMITTED) { - break; + if (record1) { + wsrep_calc_row_hash( + digest, record1, table, prebuilt, thd); + if ((rcode = wsrep_append_key(thd, trx, table_share, + table, + (const char*) digest, - 16, shared))) { ++ 16, key_type))) { + DBUG_RETURN(rcode); + } } - /* fall through */ - case ROW_READ_TRY_SEMI_CONSISTENT: - row_unlock_for_mysql(prebuilt, FALSE); - break; - case ROW_READ_DID_SEMI_CONSISTENT: - prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; - break; + DBUG_RETURN(0); } - DBUG_VOID_RETURN; -} - -/* See handler.h and row0mysql.h for docs on this function. */ -UNIV_INTERN -bool -ha_innobase::was_semi_consistent_read(void) -/*=======================================*/ -{ - return(prebuilt->row_read_type == ROW_READ_DID_SEMI_CONSISTENT); + DBUG_RETURN(0); } +#endif /* WITH_WSREP */ -/* See handler.h and row0mysql.h for docs on this function. */ +/*********************************************************************//** +Stores a reference to the current row to 'ref' field of the handle. Note +that in the case where we have generated the clustered index for the +table, the function parameter is illogical: we MUST ASSUME that 'record' +is the current 'position' of the handle, because if row ref is actually +the row id internally generated in InnoDB, then 'record' does not contain +it. We just guess that the row id must be for the record where the handle +was positioned the last time. */ UNIV_INTERN void -ha_innobase::try_semi_consistent_read(bool yes) -/*===========================================*/ -{ - ut_a(prebuilt->trx == thd_to_trx(ha_thd())); - - /* Row read type is set to semi consistent read if this was - requested by the MySQL and either innodb_locks_unsafe_for_binlog - option is used or this session is using READ COMMITTED isolation - level. */ - - if (yes - && (srv_locks_unsafe_for_binlog - || prebuilt->trx->isolation_level <= TRX_ISO_READ_COMMITTED)) { - prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; - } else { - prebuilt->row_read_type = ROW_READ_WITH_LOCKS; - } -} - -/******************************************************************//** -Initializes a handle to use an index. -@return 0 or error number */ -UNIV_INTERN -int -ha_innobase::index_init( -/*====================*/ - uint keynr, /*!< in: key (index) number */ - bool sorted) /*!< in: 1 if result MUST be sorted according to index */ -{ - DBUG_ENTER("index_init"); - - DBUG_RETURN(change_active_index(keynr)); -} - -/******************************************************************//** -Currently does nothing. 
-@return 0 */ -UNIV_INTERN -int -ha_innobase::index_end(void) -/*========================*/ +ha_innobase::position( +/*==================*/ + const uchar* record) /*!< in: row in MySQL format */ { - int error = 0; - DBUG_ENTER("index_end"); - active_index=MAX_KEY; - DBUG_RETURN(error); -} + uint len; -/*********************************************************************//** -Converts a search mode flag understood by MySQL to a flag understood -by InnoDB. */ -static inline -ulint -convert_search_mode_to_innobase( -/*============================*/ - enum ha_rkey_function find_flag) -{ - switch (find_flag) { - case HA_READ_KEY_EXACT: - /* this does not require the index to be UNIQUE */ - return(PAGE_CUR_GE); - case HA_READ_KEY_OR_NEXT: - return(PAGE_CUR_GE); - case HA_READ_KEY_OR_PREV: - return(PAGE_CUR_LE); - case HA_READ_AFTER_KEY: - return(PAGE_CUR_G); - case HA_READ_BEFORE_KEY: - return(PAGE_CUR_L); - case HA_READ_PREFIX: - return(PAGE_CUR_GE); - case HA_READ_PREFIX_LAST: - return(PAGE_CUR_LE); - case HA_READ_PREFIX_LAST_OR_PREV: - return(PAGE_CUR_LE); - /* In MySQL-4.0 HA_READ_PREFIX and HA_READ_PREFIX_LAST always - pass a complete-field prefix of a key value as the search - tuple. I.e., it is not allowed that the last field would - just contain n first bytes of the full field value. - MySQL uses a 'padding' trick to convert LIKE 'abc%' - type queries so that it can use as a search tuple - a complete-field-prefix of a key value. Thus, the InnoDB - search mode PAGE_CUR_LE_OR_EXTENDS is never used. - TODO: when/if MySQL starts to use also partial-field - prefixes, we have to deal with stripping of spaces - and comparison of non-latin1 char type fields in - innobase_mysql_cmp() to get PAGE_CUR_LE_OR_EXTENDS to - work correctly. */ - case HA_READ_MBR_CONTAIN: - case HA_READ_MBR_INTERSECT: - case HA_READ_MBR_WITHIN: - case HA_READ_MBR_DISJOINT: - case HA_READ_MBR_EQUAL: - return(PAGE_CUR_UNSUPP); - /* do not use "default:" in order to produce a gcc warning: - enumeration value '...' not handled in switch - (if -Wswitch or -Wall is used) */ - } + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); - my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "this functionality"); + if (prebuilt->clust_index_was_generated) { + /* No primary key was defined for the table and we + generated the clustered index from row id: the + row reference will be the row id, not any key value + that MySQL knows of */ - return(PAGE_CUR_UNSUPP); -} + len = DATA_ROW_ID_LEN; -/* - BACKGROUND INFO: HOW A SELECT SQL QUERY IS EXECUTED - --------------------------------------------------- -The following does not cover all the details, but explains how we determine -the start of a new SQL statement, and what is associated with it. + memcpy(ref, prebuilt->row_id, len); + } else { + len = store_key_val_for_row(primary_key, (char*) ref, + ref_length, record); + } -For each table in the database the MySQL interpreter may have several -table handle instances in use, also in a single SQL query. For each table -handle instance there is an InnoDB 'prebuilt' struct which contains most -of the InnoDB data associated with this table handle instance. + /* We assume that the 'ref' value len is always fixed for the same + table. */ - A) if the user has not explicitly set any MySQL table level locks: + if (len != ref_length) { + sql_print_error("Stored ref len is %lu, but table ref len is " + "%lu", (ulong) len, (ulong) ref_length); + } +} - 1) MySQL calls ::external_lock to set an 'intention' table level lock on -the table of the handle instance. 
There we set -prebuilt->sql_stat_start = TRUE. The flag sql_stat_start should be set -true if we are taking this table handle instance to use in a new SQL -statement issued by the user. We also increment trx->n_mysql_tables_in_use. +/*****************************************************************//** +Check whether there exist a column named as "FTS_DOC_ID", which is +reserved for InnoDB FTS Doc ID +@return true if there exist a "FTS_DOC_ID" column */ +static +bool +create_table_check_doc_id_col( +/*==========================*/ + trx_t* trx, /*!< in: InnoDB transaction handle */ + const TABLE* form, /*!< in: information on table + columns and indexes */ + ulint* doc_id_col) /*!< out: Doc ID column number if + there exist a FTS_DOC_ID column, + ULINT_UNDEFINED if column is of the + wrong type/name/size */ +{ + for (ulint i = 0; i < form->s->fields; i++) { + const Field* field; + ulint col_type; + ulint col_len; + ulint unsigned_type; - 2) If prebuilt->sql_stat_start == TRUE we 'pre-compile' the MySQL search -instructions to prebuilt->template of the table handle instance in -::index_read. The template is used to save CPU time in large joins. + field = form->field[i]; - 3) In row_search_for_mysql, if prebuilt->sql_stat_start is true, we -allocate a new consistent read view for the trx if it does not yet have one, -or in the case of a locking read, set an InnoDB 'intention' table level -lock on the table. + col_type = get_innobase_type_from_mysql_type(&unsigned_type, + field); - 4) We do the SELECT. MySQL may repeatedly call ::index_read for the -same table handle instance, if it is a join. + col_len = field->pack_length(); - 5) When the SELECT ends, MySQL removes its intention table level locks -in ::external_lock. When trx->n_mysql_tables_in_use drops to zero, - (a) we execute a COMMIT there if the autocommit is on, - (b) we also release possible 'SQL statement level resources' InnoDB may -have for this SQL statement. The MySQL interpreter does NOT execute -autocommit for pure read transactions, though it should. That is why the -table handler in that case has to execute the COMMIT in ::external_lock. + if (innobase_strcasecmp(field->field_name, + FTS_DOC_ID_COL_NAME) == 0) { - B) If the user has explicitly set MySQL table level locks, then MySQL -does NOT call ::external_lock at the start of the statement. To determine -when we are at the start of a new SQL statement we at the start of -::index_read also compare the query id to the latest query id where the -table handle instance was used. If it has changed, we know we are at the -start of a new SQL statement. Since the query id can theoretically -overwrap, we use this test only as a secondary way of determining the -start of a new SQL statement. */ + /* Note the name is case sensitive due to + our internal query parser */ + if (col_type == DATA_INT + && !field->real_maybe_null() + && col_len == sizeof(doc_id_t) + && (strcmp(field->field_name, + FTS_DOC_ID_COL_NAME) == 0)) { + *doc_id_col = i; + } else { + push_warning_printf( + trx->mysql_thd, + Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: FTS_DOC_ID column must be " + "of BIGINT NOT NULL type, and named " + "in all capitalized characters"); + my_error(ER_WRONG_COLUMN_NAME, MYF(0), + field->field_name); + *doc_id_col = ULINT_UNDEFINED; + } + return(true); + } + } -/**********************************************************************//** -Positions an index cursor to the index specified in the handle. Fetches the -row if any. 
-@return 0, HA_ERR_KEY_NOT_FOUND, or error number */ -UNIV_INTERN + return(false); +} + +/*****************************************************************//** +Creates a table definition to an InnoDB database. */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) int -ha_innobase::index_read( -/*====================*/ - uchar* buf, /*!< in/out: buffer for the returned - row */ - const uchar* key_ptr, /*!< in: key value; if this is NULL - we position the cursor at the - start or end of index; this can - also contain an InnoDB row id, in - which case key_len is the InnoDB - row id length; the key value can - also be a prefix of a full key value, - and the last column can be a prefix - of a full column */ - uint key_len,/*!< in: key value length */ - enum ha_rkey_function find_flag)/*!< in: search flags from my_base.h */ +create_table_def( +/*=============*/ + trx_t* trx, /*!< in: InnoDB transaction handle */ + const TABLE* form, /*!< in: information on table + columns and indexes */ + const char* table_name, /*!< in: table name */ + const char* temp_path, /*!< in: if this is a table explicitly + created by the user with the + TEMPORARY keyword, then this + parameter is the dir path where the + table should be placed if we create + an .ibd file for it (no .ibd extension + in the path, though). Otherwise this + is a zero length-string */ + const char* remote_path, /*!< in: Remote path or zero length-string */ + ulint flags, /*!< in: table flags */ + ulint flags2) /*!< in: table flags2 */ { - ulint mode; - dict_index_t* index; - ulint match_mode = 0; - int error; - ulint ret; - - DBUG_ENTER("index_read"); - DEBUG_SYNC_C("ha_innobase_index_read_begin"); + THD* thd = trx->mysql_thd; + dict_table_t* table; + ulint n_cols; + dberr_t err; + ulint col_type; + ulint col_len; + ulint nulls_allowed; + ulint unsigned_type; + ulint binary_type; + ulint long_true_varchar; + ulint charset_no; + ulint i; + ulint doc_id_col = 0; + ibool has_doc_id_col = FALSE; + mem_heap_t* heap; - ut_a(prebuilt->trx == thd_to_trx(user_thd)); - ut_ad(key_len != 0 || find_flag != HA_READ_KEY_EXACT); + DBUG_ENTER("create_table_def"); + DBUG_PRINT("enter", ("table_name: %s", table_name)); - ha_statistic_increment(&SSV::ha_read_key_count); + DBUG_ASSERT(thd != NULL); - index = prebuilt->index; + /* MySQL does the name length check. But we do additional check + on the name length here */ + const size_t table_name_len = strlen(table_name); + if (table_name_len > MAX_FULL_NAME_LEN) { + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_TABLE_NAME, + "InnoDB: Table Name or Database Name is too long"); - if (UNIV_UNLIKELY(index == NULL) || dict_index_is_corrupted(index)) { - prebuilt->index_usable = FALSE; - DBUG_RETURN(HA_ERR_CRASHED); + DBUG_RETURN(ER_TABLE_NAME); } - if (UNIV_UNLIKELY(!prebuilt->index_usable)) { - DBUG_RETURN(dict_index_is_corrupted(index) - ? 
HA_ERR_INDEX_CORRUPT - : HA_ERR_TABLE_DEF_CHANGED); + + if (table_name[table_name_len - 1] == '/') { + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_TABLE_NAME, + "InnoDB: Table name is empty"); + + DBUG_RETURN(ER_WRONG_TABLE_NAME); } - /* Note that if the index for which the search template is built is not - necessarily prebuilt->index, but can also be the clustered index */ + n_cols = form->s->fields; - if (prebuilt->sql_stat_start) { - build_template(prebuilt, user_thd, table, ROW_MYSQL_REC_FIELDS); + /* Check whether there already exists a FTS_DOC_ID column */ + if (create_table_check_doc_id_col(trx, form, &doc_id_col)){ + + /* Raise error if the Doc ID column is of wrong type or name */ + if (doc_id_col == ULINT_UNDEFINED) { + trx_commit_for_mysql(trx); + + err = DB_ERROR; + goto error_ret; + } else { + has_doc_id_col = TRUE; + } } - if (key_ptr) { - /* Convert the search key value to InnoDB format into - prebuilt->search_tuple */ + /* We pass 0 as the space id, and determine at a lower level the space + id where to store the table */ - row_sel_convert_mysql_key_to_innobase( - prebuilt->search_tuple, - srch_key_val1, sizeof(srch_key_val1), - index, - (byte*) key_ptr, - (ulint) key_len, - prebuilt->trx); - DBUG_ASSERT(prebuilt->search_tuple->n_fields > 0); + if (flags2 & DICT_TF2_FTS) { + /* Adjust for the FTS hidden field */ + if (!has_doc_id_col) { + table = dict_mem_table_create(table_name, 0, form->s->stored_fields + 1, + flags, flags2); + + /* Set the hidden doc_id column. */ + table->fts->doc_col = form->s->stored_fields; + } else { + table = dict_mem_table_create(table_name, 0, form->s->stored_fields, + flags, flags2); + table->fts->doc_col = doc_id_col; + } } else { - /* We position the cursor to the last or the first entry - in the index */ + table = dict_mem_table_create(table_name, 0, form->s->stored_fields, + flags, flags2); + } - dtuple_set_n_fields(prebuilt->search_tuple, 0); + if (flags2 & DICT_TF2_TEMPORARY) { + ut_a(strlen(temp_path)); + table->dir_path_of_temp_table = + mem_heap_strdup(table->heap, temp_path); } - mode = convert_search_mode_to_innobase(find_flag); + if (DICT_TF_HAS_DATA_DIR(flags)) { + ut_a(strlen(remote_path)); + table->data_dir_path = mem_heap_strdup(table->heap, remote_path); + } else { + table->data_dir_path = NULL; + } + heap = mem_heap_create(1000); - match_mode = 0; + for (i = 0; i < n_cols; i++) { + Field* field = form->field[i]; + if (!field->stored_in_db) + continue; - if (find_flag == HA_READ_KEY_EXACT) { + col_type = get_innobase_type_from_mysql_type(&unsigned_type, + field); - match_mode = ROW_SEL_EXACT; + if (!col_type) { + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_CANT_CREATE_TABLE, + "Error creating table '%s' with " + "column '%s'. Please check its " + "column type and try to re-create " + "the table with an appropriate " + "column type.", + table->name, field->field_name); + goto err_col; + } - } else if (find_flag == HA_READ_PREFIX - || find_flag == HA_READ_PREFIX_LAST) { + nulls_allowed = field->real_maybe_null() ? 0 : DATA_NOT_NULL; + binary_type = field->binary() ? DATA_BINARY_TYPE : 0; + + charset_no = 0; + + if (dtype_is_string_type(col_type)) { + + charset_no = (ulint) field->charset()->number; + + if (UNIV_UNLIKELY(charset_no > MAX_CHAR_COLL_NUM)) { + /* in data0type.h we assume that the + number fits in one byte in prtype */ + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_CANT_CREATE_TABLE, + "In InnoDB, charset-collation codes" + " must be below 256." 
+ " Unsupported code %lu.", + (ulong) charset_no); + mem_heap_free(heap); + DBUG_RETURN(ER_CANT_CREATE_TABLE); + } + } + + /* we assume in dtype_form_prtype() that this fits in + two bytes */ + ut_a(static_cast<uint>(field->type()) <= MAX_CHAR_COLL_NUM); + col_len = field->pack_length(); + + /* The MySQL pack length contains 1 or 2 bytes length field + for a true VARCHAR. Let us subtract that, so that the InnoDB + column length in the InnoDB data dictionary is the real + maximum byte length of the actual data. */ + + long_true_varchar = 0; + + if (field->type() == MYSQL_TYPE_VARCHAR) { + col_len -= ((Field_varstring*) field)->length_bytes; + + if (((Field_varstring*) field)->length_bytes == 2) { + long_true_varchar = DATA_LONG_TRUE_VARCHAR; + } + } + + /* First check whether the column to be added has a + system reserved name. */ + if (dict_col_name_is_reserved(field->field_name)){ + my_error(ER_WRONG_COLUMN_NAME, MYF(0), + field->field_name); +err_col: + dict_mem_table_free(table); + mem_heap_free(heap); + trx_commit_for_mysql(trx); + + err = DB_ERROR; + goto error_ret; + } + + dict_mem_table_add_col(table, heap, + field->field_name, + col_type, + dtype_form_prtype( + (ulint) field->type() + | nulls_allowed | unsigned_type + | binary_type | long_true_varchar, + charset_no), + col_len); + } - match_mode = ROW_SEL_EXACT_PREFIX; + /* Add the FTS doc_id hidden column. */ + if (flags2 & DICT_TF2_FTS && !has_doc_id_col) { + fts_add_doc_id_column(table, heap); } - last_match_mode = (uint) match_mode; + err = row_create_table_for_mysql(table, trx, false); - if (mode != PAGE_CUR_UNSUPP) { + mem_heap_free(heap); - innodb_srv_conc_enter_innodb(prebuilt->trx); + DBUG_EXECUTE_IF("ib_create_err_tablespace_exist", + err = DB_TABLESPACE_EXISTS;); - ret = row_search_for_mysql((byte*) buf, mode, prebuilt, - match_mode, 0); + if (err == DB_DUPLICATE_KEY || err == DB_TABLESPACE_EXISTS) { + char display_name[FN_REFLEN]; + char* buf_end = innobase_convert_identifier( + display_name, sizeof(display_name) - 1, + table_name, strlen(table_name), + thd, TRUE); - innodb_srv_conc_exit_innodb(prebuilt->trx); - } else { + *buf_end = '\0'; - ret = DB_UNSUPPORTED; + my_error(err == DB_DUPLICATE_KEY + ? ER_TABLE_EXISTS_ERROR + : ER_TABLESPACE_EXISTS, MYF(0), display_name); } - switch (ret) { - case DB_SUCCESS: - error = 0; - table->status = 0; - break; - case DB_RECORD_NOT_FOUND: - error = HA_ERR_KEY_NOT_FOUND; - table->status = STATUS_NOT_FOUND; - break; - case DB_END_OF_INDEX: - error = HA_ERR_KEY_NOT_FOUND; - table->status = STATUS_NOT_FOUND; - break; - default: - error = convert_error_code_to_mysql((int) ret, - prebuilt->table->flags, - user_thd); - table->status = STATUS_NOT_FOUND; - break; + if (err == DB_SUCCESS && (flags2 & DICT_TF2_FTS)) { + fts_optimize_add_table(table); } - DBUG_RETURN(error); +error_ret: + DBUG_RETURN(convert_error_code_to_mysql(err, flags, thd)); } -/*******************************************************************//** -The following functions works like index_read, but it find the last -row with the current key value or prefix. -@return 0, HA_ERR_KEY_NOT_FOUND, or an error code */ -UNIV_INTERN +/*****************************************************************//** +Creates an index in an InnoDB database. 
*/ +static int -ha_innobase::index_read_last( -/*=========================*/ - uchar* buf, /*!< out: fetched row */ - const uchar* key_ptr,/*!< in: key value, or a prefix of a full - key value */ - uint key_len)/*!< in: length of the key val or prefix - in bytes */ -{ - return(index_read(buf, key_ptr, key_len, HA_READ_PREFIX_LAST)); -} - -/********************************************************************//** -Get the index for a handle. Does not change active index. -@return NULL or index instance. */ -UNIV_INTERN -dict_index_t* -ha_innobase::innobase_get_index( -/*============================*/ - uint keynr) /*!< in: use this index; MAX_KEY means always - clustered index, even if it was internally - generated by InnoDB */ +create_index( +/*=========*/ + trx_t* trx, /*!< in: InnoDB transaction handle */ + const TABLE* form, /*!< in: information on table + columns and indexes */ + ulint flags, /*!< in: InnoDB table flags */ + const char* table_name, /*!< in: table name */ + uint key_num) /*!< in: index number */ { - KEY* key = 0; - dict_index_t* index = 0; + dict_index_t* index; + int error; + const KEY* key; + ulint ind_type; + ulint* field_lengths; - DBUG_ENTER("innobase_get_index"); + DBUG_ENTER("create_index"); - if (keynr != MAX_KEY && table->s->keys > 0) { - key = table->key_info + keynr; + key = form->key_info + key_num; - index = innobase_index_lookup(share, keynr); + /* Assert that "GEN_CLUST_INDEX" cannot be used as non-primary index */ + ut_a(innobase_strcasecmp(key->name, innobase_index_reserve_name) != 0); - if (index) { - ut_a(ut_strcmp(index->name, key->name) == 0); - } else { - /* Can't find index with keynr in the translation - table. Only print message if the index translation - table exists */ - if (share->idx_trans_tbl.index_mapping) { - sql_print_warning("InnoDB could not find " - "index %s key no %u for " - "table %s through its " - "index translation table", - key ? key->name : "NULL", - keynr, - prebuilt->table->name); - } + if (key->flags & HA_FULLTEXT) { + index = dict_mem_index_create(table_name, key->name, 0, + DICT_FTS, + key->user_defined_key_parts); - index = dict_table_get_index_on_name(prebuilt->table, - key->name); + for (ulint i = 0; i < key->user_defined_key_parts; i++) { + KEY_PART_INFO* key_part = key->key_part + i; + dict_mem_index_add_field( + index, key_part->field->field_name, 0); } - } else { - index = dict_table_get_first_index(prebuilt->table); + + DBUG_RETURN(convert_error_code_to_mysql( + row_create_index_for_mysql( + index, trx, NULL), + flags, NULL)); + } - if (!index) { - sql_print_error( - "Innodb could not find key n:o %u with name %s " - "from dict cache for table %s", - keynr, key ? key->name : "NULL", - prebuilt->table->name); + ind_type = 0; + + if (key_num == form->s->primary_key) { + ind_type |= DICT_CLUSTERED; } - DBUG_RETURN(index); -} + if (key->flags & HA_NOSAME) { + ind_type |= DICT_UNIQUE; + } -/********************************************************************//** -Changes the active index of a handle. 
-@return 0 or error code */ -UNIV_INTERN -int -ha_innobase::change_active_index( -/*=============================*/ - uint keynr) /*!< in: use this index; MAX_KEY means always clustered - index, even if it was internally generated by - InnoDB */ -{ - DBUG_ENTER("change_active_index"); + field_lengths = (ulint*) my_malloc( + key->user_defined_key_parts * sizeof * + field_lengths, MYF(MY_FAE)); - ut_ad(user_thd == ha_thd()); - ut_a(prebuilt->trx == thd_to_trx(user_thd)); + /* We pass 0 as the space id, and determine at a lower level the space + id where to store the table */ - active_index = keynr; + index = dict_mem_index_create(table_name, key->name, 0, + ind_type, key->user_defined_key_parts); - prebuilt->index = innobase_get_index(keynr); + for (ulint i = 0; i < key->user_defined_key_parts; i++) { + KEY_PART_INFO* key_part = key->key_part + i; + ulint prefix_len; + ulint col_type; + ulint is_unsigned; - if (UNIV_UNLIKELY(!prebuilt->index)) { - sql_print_warning("InnoDB: change_active_index(%u) failed", - keynr); - prebuilt->index_usable = FALSE; - DBUG_RETURN(1); - } - prebuilt->index_usable = row_merge_is_index_usable(prebuilt->trx, - prebuilt->index); + /* (The flag HA_PART_KEY_SEG denotes in MySQL a + column prefix field in an index: we only store a + specified number of first bytes of the column to + the index field.) The flag does not seem to be + properly set by MySQL. Let us fall back on testing + the length of the key part versus the column. */ - if (UNIV_UNLIKELY(!prebuilt->index_usable)) { - if (dict_index_is_corrupted(prebuilt->index)) { - char index_name[MAX_FULL_NAME_LEN + 1]; - char table_name[MAX_FULL_NAME_LEN + 1]; + Field* field = NULL; - innobase_format_name( - index_name, sizeof index_name, - prebuilt->index->name, TRUE); + for (ulint j = 0; j < form->s->fields; j++) { - innobase_format_name( - table_name, sizeof table_name, - prebuilt->index->table->name, FALSE); + field = form->field[j]; - push_warning_printf( - user_thd, MYSQL_ERROR::WARN_LEVEL_WARN, - HA_ERR_INDEX_CORRUPT, - "InnoDB: Index %s for table %s is" - " marked as corrupted", - index_name, table_name); - DBUG_RETURN(HA_ERR_INDEX_CORRUPT); + if (0 == innobase_strcasecmp( + field->field_name, + key_part->field->field_name)) { + /* Found the corresponding column */ + + goto found; + } + } + + ut_error; +found: + col_type = get_innobase_type_from_mysql_type( + &is_unsigned, key_part->field); + + if (DATA_BLOB == col_type + || (key_part->length < field->pack_length() + && field->type() != MYSQL_TYPE_VARCHAR) + || (field->type() == MYSQL_TYPE_VARCHAR + && key_part->length < field->pack_length() + - ((Field_varstring*) field)->length_bytes)) { + + switch (col_type) { + default: + prefix_len = key_part->length; + break; + case DATA_INT: + case DATA_FLOAT: + case DATA_DOUBLE: + case DATA_DECIMAL: + sql_print_error( + "MySQL is trying to create a column " + "prefix index field, on an " + "inappropriate data type. Table " + "name %s, column name %s.", + table_name, + key_part->field->field_name); + + prefix_len = 0; + } } else { - push_warning_printf( - user_thd, MYSQL_ERROR::WARN_LEVEL_WARN, - HA_ERR_TABLE_DEF_CHANGED, - "InnoDB: insufficient history for index %u", - keynr); + prefix_len = 0; } - /* The caller seems to ignore this. Thus, we must check - this again in row_search_for_mysql(). 
*/ - DBUG_RETURN(convert_error_code_to_mysql(DB_MISSING_HISTORY, - 0, NULL)); + field_lengths[i] = key_part->length; + + dict_mem_index_add_field( + index, key_part->field->field_name, prefix_len); } - ut_a(prebuilt->search_tuple != 0); + ut_ad(key->flags & HA_FULLTEXT || !(index->type & DICT_FTS)); - dtuple_set_n_fields(prebuilt->search_tuple, prebuilt->index->n_fields); + /* Even though we've defined max_supported_key_part_length, we + still do our own checking using field_lengths to be absolutely + sure we don't create too long indexes. */ - dict_index_copy_types(prebuilt->search_tuple, prebuilt->index, - prebuilt->index->n_fields); + error = convert_error_code_to_mysql( + row_create_index_for_mysql(index, trx, field_lengths), + flags, NULL); - /* MySQL changes the active index for a handle also during some - queries, for example SELECT MAX(a), SUM(a) first retrieves the MAX() - and then calculates the sum. Previously we played safe and used - the flag ROW_MYSQL_WHOLE_ROW below, but that caused unnecessary - copying. Starting from MySQL-4.1 we use a more efficient flag here. */ + my_free(field_lengths); - build_template(prebuilt, user_thd, table, ROW_MYSQL_REC_FIELDS); + DBUG_RETURN(error); +} - DBUG_RETURN(0); +/*****************************************************************//** +Creates an index to an InnoDB table when the user has defined no +primary index. */ +static +int +create_clustered_index_when_no_primary( +/*===================================*/ + trx_t* trx, /*!< in: InnoDB transaction handle */ + ulint flags, /*!< in: InnoDB table flags */ + const char* table_name) /*!< in: table name */ +{ + dict_index_t* index; + dberr_t error; + + /* We pass 0 as the space id, and determine at a lower level the space + id where to store the table */ + index = dict_mem_index_create(table_name, + innobase_index_reserve_name, + 0, DICT_CLUSTERED, 0); + + error = row_create_index_for_mysql(index, trx, NULL); + + return(convert_error_code_to_mysql(error, flags, NULL)); +} + +/*****************************************************************//** +Return a display name for the row format +@return row format name */ +UNIV_INTERN +const char* +get_row_format_name( +/*================*/ + enum row_type row_format) /*!< in: Row Format */ +{ + switch (row_format) { + case ROW_TYPE_COMPACT: + return("COMPACT"); + case ROW_TYPE_COMPRESSED: + return("COMPRESSED"); + case ROW_TYPE_DYNAMIC: + return("DYNAMIC"); + case ROW_TYPE_REDUNDANT: + return("REDUNDANT"); + case ROW_TYPE_DEFAULT: + return("DEFAULT"); + case ROW_TYPE_FIXED: + return("FIXED"); + case ROW_TYPE_PAGE: + case ROW_TYPE_NOT_USED: + break; + } + return("NOT USED"); } -/**********************************************************************//** -Positions an index cursor to the index specified in keynr. Fetches the -row if any. -??? This is only used to read whole keys ??? 
-@return error number or 0 */ -UNIV_INTERN -int -ha_innobase::index_read_idx( -/*========================*/ - uchar* buf, /*!< in/out: buffer for the returned - row */ - uint keynr, /*!< in: use this index */ - const uchar* key, /*!< in: key value; if this is NULL - we position the cursor at the - start or end of index */ - uint key_len, /*!< in: key value length */ - enum ha_rkey_function find_flag)/*!< in: search flags from my_base.h */ -{ - if (change_active_index(keynr)) { +/** If file-per-table is missing, issue warning and set ret false */ +#define CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace)\ + if (!use_tablespace) { \ + push_warning_printf( \ + thd, Sql_condition::WARN_LEVEL_WARN, \ + ER_ILLEGAL_HA_CREATE_OPTION, \ + "InnoDB: ROW_FORMAT=%s requires" \ + " innodb_file_per_table.", \ + get_row_format_name(row_format)); \ + ret = "ROW_FORMAT"; \ + } - return(1); +/** If file-format is Antelope, issue warning and set ret false */ +#define CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE \ + if (srv_file_format < UNIV_FORMAT_B) { \ + push_warning_printf( \ + thd, Sql_condition::WARN_LEVEL_WARN, \ + ER_ILLEGAL_HA_CREATE_OPTION, \ + "InnoDB: ROW_FORMAT=%s requires" \ + " innodb_file_format > Antelope.", \ + get_row_format_name(row_format)); \ + ret = "ROW_FORMAT"; \ } - return(index_read(buf, key, key_len, find_flag)); -} -/***********************************************************************//** -Reads the next or previous row from a cursor, which must have previously been -positioned using index_read. -@return 0, HA_ERR_END_OF_FILE, or error number */ +/*****************************************************************//** +Validates the create options. We may build on this function +in future. For now, it checks two specifiers: +KEY_BLOCK_SIZE and ROW_FORMAT +If innodb_strict_mode is not set then this function is a no-op +@return NULL if valid, string if not. */ UNIV_INTERN -int -ha_innobase::general_fetch( +const char* +create_options_are_invalid( /*=======================*/ - uchar* buf, /*!< in/out: buffer for next row in MySQL - format */ - uint direction, /*!< in: ROW_SEL_NEXT or ROW_SEL_PREV */ - uint match_mode) /*!< in: 0, ROW_SEL_EXACT, or - ROW_SEL_EXACT_PREFIX */ + THD* thd, /*!< in: connection thread. */ + TABLE* form, /*!< in: information on table + columns and indexes */ + HA_CREATE_INFO* create_info, /*!< in: create info. */ + bool use_tablespace) /*!< in: srv_file_per_table */ { - ulint ret; - int error = 0; + ibool kbs_specified = FALSE; + const char* ret = NULL; + enum row_type row_format = form->s->row_type; - DBUG_ENTER("general_fetch"); + ut_ad(thd != NULL); - /* If transaction is not startted do not continue, instead return a error code. */ - if(!(prebuilt->sql_stat_start || (prebuilt->trx && prebuilt->trx->conc_state == 1))) { - DBUG_RETURN(HA_ERR_END_OF_FILE); + /* If innodb_strict_mode is not set don't do any validation. */ + if (!(THDVAR(thd, strict_mode))) { + return(NULL); } - ut_a(prebuilt->trx == thd_to_trx(user_thd)); - - innodb_srv_conc_enter_innodb(prebuilt->trx); + ut_ad(form != NULL); + ut_ad(create_info != NULL); - ret = row_search_for_mysql( - (byte*)buf, 0, prebuilt, match_mode, direction); + /* First check if a non-zero KEY_BLOCK_SIZE was specified. */ + if (create_info->key_block_size) { + kbs_specified = TRUE; + switch (create_info->key_block_size) { + ulint kbs_max; + case 1: + case 2: + case 4: + case 8: + case 16: + /* Valid KEY_BLOCK_SIZE, check its dependencies. 
*/ + if (!use_tablespace) { + push_warning( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: KEY_BLOCK_SIZE requires" + " innodb_file_per_table."); + ret = "KEY_BLOCK_SIZE"; + } + if (srv_file_format < UNIV_FORMAT_B) { + push_warning( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: KEY_BLOCK_SIZE requires" + " innodb_file_format > Antelope."); + ret = "KEY_BLOCK_SIZE"; + } - innodb_srv_conc_exit_innodb(prebuilt->trx); + /* The maximum KEY_BLOCK_SIZE (KBS) is 16. But if + UNIV_PAGE_SIZE is smaller than 16k, the maximum + KBS is also smaller. */ + kbs_max = ut_min( + 1 << (UNIV_PAGE_SSIZE_MAX - 1), + 1 << (PAGE_ZIP_SSIZE_MAX - 1)); + if (create_info->key_block_size > kbs_max) { + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: KEY_BLOCK_SIZE=%ld" + " cannot be larger than %ld.", + create_info->key_block_size, + kbs_max); + ret = "KEY_BLOCK_SIZE"; + } + break; + default: + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: invalid KEY_BLOCK_SIZE = %lu." + " Valid values are [1, 2, 4, 8, 16]", + create_info->key_block_size); + ret = "KEY_BLOCK_SIZE"; + break; + } + } - switch (ret) { - case DB_SUCCESS: - error = 0; - table->status = 0; + /* Check for a valid Innodb ROW_FORMAT specifier and + other incompatibilities. */ + switch (row_format) { + case ROW_TYPE_COMPRESSED: + CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace); + CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE; break; - case DB_RECORD_NOT_FOUND: - error = HA_ERR_END_OF_FILE; - table->status = STATUS_NOT_FOUND; + case ROW_TYPE_DYNAMIC: + CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace); + CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE; + /* ROW_FORMAT=DYNAMIC also shuns KEY_BLOCK_SIZE */ + /* fall through */ + case ROW_TYPE_COMPACT: + case ROW_TYPE_REDUNDANT: + if (kbs_specified) { + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: cannot specify ROW_FORMAT = %s" + " with KEY_BLOCK_SIZE.", + get_row_format_name(row_format)); + ret = "KEY_BLOCK_SIZE"; + } break; - case DB_END_OF_INDEX: - error = HA_ERR_END_OF_FILE; - table->status = STATUS_NOT_FOUND; + case ROW_TYPE_DEFAULT: break; - default: - error = convert_error_code_to_mysql( - (int) ret, prebuilt->table->flags, user_thd); - table->status = STATUS_NOT_FOUND; + case ROW_TYPE_FIXED: + case ROW_TYPE_PAGE: + case ROW_TYPE_NOT_USED: + push_warning( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, \ + "InnoDB: invalid ROW_FORMAT specifier."); + ret = "ROW_TYPE"; break; } diff --cc storage/innobase/handler/ha_innodb.h index 8fd60046650,c239a3218b1..85e8f887060 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@@ -24,11 -24,17 +24,15 @@@ this program; if not, write to the Fre Innodb */ -#ifdef USE_PRAGMA_INTERFACE -#pragma interface /* gcc class implementation */ -#endif +#include "dict0stats.h" + #ifdef WITH_WSREP + #include "../../../wsrep/wsrep_api.h" + #endif /* WITH_WSREP */ + /* Structure defines translation table between mysql index and innodb index structures */ -typedef struct innodb_idx_translate_struct { +struct innodb_idx_translate_t { ulint index_count; /*!< number of valid index entries in the index_mapping array */ ulint array_size; /*!< array size of index_mapping */ @@@ -88,16 -108,18 +92,16 @@@ class ha_innobase: public handle void update_thd(); int change_active_index(uint keynr); int 
general_fetch(uchar* buf, uint direction, uint match_mode); - ulint innobase_lock_autoinc(); + dberr_t innobase_lock_autoinc(); ulonglong innobase_peek_autoinc(); - ulint innobase_set_max_autoinc(ulonglong auto_inc); - ulint innobase_reset_autoinc(ulonglong auto_inc); - ulint innobase_get_autoinc(ulonglong* value); - ulint innobase_update_autoinc(ulonglong auto_inc); + dberr_t innobase_set_max_autoinc(ulonglong auto_inc); + dberr_t innobase_reset_autoinc(ulonglong auto_inc); + dberr_t innobase_get_autoinc(ulonglong* value); void innobase_initialize_autoinc(); dict_index_t* innobase_get_index(uint keynr); - int info_low(uint flag, bool called_from_analyze); #ifdef WITH_WSREP - int wsrep_append_keys(THD *thd, bool shared, + int wsrep_append_keys(THD *thd, wsrep_key_type key_type, const uchar* record0, const uchar* record1); #endif /* Init values for the class: */ diff --cc storage/innobase/row/row0ins.cc index f196ba790df,00000000000..271e93c6e80 mode 100644,000000..100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@@ -1,3379 -1,0 +1,3402 @@@ +/***************************************************************************** + +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, 2018, MariaDB Corporation. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file row/row0ins.cc +Insert into a table + +Created 4/20/1996 Heikki Tuuri +*******************************************************/ + +#include "row0ins.h" + +#ifdef UNIV_NONINL +#include "row0ins.ic" +#endif + +#include "ha_prototypes.h" +#include "dict0dict.h" +#include "dict0boot.h" +#include "trx0rec.h" +#include "trx0undo.h" +#include "btr0btr.h" +#include "btr0cur.h" +#include "mach0data.h" +#include "ibuf0ibuf.h" +#include "que0que.h" +#include "row0upd.h" +#include "row0sel.h" +#include "row0row.h" +#include "row0log.h" +#include "rem0cmp.h" +#include "lock0lock.h" +#include "log0log.h" +#include "eval0eval.h" +#include "data0data.h" +#include "usr0sess.h" +#include "buf0lru.h" +#include "fts0fts.h" +#include "fts0types.h" +#include "m_string.h" + ++#ifdef WITH_WSREP ++#include "../../../wsrep/wsrep_api.h" ++#include "wsrep_mysqld_c.h" ++#endif /* WITH_WSREP */ ++ +/************************************************************************* +IMPORTANT NOTE: Any operation that generates redo MUST check that there +is enough space in the redo log before for that operation. This is +done by calling log_free_check(). The reason for checking the +availability of the redo log space before the start of the operation is +that we MUST not hold any synchonization objects when performing the +check. +If you make a change in this module make sure that no codepath is +introduced where a call to log_free_check() is bypassed. 
*/ + +/*********************************************************************//** +Creates an insert node struct. +@return own: insert node struct */ +UNIV_INTERN +ins_node_t* +ins_node_create( +/*============*/ + ulint ins_type, /*!< in: INS_VALUES, ... */ + dict_table_t* table, /*!< in: table where to insert */ + mem_heap_t* heap) /*!< in: mem heap where created */ +{ + ins_node_t* node; + + node = static_cast<ins_node_t*>( + mem_heap_alloc(heap, sizeof(ins_node_t))); + + node->common.type = QUE_NODE_INSERT; + + node->ins_type = ins_type; + + node->state = INS_NODE_SET_IX_LOCK; + node->table = table; + node->index = NULL; + node->entry = NULL; + + node->select = NULL; + + node->trx_id = 0; + + node->entry_sys_heap = mem_heap_create(128); + + node->magic_n = INS_NODE_MAGIC_N; + + return(node); +} + +/***********************************************************//** +Creates an entry template for each index of a table. */ +static +void +ins_node_create_entry_list( +/*=======================*/ + ins_node_t* node) /*!< in: row insert node */ +{ + dict_index_t* index; + dtuple_t* entry; + + ut_ad(node->entry_sys_heap); + + UT_LIST_INIT(node->entry_list); + + /* We will include all indexes (include those corrupted + secondary indexes) in the entry list. Filteration of + these corrupted index will be done in row_ins() */ + + for (index = dict_table_get_first_index(node->table); + index != 0; + index = dict_table_get_next_index(index)) { + + entry = row_build_index_entry( + node->row, NULL, index, node->entry_sys_heap); + + UT_LIST_ADD_LAST(tuple_list, node->entry_list, entry); + } +} + +/*****************************************************************//** +Adds system field buffers to a row. */ +static +void +row_ins_alloc_sys_fields( +/*=====================*/ + ins_node_t* node) /*!< in: insert node */ +{ + dtuple_t* row; + dict_table_t* table; + mem_heap_t* heap; + const dict_col_t* col; + dfield_t* dfield; + byte* ptr; + + row = node->row; + table = node->table; + heap = node->entry_sys_heap; + + ut_ad(row && table && heap); + ut_ad(dtuple_get_n_fields(row) == dict_table_get_n_cols(table)); + + /* allocate buffer to hold the needed system created hidden columns. */ + uint len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN; + ptr = static_cast<byte*>(mem_heap_zalloc(heap, len)); + + /* 1. Populate row-id */ + col = dict_table_get_sys_col(table, DATA_ROW_ID); + + dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); + + dfield_set_data(dfield, ptr, DATA_ROW_ID_LEN); + + node->row_id_buf = ptr; + + ptr += DATA_ROW_ID_LEN; + + /* 2. Populate trx id */ + col = dict_table_get_sys_col(table, DATA_TRX_ID); + + dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); + + dfield_set_data(dfield, ptr, DATA_TRX_ID_LEN); + + node->trx_id_buf = ptr; + + ptr += DATA_TRX_ID_LEN; + + /* 3. Populate roll ptr */ + + col = dict_table_get_sys_col(table, DATA_ROLL_PTR); + + dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); + + dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN); +} + +/*********************************************************************//** +Sets a new row to insert for an INS_DIRECT node. This function is only used +if we have constructed the row separately, which is a rare case; this +function is quite slow. 
*/ +UNIV_INTERN +void +ins_node_set_new_row( +/*=================*/ + ins_node_t* node, /*!< in: insert node */ + dtuple_t* row) /*!< in: new row (or first row) for the node */ +{ + node->state = INS_NODE_SET_IX_LOCK; + node->index = NULL; + node->entry = NULL; + + node->row = row; + + mem_heap_empty(node->entry_sys_heap); + + /* Create templates for index entries */ + + ins_node_create_entry_list(node); + + /* Allocate from entry_sys_heap buffers for sys fields */ + + row_ins_alloc_sys_fields(node); + + /* As we allocated a new trx id buf, the trx id should be written + there again: */ + + node->trx_id = 0; +} + +/*******************************************************************//** +Does an insert operation by updating a delete-marked existing record +in the index. This situation can occur if the delete-marked record is +kept in the index for consistent reads. +@return DB_SUCCESS or error code */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_sec_index_entry_by_modify( +/*==============================*/ + ulint flags, /*!< in: undo logging and locking flags */ + ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE, + depending on whether mtr holds just a leaf + latch or also a tree latch */ + btr_cur_t* cursor, /*!< in: B-tree cursor */ + ulint** offsets,/*!< in/out: offsets on cursor->page_cur.rec */ + mem_heap_t* offsets_heap, + /*!< in/out: memory heap that can be emptied */ + mem_heap_t* heap, /*!< in/out: memory heap */ + const dtuple_t* entry, /*!< in: index entry to insert */ + que_thr_t* thr, /*!< in: query thread */ + mtr_t* mtr) /*!< in: mtr; must be committed before + latching any further pages */ +{ + big_rec_t* dummy_big_rec; + upd_t* update; + rec_t* rec; + dberr_t err; + + rec = btr_cur_get_rec(cursor); + + ut_ad(!dict_index_is_clust(cursor->index)); + ut_ad(rec_offs_validate(rec, cursor->index, *offsets)); + ut_ad(!entry->info_bits); + + /* We know that in the alphabetical ordering, entry and rec are + identified. But in their binary form there may be differences if + there are char fields in them. Therefore we have to calculate the + difference. */ + + update = row_upd_build_sec_rec_difference_binary( + rec, cursor->index, *offsets, entry, heap); + + if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))) { + /* We should never insert in place of a record that + has not been delete-marked. The only exception is when + online CREATE INDEX copied the changes that we already + made to the clustered index, and completed the + secondary index creation before we got here. In this + case, the change would already be there. The CREATE + INDEX should be waiting for a MySQL meta-data lock + upgrade at least until this INSERT or UPDATE + returns. After that point, the TEMP_INDEX_PREFIX + would be dropped from the index name in + commit_inplace_alter_table(). 
*/ + ut_a(update->n_fields == 0); + ut_a(*cursor->index->name == TEMP_INDEX_PREFIX); + ut_ad(!dict_index_is_online_ddl(cursor->index)); + return(DB_SUCCESS); + } + + if (mode == BTR_MODIFY_LEAF) { + /* Try an optimistic updating of the record, keeping changes + within the page */ + + /* TODO: pass only *offsets */ + err = btr_cur_optimistic_update( + flags | BTR_KEEP_SYS_FLAG, cursor, + offsets, &offsets_heap, update, 0, thr, + thr_get_trx(thr)->id, mtr); + switch (err) { + case DB_OVERFLOW: + case DB_UNDERFLOW: + case DB_ZIP_OVERFLOW: + err = DB_FAIL; + default: + break; + } + } else { + ut_a(mode == BTR_MODIFY_TREE); + if (buf_LRU_buf_pool_running_out()) { + + return(DB_LOCK_TABLE_FULL); + } + + err = btr_cur_pessimistic_update( + flags | BTR_KEEP_SYS_FLAG, cursor, + offsets, &offsets_heap, + heap, &dummy_big_rec, update, 0, + thr, thr_get_trx(thr)->id, mtr); + ut_ad(!dummy_big_rec); + } + + return(err); +} + +/*******************************************************************//** +Does an insert operation by delete unmarking and updating a delete marked +existing record in the index. This situation can occur if the delete marked +record is kept in the index for consistent reads. +@return DB_SUCCESS, DB_FAIL, or error code */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_clust_index_entry_by_modify( +/*================================*/ + ulint flags, /*!< in: undo logging and locking flags */ + ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE, + depending on whether mtr holds just a leaf + latch or also a tree latch */ + btr_cur_t* cursor, /*!< in: B-tree cursor */ + ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */ + mem_heap_t** offsets_heap, + /*!< in/out: pointer to memory heap that can + be emptied, or NULL */ + mem_heap_t* heap, /*!< in/out: memory heap */ + big_rec_t** big_rec,/*!< out: possible big rec vector of fields + which have to be stored externally by the + caller */ + const dtuple_t* entry, /*!< in: index entry to insert */ + que_thr_t* thr, /*!< in: query thread */ + mtr_t* mtr) /*!< in: mtr; must be committed before + latching any further pages */ +{ + const rec_t* rec; + const upd_t* update; + dberr_t err; + + ut_ad(dict_index_is_clust(cursor->index)); + + *big_rec = NULL; + + rec = btr_cur_get_rec(cursor); + + ut_ad(rec_get_deleted_flag(rec, + dict_table_is_comp(cursor->index->table))); + + /* Build an update vector containing all the fields to be modified; + NOTE that this vector may NOT contain system columns trx_id or + roll_ptr */ + + update = row_upd_build_difference_binary( + cursor->index, entry, rec, NULL, true, + thr_get_trx(thr), heap); + if (mode != BTR_MODIFY_TREE) { + ut_ad((mode & ~BTR_ALREADY_S_LATCHED) == BTR_MODIFY_LEAF); + + /* Try optimistic updating of the record, keeping changes + within the page */ + + err = btr_cur_optimistic_update( + flags, cursor, offsets, offsets_heap, update, 0, thr, + thr_get_trx(thr)->id, mtr); + switch (err) { + case DB_OVERFLOW: + case DB_UNDERFLOW: + case DB_ZIP_OVERFLOW: + err = DB_FAIL; + default: + break; + } + } else { + if (buf_LRU_buf_pool_running_out()) { + + return(DB_LOCK_TABLE_FULL); + + } + err = btr_cur_pessimistic_update( + flags | BTR_KEEP_POS_FLAG, + cursor, offsets, offsets_heap, heap, + big_rec, update, 0, thr, thr_get_trx(thr)->id, mtr); + } + + return(err); +} + +/*********************************************************************//** +Returns TRUE if in a cascaded update/delete an ancestor node of node +updates (not DELETE, but UPDATE) table. 
+@return TRUE if an ancestor updates table */ +static +ibool +row_ins_cascade_ancestor_updates_table( +/*===================================*/ + que_node_t* node, /*!< in: node in a query graph */ + dict_table_t* table) /*!< in: table */ +{ + que_node_t* parent; + + for (parent = que_node_get_parent(node); + que_node_get_type(parent) == QUE_NODE_UPDATE; + parent = que_node_get_parent(parent)) { + + upd_node_t* upd_node; + + upd_node = static_cast<upd_node_t*>(parent); + + if (upd_node->table == table && upd_node->is_delete == FALSE) { + + return(TRUE); + } + } + + return(FALSE); +} + +/*********************************************************************//** +Returns the number of ancestor UPDATE or DELETE nodes of a +cascaded update/delete node. +@return number of ancestors */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +ulint +row_ins_cascade_n_ancestors( +/*========================*/ + que_node_t* node) /*!< in: node in a query graph */ +{ + que_node_t* parent; + ulint n_ancestors = 0; + + for (parent = que_node_get_parent(node); + que_node_get_type(parent) == QUE_NODE_UPDATE; + parent = que_node_get_parent(parent)) { + + n_ancestors++; + } + + return(n_ancestors); +} + +/******************************************************************//** +Calculates the update vector node->cascade->update for a child table in +a cascaded update. +@return number of fields in the calculated update vector; the value +can also be 0 if no foreign key fields changed; the returned value is +ULINT_UNDEFINED if the column type in the child table is too short to +fit the new value in the parent table: that means the update fails */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +ulint +row_ins_cascade_calc_update_vec( +/*============================*/ + upd_node_t* node, /*!< in: update node of the parent + table */ + dict_foreign_t* foreign, /*!< in: foreign key constraint whose + type is != 0 */ + mem_heap_t* heap, /*!< in: memory heap to use as + temporary storage */ + trx_t* trx, /*!< in: update transaction */ + ibool* fts_col_affected)/*!< out: is FTS column affected */ +{ + upd_node_t* cascade = node->cascade_node; + dict_table_t* table = foreign->foreign_table; + dict_index_t* index = foreign->foreign_index; + upd_t* update; + dict_table_t* parent_table; + dict_index_t* parent_index; + upd_t* parent_update; + ulint n_fields_updated; + ulint parent_field_no; + ulint i; + ulint j; + ibool doc_id_updated = FALSE; + ulint doc_id_pos = 0; + doc_id_t new_doc_id = FTS_NULL_DOC_ID; + + ut_a(node); + ut_a(foreign); + ut_a(cascade); + ut_a(table); + ut_a(index); + + /* Calculate the appropriate update vector which will set the fields + in the child index record to the same value (possibly padded with + spaces if the column is a fixed length CHAR or FIXBINARY column) as + the referenced index record will get in the update. 
*/ + + parent_table = node->table; + ut_a(parent_table == foreign->referenced_table); + parent_index = foreign->referenced_index; + parent_update = node->update; + + update = cascade->update; + + update->info_bits = 0; + update->n_fields = foreign->n_fields; + + n_fields_updated = 0; + + *fts_col_affected = FALSE; + + if (table->fts) { + doc_id_pos = dict_table_get_nth_col_pos( + table, table->fts->doc_col); + } + + for (i = 0; i < foreign->n_fields; i++) { + + parent_field_no = dict_table_get_nth_col_pos( + parent_table, + dict_index_get_nth_col_no(parent_index, i)); + + for (j = 0; j < parent_update->n_fields; j++) { + const upd_field_t* parent_ufield + = &parent_update->fields[j]; + + if (parent_ufield->field_no == parent_field_no) { + + ulint min_size; + const dict_col_t* col; + ulint ufield_len; + upd_field_t* ufield; + + col = dict_index_get_nth_col(index, i); + + /* A field in the parent index record is + updated. Let us make the update vector + field for the child table. */ + + ufield = update->fields + n_fields_updated; + + ufield->field_no + = dict_table_get_nth_col_pos( + table, dict_col_get_no(col)); + + ufield->orig_len = 0; + ufield->exp = NULL; + + ufield->new_val = parent_ufield->new_val; + ufield_len = dfield_get_len(&ufield->new_val); + + /* Clear the "external storage" flag */ + dfield_set_len(&ufield->new_val, ufield_len); + + /* Do not allow a NOT NULL column to be + updated as NULL */ + + if (dfield_is_null(&ufield->new_val) + && (col->prtype & DATA_NOT_NULL)) { + + return(ULINT_UNDEFINED); + } + + /* If the new value would not fit in the + column, do not allow the update */ + + if (!dfield_is_null(&ufield->new_val) + && dtype_get_at_most_n_mbchars( + col->prtype, + col->mbminlen, col->mbmaxlen, + col->len, + ufield_len, + static_cast<char*>( + dfield_get_data( + &ufield->new_val))) + < ufield_len) { + + return(ULINT_UNDEFINED); + } + + /* If the parent column type has a different + length than the child column type, we may + need to pad with spaces the new value of the + child column */ + + min_size = dict_col_get_min_size(col); + + /* Because UNIV_SQL_NULL (the marker + of SQL NULL values) exceeds all possible + values of min_size, the test below will + not hold for SQL NULL columns. 
*/ + + if (min_size > ufield_len) { + + byte* pad; + ulint pad_len; + byte* padded_data; + ulint mbminlen; + + padded_data = static_cast<byte*>( + mem_heap_alloc( + heap, min_size)); + + pad = padded_data + ufield_len; + pad_len = min_size - ufield_len; + + memcpy(padded_data, + dfield_get_data(&ufield + ->new_val), + ufield_len); + + mbminlen = dict_col_get_mbminlen(col); + + ut_ad(!(ufield_len % mbminlen)); + ut_ad(!(min_size % mbminlen)); + + if (mbminlen == 1 + && dtype_get_charset_coll( + col->prtype) + == DATA_MYSQL_BINARY_CHARSET_COLL) { + /* Do not pad BINARY columns */ + return(ULINT_UNDEFINED); + } + + row_mysql_pad_col(mbminlen, + pad, pad_len); + dfield_set_data(&ufield->new_val, + padded_data, min_size); + } + + /* Check whether the current column has + FTS index on it */ + if (table->fts + && dict_table_is_fts_column( + table->fts->indexes, + dict_col_get_no(col)) + != ULINT_UNDEFINED) { + *fts_col_affected = TRUE; + } + + /* If Doc ID is updated, check whether the + Doc ID is valid */ + if (table->fts + && ufield->field_no == doc_id_pos) { + doc_id_t n_doc_id; + + n_doc_id = + table->fts->cache->next_doc_id; + + new_doc_id = fts_read_doc_id( + static_cast<const byte*>( + dfield_get_data( + &ufield->new_val))); + + if (new_doc_id <= 0) { + fprintf(stderr, + "InnoDB: FTS Doc ID " + "must be larger than " + "0 \n"); + return(ULINT_UNDEFINED); + } + + if (new_doc_id < n_doc_id) { + fprintf(stderr, + "InnoDB: FTS Doc ID " + "must be larger than " + IB_ID_FMT" for table", + n_doc_id -1); + + ut_print_name(stderr, trx, + TRUE, + table->name); + + putc('\n', stderr); + return(ULINT_UNDEFINED); + } + + *fts_col_affected = TRUE; + doc_id_updated = TRUE; + } + + n_fields_updated++; + } + } + } + + /* Generate a new Doc ID if FTS index columns get updated */ + if (table->fts && *fts_col_affected) { + if (DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) { + doc_id_t doc_id; + upd_field_t* ufield; + + ut_ad(!doc_id_updated); + ufield = update->fields + n_fields_updated; + fts_get_next_doc_id(table, &trx->fts_next_doc_id); + doc_id = fts_update_doc_id(table, ufield, + &trx->fts_next_doc_id); + n_fields_updated++; + fts_trx_add_op(trx, table, doc_id, FTS_INSERT, NULL); + } else { + if (doc_id_updated) { + ut_ad(new_doc_id); + fts_trx_add_op(trx, table, new_doc_id, + FTS_INSERT, NULL); + } else { + fprintf(stderr, "InnoDB: FTS Doc ID must be " + "updated along with FTS indexed " + "column for table "); + ut_print_name(stderr, trx, TRUE, table->name); + putc('\n', stderr); + return(ULINT_UNDEFINED); + } + } + } + + update->n_fields = n_fields_updated; + + return(n_fields_updated); +} + +/*********************************************************************//** +Set detailed error message associated with foreign key errors for +the given transaction. 
*/ +static +void +row_ins_set_detailed( +/*=================*/ + trx_t* trx, /*!< in: transaction */ + dict_foreign_t* foreign) /*!< in: foreign key constraint */ +{ + ut_ad(!srv_read_only_mode); + + mutex_enter(&srv_misc_tmpfile_mutex); + rewind(srv_misc_tmpfile); + + if (os_file_set_eof(srv_misc_tmpfile)) { + std::string fk_str; + ut_print_name(srv_misc_tmpfile, trx, TRUE, + foreign->foreign_table_name); + fk_str = dict_print_info_on_foreign_key_in_create_format( + trx, foreign, FALSE); + fputs(fk_str.c_str(), srv_misc_tmpfile); + trx_set_detailed_error_from_file(trx, srv_misc_tmpfile); + } else { + trx_set_detailed_error(trx, "temp file operation failed"); + } + + mutex_exit(&srv_misc_tmpfile_mutex); +} + +/*********************************************************************//** +Acquires dict_foreign_err_mutex, rewinds dict_foreign_err_file +and displays information about the given transaction. +The caller must release dict_foreign_err_mutex. */ +static +void +row_ins_foreign_trx_print( +/*======================*/ + trx_t* trx) /*!< in: transaction */ +{ + ulint n_rec_locks; + ulint n_trx_locks; + ulint heap_size; + + if (srv_read_only_mode) { + return; + } + + lock_mutex_enter(); + n_rec_locks = lock_number_of_rows_locked(&trx->lock); + n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks); + heap_size = mem_heap_get_size(trx->lock.lock_heap); + lock_mutex_exit(); + + mutex_enter(&trx_sys->mutex); + + mutex_enter(&dict_foreign_err_mutex); + rewind(dict_foreign_err_file); + ut_print_timestamp(dict_foreign_err_file); + fputs(" Transaction:\n", dict_foreign_err_file); + + trx_print_low(dict_foreign_err_file, trx, 600, + n_rec_locks, n_trx_locks, heap_size); + + mutex_exit(&trx_sys->mutex); + + ut_ad(mutex_own(&dict_foreign_err_mutex)); +} + +/*********************************************************************//** +Reports a foreign key error associated with an update or a delete of a +parent table index entry. 
*/ +static +void +row_ins_foreign_report_err( +/*=======================*/ + const char* errstr, /*!< in: error string from the viewpoint + of the parent table */ + que_thr_t* thr, /*!< in: query thread whose run_node + is an update node */ + dict_foreign_t* foreign, /*!< in: foreign key constraint */ + const rec_t* rec, /*!< in: a matching index record in the + child table */ + const dtuple_t* entry) /*!< in: index entry in the parent + table */ +{ + std::string fk_str; + + if (srv_read_only_mode) { + return; + } + + FILE* ef = dict_foreign_err_file; + trx_t* trx = thr_get_trx(thr); + + row_ins_set_detailed(trx, foreign); + + row_ins_foreign_trx_print(trx); + + fputs("Foreign key constraint fails for table ", ef); + ut_print_name(ef, trx, TRUE, foreign->foreign_table_name); + fputs(":\n", ef); + fk_str = dict_print_info_on_foreign_key_in_create_format(trx, foreign, + TRUE); + fputs(fk_str.c_str(), ef); + putc('\n', ef); + fputs(errstr, ef); + fputs(" in parent table, in index ", ef); + ut_print_name(ef, trx, FALSE, foreign->referenced_index->name); + if (entry) { + fputs(" tuple:\n", ef); + dtuple_print(ef, entry); + } + fputs("\nBut in child table ", ef); + ut_print_name(ef, trx, TRUE, foreign->foreign_table_name); + fputs(", in index ", ef); + ut_print_name(ef, trx, FALSE, foreign->foreign_index->name); + if (rec) { + fputs(", there is a record:\n", ef); + rec_print(ef, rec, foreign->foreign_index); + } else { + fputs(", the record is not available\n", ef); + } + putc('\n', ef); + + mutex_exit(&dict_foreign_err_mutex); +} + +/*********************************************************************//** +Reports a foreign key error to dict_foreign_err_file when we are trying +to add an index entry to a child table. Note that the adding may be the result +of an update, too. */ +static +void +row_ins_foreign_report_add_err( +/*===========================*/ + trx_t* trx, /*!< in: transaction */ + dict_foreign_t* foreign, /*!< in: foreign key constraint */ + const rec_t* rec, /*!< in: a record in the parent table: + it does not match entry because we + have an error! */ + const dtuple_t* entry) /*!< in: index entry to insert in the + child table */ +{ + std::string fk_str; + + if (srv_read_only_mode) { + return; + } + + FILE* ef = dict_foreign_err_file; + + row_ins_set_detailed(trx, foreign); + + row_ins_foreign_trx_print(trx); + + fputs("Foreign key constraint fails for table ", ef); + ut_print_name(ef, trx, TRUE, foreign->foreign_table_name); + fputs(":\n", ef); + fk_str = dict_print_info_on_foreign_key_in_create_format(trx, foreign, + TRUE); + fputs(fk_str.c_str(), ef); + fputs("\nTrying to add in child table, in index ", ef); + ut_print_name(ef, trx, FALSE, foreign->foreign_index->name); + if (entry) { + fputs(" tuple:\n", ef); + /* TODO: DB_TRX_ID and DB_ROLL_PTR may be uninitialized. + It would be better to only display the user columns. */ + dtuple_print(ef, entry); + } + fputs("\nBut in parent table ", ef); + ut_print_name(ef, trx, TRUE, foreign->referenced_table_name); + fputs(", in index ", ef); + ut_print_name(ef, trx, FALSE, foreign->referenced_index->name); + fputs(",\nthe closest match we can find is record:\n", ef); + if (rec && page_rec_is_supremum(rec)) { + /* If the cursor ended on a supremum record, it is better + to report the previous record in the error message, so that + the user gets a more descriptive error message. 
*/ + rec = page_rec_get_prev_const(rec); + } + + if (rec) { + rec_print(ef, rec, foreign->referenced_index); + } + putc('\n', ef); + + mutex_exit(&dict_foreign_err_mutex); +} + +/*********************************************************************//** +Invalidate the query cache for the given table. */ +static +void +row_ins_invalidate_query_cache( +/*===========================*/ + que_thr_t* thr, /*!< in: query thread whose run_node + is an update node */ + const char* name) /*!< in: table name prefixed with + database name and a '/' character */ +{ + char* buf; + char* ptr; + ulint len = strlen(name) + 1; + + buf = mem_strdupl(name, len); + + ptr = strchr(buf, '/'); + ut_a(ptr); + *ptr = '\0'; + + innobase_invalidate_query_cache(thr_get_trx(thr), buf, len); + mem_free(buf); +} +#ifdef WITH_WSREP +dberr_t wsrep_append_foreign_key(trx_t *trx, - dict_foreign_t* foreign, - const rec_t* clust_rec, - dict_index_t* clust_index, - ibool referenced, - ibool shared); ++ dict_foreign_t* foreign, ++ const rec_t* clust_rec, ++ dict_index_t* clust_index, ++ ibool referenced, ++ enum wsrep_key_type key_type); +#endif /* WITH_WSREP */ + +/*********************************************************************//** +Perform referential actions or checks when a parent row is deleted or updated +and the constraint had an ON DELETE or ON UPDATE condition which was not +RESTRICT. +@return DB_SUCCESS, DB_LOCK_WAIT, or error code */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_foreign_check_on_constraint( +/*================================*/ + que_thr_t* thr, /*!< in: query thread whose run_node + is an update node */ + dict_foreign_t* foreign, /*!< in: foreign key constraint whose + type is != 0 */ + btr_pcur_t* pcur, /*!< in: cursor placed on a matching + index record in the child table */ + dtuple_t* entry, /*!< in: index entry in the parent + table */ + mtr_t* mtr) /*!< in: mtr holding the latch of pcur + page */ +{ + upd_node_t* node; + upd_node_t* cascade; + dict_table_t* table = foreign->foreign_table; + dict_index_t* index; + dict_index_t* clust_index; + dtuple_t* ref; + mem_heap_t* upd_vec_heap = NULL; + const rec_t* rec; + const rec_t* clust_rec; + const buf_block_t* clust_block; + upd_t* update; + ulint n_to_update; + dberr_t err; + ulint i; + trx_t* trx; + mem_heap_t* tmp_heap = NULL; + doc_id_t doc_id = FTS_NULL_DOC_ID; + ibool fts_col_affacted = FALSE; + + ut_a(thr); + ut_a(foreign); + ut_a(pcur); + ut_a(mtr); + + trx = thr_get_trx(thr); + + /* Since we are going to delete or update a row, we have to invalidate + the MySQL query cache for table. A deadlock of threads is not possible + here because the caller of this function does not hold any latches with + the sync0sync.h rank above the lock_sys_t::mutex. The query cache mutex + has a rank just above the lock_sys_t::mutex. 
*/ + + row_ins_invalidate_query_cache(thr, table->name); + + node = static_cast<upd_node_t*>(thr->run_node); + + if (node->is_delete && 0 == (foreign->type + & (DICT_FOREIGN_ON_DELETE_CASCADE + | DICT_FOREIGN_ON_DELETE_SET_NULL))) { + + row_ins_foreign_report_err("Trying to delete", + thr, foreign, + btr_pcur_get_rec(pcur), entry); + + return(DB_ROW_IS_REFERENCED); + } + + if (!node->is_delete && 0 == (foreign->type + & (DICT_FOREIGN_ON_UPDATE_CASCADE + | DICT_FOREIGN_ON_UPDATE_SET_NULL))) { + + /* This is an UPDATE */ + + row_ins_foreign_report_err("Trying to update", + thr, foreign, + btr_pcur_get_rec(pcur), entry); + + return(DB_ROW_IS_REFERENCED); + } + + if (node->cascade_node == NULL) { + /* Extend our query graph by creating a child to current + update node. The child is used in the cascade or set null + operation. */ + + node->cascade_heap = mem_heap_create(128); + node->cascade_node = row_create_update_node_for_mysql( + table, node->cascade_heap); + que_node_set_parent(node->cascade_node, node); + } + + /* Initialize cascade_node to do the operation we want. Note that we + use the SAME cascade node to do all foreign key operations of the + SQL DELETE: the table of the cascade node may change if there are + several child tables to the table where the delete is done! */ + + cascade = node->cascade_node; + + cascade->table = table; + + cascade->foreign = foreign; + + if (node->is_delete + && (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE)) { + cascade->is_delete = TRUE; + } else { + cascade->is_delete = FALSE; + + if (foreign->n_fields > cascade->update_n_fields) { + /* We have to make the update vector longer */ + + cascade->update = upd_create(foreign->n_fields, + node->cascade_heap); + cascade->update_n_fields = foreign->n_fields; + } + } + + /* We do not allow cyclic cascaded updating (DELETE is allowed, + but not UPDATE) of the same table, as this can lead to an infinite + cycle. Check that we are not updating the same table which is + already being modified in this cascade chain. We have to check + this also because the modification of the indexes of a 'parent' + table may still be incomplete, and we must avoid seeing the indexes + of the parent table in an inconsistent state! 
*/ + + if (!cascade->is_delete + && row_ins_cascade_ancestor_updates_table(cascade, table)) { + + /* We do not know if this would break foreign key + constraints, but play safe and return an error */ + + err = DB_ROW_IS_REFERENCED; + + row_ins_foreign_report_err( + "Trying an update, possibly causing a cyclic" + " cascaded update\n" + "in the child table,", thr, foreign, + btr_pcur_get_rec(pcur), entry); + + goto nonstandard_exit_func; + } + + if (row_ins_cascade_n_ancestors(cascade) >= 15) { + err = DB_ROW_IS_REFERENCED; + + row_ins_foreign_report_err( + "Trying a too deep cascaded delete or update\n", + thr, foreign, btr_pcur_get_rec(pcur), entry); + + goto nonstandard_exit_func; + } + + index = btr_pcur_get_btr_cur(pcur)->index; + + ut_a(index == foreign->foreign_index); + + rec = btr_pcur_get_rec(pcur); + + tmp_heap = mem_heap_create(256); + + if (dict_index_is_clust(index)) { + /* pcur is already positioned in the clustered index of + the child table */ + + clust_index = index; + clust_rec = rec; + clust_block = btr_pcur_get_block(pcur); + } else { + /* We have to look for the record in the clustered index + in the child table */ + + clust_index = dict_table_get_first_index(table); + + ref = row_build_row_ref(ROW_COPY_POINTERS, index, rec, + tmp_heap); + btr_pcur_open_with_no_init(clust_index, ref, + PAGE_CUR_LE, BTR_SEARCH_LEAF, + cascade->pcur, 0, mtr); + + clust_rec = btr_pcur_get_rec(cascade->pcur); + clust_block = btr_pcur_get_block(cascade->pcur); + + if (!page_rec_is_user_rec(clust_rec) + || btr_pcur_get_low_match(cascade->pcur) + < dict_index_get_n_unique(clust_index)) { + + fputs("InnoDB: error in cascade of a foreign key op\n" + "InnoDB: ", stderr); + dict_index_name_print(stderr, trx, index); + + fputs("\n" + "InnoDB: record ", stderr); + rec_print(stderr, rec, index); + fputs("\n" + "InnoDB: clustered record ", stderr); + rec_print(stderr, clust_rec, clust_index); + fputs("\n" + "InnoDB: Submit a detailed bug report to" + " https://jira.mariadb.org/\n", stderr); + ut_ad(0); + err = DB_SUCCESS; + + goto nonstandard_exit_func; + } + } + + /* Set an X-lock on the row to delete or update in the child table */ + + err = lock_table(0, table, LOCK_IX, thr); + + if (err == DB_SUCCESS) { + /* Here it suffices to use a LOCK_REC_NOT_GAP type lock; + we already have a normal shared lock on the appropriate + gap if the search criterion was not unique */ + + err = lock_clust_rec_read_check_and_lock_alt( + 0, clust_block, clust_rec, clust_index, + LOCK_X, LOCK_REC_NOT_GAP, thr); + } + + if (err != DB_SUCCESS) { + + goto nonstandard_exit_func; + } + + if (rec_get_deleted_flag(clust_rec, dict_table_is_comp(table))) { + /* This can happen if there is a circular reference of + rows such that cascading delete comes to delete a row + already in the process of being delete marked */ + err = DB_SUCCESS; + + goto nonstandard_exit_func; + } + + if (table->fts) { + doc_id = fts_get_doc_id_from_rec(table, clust_rec, tmp_heap); + } + + if (node->is_delete + ? 
(foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL) + : (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL)) { + + /* Build the appropriate update vector which sets + foreign->n_fields first fields in rec to SQL NULL */ + + update = cascade->update; + + update->info_bits = 0; + update->n_fields = foreign->n_fields; + UNIV_MEM_INVALID(update->fields, + update->n_fields * sizeof *update->fields); + + for (i = 0; i < foreign->n_fields; i++) { + upd_field_t* ufield = &update->fields[i]; + + ufield->field_no = dict_table_get_nth_col_pos( + table, + dict_index_get_nth_col_no(index, i)); + ufield->orig_len = 0; + ufield->exp = NULL; + dfield_set_null(&ufield->new_val); + + if (table->fts && dict_table_is_fts_column( + table->fts->indexes, + dict_index_get_nth_col_no(index, i)) + != ULINT_UNDEFINED) { + fts_col_affacted = TRUE; + } + } + + if (fts_col_affacted) { + fts_trx_add_op(trx, table, doc_id, FTS_DELETE, NULL); + } + } else if (table->fts && cascade->is_delete) { + /* DICT_FOREIGN_ON_DELETE_CASCADE case */ + for (i = 0; i < foreign->n_fields; i++) { + if (table->fts && dict_table_is_fts_column( + table->fts->indexes, + dict_index_get_nth_col_no(index, i)) + != ULINT_UNDEFINED) { + fts_col_affacted = TRUE; + } + } + + if (fts_col_affacted) { + fts_trx_add_op(trx, table, doc_id, FTS_DELETE, NULL); + } + } + + if (!node->is_delete + && (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE)) { + + /* Build the appropriate update vector which sets changing + foreign->n_fields first fields in rec to new values */ + + upd_vec_heap = mem_heap_create(256); + + n_to_update = row_ins_cascade_calc_update_vec( + node, foreign, upd_vec_heap, trx, &fts_col_affacted); + + if (n_to_update == ULINT_UNDEFINED) { + err = DB_ROW_IS_REFERENCED; + + row_ins_foreign_report_err( + "Trying a cascaded update where the" + " updated value in the child\n" + "table would not fit in the length" + " of the column, or the value would\n" + "be NULL and the column is" + " declared as not NULL in the child table,", + thr, foreign, btr_pcur_get_rec(pcur), entry); + + goto nonstandard_exit_func; + } + + if (cascade->update->n_fields == 0) { + + /* The update does not change any columns referred + to in this foreign key constraint: no need to do + anything */ + + err = DB_SUCCESS; + + goto nonstandard_exit_func; + } + + /* Mark the old Doc ID as deleted */ + if (fts_col_affacted) { + ut_ad(table->fts); + fts_trx_add_op(trx, table, doc_id, FTS_DELETE, NULL); + } + } + + /* Store pcur position and initialize or store the cascade node + pcur stored position */ + + btr_pcur_store_position(pcur, mtr); + + if (index == clust_index) { + btr_pcur_copy_stored_position(cascade->pcur, pcur); + } else { + btr_pcur_store_position(cascade->pcur, mtr); + } + + mtr_commit(mtr); + + ut_a(cascade->pcur->rel_pos == BTR_PCUR_ON); + + cascade->state = UPD_NODE_UPDATE_CLUSTERED; + +#ifdef WITH_WSREP + err = wsrep_append_foreign_key( + thr_get_trx(thr), + foreign, + clust_rec, + clust_index, - FALSE, FALSE); ++ FALSE, WSREP_KEY_EXCLUSIVE); + if (err != DB_SUCCESS) { + fprintf(stderr, + "WSREP: foreign key append failed: %d\n", err); + } else +#endif /* WITH_WSREP */ + err = row_update_cascade_for_mysql(thr, cascade, + foreign->foreign_table); + + if (foreign->foreign_table->n_foreign_key_checks_running == 0) { + fprintf(stderr, + "InnoDB: error: table %s has the counter 0" + " though there is\n" + "InnoDB: a FOREIGN KEY check running on it.\n", + foreign->foreign_table->name); + } + + /* Release the data dictionary latch for a while, so that we do not + starve 
other threads from doing CREATE TABLE etc. if we have a huge + cascaded operation running. The counter n_foreign_key_checks_running + will prevent other users from dropping or ALTERing the table when we + release the latch. */ + + row_mysql_unfreeze_data_dictionary(thr_get_trx(thr)); + + DEBUG_SYNC_C("innodb_dml_cascade_dict_unfreeze"); + + row_mysql_freeze_data_dictionary(thr_get_trx(thr)); + + mtr_start(mtr); + + /* Restore pcur position */ + + btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr); + + if (tmp_heap) { + mem_heap_free(tmp_heap); + } + + if (upd_vec_heap) { + mem_heap_free(upd_vec_heap); + } + + return(err); + +nonstandard_exit_func: + if (tmp_heap) { + mem_heap_free(tmp_heap); + } + + if (upd_vec_heap) { + mem_heap_free(upd_vec_heap); + } + + btr_pcur_store_position(pcur, mtr); + + mtr_commit(mtr); + mtr_start(mtr); + + btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr); + + return(err); +} + +/*********************************************************************//** +Sets a shared lock on a record. Used in locking possible duplicate key +records and also in checking foreign key constraints. +@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */ +static +dberr_t +row_ins_set_shared_rec_lock( +/*========================*/ + ulint type, /*!< in: LOCK_ORDINARY, LOCK_GAP, or + LOCK_REC_NOT_GAP type lock */ + const buf_block_t* block, /*!< in: buffer block of rec */ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in: index */ + const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + + ut_ad(rec_offs_validate(rec, index, offsets)); + + if (dict_index_is_clust(index)) { + err = lock_clust_rec_read_check_and_lock( + 0, block, rec, index, offsets, LOCK_S, type, thr); + } else { + err = lock_sec_rec_read_check_and_lock( + 0, block, rec, index, offsets, LOCK_S, type, thr); + } + + return(err); +} + +/*********************************************************************//** +Sets a exclusive lock on a record. Used in locking possible duplicate key +records +@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */ +static +dberr_t +row_ins_set_exclusive_rec_lock( +/*===========================*/ + ulint type, /*!< in: LOCK_ORDINARY, LOCK_GAP, or + LOCK_REC_NOT_GAP type lock */ + const buf_block_t* block, /*!< in: buffer block of rec */ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in: index */ + const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + + ut_ad(rec_offs_validate(rec, index, offsets)); + + if (dict_index_is_clust(index)) { + err = lock_clust_rec_read_check_and_lock( + 0, block, rec, index, offsets, LOCK_X, type, thr); + } else { + err = lock_sec_rec_read_check_and_lock( + 0, block, rec, index, offsets, LOCK_X, type, thr); + } + + return(err); +} + +/***************************************************************//** +Checks if foreign key constraint fails for an index entry. Sets shared locks +which lock either the success or the failure of the constraint. NOTE that +the caller must have a shared latch on dict_operation_lock. 
+@return DB_SUCCESS, DB_NO_REFERENCED_ROW, or DB_ROW_IS_REFERENCED */ +UNIV_INTERN +dberr_t +row_ins_check_foreign_constraint( +/*=============================*/ + ibool check_ref,/*!< in: TRUE if we want to check that + the referenced table is ok, FALSE if we + want to check the foreign key table */ + dict_foreign_t* foreign,/*!< in: foreign constraint; NOTE that the + tables mentioned in it must be in the + dictionary cache if they exist at all */ + dict_table_t* table, /*!< in: if check_ref is TRUE, then the foreign + table, else the referenced table */ + dtuple_t* entry, /*!< in: index entry for index */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + upd_node_t* upd_node; + dict_table_t* check_table; + dict_index_t* check_index; + ulint n_fields_cmp; + btr_pcur_t pcur; + int cmp; + ulint i; + mtr_t mtr; + trx_t* trx = thr_get_trx(thr); + mem_heap_t* heap = NULL; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + rec_offs_init(offsets_); + +#ifdef WITH_WSREP + upd_node= NULL; +#endif /* WITH_WSREP */ +run_again: +#ifdef UNIV_SYNC_DEBUG + ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_SHARED)); +#endif /* UNIV_SYNC_DEBUG */ + + err = DB_SUCCESS; + + if (trx->check_foreigns == FALSE) { + /* The user has suppressed foreign key checks currently for + this session */ + goto exit_func; + } + + /* If any of the foreign key fields in entry is SQL NULL, we + suppress the foreign key check: this is compatible with Oracle, + for example */ + + for (i = 0; i < foreign->n_fields; i++) { + if (UNIV_SQL_NULL == dfield_get_len( + dtuple_get_nth_field(entry, i))) { + + goto exit_func; + } + } + + if (que_node_get_type(thr->run_node) == QUE_NODE_UPDATE) { + upd_node = static_cast<upd_node_t*>(thr->run_node); + + if (!(upd_node->is_delete) && upd_node->foreign == foreign) { + /* If a cascaded update is done as defined by a + foreign key constraint, do not check that + constraint for the child row. In ON UPDATE CASCADE + the update of the parent row is only half done when + we come here: if we would check the constraint here + for the child row it would fail. + + A QUESTION remains: if in the child table there are + several constraints which refer to the same parent + table, we should merge all updates to the child as + one update? And the updates can be contradictory! + Currently we just perform the update associated + with each foreign key constraint, one after + another, and the user has problems predicting in + which order they are performed. 
*/ + + goto exit_func; + } + } + + if (check_ref) { + check_table = foreign->referenced_table; + check_index = foreign->referenced_index; + } else { + check_table = foreign->foreign_table; + check_index = foreign->foreign_index; + } + + if (check_table == NULL + || check_table->ibd_file_missing + || check_index == NULL) { + + if (!srv_read_only_mode && check_ref) { + FILE* ef = dict_foreign_err_file; + std::string fk_str; + + row_ins_set_detailed(trx, foreign); + + row_ins_foreign_trx_print(trx); + + fputs("Foreign key constraint fails for table ", ef); + ut_print_name(ef, trx, TRUE, + foreign->foreign_table_name); + fputs(":\n", ef); + fk_str = dict_print_info_on_foreign_key_in_create_format( + trx, foreign, TRUE); + fputs(fk_str.c_str(), ef); + fputs("\nTrying to add to index ", ef); + ut_print_name(ef, trx, FALSE, + foreign->foreign_index->name); + fputs(" tuple:\n", ef); + dtuple_print(ef, entry); + fputs("\nBut the parent table ", ef); + ut_print_name(ef, trx, TRUE, + foreign->referenced_table_name); + fputs("\nor its .ibd file does" + " not currently exist!\n", ef); + mutex_exit(&dict_foreign_err_mutex); + + err = DB_NO_REFERENCED_ROW; + } + + goto exit_func; + } + + if (check_table != table) { + /* We already have a LOCK_IX on table, but not necessarily + on check_table */ + + err = lock_table(0, check_table, LOCK_IS, thr); + + if (err != DB_SUCCESS) { + + goto do_possible_lock_wait; + } + } + + mtr_start(&mtr); + + /* Store old value on n_fields_cmp */ + + n_fields_cmp = dtuple_get_n_fields_cmp(entry); + + dtuple_set_n_fields_cmp(entry, foreign->n_fields); + + btr_pcur_open(check_index, entry, PAGE_CUR_GE, + BTR_SEARCH_LEAF, &pcur, &mtr); + + /* Scan index records and check if there is a matching record */ + + do { + const rec_t* rec = btr_pcur_get_rec(&pcur); + const buf_block_t* block = btr_pcur_get_block(&pcur); + + if (page_rec_is_infimum(rec)) { + + continue; + } + + offsets = rec_get_offsets(rec, check_index, + offsets, ULINT_UNDEFINED, &heap); + + if (page_rec_is_supremum(rec)) { + + err = row_ins_set_shared_rec_lock(LOCK_ORDINARY, block, + rec, check_index, + offsets, thr); + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + continue; + default: + goto end_scan; + } + } + + cmp = cmp_dtuple_rec(entry, rec, offsets); + + if (cmp == 0) { + if (rec_get_deleted_flag(rec, + rec_offs_comp(offsets))) { + err = row_ins_set_shared_rec_lock( + LOCK_ORDINARY, block, + rec, check_index, offsets, thr); + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + break; + default: + goto end_scan; + } + } else { + /* Found a matching record. Lock only + a record because we can allow inserts + into gaps */ + + err = row_ins_set_shared_rec_lock( + LOCK_REC_NOT_GAP, block, + rec, check_index, offsets, thr); + + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + break; + default: + goto end_scan; + } + + if (check_ref) { ++#ifdef WITH_WSREP ++ enum wsrep_key_type key_type = WSREP_KEY_EXCLUSIVE; ++#endif WITH_WSREP + err = DB_SUCCESS; ++ +#ifdef WITH_WSREP ++ if (upd_node != NULL) { ++ key_type = WSREP_KEY_SHARED; ++ } else { ++ switch (wsrep_certification_rules) { ++ case WSREP_CERTIFICATION_RULES_STRICT: ++ key_type = WSREP_KEY_EXCLUSIVE; ++ break; ++ case WSREP_CERTIFICATION_RULES_OPTIMIZED: ++ key_type = WSREP_KEY_SEMI; ++ break; ++ } ++ } ++ + err = wsrep_append_foreign_key( - thr_get_trx(thr), - foreign, - rec, - check_index, - check_ref, - (upd_node) ? 
TRUE : FALSE); - #endif /* WITH_WSREP */ ++ thr_get_trx(thr), ++ foreign, ++ rec, ++ check_index, ++ check_ref, ++ key_type); ++ #endif /* WITH_WSREP */ ++ + goto end_scan; + } else if (foreign->type != 0) { + /* There is an ON UPDATE or ON DELETE + condition: check them in a separate + function */ + + err = row_ins_foreign_check_on_constraint( + thr, foreign, &pcur, entry, + &mtr); + if (err != DB_SUCCESS) { + /* Since reporting a plain + "duplicate key" error + message to the user in + cases where a long CASCADE + operation would lead to a + duplicate key in some + other table is very + confusing, map duplicate + key errors resulting from + FK constraints to a + separate error code. */ + + if (err == DB_DUPLICATE_KEY) { + err = DB_FOREIGN_DUPLICATE_KEY; + } + + goto end_scan; + } + + /* row_ins_foreign_check_on_constraint + may have repositioned pcur on a + different block */ + block = btr_pcur_get_block(&pcur); + } else { + row_ins_foreign_report_err( + "Trying to delete or update", + thr, foreign, rec, entry); + + err = DB_ROW_IS_REFERENCED; + goto end_scan; + } + } + } else { + ut_a(cmp < 0); + + err = row_ins_set_shared_rec_lock( + LOCK_GAP, block, + rec, check_index, offsets, thr); + + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + if (check_ref) { + err = DB_NO_REFERENCED_ROW; + row_ins_foreign_report_add_err( + trx, foreign, rec, entry); + } else { + err = DB_SUCCESS; + } + default: + break; + } + + goto end_scan; + } + } while (btr_pcur_move_to_next(&pcur, &mtr)); + + if (check_ref) { + row_ins_foreign_report_add_err( + trx, foreign, btr_pcur_get_rec(&pcur), entry); + err = DB_NO_REFERENCED_ROW; + } else { + err = DB_SUCCESS; + } + +end_scan: + btr_pcur_close(&pcur); + + mtr_commit(&mtr); + + /* Restore old value */ + dtuple_set_n_fields_cmp(entry, n_fields_cmp); + +do_possible_lock_wait: + if (err == DB_LOCK_WAIT) { + bool verified = false; + + trx->error_state = err; + + que_thr_stop_for_mysql(thr); + + lock_wait_suspend_thread(thr); + + if (check_table->to_be_dropped) { + /* The table is being dropped. We shall timeout + this operation */ + err = DB_LOCK_WAIT_TIMEOUT; + goto exit_func; + } + + /* We had temporarily released dict_operation_lock in + above lock sleep wait, now we have the lock again, and + we will need to re-check whether the foreign key has been + dropped. We only need to verify if the table is referenced + table case (check_ref == 0), since MDL lock will prevent + concurrent DDL and DML on the same table */ + if (!check_ref) { + for (dict_foreign_set::iterator it + = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + if (*it == foreign) { + verified = true; + break; + } + } + } else { + verified = true; + } + + if (!verified) { + err = DB_DICT_CHANGED; + } else if (trx->error_state == DB_SUCCESS) { + goto run_again; + } else { + err = trx->error_state; + } + } + +exit_func: + if (UNIV_LIKELY_NULL(heap)) { + mem_heap_free(heap); + } + return(err); +} + +/***************************************************************//** +Checks if foreign key constraints fail for an index entry. If index +is not mentioned in any constraint, this function does nothing, +Otherwise does searches to the indexes of referenced tables and +sets shared locks which lock either the success or the failure of +a constraint. 
+@return DB_SUCCESS or error code */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_check_foreign_constraints( +/*==============================*/ + dict_table_t* table, /*!< in: table */ + dict_index_t* index, /*!< in: index */ + dtuple_t* entry, /*!< in: index entry for index */ + que_thr_t* thr) /*!< in: query thread */ +{ + dict_foreign_t* foreign; + dberr_t err; + trx_t* trx; + ibool got_s_lock = FALSE; + + trx = thr_get_trx(thr); + + DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd, + "foreign_constraint_check_for_ins"); + + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + + foreign = *it; + + if (foreign->foreign_index == index) { + dict_table_t* ref_table = NULL; + dict_table_t* foreign_table = foreign->foreign_table; + dict_table_t* referenced_table + = foreign->referenced_table; + + if (referenced_table == NULL) { + + ref_table = dict_table_open_on_name( + foreign->referenced_table_name_lookup, + FALSE, FALSE, DICT_ERR_IGNORE_NONE); + } + + if (0 == trx->dict_operation_lock_mode) { + got_s_lock = TRUE; + + row_mysql_freeze_data_dictionary(trx); + } + + if (referenced_table) { + os_inc_counter(dict_sys->mutex, + foreign_table + ->n_foreign_key_checks_running); + } + + /* NOTE that if the thread ends up waiting for a lock + we will release dict_operation_lock temporarily! + But the counter on the table protects the referenced + table from being dropped while the check is running. */ + + err = row_ins_check_foreign_constraint( + TRUE, foreign, table, entry, thr); + + DBUG_EXECUTE_IF("row_ins_dict_change_err", + err = DB_DICT_CHANGED;); + + if (referenced_table) { + os_dec_counter(dict_sys->mutex, + foreign_table + ->n_foreign_key_checks_running); + } + + if (got_s_lock) { + row_mysql_unfreeze_data_dictionary(trx); + } + + if (ref_table != NULL) { + dict_table_close(ref_table, FALSE, FALSE); + } + + if (err != DB_SUCCESS) { + + return(err); + } + } + } + + return(DB_SUCCESS); +} + +/***************************************************************//** +Checks if a unique key violation to rec would occur at the index entry +insert. +@return TRUE if error */ +static +ibool +row_ins_dupl_error_with_rec( +/*========================*/ + const rec_t* rec, /*!< in: user record; NOTE that we assume + that the caller already has a record lock on + the record! */ + const dtuple_t* entry, /*!< in: entry to insert */ + dict_index_t* index, /*!< in: index */ + const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ +{ + ulint matched_fields; + ulint matched_bytes; + ulint n_unique; + ulint i; + + ut_ad(rec_offs_validate(rec, index, offsets)); + + n_unique = dict_index_get_n_unique(index); + + matched_fields = 0; + matched_bytes = 0; + + cmp_dtuple_rec_with_match(entry, rec, offsets, + &matched_fields, &matched_bytes); + + if (matched_fields < n_unique) { + + return(FALSE); + } + + /* In a unique secondary index we allow equal key values if they + contain SQL NULLs */ + + if (!dict_index_is_clust(index)) { + + for (i = 0; i < n_unique; i++) { + if (dfield_is_null(dtuple_get_nth_field(entry, i))) { + + return(FALSE); + } + } + } + + return(!rec_get_deleted_flag(rec, rec_offs_comp(offsets))); +} + +/***************************************************************//** +Scans a unique non-clustered index at a given index entry to determine +whether a uniqueness violation has occurred for the key value of the entry. +Set shared locks on possible duplicate records. 
+@return DB_SUCCESS, DB_DUPLICATE_KEY, or DB_LOCK_WAIT */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_scan_sec_index_for_duplicate( +/*=================================*/ + ulint flags, /*!< in: undo logging and locking flags */ + dict_index_t* index, /*!< in: non-clustered unique index */ + dtuple_t* entry, /*!< in: index entry */ + que_thr_t* thr, /*!< in: query thread */ + bool s_latch,/*!< in: whether index->lock is being held */ + mtr_t* mtr, /*!< in/out: mini-transaction */ + mem_heap_t* offsets_heap) + /*!< in/out: memory heap that can be emptied */ +{ + ulint n_unique; + int cmp; + ulint n_fields_cmp; + btr_pcur_t pcur; + dberr_t err = DB_SUCCESS; + ulint allow_duplicates; + ulint* offsets = NULL; + +#ifdef UNIV_SYNC_DEBUG + ut_ad(s_latch == rw_lock_own(&index->lock, RW_LOCK_SHARED)); +#endif /* UNIV_SYNC_DEBUG */ + + n_unique = dict_index_get_n_unique(index); + + /* If the secondary index is unique, but one of the fields in the + n_unique first fields is NULL, a unique key violation cannot occur, + since we define NULL != NULL in this case */ + + for (ulint i = 0; i < n_unique; i++) { + if (UNIV_SQL_NULL == dfield_get_len( + dtuple_get_nth_field(entry, i))) { + + return(DB_SUCCESS); + } + } + + /* Store old value on n_fields_cmp */ + + n_fields_cmp = dtuple_get_n_fields_cmp(entry); + + dtuple_set_n_fields_cmp(entry, n_unique); + + btr_pcur_open(index, entry, PAGE_CUR_GE, + s_latch + ? BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED + : BTR_SEARCH_LEAF, + &pcur, mtr); + + allow_duplicates = thr_get_trx(thr)->duplicates; + + /* Scan index records and check if there is a duplicate */ + + do { + const rec_t* rec = btr_pcur_get_rec(&pcur); + const buf_block_t* block = btr_pcur_get_block(&pcur); + const ulint lock_type = LOCK_ORDINARY; + + if (page_rec_is_infimum(rec)) { + + continue; + } + + offsets = rec_get_offsets(rec, index, offsets, + ULINT_UNDEFINED, &offsets_heap); + + if (flags & BTR_NO_LOCKING_FLAG) { + /* Set no locks when applying log + in online table rebuild. */ + } else if (allow_duplicates) { + + /* If the SQL-query will update or replace + duplicate key we will take X-lock for + duplicates ( REPLACE, LOAD DATAFILE REPLACE, + INSERT ON DUPLICATE KEY UPDATE). */ + + err = row_ins_set_exclusive_rec_lock( + lock_type, block, rec, index, offsets, thr); + } else { + + err = row_ins_set_shared_rec_lock( + lock_type, block, rec, index, offsets, thr); + } + + switch (err) { + case DB_SUCCESS_LOCKED_REC: + err = DB_SUCCESS; + case DB_SUCCESS: + break; + default: + goto end_scan; + } + + if (page_rec_is_supremum(rec)) { + + continue; + } + + cmp = cmp_dtuple_rec(entry, rec, offsets); + + if (cmp == 0) { + if (row_ins_dupl_error_with_rec(rec, entry, + index, offsets)) { + err = DB_DUPLICATE_KEY; + + thr_get_trx(thr)->error_info = index; + + /* If the duplicate is on hidden FTS_DOC_ID, + state so in the error log */ + if (DICT_TF2_FLAG_IS_SET( + index->table, + DICT_TF2_FTS_HAS_DOC_ID) + && strcmp(index->name, + FTS_DOC_ID_INDEX_NAME) == 0) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Duplicate FTS_DOC_ID value" + " on table %s", + index->table->name); + } + + goto end_scan; + } + } else { + ut_a(cmp < 0); + goto end_scan; + } + } while (btr_pcur_move_to_next(&pcur, mtr)); + +end_scan: + /* Restore old value */ + dtuple_set_n_fields_cmp(entry, n_fields_cmp); + + return(err); +} + +/** Checks for a duplicate when the table is being rebuilt online. 
+@retval DB_SUCCESS when no duplicate is detected +@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or +a newer version of entry (the entry should not be inserted) +@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_duplicate_online( +/*=====================*/ + ulint n_uniq, /*!< in: offset of DB_TRX_ID */ + const dtuple_t* entry, /*!< in: entry that is being inserted */ + const rec_t* rec, /*!< in: clustered index record */ + ulint* offsets)/*!< in/out: rec_get_offsets(rec) */ +{ + ulint fields = 0; + ulint bytes = 0; + + /* During rebuild, there should not be any delete-marked rows + in the new table. */ + ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets))); + ut_ad(dtuple_get_n_fields_cmp(entry) == n_uniq); + + /* Compare the PRIMARY KEY fields and the + DB_TRX_ID, DB_ROLL_PTR. */ + cmp_dtuple_rec_with_match_low( + entry, rec, offsets, n_uniq + 2, &fields, &bytes); + + if (fields < n_uniq) { + /* Not a duplicate. */ + return(DB_SUCCESS); + } + + if (fields == n_uniq + 2) { + /* rec is an exact match of entry. */ + ut_ad(bytes == 0); + return(DB_SUCCESS_LOCKED_REC); + } + + return(DB_DUPLICATE_KEY); +} + +/** Checks for a duplicate when the table is being rebuilt online. +@retval DB_SUCCESS when no duplicate is detected +@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or +a newer version of entry (the entry should not be inserted) +@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_duplicate_error_in_clust_online( +/*====================================*/ + ulint n_uniq, /*!< in: offset of DB_TRX_ID */ + const dtuple_t* entry, /*!< in: entry that is being inserted */ + const btr_cur_t*cursor, /*!< in: cursor on insert position */ + ulint** offsets,/*!< in/out: rec_get_offsets(rec) */ + mem_heap_t** heap) /*!< in/out: heap for offsets */ +{ + dberr_t err = DB_SUCCESS; + const rec_t* rec = btr_cur_get_rec(cursor); + + if (cursor->low_match >= n_uniq && !page_rec_is_infimum(rec)) { + *offsets = rec_get_offsets(rec, cursor->index, *offsets, + ULINT_UNDEFINED, heap); + err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets); + if (err != DB_SUCCESS) { + return(err); + } + } + + rec = page_rec_get_next_const(btr_cur_get_rec(cursor)); + + if (cursor->up_match >= n_uniq && !page_rec_is_supremum(rec)) { + *offsets = rec_get_offsets(rec, cursor->index, *offsets, + ULINT_UNDEFINED, heap); + err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets); + } + + return(err); +} + +/***************************************************************//** +Checks if a unique key violation error would occur at an index entry +insert. Sets shared locks on possible duplicate records. Works only +for a clustered index! 
+@retval DB_SUCCESS if no error +@retval DB_DUPLICATE_KEY if error, +@retval DB_LOCK_WAIT if we have to wait for a lock on a possible duplicate +record */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_duplicate_error_in_clust( + btr_cur_t* cursor, /*!< in: B-tree cursor */ + const dtuple_t* entry, /*!< in: entry to insert */ + que_thr_t* thr, /*!< in: query thread */ + mtr_t* mtr) /*!< in: mtr */ +{ + dberr_t err; + rec_t* rec; + ulint n_unique; + trx_t* trx = thr_get_trx(thr); + mem_heap_t*heap = NULL; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + rec_offs_init(offsets_); + + UT_NOT_USED(mtr); + + ut_ad(dict_index_is_clust(cursor->index)); + + /* NOTE: For unique non-clustered indexes there may be any number + of delete marked records with the same value for the non-clustered + index key (remember multiversioning), and which differ only in + the row refererence part of the index record, containing the + clustered index key fields. For such a secondary index record, + to avoid race condition, we must FIRST do the insertion and after + that check that the uniqueness condition is not breached! */ + + /* NOTE: A problem is that in the B-tree node pointers on an + upper level may match more to the entry than the actual existing + user records on the leaf level. So, even if low_match would suggest + that a duplicate key violation may occur, this may not be the case. */ + + n_unique = dict_index_get_n_unique(cursor->index); + + if (cursor->low_match >= n_unique) { + + rec = btr_cur_get_rec(cursor); + + if (!page_rec_is_infimum(rec)) { + offsets = rec_get_offsets(rec, cursor->index, offsets, + ULINT_UNDEFINED, &heap); + + /* We set a lock on the possible duplicate: this + is needed in logical logging of MySQL to make + sure that in roll-forward we get the same duplicate + errors as in original execution */ + + if (trx->duplicates) { + + /* If the SQL-query will update or replace + duplicate key we will take X-lock for + duplicates ( REPLACE, LOAD DATAFILE REPLACE, + INSERT ON DUPLICATE KEY UPDATE). */ + + err = row_ins_set_exclusive_rec_lock( + LOCK_REC_NOT_GAP, + btr_cur_get_block(cursor), + rec, cursor->index, offsets, thr); + } else { + + err = row_ins_set_shared_rec_lock( + LOCK_REC_NOT_GAP, + btr_cur_get_block(cursor), rec, + cursor->index, offsets, thr); + } + + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + break; + default: + goto func_exit; + } + + if (row_ins_dupl_error_with_rec( + rec, entry, cursor->index, offsets)) { +duplicate: + trx->error_info = cursor->index; + err = DB_DUPLICATE_KEY; + goto func_exit; + } + } + } + + if (cursor->up_match >= n_unique) { + + rec = page_rec_get_next(btr_cur_get_rec(cursor)); + + if (!page_rec_is_supremum(rec)) { + offsets = rec_get_offsets(rec, cursor->index, offsets, + ULINT_UNDEFINED, &heap); + + if (trx->duplicates) { + + /* If the SQL-query will update or replace + duplicate key we will take X-lock for + duplicates ( REPLACE, LOAD DATAFILE REPLACE, + INSERT ON DUPLICATE KEY UPDATE). 
*/ + + err = row_ins_set_exclusive_rec_lock( + LOCK_REC_NOT_GAP, + btr_cur_get_block(cursor), + rec, cursor->index, offsets, thr); + } else { + + err = row_ins_set_shared_rec_lock( + LOCK_REC_NOT_GAP, + btr_cur_get_block(cursor), + rec, cursor->index, offsets, thr); + } + + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + break; + default: + goto func_exit; + } + + if (row_ins_dupl_error_with_rec( + rec, entry, cursor->index, offsets)) { + goto duplicate; + } + } + + /* This should never happen */ + ut_error; + } + + err = DB_SUCCESS; +func_exit: + if (UNIV_LIKELY_NULL(heap)) { + mem_heap_free(heap); + } + return(err); +} + +/***************************************************************//** +Checks if an index entry has long enough common prefix with an +existing record so that the intended insert of the entry must be +changed to a modify of the existing record. In the case of a clustered +index, the prefix must be n_unique fields long. In the case of a +secondary index, all fields must be equal. InnoDB never updates +secondary index records in place, other than clearing or setting the +delete-mark flag. We could be able to update the non-unique fields +of a unique secondary index record by checking the cursor->up_match, +but we do not do so, because it could have some locking implications. +@return TRUE if the existing record should be updated; FALSE if not */ +UNIV_INLINE +ibool +row_ins_must_modify_rec( +/*====================*/ + const btr_cur_t* cursor) /*!< in: B-tree cursor */ +{ + /* NOTE: (compare to the note in row_ins_duplicate_error_in_clust) + Because node pointers on upper levels of the B-tree may match more + to entry than to actual user records on the leaf level, we + have to check if the candidate record is actually a user record. + A clustered index node pointer contains index->n_unique first fields, + and a secondary index node pointer contains all index fields. */ + + return(cursor->low_match + >= dict_index_get_n_unique_in_tree(cursor->index) + && !page_rec_is_infimum(btr_cur_get_rec(cursor))); +} + +/***************************************************************//** +Tries to insert an entry into a clustered index, ignoring foreign key +constraints. If a record with the same unique key is found, the other +record is necessarily marked deleted by a committed transaction, or a +unique key violation error occurs. The delete marked record is then +updated to an existing record, and we must write an undo log record on +the delete marked record. 
+@retval DB_SUCCESS on success +@retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG) +@retval DB_FAIL if retry with BTR_MODIFY_TREE is needed +@return error code */ +UNIV_INTERN +dberr_t +row_ins_clust_index_entry_low( +/*==========================*/ + ulint flags, /*!< in: undo logging and locking flags */ + ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE, + depending on whether we wish optimistic or + pessimistic descent down the index tree */ + dict_index_t* index, /*!< in: clustered index */ + ulint n_uniq, /*!< in: 0 or index->n_uniq */ + dtuple_t* entry, /*!< in/out: index entry to insert */ + ulint n_ext, /*!< in: number of externally stored columns */ + que_thr_t* thr) /*!< in: query thread */ +{ + btr_cur_t cursor; + ulint* offsets = NULL; + dberr_t err; + big_rec_t* big_rec = NULL; + mtr_t mtr; + mem_heap_t* offsets_heap = NULL; + + ut_ad(dict_index_is_clust(index)); + ut_ad(!dict_index_is_unique(index) + || n_uniq == dict_index_get_n_unique(index)); + ut_ad(!n_uniq || n_uniq == dict_index_get_n_unique(index)); + + mtr_start(&mtr); + + if (mode == BTR_MODIFY_LEAF && dict_index_is_online_ddl(index)) { + mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED; + mtr_s_lock(dict_index_get_lock(index), &mtr); + } + + cursor.thr = thr; + + /* Note that we use PAGE_CUR_LE as the search mode, because then + the function will return in both low_match and up_match of the + cursor sensible values */ + + btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE, mode, + &cursor, 0, __FILE__, __LINE__, &mtr); + +#ifdef UNIV_DEBUG + { + page_t* page = btr_cur_get_page(&cursor); + rec_t* first_rec = page_rec_get_next( + page_get_infimum_rec(page)); + + ut_ad(page_rec_is_supremum(first_rec) + || rec_get_n_fields(first_rec, index) + == dtuple_get_n_fields(entry)); + } +#endif + + if (n_uniq && (cursor.up_match >= n_uniq + || cursor.low_match >= n_uniq)) { + + if (flags + == (BTR_CREATE_FLAG | BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG)) { + /* Set no locks when applying log + in online table rebuild. Only check for duplicates. */ + err = row_ins_duplicate_error_in_clust_online( + n_uniq, entry, &cursor, + &offsets, &offsets_heap); + + switch (err) { + case DB_SUCCESS: + break; + default: + ut_ad(0); + /* fall through */ + case DB_SUCCESS_LOCKED_REC: + case DB_DUPLICATE_KEY: + thr_get_trx(thr)->error_info = cursor.index; + } + } else { + /* Note that the following may return also + DB_LOCK_WAIT */ + + err = row_ins_duplicate_error_in_clust( + &cursor, entry, thr, &mtr); + } + + if (err != DB_SUCCESS) { +err_exit: + mtr_commit(&mtr); + goto func_exit; + } + } + + if (row_ins_must_modify_rec(&cursor)) { + /* There is already an index entry with a long enough common + prefix, we must convert the insert into a modify of an + existing record */ + mem_heap_t* entry_heap = mem_heap_create(1024); + + err = row_ins_clust_index_entry_by_modify( + flags, mode, &cursor, &offsets, &offsets_heap, + entry_heap, &big_rec, entry, thr, &mtr); + + rec_t* rec = btr_cur_get_rec(&cursor); + + if (big_rec) { + ut_a(err == DB_SUCCESS); + /* Write out the externally stored + columns while still x-latching + index->lock and block->lock. Allocate + pages for big_rec in the mtr that + modified the B-tree, but be sure to skip + any pages that were freed in mtr. We will + write out the big_rec pages before + committing the B-tree mini-transaction. 
If + the system crashes so that crash recovery + will not replay the mtr_commit(&mtr), the + big_rec pages will be left orphaned until + the pages are allocated for something else. + + TODO: If the allocation extends the + tablespace, it will not be redo + logged, in either mini-transaction. + Tablespace extension should be + redo-logged in the big_rec + mini-transaction, so that recovery + will not fail when the big_rec was + written to the extended portion of the + file, in case the file was somehow + truncated in the crash. */ + + DEBUG_SYNC_C_IF_THD( + thr_get_trx(thr)->mysql_thd, + "before_row_ins_upd_extern"); + err = btr_store_big_rec_extern_fields( + index, btr_cur_get_block(&cursor), + rec, offsets, big_rec, &mtr, + BTR_STORE_INSERT_UPDATE); + DEBUG_SYNC_C_IF_THD( + thr_get_trx(thr)->mysql_thd, + "after_row_ins_upd_extern"); + /* If writing big_rec fails (for + example, because of DB_OUT_OF_FILE_SPACE), + the record will be corrupted. Even if + we did not update any externally + stored columns, our update could cause + the record to grow so that a + non-updated column was selected for + external storage. This non-update + would not have been written to the + undo log, and thus the record cannot + be rolled back. + + However, because we have not executed + mtr_commit(mtr) yet, the update will + not be replayed in crash recovery, and + the following assertion failure will + effectively "roll back" the operation. */ + ut_a(err == DB_SUCCESS); + dtuple_big_rec_free(big_rec); + } + + if (err == DB_SUCCESS && dict_index_is_online_ddl(index)) { + row_log_table_insert(rec, index, offsets); + } + + mtr_commit(&mtr); + mem_heap_free(entry_heap); + } else { + rec_t* insert_rec; + + if (mode != BTR_MODIFY_TREE) { + ut_ad((mode & ~BTR_ALREADY_S_LATCHED) + == BTR_MODIFY_LEAF); + err = btr_cur_optimistic_insert( + flags, &cursor, &offsets, &offsets_heap, + entry, &insert_rec, &big_rec, + n_ext, thr, &mtr); + } else { + if (buf_LRU_buf_pool_running_out()) { + + err = DB_LOCK_TABLE_FULL; + goto err_exit; + } + + err = btr_cur_optimistic_insert( + flags, &cursor, + &offsets, &offsets_heap, + entry, &insert_rec, &big_rec, + n_ext, thr, &mtr); + + if (err == DB_FAIL) { + err = btr_cur_pessimistic_insert( + flags, &cursor, + &offsets, &offsets_heap, + entry, &insert_rec, &big_rec, + n_ext, thr, &mtr); + } + } + + if (UNIV_LIKELY_NULL(big_rec)) { + mtr_commit(&mtr); + + /* Online table rebuild could read (and + ignore) the incomplete record at this point. + If online rebuild is in progress, the + row_ins_index_entry_big_rec() will write log. */ + + DBUG_EXECUTE_IF( + "row_ins_extern_checkpoint", + log_make_checkpoint_at( + LSN_MAX, TRUE);); + err = row_ins_index_entry_big_rec( + entry, big_rec, offsets, &offsets_heap, index, + thr_get_trx(thr)->mysql_thd, + __FILE__, __LINE__); + dtuple_convert_back_big_rec(index, entry, big_rec); + } else { + if (err == DB_SUCCESS + && dict_index_is_online_ddl(index)) { + row_log_table_insert( + insert_rec, index, offsets); + } + + mtr_commit(&mtr); + } + } + +func_exit: + if (offsets_heap) { + mem_heap_free(offsets_heap); + } + + return(err); +} + +/***************************************************************//** +Starts a mini-transaction and checks if the index will be dropped. 
+@return true if the index is to be dropped */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +bool +row_ins_sec_mtr_start_and_check_if_aborted( +/*=======================================*/ + mtr_t* mtr, /*!< out: mini-transaction */ + dict_index_t* index, /*!< in/out: secondary index */ + bool check, /*!< in: whether to check */ + ulint search_mode) + /*!< in: flags */ +{ + ut_ad(!dict_index_is_clust(index)); + + mtr_start(mtr); + + if (!check) { + return(false); + } + + if (search_mode & BTR_ALREADY_S_LATCHED) { + mtr_s_lock(dict_index_get_lock(index), mtr); + } else { + mtr_x_lock(dict_index_get_lock(index), mtr); + } + + switch (index->online_status) { + case ONLINE_INDEX_ABORTED: + case ONLINE_INDEX_ABORTED_DROPPED: + ut_ad(*index->name == TEMP_INDEX_PREFIX); + return(true); + case ONLINE_INDEX_COMPLETE: + return(false); + case ONLINE_INDEX_CREATION: + break; + } + + ut_error; + return(true); +} + +/***************************************************************//** +Tries to insert an entry into a secondary index. If a record with exactly the +same fields is found, the other record is necessarily marked deleted. +It is then unmarked. Otherwise, the entry is just inserted to the index. +@retval DB_SUCCESS on success +@retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG) +@retval DB_FAIL if retry with BTR_MODIFY_TREE is needed +@return error code */ +UNIV_INTERN +dberr_t +row_ins_sec_index_entry_low( +/*========================*/ + ulint flags, /*!< in: undo logging and locking flags */ + ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE, + depending on whether we wish optimistic or + pessimistic descent down the index tree */ + dict_index_t* index, /*!< in: secondary index */ + mem_heap_t* offsets_heap, + /*!< in/out: memory heap that can be emptied */ + mem_heap_t* heap, /*!< in/out: memory heap */ + dtuple_t* entry, /*!< in/out: index entry to insert */ + trx_id_t trx_id, /*!< in: PAGE_MAX_TRX_ID during + row_log_table_apply(), or 0 */ + que_thr_t* thr) /*!< in: query thread */ +{ + btr_cur_t cursor; + ulint search_mode = mode | BTR_INSERT; + dberr_t err = DB_SUCCESS; + ulint n_unique; + mtr_t mtr; + ulint* offsets = NULL; + + ut_ad(!dict_index_is_clust(index)); + ut_ad(mode == BTR_MODIFY_LEAF || mode == BTR_MODIFY_TREE); + + cursor.thr = thr; + ut_ad(thr_get_trx(thr)->id); + mtr_start(&mtr); + + /* Ensure that we acquire index->lock when inserting into an + index with index->online_status == ONLINE_INDEX_COMPLETE, but + could still be subject to rollback_inplace_alter_table(). + This prevents a concurrent change of index->online_status. + The memory object cannot be freed as long as we have an open + reference to the table, or index->table->n_ref_count > 0. 
*/ + const bool check = *index->name == TEMP_INDEX_PREFIX; + if (check) { + DEBUG_SYNC_C("row_ins_sec_index_enter"); + if (mode == BTR_MODIFY_LEAF) { + search_mode |= BTR_ALREADY_S_LATCHED; + mtr_s_lock(dict_index_get_lock(index), &mtr); + } else { + mtr_x_lock(dict_index_get_lock(index), &mtr); + } + + if (row_log_online_op_try( + index, entry, thr_get_trx(thr)->id)) { + goto func_exit; + } + } + + /* Note that we use PAGE_CUR_LE as the search mode, because then + the function will return in both low_match and up_match of the + cursor sensible values */ + + if (!thr_get_trx(thr)->check_unique_secondary) { + search_mode |= BTR_IGNORE_SEC_UNIQUE; + } + + btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE, + search_mode, + &cursor, 0, __FILE__, __LINE__, &mtr); + + if (cursor.flag == BTR_CUR_INSERT_TO_IBUF) { + /* The insert was buffered during the search: we are done */ + goto func_exit; + } + +#ifdef UNIV_DEBUG + { + page_t* page = btr_cur_get_page(&cursor); + rec_t* first_rec = page_rec_get_next( + page_get_infimum_rec(page)); + + ut_ad(page_rec_is_supremum(first_rec) + || rec_get_n_fields(first_rec, index) + == dtuple_get_n_fields(entry)); + } +#endif + + n_unique = dict_index_get_n_unique(index); + + if (dict_index_is_unique(index) + && (cursor.low_match >= n_unique || cursor.up_match >= n_unique)) { + mtr_commit(&mtr); + + DEBUG_SYNC_C("row_ins_sec_index_unique"); + + if (row_ins_sec_mtr_start_and_check_if_aborted( + &mtr, index, check, search_mode)) { + goto func_exit; + } + + err = row_ins_scan_sec_index_for_duplicate( + flags, index, entry, thr, check, &mtr, offsets_heap); + + mtr_commit(&mtr); + + switch (err) { + case DB_SUCCESS: + break; + case DB_DUPLICATE_KEY: + if (*index->name == TEMP_INDEX_PREFIX) { + ut_ad(!thr_get_trx(thr) + ->dict_operation_lock_mode); + mutex_enter(&dict_sys->mutex); + dict_set_corrupted_index_cache_only( + index, index->table); + mutex_exit(&dict_sys->mutex); + /* Do not return any error to the + caller. The duplicate will be reported + by ALTER TABLE or CREATE UNIQUE INDEX. + Unfortunately we cannot report the + duplicate key value to the DDL thread, + because the altered_table object is + private to its call stack. */ + err = DB_SUCCESS; + } + /* fall through */ + default: + return(err); + } + + if (row_ins_sec_mtr_start_and_check_if_aborted( + &mtr, index, check, search_mode)) { + goto func_exit; + } + + DEBUG_SYNC_C("row_ins_sec_index_entry_dup_locks_created"); + + /* We did not find a duplicate and we have now + locked with s-locks the necessary records to + prevent any insertion of a duplicate by another + transaction. Let us now reposition the cursor and + continue the insertion. 
*/ + + btr_cur_search_to_nth_level( + index, 0, entry, PAGE_CUR_LE, + search_mode & ~(BTR_INSERT | BTR_IGNORE_SEC_UNIQUE), + &cursor, 0, __FILE__, __LINE__, &mtr); + } + + if (row_ins_must_modify_rec(&cursor)) { + /* There is already an index entry with a long enough common + prefix, we must convert the insert into a modify of an + existing record */ + offsets = rec_get_offsets( + btr_cur_get_rec(&cursor), index, offsets, + ULINT_UNDEFINED, &offsets_heap); + + err = row_ins_sec_index_entry_by_modify( + flags, mode, &cursor, &offsets, + offsets_heap, heap, entry, thr, &mtr); + } else { + rec_t* insert_rec; + big_rec_t* big_rec; + + if (mode == BTR_MODIFY_LEAF) { + err = btr_cur_optimistic_insert( + flags, &cursor, &offsets, &offsets_heap, + entry, &insert_rec, + &big_rec, 0, thr, &mtr); + } else { + ut_ad(mode == BTR_MODIFY_TREE); + if (buf_LRU_buf_pool_running_out()) { + + err = DB_LOCK_TABLE_FULL; + goto func_exit; + } + + err = btr_cur_optimistic_insert( + flags, &cursor, + &offsets, &offsets_heap, + entry, &insert_rec, + &big_rec, 0, thr, &mtr); + if (err == DB_FAIL) { + err = btr_cur_pessimistic_insert( + flags, &cursor, + &offsets, &offsets_heap, + entry, &insert_rec, + &big_rec, 0, thr, &mtr); + } + } + + if (err == DB_SUCCESS && trx_id) { + page_update_max_trx_id( + btr_cur_get_block(&cursor), + btr_cur_get_page_zip(&cursor), + trx_id, &mtr); + } + + ut_ad(!big_rec); + } + +func_exit: + mtr_commit(&mtr); + return(err); +} + +/***************************************************************//** +Tries to insert the externally stored fields (off-page columns) +of a clustered index entry. +@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ +UNIV_INTERN +dberr_t +row_ins_index_entry_big_rec_func( +/*=============================*/ + const dtuple_t* entry, /*!< in/out: index entry to insert */ + const big_rec_t* big_rec,/*!< in: externally stored fields */ + ulint* offsets,/*!< in/out: rec offsets */ + mem_heap_t** heap, /*!< in/out: memory heap */ + dict_index_t* index, /*!< in: index */ + const char* file, /*!< in: file name of caller */ +#ifndef DBUG_OFF + const void* thd, /*!< in: connection, or NULL */ +#endif /* DBUG_OFF */ + ulint line) /*!< in: line number of caller */ +{ + mtr_t mtr; + btr_cur_t cursor; + rec_t* rec; + dberr_t error; + + ut_ad(dict_index_is_clust(index)); + + DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern_latch"); + + mtr_start(&mtr); + btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE, + BTR_MODIFY_TREE, &cursor, 0, + file, line, &mtr); + rec = btr_cur_get_rec(&cursor); + offsets = rec_get_offsets(rec, index, offsets, + ULINT_UNDEFINED, heap); + + DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern"); + error = btr_store_big_rec_extern_fields( + index, btr_cur_get_block(&cursor), + rec, offsets, big_rec, &mtr, BTR_STORE_INSERT); + DEBUG_SYNC_C_IF_THD(thd, "after_row_ins_extern"); + + if (error == DB_SUCCESS + && dict_index_is_online_ddl(index)) { + row_log_table_insert(rec, index, offsets); + } + + mtr_commit(&mtr); + + return(error); +} + +/***************************************************************//** +Inserts an entry into a clustered index. Tries first optimistic, +then pessimistic descent down the tree. If the entry matches enough +to a delete marked record, performs the insert by updating or delete +unmarking the delete marked record. 
+@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */ +UNIV_INTERN +dberr_t +row_ins_clust_index_entry( +/*======================*/ + dict_index_t* index, /*!< in: clustered index */ + dtuple_t* entry, /*!< in/out: index entry to insert */ + que_thr_t* thr, /*!< in: query thread */ + ulint n_ext) /*!< in: number of externally stored columns */ +{ + dberr_t err; + ulint n_uniq; + + if (!index->table->foreign_set.empty()) { + err = row_ins_check_foreign_constraints( + index->table, index, entry, thr); + if (err != DB_SUCCESS) { + + return(err); + } + } + + n_uniq = dict_index_is_unique(index) ? index->n_uniq : 0; + + /* Try first optimistic descent to the B-tree */ + + log_free_check(); + + err = row_ins_clust_index_entry_low( + 0, BTR_MODIFY_LEAF, index, n_uniq, entry, n_ext, thr); + +#ifdef UNIV_DEBUG + /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC(). + Once it is fixed, remove the 'ifdef', 'if' and this comment. */ + if (!thr_get_trx(thr)->ddl) { + DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd, + "after_row_ins_clust_index_entry_leaf"); + } +#endif /* UNIV_DEBUG */ + + if (err != DB_FAIL) { + DEBUG_SYNC_C("row_ins_clust_index_entry_leaf_after"); + return(err); + } + + /* Try then pessimistic descent to the B-tree */ + + log_free_check(); + + return(row_ins_clust_index_entry_low( + 0, BTR_MODIFY_TREE, index, n_uniq, entry, n_ext, thr)); +} + +/***************************************************************//** +Inserts an entry into a secondary index. Tries first optimistic, +then pessimistic descent down the tree. If the entry matches enough +to a delete marked record, performs the insert by updating or delete +unmarking the delete marked record. +@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */ +UNIV_INTERN +dberr_t +row_ins_sec_index_entry( +/*====================*/ + dict_index_t* index, /*!< in: secondary index */ + dtuple_t* entry, /*!< in/out: index entry to insert */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + mem_heap_t* offsets_heap; + mem_heap_t* heap; + + DBUG_EXECUTE_IF("row_ins_sec_index_entry_timeout", { + DBUG_SET("-d,row_ins_sec_index_entry_timeout"); + return(DB_LOCK_WAIT);}); + + if (!index->table->foreign_set.empty()) { + err = row_ins_check_foreign_constraints(index->table, index, + entry, thr); + if (err != DB_SUCCESS) { + + return(err); + } + } + + ut_ad(thr_get_trx(thr)->id); + + offsets_heap = mem_heap_create(1024); + heap = mem_heap_create(1024); + + /* Try first optimistic descent to the B-tree */ + + log_free_check(); + + err = row_ins_sec_index_entry_low( + 0, BTR_MODIFY_LEAF, index, offsets_heap, heap, entry, 0, thr); + if (err == DB_FAIL) { + mem_heap_empty(heap); + + if (index->space == IBUF_SPACE_ID + && !dict_index_is_unique(index)) { + ibuf_free_excess_pages(); + } + + /* Try then pessimistic descent to the B-tree */ + + log_free_check(); + + err = row_ins_sec_index_entry_low( + 0, BTR_MODIFY_TREE, index, + offsets_heap, heap, entry, 0, thr); + } + + mem_heap_free(heap); + mem_heap_free(offsets_heap); + return(err); +} + +/***************************************************************//** +Inserts an index entry to index. Tries first optimistic, then pessimistic +descent down the tree. If the entry matches enough to a delete marked record, +performs the insert by updating or delete unmarking the delete marked +record. 
+@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */ +static +dberr_t +row_ins_index_entry( +/*================*/ + dict_index_t* index, /*!< in: index */ + dtuple_t* entry, /*!< in/out: index entry to insert */ + que_thr_t* thr) /*!< in: query thread */ +{ + DBUG_EXECUTE_IF("row_ins_index_entry_timeout", { + DBUG_SET("-d,row_ins_index_entry_timeout"); + return(DB_LOCK_WAIT);}); + + if (dict_index_is_clust(index)) { + return(row_ins_clust_index_entry(index, entry, thr, 0)); + } else { + return(row_ins_sec_index_entry(index, entry, thr)); + } +} + +/***********************************************************//** +Sets the values of the dtuple fields in entry from the values of appropriate +columns in row. */ +static MY_ATTRIBUTE((nonnull)) +void +row_ins_index_entry_set_vals( +/*=========================*/ + dict_index_t* index, /*!< in: index */ + dtuple_t* entry, /*!< in: index entry to make */ + const dtuple_t* row) /*!< in: row */ +{ + ulint n_fields; + ulint i; + + n_fields = dtuple_get_n_fields(entry); + + for (i = 0; i < n_fields; i++) { + dict_field_t* ind_field; + dfield_t* field; + const dfield_t* row_field; + ulint len; + + field = dtuple_get_nth_field(entry, i); + ind_field = dict_index_get_nth_field(index, i); + row_field = dtuple_get_nth_field(row, ind_field->col->ind); + len = dfield_get_len(row_field); + + /* Check column prefix indexes */ + if (ind_field->prefix_len > 0 + && dfield_get_len(row_field) != UNIV_SQL_NULL) { + + const dict_col_t* col + = dict_field_get_col(ind_field); + + len = dtype_get_at_most_n_mbchars( + col->prtype, col->mbminlen, col->mbmaxlen, + ind_field->prefix_len, + len, + static_cast<const char*>( + dfield_get_data(row_field))); + + ut_ad(!dfield_is_ext(row_field)); + } + + dfield_set_data(field, dfield_get_data(row_field), len); + if (dfield_is_ext(row_field)) { + ut_ad(dict_index_is_clust(index)); + dfield_set_ext(field); + } + } +} + +/***********************************************************//** +Inserts a single index entry to the table. +@return DB_SUCCESS if operation successfully completed, else error +code or DB_LOCK_WAIT */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_index_entry_step( +/*=====================*/ + ins_node_t* node, /*!< in: row insert node */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + + ut_ad(dtuple_check_typed(node->row)); + + row_ins_index_entry_set_vals(node->index, node->entry, node->row); + + ut_ad(dtuple_check_typed(node->entry)); + + err = row_ins_index_entry(node->index, node->entry, thr); + +#ifdef UNIV_DEBUG + /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC(). + Once it is fixed, remove the 'ifdef', 'if' and this comment. */ + if (!thr_get_trx(thr)->ddl) { + DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd, + "after_row_ins_index_entry_step"); + } +#endif /* UNIV_DEBUG */ + + return(err); +} + +/***********************************************************//** +Allocates a row id for row and inits the node->index field. 
*/ +UNIV_INLINE +void +row_ins_alloc_row_id_step( +/*======================*/ + ins_node_t* node) /*!< in: row insert node */ +{ + row_id_t row_id; + + ut_ad(node->state == INS_NODE_ALLOC_ROW_ID); + + if (dict_index_is_unique(dict_table_get_first_index(node->table))) { + + /* No row id is stored if the clustered index is unique */ + + return; + } + + /* Fill in row id value to row */ + + row_id = dict_sys_get_new_row_id(); + + dict_sys_write_row_id(node->row_id_buf, row_id); +} + +/***********************************************************//** +Gets a row to insert from the values list. */ +UNIV_INLINE +void +row_ins_get_row_from_values( +/*========================*/ + ins_node_t* node) /*!< in: row insert node */ +{ + que_node_t* list_node; + dfield_t* dfield; + dtuple_t* row; + ulint i; + + /* The field values are copied in the buffers of the select node and + it is safe to use them until we fetch from select again: therefore + we can just copy the pointers */ + + row = node->row; + + i = 0; + list_node = node->values_list; + + while (list_node) { + eval_exp(list_node); + + dfield = dtuple_get_nth_field(row, i); + dfield_copy_data(dfield, que_node_get_val(list_node)); + + i++; + list_node = que_node_get_next(list_node); + } +} + +/***********************************************************//** +Gets a row to insert from the select list. */ +UNIV_INLINE +void +row_ins_get_row_from_select( +/*========================*/ + ins_node_t* node) /*!< in: row insert node */ +{ + que_node_t* list_node; + dfield_t* dfield; + dtuple_t* row; + ulint i; + + /* The field values are copied in the buffers of the select node and + it is safe to use them until we fetch from select again: therefore + we can just copy the pointers */ + + row = node->row; + + i = 0; + list_node = node->select->select_list; + + while (list_node) { + dfield = dtuple_get_nth_field(row, i); + dfield_copy_data(dfield, que_node_get_val(list_node)); + + i++; + list_node = que_node_get_next(list_node); + } +} + +/***********************************************************//** +Inserts a row to a table. 
+@return DB_SUCCESS if operation successfully completed, else error +code or DB_LOCK_WAIT */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins( +/*====*/ + ins_node_t* node, /*!< in: row insert node */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + + if (node->state == INS_NODE_ALLOC_ROW_ID) { + + row_ins_alloc_row_id_step(node); + + node->index = dict_table_get_first_index(node->table); + node->entry = UT_LIST_GET_FIRST(node->entry_list); + + if (node->ins_type == INS_SEARCHED) { + + row_ins_get_row_from_select(node); + + } else if (node->ins_type == INS_VALUES) { + + row_ins_get_row_from_values(node); + } + + node->state = INS_NODE_INSERT_ENTRIES; + } + + ut_ad(node->state == INS_NODE_INSERT_ENTRIES); + + while (node->index != NULL) { + if (node->index->type != DICT_FTS) { + err = row_ins_index_entry_step(node, thr); + + if (err != DB_SUCCESS) { + + return(err); + } + } + + node->index = dict_table_get_next_index(node->index); + node->entry = UT_LIST_GET_NEXT(tuple_list, node->entry); + + DBUG_EXECUTE_IF( + "row_ins_skip_sec", + node->index = NULL; node->entry = NULL; break;); + + /* Skip corrupted secondary index and its entry */ + while (node->index && dict_index_is_corrupted(node->index)) { + + node->index = dict_table_get_next_index(node->index); + node->entry = UT_LIST_GET_NEXT(tuple_list, node->entry); + } + } + + ut_ad(node->entry == NULL); + + node->state = INS_NODE_ALLOC_ROW_ID; + + return(DB_SUCCESS); +} + +/***********************************************************//** +Inserts a row to a table. This is a high-level function used in SQL execution +graphs. +@return query thread to run next or NULL */ +UNIV_INTERN +que_thr_t* +row_ins_step( +/*=========*/ + que_thr_t* thr) /*!< in: query thread */ +{ + ins_node_t* node; + que_node_t* parent; + sel_node_t* sel_node; + trx_t* trx; + dberr_t err; + + ut_ad(thr); + + trx = thr_get_trx(thr); + + trx_start_if_not_started_xa(trx); + + node = static_cast<ins_node_t*>(thr->run_node); + + ut_ad(que_node_get_type(node) == QUE_NODE_INSERT); + + parent = que_node_get_parent(node); + sel_node = node->select; + + if (thr->prev_node == parent) { + node->state = INS_NODE_SET_IX_LOCK; + } + + /* If this is the first time this node is executed (or when + execution resumes after wait for the table IX lock), set an + IX lock on the table and reset the possible select node. MySQL's + partitioned table code may also call an insert within the same + SQL statement AFTER it has used this table handle to do a search. + This happens, for example, when a row update moves it to another + partition. In that case, we have already set the IX lock on the + table during the search operation, and there is no need to set + it again here. But we must write trx->id to node->trx_id_buf. 
*/ + + trx_write_trx_id(node->trx_id_buf, trx->id); + + if (node->state == INS_NODE_SET_IX_LOCK) { + + node->state = INS_NODE_ALLOC_ROW_ID; + + /* It may be that the current session has not yet started + its transaction, or it has been committed: */ + + if (trx->id == node->trx_id) { + /* No need to do IX-locking */ + + goto same_trx; + } + + err = lock_table(0, node->table, LOCK_IX, thr); + + DBUG_EXECUTE_IF("ib_row_ins_ix_lock_wait", + err = DB_LOCK_WAIT;); + + if (err != DB_SUCCESS) { + + goto error_handling; + } + + node->trx_id = trx->id; +same_trx: + if (node->ins_type == INS_SEARCHED) { + /* Reset the cursor */ + sel_node->state = SEL_NODE_OPEN; + + /* Fetch a row to insert */ + + thr->run_node = sel_node; + + return(thr); + } + } + + if ((node->ins_type == INS_SEARCHED) + && (sel_node->state != SEL_NODE_FETCH)) { + + ut_ad(sel_node->state == SEL_NODE_NO_MORE_ROWS); + + /* No more rows to insert */ + thr->run_node = parent; + + return(thr); + } + + /* DO THE CHECKS OF THE CONSISTENCY CONSTRAINTS HERE */ + + err = row_ins(node, thr); + +error_handling: + trx->error_state = err; + + if (err != DB_SUCCESS) { + /* err == DB_LOCK_WAIT or SQL error detected */ + return(NULL); + } + + /* DO THE TRIGGER ACTIONS HERE */ + + if (node->ins_type == INS_SEARCHED) { + /* Fetch a row to insert */ + + thr->run_node = sel_node; + } else { + thr->run_node = que_node_get_parent(node); + } + + return(thr); +} diff --cc storage/xtradb/handler/ha_innodb.cc index ca14617acff,fd452a9b170..1c2d084a7fa --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@@ -89,28 -100,16 +89,29 @@@ this program; if not, write to the Fre #include "ha_prototypes.h" #include "ut0mem.h" #include "ibuf0ibuf.h" +#include "dict0dict.h" +#include "srv0mon.h" +#include "api0api.h" +#include "api0misc.h" +#include "pars0pars.h" +#include "fts0fts.h" +#include "fts0types.h" +#include "row0import.h" +#include "row0quiesce.h" +#include "row0mysql.h" +#ifdef UNIV_DEBUG +#include "trx0purge.h" +#endif /* UNIV_DEBUG */ +#include "fts0priv.h" +#include "page0zip.h" ++#include "dict0priv.h" -enum_tx_isolation thd_get_trx_isolation(const THD* thd); +#define thd_get_trx_isolation(X) ((enum_tx_isolation)thd_tx_isolation(X)) -#ifdef WITH_WSREP -#include "../storage/innobase/include/ut0byte.h" -#ifndef EXTRA_DEBUG - //#include "../storage/innobase/include/ut0byte.ic" -#endif /* EXTRA_DEBUG */ -#endif /* WITH_WSREP */ -} +#ifdef MYSQL_DYNAMIC_PLUGIN +#define tc_size 400 +#define tdc_size 400 +#endif #include "ha_innodb.h" #include "i_s.h" @@@ -124,11 -127,9 +125,11 @@@ # endif /* MYSQL_PLUGIN_IMPORT */ #ifdef WITH_WSREP - #include "dict0priv.h" + #include "../../../wsrep/wsrep_api.h" +#include "../storage/innobase/include/ut0byte.h" #include <wsrep_mysqld.h> -#include <my_md5.h> +#include <wsrep_md5.h> + extern my_bool wsrep_certify_nonPK; class binlog_trx_data; extern handlerton *binlog_hton; @@@ -8335,1480 -5151,1403 +8336,1483 @@@ ha_innobase::innobase_reset_autoinc dict_table_autoinc_unlock(prebuilt->table); } - info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); - - DBUG_RETURN(0); + return(error); } +/********************************************************************//** +Store the autoinc value in the table. The autoinc value is only set if +it's greater than the existing autoinc value in the table. 
+@return DB_SUCCESS if all went well else error code */ UNIV_INTERN -handler* -ha_innobase::clone( -/*===============*/ - const char* name, /*!< in: table name */ - MEM_ROOT* mem_root) /*!< in: memory context */ +dberr_t +ha_innobase::innobase_set_max_autoinc( +/*==================================*/ + ulonglong auto_inc) /*!< in: value to store */ { - ha_innobase* new_handler; + dberr_t error; + + error = innobase_lock_autoinc(); + + if (error == DB_SUCCESS) { - DBUG_ENTER("ha_innobase::clone"); + dict_table_autoinc_update_if_greater(prebuilt->table, auto_inc); - new_handler = static_cast<ha_innobase*>(handler::clone(name, - mem_root)); - if (new_handler) { - new_handler->prebuilt->select_lock_type - = prebuilt->select_lock_type; + dict_table_autoinc_unlock(prebuilt->table); } - DBUG_RETURN(new_handler); -} - -UNIV_INTERN -uint -ha_innobase::max_supported_key_part_length() const -{ - /* A table format specific index column length check will be performed - at ha_innobase::add_index() and row_create_index_for_mysql() */ - return(innobase_large_prefix - ? REC_VERSION_56_MAX_INDEX_COL_LEN - : REC_ANTELOPE_MAX_INDEX_COL_LEN - 1); + return(error); } -/******************************************************************//** -Closes a handle to an InnoDB table. -@return 0 */ +/********************************************************************//** +Stores a row in an InnoDB database, to the table specified in this +handle. +@return error code */ UNIV_INTERN int -ha_innobase::close(void) -/*====================*/ +ha_innobase::write_row( +/*===================*/ + uchar* record) /*!< in: a row in MySQL format */ { - THD* thd; + dberr_t error; + int error_result= 0; + ibool auto_inc_used= FALSE; +#ifdef WITH_WSREP + ibool auto_inc_inserted= FALSE; /* if NULL was inserted */ +#endif + ulint sql_command; + trx_t* trx = thd_to_trx(user_thd); - DBUG_ENTER("ha_innobase::close"); + DBUG_ENTER("ha_innobase::write_row"); - thd = ha_thd(); - if (thd != NULL) { - innobase_release_temporary_latches(ht, thd); + if (high_level_read_only) { + ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE); + DBUG_RETURN(HA_ERR_TABLE_READONLY); + } else if (prebuilt->trx != trx) { + sql_print_error("The transaction object for the table handle " + "is at %p, but for the current thread it is at " + "%p", + (const void*) prebuilt->trx, (const void*) trx); + + fputs("InnoDB: Dump of 200 bytes around prebuilt: ", stderr); + ut_print_buf(stderr, ((const byte*) prebuilt) - 100, 200); + fputs("\n" + "InnoDB: Dump of 200 bytes around ha_data: ", + stderr); + ut_print_buf(stderr, ((const byte*) trx) - 100, 200); + putc('\n', stderr); + ut_error; + } else if (!trx_is_started(trx)) { + ++trx->will_lock; } - row_prebuilt_free(prebuilt, FALSE); + ha_statistic_increment(&SSV::ha_write_count); - if (upd_buf != NULL) { - ut_ad(upd_buf_size != 0); - my_free(upd_buf); - upd_buf = NULL; - upd_buf_size = 0; + if (share->ib_table != prebuilt->table) { + fprintf(stderr, + "InnoDB: Warning: share->ib_table %p prebuilt->table %p table %s is_corrupt %lu.", + share->ib_table, prebuilt->table, prebuilt->table->name, prebuilt->table->is_corrupt); } - free_share(share); + if (UNIV_UNLIKELY(share->ib_table && share->ib_table->is_corrupt)) { + DBUG_RETURN(HA_ERR_CRASHED); + } - /* Tell InnoDB server that there might be work for - utility threads: */ + sql_command = thd_sql_command(user_thd); - srv_active_wake_master_thread(); + if ((sql_command == SQLCOM_ALTER_TABLE + || sql_command == SQLCOM_OPTIMIZE + || sql_command == SQLCOM_CREATE_INDEX +#ifdef 
WITH_WSREP + || (wsrep_on(user_thd) && wsrep_load_data_splitting && + sql_command == SQLCOM_LOAD && + !thd_test_options( + user_thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) +#endif /* WITH_WSREP */ + || sql_command == SQLCOM_DROP_INDEX) + && num_write_row >= 10000) { +#ifdef WITH_WSREP + if (wsrep_on(user_thd) && sql_command == SQLCOM_LOAD) { + WSREP_DEBUG("forced trx split for LOAD: %s", + wsrep_thd_query(user_thd)); + } +#endif /* WITH_WSREP */ + /* ALTER TABLE is COMMITted at every 10000 copied rows. + The IX table lock for the original table has to be re-issued. + As this method will be called on a temporary table where the + contents of the original table is being copied to, it is + a bit tricky to determine the source table. The cursor + position in the source table need not be adjusted after the + intermediate COMMIT, since writes by other transactions are + being blocked by a MySQL table lock TL_WRITE_ALLOW_READ. */ - DBUG_RETURN(0); -} + dict_table_t* src_table; + enum lock_mode mode; -/* The following accessor functions should really be inside MySQL code! */ + num_write_row = 0; -/**************************************************************//** -Gets field offset for a field in a table. -@return offset */ -static inline -uint -get_field_offset( -/*=============*/ - const TABLE* table, /*!< in: MySQL table object */ - const Field* field) /*!< in: MySQL field object */ -{ - return((uint) (field->ptr - table->record[0])); -} + /* Commit the transaction. This will release the table + locks, so they have to be acquired again. */ -/**************************************************************//** -Checks if a field in a record is SQL NULL. Uses the record format -information in table to track the null bit in record. -@return 1 if NULL, 0 otherwise */ -static inline -uint -field_in_record_is_null( -/*====================*/ - TABLE* table, /*!< in: MySQL table object */ - Field* field, /*!< in: MySQL field object */ - char* record) /*!< in: a row in MySQL format */ -{ - int null_offset; + /* Altering an InnoDB table */ + /* Get the source table. */ + src_table = lock_get_src_table( + prebuilt->trx, prebuilt->table, &mode); + if (!src_table) { +no_commit: + /* Unknown situation: do not commit */ + /* + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: ALTER TABLE is holding lock" + " on %lu tables!\n", + prebuilt->trx->mysql_n_tables_locked); + */ + ; + } else if (src_table == prebuilt->table) { +#ifdef WITH_WSREP + if (wsrep_on(user_thd) && + wsrep_load_data_splitting && + sql_command == SQLCOM_LOAD && + !thd_test_options(user_thd, + OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + { + switch (wsrep_run_wsrep_commit(user_thd, wsrep_hton, 1)) + { + case WSREP_TRX_OK: + break; + case WSREP_TRX_SIZE_EXCEEDED: + case WSREP_TRX_CERT_FAIL: + case WSREP_TRX_ERROR: + DBUG_RETURN(1); + } - if (!field->null_ptr) { + if (binlog_hton->commit(binlog_hton, user_thd, 1)) + DBUG_RETURN(1); + wsrep_post_commit(user_thd, TRUE); + } +#endif /* WITH_WSREP */ + /* Source table is not in InnoDB format: + no need to re-acquire locks on it. */ - return(0); + /* Altering to InnoDB format */ + innobase_commit(ht, user_thd, 1); + /* Note that this transaction is still active. */ + trx_register_for_2pc(prebuilt->trx); + /* We will need an IX lock on the destination table. 
*/ + prebuilt->sql_stat_start = TRUE; + } else { +#ifdef WITH_WSREP + if (wsrep_on(user_thd) && + wsrep_load_data_splitting && + sql_command == SQLCOM_LOAD && + !thd_test_options(user_thd, + OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + { + switch (wsrep_run_wsrep_commit(user_thd, wsrep_hton, 1)) + { + case WSREP_TRX_OK: + break; + case WSREP_TRX_SIZE_EXCEEDED: + case WSREP_TRX_CERT_FAIL: + case WSREP_TRX_ERROR: + DBUG_RETURN(1); + } + + if (binlog_hton->commit(binlog_hton, user_thd, 1)) + DBUG_RETURN(1); + wsrep_post_commit(user_thd, TRUE); + } +#endif /* WITH_WSREP */ + /* Ensure that there are no other table locks than + LOCK_IX and LOCK_AUTO_INC on the destination table. */ + + if (!lock_is_table_exclusive(prebuilt->table, + prebuilt->trx)) { + goto no_commit; + } + + /* Commit the transaction. This will release the table + locks, so they have to be acquired again. */ + innobase_commit(ht, user_thd, 1); + /* Note that this transaction is still active. */ + trx_register_for_2pc(prebuilt->trx); + /* Re-acquire the table lock on the source table. */ + row_lock_table_for_mysql(prebuilt, src_table, mode); + /* We will need an IX lock on the destination table. */ + prebuilt->sql_stat_start = TRUE; + } } - null_offset = (uint) ((char*) field->null_ptr - - (char*) table->record[0]); + num_write_row++; + + /* This is the case where the table has an auto-increment column */ + if (table->next_number_field && record == table->record[0]) { - if (record[null_offset] & field->null_bit) { + /* Reset the error code before calling + innobase_get_auto_increment(). */ + prebuilt->autoinc_error = DB_SUCCESS; - return(1); +#ifdef WITH_WSREP + auto_inc_inserted= (table->next_number_field->val_int() == 0); +#endif + + if ((error_result = update_auto_increment())) { + /* We don't want to mask autoinc overflow errors. */ + + /* Handle the case where the AUTOINC sub-system + failed during initialization. */ + if (prebuilt->autoinc_error == DB_UNSUPPORTED) { + error_result = ER_AUTOINC_READ_FAILED; + /* Set the error message to report too. */ + my_error(ER_AUTOINC_READ_FAILED, MYF(0)); + goto func_exit; + } else if (prebuilt->autoinc_error != DB_SUCCESS) { + error = prebuilt->autoinc_error; + goto report_error; + } + + /* MySQL errors are passed straight back. except for + ER_AUTOINC_READ_FAILED. This can only happen + for values out of range. + */ + goto func_exit; + } + + auto_inc_used = TRUE; } - return(0); -} + if (prebuilt->mysql_template == NULL + || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) { -/*************************************************************//** -InnoDB uses this function to compare two data fields for which the data type -is such that we must use MySQL code to compare them. NOTE that the prototype -of this function is in rem0cmp.c in InnoDB source code! If you change this -function, remember to update the prototype there! 
-@return 1, 0, -1, if a is greater, equal, less than b, respectively */ -extern "C" UNIV_INTERN -int -innobase_mysql_cmp( -/*===============*/ - int mysql_type, /*!< in: MySQL type */ - uint charset_number, /*!< in: number of the charset */ - const unsigned char* a, /*!< in: data field */ - unsigned int a_length, /*!< in: data field length, - not UNIV_SQL_NULL */ - const unsigned char* b, /*!< in: data field */ - unsigned int b_length) /*!< in: data field length, - not UNIV_SQL_NULL */ -{ - CHARSET_INFO* charset; - enum_field_types mysql_tp; - int ret; + /* Build the template used in converting quickly between + the two database formats */ - DBUG_ASSERT(a_length != UNIV_SQL_NULL); - DBUG_ASSERT(b_length != UNIV_SQL_NULL); + build_template(true); + } - mysql_tp = (enum_field_types) mysql_type; + innobase_srv_conc_enter_innodb(prebuilt->trx); - switch (mysql_tp) { + error = row_insert_for_mysql((byte*) record, prebuilt); + DEBUG_SYNC(user_thd, "ib_after_row_insert"); - case MYSQL_TYPE_BIT: - case MYSQL_TYPE_STRING: - case MYSQL_TYPE_VAR_STRING: - case MYSQL_TYPE_TINY_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_LONG_BLOB: - case MYSQL_TYPE_VARCHAR: - /* Use the charset number to pick the right charset struct for - the comparison. Since the MySQL function get_charset may be - slow before Bar removes the mutex operation there, we first - look at 2 common charsets directly. */ + - if (charset_number == default_charset_info->number) { - charset = default_charset_info; - } else if (charset_number == my_charset_latin1.number) { - charset = &my_charset_latin1; - } else { - charset = get_charset(charset_number, MYF(MY_WME)); + /* Handle duplicate key errors */ + if (auto_inc_used) { + ulonglong auto_inc; + ulonglong col_max_value; - if (charset == NULL) { - sql_print_error("InnoDB needs charset %lu for doing " - "a comparison, but MySQL cannot " - "find that charset.", - (ulong) charset_number); - ut_a(0); - } + /* Note the number of rows processed for this statement, used + by get_auto_increment() to determine the number of AUTO-INC + values to reserve. This is only useful for a mult-value INSERT + and is a statement level counter.*/ + if (trx->n_autoinc_rows > 0) { + --trx->n_autoinc_rows; } - /* Starting from 4.1.3, we use strnncollsp() in comparisons of - non-latin1_swedish_ci strings. NOTE that the collation order - changes then: 'b\0\0...' is ordered BEFORE 'b ...'. Users - having indexes on such data need to rebuild their tables! */ + /* We need the upper limit of the col type to check for + whether we update the table autoinc counter or not. 
*/ + col_max_value = innobase_get_int_col_max_value( + table->next_number_field); - ret = charset->coll->strnncollsp(charset, - a, a_length, - b, b_length, 0); - if (ret < 0) { - return(-1); - } else if (ret > 0) { - return(1); - } else { - return(0); - } - default: - ut_error; - } + /* Get the value that MySQL attempted to store in the table.*/ + auto_inc = table->next_number_field->val_uint(); - return(0); -} -#ifdef WITH_WSREP -extern "C" UNIV_INTERN -int -wsrep_innobase_mysql_sort( -/*===============*/ - /* out: str contains sort string */ - int mysql_type, /* in: MySQL type */ - uint charset_number, /* in: number of the charset */ - unsigned char* str, /* in: data field */ - unsigned int str_length, /* in: data field length, - not UNIV_SQL_NULL */ - unsigned int buf_length) /* in: total str buffer length */ + switch (error) { + case DB_DUPLICATE_KEY: -{ - CHARSET_INFO* charset; - enum_field_types mysql_tp; - int ret_length = str_length; + /* A REPLACE command and LOAD DATA INFILE REPLACE + handle a duplicate key error themselves, but we + must update the autoinc counter if we are performing + those statements. */ - DBUG_ASSERT(str_length != UNIV_SQL_NULL); + switch (sql_command) { + case SQLCOM_LOAD: + if (trx->duplicates) { - mysql_tp = (enum_field_types) mysql_type; + goto set_max_autoinc; + } + break; - switch (mysql_tp) { + case SQLCOM_REPLACE: + case SQLCOM_INSERT_SELECT: + case SQLCOM_REPLACE_SELECT: + goto set_max_autoinc; - case MYSQL_TYPE_BIT: - case MYSQL_TYPE_STRING: - case MYSQL_TYPE_VAR_STRING: - case MYSQL_TYPE_TINY_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_LONG_BLOB: - case MYSQL_TYPE_VARCHAR: - { - uchar tmp_str[REC_VERSION_56_MAX_INDEX_COL_LEN]; - uint tmp_length = REC_VERSION_56_MAX_INDEX_COL_LEN; +#ifdef WITH_WSREP + /* workaround for LP bug #355000, retrying the insert */ + case SQLCOM_INSERT: - /* Use the charset number to pick the right charset struct for - the comparison. Since the MySQL function get_charset may be - slow before Bar removes the mutex operation there, we first - look at 2 common charsets directly. */ + WSREP_DEBUG("DUPKEY error for autoinc\n" + "THD %ld, value %llu, off %llu inc %llu", + wsrep_thd_thread_id(current_thd), + auto_inc, + prebuilt->autoinc_offset, + prebuilt->autoinc_increment); - if (charset_number == default_charset_info->number) { - charset = default_charset_info; - } else if (charset_number == my_charset_latin1.number) { - charset = &my_charset_latin1; - } else { - charset = get_charset(charset_number, MYF(MY_WME)); + if (wsrep_on(current_thd) && + auto_inc_inserted && + wsrep_drupal_282555_workaround && + wsrep_thd_retry_counter(current_thd) == 0 && + !thd_test_options(current_thd, + OPTION_NOT_AUTOCOMMIT | + OPTION_BEGIN)) { + WSREP_DEBUG( + "retrying insert: %s", + (*wsrep_thd_query(current_thd)) ? 
+ wsrep_thd_query(current_thd) : + (char *)"void"); + error= DB_SUCCESS; + wsrep_thd_set_conflict_state( + current_thd, MUST_ABORT); + innobase_srv_conc_exit_innodb(prebuilt->trx); + /* jump straight to func exit over + * later wsrep hooks */ + goto func_exit; + } + break; +#endif /* WITH_WSREP */ - if (charset == NULL) { - sql_print_error("InnoDB needs charset %lu for doing " - "a comparison, but MySQL cannot " - "find that charset.", - (ulong) charset_number); - ut_a(0); + default: + break; } - } - ut_a(str_length <= tmp_length); - memcpy(tmp_str, str, str_length); + break; - if (wsrep_protocol_version < 3) { - tmp_length = charset->coll->strnxfrm( - charset, str, str_length, - tmp_str, str_length); - DBUG_ASSERT(tmp_length <= str_length); - } else { - /* strnxfrm will expand the destination string, - protocols < 3 truncated the sorted sring - protocols > 3 gets full sorted sring - */ - /* 5.5 strnxfrm pads the tail with spaces and - always returns the full destination buffer lenght - we cannot know how many characters were converted - using 2 * str length here as best guess - */ - uint dst_length = (str_length * 2 < tmp_length) ? - (str_length * 2) : tmp_length; - tmp_length = charset->coll->strnxfrm( - charset, str, dst_length, - tmp_str, str_length); - DBUG_ASSERT(tmp_length <= buf_length); - ret_length = tmp_length; - } - - break; - } - case MYSQL_TYPE_DECIMAL : - case MYSQL_TYPE_TINY : - case MYSQL_TYPE_SHORT : - case MYSQL_TYPE_LONG : - case MYSQL_TYPE_FLOAT : - case MYSQL_TYPE_DOUBLE : - case MYSQL_TYPE_NULL : - case MYSQL_TYPE_TIMESTAMP : - case MYSQL_TYPE_LONGLONG : - case MYSQL_TYPE_INT24 : - case MYSQL_TYPE_DATE : - case MYSQL_TYPE_TIME : - case MYSQL_TYPE_DATETIME : - case MYSQL_TYPE_YEAR : - case MYSQL_TYPE_NEWDATE : - case MYSQL_TYPE_NEWDECIMAL : - case MYSQL_TYPE_ENUM : - case MYSQL_TYPE_SET : - case MYSQL_TYPE_GEOMETRY : - break; - default: - break; - } + case DB_SUCCESS: + /* If the actual value inserted is greater than + the upper limit of the interval, then we try and + update the table upper limit. Note: last_value + will be 0 if get_auto_increment() was not called.*/ - return ret_length; -} -#endif // WITH_WSREP -/**************************************************************//** -Converts a MySQL type to an InnoDB type. Note that this function returns -the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1 -VARCHAR and the new true VARCHAR in >= 5.0.3 by the 'prtype'. -@return DATA_BINARY, DATA_VARCHAR, ... */ -extern "C" UNIV_INTERN -ulint -get_innobase_type_from_mysql_type( -/*==============================*/ - ulint* unsigned_flag, /*!< out: DATA_UNSIGNED if an - 'unsigned type'; - at least ENUM and SET, - and unsigned integer - types are 'unsigned types' */ - const void* f) /*!< in: MySQL Field */ -{ - const class Field* field = reinterpret_cast<const class Field*>(f); + if (auto_inc >= prebuilt->autoinc_last_value) { +set_max_autoinc: + /* This should filter out the negative + values set explicitly by the user. 
*/ + if (auto_inc <= col_max_value) { + ut_a(prebuilt->autoinc_increment > 0); - /* The following asserts try to check that the MySQL type code fits in - 8 bits: this is used in ibuf and also when DATA_NOT_NULL is ORed to - the type */ + ulonglong offset; + ulonglong increment; + dberr_t err; - DBUG_ASSERT((ulint)MYSQL_TYPE_STRING < 256); - DBUG_ASSERT((ulint)MYSQL_TYPE_VAR_STRING < 256); - DBUG_ASSERT((ulint)MYSQL_TYPE_DOUBLE < 256); - DBUG_ASSERT((ulint)MYSQL_TYPE_FLOAT < 256); - DBUG_ASSERT((ulint)MYSQL_TYPE_DECIMAL < 256); + offset = prebuilt->autoinc_offset; + increment = prebuilt->autoinc_increment; + + auto_inc = innobase_next_autoinc( + auto_inc, + 1, increment, offset, + col_max_value); + + err = innobase_set_max_autoinc( + auto_inc); + + if (err != DB_SUCCESS) { + error = err; + } + } + } + break; + default: + break; + } + } - if (field->flags & UNSIGNED_FLAG) { + innobase_srv_conc_exit_innodb(prebuilt->trx); - *unsigned_flag = DATA_UNSIGNED; - } else { - *unsigned_flag = 0; +report_error: + if (error == DB_TABLESPACE_DELETED) { + ib_senderrf( + trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_DISCARDED, + table->s->table_name.str); } - if (field->real_type() == MYSQL_TYPE_ENUM - || field->real_type() == MYSQL_TYPE_SET) { + error_result = convert_error_code_to_mysql(error, + prebuilt->table->flags, + user_thd); - /* MySQL has field->type() a string type for these, but the - data is actually internally stored as an unsigned integer - code! */ +#ifdef WITH_WSREP + if (!error_result + && wsrep_on(user_thd) + && wsrep_thd_exec_mode(user_thd) == LOCAL_STATE + && !wsrep_consistency_check(user_thd) + && !wsrep_thd_skip_append_keys(user_thd)) { - if (wsrep_append_keys(user_thd, false, record, NULL)) { ++ if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, record, NULL)) { + DBUG_PRINT("wsrep", ("row key failed")); + error_result = HA_ERR_INTERNAL_ERROR; + goto wsrep_error; + } + } +wsrep_error: +#endif /* WITH_WSREP */ - *unsigned_flag = DATA_UNSIGNED; /* MySQL has its own unsigned - flag set to zero, even though - internally this is an unsigned - integer type */ - return(DATA_INT); + if (error_result == HA_FTS_INVALID_DOCID) { + my_error(HA_FTS_INVALID_DOCID, MYF(0)); } - switch (field->type()) { - /* NOTE that we only allow string types in DATA_MYSQL and - DATA_VARMYSQL */ - case MYSQL_TYPE_VAR_STRING: /* old <= 4.1 VARCHAR */ - case MYSQL_TYPE_VARCHAR: /* new >= 5.0.3 true VARCHAR */ - if (field->binary()) { - return(DATA_BINARY); - } else if (strcmp( - field->charset()->name, - "latin1_swedish_ci") == 0) { - return(DATA_VARCHAR); - } else { - return(DATA_VARMYSQL); - } - case MYSQL_TYPE_BIT: - case MYSQL_TYPE_STRING: if (field->binary()) { +func_exit: + innobase_active_small(); - return(DATA_FIXBINARY); - } else if (strcmp( - field->charset()->name, - "latin1_swedish_ci") == 0) { - return(DATA_CHAR); - } else { - return(DATA_MYSQL); - } - case MYSQL_TYPE_NEWDECIMAL: - return(DATA_FIXBINARY); - case MYSQL_TYPE_LONG: - case MYSQL_TYPE_LONGLONG: - case MYSQL_TYPE_TINY: - case MYSQL_TYPE_SHORT: - case MYSQL_TYPE_INT24: - case MYSQL_TYPE_DATE: - case MYSQL_TYPE_YEAR: - case MYSQL_TYPE_NEWDATE: - return(DATA_INT); + if (share->ib_table != prebuilt->table) { + fprintf(stderr, + "InnoDB: Warning: share->ib_table %p prebuilt->table %p table %s is_corrupt %lu.", + share->ib_table, prebuilt->table, prebuilt->table->name, prebuilt->table->is_corrupt); + } - case MYSQL_TYPE_TIME: - case MYSQL_TYPE_DATETIME: - case MYSQL_TYPE_TIMESTAMP: - /* - XtraDB should ideally just check field->keytype() 
and never - field->type(). The following check is here to only - change the new hires datetime/timestamp/time fields to - use DATA_FIXBINARY. We can't convert this function to - just test for field->keytype() as then the check if a - table is compatible will fail for old tables. - */ - if (field->key_type() == HA_KEYTYPE_BINARY) - return(DATA_FIXBINARY); - return(DATA_INT); - case MYSQL_TYPE_FLOAT: - return(DATA_FLOAT); - case MYSQL_TYPE_DOUBLE: - return(DATA_DOUBLE); - case MYSQL_TYPE_DECIMAL: - return(DATA_DECIMAL); - case MYSQL_TYPE_GEOMETRY: - case MYSQL_TYPE_TINY_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_LONG_BLOB: - return(DATA_BLOB); - case MYSQL_TYPE_NULL: - return(DATA_FIXBINARY); - default: - ut_error; + if (UNIV_UNLIKELY(share->ib_table && share->ib_table->is_corrupt)) { + DBUG_RETURN(HA_ERR_CRASHED); } - return(0); + DBUG_RETURN(error_result); } -/*******************************************************************//** -Writes an unsigned integer value < 64k to 2 bytes, in the little-endian -storage format. */ -static inline -void -innobase_write_to_2_little_endian( -/*==============================*/ - byte* buf, /*!< in: where to store */ - ulint val) /*!< in: value to write, must be < 64k */ +/**********************************************************************//** +Checks which fields have changed in a row and stores information +of them to an update vector. +@return DB_SUCCESS or error code */ +static +dberr_t +calc_row_difference( +/*================*/ + upd_t* uvect, /*!< in/out: update vector */ + uchar* old_row, /*!< in: old row in MySQL format */ + uchar* new_row, /*!< in: new row in MySQL format */ + TABLE* table, /*!< in: table in MySQL data + dictionary */ + uchar* upd_buff, /*!< in: buffer to use */ + ulint buff_len, /*!< in: buffer length */ + row_prebuilt_t* prebuilt, /*!< in: InnoDB prebuilt struct */ + THD* thd) /*!< in: user thread */ { - ut_a(val < 256 * 256); + uchar* original_upd_buff = upd_buff; + Field* field; + enum_field_types field_mysql_type; + uint n_fields; + ulint o_len; + ulint n_len; + ulint col_pack_len; + const byte* new_mysql_row_col; + const byte* o_ptr; + const byte* n_ptr; + byte* buf; + upd_field_t* ufield; + ulint col_type; + ulint n_changed = 0; + dfield_t dfield; + dict_index_t* clust_index; + uint sql_idx, innodb_idx= 0; + ibool changes_fts_column = FALSE; + ibool changes_fts_doc_col = FALSE; + trx_t* trx = thd_to_trx(thd); + doc_id_t doc_id = FTS_NULL_DOC_ID; - buf[0] = (byte)(val & 0xFF); - buf[1] = (byte)(val / 256); -} + ut_ad(!srv_read_only_mode); -/*******************************************************************//** -Reads an unsigned integer value < 64k from 2 bytes, in the little-endian -storage format. -@return value */ -static inline -uint -innobase_read_from_2_little_endian( -/*===============================*/ - const uchar* buf) /*!< in: from where to read */ -{ - return (uint) ((ulint)(buf[0]) + 256 * ((ulint)(buf[1]))); -} + n_fields = table->s->fields; + clust_index = dict_table_get_first_index(prebuilt->table); -/*******************************************************************//** -Stores a key value for a row to a buffer. 
-@return key value length as stored in buff */ -#ifdef WITH_WSREP -UNIV_INTERN -uint -wsrep_store_key_val_for_row( -/*===============================*/ - TABLE* table, - uint keynr, /*!< in: key number */ - char* buff, /*!< in/out: buffer for the key value (in MySQL - format) */ - uint buff_len,/*!< in: buffer length */ - const uchar* record, - ibool* key_is_null)/*!< out: full key was null */ -{ - KEY* key_info = table->key_info + keynr; - KEY_PART_INFO* key_part = key_info->key_part; - KEY_PART_INFO* end = key_part + key_info->key_parts; - char* buff_start = buff; - enum_field_types mysql_type; - Field* field; - - DBUG_ENTER("store_key_val_for_row"); + /* We use upd_buff to convert changed fields */ + buf = (byte*) upd_buff; - bzero(buff, buff_len); - *key_is_null = TRUE; + for (sql_idx = 0; sql_idx < n_fields; sql_idx++) { + field = table->field[sql_idx]; + if (!field->stored_in_db) + continue; - for (; key_part != end; key_part++) { - uchar sorted[REC_VERSION_56_MAX_INDEX_COL_LEN] = {'\0'}; - ibool part_is_null = FALSE; + o_ptr = (const byte*) old_row + get_field_offset(table, field); + n_ptr = (const byte*) new_row + get_field_offset(table, field); - if (key_part->null_bit) { - if (record[key_part->null_offset] & - key_part->null_bit) { - *buff = 1; - part_is_null = TRUE; - } else { - *buff = 0; - } - buff++; - } - if (!part_is_null) *key_is_null = FALSE; + /* Use new_mysql_row_col and col_pack_len save the values */ - field = key_part->field; - mysql_type = field->type(); + new_mysql_row_col = n_ptr; + col_pack_len = field->pack_length(); - if (mysql_type == MYSQL_TYPE_VARCHAR) { - /* >= 5.0.3 true VARCHAR */ - ulint lenlen; - ulint len; - const byte* data; - ulint key_len; - ulint true_len; - CHARSET_INFO* cs; - int error=0; + o_len = col_pack_len; + n_len = col_pack_len; - key_len = key_part->length; + /* We use o_ptr and n_ptr to dig up the actual data for + comparison. 
*/ - if (part_is_null) { - buff += key_len + 2; + field_mysql_type = field->type(); - continue; - } - cs = field->charset(); + col_type = prebuilt->table->cols[innodb_idx].mtype; - lenlen = (ulint) - (((Field_varstring*)field)->length_bytes); + switch (col_type) { - data = row_mysql_read_true_varchar(&len, - (byte*) (record - + (ulint)get_field_offset(table, field)), - lenlen); + case DATA_BLOB: + /* Do not compress blob column while comparing*/ + o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len); + n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len); - true_len = len; + break; - /* For multi byte character sets we need to calculate - the true length of the key */ + case DATA_VARCHAR: + case DATA_BINARY: + case DATA_VARMYSQL: + if (field_mysql_type == MYSQL_TYPE_VARCHAR) { + /* This is a >= 5.0.3 type true VARCHAR where + the real payload data length is stored in + 1 or 2 bytes */ - if (len > 0 && cs->mbmaxlen > 1) { - true_len = (ulint) cs->cset->well_formed_len(cs, - (const char *) data, - (const char *) data + len, - (uint) (key_len / - cs->mbmaxlen), - &error); + o_ptr = row_mysql_read_true_varchar( + &o_len, o_ptr, + (ulint) + (((Field_varstring*) field)->length_bytes)); + + n_ptr = row_mysql_read_true_varchar( + &n_len, n_ptr, + (ulint) + (((Field_varstring*) field)->length_bytes)); + } + + break; + default: + ; + } + + if (field_mysql_type == MYSQL_TYPE_LONGLONG + && prebuilt->table->fts + && innobase_strcasecmp( + field->field_name, FTS_DOC_ID_COL_NAME) == 0) { + doc_id = (doc_id_t) mach_read_from_n_little_endian( + n_ptr, 8); + if (doc_id == 0) { + return(DB_FTS_INVALID_DOCID); + } + } + + + if (field->real_maybe_null()) { + if (field->is_null_in_record(old_row)) { + o_len = UNIV_SQL_NULL; } - /* In a column prefix index, we may need to truncate - the stored value: */ - - if (true_len > key_len) { - true_len = key_len; + if (field->is_null_in_record(new_row)) { + n_len = UNIV_SQL_NULL; } + } - memcpy(sorted, data, true_len); - true_len = wsrep_innobase_mysql_sort( - mysql_type, cs->number, sorted, true_len, - REC_VERSION_56_MAX_INDEX_COL_LEN); + if (o_len != n_len || (o_len != 0 && o_len != UNIV_SQL_NULL + && 0 != memcmp(o_ptr, n_ptr, o_len))) { + /* The field has changed */ - if (wsrep_protocol_version > 1) { - memcpy(buff, sorted, true_len); - /* Note that we always reserve the maximum possible - length of the true VARCHAR in the key value, though - only len first bytes after the 2 length bytes contain - actual data. The rest of the space was reset to zero - in the bzero() call above. */ - buff += true_len; - } else { - buff += key_len; - } - } else if (mysql_type == MYSQL_TYPE_TINY_BLOB - || mysql_type == MYSQL_TYPE_MEDIUM_BLOB - || mysql_type == MYSQL_TYPE_BLOB - || mysql_type == MYSQL_TYPE_LONG_BLOB - /* MYSQL_TYPE_GEOMETRY data is treated - as BLOB data in innodb. 
*/ - || mysql_type == MYSQL_TYPE_GEOMETRY) { + ufield = uvect->fields + n_changed; + UNIV_MEM_INVALID(ufield, sizeof *ufield); - CHARSET_INFO* cs; - ulint key_len; - ulint true_len; - int error=0; - ulint blob_len; - const byte* blob_data; + /* Let us use a dummy dfield to make the conversion + from the MySQL column format to the InnoDB format */ - ut_a(key_part->key_part_flag & HA_PART_KEY_SEG); + if (n_len != UNIV_SQL_NULL) { + dict_col_copy_type(prebuilt->table->cols + innodb_idx, + dfield_get_type(&dfield)); - key_len = key_part->length; + buf = row_mysql_store_col_in_innobase_format( + &dfield, + (byte*) buf, + TRUE, + new_mysql_row_col, + col_pack_len, + dict_table_is_comp(prebuilt->table)); + dfield_copy(&ufield->new_val, &dfield); + } else { + dfield_set_null(&ufield->new_val); + } - if (part_is_null) { - buff += key_len + 2; + ufield->exp = NULL; + ufield->orig_len = 0; + ufield->field_no = dict_col_get_clust_pos( + &prebuilt->table->cols[innodb_idx], clust_index); + n_changed++; - continue; - } + /* If an FTS indexed column was changed by this + UPDATE then we need to inform the FTS sub-system. - cs = field->charset(); + NOTE: Currently we re-index all FTS indexed columns + even if only a subset of the FTS indexed columns + have been updated. That is the reason we are + checking only once here. Later we will need to + note which columns have been updated and do + selective processing. */ + if (prebuilt->table->fts != NULL) { + ulint offset; + dict_table_t* innodb_table; - blob_data = row_mysql_read_blob_ref(&blob_len, - (byte*) (record - + (ulint)get_field_offset(table, field)), - (ulint) field->pack_length()); + innodb_table = prebuilt->table; - true_len = blob_len; + if (!changes_fts_column) { + offset = row_upd_changes_fts_column( + innodb_table, ufield); - ut_a(get_field_offset(table, field) - == key_part->offset); + if (offset != ULINT_UNDEFINED) { + changes_fts_column = TRUE; + } + } - /* For multi byte character sets we need to calculate - the true length of the key */ + if (!changes_fts_doc_col) { + changes_fts_doc_col = + row_upd_changes_doc_id( + innodb_table, ufield); + } + } + } + if (field->stored_in_db) + innodb_idx++; + } + + /* If the update changes a column with an FTS index on it, we + then add an update column node with a new document id to the + other changes. We piggy back our changes on the normal UPDATE + to reduce processing and IO overhead. */ + if (!prebuilt->table->fts) { + trx->fts_next_doc_id = 0; + } else if (changes_fts_column || changes_fts_doc_col) { + dict_table_t* innodb_table = prebuilt->table; + + ufield = uvect->fields + n_changed; + + if (!DICT_TF2_FLAG_IS_SET( + innodb_table, DICT_TF2_FTS_HAS_DOC_ID)) { + + /* If Doc ID is managed by user, and if any + FTS indexed column has been updated, its corresponding + Doc ID must also be updated. 
Otherwise, return + error */ + if (changes_fts_column && !changes_fts_doc_col) { + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB: A new Doc ID" + " must be supplied while updating" + " FTS indexed columns.\n"); + return(DB_FTS_INVALID_DOCID); + } - if (blob_len > 0 && cs->mbmaxlen > 1) { - true_len = (ulint) cs->cset->well_formed_len(cs, - (const char *) blob_data, - (const char *) blob_data - + blob_len, - (uint) (key_len / - cs->mbmaxlen), - &error); + /* Doc ID must monotonically increase */ + ut_ad(innodb_table->fts->cache); + if (doc_id < prebuilt->table->fts->cache->next_doc_id) { + fprintf(stderr, + "InnoDB: FTS Doc ID must be larger than" + " " IB_ID_FMT " for table", + innodb_table->fts->cache->next_doc_id + - 1); + ut_print_name(stderr, trx, + TRUE, innodb_table->name); + putc('\n', stderr); + + return(DB_FTS_INVALID_DOCID); + } else if ((doc_id + - prebuilt->table->fts->cache->next_doc_id) + >= FTS_DOC_ID_MAX_STEP) { + fprintf(stderr, + "InnoDB: Doc ID " UINT64PF " is too" + " big. Its difference with largest" + " Doc ID used " UINT64PF " cannot" + " exceed or equal to %d\n", + doc_id, + prebuilt->table->fts->cache->next_doc_id - 1, + FTS_DOC_ID_MAX_STEP); } - /* All indexes on BLOB and TEXT are column prefix - indexes, and we may need to truncate the data to be - stored in the key value: */ - if (true_len > key_len) { - true_len = key_len; - } + trx->fts_next_doc_id = doc_id; + } else { + /* If the Doc ID is a hidden column, it can't be + changed by user */ + ut_ad(!changes_fts_doc_col); - memcpy(sorted, blob_data, true_len); - true_len = wsrep_innobase_mysql_sort( - mysql_type, cs->number, sorted, true_len, - REC_VERSION_56_MAX_INDEX_COL_LEN); + /* Doc ID column is hidden, a new Doc ID will be + generated by following fts_update_doc_id() call */ + trx->fts_next_doc_id = 0; + } - memcpy(buff, sorted, true_len); + fts_update_doc_id( + innodb_table, ufield, &trx->fts_next_doc_id); - /* Note that we always reserve the maximum possible - length of the BLOB prefix in the key value. */ - if (wsrep_protocol_version > 1) { - buff += true_len; - } else { - buff += key_len; - } - } else { - /* Here we handle all other data types except the - true VARCHAR, BLOB and TEXT. Note that the column - value we store may be also in a column prefix - index. 
*/ + ++n_changed; + } else { + /* We have a Doc ID column, but none of FTS indexed + columns are touched, nor the Doc ID column, so set + fts_next_doc_id to UINT64_UNDEFINED, which means do not + update the Doc ID column */ + trx->fts_next_doc_id = UINT64_UNDEFINED; + } - CHARSET_INFO* cs; - ulint true_len; - ulint key_len; - const uchar* src_start; - int error=0; - enum_field_types real_type; + uvect->n_fields = n_changed; + uvect->info_bits = 0; - key_len = key_part->length; + ut_a(buf <= (byte*) original_upd_buff + buff_len); - if (part_is_null) { - buff += key_len; + return(DB_SUCCESS); +} - continue; - } +#ifdef WITH_WSREP +static +int +wsrep_calc_row_hash( +/*================*/ + byte* digest, /*!< in/out: md5 sum */ + const uchar* row, /*!< in: row in MySQL format */ + TABLE* table, /*!< in: table in MySQL data + dictionary */ + row_prebuilt_t* prebuilt, /*!< in: InnoDB prebuilt struct */ + THD* thd) /*!< in: user thread */ +{ + Field* field; + enum_field_types field_mysql_type; + uint n_fields; + ulint len; + const byte* ptr; + ulint col_type; + uint i; - src_start = record + key_part->offset; - real_type = field->real_type(); - true_len = key_len; + void *ctx = wsrep_md5_init(); - /* Character set for the field is defined only - to fields whose type is string and real field - type is not enum or set. For these fields check - if character set is multi byte. */ + n_fields = table->s->fields; - if (real_type != MYSQL_TYPE_ENUM - && real_type != MYSQL_TYPE_SET - && ( mysql_type == MYSQL_TYPE_VAR_STRING - || mysql_type == MYSQL_TYPE_STRING)) { + for (i = 0; i < n_fields; i++) { + byte null_byte=0; + byte true_byte=1; - cs = field->charset(); + field = table->field[i]; - /* For multi byte character sets we need to - calculate the true length of the key */ + ptr = (const byte*) row + get_field_offset(table, field); + len = field->pack_length(); - if (key_len > 0 && cs->mbmaxlen > 1) { + field_mysql_type = field->type(); - true_len = (ulint) - cs->cset->well_formed_len(cs, - (const char *)src_start, - (const char *)src_start - + key_len, - (uint) (key_len / - cs->mbmaxlen), - &error); - } - memcpy(sorted, src_start, true_len); - true_len = wsrep_innobase_mysql_sort( - mysql_type, cs->number, sorted, true_len, - REC_VERSION_56_MAX_INDEX_COL_LEN); + col_type = prebuilt->table->cols[i].mtype; - memcpy(buff, sorted, true_len); - } else { - memcpy(buff, src_start, true_len); - } - buff += true_len; + switch (col_type) { - /* Pad the unused space with spaces. 
*/ + case DATA_BLOB: + ptr = row_mysql_read_blob_ref(&len, ptr, len); + break; -#ifdef REMOVED - if (true_len < key_len) { - ulint pad_len = key_len - true_len; - ut_a(!(pad_len % cs->mbminlen)); + case DATA_VARCHAR: + case DATA_BINARY: + case DATA_VARMYSQL: + if (field_mysql_type == MYSQL_TYPE_VARCHAR) { + /* This is a >= 5.0.3 type true VARCHAR where + the real payload data length is stored in + 1 or 2 bytes */ + + ptr = row_mysql_read_true_varchar( + &len, ptr, + (ulint) + (((Field_varstring*)field)->length_bytes)); - cs->cset->fill(cs, buff, pad_len, - 0x20 /* space */); - buff += pad_len; } -#endif /* REMOVED */ + + break; + default: + ; + } + /* + if (field->null_ptr && + field_in_record_is_null(table, field, (char*) row)) { + */ + + if (field->is_null_in_record(row)) { + wsrep_md5_update(ctx, (char*)&null_byte, 1); + } else { + wsrep_md5_update(ctx, (char*)&true_byte, 1); + wsrep_md5_update(ctx, (char*)ptr, len); } } - ut_a(buff <= buff_start + buff_len); + wsrep_compute_md5_hash((char*)digest, ctx); - DBUG_RETURN((uint)(buff - buff_start)); + return(0); } #endif /* WITH_WSREP */ -UNIV_INTERN -uint -ha_innobase::store_key_val_for_row( -/*===============================*/ - uint keynr, /*!< in: key number */ - char* buff, /*!< in/out: buffer for the key value (in MySQL - format) */ - uint buff_len,/*!< in: buffer length */ - const uchar* record)/*!< in: row in MySQL format */ -{ - KEY* key_info = table->key_info + keynr; - KEY_PART_INFO* key_part = key_info->key_part; - KEY_PART_INFO* end = key_part + key_info->key_parts; - char* buff_start = buff; - enum_field_types mysql_type; - Field* field; - ibool is_null; - - DBUG_ENTER("store_key_val_for_row"); - - /* The format for storing a key field in MySQL is the following: - - 1. If the column can be NULL, then in the first byte we put 1 if the - field value is NULL, 0 otherwise. - - 2. If the column is of a BLOB type (it must be a column prefix field - in this case), then we put the length of the data in the field to the - next 2 bytes, in the little-endian format. If the field is SQL NULL, - then these 2 bytes are set to 0. Note that the length of data in the - field is <= column prefix length. +/**********************************************************************//** +Updates a row given as a parameter to a new value. Note that we are given +whole rows, not just the fields which are updated: this incurs some +overhead for CPU when we check which fields are actually updated. +TODO: currently InnoDB does not prevent the 'Halloween problem': +in a searched update a single row can get updated several times +if its index columns are updated! +@return error number or 0 */ +UNIV_INTERN +int +ha_innobase::update_row( +/*====================*/ + const uchar* old_row, /*!< in: old row in MySQL format */ + uchar* new_row) /*!< in: new row in MySQL format */ +{ + upd_t* uvect; + dberr_t error; + trx_t* trx = thd_to_trx(user_thd); - 3. In a column prefix field, prefix_len next bytes are reserved for - data. In a normal field the max field length next bytes are reserved - for data. For a VARCHAR(n) the max field length is n. If the stored - value is the SQL NULL then these data bytes are set to 0. + DBUG_ENTER("ha_innobase::update_row"); - 4. We always use a 2 byte length for a true >= 5.0.3 VARCHAR. Note that - in the MySQL row format, the length is stored in 1 or 2 bytes, - depending on the maximum allowed length. But in the MySQL key value - format, the length always takes 2 bytes. 
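A short worked example of the key-value format described in the comment above (illustrative only, not taken from the patch): a nullable latin1 VARCHAR(8) key part holding the three bytes 'abc' would be stored as one NULL-indicator byte, a 2-byte little-endian length, and 8 reserved data bytes with the unused tail zero-filled.

/* Hypothetical buffer contents for that key part. */
static const unsigned char demo_key_val[1 + 2 + 8] = {
	0x00,				/* 0 = value is not NULL */
	0x03, 0x00,			/* data length 3, little-endian */
	'a', 'b', 'c',			/* the actual data */
	0x00, 0x00, 0x00, 0x00, 0x00	/* zero fill up to the reserved max length */
};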
+ ut_a(prebuilt->trx == trx); - We have to zero-fill the buffer so that MySQL is able to use a - simple memcmp to compare two key values to determine if they are - equal. MySQL does this to compare contents of two 'ref' values. */ + if (high_level_read_only) { + ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE); + DBUG_RETURN(HA_ERR_TABLE_READONLY); + } else if (!trx_is_started(trx)) { + ++trx->will_lock; + } - bzero(buff, buff_len); + if (upd_buf == NULL) { + ut_ad(upd_buf_size == 0); - for (; key_part != end; key_part++) { - is_null = FALSE; + /* Create a buffer for packing the fields of a record. Why + table->stored_rec_length did not work here? Obviously, because char + fields when packed actually became 1 byte longer, when we also + stored the string length as the first byte. */ - if (key_part->null_bit) { - if (record[key_part->null_offset] - & key_part->null_bit) { - *buff = 1; - is_null = TRUE; - } else { - *buff = 0; - } - buff++; + upd_buf_size = table->s->stored_rec_length + table->s->max_key_length + + MAX_REF_PARTS * 3; + upd_buf = (uchar*) my_malloc(upd_buf_size, MYF(MY_WME)); + if (upd_buf == NULL) { + upd_buf_size = 0; + DBUG_RETURN(HA_ERR_OUT_OF_MEM); } + } - field = key_part->field; - mysql_type = field->type(); - - if (mysql_type == MYSQL_TYPE_VARCHAR) { - /* >= 5.0.3 true VARCHAR */ - ulint lenlen; - ulint len; - const byte* data; - ulint key_len; - ulint true_len; - CHARSET_INFO* cs; - int error=0; - - key_len = key_part->length; + ha_statistic_increment(&SSV::ha_update_count); - if (is_null) { - buff += key_len + 2; + if (share->ib_table != prebuilt->table) { + fprintf(stderr, + "InnoDB: Warning: share->ib_table %p prebuilt->table %p table %s is_corrupt %lu.", + share->ib_table, prebuilt->table, prebuilt->table->name, prebuilt->table->is_corrupt); + } - continue; - } - cs = field->charset(); + if (UNIV_UNLIKELY(share->ib_table && share->ib_table->is_corrupt)) { + DBUG_RETURN(HA_ERR_CRASHED); + } - lenlen = (ulint) - (((Field_varstring*)field)->length_bytes); + if (prebuilt->upd_node) { + uvect = prebuilt->upd_node->update; + } else { + uvect = row_get_prebuilt_update_vector(prebuilt); + } - data = row_mysql_read_true_varchar(&len, - (byte*) (record - + (ulint)get_field_offset(table, field)), - lenlen); + /* Build an update vector from the modified fields in the rows + (uses upd_buf of the handle) */ - true_len = len; + error = calc_row_difference(uvect, (uchar*) old_row, new_row, table, + upd_buf, upd_buf_size, prebuilt, user_thd); - /* For multi byte character sets we need to calculate - the true length of the key */ + if (error != DB_SUCCESS) { + goto func_exit; + } - if (len > 0 && cs->mbmaxlen > 1) { - true_len = (ulint) cs->cset->well_formed_len(cs, - (const char *) data, - (const char *) data + len, - (uint) (key_len / - cs->mbmaxlen), - &error); - } + /* This is not a delete */ + prebuilt->upd_node->is_delete = FALSE; - /* In a column prefix index, we may need to truncate - the stored value: */ + ut_a(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW); - if (true_len > key_len) { - true_len = key_len; - } + innobase_srv_conc_enter_innodb(trx); - /* The length in a key value is always stored in 2 - bytes */ + error = row_update_for_mysql((byte*) old_row, prebuilt); - row_mysql_store_true_var_len((byte*)buff, true_len, 2); - buff += 2; + /* We need to do some special AUTOINC handling for the following case: - memcpy(buff, data, true_len); + INSERT INTO t (c1,c2) VALUES(x,y) ON DUPLICATE KEY UPDATE ... 
- /* Note that we always reserve the maximum possible - length of the true VARCHAR in the key value, though - only len first bytes after the 2 length bytes contain - actual data. The rest of the space was reset to zero - in the bzero() call above. */ + We need to use the AUTOINC counter that was actually used by + MySQL in the UPDATE statement, which can be different from the + value used in the INSERT statement.*/ - buff += key_len; + if (error == DB_SUCCESS + && table->next_number_field + && new_row == table->record[0] + && thd_sql_command(user_thd) == SQLCOM_INSERT + && trx->duplicates) { - } else if (mysql_type == MYSQL_TYPE_TINY_BLOB - || mysql_type == MYSQL_TYPE_MEDIUM_BLOB - || mysql_type == MYSQL_TYPE_BLOB - || mysql_type == MYSQL_TYPE_LONG_BLOB - /* MYSQL_TYPE_GEOMETRY data is treated - as BLOB data in innodb. */ - || mysql_type == MYSQL_TYPE_GEOMETRY) { + ulonglong auto_inc; + ulonglong col_max_value; - CHARSET_INFO* cs; - ulint key_len; - ulint true_len; - int error=0; - ulint blob_len; - const byte* blob_data; + auto_inc = table->next_number_field->val_uint(); - ut_a(key_part->key_part_flag & HA_PART_KEY_SEG); + /* We need the upper limit of the col type to check for + whether we update the table autoinc counter or not. */ + col_max_value = innobase_get_int_col_max_value( + table->next_number_field); - key_len = key_part->length; + if (auto_inc <= col_max_value && auto_inc != 0) { - if (is_null) { - buff += key_len + 2; + ulonglong offset; + ulonglong increment; - continue; - } + offset = prebuilt->autoinc_offset; + increment = prebuilt->autoinc_increment; - cs = field->charset(); + auto_inc = innobase_next_autoinc( + auto_inc, 1, increment, offset, col_max_value); - blob_data = row_mysql_read_blob_ref(&blob_len, - (byte*) (record - + (ulint)get_field_offset(table, field)), - (ulint) field->pack_length()); + error = innobase_set_max_autoinc(auto_inc); + } + } - true_len = blob_len; + innobase_srv_conc_exit_innodb(trx); - ut_a(get_field_offset(table, field) - == key_part->offset); +func_exit: + int err = convert_error_code_to_mysql(error, + prebuilt->table->flags, user_thd); - /* For multi byte character sets we need to calculate - the true length of the key */ + /* If success and no columns were updated. */ + if (err == 0 && uvect->n_fields == 0) { - if (blob_len > 0 && cs->mbmaxlen > 1) { - true_len = (ulint) cs->cset->well_formed_len(cs, - (const char *) blob_data, - (const char *) blob_data - + blob_len, - (uint) (key_len / - cs->mbmaxlen), - &error); - } + /* This is the same as success, but instructs + MySQL that the row is not really updated and it + should not increase the count of updated rows. 
+ This is fix for http://bugs.mysql.com/29157 */ + err = HA_ERR_RECORD_IS_THE_SAME; + } else if (err == HA_FTS_INVALID_DOCID) { + my_error(HA_FTS_INVALID_DOCID, MYF(0)); + } - /* All indexes on BLOB and TEXT are column prefix - indexes, and we may need to truncate the data to be - stored in the key value: */ + /* Tell InnoDB server that there might be work for + utility threads: */ - if (true_len > key_len) { - true_len = key_len; - } + innobase_active_small(); - /* MySQL reserves 2 bytes for the length and the - storage of the number is little-endian */ +#ifdef WITH_WSREP + if (error == DB_SUCCESS && + wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && + wsrep_on(user_thd) && + !wsrep_thd_skip_append_keys(user_thd)) + { + DBUG_PRINT("wsrep", ("update row key")); - if (wsrep_append_keys(user_thd, false, old_row, new_row)) { - innobase_write_to_2_little_endian( - (byte*)buff, true_len); - buff += 2; ++ if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, old_row, ++ new_row)) { + WSREP_DEBUG("WSREP: UPDATE_ROW_KEY FAILED"); + DBUG_PRINT("wsrep", ("row key failed")); + err = HA_ERR_INTERNAL_ERROR; + goto wsrep_error; + } + } +wsrep_error: +#endif /* WITH_WSREP */ - memcpy(buff, blob_data, true_len); + if (share->ib_table != prebuilt->table) { + fprintf(stderr, + "InnoDB: Warning: share->ib_table %p prebuilt->table %p table %s is_corrupt %lu.", + share->ib_table, prebuilt->table, prebuilt->table->name, prebuilt->table->is_corrupt); + } - /* Note that we always reserve the maximum possible - length of the BLOB prefix in the key value. */ + if (UNIV_UNLIKELY(share->ib_table && share->ib_table->is_corrupt)) { + DBUG_RETURN(HA_ERR_CRASHED); + } - buff += key_len; - } else { - /* Here we handle all other data types except the - true VARCHAR, BLOB and TEXT. Note that the column - value we store may be also in a column prefix - index. */ + DBUG_RETURN(err); +} - CHARSET_INFO* cs; - ulint true_len; - ulint key_len; - const uchar* src_start; - int error=0; - enum_field_types real_type; +/**********************************************************************//** +Deletes a row given as the parameter. +@return error number or 0 */ +UNIV_INTERN +int +ha_innobase::delete_row( +/*====================*/ + const uchar* record) /*!< in: a row in MySQL format */ +{ + dberr_t error; + trx_t* trx = thd_to_trx(user_thd); - key_len = key_part->length; + DBUG_ENTER("ha_innobase::delete_row"); - if (is_null) { - buff += key_len; + ut_a(prebuilt->trx == trx); - continue; - } + if (high_level_read_only) { + ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE); + DBUG_RETURN(HA_ERR_TABLE_READONLY); + } else if (!trx_is_started(trx)) { + ++trx->will_lock; + } - src_start = record + key_part->offset; - real_type = field->real_type(); - true_len = key_len; + ha_statistic_increment(&SSV::ha_delete_count); - /* Character set for the field is defined only - to fields whose type is string and real field - type is not enum or set. For these fields check - if character set is multi byte. 
*/ + if (UNIV_UNLIKELY(share && share->ib_table + && share->ib_table->is_corrupt)) { + DBUG_RETURN(HA_ERR_CRASHED); + } - if (real_type != MYSQL_TYPE_ENUM - && real_type != MYSQL_TYPE_SET - && ( mysql_type == MYSQL_TYPE_VAR_STRING - || mysql_type == MYSQL_TYPE_STRING)) { + if (!prebuilt->upd_node) { + row_get_prebuilt_update_vector(prebuilt); + } - cs = field->charset(); + /* This is a delete */ - /* For multi byte character sets we need to - calculate the true length of the key */ + prebuilt->upd_node->is_delete = TRUE; - if (key_len > 0 && cs->mbmaxlen > 1) { + innobase_srv_conc_enter_innodb(trx); - true_len = (ulint) - cs->cset->well_formed_len(cs, - (const char *)src_start, - (const char *)src_start - + key_len, - (uint) (key_len / - cs->mbmaxlen), - &error); - } - } + error = row_update_for_mysql((byte*) record, prebuilt); - memcpy(buff, src_start, true_len); - buff += true_len; + innobase_srv_conc_exit_innodb(trx); - /* Pad the unused space with spaces. */ + /* Tell the InnoDB server that there might be work for + utility threads: */ - if (true_len < key_len) { - ulint pad_len = key_len - true_len; - ut_a(!(pad_len % cs->mbminlen)); + innobase_active_small(); - cs->cset->fill(cs, buff, pad_len, - 0x20 /* space */); - buff += pad_len; - } +#ifdef WITH_WSREP + if (error == DB_SUCCESS && + wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && + wsrep_on(user_thd) && + !wsrep_thd_skip_append_keys(user_thd)) + { - if (wsrep_append_keys(user_thd, false, record, NULL)) { ++ if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, record, ++ NULL)) { + DBUG_PRINT("wsrep", ("delete fail")); + error = DB_ERROR; + goto wsrep_error; } } +wsrep_error: +#endif /* WITH_WSREP */ - ut_a(buff <= buff_start + buff_len); + if (UNIV_UNLIKELY(share && share->ib_table + && share->ib_table->is_corrupt)) { + DBUG_RETURN(HA_ERR_CRASHED); + } - DBUG_RETURN((uint)(buff - buff_start)); + DBUG_RETURN(convert_error_code_to_mysql( + error, prebuilt->table->flags, user_thd)); } -/**************************************************************//** -Determines if a field is needed in a prebuilt struct 'template'. -@return field to use, or NULL if the field is not needed */ -static -const Field* -build_template_needs_field( -/*=======================*/ - ibool index_contains, /*!< in: - dict_index_contains_col_or_prefix( - index, i) */ - ibool read_just_key, /*!< in: TRUE when MySQL calls - ha_innobase::extra with the - argument HA_EXTRA_KEYREAD; it is enough - to read just columns defined in - the index (i.e., no read of the - clustered index record necessary) */ - ibool fetch_all_in_key, - /*!< in: true=fetch all fields in - the index */ - ibool fetch_primary_key_cols, - /*!< in: true=fetch the - primary key columns */ - dict_index_t* index, /*!< in: InnoDB index to use */ - const TABLE* table, /*!< in: MySQL table object */ - ulint i, /*!< in: field index in InnoDB table */ - ulint sql_idx) /*!< in: field index in SQL table */ +/**********************************************************************//** +Removes a new lock set on a row, if it was not read optimistically. This can +be called after a row has been read in the processing of an UPDATE or a DELETE +query, if the option innodb_locks_unsafe_for_binlog is set. 
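A minimal sketch of the calling pattern unlock_row() exists for, using hypothetical demo_* types rather than the real handler API: while scanning rows for an UPDATE or DELETE, the SQL layer hands back the lock on rows its WHERE clause rejects, so under READ COMMITTED or innodb_locks_unsafe_for_binlog only the rows actually modified stay locked until commit.

/* Hypothetical types, for illustration only. */
struct demo_row { long long key_col; };

struct demo_cursor {
	bool read_next(demo_row&) { return false; }	/* stub: fetch next row, taking its row lock */
	void unlock_row() {}				/* stub: give the row lock back */
};

static void demo_scan_for_update(demo_cursor& cur, bool (*matches)(const demo_row&))
{
	demo_row	row;

	while (cur.read_next(row)) {
		if (!matches(row)) {
			/* Row rejected by the WHERE clause: release its lock early. */
			cur.unlock_row();
			continue;
		}
		/* ... modify the row; its lock is kept until commit ... */
	}
}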
*/ +UNIV_INTERN +void +ha_innobase::unlock_row(void) +/*=========================*/ { - const Field* field = table->field[sql_idx]; + DBUG_ENTER("ha_innobase::unlock_row"); - ut_ad(index_contains == dict_index_contains_col_or_prefix(index, i)); + /* Consistent read does not take any locks, thus there is + nothing to unlock. */ - if (!index_contains) { - if (read_just_key) { - /* If this is a 'key read', we do not need - columns that are not in the key */ + if (prebuilt->select_lock_type == LOCK_NONE) { + DBUG_VOID_RETURN; + } - return(NULL); - } - } else if (fetch_all_in_key) { - /* This field is needed in the query */ + /* Ideally, this assert must be in the beginning of the function. + But there are some calls to this function from the SQL layer when the + transaction is in state TRX_STATE_NOT_STARTED. The check on + prebuilt->select_lock_type above gets around this issue. */ + ut_ad(trx_state_eq(prebuilt->trx, TRX_STATE_ACTIVE)); - return(field); + switch (prebuilt->row_read_type) { + case ROW_READ_WITH_LOCKS: + if (!srv_locks_unsafe_for_binlog + && prebuilt->trx->isolation_level + > TRX_ISO_READ_COMMITTED) { + break; + } + /* fall through */ + case ROW_READ_TRY_SEMI_CONSISTENT: + row_unlock_for_mysql(prebuilt, FALSE); + break; + case ROW_READ_DID_SEMI_CONSISTENT: + prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; + break; } - if (bitmap_is_set(table->read_set, sql_idx) - || bitmap_is_set(table->write_set, sql_idx)) { - /* This field is needed in the query */ + DBUG_VOID_RETURN; +} - return(field); +/* See handler.h and row0mysql.h for docs on this function. */ +UNIV_INTERN +bool +ha_innobase::was_semi_consistent_read(void) +/*=======================================*/ +{ + return(prebuilt->row_read_type == ROW_READ_DID_SEMI_CONSISTENT); +} + +/* See handler.h and row0mysql.h for docs on this function. */ +UNIV_INTERN +void +ha_innobase::try_semi_consistent_read(bool yes) +/*===========================================*/ +{ + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); + + /* Row read type is set to semi consistent read if this was + requested by the MySQL and either innodb_locks_unsafe_for_binlog + option is used or this session is using READ COMMITTED isolation + level. */ + + if (yes + && (srv_locks_unsafe_for_binlog + || prebuilt->trx->isolation_level <= TRX_ISO_READ_COMMITTED)) { + prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; + } else { + prebuilt->row_read_type = ROW_READ_WITH_LOCKS; } +} - if (fetch_primary_key_cols - && dict_table_col_in_clustered_key(index->table, i)) { - /* This field is needed in the query */ +/******************************************************************//** +Initializes a handle to use an index. +@return 0 or error number */ +UNIV_INTERN +int +ha_innobase::index_init( +/*====================*/ + uint keynr, /*!< in: key (index) number */ + bool sorted) /*!< in: 1 if result MUST be sorted according to index */ +{ + DBUG_ENTER("index_init"); - return(field); + DBUG_RETURN(change_active_index(keynr)); +} + +/******************************************************************//** +Currently does nothing. +@return 0 */ +UNIV_INTERN +int +ha_innobase::index_end(void) +/*========================*/ +{ + int error = 0; + DBUG_ENTER("index_end"); + active_index = MAX_KEY; + in_range_check_pushed_down = FALSE; + ds_mrr.dsmrr_close(); + DBUG_RETURN(error); +} + +/*********************************************************************//** +Converts a search mode flag understood by MySQL to a flag understood +by InnoDB. 
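A small usage sketch for the mapping below, assuming it is compiled somewhere after the function's definition in this file; the page-cursor modes name the position relative to the search tuple (PAGE_CUR_GE = first record >= key, PAGE_CUR_G = first record > key, PAGE_CUR_LE = last record <= key, PAGE_CUR_L = last record < key).

static void demo_search_mode_mapping()
{
	/* WHERE k >= c, ascending range scan */
	ut_ad(convert_search_mode_to_innobase(HA_READ_KEY_OR_NEXT) == PAGE_CUR_GE);
	/* WHERE k > c */
	ut_ad(convert_search_mode_to_innobase(HA_READ_AFTER_KEY) == PAGE_CUR_G);
	/* WHERE k <= c, descending range scan */
	ut_ad(convert_search_mode_to_innobase(HA_READ_KEY_OR_PREV) == PAGE_CUR_LE);
	/* WHERE k < c */
	ut_ad(convert_search_mode_to_innobase(HA_READ_BEFORE_KEY) == PAGE_CUR_L);
	/* The spatial HA_READ_MBR_* flags are unsupported and map to PAGE_CUR_UNSUPP. */
}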
*/ +static inline +ulint +convert_search_mode_to_innobase( +/*============================*/ + enum ha_rkey_function find_flag) +{ + switch (find_flag) { + case HA_READ_KEY_EXACT: + /* this does not require the index to be UNIQUE */ + return(PAGE_CUR_GE); + case HA_READ_KEY_OR_NEXT: + return(PAGE_CUR_GE); + case HA_READ_KEY_OR_PREV: + return(PAGE_CUR_LE); + case HA_READ_AFTER_KEY: + return(PAGE_CUR_G); + case HA_READ_BEFORE_KEY: + return(PAGE_CUR_L); + case HA_READ_PREFIX: + return(PAGE_CUR_GE); + case HA_READ_PREFIX_LAST: + return(PAGE_CUR_LE); + case HA_READ_PREFIX_LAST_OR_PREV: + return(PAGE_CUR_LE); + /* In MySQL-4.0 HA_READ_PREFIX and HA_READ_PREFIX_LAST always + pass a complete-field prefix of a key value as the search + tuple. I.e., it is not allowed that the last field would + just contain n first bytes of the full field value. + MySQL uses a 'padding' trick to convert LIKE 'abc%' + type queries so that it can use as a search tuple + a complete-field-prefix of a key value. Thus, the InnoDB + search mode PAGE_CUR_LE_OR_EXTENDS is never used. + TODO: when/if MySQL starts to use also partial-field + prefixes, we have to deal with stripping of spaces + and comparison of non-latin1 char type fields in + innobase_mysql_cmp() to get PAGE_CUR_LE_OR_EXTENDS to + work correctly. */ + case HA_READ_MBR_CONTAIN: + case HA_READ_MBR_INTERSECT: + case HA_READ_MBR_WITHIN: + case HA_READ_MBR_DISJOINT: + case HA_READ_MBR_EQUAL: + return(PAGE_CUR_UNSUPP); + /* do not use "default:" in order to produce a gcc warning: + enumeration value '...' not handled in switch + (if -Wswitch or -Wall is used) */ } - /* This field is not needed in the query, skip it */ + my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "this functionality"); - return(NULL); + return(PAGE_CUR_UNSUPP); } -/**************************************************************//** -Adds a field is to a prebuilt struct 'template'. -@return the field template */ -static -mysql_row_templ_t* -build_template_field( -/*=================*/ - row_prebuilt_t* prebuilt, /*!< in/out: template */ - dict_index_t* clust_index, /*!< in: InnoDB clustered index */ - dict_index_t* index, /*!< in: InnoDB index to use */ - TABLE* table, /*!< in: MySQL table object */ - const Field* field, /*!< in: field in MySQL table */ - ulint i) /*!< in: field index in InnoDB table */ -{ - mysql_row_templ_t* templ; - const dict_col_t* col; +/* + BACKGROUND INFO: HOW A SELECT SQL QUERY IS EXECUTED + --------------------------------------------------- +The following does not cover all the details, but explains how we determine +the start of a new SQL statement, and what is associated with it. + +For each table in the database the MySQL interpreter may have several +table handle instances in use, also in a single SQL query. For each table +handle instance there is an InnoDB 'prebuilt' struct which contains most +of the InnoDB data associated with this table handle instance. + + A) if the user has not explicitly set any MySQL table level locks: + + 1) MySQL calls ::external_lock to set an 'intention' table level lock on +the table of the handle instance. There we set +prebuilt->sql_stat_start = TRUE. The flag sql_stat_start should be set +true if we are taking this table handle instance to use in a new SQL +statement issued by the user. We also increment trx->n_mysql_tables_in_use. + + 2) If prebuilt->sql_stat_start == TRUE we 'pre-compile' the MySQL search +instructions to prebuilt->template of the table handle instance in +::index_read. 
The template is used to save CPU time in large joins. + + 3) In row_search_for_mysql, if prebuilt->sql_stat_start is true, we +allocate a new consistent read view for the trx if it does not yet have one, +or in the case of a locking read, set an InnoDB 'intention' table level +lock on the table. - //ut_ad(field == table->field[i]); - ut_ad(clust_index->table == index->table); + 4) We do the SELECT. MySQL may repeatedly call ::index_read for the +same table handle instance, if it is a join. - col = dict_table_get_nth_col(index->table, i); + 5) When the SELECT ends, MySQL removes its intention table level locks +in ::external_lock. When trx->n_mysql_tables_in_use drops to zero, + (a) we execute a COMMIT there if the autocommit is on, + (b) we also release possible 'SQL statement level resources' InnoDB may +have for this SQL statement. The MySQL interpreter does NOT execute +autocommit for pure read transactions, though it should. That is why the +table handler in that case has to execute the COMMIT in ::external_lock. - templ = prebuilt->mysql_template + prebuilt->n_template++; - UNIV_MEM_INVALID(templ, sizeof *templ); - templ->col_no = i; - templ->clust_rec_field_no = dict_col_get_clust_pos(col, clust_index); - ut_a(templ->clust_rec_field_no != ULINT_UNDEFINED); + B) If the user has explicitly set MySQL table level locks, then MySQL +does NOT call ::external_lock at the start of the statement. To determine +when we are at the start of a new SQL statement we at the start of +::index_read also compare the query id to the latest query id where the +table handle instance was used. If it has changed, we know we are at the +start of a new SQL statement. Since the query id can theoretically +overwrap, we use this test only as a secondary way of determining the +start of a new SQL statement. */ - if (dict_index_is_clust(index)) { - templ->rec_field_no = templ->clust_rec_field_no; - } else { - templ->rec_field_no = dict_index_get_nth_col_pos(index, i); - } - if (field->null_ptr) { - templ->mysql_null_byte_offset = - (ulint) ((char*) field->null_ptr - - (char*) table->record[0]); +/**********************************************************************//** +Positions an index cursor to the index specified in the handle. Fetches the +row if any. 
+@return 0, HA_ERR_KEY_NOT_FOUND, or error number */ +UNIV_INTERN +int +ha_innobase::index_read( +/*====================*/ + uchar* buf, /*!< in/out: buffer for the returned + row */ + const uchar* key_ptr, /*!< in: key value; if this is NULL + we position the cursor at the + start or end of index; this can + also contain an InnoDB row id, in + which case key_len is the InnoDB + row id length; the key value can + also be a prefix of a full key value, + and the last column can be a prefix + of a full column */ + uint key_len,/*!< in: key value length */ + enum ha_rkey_function find_flag)/*!< in: search flags from my_base.h */ +{ + ulint mode; + dict_index_t* index; + ulint match_mode = 0; + int error; + dberr_t ret; - templ->mysql_null_bit_mask = (ulint) field->null_bit; - } else { - templ->mysql_null_bit_mask = 0; - } + DBUG_ENTER("index_read"); + DEBUG_SYNC_C("ha_innobase_index_read_begin"); - templ->mysql_col_offset = (ulint) get_field_offset(table, field); + ut_a(prebuilt->trx == thd_to_trx(user_thd)); + ut_ad(key_len != 0 || find_flag != HA_READ_KEY_EXACT); - templ->mysql_col_len = (ulint) field->pack_length(); - templ->type = col->mtype; - templ->mysql_type = (ulint)field->type(); + ha_statistic_increment(&SSV::ha_read_key_count); - if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) { - templ->mysql_length_bytes = (ulint) - (((Field_varstring*)field)->length_bytes); + if (UNIV_UNLIKELY(srv_pass_corrupt_table <= 1 && share + && share->ib_table && share->ib_table->is_corrupt)) { + DBUG_RETURN(HA_ERR_CRASHED); } - templ->charset = dtype_get_charset_coll(col->prtype); - templ->mbminlen = col->mbminlen; - templ->mbmaxlen = col->mbmaxlen; - templ->is_unsigned = col->prtype & DATA_UNSIGNED; + index = prebuilt->index; - if (!dict_index_is_clust(index) - && templ->rec_field_no == ULINT_UNDEFINED) { - prebuilt->need_to_access_clustered = TRUE; + if (UNIV_UNLIKELY(index == NULL) || dict_index_is_corrupted(index)) { + prebuilt->index_usable = FALSE; + DBUG_RETURN(HA_ERR_CRASHED); } - - if (prebuilt->mysql_prefix_len < templ->mysql_col_offset - + templ->mysql_col_len) { - prebuilt->mysql_prefix_len = templ->mysql_col_offset - + templ->mysql_col_len; + if (UNIV_UNLIKELY(!prebuilt->index_usable)) { + DBUG_RETURN(dict_index_is_corrupted(index) + ? HA_ERR_INDEX_CORRUPT + : HA_ERR_TABLE_DEF_CHANGED); } - if (templ->type == DATA_BLOB) { - prebuilt->templ_contains_blob = TRUE; + if (index->type & DICT_FTS) { + DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); } - return(templ); -} - -/**************************************************************//** -Builds a 'template' to the prebuilt struct. The template is used in fast -retrieval of just those column values MySQL needs in its processing. */ -UNIV_INTERN -void -ha_innobase::build_template( -/*========================*/ - bool whole_row) /*!< in: true=ROW_MYSQL_WHOLE_ROW, - false=ROW_MYSQL_REC_FIELDS */ -{ - dict_index_t* index; - dict_index_t* clust_index; - ulint n_stored_fields; - ibool fetch_all_in_key = FALSE; - ibool fetch_primary_key_cols = FALSE; - ulint i, sql_idx; - - if (prebuilt->select_lock_type == LOCK_X) { - /* We always retrieve the whole clustered index record if we - use exclusive row level locks, for example, if the read is - done in an UPDATE statement. 
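For reference, a minimal standalone sketch of the search-flag mapping that convert_search_mode_to_innobase() performs before ::index_read calls row_search_for_mysql. The enum names below are simplified stand-ins for the real my_base.h and page0cur.h constants, not the actual definitions; only the mapping itself is taken from the switch shown above.

#include <cstdio>

// Simplified stand-ins for the MySQL/InnoDB enums (assumption: real values differ).
enum rkey_fn { READ_KEY_EXACT, READ_KEY_OR_NEXT, READ_KEY_OR_PREV,
               READ_AFTER_KEY, READ_BEFORE_KEY, READ_PREFIX,
               READ_PREFIX_LAST, READ_PREFIX_LAST_OR_PREV, READ_MBR };
enum cur_mode { CUR_G, CUR_GE, CUR_L, CUR_LE, CUR_UNSUPP };

// Mirrors the switch above: "exact", "or next" and prefix searches map to >=,
// "or prev" and prefix-last searches map to <=, strict after/before map to >/<,
// and spatial (MBR) searches are unsupported.
static cur_mode to_innobase_mode(rkey_fn f)
{
    switch (f) {
    case READ_KEY_EXACT:
    case READ_KEY_OR_NEXT:
    case READ_PREFIX:              return CUR_GE;
    case READ_KEY_OR_PREV:
    case READ_PREFIX_LAST:
    case READ_PREFIX_LAST_OR_PREV: return CUR_LE;
    case READ_AFTER_KEY:           return CUR_G;
    case READ_BEFORE_KEY:          return CUR_L;
    case READ_MBR:                 return CUR_UNSUPP;
    }
    return CUR_UNSUPP;
}

int main()
{
    printf("%d\n", to_innobase_mode(READ_KEY_EXACT)); // prints 1 (CUR_GE)
    return 0;
}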
*/ - - whole_row = true; - } else if (!whole_row) { - if (prebuilt->hint_need_to_fetch_extra_cols - == ROW_RETRIEVE_ALL_COLS) { + /* Note that if the index for which the search template is built is not + necessarily prebuilt->index, but can also be the clustered index */ - /* We know we must at least fetch all columns in the - key, or all columns in the table */ + if (prebuilt->sql_stat_start) { + build_template(false); + } - if (prebuilt->read_just_key) { - /* MySQL has instructed us that it is enough - to fetch the columns in the key; looks like - MySQL can set this flag also when there is - only a prefix of the column in the key: in - that case we retrieve the whole column from - the clustered index */ + if (key_ptr) { + /* Convert the search key value to InnoDB format into + prebuilt->search_tuple */ - fetch_all_in_key = TRUE; - } else { - whole_row = true; - } - } else if (prebuilt->hint_need_to_fetch_extra_cols - == ROW_RETRIEVE_PRIMARY_KEY) { - /* We must at least fetch all primary key cols. Note - that if the clustered index was internally generated - by InnoDB on the row id (no primary key was - defined), then row_search_for_mysql() will always - retrieve the row id to a special buffer in the - prebuilt struct. */ + row_sel_convert_mysql_key_to_innobase( + prebuilt->search_tuple, + prebuilt->srch_key_val1, + prebuilt->srch_key_val_len, + index, + (byte*) key_ptr, + (ulint) key_len, + prebuilt->trx); + DBUG_ASSERT(prebuilt->search_tuple->n_fields > 0); + } else { + /* We position the cursor to the last or the first entry + in the index */ - fetch_primary_key_cols = TRUE; - } + dtuple_set_n_fields(prebuilt->search_tuple, 0); } - clust_index = dict_table_get_first_index(prebuilt->table); + mode = convert_search_mode_to_innobase(find_flag); - index = whole_row ? clust_index : prebuilt->index; + match_mode = 0; - prebuilt->need_to_access_clustered = (index == clust_index); + if (find_flag == HA_READ_KEY_EXACT) { - /* Below we check column by column if we need to access - the clustered index. */ + match_mode = ROW_SEL_EXACT; - n_stored_fields= (ulint)table->s->stored_fields; /* number of stored columns */ + } else if (find_flag == HA_READ_PREFIX + || find_flag == HA_READ_PREFIX_LAST) { - if (!prebuilt->mysql_template) { - prebuilt->mysql_template = (mysql_row_templ_t*) - mem_alloc(n_stored_fields * sizeof(mysql_row_templ_t)); + match_mode = ROW_SEL_EXACT_PREFIX; } - prebuilt->template_type = whole_row - ? ROW_MYSQL_WHOLE_ROW : ROW_MYSQL_REC_FIELDS; - prebuilt->null_bitmap_len = table->s->null_bytes; + last_match_mode = (uint) match_mode; - /* Prepare to build prebuilt->mysql_template[]. */ - prebuilt->templ_contains_blob = FALSE; - prebuilt->mysql_prefix_len = 0; - prebuilt->n_template = 0; - prebuilt->idx_cond_n_cols = 0; + if (mode != PAGE_CUR_UNSUPP) { - /* Note that in InnoDB, i is the column number in the table. - MySQL calls columns 'fields'. */ + innobase_srv_conc_enter_innodb(prebuilt->trx); - if (active_index != MAX_KEY && active_index == pushed_idx_cond_keyno) { - /* Push down an index condition or an end_range check. */ - for (i = 0, sql_idx = 0; i < n_stored_fields; i++, sql_idx++) { + ret = row_search_for_mysql((byte*) buf, mode, prebuilt, + match_mode, 0); - while (!table->field[sql_idx]->stored_in_db) { - sql_idx++; - } - - const ibool index_contains - = dict_index_contains_col_or_prefix(index, i); + innobase_srv_conc_exit_innodb(prebuilt->trx); + } else { - /* Test if an end_range or an index condition - refers to the field. 
Note that "index" and - "index_contains" may refer to the clustered index. - Index condition pushdown is relative to prebuilt->index - (the index that is being looked up first). */ + ret = DB_UNSUPPORTED; + } - /* When join_read_always_key() invokes this - code via handler::ha_index_init() and - ha_innobase::index_init(), end_range is not - yet initialized. Because of that, we must - always check for index_contains, instead of - the subset - field->part_of_key.is_set(active_index) - which would be acceptable if end_range==NULL. */ - if (index == prebuilt->index - ? index_contains - : dict_index_contains_col_or_prefix( - prebuilt->index, i)) { - /* Needed in ICP */ - const Field* field; - mysql_row_templ_t* templ; + if (UNIV_UNLIKELY(srv_pass_corrupt_table <= 1 && share + && share->ib_table && share->ib_table->is_corrupt)) { + DBUG_RETURN(HA_ERR_CRASHED); + } - if (whole_row) { - field = table->field[sql_idx]; - } else { - field = build_template_needs_field( - index_contains, - prebuilt->read_just_key, - fetch_all_in_key, - fetch_primary_key_cols, - index, table, i, sql_idx); - if (!field) { - continue; - } - } + switch (ret) { + case DB_SUCCESS: + error = 0; + table->status = 0; + if (prebuilt->table->is_system_db) { + srv_stats.n_system_rows_read.add( + (size_t) prebuilt->trx->id, 1); + } else { + srv_stats.n_rows_read.add( + (size_t) prebuilt->trx->id, 1); + } + break; + case DB_RECORD_NOT_FOUND: + error = HA_ERR_KEY_NOT_FOUND; + table->status = STATUS_NOT_FOUND; + break; + case DB_END_OF_INDEX: + error = HA_ERR_KEY_NOT_FOUND; + table->status = STATUS_NOT_FOUND; + break; + case DB_TABLESPACE_DELETED: - templ = build_template_field( - prebuilt, clust_index, index, - table, field, i); - prebuilt->idx_cond_n_cols++; - ut_ad(prebuilt->idx_cond_n_cols - == prebuilt->n_template); + ib_senderrf( + prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_DISCARDED, + table->s->table_name.str); - if (index == prebuilt->index) { - templ->icp_rec_field_no - = templ->rec_field_no; - } else { - templ->icp_rec_field_no - = dict_index_get_nth_col_pos( - prebuilt->index, i); - } + table->status = STATUS_NOT_FOUND; + error = HA_ERR_NO_SUCH_TABLE; + break; + case DB_TABLESPACE_NOT_FOUND: - if (dict_index_is_clust(prebuilt->index)) { - ut_ad(templ->icp_rec_field_no - != ULINT_UNDEFINED); - /* If the primary key includes - a column prefix, use it in - index condition pushdown, - because the condition is - evaluated before fetching any - off-page (externally stored) - columns. */ - if (templ->icp_rec_field_no - < prebuilt->index->n_uniq) { - /* This is a key column; - all set. */ - continue; - } - } else if (templ->icp_rec_field_no - != ULINT_UNDEFINED) { - continue; - } + ib_senderrf( + prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_MISSING, MYF(0), + table->s->table_name.str); - /* This is a column prefix index. - The column prefix can be used in - an end_range comparison. */ + table->status = STATUS_NOT_FOUND; + error = HA_ERR_NO_SUCH_TABLE; + break; + default: + error = convert_error_code_to_mysql( + ret, prebuilt->table->flags, user_thd); - templ->icp_rec_field_no - = dict_index_get_nth_col_or_prefix_pos( - prebuilt->index, i, TRUE); - ut_ad(templ->icp_rec_field_no - != ULINT_UNDEFINED); + table->status = STATUS_NOT_FOUND; + break; + } - /* Index condition pushdown can be used on - all columns of a secondary index, and on - the PRIMARY KEY columns. 
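As a rough illustration of the eligibility rule applied in the template-building code above (not InnoDB's actual implementation): on a secondary index every indexed column may take part in index condition pushdown, while on the clustered index only the first n_uniq (primary key) columns qualify; column prefixes are kept only for end_range comparisons. The helper below uses illustrative names and a simplified column-position model.

#include <cstddef>
#include <cassert>

// Simplified model: position of a column inside the index being scanned,
// or NOT_IN_INDEX if the index does not contain it. Names are illustrative.
static const size_t NOT_IN_INDEX = (size_t) -1;

// Returns true when a pushed-down condition may be evaluated on this
// column before the full (clustered) record is fetched.
static bool usable_for_icp(bool index_is_clustered, size_t pos_in_index,
                           size_t n_uniq /* number of PK columns */)
{
    if (pos_in_index == NOT_IN_INDEX) {
        return false;                       // column not in the scanned index
    }
    if (!index_is_clustered) {
        return true;                        // any secondary-index column is fine
    }
    return pos_in_index < n_uniq;           // clustered index: PK columns only
}

int main()
{
    assert(usable_for_icp(false, 2, 1));    // secondary index, third column
    assert(!usable_for_icp(true, 3, 2));    // clustered index, non-PK column
    assert(usable_for_icp(true, 1, 2));     // clustered index, PK column
    return 0;
}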
*/ - /* TODO: enable this assertion - (but first ensure that end_range is - valid here and use an accurate condition - for end_range) - ut_ad(!dict_index_is_clust(prebuilt->index) - || templ->rec_field_no - < prebuilt->index->n_uniq); - */ - } - } + DBUG_RETURN(error); +} - ut_ad(prebuilt->idx_cond_n_cols > 0); - ut_ad(prebuilt->idx_cond_n_cols == prebuilt->n_template); +/*******************************************************************//** +The following functions works like index_read, but it find the last +row with the current key value or prefix. +@return 0, HA_ERR_KEY_NOT_FOUND, or an error code */ +UNIV_INTERN +int +ha_innobase::index_read_last( +/*=========================*/ + uchar* buf, /*!< out: fetched row */ + const uchar* key_ptr,/*!< in: key value, or a prefix of a full + key value */ + uint key_len)/*!< in: length of the key val or prefix + in bytes */ +{ + return(index_read(buf, key_ptr, key_len, HA_READ_PREFIX_LAST)); +} - /* Include the fields that are not needed in index condition - pushdown. */ - for (i = 0, sql_idx = 0; i < n_stored_fields; i++, sql_idx++) { +/********************************************************************//** +Get the index for a handle. Does not change active index. +@return NULL or index instance. */ +UNIV_INTERN +dict_index_t* +ha_innobase::innobase_get_index( +/*============================*/ + uint keynr) /*!< in: use this index; MAX_KEY means always + clustered index, even if it was internally + generated by InnoDB */ +{ + KEY* key = 0; + dict_index_t* index = 0; - while (!table->field[sql_idx]->stored_in_db) { - sql_idx++; - } - - const ibool index_contains - = dict_index_contains_col_or_prefix(index, i); + DBUG_ENTER("innobase_get_index"); - if (index == prebuilt->index - ? !index_contains - : !dict_index_contains_col_or_prefix( - prebuilt->index, i)) { - /* Not needed in ICP */ - const Field* field; + if (keynr != MAX_KEY && table->s->keys > 0) { + key = table->key_info + keynr; - if (whole_row) { - field = table->field[sql_idx]; - } else { - field = build_template_needs_field( - index_contains, - prebuilt->read_just_key, - fetch_all_in_key, - fetch_primary_key_cols, - index, table, i, sql_idx); - if (!field) { - continue; - } - } + index = innobase_index_lookup(share, keynr); - build_template_field(prebuilt, - clust_index, index, - table, field, i); - } - } + if (index) { - prebuilt->idx_cond = this; - } else { - /* No index condition pushdown */ - prebuilt->idx_cond = NULL; + if (!key || ut_strcmp(index->name, key->name) != 0) { + fprintf(stderr, "InnoDB: [Error] Index for key no %u" + " mysql name %s , InnoDB name %s for table %s\n", + keynr, key ? key->name : "NULL", + index->name, + prebuilt->table->name); - for (i = 0, sql_idx = 0; i < n_stored_fields; i++, sql_idx++) { - const Field* field; + for(ulint i=0; i < table->s->keys; i++) { + index = innobase_index_lookup(share, i); + key = table->key_info + keynr; - while (!table->field[sql_idx]->stored_in_db) { - sql_idx++; - } + if (index) { - if (whole_row) { - field = table->field[sql_idx]; - } else { - field = build_template_needs_field( - dict_index_contains_col_or_prefix( - index, i), - prebuilt->read_just_key, - fetch_all_in_key, - fetch_primary_key_cols, - index, table, i, sql_idx); - if (!field) { - continue; + fprintf(stderr, "InnoDB: [Note] Index for key no %u" + " mysql name %s , InnoDB name %s for table %s\n", + keynr, key ? 
key->name : "NULL", + index->name, + prebuilt->table->name); + } } } @@@ -10266,1438 -7066,808 +10270,1458 @@@ ha_innobase::ft_init( } /**********************************************************************//** -Checks which fields have changed in a row and stores information -of them to an update vector. -@return error number or 0 */ -static -int -calc_row_difference( -/*================*/ - upd_t* uvect, /*!< in/out: update vector */ - uchar* old_row, /*!< in: old row in MySQL format */ - uchar* new_row, /*!< in: new row in MySQL format */ - TABLE* table, /*!< in: table in MySQL data - dictionary */ - uchar* upd_buff, /*!< in: buffer to use */ - ulint buff_len, /*!< in: buffer length */ - row_prebuilt_t* prebuilt, /*!< in: InnoDB prebuilt struct */ - THD* thd) /*!< in: user thread */ +Initialize FT index scan +@return FT_INFO structure if successful or NULL */ +UNIV_INTERN +FT_INFO* +ha_innobase::ft_init_ext( +/*=====================*/ + uint flags, /* in: */ + uint keynr, /* in: */ + String* key) /* in: */ { - uchar* original_upd_buff = upd_buff; - Field* field; - enum_field_types field_mysql_type; - uint n_fields; - ulint o_len; - ulint n_len; - ulint col_pack_len; - const byte* new_mysql_row_col; - const byte* o_ptr; - const byte* n_ptr; - byte* buf; - upd_field_t* ufield; - ulint col_type; - ulint n_changed = 0; - dfield_t dfield; - dict_index_t* clust_index; - uint sql_idx, innodb_idx= 0; - - n_fields = table->s->fields; - clust_index = dict_table_get_first_index(prebuilt->table); + trx_t* trx; + dict_table_t* ft_table; + dberr_t error; + byte* query = (byte*) key->ptr(); + ulint query_len = key->length(); + const CHARSET_INFO* char_set = key->charset(); + NEW_FT_INFO* fts_hdl = NULL; + dict_index_t* index; + fts_result_t* result; + char buf_tmp[8192]; + ulint buf_tmp_used; + uint num_errors; + + if (fts_enable_diag_print) { + fprintf(stderr, "keynr=%u, '%.*s'\n", + keynr, (int) key->length(), (byte*) key->ptr()); + + if (flags & FT_BOOL) { + fprintf(stderr, "BOOL search\n"); + } else { + fprintf(stderr, "NL search\n"); + } + } - /* We use upd_buff to convert changed fields */ - buf = (byte*) upd_buff; + /* FIXME: utf32 and utf16 are not compatible with some + string function used. So to convert them to uft8 before + proceed. */ + if (strcmp(char_set->csname, "utf32") == 0 + || strcmp(char_set->csname, "utf16") == 0) { + buf_tmp_used = innobase_convert_string( + buf_tmp, sizeof(buf_tmp) - 1, + &my_charset_utf8_general_ci, + query, query_len, (CHARSET_INFO*) char_set, + &num_errors); - for (sql_idx = 0; sql_idx < n_fields; sql_idx++) { - field = table->field[sql_idx]; - if (!field->stored_in_db) - continue; + query = (byte*) buf_tmp; + query_len = buf_tmp_used; + query[query_len] = 0; + } - o_ptr = (const byte*) old_row + get_field_offset(table, field); - n_ptr = (const byte*) new_row + get_field_offset(table, field); + trx = prebuilt->trx; - /* Use new_mysql_row_col and col_pack_len save the values */ + /* FTS queries are not treated as autocommit non-locking selects. + This is because the FTS implementation can acquire locks behind + the scenes. This has not been verified but it is safer to treat + them as regular read only transactions for now. */ - new_mysql_row_col = n_ptr; - col_pack_len = field->pack_length(); + if (!trx_is_started(trx)) { + ++trx->will_lock; + } - o_len = col_pack_len; - n_len = col_pack_len; + ft_table = prebuilt->table; - /* We use o_ptr and n_ptr to dig up the actual data for - comparison. 
*/ + /* Table does not have an FTS index */ + if (!ft_table->fts || ib_vector_is_empty(ft_table->fts->indexes)) { + my_error(ER_TABLE_HAS_NO_FT, MYF(0)); + return(NULL); + } - field_mysql_type = field->type(); + /* If tablespace is discarded, we should return here */ + if (dict_table_is_discarded(ft_table)) { + my_error(ER_NO_SUCH_TABLE, MYF(0), table->s->db.str, + table->s->table_name.str); + return(NULL); + } - col_type = prebuilt->table->cols[innodb_idx].mtype; + if (keynr == NO_SUCH_KEY) { + /* FIXME: Investigate the NO_SUCH_KEY usage */ + index = (dict_index_t*) ib_vector_getp(ft_table->fts->indexes, 0); + } else { + index = innobase_get_index(keynr); + } - switch (col_type) { + if (!index || index->type != DICT_FTS) { + my_error(ER_TABLE_HAS_NO_FT, MYF(0)); + return(NULL); + } - case DATA_BLOB: - o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len); - n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len); + if (!(ft_table->fts->fts_status & ADDED_TABLE_SYNCED)) { + fts_init_index(ft_table, FALSE); - break; + ft_table->fts->fts_status |= ADDED_TABLE_SYNCED; + } - case DATA_VARCHAR: - case DATA_BINARY: - case DATA_VARMYSQL: - if (field_mysql_type == MYSQL_TYPE_VARCHAR) { - /* This is a >= 5.0.3 type true VARCHAR where - the real payload data length is stored in - 1 or 2 bytes */ + error = fts_query(trx, index, flags, query, query_len, &result); - o_ptr = row_mysql_read_true_varchar( - &o_len, o_ptr, - (ulint) - (((Field_varstring*)field)->length_bytes)); + if (error != DB_SUCCESS) { + my_error(convert_error_code_to_mysql(error, 0, NULL), + MYF(0)); + return(NULL); + } - n_ptr = row_mysql_read_true_varchar( - &n_len, n_ptr, - (ulint) - (((Field_varstring*)field)->length_bytes)); - } + /* Allocate FTS handler, and instantiate it before return */ + fts_hdl = static_cast<NEW_FT_INFO*>(my_malloc(sizeof(NEW_FT_INFO), + MYF(0))); - break; - default: - ; - } + fts_hdl->please = const_cast<_ft_vft*>(&ft_vft_result); + fts_hdl->could_you = const_cast<_ft_vft_ext*>(&ft_vft_ext_result); + fts_hdl->ft_prebuilt = prebuilt; + fts_hdl->ft_result = result; - if (field->null_ptr) { - if (field_in_record_is_null(table, field, - (char*) old_row)) { - o_len = UNIV_SQL_NULL; - } + /* FIXME: Re-evluate the condition when Bug 14469540 + is resolved */ + prebuilt->in_fts_query = true; - if (field_in_record_is_null(table, field, - (char*) new_row)) { - n_len = UNIV_SQL_NULL; - } - } + return((FT_INFO*) fts_hdl); +} - if (o_len != n_len || (o_len != UNIV_SQL_NULL && - 0 != memcmp(o_ptr, n_ptr, o_len))) { - /* The field has changed */ +/*****************************************************************//** +Copy a cached MySQL row. +If requested, also avoids overwriting non-read columns. +@param[out] buf Row in MySQL format. +@param[in] cached_row Which row to copy. +@param[in] rec_len Record length. */ +void +ha_innobase::copy_cached_row( + uchar* buf, + const uchar* cached_row, + uint rec_len) +{ + if (prebuilt->keep_other_fields_on_keyread) { + row_sel_copy_cached_fields_for_mysql(buf, cached_row, + prebuilt); + } else { + memcpy(buf, cached_row, rec_len); + } +} - ufield = uvect->fields + n_changed; - UNIV_MEM_INVALID(ufield, sizeof *ufield); - /* Let us use a dummy dfield to make the conversion - from the MySQL column format to the InnoDB format */ +/*****************************************************************//** +Set up search tuple for a query through FTS_DOC_ID_INDEX on +supplied Doc ID. 
This is used by MySQL to retrieve the documents +once the search result (Doc IDs) is available */ +static +void +innobase_fts_create_doc_id_key( +/*===========================*/ + dtuple_t* tuple, /* in/out: prebuilt->search_tuple */ + const dict_index_t* + index, /* in: index (FTS_DOC_ID_INDEX) */ + doc_id_t* doc_id) /* in/out: doc id to search, value + could be changed to storage format + used for search. */ +{ + doc_id_t temp_doc_id; + dfield_t* dfield = dtuple_get_nth_field(tuple, 0); - if (n_len != UNIV_SQL_NULL) { - dict_col_copy_type(prebuilt->table->cols + innodb_idx, - dfield_get_type(&dfield)); + ut_a(dict_index_get_n_unique(index) == 1); - buf = row_mysql_store_col_in_innobase_format( - &dfield, - (byte*)buf, - TRUE, - new_mysql_row_col, - col_pack_len, - dict_table_is_comp(prebuilt->table)); - dfield_copy(&ufield->new_val, &dfield); - } else { - dfield_set_null(&ufield->new_val); - } + dtuple_set_n_fields(tuple, index->n_fields); + dict_index_copy_types(tuple, index, index->n_fields); - ufield->exp = NULL; - ufield->orig_len = 0; - ufield->field_no = dict_col_get_clust_pos( - &prebuilt->table->cols[innodb_idx], clust_index); - n_changed++; - } - if (field->stored_in_db) - innodb_idx++; - } +#ifdef UNIV_DEBUG + /* The unique Doc ID field should be an eight-bytes integer */ + dict_field_t* field = dict_index_get_nth_field(index, 0); + ut_a(field->col->mtype == DATA_INT); + ut_ad(sizeof(*doc_id) == field->fixed_len); + ut_ad(innobase_strcasecmp(index->name, FTS_DOC_ID_INDEX_NAME) == 0); +#endif /* UNIV_DEBUG */ - uvect->n_fields = n_changed; - uvect->info_bits = 0; + /* Convert to storage byte order */ + mach_write_to_8(reinterpret_cast<byte*>(&temp_doc_id), *doc_id); + *doc_id = temp_doc_id; + dfield_set_data(dfield, doc_id, sizeof(*doc_id)); - ut_a(buf <= (byte*)original_upd_buff + buff_len); + dtuple_set_n_fields_cmp(tuple, 1); - return(0); + for (ulint i = 1; i < index->n_fields; i++) { + dfield = dtuple_get_nth_field(tuple, i); + dfield_set_null(dfield); + } } -#ifdef WITH_WSREP -static + +/**********************************************************************//** +Fetch next result from the FT result set +@return error code */ +UNIV_INTERN int -wsrep_calc_row_hash( -/*================*/ - byte* digest, /*!< in/out: md5 sum */ - const uchar* row, /*!< in: row in MySQL format */ - TABLE* table, /*!< in: table in MySQL data - dictionary */ - row_prebuilt_t* prebuilt, /*!< in: InnoDB prebuilt struct */ - THD* thd) /*!< in: user thread */ +ha_innobase::ft_read( +/*=================*/ + uchar* buf) /*!< in/out: buf contain result row */ { - Field* field; - enum_field_types field_mysql_type; - uint n_fields; - ulint len; - const byte* ptr; - ulint col_type; - uint i; + fts_result_t* result; + int error; + row_prebuilt_t* ft_prebuilt; - my_MD5Context ctx; - my_MD5Init (&ctx); + ft_prebuilt = ((NEW_FT_INFO*) ft_handler)->ft_prebuilt; - n_fields = table->s->fields; + ut_a(ft_prebuilt == prebuilt); - for (i = 0; i < n_fields; i++) { - byte null_byte=0; - byte true_byte=1; + result = ((NEW_FT_INFO*) ft_handler)->ft_result; + + if (result->current == NULL) { + /* This is the case where the FTS query did not + contain and matching documents. */ + if (result->rankings_by_id != NULL) { + /* Now that we have the complete result, we + need to sort the document ids on their rank + calculation. 
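The Doc ID placed into the search tuple above is first converted to InnoDB's storage byte order (big-endian) with mach_write_to_8(). A self-contained equivalent of that conversion, assuming a 64-bit doc id, could look like the following sketch.

#include <cstdint>
#include <cstdio>

// Write an 8-byte integer in big-endian (InnoDB storage) order,
// analogous to mach_write_to_8().
static void write_be64(unsigned char* dst, uint64_t value)
{
    for (int i = 7; i >= 0; i--) {
        dst[i] = (unsigned char) (value & 0xFF);
        value >>= 8;
    }
}

int main()
{
    unsigned char buf[8];
    write_be64(buf, 0x0102030405060708ULL);
    for (int i = 0; i < 8; i++) {
        printf("%02x ", buf[i]);            // 01 02 03 04 05 06 07 08
    }
    printf("\n");
    return 0;
}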
*/ + + fts_query_sort_result_on_rank(result); + + result->current = const_cast<ib_rbt_node_t*>( + rbt_first(result->rankings_by_rank)); + } else { + ut_a(result->current == NULL); + } + } else { + result->current = const_cast<ib_rbt_node_t*>( + rbt_next(result->rankings_by_rank, result->current)); + } + +next_record: + + if (result->current != NULL) { + dict_index_t* index; + dtuple_t* tuple = prebuilt->search_tuple; + doc_id_t search_doc_id; + + /* If we only need information from result we can return + without fetching the table row */ + if (ft_prebuilt->read_just_key) { + table->status= 0; + return(0); + } + + index = dict_table_get_index_on_name( + prebuilt->table, FTS_DOC_ID_INDEX_NAME); - field = table->field[i]; + /* Must find the index */ + ut_a(index); - ptr = (const byte*) row + get_field_offset(table, field); - len = field->pack_length(); + /* Switch to the FTS doc id index */ + prebuilt->index = index; - field_mysql_type = field->type(); + fts_ranking_t* ranking = rbt_value( + fts_ranking_t, result->current); - col_type = prebuilt->table->cols[i].mtype; + search_doc_id = ranking->doc_id; - switch (col_type) { + /* We pass a pointer of search_doc_id because it will be + converted to storage byte order used in the search + tuple. */ + innobase_fts_create_doc_id_key(tuple, index, &search_doc_id); - case DATA_BLOB: - ptr = row_mysql_read_blob_ref(&len, ptr, len); + innobase_srv_conc_enter_innodb(prebuilt->trx); + + dberr_t ret = row_search_for_mysql( + (byte*) buf, PAGE_CUR_GE, prebuilt, ROW_SEL_EXACT, 0); + innobase_srv_conc_exit_innodb(prebuilt->trx); + + switch (ret) { + case DB_SUCCESS: + error = 0; + table->status = 0; + break; + case DB_RECORD_NOT_FOUND: + result->current = const_cast<ib_rbt_node_t*>( + rbt_next(result->rankings_by_rank, + result->current)); + + if (!result->current) { + /* exhaust the result set, should return + HA_ERR_END_OF_FILE just like + ha_innobase::general_fetch() and/or + ha_innobase::index_first() etc. */ + error = HA_ERR_END_OF_FILE; + table->status = STATUS_NOT_FOUND; + } else { + goto next_record; + } break; + case DB_END_OF_INDEX: + error = HA_ERR_END_OF_FILE; + table->status = STATUS_NOT_FOUND; + break; + case DB_TABLESPACE_DELETED: - case DATA_VARCHAR: - case DATA_BINARY: - case DATA_VARMYSQL: - if (field_mysql_type == MYSQL_TYPE_VARCHAR) { - /* This is a >= 5.0.3 type true VARCHAR where - the real payload data length is stored in - 1 or 2 bytes */ + ib_senderrf( + prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_DISCARDED, + table->s->table_name.str); - ptr = row_mysql_read_true_varchar( - &len, ptr, - (ulint) - (((Field_varstring*)field)->length_bytes)); + table->status = STATUS_NOT_FOUND; + error = HA_ERR_NO_SUCH_TABLE; + break; + case DB_TABLESPACE_NOT_FOUND: - } + ib_senderrf( + prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_MISSING, + table->s->table_name.str); + table->status = STATUS_NOT_FOUND; + error = HA_ERR_NO_SUCH_TABLE; break; default: - ; - } + error = convert_error_code_to_mysql( + ret, 0, user_thd); - if (field->null_ptr && - field_in_record_is_null(table, field, (char*) row)) { - my_MD5Update (&ctx, &null_byte, 1); - } else { - my_MD5Update (&ctx, &true_byte, 1); - my_MD5Update (&ctx, ptr, len); + table->status = STATUS_NOT_FOUND; + break; } + + return(error); } - my_MD5Final (digest, &ctx); - return(0); + return(HA_ERR_END_OF_FILE); } -#endif /* WITH_WSREP */ -/**********************************************************************//** -Updates a row given as a parameter to a new value. 
Note that we are given -whole rows, not just the fields which are updated: this incurs some -overhead for CPU when we check which fields are actually updated. -TODO: currently InnoDB does not prevent the 'Halloween problem': -in a searched update a single row can get updated several times -if its index columns are updated! -@return error number or 0 */ -UNIV_INTERN -int -ha_innobase::update_row( -/*====================*/ - const uchar* old_row, /*!< in: old row in MySQL format */ - uchar* new_row) /*!< in: new row in MySQL format */ + +/************************************************************************* +*/ + +void +ha_innobase::ft_end() { - upd_t* uvect; - int error = 0; - trx_t* trx = thd_to_trx(user_thd); + fprintf(stderr, "ft_end()\n"); - DBUG_ENTER("ha_innobase::update_row"); + rnd_end(); +} +#ifdef WITH_WSREP +extern dict_index_t* +wsrep_dict_foreign_find_index( + dict_table_t* table, + const char** col_names, + const char** columns, + ulint n_cols, + dict_index_t* types_idx, + ibool check_charsets, + ulint check_null); - ut_a(prebuilt->trx == trx); ++inline ++const char* ++wsrep_key_type_to_str(wsrep_key_type type) ++{ ++ switch (type) { ++ case WSREP_KEY_SHARED: ++ return "shared"; ++ case WSREP_KEY_SEMI: ++ return "semi"; ++ case WSREP_KEY_EXCLUSIVE: ++ return "exclusive"; ++ }; ++ return "unknown"; ++} - extern dberr_t - if (upd_buf == NULL) { - ut_ad(upd_buf_size == 0); ++ulint +wsrep_append_foreign_key( +/*===========================*/ + trx_t* trx, /*!< in: trx */ + dict_foreign_t* foreign, /*!< in: foreign key constraint */ + const rec_t* rec, /*!<in: clustered index record */ + dict_index_t* index, /*!<in: clustered index */ + ibool referenced, /*!<in: is check for referenced table */ - ibool shared) /*!<in: is shared access */ ++ wsrep_key_type key_type) /*!< in: access type of this key ++ (shared, exclusive, semi...) */ +{ + ut_a(trx); + THD* thd = (THD*)trx->mysql_thd; + ulint rcode = DB_SUCCESS; + char cache_key[513] = {'\0'}; + int cache_key_len; + bool const copy = true; - /* Create a buffer for packing the fields of a record. Why - table->stored_rec_length did not work here? Obviously, because char - fields when packed actually became 1 byte longer, when we also - stored the string length as the first byte. */ + if (!wsrep_on(trx->mysql_thd) || + wsrep_thd_exec_mode(thd) != LOCAL_STATE) + return DB_SUCCESS; - upd_buf_size = table->s->stored_rec_length + table->s->max_key_length - + MAX_REF_PARTS * 3; - upd_buf = (uchar*) my_malloc(upd_buf_size, MYF(MY_WME)); - if (upd_buf == NULL) { - upd_buf_size = 0; - DBUG_RETURN(HA_ERR_OUT_OF_MEM); + if (!thd || !foreign || + (!foreign->referenced_table && !foreign->foreign_table)) + { + WSREP_INFO("FK: %s missing in: %s", + (!thd) ? "thread" : + ((!foreign) ? "constraint" : + ((!foreign->referenced_table) ? + "referenced table" : "foreign table")), + (thd && wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void"); + return DB_ERROR; + } + + if ( !((referenced) ? + foreign->referenced_table : foreign->foreign_table)) + { + WSREP_DEBUG("pulling %s table into cache", + (referenced) ? 
"referenced" : "foreign"); + mutex_enter(&(dict_sys->mutex)); + if (referenced) + { + foreign->referenced_table = + dict_table_get_low( + foreign->referenced_table_name_lookup); + if (foreign->referenced_table) + { + foreign->referenced_index = + wsrep_dict_foreign_find_index( + foreign->referenced_table, NULL, + foreign->referenced_col_names, + foreign->n_fields, + foreign->foreign_index, + TRUE, FALSE); + } + } + else + { + foreign->foreign_table = + dict_table_get_low( + foreign->foreign_table_name_lookup); + if (foreign->foreign_table) + { + foreign->foreign_index = + wsrep_dict_foreign_find_index( + foreign->foreign_table, NULL, + foreign->foreign_col_names, + foreign->n_fields, + foreign->referenced_index, + TRUE, FALSE); + } + } + mutex_exit(&(dict_sys->mutex)); + } + + if ( !((referenced) ? + foreign->referenced_table : foreign->foreign_table)) + { + WSREP_WARN("FK: %s missing in query: %s", + (!foreign->referenced_table) ? + "referenced table" : "foreign table", + (wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void"); + return DB_ERROR; + } + byte key[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; + ulint len = WSREP_MAX_SUPPORTED_KEY_LENGTH; + + dict_index_t *idx_target = (referenced) ? + foreign->referenced_index : index; + dict_index_t *idx = (referenced) ? + UT_LIST_GET_FIRST(foreign->referenced_table->indexes) : + UT_LIST_GET_FIRST(foreign->foreign_table->indexes); + int i = 0; + while (idx != NULL && idx != idx_target) { + if (innobase_strcasecmp (idx->name, innobase_index_reserve_name) != 0) { + i++; } + idx = UT_LIST_GET_NEXT(indexes, idx); + } + ut_a(idx); + key[0] = (char)i; + + rcode = wsrep_rec_get_foreign_key( + &key[1], &len, rec, index, idx, + wsrep_protocol_version > 1); ++ + if (rcode != DB_SUCCESS) { + WSREP_ERROR( - "FK key set failed: %lu (%lu %lu), index: %s %s, %s", - rcode, referenced, shared, ++ "FK key set failed: %lu (%lu %s), index: %s %s, %s", ++ rcode, referenced, wsrep_key_type_to_str(key_type), + (index && index->name) ? index->name : + "void index", + (index && index->table_name) ? index->table_name : + "void table", + wsrep_thd_query(thd)); + return DB_ERROR; + } ++ + strncpy(cache_key, + (wsrep_protocol_version > 1) ? + ((referenced) ? + foreign->referenced_table->name : + foreign->foreign_table->name) : + foreign->foreign_table->name, sizeof(cache_key) - 1); + cache_key_len = strlen(cache_key); +#ifdef WSREP_DEBUG_PRINT + ulint j; + fprintf(stderr, "FK parent key, table: %s %s len: %lu ", + cache_key, (shared) ? "shared" : "exclusive", len+1); + for (j=0; j<len+1; j++) { + fprintf(stderr, " %hhX, ", key[j]); + } + fprintf(stderr, "\n"); +#endif + char *p = strchr(cache_key, '/'); + if (p) { + *p = '\0'; + } else { + WSREP_WARN("unexpected foreign key table %s %s", + foreign->referenced_table->name, + foreign->foreign_table->name); + } + + wsrep_buf_t wkey_part[3]; + wsrep_key_t wkey = {wkey_part, 3}; + if (!wsrep_prepare_key_for_innodb( + (const uchar*)cache_key, + cache_key_len + 1, + (const uchar*)key, len+1, + wkey_part, + (size_t*)&wkey.key_parts_num)) { + WSREP_WARN("key prepare failed for cascaded FK: %s", + (wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void"); + return DB_ERROR; + } + rcode = (int)wsrep->append_key( + wsrep, + wsrep_ws_handle(thd, trx), + &wkey, + 1, - shared ? WSREP_KEY_SHARED : WSREP_KEY_EXCLUSIVE, ++ key_type, + copy); + if (rcode) { + DBUG_PRINT("wsrep", ("row key failed: %lu", rcode)); + WSREP_ERROR("Appending cascaded fk row key failed: %s, %lu", + (wsrep_thd_query(thd)) ? 
+ wsrep_thd_query(thd) : "void", rcode); + return DB_ERROR; + } + + return DB_SUCCESS; +} + +static int +wsrep_append_key( +/*==================*/ + THD *thd, + trx_t *trx, + TABLE_SHARE *table_share, + TABLE *table, + const char* key, + uint16_t key_len, - bool shared ++ wsrep_key_type key_type /*!< in: access type of this key ++ (shared, exclusive, semi...) */ +) +{ + DBUG_ENTER("wsrep_append_key"); + bool const copy = true; +#ifdef WSREP_DEBUG_PRINT + fprintf(stderr, "%s conn %ld, trx %llu, keylen %d, table %s\n Query: %s ", - (shared) ? "Shared" : "Exclusive", - wsrep_thd_thread_id(thd), (long long)trx->id, key_len, ++ wsrep_key_type_to_str(key_type), ++ wsrep_thd_thread_id(thd), trx->id, key_len, + table_share->table_name.str, wsrep_thd_query(thd)); + for (int i=0; i<key_len; i++) { + fprintf(stderr, "%hhX, ", key[i]); } - - ha_statistic_increment(&SSV::ha_update_count); - - if (!share->ib_table || share->ib_table->is_corrupt) { - DBUG_RETURN(HA_ERR_CRASHED); + fprintf(stderr, "\n"); +#endif + wsrep_buf_t wkey_part[3]; + wsrep_key_t wkey = {wkey_part, 3}; + if (!wsrep_prepare_key_for_innodb( + (const uchar*)table_share->table_cache_key.str, + table_share->table_cache_key.length, + (const uchar*)key, key_len, + wkey_part, + (size_t*)&wkey.key_parts_num)) { + WSREP_WARN("key prepare failed for: %s", + (wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void"); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) - table->timestamp_field->set_time(); - - if (prebuilt->upd_node) { - uvect = prebuilt->upd_node->update; - } else { - uvect = row_get_prebuilt_update_vector(prebuilt); + int rcode = (int)wsrep->append_key( + wsrep, + wsrep_ws_handle(thd, trx), + &wkey, + 1, - shared ? WSREP_KEY_SHARED : WSREP_KEY_EXCLUSIVE, ++ key_type, + copy); + if (rcode) { + DBUG_PRINT("wsrep", ("row key failed: %d", rcode)); + WSREP_WARN("Appending row key failed: %s, %d", + (wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void", rcode); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } + DBUG_RETURN(0); +} - /* Build an update vector from the modified fields in the rows - (uses upd_buf of the handle) */ +extern void compute_md5_hash(char *digest, const char *buf, int len); +#define MD5_HASH compute_md5_hash - calc_row_difference(uvect, (uchar*) old_row, new_row, table, - upd_buf, upd_buf_size, prebuilt, user_thd); +static bool +referenced_by_foreign_key2(dict_table_t* table, + dict_index_t* index) { + ut_ad(table != NULL); + ut_ad(index != NULL); - /* This is not a delete */ - prebuilt->upd_node->is_delete = FALSE; + const dict_foreign_set* fks = &table->referenced_set; + for (dict_foreign_set::const_iterator it = fks->begin(); + it != fks->end(); + ++it) + { + dict_foreign_t* foreign = *it; + if (foreign->referenced_index != index) { + continue; + } + ut_ad(table == foreign->referenced_table); + return true; + } + return false; +} - ut_a(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW); +int +ha_innobase::wsrep_append_keys( +/*==================*/ + THD *thd, - bool shared, ++ wsrep_key_type key_type, /*!< in: access type of this key ++ (shared, exclusive, semi...) 
*/ + const uchar* record0, /* in: row in MySQL format */ + const uchar* record1) /* in: row in MySQL format */ +{ + int rcode; + DBUG_ENTER("wsrep_append_keys"); - innodb_srv_conc_enter_innodb(trx); + bool key_appended = false; + trx_t *trx = thd_to_trx(thd); - error = row_update_for_mysql((byte*) old_row, prebuilt); + if (table_share && table_share->tmp_table != NO_TMP_TABLE) { + WSREP_DEBUG("skipping tmp table DML: THD: %lu tmp: %d SQL: %s", + wsrep_thd_thread_id(thd), + table_share->tmp_table, + (wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void"); + DBUG_RETURN(0); + } - /* We need to do some special AUTOINC handling for the following case: + if (wsrep_protocol_version == 0) { + uint len; + char keyval[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; + char *key = &keyval[0]; + ibool is_null; - INSERT INTO t (c1,c2) VALUES(x,y) ON DUPLICATE KEY UPDATE ... + len = wsrep_store_key_val_for_row( + thd, table, 0, key, WSREP_MAX_SUPPORTED_KEY_LENGTH, + record0, prebuilt, &is_null); - We need to use the AUTOINC counter that was actually used by - MySQL in the UPDATE statement, which can be different from the - value used in the INSERT statement.*/ + if (!is_null) { + rcode = wsrep_append_key( + thd, trx, table_share, table, keyval, - len, shared); ++ len, key_type); + if (rcode) DBUG_RETURN(rcode); + } + else + { + WSREP_DEBUG("NULL key skipped (proto 0): %s", + wsrep_thd_query(thd)); + } + } else { + ut_a(table->s->keys <= 256); + uint i; + bool hasPK= false; - if (error == DB_SUCCESS - && table->next_number_field - && new_row == table->record[0] - && thd_sql_command(user_thd) == SQLCOM_INSERT - && trx->duplicates) { + for (i=0; i<table->s->keys; ++i) { + KEY* key_info = table->key_info + i; + if (key_info->flags & HA_NOSAME) { + hasPK = true; + } + } - ulonglong auto_inc; - ulonglong col_max_value; + for (i=0; i<table->s->keys; ++i) { + uint len; + char keyval0[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; + char keyval1[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; + char* key0 = &keyval0[1]; + char* key1 = &keyval1[1]; + KEY* key_info = table->key_info + i; + ibool is_null; - auto_inc = table->next_number_field->val_int(); + dict_index_t* idx = innobase_get_index(i); + dict_table_t* tab = (idx) ? idx->table : NULL; - /* We need the upper limit of the col type to check for - whether we update the table autoinc counter or not. 
*/ - col_max_value = innobase_get_int_col_max_value( - table->next_number_field); + keyval0[0] = (char)i; + keyval1[0] = (char)i; - if (auto_inc <= col_max_value && auto_inc != 0) { + if (!tab) { + WSREP_WARN("MySQL-InnoDB key mismatch %s %s", + table->s->table_name.str, + key_info->name); + } + /* !hasPK == table with no PK, must append all non-unique keys */ + if (!hasPK || key_info->flags & HA_NOSAME || + ((tab && + referenced_by_foreign_key2(tab, idx)) || + (!tab && referenced_by_foreign_key()))) { - ulonglong offset; - ulonglong increment; + len = wsrep_store_key_val_for_row( + thd, table, i, key0, + WSREP_MAX_SUPPORTED_KEY_LENGTH, + record0, prebuilt, &is_null); + if (!is_null) { + rcode = wsrep_append_key( + thd, trx, table_share, table, - keyval0, len+1, shared); ++ keyval0, len+1, key_type); + if (rcode) DBUG_RETURN(rcode); - if (key_info->flags & HA_NOSAME || shared) - offset = prebuilt->autoinc_offset; - increment = prebuilt->autoinc_increment; ++ if (key_info->flags & HA_NOSAME || ++ key_type == WSREP_KEY_SHARED) + key_appended = true; + } + else + { + WSREP_DEBUG("NULL key skipped: %s", + wsrep_thd_query(thd)); + } + if (record1) { + len = wsrep_store_key_val_for_row( + thd, table, i, key1, + WSREP_MAX_SUPPORTED_KEY_LENGTH, + record1, prebuilt, &is_null); + if (!is_null && memcmp(key0, key1, len)) { + rcode = wsrep_append_key( + thd, trx, table_share, + table, - keyval1, len+1, shared); ++ keyval1, len+1, key_type); + if (rcode) DBUG_RETURN(rcode); + } + } + } + } + } - auto_inc = innobase_next_autoinc( - auto_inc, 1, increment, offset, col_max_value); + /* if no PK, calculate hash of full row, to be the key value */ + if (!key_appended && wsrep_certify_nonPK) { + uchar digest[16]; + int rcode; - error = innobase_set_max_autoinc(auto_inc); + wsrep_calc_row_hash(digest, record0, table, prebuilt, thd); + if ((rcode = wsrep_append_key(thd, trx, table_share, table, + (const char*) digest, 16, - shared))) { ++ key_type))) { + DBUG_RETURN(rcode); } - } -#ifdef EXTENDED_FOR_USERSTAT - if (UNIV_LIKELY(error == DB_SUCCESS && !trx->fake_changes)) { - rows_changed++; + if (record1) { + wsrep_calc_row_hash( + digest, record1, table, prebuilt, thd); + if ((rcode = wsrep_append_key(thd, trx, table_share, + table, + (const char*) digest, - 16, shared))) { ++ 16, key_type))) { + DBUG_RETURN(rcode); + } + } + DBUG_RETURN(0); } -#endif - innodb_srv_conc_exit_innodb(trx); - - error = convert_error_code_to_mysql(error, - prebuilt->table->flags, user_thd); + DBUG_RETURN(0); +} +#endif /* WITH_WSREP */ - if (error == 0 /* success */ - && uvect->n_fields == 0 /* no columns were updated */) { +/*********************************************************************//** +Stores a reference to the current row to 'ref' field of the handle. Note +that in the case where we have generated the clustered index for the +table, the function parameter is illogical: we MUST ASSUME that 'record' +is the current 'position' of the handle, because if row ref is actually +the row id internally generated in InnoDB, then 'record' does not contain +it. We just guess that the row id must be for the record where the handle +was positioned the last time. */ +UNIV_INTERN +void +ha_innobase::position( +/*==================*/ + const uchar* record) /*!< in: row in MySQL format */ +{ + uint len; - /* This is the same as success, but instructs - MySQL that the row is not really updated and it - should not increase the count of updated rows. 
- This is fix for http://bugs.mysql.com/29157 */ - error = HA_ERR_RECORD_IS_THE_SAME; - } + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); - /* Tell InnoDB server that there might be work for - utility threads: */ + if (prebuilt->clust_index_was_generated) { + /* No primary key was defined for the table and we + generated the clustered index from row id: the + row reference will be the row id, not any key value + that MySQL knows of */ - innobase_active_small(); + len = DATA_ROW_ID_LEN; - if (!share->ib_table || share->ib_table->is_corrupt) { - DBUG_RETURN(HA_ERR_CRASHED); + memcpy(ref, prebuilt->row_id, len); + } else { + len = store_key_val_for_row(primary_key, (char*) ref, + ref_length, record); } -#ifdef WITH_WSREP - if (!error && wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && - wsrep_on(user_thd)) { - - DBUG_PRINT("wsrep", ("update row key")); + /* We assume that the 'ref' value len is always fixed for the same + table. */ - if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, old_row, - new_row)) { - DBUG_PRINT("wsrep", ("row key failed")); - error = HA_ERR_INTERNAL_ERROR; - goto wsrep_error; - } + if (len != ref_length) { + sql_print_error("Stored ref len is %lu, but table ref len is " + "%lu", (ulong) len, (ulong) ref_length); } -wsrep_error: -#endif - DBUG_RETURN(error); } -/**********************************************************************//** -Deletes a row given as the parameter. -@return error number or 0 */ -UNIV_INTERN -int -ha_innobase::delete_row( -/*====================*/ - const uchar* record) /*!< in: a row in MySQL format */ +/*****************************************************************//** +Check whether there exist a column named as "FTS_DOC_ID", which is +reserved for InnoDB FTS Doc ID +@return true if there exist a "FTS_DOC_ID" column */ +static +bool +create_table_check_doc_id_col( +/*==========================*/ + trx_t* trx, /*!< in: InnoDB transaction handle */ + const TABLE* form, /*!< in: information on table + columns and indexes */ + ulint* doc_id_col) /*!< out: Doc ID column number if + there exist a FTS_DOC_ID column, + ULINT_UNDEFINED if column is of the + wrong type/name/size */ { - int error = 0; - trx_t* trx = thd_to_trx(user_thd); + for (ulint i = 0; i < form->s->fields; i++) { + const Field* field; + ulint col_type; + ulint col_len; + ulint unsigned_type; - DBUG_ENTER("ha_innobase::delete_row"); + field = form->field[i]; - ut_a(prebuilt->trx == trx); + col_type = get_innobase_type_from_mysql_type(&unsigned_type, + field); - ha_statistic_increment(&SSV::ha_delete_count); + col_len = field->pack_length(); - if (!share->ib_table || share->ib_table->is_corrupt) { - DBUG_RETURN(HA_ERR_CRASHED); - } + if (innobase_strcasecmp(field->field_name, + FTS_DOC_ID_COL_NAME) == 0) { - if (!prebuilt->upd_node) { - row_get_prebuilt_update_vector(prebuilt); + /* Note the name is case sensitive due to + our internal query parser */ + if (col_type == DATA_INT + && !field->real_maybe_null() + && col_len == sizeof(doc_id_t) + && (strcmp(field->field_name, + FTS_DOC_ID_COL_NAME) == 0)) { + *doc_id_col = i; + } else { + push_warning_printf( + trx->mysql_thd, + Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: FTS_DOC_ID column must be " + "of BIGINT NOT NULL type, and named " + "in all capitalized characters"); + my_error(ER_WRONG_COLUMN_NAME, MYF(0), + field->field_name); + *doc_id_col = ULINT_UNDEFINED; + } + + return(true); + } } - /* This is a delete */ + return(false); +} - prebuilt->upd_node->is_delete = TRUE; 
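A highly simplified sketch of the auto-increment adjustment discussed a little above for INSERT ... ON DUPLICATE KEY UPDATE: pick the smallest value past the one MySQL actually used that lies on the offset/increment grid, clamped to the column maximum. This only approximates innobase_next_autoinc(); the real function also handles overflow corner cases that are reduced to clamping here.

#include <cstdint>
#include <cstdio>

// Simplified next-autoinc computation (assumption: offset >= 1, increment >= 1).
// Not the exact InnoDB algorithm; overflow handling is reduced to clamping.
static uint64_t next_autoinc(uint64_t current, uint64_t increment,
                             uint64_t offset, uint64_t col_max)
{
    if (current >= col_max) {
        return col_max;
    }
    // Number of whole increments already consumed past the offset.
    uint64_t steps = (current < offset) ? 0 : (current - offset) / increment + 1;
    uint64_t next  = offset + steps * increment;
    return (next > col_max || next < current) ? col_max : next;
}

int main()
{
    // e.g. auto_increment_increment = 5, auto_increment_offset = 3
    printf("%llu\n", (unsigned long long) next_autoinc(7, 5, 3, 255));  // 8
    printf("%llu\n", (unsigned long long) next_autoinc(8, 5, 3, 255));  // 13
    return 0;
}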
+/*****************************************************************//** +Creates a table definition to an InnoDB database. */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +int +create_table_def( +/*=============*/ + trx_t* trx, /*!< in: InnoDB transaction handle */ + const TABLE* form, /*!< in: information on table + columns and indexes */ + const char* table_name, /*!< in: table name */ + const char* temp_path, /*!< in: if this is a table explicitly + created by the user with the + TEMPORARY keyword, then this + parameter is the dir path where the + table should be placed if we create + an .ibd file for it (no .ibd extension + in the path, though). Otherwise this + is a zero length-string */ + const char* remote_path, /*!< in: Remote path or zero length-string */ + ulint flags, /*!< in: table flags */ + ulint flags2) /*!< in: table flags2 */ +{ + THD* thd = trx->mysql_thd; + dict_table_t* table; + ulint n_cols, s_cols; + dberr_t err; + ulint col_type; + ulint col_len; + ulint nulls_allowed; + ulint unsigned_type; + ulint binary_type; + ulint long_true_varchar; + ulint charset_no; + ulint i; + ulint doc_id_col = 0; + ibool has_doc_id_col = FALSE; + mem_heap_t* heap; - innodb_srv_conc_enter_innodb(trx); + DBUG_ENTER("create_table_def"); + DBUG_PRINT("enter", ("table_name: %s", table_name)); - error = row_update_for_mysql((byte*) record, prebuilt); + DBUG_ASSERT(thd != NULL); -#ifdef EXTENDED_FOR_USERSTAT - if (UNIV_LIKELY(error == DB_SUCCESS && !trx->fake_changes)) { - rows_changed++; - } -#endif + /* MySQL does the name length check. But we do additional check + on the name length here */ + const size_t table_name_len = strlen(table_name); + if (table_name_len > MAX_FULL_NAME_LEN) { + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_TABLE_NAME, + "InnoDB: Table Name or Database Name is too long"); - innodb_srv_conc_exit_innodb(trx); + DBUG_RETURN(ER_TABLE_NAME); + } - error = convert_error_code_to_mysql( - error, prebuilt->table->flags, user_thd); + if (table_name[table_name_len - 1] == '/') { + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_TABLE_NAME, + "InnoDB: Table name is empty"); - /* Tell the InnoDB server that there might be work for - utility threads: */ + DBUG_RETURN(ER_WRONG_TABLE_NAME); + } - innobase_active_small(); + n_cols = form->s->fields; + s_cols = form->s->stored_fields; - if (!share->ib_table || share->ib_table->is_corrupt) { - DBUG_RETURN(HA_ERR_CRASHED); - } + /* Check whether there already exists a FTS_DOC_ID column */ + if (create_table_check_doc_id_col(trx, form, &doc_id_col)){ -#ifdef WITH_WSREP - if (!error && wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && - wsrep_on(user_thd)) { + /* Raise error if the Doc ID column is of wrong type or name */ + if (doc_id_col == ULINT_UNDEFINED) { + trx_commit_for_mysql(trx); - if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, record, - NULL)) { - DBUG_PRINT("wsrep", ("delete fail")); - error = HA_ERR_INTERNAL_ERROR; - goto wsrep_error; + err = DB_ERROR; + goto error_ret; + } else { + has_doc_id_col = TRUE; } } -wsrep_error: -#endif - DBUG_RETURN(error); -} - -/**********************************************************************//** -Removes a new lock set on a row, if it was not read optimistically. This can -be called after a row has been read in the processing of an UPDATE or a DELETE -query, if the option innodb_locks_unsafe_for_binlog is set. 
*/ -UNIV_INTERN -void -ha_innobase::unlock_row(void) -/*=========================*/ -{ - DBUG_ENTER("ha_innobase::unlock_row"); - ut_ad(prebuilt->trx->state == TRX_ACTIVE); + /* We pass 0 as the space id, and determine at a lower level the space + id where to store the table */ - /* Consistent read does not take any locks, thus there is - nothing to unlock. */ + if (flags2 & DICT_TF2_FTS) { + /* Adjust for the FTS hidden field */ + if (!has_doc_id_col) { + table = dict_mem_table_create(table_name, 0, s_cols + 1, + flags, flags2); - if (prebuilt->select_lock_type == LOCK_NONE) { - DBUG_VOID_RETURN; + /* Set the hidden doc_id column. */ + table->fts->doc_col = s_cols; + } else { + table = dict_mem_table_create(table_name, 0, s_cols, + flags, flags2); + table->fts->doc_col = doc_id_col; + } + } else { + table = dict_mem_table_create(table_name, 0, s_cols, + flags, flags2); } - switch (prebuilt->row_read_type) { - case ROW_READ_WITH_LOCKS: - if (!srv_locks_unsafe_for_binlog - && prebuilt->trx->isolation_level - > TRX_ISO_READ_COMMITTED) { - break; - } - /* fall through */ - case ROW_READ_TRY_SEMI_CONSISTENT: - row_unlock_for_mysql(prebuilt, FALSE); - break; - case ROW_READ_DID_SEMI_CONSISTENT: - prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; - break; + if (flags2 & DICT_TF2_TEMPORARY) { + ut_a(strlen(temp_path)); + table->dir_path_of_temp_table = + mem_heap_strdup(table->heap, temp_path); } - DBUG_VOID_RETURN; -} + if (DICT_TF_HAS_DATA_DIR(flags)) { + ut_a(strlen(remote_path)); + table->data_dir_path = mem_heap_strdup(table->heap, remote_path); + } else { + table->data_dir_path = NULL; + } + heap = mem_heap_create(1000); -/* See handler.h and row0mysql.h for docs on this function. */ -UNIV_INTERN -bool -ha_innobase::was_semi_consistent_read(void) -/*=======================================*/ -{ - return(prebuilt->row_read_type == ROW_READ_DID_SEMI_CONSISTENT); -} + for (i = 0; i < n_cols; i++) { + Field* field = form->field[i]; + if (!field->stored_in_db) + continue; -/* See handler.h and row0mysql.h for docs on this function. */ -UNIV_INTERN -void -ha_innobase::try_semi_consistent_read(bool yes) -/*===========================================*/ -{ - ut_a(prebuilt->trx == thd_to_trx(ha_thd())); + col_type = get_innobase_type_from_mysql_type(&unsigned_type, + field); - /* Row read type is set to semi consistent read if this was - requested by the MySQL and either innodb_locks_unsafe_for_binlog - option is used or this session is using READ COMMITTED isolation - level. */ + if (!col_type) { + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_CANT_CREATE_TABLE, + "Error creating table '%s' with " + "column '%s'. Please check its " + "column type and try to re-create " + "the table with an appropriate " + "column type.", + table->name, field->field_name); + goto err_col; + } - if (yes - && (srv_locks_unsafe_for_binlog - || prebuilt->trx->isolation_level <= TRX_ISO_READ_COMMITTED)) { - prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; - } else { - prebuilt->row_read_type = ROW_READ_WITH_LOCKS; - } -} + nulls_allowed = field->real_maybe_null() ? 0 : DATA_NOT_NULL; + binary_type = field->binary() ? DATA_BINARY_TYPE : 0; -/******************************************************************//** -Initializes a handle to use an index. 
-@return 0 or error number */ -UNIV_INTERN -int -ha_innobase::index_init( -/*====================*/ - uint keynr, /*!< in: key (index) number */ - bool sorted) /*!< in: 1 if result MUST be sorted according to index */ -{ - DBUG_ENTER("index_init"); + charset_no = 0; - DBUG_RETURN(change_active_index(keynr)); -} + if (dtype_is_string_type(col_type)) { -/******************************************************************//** -Currently does nothing. -@return 0 */ -UNIV_INTERN -int -ha_innobase::index_end(void) -/*========================*/ -{ - int error = 0; - DBUG_ENTER("index_end"); - active_index=MAX_KEY; - in_range_check_pushed_down= FALSE; - ds_mrr.dsmrr_close(); - DBUG_RETURN(error); -} + charset_no = (ulint) field->charset()->number; -/*********************************************************************//** -Converts a search mode flag understood by MySQL to a flag understood -by InnoDB. */ -static inline -ulint -convert_search_mode_to_innobase( -/*============================*/ - enum ha_rkey_function find_flag) -{ - switch (find_flag) { - case HA_READ_KEY_EXACT: - /* this does not require the index to be UNIQUE */ - return(PAGE_CUR_GE); - case HA_READ_KEY_OR_NEXT: - return(PAGE_CUR_GE); - case HA_READ_KEY_OR_PREV: - return(PAGE_CUR_LE); - case HA_READ_AFTER_KEY: - return(PAGE_CUR_G); - case HA_READ_BEFORE_KEY: - return(PAGE_CUR_L); - case HA_READ_PREFIX: - return(PAGE_CUR_GE); - case HA_READ_PREFIX_LAST: - return(PAGE_CUR_LE); - case HA_READ_PREFIX_LAST_OR_PREV: - return(PAGE_CUR_LE); - /* In MySQL-4.0 HA_READ_PREFIX and HA_READ_PREFIX_LAST always - pass a complete-field prefix of a key value as the search - tuple. I.e., it is not allowed that the last field would - just contain n first bytes of the full field value. - MySQL uses a 'padding' trick to convert LIKE 'abc%' - type queries so that it can use as a search tuple - a complete-field-prefix of a key value. Thus, the InnoDB - search mode PAGE_CUR_LE_OR_EXTENDS is never used. - TODO: when/if MySQL starts to use also partial-field - prefixes, we have to deal with stripping of spaces - and comparison of non-latin1 char type fields in - innobase_mysql_cmp() to get PAGE_CUR_LE_OR_EXTENDS to - work correctly. */ - case HA_READ_MBR_CONTAIN: - case HA_READ_MBR_INTERSECT: - case HA_READ_MBR_WITHIN: - case HA_READ_MBR_DISJOINT: - case HA_READ_MBR_EQUAL: - return(PAGE_CUR_UNSUPP); - /* do not use "default:" in order to produce a gcc warning: - enumeration value '...' not handled in switch - (if -Wswitch or -Wall is used) */ - } + if (UNIV_UNLIKELY(charset_no > MAX_CHAR_COLL_NUM)) { + /* in data0type.h we assume that the + number fits in one byte in prtype */ + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_CANT_CREATE_TABLE, + "In InnoDB, charset-collation codes" + " must be below 256." + " Unsupported code %lu.", + (ulong) charset_no); + mem_heap_free(heap); + DBUG_RETURN(ER_CANT_CREATE_TABLE); + } + } - my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "this functionality"); + /* we assume in dtype_form_prtype() that this fits in + two bytes */ + ut_a(static_cast<uint>(field->type()) <= MAX_CHAR_COLL_NUM); + col_len = field->pack_length(); - return(PAGE_CUR_UNSUPP); -} + /* The MySQL pack length contains 1 or 2 bytes length field + for a true VARCHAR. Let us subtract that, so that the InnoDB + column length in the InnoDB data dictionary is the real + maximum byte length of the actual data. 
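The pack-length adjustment described above can be summarized in a few lines. The figures assume MySQL's convention of a 1-byte length prefix for VARCHARs whose maximum byte length fits in 255 bytes and a 2-byte prefix beyond that; the struct and function names are illustrative only.

#include <cassert>
#include <cstddef>

// InnoDB stores the maximum byte length of the data itself, so the 1- or
// 2-byte length prefix counted in MySQL's pack length is subtracted.
struct varchar_meta {
    size_t innodb_col_len;      // real maximum data length
    bool   long_true_varchar;   // set when a 2-byte length prefix is used
};

static varchar_meta innodb_varchar_len(size_t mysql_pack_length,
                                       unsigned length_bytes /* 1 or 2 */)
{
    varchar_meta m;
    m.innodb_col_len    = mysql_pack_length - length_bytes;
    m.long_true_varchar = (length_bytes == 2);
    return m;
}

int main()
{
    // VARCHAR(100) with a single-byte charset: pack length 101.
    assert(innodb_varchar_len(101, 1).innodb_col_len == 100);
    // VARCHAR(300): pack length 302, 2-byte length prefix.
    assert(innodb_varchar_len(302, 2).long_true_varchar);
    return 0;
}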
*/ -/* - BACKGROUND INFO: HOW A SELECT SQL QUERY IS EXECUTED - --------------------------------------------------- -The following does not cover all the details, but explains how we determine -the start of a new SQL statement, and what is associated with it. + long_true_varchar = 0; -For each table in the database the MySQL interpreter may have several -table handle instances in use, also in a single SQL query. For each table -handle instance there is an InnoDB 'prebuilt' struct which contains most -of the InnoDB data associated with this table handle instance. + if (field->type() == MYSQL_TYPE_VARCHAR) { + col_len -= ((Field_varstring*) field)->length_bytes; - A) if the user has not explicitly set any MySQL table level locks: + if (((Field_varstring*) field)->length_bytes == 2) { + long_true_varchar = DATA_LONG_TRUE_VARCHAR; + } + } - 1) MySQL calls ::external_lock to set an 'intention' table level lock on -the table of the handle instance. There we set -prebuilt->sql_stat_start = TRUE. The flag sql_stat_start should be set -true if we are taking this table handle instance to use in a new SQL -statement issued by the user. We also increment trx->n_mysql_tables_in_use. + /* First check whether the column to be added has a + system reserved name. */ + if (dict_col_name_is_reserved(field->field_name)){ + my_error(ER_WRONG_COLUMN_NAME, MYF(0), + field->field_name); +err_col: + dict_mem_table_free(table); + mem_heap_free(heap); + trx_commit_for_mysql(trx); - 2) If prebuilt->sql_stat_start == TRUE we 'pre-compile' the MySQL search -instructions to prebuilt->template of the table handle instance in -::index_read. The template is used to save CPU time in large joins. + err = DB_ERROR; + goto error_ret; + } - 3) In row_search_for_mysql, if prebuilt->sql_stat_start is true, we -allocate a new consistent read view for the trx if it does not yet have one, -or in the case of a locking read, set an InnoDB 'intention' table level -lock on the table. + dict_mem_table_add_col(table, heap, + field->field_name, + col_type, + dtype_form_prtype( + (ulint) field->type() + | nulls_allowed | unsigned_type + | binary_type | long_true_varchar, + charset_no), + col_len); + } - 4) We do the SELECT. MySQL may repeatedly call ::index_read for the -same table handle instance, if it is a join. + /* Add the FTS doc_id hidden column. */ + if (flags2 & DICT_TF2_FTS && !has_doc_id_col) { + fts_add_doc_id_column(table, heap); + } - 5) When the SELECT ends, MySQL removes its intention table level locks -in ::external_lock. When trx->n_mysql_tables_in_use drops to zero, - (a) we execute a COMMIT there if the autocommit is on, - (b) we also release possible 'SQL statement level resources' InnoDB may -have for this SQL statement. The MySQL interpreter does NOT execute -autocommit for pure read transactions, though it should. That is why the -table handler in that case has to execute the COMMIT in ::external_lock. + err = row_create_table_for_mysql(table, trx, false); - B) If the user has explicitly set MySQL table level locks, then MySQL -does NOT call ::external_lock at the start of the statement. To determine -when we are at the start of a new SQL statement we at the start of -::index_read also compare the query id to the latest query id where the -table handle instance was used. If it has changed, we know we are at the -start of a new SQL statement. Since the query id can theoretically -overwrap, we use this test only as a secondary way of determining the -start of a new SQL statement. 
*/ + mem_heap_free(heap); + DBUG_EXECUTE_IF("ib_create_err_tablespace_exist", + err = DB_TABLESPACE_EXISTS;); -/**********************************************************************//** -Positions an index cursor to the index specified in the handle. Fetches the -row if any. -@return 0, HA_ERR_KEY_NOT_FOUND, or error number */ -UNIV_INTERN + if (err == DB_DUPLICATE_KEY || err == DB_TABLESPACE_EXISTS) { + char display_name[FN_REFLEN]; + char* buf_end = innobase_convert_identifier( + display_name, sizeof(display_name) - 1, + table_name, strlen(table_name), + thd, TRUE); + + *buf_end = '\0'; + + my_error(err == DB_DUPLICATE_KEY + ? ER_TABLE_EXISTS_ERROR + : ER_TABLESPACE_EXISTS, MYF(0), display_name); + } + + if (err == DB_SUCCESS && (flags2 & DICT_TF2_FTS)) { + fts_optimize_add_table(table); + } + +error_ret: + DBUG_RETURN(convert_error_code_to_mysql(err, flags, thd)); +} + +/*****************************************************************//** +Creates an index in an InnoDB database. */ +static int -ha_innobase::index_read( -/*====================*/ - uchar* buf, /*!< in/out: buffer for the returned - row */ - const uchar* key_ptr, /*!< in: key value; if this is NULL - we position the cursor at the - start or end of index; this can - also contain an InnoDB row id, in - which case key_len is the InnoDB - row id length; the key value can - also be a prefix of a full key value, - and the last column can be a prefix - of a full column */ - uint key_len,/*!< in: key value length */ - enum ha_rkey_function find_flag)/*!< in: search flags from my_base.h */ +create_index( +/*=========*/ + trx_t* trx, /*!< in: InnoDB transaction handle */ + const TABLE* form, /*!< in: information on table + columns and indexes */ + ulint flags, /*!< in: InnoDB table flags */ + const char* table_name, /*!< in: table name */ + uint key_num) /*!< in: index number */ { - ulint mode; dict_index_t* index; - ulint match_mode = 0; int error; - ulint ret; + const KEY* key; + ulint ind_type; + ulint* field_lengths; - DBUG_ENTER("index_read"); - DEBUG_SYNC_C("ha_innobase_index_read_begin"); + DBUG_ENTER("create_index"); - ut_a(prebuilt->trx == thd_to_trx(user_thd)); - ut_ad(key_len != 0 || find_flag != HA_READ_KEY_EXACT); + key = form->key_info + key_num; - ha_statistic_increment(&SSV::ha_read_key_count); + /* Assert that "GEN_CLUST_INDEX" cannot be used as non-primary index */ + ut_a(innobase_strcasecmp(key->name, innobase_index_reserve_name) != 0); + + if (key->flags & HA_FULLTEXT) { + index = dict_mem_index_create(table_name, key->name, 0, + DICT_FTS, + key->user_defined_key_parts); + + for (ulint i = 0; i < key->user_defined_key_parts; i++) { + KEY_PART_INFO* key_part = key->key_part + i; + dict_mem_index_add_field( + index, key_part->field->field_name, 0); + } + + DBUG_RETURN(convert_error_code_to_mysql( + row_create_index_for_mysql( + index, trx, NULL), + flags, NULL)); - if (UNIV_UNLIKELY(!share->ib_table || - (share->ib_table && - share->ib_table->is_corrupt && - srv_pass_corrupt_table <= 1))) { - DBUG_RETURN(HA_ERR_CRASHED); } - index = prebuilt->index; + ind_type = 0; - if (UNIV_UNLIKELY(index == NULL) || dict_index_is_corrupted(index)) { - prebuilt->index_usable = FALSE; - DBUG_RETURN(HA_ERR_CRASHED); + if (key_num == form->s->primary_key) { + ind_type |= DICT_CLUSTERED; } - if (UNIV_UNLIKELY(!prebuilt->index_usable)) { - DBUG_RETURN(dict_index_is_corrupted(index) - ? 
HA_ERR_INDEX_CORRUPT - : HA_ERR_TABLE_DEF_CHANGED); + + if (key->flags & HA_NOSAME) { + ind_type |= DICT_UNIQUE; } - /* Note that if the index for which the search template is built is not - necessarily prebuilt->index, but can also be the clustered index */ + field_lengths = (ulint*) my_malloc( + key->user_defined_key_parts * sizeof * + field_lengths, MYF(MY_FAE)); - if (prebuilt->sql_stat_start) { - build_template(false); + /* We pass 0 as the space id, and determine at a lower level the space + id where to store the table */ + + index = dict_mem_index_create(table_name, key->name, 0, + ind_type, key->user_defined_key_parts); + + for (ulint i = 0; i < key->user_defined_key_parts; i++) { + KEY_PART_INFO* key_part = key->key_part + i; + ulint prefix_len; + ulint col_type; + ulint is_unsigned; + + + /* (The flag HA_PART_KEY_SEG denotes in MySQL a + column prefix field in an index: we only store a + specified number of first bytes of the column to + the index field.) The flag does not seem to be + properly set by MySQL. Let us fall back on testing + the length of the key part versus the column. */ + + Field* field = NULL; + + for (ulint j = 0; j < form->s->fields; j++) { + + field = form->field[j]; + + if (0 == innobase_strcasecmp( + field->field_name, + key_part->field->field_name)) { + /* Found the corresponding column */ + + goto found; + } + } + + ut_error; +found: + col_type = get_innobase_type_from_mysql_type( + &is_unsigned, key_part->field); + + if (DATA_BLOB == col_type + || (key_part->length < field->pack_length() + && field->type() != MYSQL_TYPE_VARCHAR) + || (field->type() == MYSQL_TYPE_VARCHAR + && key_part->length < field->pack_length() + - ((Field_varstring*) field)->length_bytes)) { + + switch (col_type) { + default: + prefix_len = key_part->length; + break; + case DATA_INT: + case DATA_FLOAT: + case DATA_DOUBLE: + case DATA_DECIMAL: + sql_print_error( + "MySQL is trying to create a column " + "prefix index field, on an " + "inappropriate data type. Table " + "name %s, column name %s.", + table_name, + key_part->field->field_name); + + prefix_len = 0; + } + } else { + prefix_len = 0; + } + + field_lengths[i] = key_part->length; + + dict_mem_index_add_field( + index, key_part->field->field_name, prefix_len); } - if (key_ptr) { - /* Convert the search key value to InnoDB format into - prebuilt->search_tuple */ + ut_ad(key->flags & HA_FULLTEXT || !(index->type & DICT_FTS)); - row_sel_convert_mysql_key_to_innobase( - prebuilt->search_tuple, - srch_key_val1, sizeof(srch_key_val1), - index, - (byte*) key_ptr, - (ulint) key_len, - prebuilt->trx); - DBUG_ASSERT(prebuilt->search_tuple->n_fields > 0); - } else { - /* We position the cursor to the last or the first entry - in the index */ + /* Even though we've defined max_supported_key_part_length, we + still do our own checking using field_lengths to be absolutely + sure we don't create too long indexes. */ - dtuple_set_n_fields(prebuilt->search_tuple, 0); + error = convert_error_code_to_mysql( + row_create_index_for_mysql(index, trx, field_lengths), + flags, NULL); + + my_free(field_lengths); + + DBUG_RETURN(error); +} + +/*****************************************************************//** +Creates an index to an InnoDB table when the user has defined no +primary index. 
*/ +static +int +create_clustered_index_when_no_primary( +/*===================================*/ + trx_t* trx, /*!< in: InnoDB transaction handle */ + ulint flags, /*!< in: InnoDB table flags */ + const char* table_name) /*!< in: table name */ +{ + dict_index_t* index; + dberr_t error; + + /* We pass 0 as the space id, and determine at a lower level the space + id where to store the table */ + index = dict_mem_index_create(table_name, + innobase_index_reserve_name, + 0, DICT_CLUSTERED, 0); + + error = row_create_index_for_mysql(index, trx, NULL); + + return(convert_error_code_to_mysql(error, flags, NULL)); +} + +/*****************************************************************//** +Return a display name for the row format +@return row format name */ +UNIV_INTERN +const char* +get_row_format_name( +/*================*/ + enum row_type row_format) /*!< in: Row Format */ +{ + switch (row_format) { + case ROW_TYPE_COMPACT: + return("COMPACT"); + case ROW_TYPE_COMPRESSED: + return("COMPRESSED"); + case ROW_TYPE_DYNAMIC: + return("DYNAMIC"); + case ROW_TYPE_REDUNDANT: + return("REDUNDANT"); + case ROW_TYPE_DEFAULT: + return("DEFAULT"); + case ROW_TYPE_FIXED: + return("FIXED"); + case ROW_TYPE_PAGE: + case ROW_TYPE_NOT_USED: + default: + break; } + return("NOT USED"); +} - mode = convert_search_mode_to_innobase(find_flag); - - match_mode = 0; - - if (find_flag == HA_READ_KEY_EXACT) { - - match_mode = ROW_SEL_EXACT; - - } else if (find_flag == HA_READ_PREFIX - || find_flag == HA_READ_PREFIX_LAST) { +/** If file-per-table is missing, issue warning and set ret false */ +#define CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace)\ + if (!use_tablespace) { \ + push_warning_printf( \ + thd, Sql_condition::WARN_LEVEL_WARN, \ + ER_ILLEGAL_HA_CREATE_OPTION, \ + "InnoDB: ROW_FORMAT=%s requires" \ + " innodb_file_per_table.", \ + get_row_format_name(row_format)); \ + ret = "ROW_FORMAT"; \ + } - match_mode = ROW_SEL_EXACT_PREFIX; +/** If file-format is Antelope, issue warning and set ret false */ +#define CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE \ + if (srv_file_format < UNIV_FORMAT_B) { \ + push_warning_printf( \ + thd, Sql_condition::WARN_LEVEL_WARN, \ + ER_ILLEGAL_HA_CREATE_OPTION, \ + "InnoDB: ROW_FORMAT=%s requires" \ + " innodb_file_format > Antelope.", \ + get_row_format_name(row_format)); \ + ret = "ROW_FORMAT"; \ } - last_match_mode = (uint) match_mode; - if (mode != PAGE_CUR_UNSUPP) { +/*****************************************************************//** +Validates the create options. We may build on this function +in future. For now, it checks two specifiers: +KEY_BLOCK_SIZE and ROW_FORMAT +If innodb_strict_mode is not set then this function is a no-op +@return NULL if valid, string if not. */ +UNIV_INTERN +const char* +create_options_are_invalid( +/*=======================*/ + THD* thd, /*!< in: connection thread. */ + TABLE* form, /*!< in: information on table + columns and indexes */ + HA_CREATE_INFO* create_info, /*!< in: create info. */ + bool use_tablespace) /*!< in: srv_file_per_table */ +{ + ibool kbs_specified = FALSE; + const char* ret = NULL; + enum row_type row_format = form->s->row_type; - innodb_srv_conc_enter_innodb(prebuilt->trx); + ut_ad(thd != NULL); - ret = row_search_for_mysql((byte*) buf, mode, prebuilt, - match_mode, 0); + /* If innodb_strict_mode is not set don't do any validation. 
*/ + if (!(THDVAR(thd, strict_mode))) { + return(NULL); + } - innodb_srv_conc_exit_innodb(prebuilt->trx); - } else { + ut_ad(form != NULL); + ut_ad(create_info != NULL); - ret = DB_UNSUPPORTED; - } + /* First check if a non-zero KEY_BLOCK_SIZE was specified. */ + if (create_info->key_block_size) { + kbs_specified = TRUE; + switch (create_info->key_block_size) { + ulint kbs_max; + case 1: + case 2: + case 4: + case 8: + case 16: + /* Valid KEY_BLOCK_SIZE, check its dependencies. */ + if (!use_tablespace) { + push_warning( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: KEY_BLOCK_SIZE requires" + " innodb_file_per_table."); + ret = "KEY_BLOCK_SIZE"; + } + if (srv_file_format < UNIV_FORMAT_B) { + push_warning( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: KEY_BLOCK_SIZE requires" + " innodb_file_format > Antelope."); + ret = "KEY_BLOCK_SIZE"; + } - if (UNIV_UNLIKELY(!share->ib_table || - (share->ib_table && - share->ib_table->is_corrupt && - srv_pass_corrupt_table <= 1))) { - DBUG_RETURN(HA_ERR_CRASHED); + /* The maximum KEY_BLOCK_SIZE (KBS) is 16. But if + UNIV_PAGE_SIZE is smaller than 16k, the maximum + KBS is also smaller. */ + kbs_max = ut_min( + 1 << (UNIV_PAGE_SSIZE_MAX - 1), + 1 << (PAGE_ZIP_SSIZE_MAX - 1)); + if (create_info->key_block_size > kbs_max) { + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: KEY_BLOCK_SIZE=%ld" + " cannot be larger than %ld.", + create_info->key_block_size, + kbs_max); + ret = "KEY_BLOCK_SIZE"; + } + break; + default: + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: invalid KEY_BLOCK_SIZE = %lu." + " Valid values are [1, 2, 4, 8, 16]", + create_info->key_block_size); + ret = "KEY_BLOCK_SIZE"; + break; + } } - switch (ret) { - case DB_SUCCESS: - error = 0; - table->status = 0; -#ifdef EXTENDED_FOR_USERSTAT - rows_read++; - if (active_index < MAX_KEY) - index_rows_read[active_index]++; -#endif + /* Check for a valid Innodb ROW_FORMAT specifier and + other incompatibilities. 
*/ + switch (row_format) { + case ROW_TYPE_COMPRESSED: + CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace); + CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE; break; - case DB_RECORD_NOT_FOUND: - error = HA_ERR_KEY_NOT_FOUND; - table->status = STATUS_NOT_FOUND; + case ROW_TYPE_DYNAMIC: + CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace); + CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE; + /* ROW_FORMAT=DYNAMIC also shuns KEY_BLOCK_SIZE */ + /* fall through */ + case ROW_TYPE_COMPACT: + case ROW_TYPE_REDUNDANT: + if (kbs_specified) { + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: cannot specify ROW_FORMAT = %s" + " with KEY_BLOCK_SIZE.", + get_row_format_name(row_format)); + ret = "KEY_BLOCK_SIZE"; + } break; - case DB_END_OF_INDEX: - error = HA_ERR_KEY_NOT_FOUND; - table->status = STATUS_NOT_FOUND; + case ROW_TYPE_DEFAULT: break; + case ROW_TYPE_FIXED: + case ROW_TYPE_PAGE: + case ROW_TYPE_NOT_USED: default: - error = convert_error_code_to_mysql((int) ret, - prebuilt->table->flags, - user_thd); - table->status = STATUS_NOT_FOUND; + push_warning( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, \ + "InnoDB: invalid ROW_FORMAT specifier."); + ret = "ROW_TYPE"; break; } diff --cc storage/xtradb/handler/ha_innodb.h index 54df3bef8f1,1ba163b6015..72f92829241 --- a/storage/xtradb/handler/ha_innodb.h +++ b/storage/xtradb/handler/ha_innodb.h @@@ -24,11 -24,17 +24,15 @@@ this program; if not, write to the Fre Innodb */ -#ifdef USE_PRAGMA_INTERFACE -#pragma interface /* gcc class implementation */ -#endif +#include "dict0stats.h" + #ifdef WITH_WSREP + #include "../../../wsrep/wsrep_api.h" + #endif /* WITH_WSREP */ + /* Structure defines translation table between mysql index and innodb index structures */ -typedef struct innodb_idx_translate_struct { +struct innodb_idx_translate_t { ulint index_count; /*!< number of valid index entries in the index_mapping array */ ulint array_size; /*!< array size of index_mapping */ @@@ -89,16 -109,18 +93,16 @@@ class ha_innobase: public handle void update_thd(); int change_active_index(uint keynr); int general_fetch(uchar* buf, uint direction, uint match_mode); - ulint innobase_lock_autoinc(); + dberr_t innobase_lock_autoinc(); ulonglong innobase_peek_autoinc(); - ulint innobase_set_max_autoinc(ulonglong auto_inc); - ulint innobase_reset_autoinc(ulonglong auto_inc); - ulint innobase_get_autoinc(ulonglong* value); - ulint innobase_update_autoinc(ulonglong auto_inc); + dberr_t innobase_set_max_autoinc(ulonglong auto_inc); + dberr_t innobase_reset_autoinc(ulonglong auto_inc); + dberr_t innobase_get_autoinc(ulonglong* value); void innobase_initialize_autoinc(); dict_index_t* innobase_get_index(uint keynr); - int info_low(uint flag, bool called_from_analyze); #ifdef WITH_WSREP - int wsrep_append_keys(THD *thd, bool shared, + int wsrep_append_keys(THD *thd, wsrep_key_type key_type, const uchar* record0, const uchar* record1); #endif /* Init values for the class: */ diff --cc storage/xtradb/row/row0ins.cc index 36f83104201,00000000000..b8ac976bea8 mode 100644,000000..100644 --- a/storage/xtradb/row/row0ins.cc +++ b/storage/xtradb/row/row0ins.cc @@@ -1,3452 -1,0 +1,3475 @@@ +/***************************************************************************** + +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, 2018, MariaDB Corporation. 
+ +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file row/row0ins.cc +Insert into a table + +Created 4/20/1996 Heikki Tuuri +*******************************************************/ + +#include "row0ins.h" + +#ifdef UNIV_NONINL +#include "row0ins.ic" +#endif + +#include "ha_prototypes.h" +#include "dict0dict.h" +#include "dict0boot.h" +#include "trx0rec.h" +#include "trx0undo.h" +#include "btr0btr.h" +#include "btr0cur.h" +#include "mach0data.h" +#include "ibuf0ibuf.h" +#include "que0que.h" +#include "row0upd.h" +#include "row0sel.h" +#include "row0row.h" +#include "row0log.h" +#include "rem0cmp.h" +#include "lock0lock.h" +#include "log0log.h" +#include "eval0eval.h" +#include "data0data.h" +#include "usr0sess.h" +#include "buf0lru.h" +#include "fts0fts.h" +#include "fts0types.h" +#include "m_string.h" + ++#ifdef WITH_WSREP ++#include "../../../wsrep/wsrep_api.h" ++#include "wsrep_mysqld_c.h" ++#endif /* WITH_WSREP */ ++ +/************************************************************************* +IMPORTANT NOTE: Any operation that generates redo MUST check that there +is enough space in the redo log before for that operation. This is +done by calling log_free_check(). The reason for checking the +availability of the redo log space before the start of the operation is +that we MUST not hold any synchonization objects when performing the +check. +If you make a change in this module make sure that no codepath is +introduced where a call to log_free_check() is bypassed. */ + +/*********************************************************************//** +Creates an insert node struct. +@return own: insert node struct */ +UNIV_INTERN +ins_node_t* +ins_node_create( +/*============*/ + ulint ins_type, /*!< in: INS_VALUES, ... */ + dict_table_t* table, /*!< in: table where to insert */ + mem_heap_t* heap) /*!< in: mem heap where created */ +{ + ins_node_t* node; + + node = static_cast<ins_node_t*>( + mem_heap_alloc(heap, sizeof(ins_node_t))); + + node->common.type = QUE_NODE_INSERT; + + node->ins_type = ins_type; + + node->state = INS_NODE_SET_IX_LOCK; + node->table = table; + node->index = NULL; + node->entry = NULL; + + node->select = NULL; + + node->trx_id = 0; + + node->entry_sys_heap = mem_heap_create(128); + + node->magic_n = INS_NODE_MAGIC_N; + + return(node); +} + +/***********************************************************//** +Creates an entry template for each index of a table. */ +static +void +ins_node_create_entry_list( +/*=======================*/ + ins_node_t* node) /*!< in: row insert node */ +{ + dict_index_t* index; + dtuple_t* entry; + + ut_ad(node->entry_sys_heap); + + UT_LIST_INIT(node->entry_list); + + /* We will include all indexes (include those corrupted + secondary indexes) in the entry list. 
Filteration of + these corrupted index will be done in row_ins() */ + + for (index = dict_table_get_first_index(node->table); + index != 0; + index = dict_table_get_next_index(index)) { + + entry = row_build_index_entry( + node->row, NULL, index, node->entry_sys_heap); + + UT_LIST_ADD_LAST(tuple_list, node->entry_list, entry); + } +} + +/*****************************************************************//** +Adds system field buffers to a row. */ +static +void +row_ins_alloc_sys_fields( +/*=====================*/ + ins_node_t* node) /*!< in: insert node */ +{ + dtuple_t* row; + dict_table_t* table; + mem_heap_t* heap; + const dict_col_t* col; + dfield_t* dfield; + byte* ptr; + + row = node->row; + table = node->table; + heap = node->entry_sys_heap; + + ut_ad(row && table && heap); + ut_ad(dtuple_get_n_fields(row) == dict_table_get_n_cols(table)); + + /* allocate buffer to hold the needed system created hidden columns. */ + uint len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN; + ptr = static_cast<byte*>(mem_heap_zalloc(heap, len)); + + /* 1. Populate row-id */ + col = dict_table_get_sys_col(table, DATA_ROW_ID); + + dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); + + dfield_set_data(dfield, ptr, DATA_ROW_ID_LEN); + + node->row_id_buf = ptr; + + ptr += DATA_ROW_ID_LEN; + + /* 2. Populate trx id */ + col = dict_table_get_sys_col(table, DATA_TRX_ID); + + dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); + + dfield_set_data(dfield, ptr, DATA_TRX_ID_LEN); + + node->trx_id_buf = ptr; + + ptr += DATA_TRX_ID_LEN; + + /* 3. Populate roll ptr */ + + col = dict_table_get_sys_col(table, DATA_ROLL_PTR); + + dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); + + dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN); +} + +/*********************************************************************//** +Sets a new row to insert for an INS_DIRECT node. This function is only used +if we have constructed the row separately, which is a rare case; this +function is quite slow. */ +UNIV_INTERN +void +ins_node_set_new_row( +/*=================*/ + ins_node_t* node, /*!< in: insert node */ + dtuple_t* row) /*!< in: new row (or first row) for the node */ +{ + node->state = INS_NODE_SET_IX_LOCK; + node->index = NULL; + node->entry = NULL; + + node->row = row; + + mem_heap_empty(node->entry_sys_heap); + + /* Create templates for index entries */ + + ins_node_create_entry_list(node); + + /* Allocate from entry_sys_heap buffers for sys fields */ + + row_ins_alloc_sys_fields(node); + + /* As we allocated a new trx id buf, the trx id should be written + there again: */ + + node->trx_id = 0; +} + +/*******************************************************************//** +Does an insert operation by updating a delete-marked existing record +in the index. This situation can occur if the delete-marked record is +kept in the index for consistent reads. 
+@return DB_SUCCESS or error code */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_sec_index_entry_by_modify( +/*==============================*/ + ulint flags, /*!< in: undo logging and locking flags */ + ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE, + depending on whether mtr holds just a leaf + latch or also a tree latch */ + btr_cur_t* cursor, /*!< in: B-tree cursor */ + ulint** offsets,/*!< in/out: offsets on cursor->page_cur.rec */ + mem_heap_t* offsets_heap, + /*!< in/out: memory heap that can be emptied */ + mem_heap_t* heap, /*!< in/out: memory heap */ + const dtuple_t* entry, /*!< in: index entry to insert */ + que_thr_t* thr, /*!< in: query thread */ + mtr_t* mtr) /*!< in: mtr; must be committed before + latching any further pages */ +{ + big_rec_t* dummy_big_rec; + upd_t* update; + rec_t* rec; + dberr_t err; + + rec = btr_cur_get_rec(cursor); + + ut_ad(!dict_index_is_clust(cursor->index)); + ut_ad(rec_offs_validate(rec, cursor->index, *offsets)); + ut_ad(!entry->info_bits); + + /* We know that in the alphabetical ordering, entry and rec are + identified. But in their binary form there may be differences if + there are char fields in them. Therefore we have to calculate the + difference. */ + + update = row_upd_build_sec_rec_difference_binary( + rec, cursor->index, *offsets, entry, heap); + + /* If operating in fake_change mode then flow will not mark the record + deleted but will still assume it and take delete-mark path. Condition + below has a different path if record is not marked deleted but we need + to still by-pass it given that original flow has taken this path for + fake_change mode execution assuming record is delete-marked. */ + if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets)) + && UNIV_UNLIKELY(!thr_get_trx(thr)->fake_changes)) { + /* We should never insert in place of a record that + has not been delete-marked. The only exception is when + online CREATE INDEX copied the changes that we already + made to the clustered index, and completed the + secondary index creation before we got here. In this + case, the change would already be there. The CREATE + INDEX should be waiting for a MySQL meta-data lock + upgrade at least until this INSERT or UPDATE + returns. After that point, the TEMP_INDEX_PREFIX + would be dropped from the index name in + commit_inplace_alter_table(). */ + ut_a(update->n_fields == 0); + ut_a(*cursor->index->name == TEMP_INDEX_PREFIX); + ut_ad(!dict_index_is_online_ddl(cursor->index)); + return(DB_SUCCESS); + } + + if (mode == BTR_MODIFY_LEAF) { + /* Try an optimistic updating of the record, keeping changes + within the page */ + + /* TODO: pass only *offsets */ + err = btr_cur_optimistic_update( + flags | BTR_KEEP_SYS_FLAG, cursor, + offsets, &offsets_heap, update, 0, thr, + thr_get_trx(thr)->id, mtr); + switch (err) { + case DB_OVERFLOW: + case DB_UNDERFLOW: + case DB_ZIP_OVERFLOW: + err = DB_FAIL; + default: + break; + } + } else { + ut_a(mode == BTR_MODIFY_TREE); + if (buf_LRU_buf_pool_running_out()) { + + return(DB_LOCK_TABLE_FULL); + } + + err = btr_cur_pessimistic_update( + flags | BTR_KEEP_SYS_FLAG, cursor, + offsets, &offsets_heap, + heap, &dummy_big_rec, update, 0, + thr, thr_get_trx(thr)->id, mtr); + ut_ad(!dummy_big_rec); + } + + return(err); +} + +/*******************************************************************//** +Does an insert operation by delete unmarking and updating a delete marked +existing record in the index. 
This situation can occur if the delete marked +record is kept in the index for consistent reads. +@return DB_SUCCESS, DB_FAIL, or error code */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_clust_index_entry_by_modify( +/*================================*/ + ulint flags, /*!< in: undo logging and locking flags */ + ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE, + depending on whether mtr holds just a leaf + latch or also a tree latch */ + btr_cur_t* cursor, /*!< in: B-tree cursor */ + ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */ + mem_heap_t** offsets_heap, + /*!< in/out: pointer to memory heap that can + be emptied, or NULL */ + mem_heap_t* heap, /*!< in/out: memory heap */ + big_rec_t** big_rec,/*!< out: possible big rec vector of fields + which have to be stored externally by the + caller */ + const dtuple_t* entry, /*!< in: index entry to insert */ + que_thr_t* thr, /*!< in: query thread */ + mtr_t* mtr) /*!< in: mtr; must be committed before + latching any further pages */ +{ + const rec_t* rec; + const upd_t* update; + dberr_t err; + + ut_ad(dict_index_is_clust(cursor->index)); + + *big_rec = NULL; + + rec = btr_cur_get_rec(cursor); + + ut_ad(rec_get_deleted_flag(rec, + dict_table_is_comp(cursor->index->table))); + + /* Build an update vector containing all the fields to be modified; + NOTE that this vector may NOT contain system columns trx_id or + roll_ptr */ + + update = row_upd_build_difference_binary( + cursor->index, entry, rec, NULL, true, + thr_get_trx(thr), heap); + if (mode != BTR_MODIFY_TREE) { + ut_ad((mode & ~BTR_ALREADY_S_LATCHED) == BTR_MODIFY_LEAF); + + /* Try optimistic updating of the record, keeping changes + within the page */ + + err = btr_cur_optimistic_update( + flags, cursor, offsets, offsets_heap, update, 0, thr, + thr_get_trx(thr)->id, mtr); + switch (err) { + case DB_OVERFLOW: + case DB_UNDERFLOW: + case DB_ZIP_OVERFLOW: + err = DB_FAIL; + default: + break; + } + } else { + if (buf_LRU_buf_pool_running_out()) { + + return(DB_LOCK_TABLE_FULL); + + } + err = btr_cur_pessimistic_update( + flags | BTR_KEEP_POS_FLAG, + cursor, offsets, offsets_heap, heap, + big_rec, update, 0, thr, thr_get_trx(thr)->id, mtr); + } + + return(err); +} + +/*********************************************************************//** +Returns TRUE if in a cascaded update/delete an ancestor node of node +updates (not DELETE, but UPDATE) table. +@return TRUE if an ancestor updates table */ +static +ibool +row_ins_cascade_ancestor_updates_table( +/*===================================*/ + que_node_t* node, /*!< in: node in a query graph */ + dict_table_t* table) /*!< in: table */ +{ + que_node_t* parent; + + for (parent = que_node_get_parent(node); + que_node_get_type(parent) == QUE_NODE_UPDATE; + parent = que_node_get_parent(parent)) { + + upd_node_t* upd_node; + + upd_node = static_cast<upd_node_t*>(parent); + + if (upd_node->table == table && upd_node->is_delete == FALSE) { + + return(TRUE); + } + } + + return(FALSE); +} + +/*********************************************************************//** +Returns the number of ancestor UPDATE or DELETE nodes of a +cascaded update/delete node. 
+@return number of ancestors */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +ulint +row_ins_cascade_n_ancestors( +/*========================*/ + que_node_t* node) /*!< in: node in a query graph */ +{ + que_node_t* parent; + ulint n_ancestors = 0; + + for (parent = que_node_get_parent(node); + que_node_get_type(parent) == QUE_NODE_UPDATE; + parent = que_node_get_parent(parent)) { + + n_ancestors++; + } + + return(n_ancestors); +} + +/******************************************************************//** +Calculates the update vector node->cascade->update for a child table in +a cascaded update. +@return number of fields in the calculated update vector; the value +can also be 0 if no foreign key fields changed; the returned value is +ULINT_UNDEFINED if the column type in the child table is too short to +fit the new value in the parent table: that means the update fails */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +ulint +row_ins_cascade_calc_update_vec( +/*============================*/ + upd_node_t* node, /*!< in: update node of the parent + table */ + dict_foreign_t* foreign, /*!< in: foreign key constraint whose + type is != 0 */ + mem_heap_t* heap, /*!< in: memory heap to use as + temporary storage */ + trx_t* trx, /*!< in: update transaction */ + ibool* fts_col_affected)/*!< out: is FTS column affected */ +{ + upd_node_t* cascade = node->cascade_node; + dict_table_t* table = foreign->foreign_table; + dict_index_t* index = foreign->foreign_index; + upd_t* update; + dict_table_t* parent_table; + dict_index_t* parent_index; + upd_t* parent_update; + ulint n_fields_updated; + ulint parent_field_no; + ulint i; + ulint j; + ibool doc_id_updated = FALSE; + ulint doc_id_pos = 0; + doc_id_t new_doc_id = FTS_NULL_DOC_ID; + + ut_a(node); + ut_a(foreign); + ut_a(cascade); + ut_a(table); + ut_a(index); + + /* Calculate the appropriate update vector which will set the fields + in the child index record to the same value (possibly padded with + spaces if the column is a fixed length CHAR or FIXBINARY column) as + the referenced index record will get in the update. */ + + parent_table = node->table; + ut_a(parent_table == foreign->referenced_table); + parent_index = foreign->referenced_index; + parent_update = node->update; + + update = cascade->update; + + update->info_bits = 0; + update->n_fields = foreign->n_fields; + + n_fields_updated = 0; + + *fts_col_affected = FALSE; + + if (table->fts) { + doc_id_pos = dict_table_get_nth_col_pos( + table, table->fts->doc_col); + } + + for (i = 0; i < foreign->n_fields; i++) { + + parent_field_no = dict_table_get_nth_col_pos( + parent_table, + dict_index_get_nth_col_no(parent_index, i)); + + for (j = 0; j < parent_update->n_fields; j++) { + const upd_field_t* parent_ufield + = &parent_update->fields[j]; + + if (parent_ufield->field_no == parent_field_no) { + + ulint min_size; + const dict_col_t* col; + ulint ufield_len; + upd_field_t* ufield; + + col = dict_index_get_nth_col(index, i); + + /* A field in the parent index record is + updated. Let us make the update vector + field for the child table. 
*/ + + ufield = update->fields + n_fields_updated; + + ufield->field_no + = dict_table_get_nth_col_pos( + table, dict_col_get_no(col)); + + ufield->orig_len = 0; + ufield->exp = NULL; + + ufield->new_val = parent_ufield->new_val; + ufield_len = dfield_get_len(&ufield->new_val); + + /* Clear the "external storage" flag */ + dfield_set_len(&ufield->new_val, ufield_len); + + /* Do not allow a NOT NULL column to be + updated as NULL */ + + if (dfield_is_null(&ufield->new_val) + && (col->prtype & DATA_NOT_NULL)) { + + return(ULINT_UNDEFINED); + } + + /* If the new value would not fit in the + column, do not allow the update */ + + if (!dfield_is_null(&ufield->new_val) + && dtype_get_at_most_n_mbchars( + col->prtype, + col->mbminlen, col->mbmaxlen, + col->len, + ufield_len, + static_cast<char*>( + dfield_get_data( + &ufield->new_val))) + < ufield_len) { + + return(ULINT_UNDEFINED); + } + + /* If the parent column type has a different + length than the child column type, we may + need to pad with spaces the new value of the + child column */ + + min_size = dict_col_get_min_size(col); + + /* Because UNIV_SQL_NULL (the marker + of SQL NULL values) exceeds all possible + values of min_size, the test below will + not hold for SQL NULL columns. */ + + if (min_size > ufield_len) { + + byte* pad; + ulint pad_len; + byte* padded_data; + ulint mbminlen; + + padded_data = static_cast<byte*>( + mem_heap_alloc( + heap, min_size)); + + pad = padded_data + ufield_len; + pad_len = min_size - ufield_len; + + memcpy(padded_data, + dfield_get_data(&ufield + ->new_val), + ufield_len); + + mbminlen = dict_col_get_mbminlen(col); + + ut_ad(!(ufield_len % mbminlen)); + ut_ad(!(min_size % mbminlen)); + + if (mbminlen == 1 + && dtype_get_charset_coll( + col->prtype) + == DATA_MYSQL_BINARY_CHARSET_COLL) { + /* Do not pad BINARY columns */ + return(ULINT_UNDEFINED); + } + + row_mysql_pad_col(mbminlen, + pad, pad_len); + dfield_set_data(&ufield->new_val, + padded_data, min_size); + } + + /* Check whether the current column has + FTS index on it */ + if (table->fts + && dict_table_is_fts_column( + table->fts->indexes, + dict_col_get_no(col)) + != ULINT_UNDEFINED) { + *fts_col_affected = TRUE; + } + + /* If Doc ID is updated, check whether the + Doc ID is valid */ + if (table->fts + && ufield->field_no == doc_id_pos) { + doc_id_t n_doc_id; + + n_doc_id = + table->fts->cache->next_doc_id; + + new_doc_id = fts_read_doc_id( + static_cast<const byte*>( + dfield_get_data( + &ufield->new_val))); + + if (new_doc_id <= 0) { + fprintf(stderr, + "InnoDB: FTS Doc ID " + "must be larger than " + "0 \n"); + return(ULINT_UNDEFINED); + } + + if (new_doc_id < n_doc_id) { + fprintf(stderr, + "InnoDB: FTS Doc ID " + "must be larger than " + IB_ID_FMT" for table", + n_doc_id -1); + + ut_print_name(stderr, trx, + TRUE, + table->name); + + putc('\n', stderr); + return(ULINT_UNDEFINED); + } + + *fts_col_affected = TRUE; + doc_id_updated = TRUE; + } + + n_fields_updated++; + } + } + } + + /* Generate a new Doc ID if FTS index columns get updated */ + if (table->fts && *fts_col_affected) { + if (DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) { + doc_id_t doc_id; + upd_field_t* ufield; + + ut_ad(!doc_id_updated); + ufield = update->fields + n_fields_updated; + fts_get_next_doc_id(table, &trx->fts_next_doc_id); + doc_id = fts_update_doc_id(table, ufield, + &trx->fts_next_doc_id); + n_fields_updated++; + fts_trx_add_op(trx, table, doc_id, FTS_INSERT, NULL); + } else { + if (doc_id_updated) { + ut_ad(new_doc_id); + fts_trx_add_op(trx, table, 
new_doc_id, + FTS_INSERT, NULL); + } else { + fprintf(stderr, "InnoDB: FTS Doc ID must be " + "updated along with FTS indexed " + "column for table "); + ut_print_name(stderr, trx, TRUE, table->name); + putc('\n', stderr); + return(ULINT_UNDEFINED); + } + } + } + + update->n_fields = n_fields_updated; + + return(n_fields_updated); +} + +/*********************************************************************//** +Set detailed error message associated with foreign key errors for +the given transaction. */ +static +void +row_ins_set_detailed( +/*=================*/ + trx_t* trx, /*!< in: transaction */ + dict_foreign_t* foreign) /*!< in: foreign key constraint */ +{ + ut_ad(!srv_read_only_mode); + + mutex_enter(&srv_misc_tmpfile_mutex); + rewind(srv_misc_tmpfile); + + if (os_file_set_eof(srv_misc_tmpfile)) { + std::string fk_str; + ut_print_name(srv_misc_tmpfile, trx, TRUE, + foreign->foreign_table_name); + fk_str = dict_print_info_on_foreign_key_in_create_format( + trx, foreign, FALSE); + fputs(fk_str.c_str(), srv_misc_tmpfile); + trx_set_detailed_error_from_file(trx, srv_misc_tmpfile); + } else { + trx_set_detailed_error(trx, "temp file operation failed"); + } + + mutex_exit(&srv_misc_tmpfile_mutex); +} + +/*********************************************************************//** +Acquires dict_foreign_err_mutex, rewinds dict_foreign_err_file +and displays information about the given transaction. +The caller must release dict_foreign_err_mutex. */ +static +void +row_ins_foreign_trx_print( +/*======================*/ + trx_t* trx) /*!< in: transaction */ +{ + ulint n_rec_locks; + ulint n_trx_locks; + ulint heap_size; + + if (srv_read_only_mode) { + return; + } + + lock_mutex_enter(); + n_rec_locks = lock_number_of_rows_locked(&trx->lock); + n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks); + heap_size = mem_heap_get_size(trx->lock.lock_heap); + lock_mutex_exit(); + + mutex_enter(&trx_sys->mutex); + + mutex_enter(&dict_foreign_err_mutex); + rewind(dict_foreign_err_file); + ut_print_timestamp(dict_foreign_err_file); + fputs(" Transaction:\n", dict_foreign_err_file); + + trx_print_low(dict_foreign_err_file, trx, 600, + n_rec_locks, n_trx_locks, heap_size); + + mutex_exit(&trx_sys->mutex); + + ut_ad(mutex_own(&dict_foreign_err_mutex)); +} + +/*********************************************************************//** +Reports a foreign key error associated with an update or a delete of a +parent table index entry. 
*/ +static +void +row_ins_foreign_report_err( +/*=======================*/ + const char* errstr, /*!< in: error string from the viewpoint + of the parent table */ + que_thr_t* thr, /*!< in: query thread whose run_node + is an update node */ + dict_foreign_t* foreign, /*!< in: foreign key constraint */ + const rec_t* rec, /*!< in: a matching index record in the + child table */ + const dtuple_t* entry) /*!< in: index entry in the parent + table */ +{ + std::string fk_str; + + if (srv_read_only_mode) { + return; + } + + FILE* ef = dict_foreign_err_file; + trx_t* trx = thr_get_trx(thr); + + row_ins_set_detailed(trx, foreign); + + row_ins_foreign_trx_print(trx); + + fputs("Foreign key constraint fails for table ", ef); + ut_print_name(ef, trx, TRUE, foreign->foreign_table_name); + fputs(":\n", ef); + fk_str = dict_print_info_on_foreign_key_in_create_format(trx, foreign, + TRUE); + fputs(fk_str.c_str(), ef); + putc('\n', ef); + fputs(errstr, ef); + fputs(" in parent table, in index ", ef); + ut_print_name(ef, trx, FALSE, foreign->referenced_index->name); + if (entry) { + fputs(" tuple:\n", ef); + dtuple_print(ef, entry); + } + fputs("\nBut in child table ", ef); + ut_print_name(ef, trx, TRUE, foreign->foreign_table_name); + fputs(", in index ", ef); + ut_print_name(ef, trx, FALSE, foreign->foreign_index->name); + if (rec) { + fputs(", there is a record:\n", ef); + rec_print(ef, rec, foreign->foreign_index); + } else { + fputs(", the record is not available\n", ef); + } + putc('\n', ef); + + mutex_exit(&dict_foreign_err_mutex); +} + +/*********************************************************************//** +Reports a foreign key error to dict_foreign_err_file when we are trying +to add an index entry to a child table. Note that the adding may be the result +of an update, too. */ +static +void +row_ins_foreign_report_add_err( +/*===========================*/ + trx_t* trx, /*!< in: transaction */ + dict_foreign_t* foreign, /*!< in: foreign key constraint */ + const rec_t* rec, /*!< in: a record in the parent table: + it does not match entry because we + have an error! */ + const dtuple_t* entry) /*!< in: index entry to insert in the + child table */ +{ + std::string fk_str; + + if (srv_read_only_mode) { + return; + } + + FILE* ef = dict_foreign_err_file; + + row_ins_set_detailed(trx, foreign); + + row_ins_foreign_trx_print(trx); + + fputs("Foreign key constraint fails for table ", ef); + ut_print_name(ef, trx, TRUE, foreign->foreign_table_name); + fputs(":\n", ef); + fk_str = dict_print_info_on_foreign_key_in_create_format(trx, foreign, + TRUE); + fputs(fk_str.c_str(), ef); + fputs("\nTrying to add in child table, in index ", ef); + ut_print_name(ef, trx, FALSE, foreign->foreign_index->name); + if (entry) { + fputs(" tuple:\n", ef); + /* TODO: DB_TRX_ID and DB_ROLL_PTR may be uninitialized. + It would be better to only display the user columns. */ + dtuple_print(ef, entry); + } + fputs("\nBut in parent table ", ef); + ut_print_name(ef, trx, TRUE, foreign->referenced_table_name); + fputs(", in index ", ef); + ut_print_name(ef, trx, FALSE, foreign->referenced_index->name); + fputs(",\nthe closest match we can find is record:\n", ef); + if (rec && page_rec_is_supremum(rec)) { + /* If the cursor ended on a supremum record, it is better + to report the previous record in the error message, so that + the user gets a more descriptive error message. 
*/ + rec = page_rec_get_prev_const(rec); + } + + if (rec) { + rec_print(ef, rec, foreign->referenced_index); + } + putc('\n', ef); + + mutex_exit(&dict_foreign_err_mutex); +} + +/*********************************************************************//** +Invalidate the query cache for the given table. */ +static +void +row_ins_invalidate_query_cache( +/*===========================*/ + que_thr_t* thr, /*!< in: query thread whose run_node + is an update node */ + const char* name) /*!< in: table name prefixed with + database name and a '/' character */ +{ + char* buf; + char* ptr; + ulint len = strlen(name) + 1; + + buf = mem_strdupl(name, len); + + ptr = strchr(buf, '/'); + ut_a(ptr); + *ptr = '\0'; + + innobase_invalidate_query_cache(thr_get_trx(thr), buf, len); + mem_free(buf); +} +#ifdef WITH_WSREP +dberr_t wsrep_append_foreign_key(trx_t *trx, - dict_foreign_t* foreign, - const rec_t* clust_rec, - dict_index_t* clust_index, - ibool referenced, - ibool shared); ++ dict_foreign_t* foreign, ++ const rec_t* clust_rec, ++ dict_index_t* clust_index, ++ ibool referenced, ++ enum wsrep_key_type key_type); +#endif /* WITH_WSREP */ + +/*********************************************************************//** +Perform referential actions or checks when a parent row is deleted or updated +and the constraint had an ON DELETE or ON UPDATE condition which was not +RESTRICT. +@return DB_SUCCESS, DB_LOCK_WAIT, or error code */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_foreign_check_on_constraint( +/*================================*/ + que_thr_t* thr, /*!< in: query thread whose run_node + is an update node */ + dict_foreign_t* foreign, /*!< in: foreign key constraint whose + type is != 0 */ + btr_pcur_t* pcur, /*!< in: cursor placed on a matching + index record in the child table */ + dtuple_t* entry, /*!< in: index entry in the parent + table */ + mtr_t* mtr) /*!< in: mtr holding the latch of pcur + page */ +{ + upd_node_t* node; + upd_node_t* cascade; + dict_table_t* table = foreign->foreign_table; + dict_index_t* index; + dict_index_t* clust_index; + dtuple_t* ref; + mem_heap_t* upd_vec_heap = NULL; + const rec_t* rec; + const rec_t* clust_rec; + const buf_block_t* clust_block; + upd_t* update; + ulint n_to_update; + dberr_t err; + ulint i; + trx_t* trx; + mem_heap_t* tmp_heap = NULL; + doc_id_t doc_id = FTS_NULL_DOC_ID; + ibool fts_col_affacted = FALSE; + + ut_a(thr); + ut_a(foreign); + ut_a(pcur); + ut_a(mtr); + + trx = thr_get_trx(thr); + + /* Since we are going to delete or update a row, we have to invalidate + the MySQL query cache for table. A deadlock of threads is not possible + here because the caller of this function does not hold any latches with + the sync0sync.h rank above the lock_sys_t::mutex. The query cache mutex + has a rank just above the lock_sys_t::mutex. 
*/ + + row_ins_invalidate_query_cache(thr, table->name); + + node = static_cast<upd_node_t*>(thr->run_node); + + if (node->is_delete && 0 == (foreign->type + & (DICT_FOREIGN_ON_DELETE_CASCADE + | DICT_FOREIGN_ON_DELETE_SET_NULL))) { + + row_ins_foreign_report_err("Trying to delete", + thr, foreign, + btr_pcur_get_rec(pcur), entry); + + return(DB_ROW_IS_REFERENCED); + } + + if (!node->is_delete && 0 == (foreign->type + & (DICT_FOREIGN_ON_UPDATE_CASCADE + | DICT_FOREIGN_ON_UPDATE_SET_NULL))) { + + /* This is an UPDATE */ + + row_ins_foreign_report_err("Trying to update", + thr, foreign, + btr_pcur_get_rec(pcur), entry); + + return(DB_ROW_IS_REFERENCED); + } + + if (node->cascade_node == NULL) { + /* Extend our query graph by creating a child to current + update node. The child is used in the cascade or set null + operation. */ + + node->cascade_heap = mem_heap_create(128); + node->cascade_node = row_create_update_node_for_mysql( + table, node->cascade_heap); + que_node_set_parent(node->cascade_node, node); + } + + /* Initialize cascade_node to do the operation we want. Note that we + use the SAME cascade node to do all foreign key operations of the + SQL DELETE: the table of the cascade node may change if there are + several child tables to the table where the delete is done! */ + + cascade = node->cascade_node; + + cascade->table = table; + + cascade->foreign = foreign; + + if (node->is_delete + && (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE)) { + cascade->is_delete = TRUE; + } else { + cascade->is_delete = FALSE; + + if (foreign->n_fields > cascade->update_n_fields) { + /* We have to make the update vector longer */ + + cascade->update = upd_create(foreign->n_fields, + node->cascade_heap); + cascade->update_n_fields = foreign->n_fields; + } + } + + /* We do not allow cyclic cascaded updating (DELETE is allowed, + but not UPDATE) of the same table, as this can lead to an infinite + cycle. Check that we are not updating the same table which is + already being modified in this cascade chain. We have to check + this also because the modification of the indexes of a 'parent' + table may still be incomplete, and we must avoid seeing the indexes + of the parent table in an inconsistent state! 
*/ + + if (!cascade->is_delete + && row_ins_cascade_ancestor_updates_table(cascade, table)) { + + /* We do not know if this would break foreign key + constraints, but play safe and return an error */ + + err = DB_ROW_IS_REFERENCED; + + row_ins_foreign_report_err( + "Trying an update, possibly causing a cyclic" + " cascaded update\n" + "in the child table,", thr, foreign, + btr_pcur_get_rec(pcur), entry); + + goto nonstandard_exit_func; + } + + if (row_ins_cascade_n_ancestors(cascade) >= 15) { + err = DB_ROW_IS_REFERENCED; + + row_ins_foreign_report_err( + "Trying a too deep cascaded delete or update\n", + thr, foreign, btr_pcur_get_rec(pcur), entry); + + goto nonstandard_exit_func; + } + + index = btr_pcur_get_btr_cur(pcur)->index; + + ut_a(index == foreign->foreign_index); + + rec = btr_pcur_get_rec(pcur); + + tmp_heap = mem_heap_create(256); + + if (dict_index_is_clust(index)) { + /* pcur is already positioned in the clustered index of + the child table */ + + clust_index = index; + clust_rec = rec; + clust_block = btr_pcur_get_block(pcur); + } else { + /* We have to look for the record in the clustered index + in the child table */ + + clust_index = dict_table_get_first_index(table); + + ref = row_build_row_ref(ROW_COPY_POINTERS, index, rec, + tmp_heap); + btr_pcur_open_with_no_init(clust_index, ref, + PAGE_CUR_LE, BTR_SEARCH_LEAF, + cascade->pcur, 0, mtr); + + clust_rec = btr_pcur_get_rec(cascade->pcur); + clust_block = btr_pcur_get_block(cascade->pcur); + + if (!page_rec_is_user_rec(clust_rec) + || btr_pcur_get_low_match(cascade->pcur) + < dict_index_get_n_unique(clust_index)) { + + fputs("InnoDB: error in cascade of a foreign key op\n" + "InnoDB: ", stderr); + dict_index_name_print(stderr, trx, index); + + fputs("\n" + "InnoDB: record ", stderr); + rec_print(stderr, rec, index); + fputs("\n" + "InnoDB: clustered record ", stderr); + rec_print(stderr, clust_rec, clust_index); + fputs("\n" + "InnoDB: Submit a detailed bug report to" + " https://jira.mariadb.org/\n", stderr); + ut_ad(0); + err = DB_SUCCESS; + + goto nonstandard_exit_func; + } + } + + /* Set an X-lock on the row to delete or update in the child table */ + + err = lock_table(0, table, LOCK_IX, thr); + + if (err == DB_SUCCESS) { + /* Here it suffices to use a LOCK_REC_NOT_GAP type lock; + we already have a normal shared lock on the appropriate + gap if the search criterion was not unique */ + + err = lock_clust_rec_read_check_and_lock_alt( + 0, clust_block, clust_rec, clust_index, + LOCK_X, LOCK_REC_NOT_GAP, thr); + } + + if (err != DB_SUCCESS) { + + goto nonstandard_exit_func; + } + + if (rec_get_deleted_flag(clust_rec, dict_table_is_comp(table))) { + /* This can happen if there is a circular reference of + rows such that cascading delete comes to delete a row + already in the process of being delete marked */ + err = DB_SUCCESS; + + goto nonstandard_exit_func; + } + + if (table->fts) { + doc_id = fts_get_doc_id_from_rec(table, clust_rec, tmp_heap); + } + + if (node->is_delete + ? 
(foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL) + : (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL)) { + + /* Build the appropriate update vector which sets + foreign->n_fields first fields in rec to SQL NULL */ + + update = cascade->update; + + update->info_bits = 0; + update->n_fields = foreign->n_fields; + UNIV_MEM_INVALID(update->fields, + update->n_fields * sizeof *update->fields); + + for (i = 0; i < foreign->n_fields; i++) { + upd_field_t* ufield = &update->fields[i]; + + ufield->field_no = dict_table_get_nth_col_pos( + table, + dict_index_get_nth_col_no(index, i)); + ufield->orig_len = 0; + ufield->exp = NULL; + dfield_set_null(&ufield->new_val); + + if (table->fts && dict_table_is_fts_column( + table->fts->indexes, + dict_index_get_nth_col_no(index, i)) + != ULINT_UNDEFINED) { + fts_col_affacted = TRUE; + } + } + + if (fts_col_affacted) { + fts_trx_add_op(trx, table, doc_id, FTS_DELETE, NULL); + } + } else if (table->fts && cascade->is_delete) { + /* DICT_FOREIGN_ON_DELETE_CASCADE case */ + for (i = 0; i < foreign->n_fields; i++) { + if (table->fts && dict_table_is_fts_column( + table->fts->indexes, + dict_index_get_nth_col_no(index, i)) + != ULINT_UNDEFINED) { + fts_col_affacted = TRUE; + } + } + + if (fts_col_affacted) { + fts_trx_add_op(trx, table, doc_id, FTS_DELETE, NULL); + } + } + + if (!node->is_delete + && (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE)) { + + /* Build the appropriate update vector which sets changing + foreign->n_fields first fields in rec to new values */ + + upd_vec_heap = mem_heap_create(256); + + n_to_update = row_ins_cascade_calc_update_vec( + node, foreign, upd_vec_heap, trx, &fts_col_affacted); + + if (n_to_update == ULINT_UNDEFINED) { + err = DB_ROW_IS_REFERENCED; + + row_ins_foreign_report_err( + "Trying a cascaded update where the" + " updated value in the child\n" + "table would not fit in the length" + " of the column, or the value would\n" + "be NULL and the column is" + " declared as not NULL in the child table,", + thr, foreign, btr_pcur_get_rec(pcur), entry); + + goto nonstandard_exit_func; + } + + if (cascade->update->n_fields == 0) { + + /* The update does not change any columns referred + to in this foreign key constraint: no need to do + anything */ + + err = DB_SUCCESS; + + goto nonstandard_exit_func; + } + + /* Mark the old Doc ID as deleted */ + if (fts_col_affacted) { + ut_ad(table->fts); + fts_trx_add_op(trx, table, doc_id, FTS_DELETE, NULL); + } + } + + /* Store pcur position and initialize or store the cascade node + pcur stored position */ + + btr_pcur_store_position(pcur, mtr); + + if (index == clust_index) { + btr_pcur_copy_stored_position(cascade->pcur, pcur); + } else { + btr_pcur_store_position(cascade->pcur, mtr); + } + + mtr_commit(mtr); + + ut_a(cascade->pcur->rel_pos == BTR_PCUR_ON); + + cascade->state = UPD_NODE_UPDATE_CLUSTERED; + +#ifdef WITH_WSREP + err = wsrep_append_foreign_key( + thr_get_trx(thr), + foreign, + clust_rec, + clust_index, - FALSE, FALSE); ++ FALSE, WSREP_KEY_EXCLUSIVE); + if (err != DB_SUCCESS) { + fprintf(stderr, + "WSREP: foreign key append failed: %d\n", err); + } else +#endif /* WITH_WSREP */ + err = row_update_cascade_for_mysql(thr, cascade, + foreign->foreign_table); + + if (foreign->foreign_table->n_foreign_key_checks_running == 0) { + fprintf(stderr, + "InnoDB: error: table %s has the counter 0" + " though there is\n" + "InnoDB: a FOREIGN KEY check running on it.\n", + foreign->foreign_table->name); + } + + /* Release the data dictionary latch for a while, so that we do not + starve 
other threads from doing CREATE TABLE etc. if we have a huge + cascaded operation running. The counter n_foreign_key_checks_running + will prevent other users from dropping or ALTERing the table when we + release the latch. */ + + row_mysql_unfreeze_data_dictionary(thr_get_trx(thr)); + + DEBUG_SYNC_C("innodb_dml_cascade_dict_unfreeze"); + + row_mysql_freeze_data_dictionary(thr_get_trx(thr)); + + mtr_start(mtr); + + /* Restore pcur position */ + + btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr); + + if (tmp_heap) { + mem_heap_free(tmp_heap); + } + + if (upd_vec_heap) { + mem_heap_free(upd_vec_heap); + } + + return(err); + +nonstandard_exit_func: + if (tmp_heap) { + mem_heap_free(tmp_heap); + } + + if (upd_vec_heap) { + mem_heap_free(upd_vec_heap); + } + + btr_pcur_store_position(pcur, mtr); + + mtr_commit(mtr); + mtr_start(mtr); + + btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr); + + return(err); +} + +/*********************************************************************//** +Sets a shared lock on a record. Used in locking possible duplicate key +records and also in checking foreign key constraints. +@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */ +static +dberr_t +row_ins_set_shared_rec_lock( +/*========================*/ + ulint type, /*!< in: LOCK_ORDINARY, LOCK_GAP, or + LOCK_REC_NOT_GAP type lock */ + const buf_block_t* block, /*!< in: buffer block of rec */ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in: index */ + const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + + ut_ad(rec_offs_validate(rec, index, offsets)); + + if (dict_index_is_clust(index)) { + err = lock_clust_rec_read_check_and_lock( + 0, block, rec, index, offsets, LOCK_S, type, thr); + } else { + err = lock_sec_rec_read_check_and_lock( + 0, block, rec, index, offsets, LOCK_S, type, thr); + } + + return(err); +} + +/*********************************************************************//** +Sets a exclusive lock on a record. Used in locking possible duplicate key +records +@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */ +static +dberr_t +row_ins_set_exclusive_rec_lock( +/*===========================*/ + ulint type, /*!< in: LOCK_ORDINARY, LOCK_GAP, or + LOCK_REC_NOT_GAP type lock */ + const buf_block_t* block, /*!< in: buffer block of rec */ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in: index */ + const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + + ut_ad(rec_offs_validate(rec, index, offsets)); + + if (dict_index_is_clust(index)) { + err = lock_clust_rec_read_check_and_lock( + 0, block, rec, index, offsets, LOCK_X, type, thr); + } else { + err = lock_sec_rec_read_check_and_lock( + 0, block, rec, index, offsets, LOCK_X, type, thr); + } + + return(err); +} + +/***************************************************************//** +Checks if foreign key constraint fails for an index entry. Sets shared locks +which lock either the success or the failure of the constraint. NOTE that +the caller must have a shared latch on dict_operation_lock. 
+@return DB_SUCCESS, DB_NO_REFERENCED_ROW, or DB_ROW_IS_REFERENCED */ +UNIV_INTERN +dberr_t +row_ins_check_foreign_constraint( +/*=============================*/ + ibool check_ref,/*!< in: TRUE if we want to check that + the referenced table is ok, FALSE if we + want to check the foreign key table */ + dict_foreign_t* foreign,/*!< in: foreign constraint; NOTE that the + tables mentioned in it must be in the + dictionary cache if they exist at all */ + dict_table_t* table, /*!< in: if check_ref is TRUE, then the foreign + table, else the referenced table */ + dtuple_t* entry, /*!< in: index entry for index */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + upd_node_t* upd_node; + dict_table_t* check_table; + dict_index_t* check_index; + ulint n_fields_cmp; + btr_pcur_t pcur; + int cmp; + ulint i; + mtr_t mtr; + trx_t* trx = thr_get_trx(thr); + mem_heap_t* heap = NULL; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + rec_offs_init(offsets_); + +#ifdef WITH_WSREP + upd_node= NULL; +#endif /* WITH_WSREP */ +run_again: +#ifdef UNIV_SYNC_DEBUG + ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_SHARED)); +#endif /* UNIV_SYNC_DEBUG */ + + err = DB_SUCCESS; + + if (trx->check_foreigns == FALSE) { + /* The user has suppressed foreign key checks currently for + this session */ + goto exit_func; + } + + /* If any of the foreign key fields in entry is SQL NULL, we + suppress the foreign key check: this is compatible with Oracle, + for example */ + + for (i = 0; i < foreign->n_fields; i++) { + if (UNIV_SQL_NULL == dfield_get_len( + dtuple_get_nth_field(entry, i))) { + + goto exit_func; + } + } + + if (que_node_get_type(thr->run_node) == QUE_NODE_UPDATE) { + upd_node = static_cast<upd_node_t*>(thr->run_node); + + if (!(upd_node->is_delete) && upd_node->foreign == foreign) { + /* If a cascaded update is done as defined by a + foreign key constraint, do not check that + constraint for the child row. In ON UPDATE CASCADE + the update of the parent row is only half done when + we come here: if we would check the constraint here + for the child row it would fail. + + A QUESTION remains: if in the child table there are + several constraints which refer to the same parent + table, we should merge all updates to the child as + one update? And the updates can be contradictory! + Currently we just perform the update associated + with each foreign key constraint, one after + another, and the user has problems predicting in + which order they are performed. 
*/ + + goto exit_func; + } + } + + if (check_ref) { + check_table = foreign->referenced_table; + check_index = foreign->referenced_index; + } else { + check_table = foreign->foreign_table; + check_index = foreign->foreign_index; + } + + if (check_table == NULL + || check_table->ibd_file_missing + || check_index == NULL) { + + if (!srv_read_only_mode && check_ref) { + FILE* ef = dict_foreign_err_file; + std::string fk_str; + + row_ins_set_detailed(trx, foreign); + + row_ins_foreign_trx_print(trx); + + fputs("Foreign key constraint fails for table ", ef); + ut_print_name(ef, trx, TRUE, + foreign->foreign_table_name); + fputs(":\n", ef); + fk_str = dict_print_info_on_foreign_key_in_create_format( + trx, foreign, TRUE); + fputs(fk_str.c_str(), ef); + fputs("\nTrying to add to index ", ef); + ut_print_name(ef, trx, FALSE, + foreign->foreign_index->name); + fputs(" tuple:\n", ef); + dtuple_print(ef, entry); + fputs("\nBut the parent table ", ef); + ut_print_name(ef, trx, TRUE, + foreign->referenced_table_name); + fputs("\nor its .ibd file does" + " not currently exist!\n", ef); + mutex_exit(&dict_foreign_err_mutex); + + err = DB_NO_REFERENCED_ROW; + } + + goto exit_func; + } + + if (check_table != table) { + /* We already have a LOCK_IX on table, but not necessarily + on check_table */ + + err = lock_table(0, check_table, LOCK_IS, thr); + + if (err != DB_SUCCESS) { + + goto do_possible_lock_wait; + } + } + + mtr_start(&mtr); + + /* Store old value on n_fields_cmp */ + + n_fields_cmp = dtuple_get_n_fields_cmp(entry); + + dtuple_set_n_fields_cmp(entry, foreign->n_fields); + + btr_pcur_open(check_index, entry, PAGE_CUR_GE, + BTR_SEARCH_LEAF, &pcur, &mtr); + + /* Scan index records and check if there is a matching record */ + + do { + const rec_t* rec = btr_pcur_get_rec(&pcur); + const buf_block_t* block = btr_pcur_get_block(&pcur); + + SRV_CORRUPT_TABLE_CHECK(block, + { + err = DB_CORRUPTION; + goto exit_loop; + }); + + if (page_rec_is_infimum(rec)) { + + continue; + } + + offsets = rec_get_offsets(rec, check_index, + offsets, ULINT_UNDEFINED, &heap); + + if (page_rec_is_supremum(rec)) { + + err = row_ins_set_shared_rec_lock(LOCK_ORDINARY, block, + rec, check_index, + offsets, thr); + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + continue; + default: + goto end_scan; + } + } + + cmp = cmp_dtuple_rec(entry, rec, offsets); + + if (cmp == 0) { + if (rec_get_deleted_flag(rec, + rec_offs_comp(offsets))) { + err = row_ins_set_shared_rec_lock( + LOCK_ORDINARY, block, + rec, check_index, offsets, thr); + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + break; + default: + goto end_scan; + } + } else { + /* Found a matching record. Lock only + a record because we can allow inserts + into gaps */ + + err = row_ins_set_shared_rec_lock( + LOCK_REC_NOT_GAP, block, + rec, check_index, offsets, thr); + + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + break; + default: + goto end_scan; + } + + if (check_ref) { ++#ifdef WITH_WSREP ++ enum wsrep_key_type key_type = WSREP_KEY_EXCLUSIVE; ++#endif /* WITH_WSREP */ + err = DB_SUCCESS; ++ +#ifdef WITH_WSREP ++ if (upd_node != NULL) { ++ key_type = WSREP_KEY_SHARED; ++ } else { ++ switch (wsrep_certification_rules) { ++ case WSREP_CERTIFICATION_RULES_STRICT: ++ key_type = WSREP_KEY_EXCLUSIVE; ++ break; ++ case WSREP_CERTIFICATION_RULES_OPTIMIZED: ++ key_type = WSREP_KEY_SEMI; ++ break; ++ } ++ } ++ + err = wsrep_append_foreign_key( - thr_get_trx(thr), - foreign, - rec, - check_index, - check_ref, - (upd_node) ? 
TRUE : FALSE); - #endif /* WITH_WSREP */ ++ thr_get_trx(thr), ++ foreign, ++ rec, ++ check_index, ++ check_ref, ++ key_type); ++ #endif /* WITH_WSREP */ ++ + goto end_scan; + } else if (foreign->type != 0) { + /* There is an ON UPDATE or ON DELETE + condition: check them in a separate + function */ + + err = row_ins_foreign_check_on_constraint( + thr, foreign, &pcur, entry, + &mtr); + if (err != DB_SUCCESS) { + /* Since reporting a plain + "duplicate key" error + message to the user in + cases where a long CASCADE + operation would lead to a + duplicate key in some + other table is very + confusing, map duplicate + key errors resulting from + FK constraints to a + separate error code. */ + + if (err == DB_DUPLICATE_KEY) { + err = DB_FOREIGN_DUPLICATE_KEY; + } + + goto end_scan; + } + + /* row_ins_foreign_check_on_constraint + may have repositioned pcur on a + different block */ + block = btr_pcur_get_block(&pcur); + } else { + row_ins_foreign_report_err( + "Trying to delete or update", + thr, foreign, rec, entry); + + err = DB_ROW_IS_REFERENCED; + goto end_scan; + } + } + } else { + ut_a(cmp < 0); + + err = row_ins_set_shared_rec_lock( + LOCK_GAP, block, + rec, check_index, offsets, thr); + + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + if (check_ref) { + err = DB_NO_REFERENCED_ROW; + row_ins_foreign_report_add_err( + trx, foreign, rec, entry); + } else { + err = DB_SUCCESS; + } + default: + break; + } + + goto end_scan; + } + } while (btr_pcur_move_to_next(&pcur, &mtr)); + +exit_loop: + if (check_ref) { + row_ins_foreign_report_add_err( + trx, foreign, btr_pcur_get_rec(&pcur), entry); + err = DB_NO_REFERENCED_ROW; + } else { + err = DB_SUCCESS; + } + +end_scan: + btr_pcur_close(&pcur); + + mtr_commit(&mtr); + + /* Restore old value */ + dtuple_set_n_fields_cmp(entry, n_fields_cmp); + +do_possible_lock_wait: + if (err == DB_LOCK_WAIT) { + bool verified = false; + + trx->error_state = err; + + que_thr_stop_for_mysql(thr); + + lock_wait_suspend_thread(thr); + + if (check_table->to_be_dropped) { + /* The table is being dropped. We shall timeout + this operation */ + err = DB_LOCK_WAIT_TIMEOUT; + goto exit_func; + } + + /* We had temporarily released dict_operation_lock in + above lock sleep wait, now we have the lock again, and + we will need to re-check whether the foreign key has been + dropped. We only need to verify if the table is referenced + table case (check_ref == 0), since MDL lock will prevent + concurrent DDL and DML on the same table */ + if (!check_ref) { + for (dict_foreign_set::iterator it + = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + if (*it == foreign) { + verified = true; + break; + } + } + } else { + verified = true; + } + + if (!verified) { + err = DB_DICT_CHANGED; + } else if (trx->error_state == DB_SUCCESS) { + goto run_again; + } else { + err = trx->error_state; + } + } + +exit_func: + if (UNIV_LIKELY_NULL(heap)) { + mem_heap_free(heap); + } + + if (UNIV_UNLIKELY(trx->fake_changes)) { + err = DB_SUCCESS; + } + + return(err); +} + +/***************************************************************//** +Checks if foreign key constraints fail for an index entry. If index +is not mentioned in any constraint, this function does nothing, +Otherwise does searches to the indexes of referenced tables and +sets shared locks which lock either the success or the failure of +a constraint. 
+@return DB_SUCCESS or error code */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_check_foreign_constraints( +/*==============================*/ + dict_table_t* table, /*!< in: table */ + dict_index_t* index, /*!< in: index */ + dtuple_t* entry, /*!< in: index entry for index */ + que_thr_t* thr) /*!< in: query thread */ +{ + dict_foreign_t* foreign; + dberr_t err; + trx_t* trx; + ibool got_s_lock = FALSE; + + trx = thr_get_trx(thr); + + DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd, + "foreign_constraint_check_for_ins"); + + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + + foreign = *it; + + if (foreign->foreign_index == index) { + dict_table_t* ref_table = NULL; + dict_table_t* foreign_table = foreign->foreign_table; + dict_table_t* referenced_table + = foreign->referenced_table; + + if (referenced_table == NULL) { + + ref_table = dict_table_open_on_name( + foreign->referenced_table_name_lookup, + FALSE, FALSE, DICT_ERR_IGNORE_NONE); + } + + if (0 == trx->dict_operation_lock_mode) { + got_s_lock = TRUE; + + row_mysql_freeze_data_dictionary(trx); + } + + if (referenced_table) { + os_inc_counter(dict_sys->mutex, + foreign_table + ->n_foreign_key_checks_running); + } + + /* NOTE that if the thread ends up waiting for a lock + we will release dict_operation_lock temporarily! + But the counter on the table protects the referenced + table from being dropped while the check is running. */ + + err = row_ins_check_foreign_constraint( + TRUE, foreign, table, entry, thr); + + DBUG_EXECUTE_IF("row_ins_dict_change_err", + err = DB_DICT_CHANGED;); + + if (referenced_table) { + os_dec_counter(dict_sys->mutex, + foreign_table + ->n_foreign_key_checks_running); + } + + if (got_s_lock) { + row_mysql_unfreeze_data_dictionary(trx); + } + + if (ref_table != NULL) { + dict_table_close(ref_table, FALSE, FALSE); + } + + if (err != DB_SUCCESS) { + + return(err); + } + } + } + + return(DB_SUCCESS); +} + +/***************************************************************//** +Checks if a unique key violation to rec would occur at the index entry +insert. +@return TRUE if error */ +static +ibool +row_ins_dupl_error_with_rec( +/*========================*/ + const rec_t* rec, /*!< in: user record; NOTE that we assume + that the caller already has a record lock on + the record! */ + const dtuple_t* entry, /*!< in: entry to insert */ + dict_index_t* index, /*!< in: index */ + const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ +{ + ulint matched_fields; + ulint matched_bytes; + ulint n_unique; + ulint i; + + ut_ad(rec_offs_validate(rec, index, offsets)); + + n_unique = dict_index_get_n_unique(index); + + matched_fields = 0; + matched_bytes = 0; + + cmp_dtuple_rec_with_match(entry, rec, offsets, + &matched_fields, &matched_bytes); + + if (matched_fields < n_unique) { + + return(FALSE); + } + + /* In a unique secondary index we allow equal key values if they + contain SQL NULLs */ + + if (!dict_index_is_clust(index)) { + + for (i = 0; i < n_unique; i++) { + if (dfield_is_null(dtuple_get_nth_field(entry, i))) { + + return(FALSE); + } + } + } + + return(!rec_get_deleted_flag(rec, rec_offs_comp(offsets))); +} + +/***************************************************************//** +Scans a unique non-clustered index at a given index entry to determine +whether a uniqueness violation has occurred for the key value of the entry. +Set shared locks on possible duplicate records. 
+@return DB_SUCCESS, DB_DUPLICATE_KEY, or DB_LOCK_WAIT */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_scan_sec_index_for_duplicate( +/*=================================*/ + ulint flags, /*!< in: undo logging and locking flags */ + dict_index_t* index, /*!< in: non-clustered unique index */ + dtuple_t* entry, /*!< in: index entry */ + que_thr_t* thr, /*!< in: query thread */ + bool s_latch,/*!< in: whether index->lock is being held */ + mtr_t* mtr, /*!< in/out: mini-transaction */ + mem_heap_t* offsets_heap) + /*!< in/out: memory heap that can be emptied */ +{ + ulint n_unique; + int cmp; + ulint n_fields_cmp; + btr_pcur_t pcur; + dberr_t err = DB_SUCCESS; + ulint allow_duplicates; + ulint* offsets = NULL; + +#ifdef UNIV_SYNC_DEBUG + ut_ad(s_latch == rw_lock_own(&index->lock, RW_LOCK_SHARED)); +#endif /* UNIV_SYNC_DEBUG */ + + n_unique = dict_index_get_n_unique(index); + + /* If the secondary index is unique, but one of the fields in the + n_unique first fields is NULL, a unique key violation cannot occur, + since we define NULL != NULL in this case */ + + for (ulint i = 0; i < n_unique; i++) { + if (UNIV_SQL_NULL == dfield_get_len( + dtuple_get_nth_field(entry, i))) { + + return(DB_SUCCESS); + } + } + + /* Store old value on n_fields_cmp */ + + n_fields_cmp = dtuple_get_n_fields_cmp(entry); + + dtuple_set_n_fields_cmp(entry, n_unique); + + btr_pcur_open(index, entry, PAGE_CUR_GE, + s_latch + ? BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED + : BTR_SEARCH_LEAF, + &pcur, mtr); + + allow_duplicates = thr_get_trx(thr)->duplicates; + + /* Scan index records and check if there is a duplicate */ + + do { + const rec_t* rec = btr_pcur_get_rec(&pcur); + const buf_block_t* block = btr_pcur_get_block(&pcur); + const ulint lock_type = LOCK_ORDINARY; + + if (page_rec_is_infimum(rec)) { + + continue; + } + + offsets = rec_get_offsets(rec, index, offsets, + ULINT_UNDEFINED, &offsets_heap); + + if (flags & BTR_NO_LOCKING_FLAG) { + /* Set no locks when applying log + in online table rebuild. */ + } else if (allow_duplicates) { + + /* If the SQL-query will update or replace + duplicate key we will take X-lock for + duplicates ( REPLACE, LOAD DATAFILE REPLACE, + INSERT ON DUPLICATE KEY UPDATE). */ + + err = row_ins_set_exclusive_rec_lock( + lock_type, block, rec, index, offsets, thr); + } else { + + err = row_ins_set_shared_rec_lock( + lock_type, block, rec, index, offsets, thr); + } + + switch (err) { + case DB_SUCCESS_LOCKED_REC: + err = DB_SUCCESS; + case DB_SUCCESS: + break; + default: + goto end_scan; + } + + if (page_rec_is_supremum(rec)) { + + continue; + } + + cmp = cmp_dtuple_rec(entry, rec, offsets); + + if (cmp == 0) { + if (row_ins_dupl_error_with_rec(rec, entry, + index, offsets)) { + err = DB_DUPLICATE_KEY; + + thr_get_trx(thr)->error_info = index; + + /* If the duplicate is on hidden FTS_DOC_ID, + state so in the error log */ + if (DICT_TF2_FLAG_IS_SET( + index->table, + DICT_TF2_FTS_HAS_DOC_ID) + && strcmp(index->name, + FTS_DOC_ID_INDEX_NAME) == 0) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Duplicate FTS_DOC_ID value" + " on table %s", + index->table->name); + } + + goto end_scan; + } + } else { + ut_a(cmp < 0); + goto end_scan; + } + } while (btr_pcur_move_to_next(&pcur, mtr)); + +end_scan: + /* Restore old value */ + dtuple_set_n_fields_cmp(entry, n_fields_cmp); + + return(err); +} + +/** Checks for a duplicate when the table is being rebuilt online. 
+@retval DB_SUCCESS when no duplicate is detected +@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or +a newer version of entry (the entry should not be inserted) +@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_duplicate_online( +/*=====================*/ + ulint n_uniq, /*!< in: offset of DB_TRX_ID */ + const dtuple_t* entry, /*!< in: entry that is being inserted */ + const rec_t* rec, /*!< in: clustered index record */ + ulint* offsets)/*!< in/out: rec_get_offsets(rec) */ +{ + ulint fields = 0; + ulint bytes = 0; + + /* During rebuild, there should not be any delete-marked rows + in the new table. */ + ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets))); + ut_ad(dtuple_get_n_fields_cmp(entry) == n_uniq); + + /* Compare the PRIMARY KEY fields and the + DB_TRX_ID, DB_ROLL_PTR. */ + cmp_dtuple_rec_with_match_low( + entry, rec, offsets, n_uniq + 2, &fields, &bytes); + + if (fields < n_uniq) { + /* Not a duplicate. */ + return(DB_SUCCESS); + } + + if (fields == n_uniq + 2) { + /* rec is an exact match of entry. */ + ut_ad(bytes == 0); + return(DB_SUCCESS_LOCKED_REC); + } + + return(DB_DUPLICATE_KEY); +} + +/** Checks for a duplicate when the table is being rebuilt online. +@retval DB_SUCCESS when no duplicate is detected +@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or +a newer version of entry (the entry should not be inserted) +@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_duplicate_error_in_clust_online( +/*====================================*/ + ulint n_uniq, /*!< in: offset of DB_TRX_ID */ + const dtuple_t* entry, /*!< in: entry that is being inserted */ + const btr_cur_t*cursor, /*!< in: cursor on insert position */ + ulint** offsets,/*!< in/out: rec_get_offsets(rec) */ + mem_heap_t** heap) /*!< in/out: heap for offsets */ +{ + dberr_t err = DB_SUCCESS; + const rec_t* rec = btr_cur_get_rec(cursor); + + if (cursor->low_match >= n_uniq && !page_rec_is_infimum(rec)) { + *offsets = rec_get_offsets(rec, cursor->index, *offsets, + ULINT_UNDEFINED, heap); + err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets); + if (err != DB_SUCCESS) { + return(err); + } + } + + rec = page_rec_get_next_const(btr_cur_get_rec(cursor)); + + if (cursor->up_match >= n_uniq && !page_rec_is_supremum(rec)) { + *offsets = rec_get_offsets(rec, cursor->index, *offsets, + ULINT_UNDEFINED, heap); + err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets); + } + + return(err); +} + +/***************************************************************//** +Checks if a unique key violation error would occur at an index entry +insert. Sets shared locks on possible duplicate records. Works only +for a clustered index! 
+@retval DB_SUCCESS if no error +@retval DB_DUPLICATE_KEY if error, +@retval DB_LOCK_WAIT if we have to wait for a lock on a possible duplicate +record */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_duplicate_error_in_clust( + btr_cur_t* cursor, /*!< in: B-tree cursor */ + const dtuple_t* entry, /*!< in: entry to insert */ + que_thr_t* thr, /*!< in: query thread */ + mtr_t* mtr) /*!< in: mtr */ +{ + dberr_t err; + rec_t* rec; + ulint n_unique; + trx_t* trx = thr_get_trx(thr); + mem_heap_t*heap = NULL; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + rec_offs_init(offsets_); + + UT_NOT_USED(mtr); + + ut_ad(dict_index_is_clust(cursor->index)); + + /* NOTE: For unique non-clustered indexes there may be any number + of delete marked records with the same value for the non-clustered + index key (remember multiversioning), and which differ only in + the row refererence part of the index record, containing the + clustered index key fields. For such a secondary index record, + to avoid race condition, we must FIRST do the insertion and after + that check that the uniqueness condition is not breached! */ + + /* NOTE: A problem is that in the B-tree node pointers on an + upper level may match more to the entry than the actual existing + user records on the leaf level. So, even if low_match would suggest + that a duplicate key violation may occur, this may not be the case. */ + + n_unique = dict_index_get_n_unique(cursor->index); + + if (cursor->low_match >= n_unique) { + + rec = btr_cur_get_rec(cursor); + + if (!page_rec_is_infimum(rec)) { + offsets = rec_get_offsets(rec, cursor->index, offsets, + ULINT_UNDEFINED, &heap); + + /* We set a lock on the possible duplicate: this + is needed in logical logging of MySQL to make + sure that in roll-forward we get the same duplicate + errors as in original execution */ + + if (trx->duplicates) { + + /* If the SQL-query will update or replace + duplicate key we will take X-lock for + duplicates ( REPLACE, LOAD DATAFILE REPLACE, + INSERT ON DUPLICATE KEY UPDATE). */ + + err = row_ins_set_exclusive_rec_lock( + LOCK_REC_NOT_GAP, + btr_cur_get_block(cursor), + rec, cursor->index, offsets, thr); + } else { + + err = row_ins_set_shared_rec_lock( + LOCK_REC_NOT_GAP, + btr_cur_get_block(cursor), rec, + cursor->index, offsets, thr); + } + + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + break; + default: + goto func_exit; + } + + if (row_ins_dupl_error_with_rec( + rec, entry, cursor->index, offsets)) { +duplicate: + trx->error_info = cursor->index; + err = DB_DUPLICATE_KEY; + goto func_exit; + } + } + } + + if (cursor->up_match >= n_unique) { + + rec = page_rec_get_next(btr_cur_get_rec(cursor)); + + if (!page_rec_is_supremum(rec)) { + offsets = rec_get_offsets(rec, cursor->index, offsets, + ULINT_UNDEFINED, &heap); + + if (trx->duplicates) { + + /* If the SQL-query will update or replace + duplicate key we will take X-lock for + duplicates ( REPLACE, LOAD DATAFILE REPLACE, + INSERT ON DUPLICATE KEY UPDATE). 
*/ + + err = row_ins_set_exclusive_rec_lock( + LOCK_REC_NOT_GAP, + btr_cur_get_block(cursor), + rec, cursor->index, offsets, thr); + } else { + + err = row_ins_set_shared_rec_lock( + LOCK_REC_NOT_GAP, + btr_cur_get_block(cursor), + rec, cursor->index, offsets, thr); + } + + switch (err) { + case DB_SUCCESS_LOCKED_REC: + case DB_SUCCESS: + break; + default: + goto func_exit; + } + + if (row_ins_dupl_error_with_rec( + rec, entry, cursor->index, offsets)) { + goto duplicate; + } + } + + /* This should never happen */ + ut_error; + } + + err = DB_SUCCESS; +func_exit: + if (UNIV_LIKELY_NULL(heap)) { + mem_heap_free(heap); + } + return(err); +} + +/***************************************************************//** +Checks if an index entry has long enough common prefix with an +existing record so that the intended insert of the entry must be +changed to a modify of the existing record. In the case of a clustered +index, the prefix must be n_unique fields long. In the case of a +secondary index, all fields must be equal. InnoDB never updates +secondary index records in place, other than clearing or setting the +delete-mark flag. We could be able to update the non-unique fields +of a unique secondary index record by checking the cursor->up_match, +but we do not do so, because it could have some locking implications. +@return TRUE if the existing record should be updated; FALSE if not */ +UNIV_INLINE +ibool +row_ins_must_modify_rec( +/*====================*/ + const btr_cur_t* cursor) /*!< in: B-tree cursor */ +{ + /* NOTE: (compare to the note in row_ins_duplicate_error_in_clust) + Because node pointers on upper levels of the B-tree may match more + to entry than to actual user records on the leaf level, we + have to check if the candidate record is actually a user record. + A clustered index node pointer contains index->n_unique first fields, + and a secondary index node pointer contains all index fields. */ + + return(cursor->low_match + >= dict_index_get_n_unique_in_tree(cursor->index) + && !page_rec_is_infimum(btr_cur_get_rec(cursor))); +} + +/***************************************************************//** +Tries to insert an entry into a clustered index, ignoring foreign key +constraints. If a record with the same unique key is found, the other +record is necessarily marked deleted by a committed transaction, or a +unique key violation error occurs. The delete marked record is then +updated to an existing record, and we must write an undo log record on +the delete marked record. 
+@retval DB_SUCCESS on success +@retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG) +@retval DB_FAIL if retry with BTR_MODIFY_TREE is needed +@return error code */ +UNIV_INTERN +dberr_t +row_ins_clust_index_entry_low( +/*==========================*/ + ulint flags, /*!< in: undo logging and locking flags */ + ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE, + depending on whether we wish optimistic or + pessimistic descent down the index tree */ + dict_index_t* index, /*!< in: clustered index */ + ulint n_uniq, /*!< in: 0 or index->n_uniq */ + dtuple_t* entry, /*!< in/out: index entry to insert */ + ulint n_ext, /*!< in: number of externally stored columns */ + que_thr_t* thr) /*!< in: query thread */ +{ + btr_cur_t cursor; + ulint* offsets = NULL; + dberr_t err; + big_rec_t* big_rec = NULL; + mtr_t mtr; + mem_heap_t* offsets_heap = NULL; + ulint search_mode; + + ut_ad(dict_index_is_clust(index)); + ut_ad(!dict_index_is_unique(index) + || n_uniq == dict_index_get_n_unique(index)); + ut_ad(!n_uniq || n_uniq == dict_index_get_n_unique(index)); + + /* If running with fake_changes mode on then switch from modify to + search so that code takes only s-latch and not x-latch. + For dry-run (fake-changes) s-latch is acceptable. Taking x-latch will + make it more restrictive and will block real changes/workflow. */ + if (UNIV_UNLIKELY(thr_get_trx(thr)->fake_changes)) { + search_mode = (mode & BTR_MODIFY_TREE) + ? BTR_SEARCH_TREE : BTR_SEARCH_LEAF; + } else { + search_mode = mode; + } + + mtr_start(&mtr); + + if (mode == BTR_MODIFY_LEAF && dict_index_is_online_ddl(index)) { + + /* We really don't need to OR mode but will leave it for + code consistency. */ + mode |= BTR_ALREADY_S_LATCHED; + search_mode |= BTR_ALREADY_S_LATCHED; + + mtr_s_lock(dict_index_get_lock(index), &mtr); + } + + cursor.thr = thr; + + /* Note that we use PAGE_CUR_LE as the search mode, because then + the function will return in both low_match and up_match of the + cursor sensible values */ + + btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE, search_mode, + &cursor, 0, __FILE__, __LINE__, &mtr); + +#ifdef UNIV_DEBUG + { + page_t* page = btr_cur_get_page(&cursor); + rec_t* first_rec = page_rec_get_next( + page_get_infimum_rec(page)); + + ut_ad(page_rec_is_supremum(first_rec) + || rec_get_n_fields(first_rec, index) + == dtuple_get_n_fields(entry)); + } +#endif + + if (n_uniq && (cursor.up_match >= n_uniq + || cursor.low_match >= n_uniq)) { + + if (flags + == (BTR_CREATE_FLAG | BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG)) { + /* Set no locks when applying log + in online table rebuild. Only check for duplicates. 
*/ + err = row_ins_duplicate_error_in_clust_online( + n_uniq, entry, &cursor, + &offsets, &offsets_heap); + + switch (err) { + case DB_SUCCESS: + break; + default: + ut_ad(0); + /* fall through */ + case DB_SUCCESS_LOCKED_REC: + case DB_DUPLICATE_KEY: + thr_get_trx(thr)->error_info = cursor.index; + } + } else { + /* Note that the following may return also + DB_LOCK_WAIT */ + + err = row_ins_duplicate_error_in_clust( + &cursor, entry, thr, &mtr); + } + + if (err != DB_SUCCESS) { +err_exit: + mtr_commit(&mtr); + goto func_exit; + } + } + + if (row_ins_must_modify_rec(&cursor)) { + /* There is already an index entry with a long enough common + prefix, we must convert the insert into a modify of an + existing record */ + mem_heap_t* entry_heap = mem_heap_create(1024); + + err = row_ins_clust_index_entry_by_modify( + flags, mode, &cursor, &offsets, &offsets_heap, + entry_heap, &big_rec, entry, thr, &mtr); + + rec_t* rec = btr_cur_get_rec(&cursor); + + if (big_rec && UNIV_LIKELY(!thr_get_trx(thr)->fake_changes)) { + ut_a(err == DB_SUCCESS); + /* Write out the externally stored + columns while still x-latching + index->lock and block->lock. Allocate + pages for big_rec in the mtr that + modified the B-tree, but be sure to skip + any pages that were freed in mtr. We will + write out the big_rec pages before + committing the B-tree mini-transaction. If + the system crashes so that crash recovery + will not replay the mtr_commit(&mtr), the + big_rec pages will be left orphaned until + the pages are allocated for something else. + + TODO: If the allocation extends the + tablespace, it will not be redo + logged, in either mini-transaction. + Tablespace extension should be + redo-logged in the big_rec + mini-transaction, so that recovery + will not fail when the big_rec was + written to the extended portion of the + file, in case the file was somehow + truncated in the crash. */ + + DEBUG_SYNC_C_IF_THD( + thr_get_trx(thr)->mysql_thd, + "before_row_ins_upd_extern"); + err = btr_store_big_rec_extern_fields( + index, btr_cur_get_block(&cursor), + rec, offsets, big_rec, &mtr, + BTR_STORE_INSERT_UPDATE); + DEBUG_SYNC_C_IF_THD( + thr_get_trx(thr)->mysql_thd, + "after_row_ins_upd_extern"); + /* If writing big_rec fails (for + example, because of DB_OUT_OF_FILE_SPACE), + the record will be corrupted. Even if + we did not update any externally + stored columns, our update could cause + the record to grow so that a + non-updated column was selected for + external storage. This non-update + would not have been written to the + undo log, and thus the record cannot + be rolled back. + + However, because we have not executed + mtr_commit(mtr) yet, the update will + not be replayed in crash recovery, and + the following assertion failure will + effectively "roll back" the operation. 
*/ + ut_a(err == DB_SUCCESS); + dtuple_big_rec_free(big_rec); + } else if (big_rec != NULL + && UNIV_UNLIKELY(thr_get_trx(thr)->fake_changes)) { + dtuple_big_rec_free(big_rec); + } + + if (err == DB_SUCCESS + && dict_index_is_online_ddl(index) + && UNIV_LIKELY(!thr_get_trx(thr)->fake_changes)) { + row_log_table_insert(rec, index, offsets); + } + + mtr_commit(&mtr); + mem_heap_free(entry_heap); + } else { + rec_t* insert_rec; + + if (mode != BTR_MODIFY_TREE) { + ut_ad(((mode & ~BTR_ALREADY_S_LATCHED) + == BTR_MODIFY_LEAF) + || thr_get_trx(thr)->fake_changes); + err = btr_cur_optimistic_insert( + flags, &cursor, &offsets, &offsets_heap, + entry, &insert_rec, &big_rec, + n_ext, thr, &mtr); + } else { + if (buf_LRU_buf_pool_running_out()) { + + err = DB_LOCK_TABLE_FULL; + goto err_exit; + } + + err = btr_cur_optimistic_insert( + flags, &cursor, + &offsets, &offsets_heap, + entry, &insert_rec, &big_rec, + n_ext, thr, &mtr); + + if (err == DB_FAIL) { + err = btr_cur_pessimistic_insert( + flags, &cursor, + &offsets, &offsets_heap, + entry, &insert_rec, &big_rec, + n_ext, thr, &mtr); + } + } + + if (UNIV_LIKELY_NULL(big_rec)) { + mtr_commit(&mtr); + + if (UNIV_UNLIKELY(thr_get_trx(thr)->fake_changes)) { + + dtuple_convert_back_big_rec( + index, entry, big_rec); + goto func_exit; + } + + /* Online table rebuild could read (and + ignore) the incomplete record at this point. + If online rebuild is in progress, the + row_ins_index_entry_big_rec() will write log. */ + + DBUG_EXECUTE_IF( + "row_ins_extern_checkpoint", + log_make_checkpoint_at( + LSN_MAX, TRUE);); + err = row_ins_index_entry_big_rec( + entry, big_rec, offsets, &offsets_heap, index, + thr_get_trx(thr)->mysql_thd, + __FILE__, __LINE__); + dtuple_convert_back_big_rec(index, entry, big_rec); + } else { + if (err == DB_SUCCESS + && dict_index_is_online_ddl(index) + && !UNIV_UNLIKELY(thr_get_trx(thr)->fake_changes)) { + row_log_table_insert( + insert_rec, index, offsets); + } + + mtr_commit(&mtr); + } + } + +func_exit: + if (offsets_heap) { + mem_heap_free(offsets_heap); + } + + return(err); +} + +/***************************************************************//** +Starts a mini-transaction and checks if the index will be dropped. +@return true if the index is to be dropped */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +bool +row_ins_sec_mtr_start_and_check_if_aborted( +/*=======================================*/ + mtr_t* mtr, /*!< out: mini-transaction */ + dict_index_t* index, /*!< in/out: secondary index */ + bool check, /*!< in: whether to check */ + ulint search_mode) + /*!< in: flags */ +{ + ut_ad(!dict_index_is_clust(index)); + + mtr_start(mtr); + + if (!check) { + return(false); + } + + if (search_mode & BTR_ALREADY_S_LATCHED) { + mtr_s_lock(dict_index_get_lock(index), mtr); + } else { + mtr_x_lock(dict_index_get_lock(index), mtr); + } + + switch (index->online_status) { + case ONLINE_INDEX_ABORTED: + case ONLINE_INDEX_ABORTED_DROPPED: + ut_ad(*index->name == TEMP_INDEX_PREFIX); + return(true); + case ONLINE_INDEX_COMPLETE: + return(false); + case ONLINE_INDEX_CREATION: + break; + } + + ut_error; + return(true); +} + +/***************************************************************//** +Tries to insert an entry into a secondary index. If a record with exactly the +same fields is found, the other record is necessarily marked deleted. +It is then unmarked. Otherwise, the entry is just inserted to the index. 
+@retval DB_SUCCESS on success +@retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG) +@retval DB_FAIL if retry with BTR_MODIFY_TREE is needed +@return error code */ +UNIV_INTERN +dberr_t +row_ins_sec_index_entry_low( +/*========================*/ + ulint flags, /*!< in: undo logging and locking flags */ + ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE, + depending on whether we wish optimistic or + pessimistic descent down the index tree */ + dict_index_t* index, /*!< in: secondary index */ + mem_heap_t* offsets_heap, + /*!< in/out: memory heap that can be emptied */ + mem_heap_t* heap, /*!< in/out: memory heap */ + dtuple_t* entry, /*!< in/out: index entry to insert */ + trx_id_t trx_id, /*!< in: PAGE_MAX_TRX_ID during + row_log_table_apply(), or 0 */ + que_thr_t* thr) /*!< in: query thread */ +{ + btr_cur_t cursor; + ulint search_mode; + dberr_t err = DB_SUCCESS; + ulint n_unique; + mtr_t mtr; + ulint* offsets = NULL; + + ut_ad(!dict_index_is_clust(index)); + ut_ad(mode == BTR_MODIFY_LEAF || mode == BTR_MODIFY_TREE); + + cursor.thr = thr; + ut_ad(thr_get_trx(thr)->id); + mtr_start(&mtr); + + /* If running with fake_changes mode on then avoid using insert buffer + and also switch from modify to search so that code takes only s-latch + and not x-latch. For dry-run (fake-changes) s-latch is acceptable. + Taking x-latch will make it more restrictive and will block real + changes/workflow. */ + if (UNIV_UNLIKELY(thr_get_trx(thr)->fake_changes)) { + search_mode = (mode & BTR_MODIFY_TREE) + ? BTR_SEARCH_TREE : BTR_SEARCH_LEAF; + } else { + search_mode = mode | BTR_INSERT; + } + + /* Ensure that we acquire index->lock when inserting into an + index with index->online_status == ONLINE_INDEX_COMPLETE, but + could still be subject to rollback_inplace_alter_table(). + This prevents a concurrent change of index->online_status. + The memory object cannot be freed as long as we have an open + reference to the table, or index->table->n_ref_count > 0. */ + const bool check = *index->name == TEMP_INDEX_PREFIX; + + if (check) { + + DEBUG_SYNC_C("row_ins_sec_index_enter"); + + /* mode = MODIFY_LEAF is synonymous to search_mode = SEARCH_LEAF + search_mode = SEARCH_TREE suggest operation in fake_change mode + so continue to s-latch in this mode too. 
*/ + + if (mode == BTR_MODIFY_LEAF || search_mode == BTR_SEARCH_TREE) { + + ut_ad((search_mode == BTR_SEARCH_TREE + && thr_get_trx(thr)->fake_changes) + || mode == BTR_MODIFY_LEAF); + + search_mode |= BTR_ALREADY_S_LATCHED; + mtr_s_lock(dict_index_get_lock(index), &mtr); + + } else { + mtr_x_lock(dict_index_get_lock(index), &mtr); + } + + if (row_log_online_op_try( + index, entry, thr_get_trx(thr)->id)) { + goto func_exit; + } + } + + if (!thr_get_trx(thr)->check_unique_secondary) { + search_mode |= BTR_IGNORE_SEC_UNIQUE; + } + + /* Note that we use PAGE_CUR_LE as the search mode, because then + the function will return in both low_match and up_match of the + cursor sensible values */ + btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE, + search_mode, + &cursor, 0, __FILE__, __LINE__, &mtr); + + if (cursor.flag == BTR_CUR_INSERT_TO_IBUF) { + /* The insert was buffered during the search: we are done */ + goto func_exit; + } + +#ifdef UNIV_DEBUG + { + page_t* page = btr_cur_get_page(&cursor); + rec_t* first_rec = page_rec_get_next( + page_get_infimum_rec(page)); + + ut_ad(page_rec_is_supremum(first_rec) + || rec_get_n_fields(first_rec, index) + == dtuple_get_n_fields(entry)); + } +#endif + + n_unique = dict_index_get_n_unique(index); + + if (dict_index_is_unique(index) + && (cursor.low_match >= n_unique || cursor.up_match >= n_unique)) { + mtr_commit(&mtr); + + DEBUG_SYNC_C("row_ins_sec_index_unique"); + + if (row_ins_sec_mtr_start_and_check_if_aborted( + &mtr, index, check, search_mode)) { + goto func_exit; + } + + err = row_ins_scan_sec_index_for_duplicate( + flags, index, entry, thr, check, &mtr, offsets_heap); + + mtr_commit(&mtr); + + switch (err) { + case DB_SUCCESS: + break; + case DB_DUPLICATE_KEY: + if (*index->name == TEMP_INDEX_PREFIX) { + ut_ad(!thr_get_trx(thr) + ->dict_operation_lock_mode); + mutex_enter(&dict_sys->mutex); + dict_set_corrupted_index_cache_only( + index, index->table); + mutex_exit(&dict_sys->mutex); + /* Do not return any error to the + caller. The duplicate will be reported + by ALTER TABLE or CREATE UNIQUE INDEX. + Unfortunately we cannot report the + duplicate key value to the DDL thread, + because the altered_table object is + private to its call stack. */ + err = DB_SUCCESS; + } + /* fall through */ + default: + return(err); + } + + if (row_ins_sec_mtr_start_and_check_if_aborted( + &mtr, index, check, search_mode)) { + goto func_exit; + } + + DEBUG_SYNC_C("row_ins_sec_index_entry_dup_locks_created"); + + /* We did not find a duplicate and we have now + locked with s-locks the necessary records to + prevent any insertion of a duplicate by another + transaction. Let us now reposition the cursor and + continue the insertion. 
*/ + + btr_cur_search_to_nth_level( + index, 0, entry, PAGE_CUR_LE, + search_mode & ~(BTR_INSERT | BTR_IGNORE_SEC_UNIQUE), + &cursor, 0, __FILE__, __LINE__, &mtr); + } + + if (row_ins_must_modify_rec(&cursor)) { + /* There is already an index entry with a long enough common + prefix, we must convert the insert into a modify of an + existing record */ + offsets = rec_get_offsets( + btr_cur_get_rec(&cursor), index, offsets, + ULINT_UNDEFINED, &offsets_heap); + + err = row_ins_sec_index_entry_by_modify( + flags, mode, &cursor, &offsets, + offsets_heap, heap, entry, thr, &mtr); + } else { + rec_t* insert_rec; + big_rec_t* big_rec; + + if (mode == BTR_MODIFY_LEAF) { + err = btr_cur_optimistic_insert( + flags, &cursor, &offsets, &offsets_heap, + entry, &insert_rec, + &big_rec, 0, thr, &mtr); + } else { + ut_ad(mode == BTR_MODIFY_TREE); + if (buf_LRU_buf_pool_running_out()) { + + err = DB_LOCK_TABLE_FULL; + goto func_exit; + } + + err = btr_cur_optimistic_insert( + flags, &cursor, + &offsets, &offsets_heap, + entry, &insert_rec, + &big_rec, 0, thr, &mtr); + if (err == DB_FAIL) { + err = btr_cur_pessimistic_insert( + flags, &cursor, + &offsets, &offsets_heap, + entry, &insert_rec, + &big_rec, 0, thr, &mtr); + } + } + + if (err == DB_SUCCESS && trx_id) { + page_update_max_trx_id( + btr_cur_get_block(&cursor), + btr_cur_get_page_zip(&cursor), + trx_id, &mtr); + } + + ut_ad(!big_rec); + } + +func_exit: + mtr_commit(&mtr); + return(err); +} + +/***************************************************************//** +Tries to insert the externally stored fields (off-page columns) +of a clustered index entry. +@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ +UNIV_INTERN +dberr_t +row_ins_index_entry_big_rec_func( +/*=============================*/ + const dtuple_t* entry, /*!< in/out: index entry to insert */ + const big_rec_t* big_rec,/*!< in: externally stored fields */ + ulint* offsets,/*!< in/out: rec offsets */ + mem_heap_t** heap, /*!< in/out: memory heap */ + dict_index_t* index, /*!< in: index */ + const char* file, /*!< in: file name of caller */ +#ifndef DBUG_OFF + const void* thd, /*!< in: connection, or NULL */ +#endif /* DBUG_OFF */ + ulint line) /*!< in: line number of caller */ +{ + mtr_t mtr; + btr_cur_t cursor; + rec_t* rec; + dberr_t error; + + ut_ad(dict_index_is_clust(index)); + + DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern_latch"); + + mtr_start(&mtr); + btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE, + BTR_MODIFY_TREE, &cursor, 0, + file, line, &mtr); + rec = btr_cur_get_rec(&cursor); + offsets = rec_get_offsets(rec, index, offsets, + ULINT_UNDEFINED, heap); + + DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern"); + error = btr_store_big_rec_extern_fields( + index, btr_cur_get_block(&cursor), + rec, offsets, big_rec, &mtr, BTR_STORE_INSERT); + DEBUG_SYNC_C_IF_THD(thd, "after_row_ins_extern"); + + if (error == DB_SUCCESS + && dict_index_is_online_ddl(index)) { + row_log_table_insert(rec, index, offsets); + } + + mtr_commit(&mtr); + + return(error); +} + +/***************************************************************//** +Inserts an entry into a clustered index. Tries first optimistic, +then pessimistic descent down the tree. If the entry matches enough +to a delete marked record, performs the insert by updating or delete +unmarking the delete marked record. 
+@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */ +UNIV_INTERN +dberr_t +row_ins_clust_index_entry( +/*======================*/ + dict_index_t* index, /*!< in: clustered index */ + dtuple_t* entry, /*!< in/out: index entry to insert */ + que_thr_t* thr, /*!< in: query thread */ + ulint n_ext) /*!< in: number of externally stored columns */ +{ + dberr_t err; + ulint n_uniq; + + if (!index->table->foreign_set.empty()) { + err = row_ins_check_foreign_constraints( + index->table, index, entry, thr); + if (err != DB_SUCCESS) { + + return(err); + } + } + + n_uniq = dict_index_is_unique(index) ? index->n_uniq : 0; + + /* Try first optimistic descent to the B-tree */ + + log_free_check(); + + err = row_ins_clust_index_entry_low( + 0, BTR_MODIFY_LEAF, index, n_uniq, entry, n_ext, thr); + +#ifdef UNIV_DEBUG + /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC(). + Once it is fixed, remove the 'ifdef', 'if' and this comment. */ + if (!thr_get_trx(thr)->ddl) { + DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd, + "after_row_ins_clust_index_entry_leaf"); + } +#endif /* UNIV_DEBUG */ + + if (err != DB_FAIL) { + DEBUG_SYNC_C("row_ins_clust_index_entry_leaf_after"); + return(err); + } + + /* Try then pessimistic descent to the B-tree */ + + log_free_check(); + + return(row_ins_clust_index_entry_low( + 0, BTR_MODIFY_TREE, index, n_uniq, entry, n_ext, thr)); +} + +/***************************************************************//** +Inserts an entry into a secondary index. Tries first optimistic, +then pessimistic descent down the tree. If the entry matches enough +to a delete marked record, performs the insert by updating or delete +unmarking the delete marked record. +@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */ +UNIV_INTERN +dberr_t +row_ins_sec_index_entry( +/*====================*/ + dict_index_t* index, /*!< in: secondary index */ + dtuple_t* entry, /*!< in/out: index entry to insert */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + mem_heap_t* offsets_heap; + mem_heap_t* heap; + + DBUG_EXECUTE_IF("row_ins_sec_index_entry_timeout", { + DBUG_SET("-d,row_ins_sec_index_entry_timeout"); + return(DB_LOCK_WAIT);}); + + if (!index->table->foreign_set.empty()) { + err = row_ins_check_foreign_constraints(index->table, index, + entry, thr); + if (err != DB_SUCCESS) { + + return(err); + } + } + + ut_ad(thr_get_trx(thr)->id); + + offsets_heap = mem_heap_create(1024); + heap = mem_heap_create(1024); + + /* Try first optimistic descent to the B-tree */ + + log_free_check(); + + err = row_ins_sec_index_entry_low( + 0, BTR_MODIFY_LEAF, index, offsets_heap, heap, entry, 0, thr); + if (err == DB_FAIL) { + mem_heap_empty(heap); + + if (index->space == IBUF_SPACE_ID + && !dict_index_is_unique(index)) { + ibuf_free_excess_pages(); + } + + /* Try then pessimistic descent to the B-tree */ + + log_free_check(); + + err = row_ins_sec_index_entry_low( + 0, BTR_MODIFY_TREE, index, + offsets_heap, heap, entry, 0, thr); + } + + mem_heap_free(heap); + mem_heap_free(offsets_heap); + return(err); +} + +/***************************************************************//** +Inserts an index entry to index. Tries first optimistic, then pessimistic +descent down the tree. If the entry matches enough to a delete marked record, +performs the insert by updating or delete unmarking the delete marked +record. 
+@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */ +static +dberr_t +row_ins_index_entry( +/*================*/ + dict_index_t* index, /*!< in: index */ + dtuple_t* entry, /*!< in/out: index entry to insert */ + que_thr_t* thr) /*!< in: query thread */ +{ + DBUG_EXECUTE_IF("row_ins_index_entry_timeout", { + DBUG_SET("-d,row_ins_index_entry_timeout"); + return(DB_LOCK_WAIT);}); + + if (dict_index_is_clust(index)) { + return(row_ins_clust_index_entry(index, entry, thr, 0)); + } else { + return(row_ins_sec_index_entry(index, entry, thr)); + } +} + +/***********************************************************//** +Sets the values of the dtuple fields in entry from the values of appropriate +columns in row. */ +static MY_ATTRIBUTE((nonnull)) +void +row_ins_index_entry_set_vals( +/*=========================*/ + dict_index_t* index, /*!< in: index */ + dtuple_t* entry, /*!< in: index entry to make */ + const dtuple_t* row) /*!< in: row */ +{ + ulint n_fields; + ulint i; + + n_fields = dtuple_get_n_fields(entry); + + for (i = 0; i < n_fields; i++) { + dict_field_t* ind_field; + dfield_t* field; + const dfield_t* row_field; + ulint len; + + field = dtuple_get_nth_field(entry, i); + ind_field = dict_index_get_nth_field(index, i); + row_field = dtuple_get_nth_field(row, ind_field->col->ind); + len = dfield_get_len(row_field); + + /* Check column prefix indexes */ + if (ind_field->prefix_len > 0 + && dfield_get_len(row_field) != UNIV_SQL_NULL) { + + const dict_col_t* col + = dict_field_get_col(ind_field); + + len = dtype_get_at_most_n_mbchars( + col->prtype, col->mbminlen, col->mbmaxlen, + ind_field->prefix_len, + len, + static_cast<const char*>( + dfield_get_data(row_field))); + + ut_ad(!dfield_is_ext(row_field)); + } + + dfield_set_data(field, dfield_get_data(row_field), len); + if (dfield_is_ext(row_field)) { + ut_ad(dict_index_is_clust(index)); + dfield_set_ext(field); + } + } +} + +/***********************************************************//** +Inserts a single index entry to the table. +@return DB_SUCCESS if operation successfully completed, else error +code or DB_LOCK_WAIT */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins_index_entry_step( +/*=====================*/ + ins_node_t* node, /*!< in: row insert node */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + + ut_ad(dtuple_check_typed(node->row)); + + row_ins_index_entry_set_vals(node->index, node->entry, node->row); + + ut_ad(dtuple_check_typed(node->entry)); + + err = row_ins_index_entry(node->index, node->entry, thr); + +#ifdef UNIV_DEBUG + /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC(). + Once it is fixed, remove the 'ifdef', 'if' and this comment. */ + if (!thr_get_trx(thr)->ddl) { + DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd, + "after_row_ins_index_entry_step"); + } +#endif /* UNIV_DEBUG */ + + return(err); +} + +/***********************************************************//** +Allocates a row id for row and inits the node->index field. 
*/ +UNIV_INLINE +void +row_ins_alloc_row_id_step( +/*======================*/ + ins_node_t* node) /*!< in: row insert node */ +{ + row_id_t row_id; + + ut_ad(node->state == INS_NODE_ALLOC_ROW_ID); + + if (dict_index_is_unique(dict_table_get_first_index(node->table))) { + + /* No row id is stored if the clustered index is unique */ + + return; + } + + /* Fill in row id value to row */ + + row_id = dict_sys_get_new_row_id(); + + dict_sys_write_row_id(node->row_id_buf, row_id); +} + +/***********************************************************//** +Gets a row to insert from the values list. */ +UNIV_INLINE +void +row_ins_get_row_from_values( +/*========================*/ + ins_node_t* node) /*!< in: row insert node */ +{ + que_node_t* list_node; + dfield_t* dfield; + dtuple_t* row; + ulint i; + + /* The field values are copied in the buffers of the select node and + it is safe to use them until we fetch from select again: therefore + we can just copy the pointers */ + + row = node->row; + + i = 0; + list_node = node->values_list; + + while (list_node) { + eval_exp(list_node); + + dfield = dtuple_get_nth_field(row, i); + dfield_copy_data(dfield, que_node_get_val(list_node)); + + i++; + list_node = que_node_get_next(list_node); + } +} + +/***********************************************************//** +Gets a row to insert from the select list. */ +UNIV_INLINE +void +row_ins_get_row_from_select( +/*========================*/ + ins_node_t* node) /*!< in: row insert node */ +{ + que_node_t* list_node; + dfield_t* dfield; + dtuple_t* row; + ulint i; + + /* The field values are copied in the buffers of the select node and + it is safe to use them until we fetch from select again: therefore + we can just copy the pointers */ + + row = node->row; + + i = 0; + list_node = node->select->select_list; + + while (list_node) { + dfield = dtuple_get_nth_field(row, i); + dfield_copy_data(dfield, que_node_get_val(list_node)); + + i++; + list_node = que_node_get_next(list_node); + } +} + +/***********************************************************//** +Inserts a row to a table. 
+@return DB_SUCCESS if operation successfully completed, else error +code or DB_LOCK_WAIT */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +dberr_t +row_ins( +/*====*/ + ins_node_t* node, /*!< in: row insert node */ + que_thr_t* thr) /*!< in: query thread */ +{ + dberr_t err; + + if (node->state == INS_NODE_ALLOC_ROW_ID) { + + row_ins_alloc_row_id_step(node); + + node->index = dict_table_get_first_index(node->table); + node->entry = UT_LIST_GET_FIRST(node->entry_list); + + if (node->ins_type == INS_SEARCHED) { + + row_ins_get_row_from_select(node); + + } else if (node->ins_type == INS_VALUES) { + + row_ins_get_row_from_values(node); + } + + node->state = INS_NODE_INSERT_ENTRIES; + } + + ut_ad(node->state == INS_NODE_INSERT_ENTRIES); + + while (node->index != NULL) { + if (node->index->type != DICT_FTS) { + err = row_ins_index_entry_step(node, thr); + + if (err != DB_SUCCESS) { + + return(err); + } + } + + node->index = dict_table_get_next_index(node->index); + node->entry = UT_LIST_GET_NEXT(tuple_list, node->entry); + + DBUG_EXECUTE_IF( + "row_ins_skip_sec", + node->index = NULL; node->entry = NULL; break;); + + /* Skip corrupted secondary index and its entry */ + while (node->index && dict_index_is_corrupted(node->index)) { + + node->index = dict_table_get_next_index(node->index); + node->entry = UT_LIST_GET_NEXT(tuple_list, node->entry); + } + } + + ut_ad(node->entry == NULL); + + node->state = INS_NODE_ALLOC_ROW_ID; + + return(DB_SUCCESS); +} + +/***********************************************************//** +Inserts a row to a table. This is a high-level function used in SQL execution +graphs. +@return query thread to run next or NULL */ +UNIV_INTERN +que_thr_t* +row_ins_step( +/*=========*/ + que_thr_t* thr) /*!< in: query thread */ +{ + ins_node_t* node; + que_node_t* parent; + sel_node_t* sel_node; + trx_t* trx; + dberr_t err; + + ut_ad(thr); + + trx = thr_get_trx(thr); + + trx_start_if_not_started_xa(trx); + + node = static_cast<ins_node_t*>(thr->run_node); + + ut_ad(que_node_get_type(node) == QUE_NODE_INSERT); + + parent = que_node_get_parent(node); + sel_node = node->select; + + if (thr->prev_node == parent) { + node->state = INS_NODE_SET_IX_LOCK; + } + + /* If this is the first time this node is executed (or when + execution resumes after wait for the table IX lock), set an + IX lock on the table and reset the possible select node. MySQL's + partitioned table code may also call an insert within the same + SQL statement AFTER it has used this table handle to do a search. + This happens, for example, when a row update moves it to another + partition. In that case, we have already set the IX lock on the + table during the search operation, and there is no need to set + it again here. But we must write trx->id to node->trx_id_buf. 
*/ + + trx_write_trx_id(node->trx_id_buf, trx->id); + + if (node->state == INS_NODE_SET_IX_LOCK) { + + node->state = INS_NODE_ALLOC_ROW_ID; + + /* It may be that the current session has not yet started + its transaction, or it has been committed: */ + + if (trx->id == node->trx_id) { + /* No need to do IX-locking */ + + goto same_trx; + } + + err = lock_table(0, node->table, LOCK_IX, thr); + + DBUG_EXECUTE_IF("ib_row_ins_ix_lock_wait", + err = DB_LOCK_WAIT;); + + if (err != DB_SUCCESS) { + + goto error_handling; + } + + node->trx_id = trx->id; +same_trx: + if (node->ins_type == INS_SEARCHED) { + /* Reset the cursor */ + sel_node->state = SEL_NODE_OPEN; + + /* Fetch a row to insert */ + + thr->run_node = sel_node; + + return(thr); + } + } + + if ((node->ins_type == INS_SEARCHED) + && (sel_node->state != SEL_NODE_FETCH)) { + + ut_ad(sel_node->state == SEL_NODE_NO_MORE_ROWS); + + /* No more rows to insert */ + thr->run_node = parent; + + return(thr); + } + + /* DO THE CHECKS OF THE CONSISTENCY CONSTRAINTS HERE */ + + err = row_ins(node, thr); + +error_handling: + trx->error_state = err; + + if (err != DB_SUCCESS) { + /* err == DB_LOCK_WAIT or SQL error detected */ + return(NULL); + } + + /* DO THE TRIGGER ACTIONS HERE */ + + if (node->ins_type == INS_SEARCHED) { + /* Fetch a row to insert */ + + thr->run_node = sel_node; + } else { + thr->run_node = que_node_get_parent(node); + } + + return(thr); +} diff --cc zlib/CMakeLists.txt index dd1e45d4acf,0c224e7ce22..d09f6c13f50 --- a/zlib/CMakeLists.txt +++ b/zlib/CMakeLists.txt @@@ -7,18 -8,141 +8,140 @@@ # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - # GNU General Public License for more details. - # + # GNU General Public License, version 2.0, for more details. + # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software - # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + cmake_minimum_required(VERSION 2.4.4) + set(CMAKE_ALLOW_LOOSE_LOOP_CONSTRUCTS ON) + + project(zlib C) + + set(VERSION "1.2.11") + + include(CheckTypeSize) + include(CheckFunctionExists) + include(CheckIncludeFile) + include(CheckCSourceCompiles) + + check_include_file(sys/types.h HAVE_SYS_TYPES_H) + check_include_file(stdint.h HAVE_STDINT_H) + check_include_file(stddef.h HAVE_STDDEF_H) + + # + # Check to see if we have large file support + # + set(CMAKE_REQUIRED_DEFINITIONS -D_LARGEFILE64_SOURCE=1) + # We add these other definitions here because CheckTypeSize.cmake + # in CMake 2.4.x does not automatically do so and we want + # compatibility with CMake 2.4.x. 
+ if(HAVE_SYS_TYPES_H) + list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_SYS_TYPES_H) + endif() + if(HAVE_STDINT_H) + list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_STDINT_H) + endif() + if(HAVE_STDDEF_H) + list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_STDDEF_H) + endif() + check_type_size(off64_t OFF64_T) + if(HAVE_OFF64_T) + add_definitions(-D_LARGEFILE64_SOURCE=1) + endif() + set(CMAKE_REQUIRED_DEFINITIONS) # clear variable + + # + # Check for fseeko + # + check_function_exists(fseeko HAVE_FSEEKO) + if(NOT HAVE_FSEEKO) + add_definitions(-DNO_FSEEKO) + endif() + + # + # Check for unistd.h + # + check_include_file(unistd.h Z_HAVE_UNISTD_H) - INCLUDE_DIRECTORIES( - ${CMAKE_SOURCE_DIR}/include - ${CMAKE_SOURCE_DIR}/zlib + + configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/zconf.h.cmakein + ${CMAKE_CURRENT_BINARY_DIR}/zconf.h @ONLY + ) + include_directories( + SYSTEM ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} + ) + + #============================================================================ + # zlib + #============================================================================ + + set(ZLIB_PUBLIC_HDRS + ${CMAKE_CURRENT_BINARY_DIR}/zconf.h + zlib.h + ) + set(ZLIB_PRIVATE_HDRS + crc32.h + deflate.h + gzguts.h + inffast.h + inffixed.h + inflate.h + inftrees.h + trees.h + zutil.h + ) + set(ZLIB_SRCS + adler32.c + compress.c + crc32.c + deflate.c + gzclose.c + gzlib.c + gzread.c + gzwrite.c + inflate.c + infback.c + inftrees.c + inffast.c + trees.c + uncompr.c + zutil.c ) - SET(ZLIB_SOURCES adler32.c compress.c crc32.c crc32.h deflate.c deflate.h gzio.c infback.c inffast.c inffast.h - inffixed.h inflate.c inflate.h inftrees.c inftrees.h trees.c trees.h uncompr.c zconf.h zlib.h - zutil.c zutil.h) - ADD_CONVENIENCE_LIBRARY(zlib ${ZLIB_SOURCES}) - RESTRICT_SYMBOL_EXPORTS(zlib) + + if(NOT MINGW) + set(ZLIB_DLL_SRCS + win32/zlib1.rc # If present will override custom build rule below. + ) + endif() - + # parse the full version number from zlib.h and include in ZLIB_FULL_VERSION + file(READ ${CMAKE_CURRENT_SOURCE_DIR}/zlib.h _zlib_h_contents) + string(REGEX REPLACE ".*#define[ \t]+ZLIB_VERSION[ \t]+\"([-0-9A-Za-z.]+)\".*" + "\\1" ZLIB_FULL_VERSION ${_zlib_h_contents}) + + ADD_CONVENIENCE_LIBRARY(zlib STATIC + ${ZLIB_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS}) + + if(NOT CYGWIN) + # This property causes shared libraries on Linux to have the full version + # encoded into their final filename. We disable this on Cygwin because + # it causes cygz-${ZLIB_FULL_VERSION}.dll to be created when cygz.dll + # seems to be the default. + # + # This has no effect with MSVC, on that platform the version info for + # the DLL comes from the resource file win32/zlib1.rc + set_target_properties(zlib PROPERTIES VERSION ${ZLIB_FULL_VERSION}) + endif() + + if(CMAKE_SYSTEM_NAME MATCHES "SunOS") + # On unix-like platforms the library is almost always called libz + set_target_properties(zlib PROPERTIES OUTPUT_NAME z) + elseif(UNIX) + # On unix-like platforms the library is almost always called libz + set_target_properties(zlib PROPERTIES OUTPUT_NAME z) + if(NOT APPLE) + set_target_properties(zlib PROPERTIES LINK_FLAGS "-Wl,--version-script,\"${CMAKE_CURRENT_SOURCE_DIR}/zlib.map\"") + endif() -endif() ++endif()
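Editor's note on the WITH_WSREP hunk in row_ins_check_foreign_constraint(): the new logic picks the Galera key type appended for a parent-row reference. The snippet below is an illustrative stand-alone model only, not the server code; the enum values and the helper name mirror the identifiers in the diff (wsrep_certification_rules, WSREP_KEY_SHARED/SEMI/EXCLUSIVE), but all types are redeclared locally so it compiles on its own.

#include <stdio.h>
#include <stdbool.h>

/* Local stand-ins for the server-side types referenced by the diff. */
enum wsrep_key_type {
	WSREP_KEY_SHARED,
	WSREP_KEY_SEMI,
	WSREP_KEY_EXCLUSIVE
};

enum wsrep_cert_rules {
	WSREP_CERTIFICATION_RULES_STRICT,
	WSREP_CERTIFICATION_RULES_OPTIMIZED
};

/* Mirrors the selection added to row_ins_check_foreign_constraint():
   a cascading UPDATE/DELETE (upd_node != NULL in the diff) appends a
   shared key, while a plain parent-row existence check appends either
   an exclusive key (strict rules) or a "semi" key (optimized rules). */
static enum wsrep_key_type
fk_reference_key_type(bool is_cascade, enum wsrep_cert_rules rules)
{
	if (is_cascade) {
		return WSREP_KEY_SHARED;
	}

	switch (rules) {
	case WSREP_CERTIFICATION_RULES_OPTIMIZED:
		return WSREP_KEY_SEMI;
	case WSREP_CERTIFICATION_RULES_STRICT:
	default:
		return WSREP_KEY_EXCLUSIVE;
	}
}

int main(void)
{
	printf("cascade:   %d\n",
	       fk_reference_key_type(true, WSREP_CERTIFICATION_RULES_STRICT));
	printf("strict:    %d\n",
	       fk_reference_key_type(false, WSREP_CERTIFICATION_RULES_STRICT));
	printf("optimized: %d\n",
	       fk_reference_key_type(false, WSREP_CERTIFICATION_RULES_OPTIMIZED));
	return 0;
}

In the patch the chosen key type is then passed to wsrep_append_foreign_key() in place of the old TRUE/FALSE "shared" flag.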
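A second pattern worth calling out for readers new to this file: both insert entry points (row_ins_clust_index_entry() and row_ins_sec_index_entry()) try a leaf-level optimistic insert first and fall back to a tree-restructuring pessimistic pass only when the first attempt returns DB_FAIL. The sketch below is a minimal stand-alone model of that control flow under assumed names; try_optimistic_insert() and try_pessimistic_insert() are hypothetical stand-ins for the BTR_MODIFY_LEAF and BTR_MODIFY_TREE calls into row_ins_*_index_entry_low().

#include <stdio.h>

/* Illustrative stand-ins for the engine's error codes. */
typedef enum {
	DB_SUCCESS,
	DB_FAIL,
	DB_LOCK_WAIT,
	DB_DUPLICATE_KEY
} dberr_t;

/* Hypothetical attempts; in the server these are the cheap leaf-page
   insert and the expensive insert that may split B-tree pages. */
static dberr_t try_optimistic_insert(void)  { return DB_FAIL;    }
static dberr_t try_pessimistic_insert(void) { return DB_SUCCESS; }

/* Same shape as the entry points in the diff: only DB_FAIL from the
   cheap attempt triggers the expensive one; any other result, success
   or error, is returned unchanged. */
static dberr_t insert_index_entry(void)
{
	dberr_t err = try_optimistic_insert();

	if (err != DB_FAIL) {
		return err;
	}

	return try_pessimistic_insert();
}

int main(void)
{
	printf("insert result: %d\n", insert_index_entry());
	return 0;
}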