maria-developers mailing list archive, March 2009
13 participants, 53 discussions
[Maria-developers] bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (knielsen:2685)
by knielsen@knielsen-hq.org 18 Mar '09
#At lp:maria
2685 knielsen(a)knielsen-hq.org 2009-03-17
When we remove the server data dir before server restart, preserve the
error log (don't delete it between restarts), as it may contain
valuable information even for test cases which don't show direct
failures.
modified:
mysql-test/mysql-test-run.pl
=== modified file 'mysql-test/mysql-test-run.pl'
--- a/mysql-test/mysql-test-run.pl 2009-03-17 08:23:05 +0000
+++ b/mysql-test/mysql-test-run.pl 2009-03-17 11:58:05 +0000
@@ -3511,6 +3511,29 @@ sub run_testcase ($$) {
}
+# We want to preserve the error log between server restarts, as it may contain
+# valuable debugging information even if there is no test failure recorded.
+sub _preserve_error_log_names {
+ my ($mysqld)= @_;
+ my $error_log_file= $mysqld->value('log-error');
+ my $error_log_dir= dirname($error_log_file);
+ my $save_name= $error_log_dir ."/../". $mysqld->name() .".error.log";
+ return ($error_log_file, $save_name);
+}
+
+sub preserve_error_log {
+ my ($mysqld)= @_;
+ my ($error_log_file, $save_name)= _preserve_error_log_names($mysqld);
+ my $res= rename($error_log_file, $save_name);
+ # Ignore any errors, as it's just a best-effort to keep the log if possible.
+}
+
+sub restore_error_log {
+ my ($mysqld)= @_;
+ my ($error_log_file, $save_name)= _preserve_error_log_names($mysqld);
+ my $res= rename($save_name, $error_log_file);
+}
+
# Keep track of last position in mysqld error log where we scanned for
# warnings, so we can attribute any warnings found to the correct test
# suite or server restart.
@@ -3929,6 +3952,7 @@ sub clean_datadir {
foreach my $mysqld ( mysqlds() )
{
my $mysqld_dir= dirname($mysqld->value('datadir'));
+ preserve_error_log($mysqld);
if (-d $mysqld_dir ) {
mtr_verbose(" - removing '$mysqld_dir'");
rmtree($mysqld_dir);
@@ -4579,6 +4603,7 @@ sub start_servers($) {
}
if (-d $datadir ) {
+ preserve_error_log($mysqld);
mtr_verbose(" - removing '$datadir'");
rmtree($datadir);
}
@@ -4604,6 +4629,7 @@ sub start_servers($) {
unless -d $datadir;
}
+ restore_error_log($mysqld);
# Create the servers tmpdir
my $tmpdir= $mysqld->value('tmpdir');
[Maria-developers] bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (knielsen:2686)
by knielsen@knielsen-hq.org 18 Mar '09
#At lp:maria
2686 knielsen(a)knielsen-hq.org 2009-03-18
Fix some Valgrind warnings.
modified:
dbug/dbug.c
mysql-test/valgrind.supp
sql/mysqld.cc
sql/scheduler.cc
sql/sql_select.cc
=== modified file 'dbug/dbug.c'
--- a/dbug/dbug.c 2009-03-12 22:27:35 +0000
+++ b/dbug/dbug.c 2009-03-18 14:08:05 +0000
@@ -506,6 +506,7 @@ int DbugParse(CODE_STATE *cs, const char
rel= control[0] == '+' || control[0] == '-';
if ((!rel || (!stack->out_file && !stack->next)))
{
+ FreeState(cs, stack, 0);
stack->flags= 0;
stack->delay= 0;
stack->maxdepth= 0;
@@ -1648,10 +1649,12 @@ static void FreeState(CODE_STATE *cs, st
FreeList(state->processes);
if (!is_shared(state, p_functions))
FreeList(state->p_functions);
- if (!is_shared(state, out_file))
+ if (!is_shared(state, out_file) &&
+ state->out_file != stderr && state->out_file != stdout)
DBUGCloseFile(cs, state->out_file);
(void) fflush(cs->stack->out_file);
- if (state->prof_file)
+ if (state->prof_file &&
+ state->out_file != stderr && state->out_file != stdout)
DBUGCloseFile(cs, state->prof_file);
if (free_state)
free((void*) state);
=== modified file 'mysql-test/valgrind.supp'
--- a/mysql-test/valgrind.supp 2008-11-21 14:21:50 +0000
+++ b/mysql-test/valgrind.supp 2009-03-18 14:08:05 +0000
@@ -229,6 +229,44 @@
}
{
+ libz deflate_slow 1
+ Memcheck:Cond
+ fun:deflate_slow
+ fun:deflate
+ fun:do_flush
+ fun:azflush
+}
+
+{
+ libz deflate_slow 2
+ Memcheck:Value8
+ fun:deflate_slow
+ fun:deflate
+ fun:do_flush
+ fun:azflush
+}
+
+{
+ libz deflate_slow 3
+ Memcheck:Cond
+ fun:deflate_slow
+ fun:deflate
+ fun:do_flush
+ fun:azclose
+}
+
+{
+ libz _tr_flush_block
+ Memcheck:Value8
+ fun:compress_block
+ fun:_tr_flush_block
+ fun:deflate_slow
+ fun:deflate
+ fun:do_flush
+ fun:azflush
+}
+
+{
libz deflate
Memcheck:Cond
obj:*/libz.so.*
@@ -256,6 +294,14 @@
fun:do_flush
}
+{
+ libz deflate4
+ Memcheck:Cond
+ fun:deflate
+ fun:do_flush
+ fun:azclose
+}
+
#
# Warning from my_thread_init becasue mysqld dies before kill thread exists
#
@@ -379,7 +425,7 @@
}
{
- dlclose memory loss from plugin
+ dlclose memory loss from plugin variant 1
Memcheck:Leak
fun:calloc
fun:_dlerror_run
@@ -388,6 +434,19 @@
}
{
+ dlclose memory loss from plugin variant 2
+ Memcheck:Leak
+ fun:malloc
+ fun:_dl_close_worker
+ fun:_dl_close
+ fun:_dl_catch_error
+ fun:_dlerror_run
+ fun:dlclose
+ fun:_Z15free_plugin_memP12st_plugin_dl
+ fun:_Z13plugin_dl_delPK19st_mysql_lex_string
+}
+
+{
dlopen / ptread_cancel_init memory loss on Suse Linux 10.3 32/64 bit
Memcheck:Leak
fun:*alloc
@@ -588,3 +647,19 @@
fun:dlopen*
}
+#
+# In glibc (checked version 2.7), inet_ntoa allocates an 18-byte
+# per-thread static buffer for the return value. That memory is freed
+# at thread exit, however if called from the main thread, Valgrind
+# does not see the free (test main.no-threads).
+#
+# Since inet_ntoa() does not allocate memory dynamically per-call, this
+# suppression is safe.
+#
+
+{
+ inet_ntoa thread local storage
+ Memcheck:Leak
+ fun:malloc
+ fun:inet_ntoa
+}
=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc 2009-03-13 13:31:54 +0000
+++ b/sql/mysqld.cc 2009-03-18 14:08:05 +0000
@@ -4813,10 +4813,10 @@ static bool read_init_file(char *file_na
DBUG_ENTER("read_init_file");
DBUG_PRINT("enter",("name: %s",file_name));
if (!(file=my_fopen(file_name,O_RDONLY,MYF(MY_WME))))
- return(1);
+ DBUG_RETURN(1);
bootstrap(file);
(void) my_fclose(file,MYF(MY_WME));
- return 0;
+ DBUG_RETURN(0);
}
@@ -4837,6 +4837,7 @@ void handle_connection_in_main_thread(TH
safe_mutex_assert_owner(&LOCK_thread_count);
thread_cache_size=0; // Safety
threads.append(thd);
+ thd->connect_utime= thd->start_utime= my_micro_time();
(void) pthread_mutex_unlock(&LOCK_thread_count);
handle_one_connection((void*) thd);
}
=== modified file 'sql/scheduler.cc'
--- a/sql/scheduler.cc 2009-03-12 22:27:35 +0000
+++ b/sql/scheduler.cc 2009-03-18 14:08:05 +0000
@@ -470,6 +470,7 @@ static void libevent_add_connection(THD
DBUG_VOID_RETURN;
}
threads.append(thd);
+ thd->connect_utime= thd->start_utime= my_micro_time();
libevent_thd_add(thd);
pthread_mutex_unlock(&LOCK_thread_count);
=== modified file 'sql/sql_select.cc'
--- a/sql/sql_select.cc 2009-02-19 09:01:25 +0000
+++ b/sql/sql_select.cc 2009-03-18 14:08:05 +0000
@@ -1994,8 +1994,17 @@ JOIN::exec()
tmp_fields_list2, tmp_all_fields2,
fields_list.elements, tmp_all_fields1))
DBUG_VOID_RETURN;
- curr_join->tmp_fields_list2= tmp_fields_list2;
- curr_join->tmp_all_fields2= tmp_all_fields2;
+#ifdef HAVE_purify
+ /*
+ Some GCCs use memcpy() for struct assignment, even for x=x.
+ GCC bug 19410: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19410
+ */
+ if (curr_join != this)
+#endif
+ {
+ curr_join->tmp_fields_list2= tmp_fields_list2;
+ curr_join->tmp_all_fields2= tmp_all_fields2;
+ }
}
curr_fields_list= &curr_join->tmp_fields_list2;
curr_all_fields= &curr_join->tmp_all_fields2;
[Maria-developers] bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (knielsen:2684) Bug#43418
by knielsen@knielsen-hq.org 18 Mar '09
#At lp:maria
2684 knielsen(a)knielsen-hq.org 2009-03-17
BUG#43418: MTR2: does not notice a memory leak occurring at shutdown of
mysqld w/ --valgrind
The problem was that mysql-test-run.pl sometimes skipped parsing part of the
error log, and sometimes shut down mysqld with kill -9, which prevented tools
like Valgrind and safemalloc from writing their memory-leak reports to the
error logs.
Fixed by
- Stopping mysqld servers gracefully rather than with kill -9 when
warnings are being checked.
- After stopping mysqld servers, doing an additional parse of the error
log to check for any warnings generated during shutdown.
- Fixing error log parsing to be careful not to skip any part of the
file, by keeping track of the previous file position rather than
relying on mark_log markers.
- Having workers report warnings found during shutdown to the master process
with a new packet 'WARNINGS', which includes a list of names of tests that
might have caused the problem (it could be any test run since the last
server start).
- Failing the entire test suite if warnings are found.
Also fixed the home-brewed and broken serialization in My::Test to use the
standard Storable serializer.
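As a rough illustration of the position-tracking idea described above (a simplified standalone sketch, not the actual mysql-test-run.pl code; the file name and warning patterns are assumptions), the scanner remembers the byte offset reached on the previous pass and resumes from there, so no part of the log is skipped and any warnings can be attributed to the tests run since the last restart:

use strict;
use warnings;
use IO::File;

my %last_pos;    # error-log path => byte offset already scanned

sub scan_new_warnings {
  my ($error_log)= @_;
  # The log may have been moved away (e.g. around a restart); treat as empty.
  my $fh= IO::File->new($error_log) or return ();
  $fh->seek($last_pos{$error_log} || 0, 0);
  my @hits= grep { /\[Warning\]|\[ERROR\]|Valgrind|Assertion .* failed/ } <$fh>;
  $last_pos{$error_log}= $fh->tell();    # resume from here on the next pass
  return @hits;
}

# Call once per test, and once more after the final graceful shutdown to pick
# up leak reports that Valgrind/safemalloc print only when mysqld exits.
print "warning: $_" for scan_new_warnings('var/log/mysqld.1.err');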
modified:
mysql-test/lib/My/Test.pm
mysql-test/mysql-test-run.pl
=== modified file 'mysql-test/lib/My/Test.pm'
--- a/mysql-test/lib/My/Test.pm 2008-11-14 10:49:12 +0000
+++ b/mysql-test/lib/My/Test.pm 2009-03-17 08:23:05 +0000
@@ -9,6 +9,7 @@ package My::Test;
use strict;
use warnings;
use Carp;
+use Storable();
sub new {
@@ -30,18 +31,6 @@ sub key {
}
-sub _encode {
- my ($value)= @_;
- $value =~ s/([|\\\x{0a}\x{0d}])/sprintf('\%02X', ord($1))/eg;
- return $value;
-}
-
-sub _decode {
- my ($value)= @_;
- $value =~ s/\\([0-9a-fA-F]{2})/chr(hex($1))/ge;
- return $value;
-}
-
sub is_failed {
my ($self)= @_;
my $result= $self->{result};
@@ -58,66 +47,22 @@ sub write_test {
# Give the test a unique key before serializing it
$test->{key}= "$test" unless defined $test->{key};
- print $sock $header, "\n";
- while ((my ($key, $value)) = each(%$test)) {
- print $sock $key, "= ";
- if (ref $value eq "ARRAY") {
- print $sock "[", _encode(join(", ", @$value)), "]";
- } else {
- print $sock _encode($value);
- }
- print $sock "\n";
- }
- print $sock "\n";
+ my $serialized= Storable::freeze($test);
+ $serialized =~ s/([\x0d\x0a\\])/sprintf("\\%02x", ord($1))/eg;
+ print $sock $header, "\n", $serialized, "\n";
}
sub read_test {
my ($sock)= @_;
- my $test= My::Test->new();
- # Read the : separated key value pairs until a
- # single newline on it's own line
- my $line;
- while (defined($line= <$sock>)) {
- # List is terminated by newline on it's own
- if ($line eq "\n") {
- # Correctly terminated reply
- # print "Got newline\n";
- last;
- }
- chomp($line);
-
- # Split key/value on the first "="
- my ($key, $value)= split("= ", $line, 2);
-
- if ($value =~ /^\[(.*)\]/){
- my @values= split(", ", _decode($1));
- push(@{$test->{$key}}, @values);
- }
- else
- {
- $test->{$key}= _decode($value);
- }
- }
+ my $serialized= <$sock>;
+ chomp($serialized);
+ $serialized =~ s/\\([0-9a-fA-F]{2})/chr(hex($1))/eg;
+ my $test= Storable::thaw($serialized);
+ die "wrong class (hack attempt?)"
+ unless ref($test) eq 'My::Test';
return $test;
}
-sub print_test {
- my ($self)= @_;
-
- print "[", $self->{name}, "]", "\n";
- while ((my ($key, $value)) = each(%$self)) {
- print " ", $key, "= ";
- if (ref $value eq "ARRAY") {
- print "[", join(", ", @$value), "]";
- } else {
- print $value;
- }
- print "\n";
- }
- print "\n";
-}
-
-
1;
=== modified file 'mysql-test/mysql-test-run.pl'
--- a/mysql-test/mysql-test-run.pl 2009-03-12 22:27:35 +0000
+++ b/mysql-test/mysql-test-run.pl 2009-03-17 08:23:05 +0000
@@ -357,7 +357,7 @@ sub main {
mtr_print_thick_line();
mtr_print_header();
- my $completed= run_test_server($server, $tests, $opt_parallel);
+ my ($completed, $fail)= run_test_server($server, $tests, $opt_parallel);
# Send Ctrl-C to any children still running
kill("INT", keys(%children));
@@ -393,6 +393,10 @@ sub main {
mtr_error("Not all tests completed");
}
+ if ($fail) {
+ mtr_error("Test suite failure.");
+ }
+
mtr_print_line();
if ( $opt_gcov ) {
@@ -412,6 +416,7 @@ sub run_test_server ($$$) {
my $num_saved_cores= 0; # Number of core files saved in vardir/log/ so far.
my $num_saved_datadir= 0; # Number of datadirs saved in vardir/log/ so far.
my $num_failed_test= 0; # Number of tests failed so far
+ my $test_failure= 0;
# Scheduler variables
my $max_ndb= $childs / 2;
@@ -445,7 +450,7 @@ sub run_test_server ($$$) {
$s->remove($sock);
if (--$childs == 0){
$suite_timeout_proc->kill();
- return $completed;
+ return ($completed, $test_failure);
}
next;
}
@@ -509,18 +514,19 @@ sub run_test_server ($$$) {
}
$num_saved_datadir++;
+ $test_failure= 1;
if ( !$opt_force ) {
# Test has failed, force is off
$suite_timeout_proc->kill();
push(@$completed, $result);
- return $completed;
+ return ($completed, 1);
}
elsif ($opt_max_test_fail > 0 and
$num_failed_test >= $opt_max_test_fail) {
$suite_timeout_proc->kill();
mtr_report("Too many tests($num_failed_test) failed!",
"Terminating...");
- return undef;
+ return (undef, 1);
}
$num_failed_test++;
}
@@ -571,7 +577,18 @@ sub run_test_server ($$$) {
elsif ($line eq 'START'){
; # Send first test
}
- else {
+ elsif ($line eq 'WARNINGS'){
+ my $fake_test= My::Test::read_test($sock);
+ my $test_list= join (" ", @{$fake_test->{testnames}});
+ mtr_report("***Warnings generated in error logs during shutdown ".
+ "after running tests: $test_list");
+ $test_failure= 1;
+ if ( !$opt_force ) {
+ # Test failure due to warnings, force is off
+ $suite_timeout_proc->kill();
+ return ($completed, 1);
+ }
+ } else {
mtr_error("Unknown response: '$line' from client");
}
@@ -649,7 +666,7 @@ sub run_test_server ($$$) {
if ( ! $suite_timeout_proc->wait_one(0) )
{
mtr_report("Test suite timeout! Terminating...");
- return undef;
+ return (undef, 1);
}
}
}
@@ -717,7 +734,7 @@ sub run_worker ($) {
delete($test->{'comment'});
delete($test->{'logfile'});
- run_testcase($test);
+ run_testcase($test, $server);
#$test->{result}= 'MTR_RES_PASSED';
# Send it back, now with results set
#$test->print_test();
@@ -725,6 +742,15 @@ sub run_worker ($) {
}
elsif ($line eq 'BYE'){
mtr_report("Server said BYE");
+ # We need to gracefully shut down the servers to see any
+ # Valgrind memory leak errors etc. since last server restart.
+ if ($opt_warnings) {
+ stop_servers(all_servers());
+ if(check_warnings_post_shutdown($server)) {
+ # Warnings appeared in log file(s) during final server shutdown.
+ exit(1);
+ }
+ }
exit(0);
}
else {
@@ -732,8 +758,8 @@ sub run_worker ($) {
}
}
+ # NOTREACHED
stop_all_servers();
-
exit(1);
}
@@ -3109,6 +3135,7 @@ sub run_on_all($$)
sub mark_log {
my ($log, $tinfo)= @_;
my $log_msg= "CURRENT_TEST: $tinfo->{name}\n";
+ pre_write_errorlog($log, $tinfo->{name});
mtr_tofile($log, $log_msg);
}
@@ -3181,8 +3208,8 @@ my %old_env;
# > 0 failure
#
-sub run_testcase ($) {
- my $tinfo= shift;
+sub run_testcase ($$) {
+ my ($tinfo, $server_socket)= @_;
mtr_verbose("Running test:", $tinfo->{name});
@@ -3197,7 +3224,12 @@ sub run_testcase ($) {
{
my @restart= servers_need_restart($tinfo);
if ( @restart != 0) {
- stop_servers($tinfo, @restart );
+ # Remember that we restarted for this test case (count restarts)
+ $tinfo->{'restarted'}= 1;
+ stop_servers(@restart );
+ if ($opt_warnings) {
+ check_warnings_post_shutdown($server_socket);
+ }
}
if ( started(all_servers()) == 0 )
@@ -3336,7 +3368,18 @@ sub run_testcase ($) {
{
if ($check_res == 1) {
# Test case had sideeffects, not fatal error, just continue
- stop_all_servers();
+ if ($opt_warnings) {
+ # Checking error logs for warnings, so need to stop server
+ # gracefully so that memory leaks etc. can be properly detected.
+ stop_servers(all_servers());
+ check_warnings_post_shutdown($server_socket);
+ # Even if we got warnings here, we should not fail this
+ # particular test, as the warnings may be caused by an earlier
+ # test.
+ } else {
+ # Not checking warnings, so can do a hard shutdown.
+ stop_all_servers();
+ }
mtr_report("Resuming tests...\n");
}
else {
@@ -3468,6 +3511,41 @@ sub run_testcase ($) {
}
+# Keep track of last position in mysqld error log where we scanned for
+# warnings, so we can attribute any warnings found to the correct test
+# suite or server restart.
+my $last_warning_position= { };
+
+# Called just before a mysqld server is started or a testcase is run,
+# to keep track of which tests have been run since last restart, and
+# of when the error log is reset.
+#
+# Second argument $test_name is test name, or undef for server restart.
+sub pre_write_errorlog {
+ my ($error_log, $test_name)= @_;
+
+ if (! -e $error_log) {
+ # If the error log is moved away, reset the warning parse position.
+ delete $last_warning_position->{$error_log};
+ }
+
+ if (defined($test_name)) {
+ $last_warning_position->{$error_log}{test_names}= []
+ unless exists($last_warning_position->{$error_log}{test_names});
+ push @{$last_warning_position->{$error_log}{test_names}}, $test_name;
+ } else {
+ # Server restart, so clear the list of tests run since last restart.
+ # (except the last one (if any), which is the test about to be run).
+ if (defined($last_warning_position->{$error_log}{test_names}) &&
+ @{$last_warning_position->{$error_log}{test_names}}) {
+ $last_warning_position->{$error_log}{test_names}=
+ [$last_warning_position->{$error_log}{test_names}[-1]];
+ } else {
+ $last_warning_position->{$error_log}{test_names}= [];
+ }
+ }
+}
+
#
# Perform a rough examination of the servers
# error log and write all lines that look
@@ -3479,18 +3557,12 @@ sub extract_warning_lines ($) {
# Open the servers .err log file and read all lines
# belonging to current tets into @lines
my $Ferr = IO::File->new($error_log)
- or mtr_error("Could not open file '$error_log' for reading: $!");
+ or return [];
+ my $last_pos= $last_warning_position->{$error_log}{seek_pos};
+ $Ferr->seek($last_pos, 0) if defined($last_pos);
- my @lines;
- while ( my $line = <$Ferr> )
- {
- if ( $line =~ /^CURRENT_TEST:/ )
- {
- # Throw away lines from previous tests
- @lines = ();
- }
- push(@lines, $line);
- }
+ my @lines= <$Ferr>;
+ $last_warning_position->{$error_log}{seek_pos}= $Ferr->tell();
$Ferr = undef; # Close error log file
# mysql_client_test.test sends a COM_DEBUG packet to the server
@@ -3537,20 +3609,47 @@ sub extract_warning_lines ($) {
qr/Attempting backtrace/,
qr/Assertion .* failed/,
);
+ # These are taken from the include/mtr_warnings.sql global suppression
+ # list. They occur delayed, so they can be parsed during shutdown rather
+ # than during the per-test check.
+ #
+ # ToDo: having the warning suppressions inside the mysqld we are trying to
+ # check is in any case horrible. We should change it to all be done here
+ # within the Perl code, which is both simpler, easier, faster, and more
+ # robust. We could still have individual test cases put in suppressions by
+ # parsing statically or by writing dynamically to a CSV table read by the
+ # Perl code.
+ my @antipatterns =
+ (
+ qr/InnoDB: Error: in ALTER TABLE `test`.`t[12]`/,
+ qr/InnoDB: Error: table `test`.`t[12]` does not exist in the InnoDB internal/,
+ );
- foreach my $line ( @lines )
+ my $match_count= 0;
+ LINE: foreach my $line ( @lines )
{
- foreach my $pat ( @patterns )
+ PAT: foreach my $pat ( @patterns )
{
if ( $line =~ /$pat/ )
{
+ foreach my $apat (@antipatterns)
+ {
+ next LINE if $line =~ $apat;
+ }
print $Fwarn $line;
- last;
+ ++$match_count;
+ last PAT;
}
}
}
$Fwarn = undef; # Close file
+ if ($match_count > 0 &&
+ defined($last_warning_position->{$error_log}{test_names})) {
+ return $last_warning_position->{$error_log}{test_names};
+ } else {
+ return [];
+ }
}
@@ -3717,6 +3816,22 @@ sub check_warnings ($) {
mtr_error("INTERNAL_ERROR: check_warnings");
}
+# Check for warnings generated during shutdown of a mysqld server.
+# If any, report them to master server, and return true; else just return false.
+sub check_warnings_post_shutdown {
+ my ($server_socket)= @_;
+ my $testname_hash= { };
+ foreach my $mysqld ( mysqlds())
+ {
+ my $testlist= extract_warning_lines($mysqld->value('log-error'));
+ $testname_hash->{$_}= 1 for @$testlist;
+ }
+ my @warning_tests= keys(%$testname_hash);
+ if (@warning_tests) {
+ my $fake_test= My::Test->new(testnames => \@warning_tests);
+ $fake_test->write_test($server_socket, 'WARNINGS');
+ }
+}
#
# Loop through our list of processes and look for and entry
@@ -4142,6 +4257,7 @@ sub mysqld_start ($$) {
if ( defined $exe )
{
+ pre_write_errorlog($output);
$mysqld->{'proc'}= My::SafeProcess->new
(
name => $mysqld->name(),
@@ -4371,10 +4487,7 @@ sub get_extra_opts {
sub stop_servers($$) {
- my ($tinfo, @servers)= @_;
-
- # Remember if we restarted for this test case (count restarts)
- $tinfo->{'restarted'}= 1;
+ my (@servers)= @_;
if ( join('|', @servers) eq join('|', all_servers()) )
{
[Maria-developers] bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (knielsen:2685)
by knielsen@knielsen-hq.org 18 Mar '09
#At lp:maria
2685 knielsen(a)knielsen-hq.org 2009-03-18
Add testing of extra port for pool-of-threads.
The additional test uses up all threads in the pool with SELECT
SLEEP(), and tests that this makes normal connections block, but
connections on the extra port still work.
Also test connection limit on extra port with and without
pool-of-threads enabled.
Add --connect-timeout option to mysqltest program.
Add facility for --extra-port option to ConfigFactory.
Fix regexp typo in ConfigFactory.pm
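A standalone sketch of the scenario the new test exercises, written with DBI rather than mysqltest (the host, ports and credentials below are assumptions, not values from the patch): once the thread pool is saturated, a connection attempt on the normal port should fail quickly thanks to a short connect timeout, while the extra port should still accept a connection.

use strict;
use warnings;
use DBI;

my ($host, $normal_port, $extra_port)= ('127.0.0.1', 3306, 3307);  # hypothetical

# Probe the normal port with a 2-second connect timeout; this mirrors the
# purpose of the new mysqltest --connect-timeout option (fail fast instead of
# hanging while all pool threads are busy in SELECT SLEEP(5)).
my $blocked= DBI->connect(
  "DBI:mysql:database=test;host=$host;port=$normal_port;mysql_connect_timeout=2",
  'root', '', { PrintError => 0 });
print $blocked ? "unexpected: normal port accepted a connection\n"
               : "normal port blocked, as expected with the pool full\n";

# The extra port is served outside the thread pool, so it should still work.
my $extra= DBI->connect(
  "DBI:mysql:database=test;host=$host;port=$extra_port;mysql_connect_timeout=2",
  'root', '', { PrintError => 0 })
  or die "extra port connection failed: $DBI::errstr\n";
print "Connection on extra port ok\n";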
removed:
mysql-test/t/pool_of_threads-master.opt
added:
mysql-test/t/connect.cnf
mysql-test/t/pool_of_threads.cnf
modified:
client/mysqltest.cc
mysql-test/lib/My/ConfigFactory.pm
mysql-test/r/connect.result
mysql-test/r/pool_of_threads.result
mysql-test/t/connect.test
mysql-test/t/pool_of_threads.test
=== modified file 'client/mysqltest.cc'
--- a/client/mysqltest.cc 2009-02-19 09:01:25 +0000
+++ b/client/mysqltest.cc 2009-03-18 15:46:32 +0000
@@ -76,7 +76,7 @@ enum {
OPT_SKIP_SAFEMALLOC=OPT_MAX_CLIENT_OPTION,
OPT_PS_PROTOCOL, OPT_SP_PROTOCOL, OPT_CURSOR_PROTOCOL, OPT_VIEW_PROTOCOL,
OPT_MAX_CONNECT_RETRIES, OPT_MARK_PROGRESS, OPT_LOG_DIR, OPT_TAIL_LINES,
- OPT_GLOBAL_SUBST
+ OPT_GLOBAL_SUBST, OPT_MY_CONNECT_TIMEOUT
};
static int record= 0, opt_sleep= -1;
@@ -87,6 +87,7 @@ const char *opt_include= 0, *opt_charset
static int opt_port= 0;
static int opt_max_connect_retries;
static my_bool opt_compress= 0, silent= 0, verbose= 0;
+static int opt_connect_timeout= -1;
static my_bool debug_info_flag= 0, debug_check_flag= 0;
static my_bool tty_password= 0;
static my_bool opt_mark_progress= 0;
@@ -4952,6 +4953,9 @@ void do_connect(struct st_command *comma
if (opt_charsets_dir)
mysql_options(&con_slot->mysql, MYSQL_SET_CHARSET_DIR,
opt_charsets_dir);
+ if (opt_connect_timeout >= 0)
+ mysql_options(&con_slot->mysql, MYSQL_OPT_CONNECT_TIMEOUT,
+ &opt_connect_timeout);
#ifdef HAVE_OPENSSL
if (opt_use_ssl || con_ssl)
@@ -5692,6 +5696,9 @@ static struct my_option my_long_options[
#include "sslopt-longopts.h"
{"test-file", 'x', "Read test from/in this file (default stdin).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"connect-timeout", OPT_MY_CONNECT_TIMEOUT, "Client connection timeout",
+ (uchar**) &opt_connect_timeout, (uchar**) &opt_connect_timeout, 0,
+ GET_INT, REQUIRED_ARG, -1, -1, 0, 0, 0, 0},
{"timer-file", 'm', "File where the timing in micro seconds is stored.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"tmpdir", 't', "Temporary directory where sockets are put.",
=== modified file 'mysql-test/lib/My/ConfigFactory.pm'
--- a/mysql-test/lib/My/ConfigFactory.pm 2009-02-15 10:58:34 +0000
+++ b/mysql-test/lib/My/ConfigFactory.pm 2009-03-18 15:46:32 +0000
@@ -202,6 +202,7 @@ my @mysqld_rules=
{ 'pid-file' => \&fix_pidfile },
{ '#host' => \&fix_host },
{ 'port' => \&fix_port },
+ { '#extra-port' => \&fix_port },
{ 'socket' => \&fix_socket },
{ 'log-error' => \&fix_log_error },
{ 'general-log' => sub { return 1; } },
@@ -353,7 +354,7 @@ sub post_check_client_group {
sub post_check_client_groups {
my ($self, $config)= @_;
- my $first_mysqld= $config->first_like('mysqld.');
+ my $first_mysqld= $config->first_like('mysqld\.');
return unless $first_mysqld;
=== modified file 'mysql-test/r/connect.result'
--- a/mysql-test/r/connect.result 2008-03-17 11:26:00 +0000
+++ b/mysql-test/r/connect.result 2009-03-18 15:46:32 +0000
@@ -215,6 +215,13 @@ SET GLOBAL event_scheduler = OFF;
# -- End of Bug#35074.
+SELECT 'Connection on extra port ok';
+Connection on extra port ok
+Connection on extra port ok
+SELECT 'Connection on extra port 2 ok';
+Connection on extra port 2 ok
+Connection on extra port 2 ok
+# -- Success: more than --extra-max-connections + 1 normal connections not possible
# ------------------------------------------------------------------
# -- End of 5.1 tests
# ------------------------------------------------------------------
=== modified file 'mysql-test/r/pool_of_threads.result'
--- a/mysql-test/r/pool_of_threads.result 2009-03-12 22:27:35 +0000
+++ b/mysql-test/r/pool_of_threads.result 2009-03-18 15:46:32 +0000
@@ -2151,3 +2151,23 @@ Privat (Private Nutzung) Mobilfunk
Warnings:
Warning 1052 Column 'kundentyp' in group statement is ambiguous
drop table t1;
+SELECT sleep(5);
+SELECT sleep(5);
+# -- Success: more than --thread-pool-size normal connections not possible
+sleep(5)
+0
+sleep(5)
+0
+SELECT sleep(5);
+SELECT sleep(5);
+SELECT 'Connection on extra port ok';
+Connection on extra port ok
+Connection on extra port ok
+SELECT 'Connection on extra port 2 ok';
+Connection on extra port 2 ok
+Connection on extra port 2 ok
+# -- Success: more than --extra-max-connections + 1 normal connections not possible
+sleep(5)
+0
+sleep(5)
+0
=== added file 'mysql-test/t/connect.cnf'
--- a/mysql-test/t/connect.cnf 1970-01-01 00:00:00 +0000
+++ b/mysql-test/t/connect.cnf 2009-03-18 15:46:32 +0000
@@ -0,0 +1,8 @@
+!include include/default_my.cnf
+
+[mysqld.1]
+extra-port= @mysqld.1.#extra-port
+extra-max-connections=1
+
+[ENV]
+MASTER_EXTRA_PORT= @mysqld.1.extra-port
=== modified file 'mysql-test/t/connect.test'
--- a/mysql-test/t/connect.test 2008-03-17 11:26:00 +0000
+++ b/mysql-test/t/connect.test 2009-03-18 15:46:32 +0000
@@ -288,6 +288,33 @@ let $wait_condition =
--echo # -- End of Bug#35074.
--echo
+# Test connections to the extra port.
+
+connect(extracon,127.0.0.1,root,,test,$MASTER_EXTRA_PORT,);
+connection extracon;
+SELECT 'Connection on extra port ok';
+
+connect(extracon2,127.0.0.1,root,,test,$MASTER_EXTRA_PORT,);
+connection extracon2;
+SELECT 'Connection on extra port 2 ok';
+
+--disable_abort_on_error
+--disable_result_log
+--disable_query_log
+connect(extracon3,127.0.0.1,root,,test,$MASTER_EXTRA_PORT,);
+--enable_query_log
+--enable_result_log
+--enable_abort_on_error
+let $error = $mysql_errno;
+if (!$error)
+{
+ --echo # -- Error: managed to establish more than --extra-max-connections + 1 connections
+}
+if ($error)
+{
+ --echo # -- Success: more than --extra-max-connections + 1 normal connections not possible
+}
+
--echo # ------------------------------------------------------------------
--echo # -- End of 5.1 tests
--echo # ------------------------------------------------------------------
=== removed file 'mysql-test/t/pool_of_threads-master.opt'
--- a/mysql-test/t/pool_of_threads-master.opt 2009-03-12 22:27:35 +0000
+++ b/mysql-test/t/pool_of_threads-master.opt 1970-01-01 00:00:00 +0000
@@ -1 +0,0 @@
---test-ignore-wrong-options --thread-handling=pool-of-threads
=== added file 'mysql-test/t/pool_of_threads.cnf'
--- a/mysql-test/t/pool_of_threads.cnf 1970-01-01 00:00:00 +0000
+++ b/mysql-test/t/pool_of_threads.cnf 2009-03-18 15:46:32 +0000
@@ -0,0 +1,14 @@
+!include include/default_my.cnf
+
+[mysqld.1]
+test-ignore-wrong-options
+thread-handling= pool-of-threads
+thread_pool_size= 2
+extra-port= @mysqld.1.#extra-port
+extra-max-connections=1
+
+[client]
+connect-timeout= 2
+
+[ENV]
+MASTER_EXTRA_PORT= @mysqld.1.extra-port
=== modified file 'mysql-test/t/pool_of_threads.test'
--- a/mysql-test/t/pool_of_threads.test 2009-03-12 22:27:35 +0000
+++ b/mysql-test/t/pool_of_threads.test 2009-03-18 15:46:32 +0000
@@ -5,3 +5,78 @@
-- source include/common-tests.inc
+
+# Test that we cannot have more simultaneous connections than
+# --thread-pool-size on the standard port, but _can_ have additional
+# connections on the extra port.
+
+# First set two connections running, and check that extra connection
+# on normal port fails due to--thread-pool-size=2
+connection default;
+send SELECT sleep(5);
+
+connect(con2,localhost,root,,);
+connection con2;
+send SELECT sleep(5);
+--sleep 1
+
+--disable_abort_on_error
+--disable_result_log
+--disable_query_log
+connect(con3,localhost,root,,);
+--enable_query_log
+--enable_result_log
+--enable_abort_on_error
+let $error = $mysql_errno;
+if (!$error)
+{
+ --echo # -- Error: managed to establish more than --thread-pool-size connections
+}
+if ($error)
+{
+ --echo # -- Success: more than --thread-pool-size normal connections not possible
+}
+
+connection default;
+--reap
+connection con2;
+--reap
+
+# Now try again, but this time use the extra port to successfully connect.
+
+connection default;
+send SELECT sleep(5);
+
+connection con2;
+send SELECT sleep(5);
+--sleep 1
+
+connect(extracon,127.0.0.1,root,,test,$MASTER_EXTRA_PORT,);
+connection extracon;
+SELECT 'Connection on extra port ok';
+
+connect(extracon2,127.0.0.1,root,,test,$MASTER_EXTRA_PORT,);
+connection extracon2;
+SELECT 'Connection on extra port 2 ok';
+
+--disable_abort_on_error
+--disable_result_log
+--disable_query_log
+connect(extracon3,127.0.0.1,root,,test,$MASTER_EXTRA_PORT,);
+--enable_query_log
+--enable_result_log
+--enable_abort_on_error
+let $error = $mysql_errno;
+if (!$error)
+{
+ --echo # -- Error: managed to establish more than --extra-max-connections + 1 connections
+}
+if ($error)
+{
+ --echo # -- Success: more than --extra-max-connections + 1 normal connections not possible
+}
+
+connection default;
+--reap
+connection con2;
+--reap
OK, thanks. Will do.
On Mar 18, 2009, at 1:05 AM, Michael Widenius wrote:
>
> Hi!
>
>>>>>> "Paul" == Paul McCullagh <paul.mccullagh(a)primebase.org> writes:
>
>>> From: Paul McCullagh <paul.mccullagh(a)primebase.org>
>>> Date: March 9, 2009 9:48:44 AM GMT+01:00
>>> To: maria-developers(a)lists.launchpad.net
>>> Subject: [Maria-developers] PBXT and MariaDB
>>>
>>> Hi Monty,
>>>
>>> In our call last week you suggested that I push PBXT into the
>>> MariaDB tree.
>>>
>>> Which branch should I push to?
>
> lp:maria
>
> In other words, the 'trunk' series at:
>
> https://launchpad.net/maria/trunk
>
> We are now looking at setting up a Buildbot network to start doing
> 'builds-after-each-push' and monthly binaries.
>
> Regards,
> Monty
--
Paul McCullagh
PrimeBase Technologies
www.primebase.org
www.blobstreaming.org
pbxt.blogspot.com
[Maria-developers] bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (monty:2684)
by Michael Widenius 18 Mar '09
#At lp:maria based on revid:monty@askmonty.org-20090318021422-jmb1ap5kougrzgzz
2684 Michael Widenius 2009-03-18
Ignore generated file event-config.h
modified:
.bzrignore
=== modified file '.bzrignore'
--- a/.bzrignore 2009-02-19 09:01:25 +0000
+++ b/.bzrignore 2009-03-18 02:15:20 +0000
@@ -1902,3 +1902,4 @@ sql/share/spanish
sql/share/swedish
sql/share/ukrainian
libmysqld/examples/mysqltest.cc
+extra/libevent/event-config.h
[Maria-developers] bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (monty:2683)
by Michael Widenius 18 Mar '09
#At lp:maria based on revid:knielsen@knielsen-hq.org-20090313133154-0qas21in74uxdtk8
2683 Michael Widenius 2009-03-18
Added mariadb and mariadb-version as my.cnf option tags
Fixed compiler error when configuring without --lib-event
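The practical effect, shown as a hypothetical my.cnf fragment (not part of the patch): mysqld in MariaDB now also reads the [mariadb] and version-specific [mariadb-5.1] option groups, so MariaDB-specific settings can be kept out of the groups a stock MySQL server would read.

[mysqld]
# Read by both MySQL and MariaDB servers.
key_buffer_size=64M

[mariadb]
# Read only by MariaDB servers.

[mariadb-5.1]
# Read only by MariaDB 5.1 servers (the tag comes from MARIADB_BASE_VERSION).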
modified:
include/mysql_version.h.in
sql/mysqld.cc
sql/scheduler.h
per-file messages:
include/mysql_version.h.in
Added mariadb-'version' as a my.cnf option tag for mysqld
sql/mysqld.cc
Added mariadb and mariadb-version as my.cnf option tags
=== modified file 'include/mysql_version.h.in'
--- a/include/mysql_version.h.in 2007-09-13 14:19:46 +0000
+++ b/include/mysql_version.h.in 2009-03-18 02:14:22 +0000
@@ -11,6 +11,7 @@
#define PROTOCOL_VERSION @PROTOCOL_VERSION@
#define MYSQL_SERVER_VERSION "@VERSION@"
#define MYSQL_BASE_VERSION "mysqld-@MYSQL_BASE_VERSION@"
+#define MARIADB_BASE_VERSION "mariadb-@MYSQL_BASE_VERSION@"
#define MYSQL_SERVER_SUFFIX_DEF "@MYSQL_SERVER_SUFFIX@"
#define FRM_VER @DOT_FRM_VERSION@
#define MYSQL_VERSION_ID @MYSQL_VERSION_ID@
=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc 2009-03-13 13:31:54 +0000
+++ b/sql/mysqld.cc 2009-03-18 02:14:22 +0000
@@ -3014,7 +3014,9 @@ static const char *load_default_groups[]
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
"mysql_cluster",
#endif
-"mysqld","server", MYSQL_BASE_VERSION, 0, 0};
+"mysqld", "server", MYSQL_BASE_VERSION,
+"mariadb", MARIADB_BASE_VERSION,
+0, 0};
#if defined(__WIN__)
static const int load_default_groups_sz=
=== modified file 'sql/scheduler.h'
--- a/sql/scheduler.h 2009-03-12 22:27:35 +0000
+++ b/sql/scheduler.h 2009-03-18 02:14:22 +0000
@@ -82,7 +82,9 @@ void pool_of_threads_scheduler(scheduler
#else
#define HAVE_POOL_OF_THREADS 0 /* For easyer tests */
-#define pool_of_threads_scheduler(A) one_thread_per_connection_scheduler(A)
+#define pool_of_threads_scheduler(A) \
+ one_thread_per_connection_scheduler(A, &max_connections, \
+ &connection_count)
class thd_scheduler
{};
[Maria-developers] bzr commit into mariadb 5.1, with Maria 2.0:maria/2.0 branch (sanja:2722)
by sanja@askmonty.org 17 Mar '09
#At lp:maria/2.0
2722 sanja(a)askmonty.org 2009-03-17
Group commit patch.
Transaction log sync statistics.
Buffer finish optimisation when neither CRC page protection nor sector protection is enabled.
Fixed a bug in waiting for the end of a flush pass.
(Puts together only Monty's review; will be pushed separately.)
modified:
mysql-test/suite/maria/r/maria3.result
mysys/my_init.c
storage/maria/ha_maria.cc
storage/maria/ma_init.c
storage/maria/ma_loghandler.c
storage/maria/ma_loghandler.h
tests/fork_big2.pl
per-file messages:
mysql-test/suite/maria/r/maria3.result
Transaction log sync statistics.
mysys/my_init.c
The initialization should be here, but then it does not link (it works without it because the initialization does nothing except checks).
storage/maria/ha_maria.cc
Transaction log sync statistics.
Group commit control variables and routines.
storage/maria/ma_init.c
Finishing of the soft group commit thread.
storage/maria/ma_loghandler.c
Group commit (soft and hard) added.
Transaction log sync statistics.
Buffer finish optimisation when neither CRC page protection nor sector protection is enabled.
Fixed a bug in waiting for the end of a flush pass.
storage/maria/ma_loghandler.h
Group commit control variables and routines.
Transaction log sync statistics.
tests/fork_big2.pl
Fixed a test written on Windows to work under Linux (line endings).
=== modified file 'mysql-test/suite/maria/r/maria3.result'
--- a/mysql-test/suite/maria/r/maria3.result 2008-10-09 20:03:54 +0000
+++ b/mysql-test/suite/maria/r/maria3.result 2009-03-17 20:32:17 +0000
@@ -264,6 +264,8 @@ Variable_name Value
maria_block_size 8192
maria_checkpoint_interval 30
maria_force_start_after_recovery_failures 0
+maria_group_commit none
+maria_group_commit_rate 800
maria_log_file_size 4294959104
maria_log_purge_type immediate
maria_max_sort_file_size 9223372036854775807
=== modified file 'mysys/my_init.c'
--- a/mysys/my_init.c 2008-12-10 09:02:25 +0000
+++ b/mysys/my_init.c 2009-03-17 20:32:17 +0000
@@ -40,6 +40,7 @@ static void netware_init();
#else
#define netware_init()
#endif
+#include <my_atomic.h>
my_bool my_init_done= 0;
uint mysys_usage_id= 0; /* Incremented for each my_init() */
@@ -82,6 +83,10 @@ my_bool my_init(void)
if (my_progname)
my_progname_short= my_progname + dirname_length(my_progname);
+/*
+ if (my_atomic_initialize())
+ return 1;
+*/
#if defined(THREAD) && defined(SAFE_MUTEX)
safe_mutex_global_init(); /* Must be called early */
#endif
=== modified file 'storage/maria/ha_maria.cc'
--- a/storage/maria/ha_maria.cc 2009-01-16 16:18:17 +0000
+++ b/storage/maria/ha_maria.cc 2009-03-17 20:32:17 +0000
@@ -101,22 +101,40 @@ TYPELIB maria_translog_purge_type_typeli
array_elements(maria_translog_purge_type_names) - 1, "",
maria_translog_purge_type_names, NULL
};
+
+/* transactional log directory sync */
const char *maria_sync_log_dir_names[]=
{
"NEVER", "NEWFILE", "ALWAYS", NullS
};
-
TYPELIB maria_sync_log_dir_typelib=
{
array_elements(maria_sync_log_dir_names) - 1, "",
maria_sync_log_dir_names, NULL
};
+/* transactional log group commit */
+const char *maria_group_commit_names[]=
+{
+ "none", "hard", "soft", NullS
+};
+TYPELIB maria_group_commit_typelib=
+{
+ array_elements(maria_group_commit_names) - 1, "",
+ maria_group_commit_names, NULL
+};
+
/** Interval between background checkpoints in seconds */
static ulong checkpoint_interval;
static void update_checkpoint_interval(MYSQL_THD thd,
struct st_mysql_sys_var *var,
void *var_ptr, const void *save);
+static void update_maria_group_commit(MYSQL_THD thd,
+ struct st_mysql_sys_var *var,
+ void *var_ptr, const void *save);
+static void update_maria_group_commit_rate(MYSQL_THD thd,
+ struct st_mysql_sys_var *var,
+ void *var_ptr, const void *save);
/** After that many consecutive recovery failures, remove logs */
static ulong force_start_after_recovery_failures;
static void update_log_file_size(MYSQL_THD thd,
@@ -163,6 +181,22 @@ static MYSQL_SYSVAR_ULONG(log_file_size,
NULL, update_log_file_size, TRANSLOG_FILE_SIZE,
TRANSLOG_MIN_FILE_SIZE, 0xffffffffL, TRANSLOG_PAGE_SIZE);
+static MYSQL_SYSVAR_ENUM(group_commit, group_commit,
+ PLUGIN_VAR_RQCMDARG,
+ "Specifies maria group commit mode. "
+ "Possible values are \"none\" (no group commit), "
+ "\"hard\" (with waiting to actual commit), "
+ "\"soft\" (no wait for commit (DANGEROUS!!!))",
+ NULL, update_maria_group_commit,
+ TRANSLOG_GCOMMIT_NONE, &maria_group_commit_typelib);
+
+static MYSQL_SYSVAR_ULONG(group_commit_rate, group_commit_rate,
+ PLUGIN_VAR_RQCMDARG,
+ "If group commits switched on commit will happens with about every "
+ "100/maria_group_commit_rate second. 0 is special value which switch "
+ "rate off",
+ NULL, update_maria_group_commit_rate, 800, 0, UINT_MAX, 1);
+
static MYSQL_SYSVAR_ENUM(log_purge_type, log_purge_type,
PLUGIN_VAR_RQCMDARG,
"Specifies how maria transactional log will be purged. "
@@ -3247,6 +3281,8 @@ static struct st_mysql_sys_var* system_v
MYSQL_SYSVAR(block_size),
MYSQL_SYSVAR(checkpoint_interval),
MYSQL_SYSVAR(force_start_after_recovery_failures),
+ MYSQL_SYSVAR(group_commit),
+ MYSQL_SYSVAR(group_commit_rate),
MYSQL_SYSVAR(page_checksum),
MYSQL_SYSVAR(log_dir_path),
MYSQL_SYSVAR(log_file_size),
@@ -3277,6 +3313,97 @@ static void update_checkpoint_interval(M
}
/**
+ @brief Updates group commit mode
+*/
+
+static void update_maria_group_commit(MYSQL_THD thd,
+ struct st_mysql_sys_var *var,
+ void *var_ptr, const void *save)
+{
+ ulong value= (ulong)*((long *)var_ptr);
+ DBUG_ENTER("update_maria_group_commit");
+ DBUG_PRINT("enter", ("old value: %lu new value %lu rate %lu",
+ value, (ulong)(*(long *)save), group_commit_rate));
+ /* old value */
+ switch (value)
+ {
+ case TRANSLOG_GCOMMIT_NONE:
+ break;
+ case TRANSLOG_GCOMMIT_HARD:
+ translog_hard_group_commit(FALSE);
+ break;
+ case TRANSLOG_GCOMMIT_SOFT:
+ translog_soft_sync(FALSE);
+ if (group_commit_rate)
+ translog_soft_sync_end();
+ break;
+ default:
+ DBUG_ASSERT(0); /* impossible */
+ }
+ value= *(ulong *)var_ptr= (ulong)(*(long *)save);
+ translog_sync();
+ /* new value */
+ switch (value)
+ {
+ case TRANSLOG_GCOMMIT_NONE:
+ break;
+ case TRANSLOG_GCOMMIT_HARD:
+ translog_hard_group_commit(TRUE);
+ break;
+ case TRANSLOG_GCOMMIT_SOFT:
+ translog_soft_sync(TRUE);
+ /* variable change made under global lock so we can just read it */
+ if (group_commit_rate)
+ translog_soft_sync_start();
+ break;
+ default:
+ DBUG_ASSERT(0); /* impossible */
+ }
+ DBUG_VOID_RETURN;
+}
+
+/**
+ @brief Updates group commit rate
+*/
+
+static void update_maria_group_commit_rate(MYSQL_THD thd,
+ struct st_mysql_sys_var *var,
+ void *var_ptr, const void *save)
+{
+ ulong new_value= (ulong)*((long *)save);
+ DBUG_ENTER("update_maria_group_commit_rate");
+ DBUG_PRINT("enter", ("old value: %lu new value %lu group commit %lu",
+ *(ulong *)var_ptr, new_value, group_commit));
+ if (new_value &&
+ ((TRANSLOG_RATE_BASE * 1000000000ULL / new_value +
+ TRANSLOG_RATE_BASE / 2) /
+ TRANSLOG_RATE_BASE) == 0)
+ new_value= 0; /* protection against too small value */
+ /* variable change made under global lock so we can just read it */
+ switch (group_commit)
+ {
+ case TRANSLOG_GCOMMIT_NONE:
+ *(ulong *)var_ptr= new_value;
+ translog_set_group_commit_rate(new_value);
+ break;
+ case TRANSLOG_GCOMMIT_HARD:
+ *(ulong *)var_ptr= new_value;
+ translog_set_group_commit_rate(new_value);
+ break;
+ case TRANSLOG_GCOMMIT_SOFT:
+ if (*(ulong *)var_ptr)
+ translog_soft_sync_end();
+ translog_set_group_commit_rate(new_value);
+ if ((*(ulong *)var_ptr= new_value))
+ translog_soft_sync_start();
+ break;
+ default:
+ DBUG_ASSERT(0); /* impossible */
+ }
+ DBUG_VOID_RETURN;
+}
+
+/**
@brief Updates the transaction log file limit.
*/
@@ -3298,6 +3425,7 @@ static SHOW_VAR status_variables[]= {
{"Maria_pagecache_reads", (char*) &maria_pagecache_var.global_cache_read, SHOW_LONGLONG},
{"Maria_pagecache_write_requests", (char*) &maria_pagecache_var.global_cache_w_requests, SHOW_LONGLONG},
{"Maria_pagecache_writes", (char*) &maria_pagecache_var.global_cache_write, SHOW_LONGLONG},
+ {"Maria_transaction_log_syncs", (char*) &translog_syncs, SHOW_LONGLONG},
{NullS, NullS, SHOW_LONG}
};
=== modified file 'storage/maria/ma_init.c'
--- a/storage/maria/ma_init.c 2008-10-09 20:03:54 +0000
+++ b/storage/maria/ma_init.c 2009-03-17 20:32:17 +0000
@@ -82,6 +82,11 @@ void maria_end(void)
maria_inited= maria_multi_threaded= FALSE;
ft_free_stopwords();
ma_checkpoint_end();
+ if (translog_status == TRANSLOG_OK)
+ {
+ translog_soft_sync_end();
+ translog_sync();
+ }
if ((trid= trnman_get_max_trid()) > max_trid_in_control_file)
{
/*
=== modified file 'storage/maria/ma_loghandler.c'
--- a/storage/maria/ma_loghandler.c 2009-01-16 09:38:02 +0000
+++ b/storage/maria/ma_loghandler.c 2009-03-17 20:32:17 +0000
@@ -18,6 +18,7 @@
#include "ma_blockrec.h" /* for some constants and in-write hooks */
#include "ma_key_recover.h" /* For some in-write hooks */
#include "ma_checkpoint.h"
+#include "ma_servicethread.h"
/*
On Windows, neither my_open() nor my_sync() work for directories.
@@ -47,6 +48,15 @@
#include <m_ctype.h>
#endif
+/** @brief protects checkpoint_in_progress */
+static pthread_mutex_t LOCK_soft_sync;
+/** @brief for killing the background checkpoint thread */
+static pthread_cond_t COND_soft_sync;
+/** @brief control structure for checkpoint background thread */
+static MA_SERVICE_THREAD_CONTROL soft_sync_control=
+ {THREAD_DEAD, FALSE, &LOCK_soft_sync, &COND_soft_sync};
+
+
/* transaction log file descriptor */
typedef struct st_translog_file
{
@@ -124,10 +134,20 @@ struct st_translog_buffer
/* Previous buffer offset to detect it flush finish */
TRANSLOG_ADDRESS prev_buffer_offset;
/*
+ If the buffer was forced to close it save value of its horizon
+ otherwise LSN_IMPOSSIBLE
+ */
+ TRANSLOG_ADDRESS pre_force_close_horizon;
+ /*
How much is written (or will be written when copy_to_buffer_in_progress
become 0) to this buffer
*/
translog_size_t size;
+ /*
+ How much data was skipped during moving page from previous buffer
+ to this one (it is optimisation of forcing buffer to finish
+ */
+ uint skipped_data;
/* File handler for this buffer */
TRANSLOG_FILE *file;
/* Threads which are waiting for buffer filling/freeing */
@@ -304,6 +324,7 @@ struct st_translog_descriptor
*/
pthread_mutex_t log_flush_lock;
pthread_cond_t log_flush_cond;
+ pthread_cond_t new_goal_cond;
/* Protects changing of headers of finished files (max_lsn) */
pthread_mutex_t file_header_lock;
@@ -333,6 +354,8 @@ struct st_translog_descriptor
my_bool is_everything_flushed;
/* True when flush pass is in progress */
my_bool flush_in_progress;
+ /* The flush number (used to distinguish two flushes goes one by one) */
+ volatile int flush_no;
/* Next flush pass variables */
TRANSLOG_ADDRESS next_pass_max_lsn;
pthread_t max_lsn_requester;
@@ -343,12 +366,40 @@ static struct st_translog_descriptor log
ulong log_purge_type= TRANSLOG_PURGE_IMMIDIATE;
ulong log_file_size= TRANSLOG_FILE_SIZE;
ulong sync_log_dir= TRANSLOG_SYNC_DIR_NEWFILE;
+ulong group_commit= TRANSLOG_GCOMMIT_NONE;
+ulong group_commit_rate= 0;
/* Marker for end of log */
static uchar end_of_log= 0;
#define END_OF_LOG &end_of_log
+static my_atomic_rwlock_t soft_sync_rwl;
+static my_atomic_rwlock_t hgroup_commit_rwl;
+/**
+ Switch for "soft" sync (no real sync() but periodical sync by service
+ thread)
+*/
+static volatile uint32 soft_sync= FALSE;
+/**
+ Switch for "hard" group commit mode
+*/
+static uint32 hgroup_commit= FALSE;
+/**
+ File numbers interval which have to be sync()
+*/
+static uint32 soft_sync_min= 0;
+static uint32 soft_sync_max= 0;
+static my_atomic_rwlock_t group_commit_wait_rwl;
+/**
+ stores interval in nanoseconds/TRANSLOG_RATE_BASE (to
+ fit into uint32)
+*/
+static uint32 group_commit_wait= 0;
enum enum_translog_status translog_status= TRANSLOG_UNINITED;
+ulonglong translog_syncs= 0; /* Number of sync()s */
+
+/* time of last flush */
+static ulonglong flush_start= 0;
/* chunk types */
#define TRANSLOG_CHUNK_LSN 0x00 /* 0 chunk refer as LSN (head or tail */
@@ -978,12 +1029,17 @@ static TRANSLOG_FILE *get_logfile_by_num
static TRANSLOG_FILE *get_current_logfile()
{
TRANSLOG_FILE *file;
+ DBUG_ENTER("get_current_logfile");
rw_rdlock(&log_descriptor.open_files_lock);
+ DBUG_PRINT("info", ("max_file: %lu min_file: %lu open_files: %lu",
+ (ulong) log_descriptor.max_file,
+ (ulong) log_descriptor.min_file,
+ (ulong) log_descriptor.open_files.elements));
DBUG_ASSERT(log_descriptor.max_file - log_descriptor.min_file + 1 ==
log_descriptor.open_files.elements);
file= *dynamic_element(&log_descriptor.open_files, 0, TRANSLOG_FILE **);
rw_unlock(&log_descriptor.open_files_lock);
- return (file);
+ DBUG_RETURN(file);
}
uchar NEAR maria_trans_file_magic[]=
@@ -1067,6 +1123,7 @@ static my_bool translog_write_file_heade
static my_bool translog_max_lsn_to_header(File file, LSN lsn)
{
uchar lsn_buff[LSN_STORE_SIZE];
+ my_bool rc;
DBUG_ENTER("translog_max_lsn_to_header");
DBUG_PRINT("enter", ("File descriptor: %ld "
"lsn: (%lu,0x%lx)",
@@ -1075,11 +1132,13 @@ static my_bool translog_max_lsn_to_heade
lsn_store(lsn_buff, lsn);
- DBUG_RETURN(my_pwrite(file, lsn_buff,
- LSN_STORE_SIZE,
- (LOG_HEADER_DATA_SIZE - LSN_STORE_SIZE),
- log_write_flags) != 0 ||
- my_sync(file, MYF(MY_WME)) != 0);
+ if (!(rc= (my_pwrite(file, lsn_buff,
+ LSN_STORE_SIZE,
+ (LOG_HEADER_DATA_SIZE - LSN_STORE_SIZE),
+ log_write_flags) != 0 ||
+ my_sync(file, MYF(MY_WME)) != 0)))
+ translog_syncs++;
+ DBUG_RETURN(rc);
}
@@ -1421,7 +1480,9 @@ LSN translog_get_file_max_lsn_stored(uin
static my_bool translog_buffer_init(struct st_translog_buffer *buffer, int num)
{
DBUG_ENTER("translog_buffer_init");
- buffer->prev_last_lsn= buffer->last_lsn= LSN_IMPOSSIBLE;
+ buffer->pre_force_close_horizon=
+ buffer->prev_last_lsn= buffer->last_lsn=
+ LSN_IMPOSSIBLE;
DBUG_PRINT("info", ("last_lsn and prev_last_lsn set to 0 buffer: 0x%lx",
(ulong) buffer));
@@ -1433,6 +1494,7 @@ static my_bool translog_buffer_init(stru
memset(buffer->buffer, TRANSLOG_FILLER, TRANSLOG_WRITE_BUFFER);
/* Buffer size */
buffer->size= 0;
+ buffer->skipped_data= 0;
/* cond of thread which is waiting for buffer filling */
if (pthread_cond_init(&buffer->waiting_filling_buffer, 0))
DBUG_RETURN(1);
@@ -1487,7 +1549,10 @@ static my_bool translog_close_log_file(T
TODO: sync only we have changed the log
*/
if (!file->is_sync)
+ {
rc= my_sync(file->handler.file, MYF(MY_WME));
+ translog_syncs++;
+ }
rc|= my_close(file->handler.file, MYF(MY_WME));
my_free(file, MYF(0));
return test(rc);
@@ -2042,7 +2107,8 @@ static void translog_start_buffer(struct
(ulong) LSN_OFFSET(log_descriptor.horizon),
(ulong) LSN_OFFSET(log_descriptor.horizon)));
DBUG_ASSERT(buffer_no == buffer->buffer_no);
- buffer->prev_last_lsn= buffer->last_lsn= LSN_IMPOSSIBLE;
+ buffer->pre_force_close_horizon=
+ buffer->prev_last_lsn= buffer->last_lsn= LSN_IMPOSSIBLE;
DBUG_PRINT("info", ("last_lsn and prev_last_lsn set to 0 buffer: 0x%lx",
(ulong) buffer));
buffer->offset= log_descriptor.horizon;
@@ -2050,6 +2116,7 @@ static void translog_start_buffer(struct
buffer->file= get_current_logfile();
buffer->overlay= 0;
buffer->size= 0;
+ buffer->skipped_data= 0;
translog_cursor_init(cursor, buffer, buffer_no);
DBUG_PRINT("info", ("file: #%ld (%d) init cursor #%u: 0x%lx "
"chaser: %d Size: %lu (%lu)",
@@ -2521,6 +2588,7 @@ static my_bool translog_buffer_flush(str
TRANSLOG_ADDRESS offset= buffer->offset;
TRANSLOG_FILE *file= buffer->file;
uint8 ver= buffer->ver;
+ uint skipped_data;
DBUG_ENTER("translog_buffer_flush");
DBUG_PRINT("enter",
("Buffer: #%u 0x%lx file: %d offset: (%lu,0x%lx) size: %lu",
@@ -2555,6 +2623,8 @@ static my_bool translog_buffer_flush(str
disk
*/
file= buffer->file;
+ skipped_data= buffer->skipped_data;
+ DBUG_ASSERT(skipped_data < TRANSLOG_PAGE_SIZE);
for (i= 0, pg= LSN_OFFSET(buffer->offset) / TRANSLOG_PAGE_SIZE;
i < buffer->size;
i+= TRANSLOG_PAGE_SIZE, pg++)
@@ -2571,13 +2641,16 @@ static my_bool translog_buffer_flush(str
DBUG_ASSERT(i + TRANSLOG_PAGE_SIZE <= buffer->size);
if (translog_status != TRANSLOG_OK && translog_status != TRANSLOG_SHUTDOWN)
DBUG_RETURN(1);
- if (pagecache_inject(log_descriptor.pagecache,
+ if (pagecache_write_part(log_descriptor.pagecache,
&file->handler, pg, 3,
buffer->buffer + i,
PAGECACHE_PLAIN_PAGE,
PAGECACHE_LOCK_LEFT_UNLOCKED,
- PAGECACHE_PIN_LEFT_UNPINNED, 0,
- LSN_IMPOSSIBLE))
+ PAGECACHE_PIN_LEFT_UNPINNED,
+ PAGECACHE_WRITE_DONE, 0,
+ LSN_IMPOSSIBLE,
+ skipped_data,
+ TRANSLOG_PAGE_SIZE - skipped_data))
{
DBUG_PRINT("error",
("Can't write page (%lu,0x%lx) to pagecache, error: %d",
@@ -2587,10 +2660,12 @@ static my_bool translog_buffer_flush(str
translog_stop_writing();
DBUG_RETURN(1);
}
+ skipped_data= 0;
}
file->is_sync= 0;
- if (my_pwrite(file->handler.file, buffer->buffer,
- buffer->size, LSN_OFFSET(buffer->offset),
+ if (my_pwrite(file->handler.file, buffer->buffer + buffer->skipped_data,
+ buffer->size - buffer->skipped_data,
+ LSN_OFFSET(buffer->offset) + buffer->skipped_data,
log_write_flags))
{
DBUG_PRINT("error", ("Can't write buffer (%lu,0x%lx) size %lu "
@@ -3319,11 +3394,17 @@ static my_bool translog_truncate_log(TRA
((my_chsize(fd, next_page_offset, TRANSLOG_FILLER, MYF(MY_WME)) ||
(page_rest && my_pwrite(fd, page_buff, page_rest, LSN_OFFSET(addr),
log_write_flags)) ||
- my_sync(fd, MYF(MY_WME))) |
+ my_sync(fd, MYF(MY_WME))) ||
my_close(fd, MYF(MY_WME))) ||
(sync_log_dir >= TRANSLOG_SYNC_DIR_ALWAYS &&
sync_dir(log_descriptor.directory_fd, MYF(MY_WME | MY_IGNORE_BADFD))))
DBUG_RETURN(1);
+ /*
+ if above 'if' failed all translog initialization failed so it does not
+ matter if sync will be done but translog_syncs will not be increased in
+ this case
+ */
+ translog_syncs+= 2; /*sunc() log and sync() dir */
/* fix the horizon */
log_descriptor.horizon= addr;
@@ -3485,6 +3566,8 @@ my_bool translog_init_with_table(const c
id_to_share= NULL;
log_descriptor.directory_fd= -1;
log_descriptor.is_everything_flushed= 1;
+ log_descriptor.flush_in_progress= 0;
+ log_descriptor.flush_no= 0;
log_descriptor.next_pass_max_lsn= LSN_IMPOSSIBLE;
(*init_table_func)();
@@ -3495,6 +3578,8 @@ my_bool translog_init_with_table(const c
log_descriptor.open_flags= O_BINARY | O_RDONLY;
else
log_descriptor.open_flags= O_BINARY | O_RDWR;
+ my_atomic_rwlock_init(&soft_sync_rwl);
+ my_atomic_rwlock_init(&group_commit_wait_rwl);
if (pthread_mutex_init(&log_descriptor.sent_to_disk_lock,
MY_MUTEX_INIT_FAST) ||
pthread_mutex_init(&log_descriptor.file_header_lock,
@@ -3508,6 +3593,7 @@ my_bool translog_init_with_table(const c
pthread_mutex_init(&log_descriptor.dirty_buffer_mask_lock,
MY_MUTEX_INIT_FAST) ||
pthread_cond_init(&log_descriptor.log_flush_cond, 0) ||
+ pthread_cond_init(&log_descriptor.new_goal_cond, 0) ||
my_rwlock_init(&log_descriptor.open_files_lock,
NULL) ||
my_init_dynamic_array(&log_descriptor.open_files,
@@ -3909,7 +3995,6 @@ my_bool translog_init_with_table(const c
log_descriptor.flushed= log_descriptor.horizon;
log_descriptor.in_buffers_only= log_descriptor.bc.buffer->offset;
log_descriptor.max_lsn= LSN_IMPOSSIBLE; /* set to 0 */
- log_descriptor.previous_flush_horizon= log_descriptor.horizon;
/*
Now 'flushed' is set to 'horizon' value, but 'horizon' is (potentially)
address of the next LSN and we want indicate that all LSNs that are
@@ -3992,6 +4077,10 @@ my_bool translog_init_with_table(const c
It is beginning of the log => there is no LSNs in the log =>
There is no harm in leaving it "as-is".
*/
+ log_descriptor.previous_flush_horizon= log_descriptor.horizon;
+ DBUG_PRINT("info", ("previous_flush_horizon: (%lu,0x%lx)",
+ LSN_IN_PARTS(log_descriptor.
+ previous_flush_horizon)));
DBUG_RETURN(0);
}
file_no--;
@@ -4067,6 +4156,9 @@ my_bool translog_init_with_table(const c
translog_free_record_header(&rec);
}
}
+ log_descriptor.previous_flush_horizon= log_descriptor.horizon;
+ DBUG_PRINT("info", ("previous_flush_horizon: (%lu,0x%lx)",
+ LSN_IN_PARTS(log_descriptor.previous_flush_horizon)));
DBUG_RETURN(0);
err:
ma_message_no_user(0, "log initialization failed");
@@ -4154,6 +4246,9 @@ void translog_destroy()
pthread_mutex_destroy(&log_descriptor.log_flush_lock);
pthread_mutex_destroy(&log_descriptor.dirty_buffer_mask_lock);
pthread_cond_destroy(&log_descriptor.log_flush_cond);
+ pthread_cond_destroy(&log_descriptor.new_goal_cond);
+ my_atomic_rwlock_destroy(&soft_sync_rwl);
+ my_atomic_rwlock_destroy(&group_commit_wait_rwl);
rwlock_destroy(&log_descriptor.open_files_lock);
delete_dynamic(&log_descriptor.open_files);
delete_dynamic(&log_descriptor.unfinished_files);
@@ -7383,18 +7478,20 @@ static void translog_force_current_buffe
*/
DBUG_PRINT("info", ("left: %u", (uint) left));
+ old_buffer->pre_force_close_horizon=
+ old_buffer->offset + old_buffer->size;
/* decrease offset */
new_buff_beginning-= log_descriptor.bc.current_page_fill;
current_page_fill= log_descriptor.bc.current_page_fill;
memset(log_descriptor.bc.ptr, TRANSLOG_FILLER, left);
- log_descriptor.bc.buffer->size+= left;
+ old_buffer->size+= left;
DBUG_PRINT("info", ("Finish Page buffer #%u: 0x%lx "
"Size: %lu",
- (uint) log_descriptor.bc.buffer->buffer_no,
- (ulong) log_descriptor.bc.buffer,
- (ulong) log_descriptor.bc.buffer->size));
- DBUG_ASSERT(log_descriptor.bc.buffer->buffer_no ==
+ (uint) old_buffer->buffer_no,
+ (ulong) old_buffer,
+ (ulong) old_buffer->size));
+ DBUG_ASSERT(old_buffer->buffer_no ==
log_descriptor.bc.buffer_no);
}
else
@@ -7425,6 +7522,10 @@ static void translog_force_current_buffe
log_descriptor.bc.buffer->offset= new_buff_beginning;
log_descriptor.bc.write_counter= write_counter;
log_descriptor.bc.previous_offset= previous_offset;
+ new_buffer->prev_last_lsn= BUFFER_MAX_LSN(old_buffer);
+ DBUG_PRINT("info", ("prev_last_lsn set to (%lu,0x%lx) buffer: 0x%lx",
+ LSN_IN_PARTS(new_buffer->prev_last_lsn),
+ (ulong) new_buffer));
/*
Advances this log pointer, increases writers and let other threads to
@@ -7501,11 +7602,14 @@ static void translog_force_current_buffe
if (left)
{
- /*
- TODO: do not copy beginning of the page if we have no CRC or sector
- checks on
- */
- memcpy(new_buffer->buffer, data, current_page_fill);
+ if (log_descriptor.flags &
+ (TRANSLOG_PAGE_CRC | TRANSLOG_SECTOR_PROTECTION))
+ memcpy(new_buffer->buffer, data, current_page_fill);
+ else
+ {
+ new_buffer->skipped_data= current_page_fill;
+ DBUG_ASSERT(new_buffer->skipped_data < TRANSLOG_PAGE_SIZE);
+ }
}
old_buffer->next_buffer_offset= new_buffer->offset;
translog_buffer_lock(new_buffer);
@@ -7545,6 +7649,7 @@ void translog_flush_wait_for_end(LSN ls
void translog_flush_set_new_goal_and_wait(TRANSLOG_ADDRESS lsn)
{
+ int flush_no= log_descriptor.flush_no;
DBUG_ENTER("translog_flush_set_new_goal_and_wait");
DBUG_PRINT("enter", ("LSN: (%lu,0x%lx)", LSN_IN_PARTS(lsn)));
safe_mutex_assert_owner(&log_descriptor.log_flush_lock);
@@ -7552,8 +7657,9 @@ void translog_flush_set_new_goal_and_wai
{
log_descriptor.next_pass_max_lsn= lsn;
log_descriptor.max_lsn_requester= pthread_self();
+ pthread_cond_broadcast(&log_descriptor.new_goal_cond);
}
- while (log_descriptor.flush_in_progress)
+ while (flush_no == log_descriptor.flush_no)
{
pthread_cond_wait(&log_descriptor.log_flush_cond,
&log_descriptor.log_flush_lock);
@@ -7563,67 +7669,71 @@ void translog_flush_set_new_goal_and_wai
/**
- @brief Flush the log up to given LSN (included)
-
- @param lsn log record serial number up to which (inclusive)
- the log has to be flushed
-
- @return Operation status
- @retval 0 OK
- @retval 1 Error
+ @brief sync() a range of log files (inclusive) and, on request, the directory
+ @param min first file number to sync
+ @param max last file number to sync
+ @param sync_dir whether the log directory should be sync()ed as well
*/
-my_bool translog_flush(TRANSLOG_ADDRESS lsn)
+static my_bool translog_sync_files(uint32 min, uint32 max,
+ my_bool sync_dir)
{
- LSN sent_to_disk= LSN_IMPOSSIBLE;
- TRANSLOG_ADDRESS flush_horizon;
- uint fn, i;
- dirty_buffer_mask_t dirty_buffer_mask;
- uint8 last_buffer_no, start_buffer_no;
+ uint fn;
my_bool rc= 0;
- DBUG_ENTER("translog_flush");
- DBUG_PRINT("enter", ("Flush up to LSN: (%lu,0x%lx)", LSN_IN_PARTS(lsn)));
- DBUG_ASSERT(translog_status == TRANSLOG_OK ||
- translog_status == TRANSLOG_READONLY);
- LINT_INIT(sent_to_disk);
+ DBUG_ENTER("translog_sync_files");
+ DBUG_PRINT("info", ("min: %lu max: %lu sync dir: %d",
+ (ulong) min, (ulong) max, (int) sync_dir));
+ DBUG_ASSERT(min <= max);
- pthread_mutex_lock(&log_descriptor.log_flush_lock);
- DBUG_PRINT("info", ("Everything is flushed up to (%lu,0x%lx)",
- LSN_IN_PARTS(log_descriptor.flushed)));
- if (cmp_translog_addr(log_descriptor.flushed, lsn) >= 0)
- {
- pthread_mutex_unlock(&log_descriptor.log_flush_lock);
- DBUG_RETURN(0);
- }
- if (log_descriptor.flush_in_progress)
+ flush_start= my_getsystime();
+ for (fn= min; fn <= max; fn++)
{
- translog_flush_set_new_goal_and_wait(lsn);
- if (!pthread_equal(log_descriptor.max_lsn_requester, pthread_self()))
+ TRANSLOG_FILE *file= get_logfile_by_number(fn);
+ DBUG_ASSERT(file != NULL);
+ if (!file->is_sync)
{
- /* fix lsn if it was horizon */
- if (cmp_translog_addr(lsn, log_descriptor.bc.buffer->last_lsn) > 0)
- lsn= BUFFER_MAX_LSN(log_descriptor.bc.buffer);
- translog_flush_wait_for_end(lsn);
- pthread_mutex_unlock(&log_descriptor.log_flush_lock);
- DBUG_RETURN(0);
+ if (my_sync(file->handler.file, MYF(MY_WME)))
+ {
+ rc= 1;
+ translog_stop_writing();
+ DBUG_RETURN(rc);
+ }
+ translog_syncs++;
+ file->is_sync= 1;
}
- log_descriptor.next_pass_max_lsn= LSN_IMPOSSIBLE;
}
- log_descriptor.flush_in_progress= 1;
- flush_horizon= log_descriptor.previous_flush_horizon;
- DBUG_PRINT("info", ("flush_in_progress is set"));
- pthread_mutex_unlock(&log_descriptor.log_flush_lock);
- translog_lock();
- if (log_descriptor.is_everything_flushed)
+ if (sync_dir)
{
- DBUG_PRINT("info", ("everything is flushed"));
- rc= (translog_status == TRANSLOG_READONLY);
- translog_unlock();
- goto out;
+ if (!(rc|= sync_dir(log_descriptor.directory_fd,
+ MYF(MY_WME | MY_IGNORE_BADFD))))
+ translog_syncs++;
}
+ DBUG_RETURN(rc);
+}
+
+
+/*
+ @brief Flushes buffers up to lsn
+
+ @param lsn LSN to which we should flush
+ @param sent_to_disk returns 'sent to disk' position
+ @param flush_horizon returns horizon of the flush
+
+ @note See comment to translog_flush().
+*/
+
+void translog_flush_buffers(TRANSLOG_ADDRESS *lsn,
+ TRANSLOG_ADDRESS *sent_to_disk,
+ TRANSLOG_ADDRESS *flush_horizon)
+{
+ dirty_buffer_mask_t dirty_buffer_mask;
+ uint i;
+ uint8 last_buffer_no, start_buffer_no;
+ DBUG_ENTER("translog_flush_buffers");
+
/*
We will recheck information when will lock buffers one by
one so we can use unprotected read here (this is just for
@@ -7647,15 +7757,15 @@ my_bool translog_flush(TRANSLOG_ADDRESS
/*
if LSN up to which we have to flush bigger then maximum LSN of previous
buffer and at least one LSN was saved in the current buffer (last_lsn !=
- LSN_IMPOSSIBLE) then we better finish the current buffer.
+ LSN_IMPOSSIBLE) then we have to close the current buffer.
*/
- if (cmp_translog_addr(lsn, log_descriptor.bc.buffer->prev_last_lsn) > 0 &&
+ if (cmp_translog_addr(*lsn, log_descriptor.bc.buffer->prev_last_lsn) > 0 &&
log_descriptor.bc.buffer->last_lsn != LSN_IMPOSSIBLE)
{
struct st_translog_buffer *buffer= log_descriptor.bc.buffer;
- lsn= log_descriptor.bc.buffer->last_lsn; /* fix lsn if it was horizon */
+ *lsn= log_descriptor.bc.buffer->last_lsn; /* fix lsn if it was horizon */
DBUG_PRINT("info", ("LSN to flush fixed to last lsn: (%lu,0x%lx)",
- LSN_IN_PARTS(log_descriptor.bc.buffer->last_lsn)));
+ LSN_IN_PARTS(log_descriptor.bc.buffer->last_lsn)));
last_buffer_no= log_descriptor.bc.buffer_no;
log_descriptor.is_everything_flushed= 1;
translog_force_current_buffer_to_finish();
@@ -7667,8 +7777,10 @@ my_bool translog_flush(TRANSLOG_ADDRESS
TRANSLOG_BUFFERS_NO);
translog_unlock();
}
- sent_to_disk= translog_get_sent_to_disk();
- if (cmp_translog_addr(lsn, sent_to_disk) > 0)
+
+ /* flush buffers */
+ *sent_to_disk= translog_get_sent_to_disk();
+ if (cmp_translog_addr(*lsn, *sent_to_disk) > 0)
{
DBUG_PRINT("info", ("Start buffer #: %u last buffer #: %u",
@@ -7688,52 +7800,224 @@ my_bool translog_flush(TRANSLOG_ADDRESS
LSN_IN_PARTS(buffer->last_lsn),
(buffer->file ?
"dirty" : "closed")));
- if (buffer->prev_last_lsn <= lsn &&
+ if (buffer->prev_last_lsn <= *lsn &&
buffer->file != NULL)
{
- DBUG_ASSERT(flush_horizon <= buffer->offset + buffer->size);
- flush_horizon= buffer->offset + buffer->size;
+ DBUG_ASSERT(*flush_horizon <= buffer->offset + buffer->size);
+ *flush_horizon= (buffer->pre_force_close_horizon ?
+ buffer->pre_force_close_horizon :
+ buffer->offset + buffer->size);
+ DBUG_PRINT("info", ("flush_horizon: (%lu,0x%lx)",
+ LSN_IN_PARTS(*flush_horizon)));
+ DBUG_ASSERT(*flush_horizon <= log_descriptor.horizon);
translog_buffer_flush(buffer);
}
translog_buffer_unlock(buffer);
i= (i + 1) % TRANSLOG_BUFFERS_NO;
} while (i != last_buffer_no);
- sent_to_disk= translog_get_sent_to_disk();
+ *sent_to_disk= translog_get_sent_to_disk();
}
- /* sync files from previous flush till current one */
- for (fn= LSN_FILE_NO(log_descriptor.flushed); fn <= LSN_FILE_NO(lsn); fn++)
+ DBUG_VOID_RETURN;
+}
+
+/**
+ @brief Flush the log up to given LSN (included)
+
+ @param lsn log record serial number up to which (inclusive)
+ the log has to be flushed
+
+ @return Operation status
+ @retval 0 OK
+ @retval 1 Error
+
+ @note
+ - Non-group-commit logic: commits are made in passes. The thread that
+ started the flush first performs the actual flush; other threads set a new
+ goal (LSN) for the next pass (if it is the maximum seen so far) and wait
+ for the pass to end, or they simply wait for the pass to end.
+ - If hard group commit is switched on and the rate is set to zero, the
+ first thread repeats the pass whenever it finds new goals at the end of a
+ pass (it simply takes them as its new goal). The process cannot loop
+ forever because the number of threads is limited.
+ - If hard group commit is switched on and the rate is not zero, then at the
+ end of a pass the first thread waits for new goals, but no longer than
+ needed to keep the actual rate close to the configured value. If the time
+ is exceeded it ends the pass and lets another thread start a new pass; if
+ the time is not over it takes the new goal and repeats the pass. (A
+ simplified stand-alone sketch of this pass loop follows the
+ ma_loghandler.c diff below.)
+ - If soft group commit is switched on, everything works as without group
+ commit except that this procedure does not perform a real sync(). If the
+ rate is not zero, the real sync() is done by a service thread at that
+ rate, and only when new LSNs have appeared.
+
+ @note Terminology:
+ 'sent to disk' means written to disk but not sync()ed,
+ 'flushed' means sent to disk and sync()ed.
+*/
+
+my_bool translog_flush(TRANSLOG_ADDRESS lsn)
+{
+ struct timespec abstime;
+ ulonglong flush_interval;
+ ulonglong time_spent;
+ LSN sent_to_disk= LSN_IMPOSSIBLE;
+ TRANSLOG_ADDRESS flush_horizon;
+ my_bool rc= 0;
+ my_bool hgroup_commit_at_start;
+
+ DBUG_ENTER("translog_flush");
+ DBUG_PRINT("enter", ("Flush up to LSN: (%lu,0x%lx)", LSN_IN_PARTS(lsn)));
+ DBUG_ASSERT(translog_status == TRANSLOG_OK ||
+ translog_status == TRANSLOG_READONLY);
+ LINT_INIT(sent_to_disk);
+
+ pthread_mutex_lock(&log_descriptor.log_flush_lock);
+ DBUG_PRINT("info", ("Everything is flushed up to (%lu,0x%lx)",
+ LSN_IN_PARTS(log_descriptor.flushed)));
+ if (cmp_translog_addr(log_descriptor.flushed, lsn) >= 0)
{
- TRANSLOG_FILE *file= get_logfile_by_number(fn);
- DBUG_ASSERT(file != NULL);
- if (!file->is_sync)
+ pthread_mutex_unlock(&log_descriptor.log_flush_lock);
+ DBUG_RETURN(0);
+ }
+ if (log_descriptor.flush_in_progress)
+ {
+ translog_lock();
+ /* fix lsn if it was horizon */
+ if (cmp_translog_addr(lsn, log_descriptor.bc.buffer->last_lsn) > 0)
+ lsn= BUFFER_MAX_LSN(log_descriptor.bc.buffer);
+ translog_unlock();
+ translog_flush_set_new_goal_and_wait(lsn);
+ if (!pthread_equal(log_descriptor.max_lsn_requester, pthread_self()))
{
- if (my_sync(file->handler.file, MYF(MY_WME)))
+ translog_flush_wait_for_end(lsn);
+ pthread_mutex_unlock(&log_descriptor.log_flush_lock);
+ DBUG_RETURN(0);
+ }
+ log_descriptor.next_pass_max_lsn= LSN_IMPOSSIBLE;
+ }
+ log_descriptor.flush_in_progress= 1;
+ flush_horizon= log_descriptor.previous_flush_horizon;
+ DBUG_PRINT("info", ("flush_in_progress is set, flush_horizon: (%lu,0x%lx)",
+ LSN_IN_PARTS(flush_horizon)));
+ pthread_mutex_unlock(&log_descriptor.log_flush_lock);
+
+ my_atomic_rwlock_wrlock(&hgroup_commit_rwl);
+ hgroup_commit_at_start= my_atomic_load32(&hgroup_commit);
+ my_atomic_rwlock_wrunlock(&hgroup_commit_rwl);
+ if (hgroup_commit_at_start)
+ {
+ my_atomic_rwlock_rdlock(&group_commit_wait_rwl);
+ flush_interval= my_atomic_load32(&group_commit_wait) * TRANSLOG_RATE_BASE;
+ my_atomic_rwlock_rdunlock(&group_commit_wait_rwl);
+ }
+
+ translog_lock();
+ if (log_descriptor.is_everything_flushed)
+ {
+ DBUG_PRINT("info", ("everything is flushed"));
+ rc= (translog_status == TRANSLOG_READONLY);
+ translog_unlock();
+ goto out;
+ }
+
+ for (;;)
+ {
+ translog_flush_buffers(&lsn, &sent_to_disk, &flush_horizon);
+
+ if (!hgroup_commit_at_start)
+ break; /* flush pass is ended */
+
+retest:
+ if (flush_interval != 0 &&
+ (my_getsystime() - flush_start) >= flush_interval)
+ break; /* flush pass is ended */
+
+ pthread_mutex_lock(&log_descriptor.log_flush_lock);
+ if (log_descriptor.next_pass_max_lsn != LSN_IMPOSSIBLE)
+ {
+ /* take next goal */
+ lsn= log_descriptor.next_pass_max_lsn;
+ log_descriptor.next_pass_max_lsn= LSN_IMPOSSIBLE;
+ /* prevent other thread from continue */
+ log_descriptor.max_lsn_requester= pthread_self();
+ DBUG_PRINT("info", ("flush took next goal: (%lu,0x%lx)",
+ LSN_IN_PARTS(lsn)));
+ }
+ else
+ {
+ if (flush_interval == 0 ||
+ (time_spent= (my_getsystime() - flush_start)) >= flush_interval)
{
- rc= 1;
- translog_stop_writing();
- sent_to_disk= LSN_IMPOSSIBLE;
- goto out;
+ pthread_mutex_unlock(&log_descriptor.log_flush_lock);
+ break;
}
- file->is_sync= 1;
+ DBUG_PRINT("info", ("flush waits: %llu interval: %llu spent: %llu",
+ flush_interval - time_spent,
+ flush_interval, time_spent));
+ /* wait time or next goal */
+ set_timespec_nsec(abstime, flush_interval - time_spent);
+ pthread_cond_timedwait(&log_descriptor.new_goal_cond,
+ &log_descriptor.log_flush_lock,
+ &abstime);
+ pthread_mutex_unlock(&log_descriptor.log_flush_lock);
+ DBUG_PRINT("info", ("retest conditions"));
+ goto retest;
}
+ pthread_mutex_unlock(&log_descriptor.log_flush_lock);
+
+ /* next flush pass */
+ DBUG_PRINT("info", ("next flush pass"));
+ translog_lock();
}
- if (sync_log_dir >= TRANSLOG_SYNC_DIR_ALWAYS &&
- (LSN_FILE_NO(log_descriptor.previous_flush_horizon) !=
- LSN_FILE_NO(flush_horizon) ||
- ((LSN_OFFSET(log_descriptor.previous_flush_horizon) - 1) /
- TRANSLOG_PAGE_SIZE) !=
- ((LSN_OFFSET(flush_horizon) - 1) / TRANSLOG_PAGE_SIZE)))
- rc|= sync_dir(log_descriptor.directory_fd, MYF(MY_WME | MY_IGNORE_BADFD));
+ /*
+ sync() files from the previous flush up to the current one.
+
+ We read soft_sync unprotected because it is effectively a 1-bit value
+ and a slightly stale reading does not matter much; the speed of the
+ test is more valuable here.
+ */
+ if (!soft_sync || hgroup_commit_at_start)
+ {
+ if ((rc=
+ translog_sync_files(LSN_FILE_NO(log_descriptor.flushed),
+ LSN_FILE_NO(lsn),
+ sync_log_dir >= TRANSLOG_SYNC_DIR_ALWAYS &&
+ (LSN_FILE_NO(log_descriptor.
+ previous_flush_horizon) !=
+ LSN_FILE_NO(flush_horizon) ||
+ ((LSN_OFFSET(log_descriptor.
+ previous_flush_horizon) - 1) /
+ TRANSLOG_PAGE_SIZE) !=
+ ((LSN_OFFSET(flush_horizon) - 1) /
+ TRANSLOG_PAGE_SIZE)))))
+ {
+ sent_to_disk= LSN_IMPOSSIBLE;
+ goto out;
+ }
+ /* keep values for soft sync() and forced sync() actual */
+ my_atomic_rwlock_wrlock(&soft_sync_rwl);
+ my_atomic_store32(&soft_sync_min, LSN_FILE_NO(lsn));
+ my_atomic_store32(&soft_sync_max, LSN_FILE_NO(lsn));
+ my_atomic_rwlock_wrunlock(&soft_sync_rwl);
+ }
+ else
+ {
+ my_atomic_rwlock_wrlock(&soft_sync_rwl);
+ my_atomic_store32(&soft_sync_max, LSN_FILE_NO(lsn));
+ my_atomic_rwlock_wrunlock(&soft_sync_rwl);
+ }
+
+ DBUG_ASSERT(flush_horizon <= log_descriptor.horizon);
log_descriptor.previous_flush_horizon= flush_horizon;
out:
pthread_mutex_lock(&log_descriptor.log_flush_lock);
if (sent_to_disk != LSN_IMPOSSIBLE)
log_descriptor.flushed= sent_to_disk;
log_descriptor.flush_in_progress= 0;
+ log_descriptor.flush_no++;
DBUG_PRINT("info", ("flush_in_progress is dropped"));
- pthread_mutex_unlock(&log_descriptor.log_flush_lock);\
+ pthread_mutex_unlock(&log_descriptor.log_flush_lock);
pthread_cond_broadcast(&log_descriptor.log_flush_cond);
DBUG_RETURN(rc);
}
@@ -8103,6 +8387,7 @@ LSN translog_first_theoretical_lsn()
my_bool translog_purge(TRANSLOG_ADDRESS low)
{
uint32 last_need_file= LSN_FILE_NO(low);
+ uint32 min_unsync;
TRANSLOG_ADDRESS horizon= translog_get_horizon();
int rc= 0;
DBUG_ENTER("translog_purge");
@@ -8110,6 +8395,12 @@ my_bool translog_purge(TRANSLOG_ADDRESS
DBUG_ASSERT(translog_status == TRANSLOG_OK ||
translog_status == TRANSLOG_READONLY);
+ my_atomic_rwlock_wrlock(&soft_sync_rwl);
+ min_unsync= my_atomic_load32(&soft_sync_min);
+ my_atomic_rwlock_wrunlock(&soft_sync_rwl);
+ if (min_unsync < last_need_file)
+ last_need_file= min_unsync;
+
pthread_mutex_lock(&log_descriptor.purger_lock);
if (LSN_FILE_NO(log_descriptor.last_lsn_checked) < last_need_file)
{
@@ -8346,6 +8637,160 @@ my_bool translog_log_debug_info(TRN *trn
}
+
+/**
+ Sets soft sync mode
+
+ @param mode TRUE to switch soft sync on, FALSE to switch it off
+*/
+
+void translog_soft_sync(my_bool mode)
+{
+ my_atomic_rwlock_wrlock(&soft_sync_rwl);
+ my_atomic_store32(&soft_sync, (uint32) mode);
+ my_atomic_rwlock_wrunlock(&soft_sync_rwl);
+}
+
+
+/**
+ Sets hard group commit
+
+ @param mode TRUE to switch hard group commit on, FALSE to switch it off
+*/
+
+void translog_hard_group_commit(my_bool mode)
+{
+ my_atomic_rwlock_wrlock(&hgroup_commit_rwl);
+ my_atomic_store32(&hgroup_commit, (uint32) mode);
+ my_atomic_rwlock_wrunlock(&hgroup_commit_rwl);
+}
+
+
+/**
+ @brief forced log sync (used when we are switching modes)
+*/
+
+void translog_sync()
+{
+ uint32 max= get_current_logfile()->number;
+ uint32 min;
+ DBUG_ENTER("ma_translog_sync");
+
+ my_atomic_rwlock_rdlock(&soft_sync_rwl);
+ min= my_atomic_load32(&soft_sync_min);
+ my_atomic_rwlock_rdunlock(&soft_sync_rwl);
+ if (!min)
+ min= max;
+
+ translog_sync_files(min, max, sync_log_dir >= TRANSLOG_SYNC_DIR_ALWAYS);
+
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ @brief set rate for group commit
+
+ @param rate rate to set.
+
+ @note internally it stores interval in nanoseconds/TRANSLOG_RATE_BASE (to
+ fit into uint32)
+*/
+
+void translog_set_group_commit_rate(uint32 rate)
+{
+ DBUG_ENTER("translog_set_group_commit_rate");
+ ulonglong wait_time;
+ if (rate)
+ {
+ wait_time= ((TRANSLOG_RATE_BASE * 1000000000ULL / rate +
+ TRANSLOG_RATE_BASE / 2) /
+ TRANSLOG_RATE_BASE);
+ if (wait_time == 0)
+ wait_time= 1; /* protection from getting special value */
+ }
+ else
+ wait_time= 0;
+ my_atomic_rwlock_wrlock(&group_commit_wait_rwl);
+ my_atomic_store32(&group_commit_wait, (uint32)wait_time);
+ my_atomic_rwlock_wrunlock(&group_commit_wait_rwl);
+ DBUG_PRINT("info", ("rate: %lu wait: %llu",
+ (ulong)rate, (ulonglong)wait_time));
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ @brief syncing service thread
+*/
+
+static pthread_handler_t
+ma_soft_sync_background( void *arg __attribute__((unused)))
+{
+
+ my_thread_init();
+ DBUG_ENTER("ma_soft_sync_background");
+ for(;;)
+ {
+ ulonglong prev_loop= my_getsystime();
+ ulonglong time, sleep;
+ uint32 min, max;
+ my_atomic_rwlock_rdlock(&soft_sync_rwl);
+ min= my_atomic_load32(&soft_sync_min);
+ max= my_atomic_load32(&soft_sync_max);
+ my_atomic_store32(&soft_sync_min, max);
+ my_atomic_rwlock_rdunlock(&soft_sync_rwl);
+
+ my_atomic_rwlock_rdlock(&group_commit_wait_rwl);
+ sleep= my_atomic_load32(&group_commit_wait) * TRANSLOG_RATE_BASE;
+ my_atomic_rwlock_rdunlock(&group_commit_wait_rwl);
+ translog_sync_files(min, max, FALSE);
+ time= my_getsystime() - prev_loop;
+ if (time > sleep)
+ sleep= 0;
+ else
+ sleep-= time;
+ if (my_service_thread_sleep(&soft_sync_control, sleep))
+ break;
+ }
+ my_service_thread_signal_end(&soft_sync_control);
+ my_thread_end();
+ DBUG_RETURN(0);
+}
+
+
+/**
+ @brief Starts syncing thread
+*/
+
+int translog_soft_sync_start(void)
+{
+ pthread_t th;
+ int res= 0;
+ DBUG_ENTER("translog_soft_sync_start");
+ if (ma_service_thread_control_init(&soft_sync_control))
+ res= 1;
+ else if (!(res= pthread_create(&th, NULL, ma_soft_sync_background, NULL)))
+ soft_sync_control.status= THREAD_RUNNING;
+ DBUG_RETURN(res);
+}
+
+
+/**
+ @brief Stops syncing thread
+*/
+
+void translog_soft_sync_end(void)
+{
+ DBUG_ENTER("translog_soft_sync_end");
+ if (soft_sync_control.inited)
+ {
+ ma_service_thread_control_end(&soft_sync_control);
+ }
+ DBUG_VOID_RETURN;
+}
+
+
#ifdef MARIA_DUMP_LOG
#include <my_getopt.h>
extern void translog_example_table_init();
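
The @note in translog_flush() above describes the hard group commit pass loop in prose. Below is a minimal stand-alone sketch of that idea, not the Maria code itself: one thread becomes the flusher, other committers post their LSN as the next goal and wait, and the flusher keeps picking up new goals until its time budget (derived from the configured rate) runs out. Everything here (commit_to_lsn, sync_log_up_to, the 20 ms budget) is an illustrative assumption, not an existing API.

/* gc_flush_sketch.c - simplified model of the hard group commit flush pass.
   Not Maria code; build with: gcc -pthread gc_flush_sketch.c */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t flush_lock= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t new_goal_cond= PTHREAD_COND_INITIALIZER;
static pthread_cond_t flush_done_cond= PTHREAD_COND_INITIALIZER;
static uint64_t flushed_lsn= 0;      /* everything up to here is "synced" */
static uint64_t next_goal= 0;        /* goal posted for the flusher's next pass */
static uint64_t next_lsn= 0;         /* toy LSN generator */
static int flush_in_progress= 0;
static const long flush_interval_ns= 20L * 1000 * 1000;  /* ~20 ms pass budget */

static void sync_log_up_to(uint64_t lsn)
{
  (void) lsn;
  usleep(2000);                      /* stand-in for buffer write + fsync() */
}

static void commit_to_lsn(uint64_t lsn)
{
  pthread_mutex_lock(&flush_lock);
  if (flushed_lsn >= lsn)
  {
    pthread_mutex_unlock(&flush_lock);
    return;
  }
  if (flush_in_progress)
  {
    /* another thread flushes: post our LSN as the next goal and wait */
    if (lsn > next_goal)
      next_goal= lsn;
    pthread_cond_broadcast(&new_goal_cond);
    while (flushed_lsn < lsn)
      pthread_cond_wait(&flush_done_cond, &flush_lock);
    pthread_mutex_unlock(&flush_lock);
    return;
  }
  /* we are the flusher: run passes until the interval budget is spent */
  flush_in_progress= 1;
  struct timespec deadline;
  clock_gettime(CLOCK_REALTIME, &deadline);
  deadline.tv_nsec+= flush_interval_ns;
  deadline.tv_sec+= deadline.tv_nsec / 1000000000L;
  deadline.tv_nsec%= 1000000000L;
  uint64_t goal= lsn;
  for (;;)
  {
    pthread_mutex_unlock(&flush_lock);
    sync_log_up_to(goal);                        /* one flush pass */
    pthread_mutex_lock(&flush_lock);
    flushed_lsn= goal;                           /* publish under the lock */
    pthread_cond_broadcast(&flush_done_cond);    /* wake the waiters */
    /* within the budget, wait for (or pick up) new goals and run another
       pass; never retire while a posted goal is still unflushed */
    while (next_goal <= flushed_lsn)
    {
      if (pthread_cond_timedwait(&new_goal_cond, &flush_lock, &deadline) ==
          ETIMEDOUT && next_goal <= flushed_lsn)
        goto done;
    }
    goal= next_goal;
  }
done:
  flush_in_progress= 0;
  pthread_mutex_unlock(&flush_lock);
}

static void *committer(void *arg)
{
  (void) arg;
  for (int i= 0; i < 5; i++)
  {
    pthread_mutex_lock(&flush_lock);
    uint64_t my_lsn= ++next_lsn;     /* pretend we just wrote a log record */
    pthread_mutex_unlock(&flush_lock);
    commit_to_lsn(my_lsn);
  }
  return NULL;
}

int main(void)
{
  pthread_t th[4];
  for (int i= 0; i < 4; i++)
    pthread_create(&th[i], NULL, committer, NULL);
  for (int i= 0; i < 4; i++)
    pthread_join(th[i], NULL);
  printf("flushed up to LSN %llu\n", (unsigned long long) flushed_lsn);
  return 0;
}

By construction the sketch's flusher never retires while a posted goal is still unflushed; the patch reaches the same end differently, by letting the thread whose goal was recorded (max_lsn_requester) become the next flusher once the current pass finishes.
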
=== modified file 'storage/maria/ma_loghandler.h'
--- a/storage/maria/ma_loghandler.h 2009-01-15 22:25:53 +0000
+++ b/storage/maria/ma_loghandler.h 2009-03-17 20:32:17 +0000
@@ -342,6 +342,14 @@ enum enum_translog_status
TRANSLOG_SHUTDOWN /* going to shutdown the loghandler */
};
extern enum enum_translog_status translog_status;
+extern ulonglong translog_syncs; /* Number of sync()s */
+
+void translog_soft_sync(my_bool mode);
+void translog_hard_group_commit(my_bool mode);
+int translog_soft_sync_start(void);
+void translog_soft_sync_end(void);
+void translog_sync();
+void translog_set_group_commit_rate(uint32 rate);
/*
all the rest added because of recovery; should we make
@@ -441,6 +449,18 @@ extern LOG_DESC log_record_type_descript
typedef enum
{
+ TRANSLOG_GCOMMIT_NONE,
+ TRANSLOG_GCOMMIT_HARD,
+ TRANSLOG_GCOMMIT_SOFT
+} enum_maria_group_commit;
+extern ulong group_commit;
+extern ulong group_commit_rate;
+/**
+ group commit interval is TRANSLOG_RATE_BASE/<rate> seconds
+*/
+#define TRANSLOG_RATE_BASE 100
+typedef enum
+{
TRANSLOG_PURGE_IMMIDIATE,
TRANSLOG_PURGE_EXTERNAL,
TRANSLOG_PURGE_ONDEMAND
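
To make the TRANSLOG_RATE_BASE comment above concrete: taking the patch's own description at face value (the stored value is the interval in nanoseconds divided by TRANSLOG_RATE_BASE so that it fits a uint32), a configured rate r yields a full pass interval of TRANSLOG_RATE_BASE/r seconds. The following stand-alone sketch repeats the arithmetic of translog_set_group_commit_rate() with a few illustrative rates:

/* rate_sketch.c - the rate -> wait-interval arithmetic from
   translog_set_group_commit_rate(), reproduced stand-alone.
   Build with: gcc rate_sketch.c */
#include <stdint.h>
#include <stdio.h>

#define TRANSLOG_RATE_BASE 100

/* Stored value: interval in nanoseconds / TRANSLOG_RATE_BASE, rounded,
   never 0 for a non-zero rate (0 is reserved for "no rate set"). */
static uint32_t rate_to_wait(uint32_t rate)
{
  unsigned long long wait_time;
  if (rate == 0)
    return 0;
  wait_time= (TRANSLOG_RATE_BASE * 1000000000ULL / rate +
              TRANSLOG_RATE_BASE / 2) / TRANSLOG_RATE_BASE;
  if (wait_time == 0)
    wait_time= 1;
  return (uint32_t) wait_time;
}

int main(void)
{
  uint32_t rates[]= { 1, 10, 100, 1000 };
  for (unsigned i= 0; i < sizeof(rates) / sizeof(rates[0]); i++)
  {
    uint32_t w= rate_to_wait(rates[i]);
    /* full interval = stored value * TRANSLOG_RATE_BASE nanoseconds,
       i.e. TRANSLOG_RATE_BASE / rate seconds */
    printf("rate=%u -> stored wait=%u interval=%.3f s\n",
           (unsigned) rates[i], (unsigned) w,
           (double) w * TRANSLOG_RATE_BASE / 1e9);
  }
  return 0;
}

So rate=100 corresponds to roughly one flush pass per second, and rate=1000 to one pass every 0.1 seconds.
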
=== modified file 'tests/fork_big2.pl'
--- a/tests/fork_big2.pl 2006-02-12 21:26:30 +0000
+++ b/tests/fork_big2.pl 2009-03-17 20:32:17 +0000
@@ -16,21 +16,21 @@ package main;
$opt_skip_create=$opt_skip_in=$opt_verbose=$opt_fast_insert=
$opt_lock_tables=$opt_debug=$opt_skip_drop=$opt_fast=$opt_force=0;
-$opt_thread_factor=1;
-$opt_insert=1;
-$opt_select=6;$opt_join=4;
-$opt_select_count=$opt_join_count=0;
-$opt_update=1;$opt_delete=0;
-$opt_flush=$opt_check=$opt_repair=$opt_alter=0;
-$opt_join_range=100;
+$opt_thread_factor=1;
+$opt_insert=1;
+$opt_select=6;$opt_join=4;
+$opt_select_count=$opt_join_count=0;
+$opt_update=1;$opt_delete=0;
+$opt_flush=$opt_check=$opt_repair=$opt_alter=0;
+$opt_join_range=100;
$opt_resize_interval=0;
$opt_time=0;
$opt_host=$opt_user=$opt_password=""; $opt_db="test";
$opt_verbose=$opt_debug=$opt_lock_tables=$opt_fast_insert=$opt_fast=$opt_skip_in=$opt_force=undef; # Ignore warnings from these
-GetOptions("host=s","db=s","user=s","password=s","loop-count=i","skip-create","skip-in","skip-drop",
- "verbose","fast-insert","lock-tables","debug","fast","force","thread-factor=i",
- "insert=i", "select=i", "join=i", "select-count=i", "join-count=i", "update=i", "delete=i",
+GetOptions("host=s","db=s","user=s","password=s","loop-count=i","skip-create","skip-in","skip-drop",
+ "verbose","fast-insert","lock-tables","debug","fast","force","thread-factor=i",
+ "insert=i", "select=i", "join=i", "select-count=i", "join-count=i", "update=i", "delete=i",
"flush=i", "check=i", "repair=i", "alter=i", "resize-interval=i", "max-join_range=i", "time=i") || die "Aborted";
print "Test of multiple connections that test the following things:\n";
@@ -48,20 +48,20 @@ srand 100; # Make random numbers repea
####
#### Start timeing and start test
-####
-
+####
+
$opt_insert*=$opt_thread_factor;
-$opt_select*=$opt_thread_factor;
-$opt_join*=$opt_thread_factor;
-$opt_select_count*=$opt_thread_factor;
-$opt_join_count*=$opt_thread_factor;
-$opt_update*=$opt_thread_factor;
-$opt_delete*=$opt_thread_factor;
-
-if ($opt_time == 0 && $opt_insert == 0)
-{
- $opt_insert=1;
-}
+$opt_select*=$opt_thread_factor;
+$opt_join*=$opt_thread_factor;
+$opt_select_count*=$opt_thread_factor;
+$opt_join_count*=$opt_thread_factor;
+$opt_update*=$opt_thread_factor;
+$opt_delete*=$opt_thread_factor;
+
+if ($opt_time == 0 && $opt_insert == 0)
+{
+ $opt_insert=1;
+}
$start_time=new Benchmark;
$dbh = DBI->connect("DBI:mysql:$opt_db:$opt_host",
@@ -100,71 +100,71 @@ $|= 1; # Autoflush
####
#### Start the tests
####
-if ($opt_time != 0)
-{
- test_abort() if (($pid=fork()) == 0); $work{$pid}="abort";
+if ($opt_time != 0)
+{
+ test_abort() if (($pid=fork()) == 0); $work{$pid}="abort";
}
for ($i=0 ; $i < $opt_insert ; $i ++)
{
test_insert() if (($pid=fork()) == 0); $work{$pid}="insert";
-}
+}
$threads=$i;
-for ($i=0 ; $i < $opt_select ; $i ++)
-{
- test_select() if (($pid=fork()) == 0); $work{$pid}="select";
-}
-$threads+=$i;
-for ($i=0 ; $i < $opt_join ; $i ++)
-{
- test_join() if (($pid=fork()) == 0); $work{$pid}="join";
-}
-$threads+=$i;
+for ($i=0 ; $i < $opt_select ; $i ++)
+{
+ test_select() if (($pid=fork()) == 0); $work{$pid}="select";
+}
+$threads+=$i;
+for ($i=0 ; $i < $opt_join ; $i ++)
+{
+ test_join() if (($pid=fork()) == 0); $work{$pid}="join";
+}
+$threads+=$i;
for ($i=0 ; $i < $opt_select_count ; $i ++)
{
test_select_count() if (($pid=fork()) == 0); $work{$pid}="select_count";
}
-$threads+=$i;
-for ($i=0 ; $i < $opt_join_count ; $i ++)
-{
- test_join_count() if (($pid=fork()) == 0); $work{$pid}="join_count";
-}
-$threads+=$i;
-for ($i=0 ; $i < $opt_update ; $i ++)
-{
- test_update() if (($pid=fork()) == 0); $work{$pid}="update";
-}
-$threads+=$i;
-for ($i=0 ; $i < $opt_delete ; $i ++)
-{
- test_delete() if (($pid=fork()) == 0); $work{$pid}="delete";
-}
-$threads+=$i;
-for ($i=0 ; $i < $opt_flush ; $i ++)
-{
- test_flush() if (($pid=fork()) == 0); $work{$pid}="flush";
-}
-$threads+=$i;
-for ($i=0 ; $i < $opt_check ; $i ++)
-{
- test_check() if (($pid=fork()) == 0); $work{$pid}="check";
-}
-$threads+=$i;
-for ($i=0 ; $i < $opt_repair ; $i ++)
-{
- test_repair() if (($pid=fork()) == 0); $work{$pid}="repair";
-}
-$threads+=$i;
-for ($i=0 ; $i < $opt_alter ; $i ++)
-{
- test_alter() if (($pid=fork()) == 0); $work{$pid}="alter";
-}
-$threads+=$i;
+$threads+=$i;
+for ($i=0 ; $i < $opt_join_count ; $i ++)
+{
+ test_join_count() if (($pid=fork()) == 0); $work{$pid}="join_count";
+}
+$threads+=$i;
+for ($i=0 ; $i < $opt_update ; $i ++)
+{
+ test_update() if (($pid=fork()) == 0); $work{$pid}="update";
+}
+$threads+=$i;
+for ($i=0 ; $i < $opt_delete ; $i ++)
+{
+ test_delete() if (($pid=fork()) == 0); $work{$pid}="delete";
+}
+$threads+=$i;
+for ($i=0 ; $i < $opt_flush ; $i ++)
+{
+ test_flush() if (($pid=fork()) == 0); $work{$pid}="flush";
+}
+$threads+=$i;
+for ($i=0 ; $i < $opt_check ; $i ++)
+{
+ test_check() if (($pid=fork()) == 0); $work{$pid}="check";
+}
+$threads+=$i;
+for ($i=0 ; $i < $opt_repair ; $i ++)
+{
+ test_repair() if (($pid=fork()) == 0); $work{$pid}="repair";
+}
+$threads+=$i;
+for ($i=0 ; $i < $opt_alter ; $i ++)
+{
+ test_alter() if (($pid=fork()) == 0); $work{$pid}="alter";
+}
+$threads+=$i;
if ($opt_resize_interval != 0)
{
test_resize() if (($pid=fork()) == 0); $work{$pid}="resize";
$threads+=1;
}
-
+
print "Started $threads threads\n";
$errors=0;
@@ -172,17 +172,17 @@ $running_insert_threads=$opt_insert;
while (($pid=wait()) != -1)
{
$ret=$?/256;
- print "thread '" . $work{$pid} . "' finished with exit code $ret\n";
- if ($opt_time == 0)
+ print "thread '" . $work{$pid} . "' finished with exit code $ret\n";
+ if ($opt_time == 0)
{
if ($work{$pid} =~ /^insert/)
{
if (!--$running_insert_threads)
- {
-
+ {
+
# Time to stop other threads
signal_abort();
- }
+ }
}
}
$errors++ if ($ret != 0);
@@ -214,17 +214,17 @@ print "Total time: " .
exit(0);
-#
-# Sleep and then abort other threads
-#
-
-sub test_abort
-{
- sleep($opt_time);
- signal_abort();
- exit(0);
-}
-
+#
+# Sleep and then abort other threads
+#
+
+sub test_abort
+{
+ sleep($opt_time);
+ signal_abort();
+ exit(0);
+}
+
#
# Insert records in the table
@@ -363,58 +363,58 @@ sub test_join
$dbh->disconnect; $dbh=0;
print "Test_join: Executed $count joins\n";
exit(0);
-}
-
-#
-# select records
-# Do continously joins between the first and second for range and count selected rows
-#
-
-sub test_join_count
-{
- my ($dbh, $i, $j, $count, $loop);
-
- $dbh = DBI->connect("DBI:mysql:$opt_db:$opt_host",
- $opt_user, $opt_password,
- { PrintError => 0}) || die $DBI::errstr;
-
- $count_query=make_count_query($numtables);
- $count=0;
- $loop=9999;
- $sum=0;
-
- srand();
-
- $i=0;
- while (($i++ % 10) || !test_if_abort($dbh))
- {
- if ($loop++ >= 10)
- {
- $loop=0;
- $row_counts=simple_query($dbh, $count_query);
- }
- for ($j=0 ; $j < $numtables-1 ; $j++)
- {
- my ($id1)= int rand $row_counts->[$j];
- my ($id2)= int rand $row_counts->[$j];
- if ($id1 > $id2)
- {
- my $id0=$id1; $id1=$id2; $id2=$id0;
- if ($id2-$id1 > $opt_join_range)
- {
- $id2=$id1+$opt_join_range;
- }
- }
- my ($t1,$t2)= ($testtables[$j]->[0],$testtables[$j+1]->[0]);
- $row=simple_query($dbh, "select count(*) from $t1, $t2 where $t1.id=$t2.id and $t1.id between $id1 and $id2");
- $sum+=$row->[0];
- $count++;
- }
- }
- $dbh->disconnect; $dbh=0;
- print "Test_join_count: Executed $count joins: total $sum rows\n";
- exit(0);
-}
+}
+
+#
+# select records
+# Do continously joins between the first and second for range and count selected rows
+#
+
+sub test_join_count
+{
+ my ($dbh, $i, $j, $count, $loop);
+
+ $dbh = DBI->connect("DBI:mysql:$opt_db:$opt_host",
+ $opt_user, $opt_password,
+ { PrintError => 0}) || die $DBI::errstr;
+
+ $count_query=make_count_query($numtables);
+ $count=0;
+ $loop=9999;
+ $sum=0;
+
+ srand();
+
+ $i=0;
+ while (($i++ % 10) || !test_if_abort($dbh))
+ {
+ if ($loop++ >= 10)
+ {
+ $loop=0;
+ $row_counts=simple_query($dbh, $count_query);
+ }
+ for ($j=0 ; $j < $numtables-1 ; $j++)
+ {
+ my ($id1)= int rand $row_counts->[$j];
+ my ($id2)= int rand $row_counts->[$j];
+ if ($id1 > $id2)
+ {
+ my $id0=$id1; $id1=$id2; $id2=$id0;
+ if ($id2-$id1 > $opt_join_range)
+ {
+ $id2=$id1+$opt_join_range;
+ }
+ }
+ my ($t1,$t2)= ($testtables[$j]->[0],$testtables[$j+1]->[0]);
+ $row=simple_query($dbh, "select count(*) from $t1, $t2 where $t1.id=$t2.id and $t1.id between $id1 and $id2");
+ $sum+=$row->[0];
+ $count++;
+ }
+ }
+ $dbh->disconnect; $dbh=0;
+ print "Test_join_count: Executed $count joins: total $sum rows\n";
+ exit(0);
+}
#
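
The fork_big2.pl hunks above are whitespace-only, but the shape of the harness is visible in the context lines: fork one child per configured operation, remember what each pid runs (the %work hash), then reap the children in a wait() loop and count non-zero exit codes as errors. A minimal sketch of that same shape, with hypothetical worker functions:

/* fork_harness_sketch.c - the fork-one-child-per-task / reap-and-count-errors
   shape used by tests/fork_big2.pl, as a minimal C program. Worker names are
   hypothetical. Build with: gcc fork_harness_sketch.c */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* stand-ins for the script's test_insert()/test_select()/... workers */
static int work_insert(void) { usleep(10000); return 0; }
static int work_select(void) { usleep(10000); return 0; }

struct task { const char *name; int count; int (*fn)(void); };

int main(void)
{
  struct task tasks[]= { { "insert", 2, work_insert },
                         { "select", 3, work_select } };
  struct { pid_t pid; const char *name; } started[16]; /* like %work in Perl */
  int n_started= 0, errors= 0, status;
  pid_t pid;

  for (unsigned t= 0; t < sizeof(tasks) / sizeof(tasks[0]); t++)
    for (int i= 0; i < tasks[t].count; i++)
    {
      if ((pid= fork()) == -1)
        return 1;                      /* fork failed */
      if (pid == 0)
        _exit(tasks[t].fn());          /* child: run the worker and exit */
      started[n_started].pid= pid;
      started[n_started].name= tasks[t].name;
      n_started++;
    }
  printf("Started %d workers\n", n_started);

  while ((pid= wait(&status)) != -1)   /* reap children as they finish */
  {
    int ret= WIFEXITED(status) ? WEXITSTATUS(status) : 1;
    const char *name= "?";
    for (int i= 0; i < n_started; i++)
      if (started[i].pid == pid)
        name= started[i].name;
    printf("worker '%s' finished with exit code %d\n", name, ret);
    if (ret != 0)
      errors++;
  }
  printf("Test finished with %d errors\n", errors);
  return errors != 0;
}
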
[Maria-developers] bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (knielsen:2683)
by knielsen@knielsen-hq.org 16 Mar '09
#At lp:maria
2683 knielsen(a)knielsen-hq.org 2009-03-16
Remove redundant code, probably bad automerge
modified:
mysql-test/lib/mtr_cases.pm
=== modified file 'mysql-test/lib/mtr_cases.pm'
--- a/mysql-test/lib/mtr_cases.pm 2009-02-19 09:01:25 +0000
+++ b/mysql-test/lib/mtr_cases.pm 2009-03-16 09:27:05 +0000
@@ -799,15 +799,6 @@ sub collect_one_test_case {
push(@{$tinfo->{'master_opt'}}, @$suite_opts);
push(@{$tinfo->{'slave_opt'}}, @$suite_opts);
- #-----------------------------------------------------------------------
- # Check for test specific config file
- #-----------------------------------------------------------------------
- my $test_cnf_file= "$testdir/$tname.cnf";
- if ( -f $test_cnf_file) {
- # Specifies the configuration file to use for this test
- $tinfo->{'template_path'}= $test_cnf_file;
- }
-
# ----------------------------------------------------------------------
# Check for test specific config file
# ----------------------------------------------------------------------
[Maria-developers] bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (knielsen:2683)
by knielsen@knielsen-hq.org 16 Mar '09
#At lp:maria
2683 knielsen(a)knielsen-hq.org 2009-03-16
Make test --big, as it takes a _long_ time to run and only tests a single bug.
modified:
mysql-test/t/multi_update2.test
=== modified file 'mysql-test/t/multi_update2.test'
--- a/mysql-test/t/multi_update2.test 2008-11-19 18:17:26 +0000
+++ b/mysql-test/t/multi_update2.test 2009-03-16 09:25:20 +0000
@@ -2,6 +2,9 @@
# Test of update statement that uses many tables.
#
+# This is a big test.
+--source include/big_test.inc
+
--disable_warnings
DROP TABLE IF EXISTS t1,t2;
--enable_warnings