maria-developers team mailing list archive
Message #01472
bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (igor:2747)
#At lp:maria based on revid:igor@xxxxxxxxxxxx-20091026204820-qsop1lweyo1y5aor
2747 Igor Babaev 2009-11-09 [merge]
Merge
added:
mysql-test/extra/rpl_tests/rpl_mixing_engines.test
mysql-test/include/partition_date_range.inc
mysql-test/include/rpl_loaddata_charset.inc
mysql-test/r/disabled_partition.require
mysql-test/r/partition_disabled.result
mysql-test/r/table_elim_debug.result
mysql-test/std_data/loaddata_utf8.dat
mysql-test/std_data/parts/t1.frm
mysql-test/suite/rpl/r/rpl_create_if_not_exists.result
mysql-test/suite/rpl/r/rpl_create_tmp_table_if_not_exists.result
mysql-test/suite/rpl/r/rpl_stm_mixing_engines.result
mysql-test/suite/rpl/t/rpl_create_if_not_exists.test
mysql-test/suite/rpl/t/rpl_create_tmp_table_if_not_exists.test
mysql-test/suite/rpl/t/rpl_stm_mixing_engines.test
mysql-test/t/partition_disabled-master.opt
mysql-test/t/partition_disabled.test
mysql-test/t/table_elim_debug.test
storage/federated/README
storage/federatedx/
storage/federatedx/AUTHORS
storage/federatedx/CMakeLists.txt
storage/federatedx/ChangeLog
storage/federatedx/FAQ
storage/federatedx/Makefile.am
storage/federatedx/README
storage/federatedx/README.windows
storage/federatedx/TODO
storage/federatedx/federatedx_io.cc
storage/federatedx/federatedx_io_mysql.cc
storage/federatedx/federatedx_io_null.cc
storage/federatedx/federatedx_probes.h
storage/federatedx/federatedx_txn.cc
storage/federatedx/ha_federatedx.cc
storage/federatedx/ha_federatedx.h
storage/federatedx/plug.in
renamed:
storage/federated/plug.in => storage/federated/plug.in.disabled
modified:
.bzrignore
BUILD/SETUP.sh
BUILD/check-cpu
client/mysql.cc
client/mysql_upgrade.c
client/mysqladmin.cc
client/mysqldump.c
client/mysqltest.cc
cmd-line-utils/readline/bind.c
cmd-line-utils/readline/histfile.c
cmd-line-utils/readline/undo.c
extra/libevent/event-internal.h
extra/yassl/taocrypt/src/twofish.cpp
include/my_global.h
libmysql/libmysql.c
libmysqld/Makefile.am
mysql-test/collections/default.experimental
mysql-test/include/commit.inc
mysql-test/lib/mtr_cases.pm
mysql-test/mysql-test-run.pl
mysql-test/r/alter_table.result
mysql-test/r/analyse.result
mysql-test/r/auto_increment.result
mysql-test/r/commit_1innodb.result
mysql-test/r/func_misc.result
mysql-test/r/gis-rtree.result
mysql-test/r/group_min_max.result
mysql-test/r/handler_myisam.result
mysql-test/r/innodb_xtradb_bug317074.result
mysql-test/r/lock_multi_bug38499.result
mysql-test/r/lock_multi_bug38691.result
mysql-test/r/merge.result
mysql-test/r/myisam_debug.result
mysql-test/r/mysqlbinlog_row_trans.result
mysql-test/r/not_partition.result
mysql-test/r/partition.result
mysql-test/r/partition_pruning.result
mysql-test/r/partition_range.result
mysql-test/r/subselect.result
mysql-test/r/type_newdecimal.result
mysql-test/r/view.result
mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result
mysql-test/suite/binlog/r/binlog_stm_drop_tmp_tbl.result
mysql-test/suite/binlog/r/binlog_stm_mix_innodb_myisam.result
mysql-test/suite/federated/disabled.def
mysql-test/suite/federated/federated.result
mysql-test/suite/federated/federated.test
mysql-test/suite/federated/federated_archive.result
mysql-test/suite/federated/federated_bug_13118.result
mysql-test/suite/federated/federated_bug_25714.result
mysql-test/suite/federated/federated_cleanup.inc
mysql-test/suite/federated/federated_innodb.result
mysql-test/suite/federated/federated_server.result
mysql-test/suite/federated/federated_server.test
mysql-test/suite/federated/federated_transactions.result
mysql-test/suite/funcs_1/r/is_engines_federated.result
mysql-test/suite/parts/inc/partition.pre
mysql-test/suite/parts/inc/partition_bigint.inc
mysql-test/suite/parts/inc/partition_binary.inc
mysql-test/suite/parts/inc/partition_bit.inc
mysql-test/suite/parts/inc/partition_char.inc
mysql-test/suite/parts/inc/partition_date.inc
mysql-test/suite/parts/inc/partition_datetime.inc
mysql-test/suite/parts/inc/partition_decimal.inc
mysql-test/suite/parts/inc/partition_double.inc
mysql-test/suite/parts/inc/partition_enum.inc
mysql-test/suite/parts/inc/partition_float.inc
mysql-test/suite/parts/inc/partition_int.inc
mysql-test/suite/parts/inc/partition_mediumint.inc
mysql-test/suite/parts/inc/partition_smallint.inc
mysql-test/suite/parts/inc/partition_time.inc
mysql-test/suite/parts/inc/partition_timestamp.inc
mysql-test/suite/parts/inc/partition_tinyint.inc
mysql-test/suite/parts/inc/partition_varbinary.inc
mysql-test/suite/parts/inc/partition_varchar.inc
mysql-test/suite/parts/inc/partition_year.inc
mysql-test/suite/parts/r/partition_char_innodb.result
mysql-test/suite/parts/r/partition_char_myisam.result
mysql-test/suite/parts/r/partition_datetime_innodb.result
mysql-test/suite/parts/r/partition_datetime_myisam.result
mysql-test/suite/parts/t/part_supported_sql_func_innodb.test
mysql-test/suite/parts/t/partition_alter1_1_2_innodb.test
mysql-test/suite/parts/t/partition_alter4_myisam.test
mysql-test/suite/pbxt/r/partition_range.result
mysql-test/suite/pbxt/r/subselect.result
mysql-test/suite/pbxt/t/subselect.test
mysql-test/suite/rpl/r/rpl_concurrency_error.result
mysql-test/suite/rpl/r/rpl_drop_if_exists.result
mysql-test/suite/rpl/r/rpl_drop_temp.result
mysql-test/suite/rpl/r/rpl_events.result
mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result
mysql-test/suite/rpl/r/rpl_loaddata_charset.result
mysql-test/suite/rpl/r/rpl_rewrt_db.result
mysql-test/suite/rpl/t/rpl_concurrency_error.test
mysql-test/suite/rpl/t/rpl_drop_temp.test
mysql-test/suite/rpl/t/rpl_events.test
mysql-test/suite/rpl/t/rpl_loaddata_charset.test
mysql-test/suite/rpl/t/rpl_rewrt_db-slave.opt
mysql-test/suite/rpl/t/rpl_rewrt_db.test
mysql-test/t/almost_full.test
mysql-test/t/alter_table.test
mysql-test/t/analyse.test
mysql-test/t/archive.test
mysql-test/t/auto_increment.test
mysql-test/t/bench_count_distinct.test
mysql-test/t/change_user.test
mysql-test/t/check.test
mysql-test/t/count_distinct2.test
mysql-test/t/count_distinct3.test
mysql-test/t/ctype_euckr.test
mysql-test/t/derived.test
mysql-test/t/events_time_zone.test
mysql-test/t/fulltext2.test
mysql-test/t/func_misc.test
mysql-test/t/gis-rtree.test
mysql-test/t/group_min_max.test
mysql-test/t/handler_myisam.test
mysql-test/t/heap.test
mysql-test/t/innodb_xtradb_bug317074.test
mysql-test/t/insert.test
mysql-test/t/kill.test
mysql-test/t/lock_multi_bug38499.test
mysql-test/t/lock_multi_bug38691.test
mysql-test/t/merge.test
mysql-test/t/multi_update.test
mysql-test/t/multi_update2.test
mysql-test/t/myisam.test
mysql-test/t/myisam_debug.test
mysql-test/t/myisampack.test
mysql-test/t/not_partition.test
mysql-test/t/order_by.test
mysql-test/t/order_fill_sortbuf.test
mysql-test/t/partition.test
mysql-test/t/partition_archive.test
mysql-test/t/partition_pruning.test
mysql-test/t/select_found.test
mysql-test/t/sp-big.test
mysql-test/t/subselect.test
mysql-test/t/type_newdecimal.test
mysql-test/t/view.test
mysql-test/t/warnings.test
mysql-test/valgrind.supp
mysys/array.c
mysys/mf_iocache.c
mysys/mf_pack.c
mysys/my_copy.c
mysys/my_getopt.c
mysys/my_redel.c
mysys/safemalloc.c
mysys/typelib.c
regex/regcomp.c
scripts/make_binary_distribution.sh
server-tools/instance-manager/mysql_connection.cc
sql-common/client.c
sql-common/my_time.c
sql/client_settings.h
sql/events.cc
sql/field.cc
sql/field.h
sql/ha_partition.cc
sql/item.cc
sql/item.h
sql/item_cmpfunc.cc
sql/item_create.cc
sql/item_func.cc
sql/item_func.h
sql/item_sum.cc
sql/item_timefunc.cc
sql/lock.cc
sql/log.cc
sql/log_event.cc
sql/my_decimal.h
sql/mysql_priv.h
sql/mysqld.cc
sql/opt_range.cc
sql/partition_info.h
sql/share/errmsg.txt
sql/slave.cc
sql/spatial.cc
sql/sql_base.cc
sql/sql_db.cc
sql/sql_delete.cc
sql/sql_insert.cc
sql/sql_parse.cc
sql/sql_partition.cc
sql/sql_plugin.cc
sql/sql_profile.cc
sql/sql_select.cc
sql/sql_show.cc
sql/sql_table.cc
sql/sql_update.cc
sql/sql_view.cc
sql/sql_yacc.yy
sql/table.cc
storage/heap/hp_test2.c
storage/myisam/ft_boolean_search.c
storage/myisam/mi_check.c
storage/myisam/mi_create.c
storage/myisam/mi_delete.c
storage/myisam/mi_dynrec.c
storage/myisam/mi_open.c
storage/myisam/mi_packrec.c
storage/myisam/mi_search.c
storage/myisam/mi_update.c
storage/myisam/sort.c
storage/myisammrg/ha_myisammrg.cc
storage/myisammrg/myrg_open.c
storage/myisammrg/myrg_rkey.c
storage/ndb/include/mgmapi/ndb_logevent.h
storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
storage/pbxt/src/cache_xt.cc
storage/xtradb/include/buf0buf.ic
storage/xtradb/include/srv0srv.h
storage/xtradb/srv/srv0srv.c
strings/ctype-ucs2.c
strings/ctype-utf8.c
strings/decimal.c
support-files/mysql.spec.sh
tests/mysql_client_test.c
win/make_mariadb_win_dist
=== modified file '.bzrignore'
--- a/.bzrignore 2009-09-15 12:12:51 +0000
+++ b/.bzrignore 2009-11-06 17:22:32 +0000
@@ -1921,3 +1921,4 @@ sql/share/ukrainian
libmysqld/examples/mysqltest.cc
extra/libevent/event-config.h
libmysqld/opt_table_elimination.cc
+libmysqld/ha_federatedx.cc
=== modified file 'BUILD/SETUP.sh'
--- a/BUILD/SETUP.sh 2009-10-06 14:53:46 +0000
+++ b/BUILD/SETUP.sh 2009-10-29 00:04:56 +0000
@@ -146,6 +146,13 @@ then
debug_cflags="$debug_cflags $debug_extra_cflags"
fi
+static_link="--with-mysqld-ldflags=-all-static "
+static_link="$static_link --with-client-ldflags=-all-static"
+# we need local-infile in all binaries for rpl000001
+# if you need to disable local-infile in the client, write a build script
+# and unset local_infile_configs
+local_infile_configs="--enable-local-infile"
+
#
# Configuration options.
#
@@ -154,6 +161,8 @@ base_configs="$base_configs --with-extra
base_configs="$base_configs --enable-thread-safe-client "
base_configs="$base_configs --with-big-tables"
base_configs="$base_configs --with-plugin-maria --with-maria-tmp-tables --without-plugin-innodb_plugin"
+# Compile our client programs with static libraries to allow them to be moved
+base_configs="$base_configs --with-mysqld-ldflags=-static --with-client-ldflags=-static"
if test -d "$path/../cmd-line-utils/readline"
then
@@ -163,14 +172,6 @@ then
base_configs="$base_configs --with-libedit"
fi
-static_link="--with-mysqld-ldflags=-all-static "
-static_link="$static_link --with-client-ldflags=-all-static"
-# we need local-infile in all binaries for rpl000001
-# if you need to disable local-infile in the client, write a build script
-# and unset local_infile_configs
-local_infile_configs="--enable-local-infile"
-
-
max_no_embedded_configs="$SSL_LIBRARY --with-plugins=max"
max_no_qc_configs="$SSL_LIBRARY --with-plugins=max --without-query-cache"
max_no_ndb_configs="$SSL_LIBRARY --with-plugins=max-no-ndb --with-embedded-server --with-libevent"
=== modified file 'BUILD/check-cpu'
--- a/BUILD/check-cpu 2009-07-10 13:04:40 +0000
+++ b/BUILD/check-cpu 2009-09-01 13:39:13 +0000
@@ -70,6 +70,11 @@ check_cpu () {
Alpha*EV6*)
cpu_arg="ev6";
;;
+ #Core 2 Duo
+ *Intel*Core\(TM\)2*)
+ cpu_arg="nocona"
+ core2="yes"
+ ;;
# Intel ia32
*Intel*Core*|*X[eE][oO][nN]*)
# a Xeon is just another pentium4 ...
@@ -134,10 +139,6 @@ check_cpu () {
*i386*)
cpu_arg="i386"
;;
- #Core 2 Duo
- *Intel*Core\(TM\)2*)
- cpu_arg="nocona"
- ;;
# Intel ia64
*Itanium*)
cpu_arg="itanium"
=== modified file 'client/mysql.cc'
--- a/client/mysql.cc 2009-10-26 11:35:42 +0000
+++ b/client/mysql.cc 2009-11-06 17:22:32 +0000
@@ -86,7 +86,7 @@ extern "C" {
#endif
#undef bcmp // Fix problem with new readline
-#if defined( __WIN__)
+#if defined(__WIN__)
#include <conio.h>
#elif !defined(__NETWARE__)
#include <readline/readline.h>
@@ -106,7 +106,7 @@ extern "C" {
#define cmp_database(cs,A,B) strcmp((A),(B))
#endif
-#if !defined( __WIN__) && !defined(__NETWARE__) && !defined(THREAD)
+#if !defined(__WIN__) && !defined(__NETWARE__) && !defined(THREAD)
#define USE_POPEN
#endif
@@ -1870,7 +1870,7 @@ static int read_and_execute(bool interac
if (opt_outfile && glob_buffer.is_empty())
fflush(OUTFILE);
-#if defined( __WIN__) || defined(__NETWARE__)
+#if defined(__WIN__) || defined(__NETWARE__)
tee_fputs(prompt, stdout);
#if defined(__NETWARE__)
line=fgets(linebuffer, sizeof(linebuffer)-1, stdin);
@@ -1881,7 +1881,7 @@ static int read_and_execute(bool interac
if (p != NULL)
*p = '\0';
}
-#else defined(__WIN__)
+#else
if (!tmpbuf.is_alloced())
tmpbuf.alloc(65535);
tmpbuf.length(0);
@@ -1907,7 +1907,7 @@ static int read_and_execute(bool interac
if (opt_outfile)
fputs(prompt, OUTFILE);
line= readline(prompt);
-#endif /* defined( __WIN__) || defined(__NETWARE__) */
+#endif /* defined(__WIN__) || defined(__NETWARE__) */
/*
When Ctrl+d or Ctrl+z is pressed, the line may be NULL on some OS
@@ -1955,10 +1955,10 @@ static int read_and_execute(bool interac
}
}
-#if defined( __WIN__) || defined(__NETWARE__)
+#if defined(__WIN__) || defined(__NETWARE__)
buffer.free();
#endif
-#if defined( __WIN__)
+#if defined(__WIN__)
tmpbuf.free();
#endif
@@ -3835,6 +3835,7 @@ com_edit(String *buffer,char *line __att
char errmsg[100];
sprintf(errmsg, "Command '%.40s' failed", buff);
put_info(errmsg, INFO_ERROR, 0, NullS);
+ goto err;
}
if (!my_stat(filename,&stat_arg,MYF(MY_WME)))
@@ -4635,7 +4636,7 @@ void tee_putc(int c, FILE *file)
putc(c, OUTFILE);
}
-#if defined( __WIN__) || defined(__NETWARE__)
+#if defined(__WIN__) || defined(__NETWARE__)
#include <time.h>
#else
#include <sys/times.h>
@@ -4647,7 +4648,7 @@ void tee_putc(int c, FILE *file)
static ulong start_timer(void)
{
-#if defined( __WIN__) || defined(__NETWARE__)
+#if defined(__WIN__) || defined(__NETWARE__)
return clock();
#else
struct tms tms_tmp;
=== modified file 'client/mysql_upgrade.c'
--- a/client/mysql_upgrade.c 2009-10-26 11:35:42 +0000
+++ b/client/mysql_upgrade.c 2009-11-06 17:22:32 +0000
@@ -552,6 +552,7 @@ static int upgrade_already_done(void)
FILE *in;
char upgrade_info_file[FN_REFLEN]= {0};
char buf[sizeof(MYSQL_SERVER_VERSION)+1];
+ char *res;
if (get_upgrade_info_file_name(upgrade_info_file))
return 0; /* Could not get filename => not sure */
=== modified file 'client/mysqladmin.cc'
--- a/client/mysqladmin.cc 2009-10-26 11:35:42 +0000
+++ b/client/mysqladmin.cc 2009-11-06 17:22:32 +0000
@@ -22,6 +22,7 @@
#endif
#include <sys/stat.h>
#include <mysql.h>
+#include <sql_common.h>
#define ADMIN_VERSION "8.42"
#define MAX_MYSQL_VAR 512
@@ -353,6 +354,11 @@ int main(int argc,char *argv[])
if (sql_connect(&mysql, option_wait))
{
+ /*
+ We couldn't get an initial connection and will definitely exit.
+ The following just determines the exit-code we'll give.
+ */
+
unsigned int err= mysql_errno(&mysql);
if (err >= CR_MIN_ERROR && err <= CR_MAX_ERROR)
error= 1;
@@ -371,41 +377,79 @@ int main(int argc,char *argv[])
}
else
{
- while (!interrupted)
+ /*
+ --count=0 aborts right here. Otherwise iff --sleep=t ("interval")
+ is given a t!=0, we get an endless loop, or n iterations if --count=n
+ was given an n!=0. If --sleep wasn't given, we get one iteration.
+
+ To wit, --wait loops the connection-attempts, while --sleep loops
+ the command execution (endlessly if no --count is given).
+ */
+
+ while (!interrupted && (!opt_count_iterations || nr_iterations))
{
new_line = 0;
- if ((error=execute_commands(&mysql,argc,commands)))
+
+ if ((error= execute_commands(&mysql,argc,commands)))
{
+ /*
+ Unknown/malformed command always aborts and can't be --forced.
+ If the user got confused about the syntax, proceeding would be
+ dangerous ...
+ */
if (error > 0)
- break; /* Wrong command error */
- if (!option_force)
+ break;
+
+ /*
+ Command was well-formed, but failed on the server. Might succeed
+ on retry (if conditions on server change etc.), but needs --force
+ to retry.
+ */
+ if (!option_force)
+ break;
+ } /* if((error= ... */
+
+ if (interval) /* --sleep=interval given */
+ {
+ /*
+ If connection was dropped (unintentionally, or due to SHUTDOWN),
+ re-establish it if --wait ("retry-connect") was given and user
+ didn't signal for us to die. Otherwise, signal failure.
+ */
+
+ if (mysql.net.vio == 0)
{
if (option_wait && !interrupted)
{
- mysql_close(&mysql);
- if (!sql_connect(&mysql, option_wait))
- {
- sleep(1); /* Don't retry too rapidly */
- continue; /* Retry */
- }
+ sleep(1);
+ sql_connect(&mysql, option_wait);
+ /*
+ continue normally and decrease counters so that
+ "mysqladmin --count=1 --wait=1 shutdown"
+ cannot loop endlessly.
+ */
}
- error=1;
- break;
- }
- }
- if (interval)
- {
- if (opt_count_iterations && --nr_iterations == 0)
- break;
+ else
+ {
+ /*
+ connexion broke, and we have no order to re-establish it. fail.
+ */
+ if (!option_force)
+ error= 1;
+ break;
+ }
+ } /* lost connection */
+
sleep(interval);
if (new_line)
puts("");
}
else
- break;
- }
- mysql_close(&mysql);
- }
+ break; /* no --sleep, done looping */
+ } /* command-loop */
+ } /* got connection */
+
+ mysql_close(&mysql);
my_free(opt_password,MYF(MY_ALLOW_ZERO_PTR));
my_free(user,MYF(MY_ALLOW_ZERO_PTR));
#ifdef HAVE_SMEM
@@ -423,6 +467,17 @@ sig_handler endprog(int signal_number __
interrupted=1;
}
+/**
+ @brief connect to server, optionally waiting for same to come up
+
+ @param mysql connection struct
+ @param wait wait for server to come up?
+ (0: no, ~0: forever, n: cycles)
+
+ @return Operation result
+ @retval 0 success
+ @retval 1 failure
+*/
static my_bool sql_connect(MYSQL *mysql, uint wait)
{
@@ -431,7 +486,7 @@ static my_bool sql_connect(MYSQL *mysql,
for (;;)
{
if (mysql_real_connect(mysql,host,user,opt_password,NullS,tcp_port,
- unix_port, 0))
+ unix_port, CLIENT_REMEMBER_OPTIONS))
{
mysql->reconnect= 1;
if (info)
@@ -442,9 +497,9 @@ static my_bool sql_connect(MYSQL *mysql,
return 0;
}
- if (!wait)
+ if (!wait) // was or reached 0, fail
{
- if (!option_silent)
+ if (!option_silent) // print diagnostics
{
if (!host)
host= (char*) LOCAL_HOST;
@@ -468,11 +523,18 @@ static my_bool sql_connect(MYSQL *mysql,
}
return 1;
}
+
if (wait != (uint) ~0)
- wait--; /* One less retry */
+ wait--; /* count down, one less retry */
+
if ((mysql_errno(mysql) != CR_CONN_HOST_ERROR) &&
(mysql_errno(mysql) != CR_CONNECTION_ERROR))
{
+ /*
+ Error is worse than "server doesn't answer (yet?)";
+ fail even if we still have "wait-coins" unless --force
+ was also given.
+ */
fprintf(stderr,"Got error: %s\n", mysql_error(mysql));
if (!option_force)
return 1;
@@ -496,11 +558,18 @@ static my_bool sql_connect(MYSQL *mysql,
}
-/*
- Execute a command.
- Return 0 on ok
- -1 on retryable error
- 1 on fatal error
+/**
+ @brief Execute all commands
+
+ @details We try to execute all commands we were given, in the order
+ given, but return with non-zero as soon as we encounter trouble.
+ By that token, individual commands can be considered a conjunction
+ with boolean short-cut.
+
+ @return success?
+ @retval 0 Yes! ALL commands worked!
+ @retval 1 No, one failed and will never work (malformed): fatal error!
+ @retval -1 No, one failed on the server, may work next time!
*/
static int execute_commands(MYSQL *mysql,int argc, char **argv)
@@ -570,7 +639,6 @@ static int execute_commands(MYSQL *mysql
mysql_error(mysql));
return -1;
}
- mysql_close(mysql); /* Close connection to avoid error messages */
argc=1; /* force SHUTDOWN to be the last command */
if (got_pidfile)
{
@@ -1036,14 +1104,16 @@ static void usage(void)
static int drop_db(MYSQL *mysql, const char *db)
{
char name_buff[FN_REFLEN+20], buf[10];
+ char *input;
+
if (!option_force)
{
puts("Dropping the database is potentially a very bad thing to do.");
puts("Any data stored in the database will be destroyed.\n");
printf("Do you really want to drop the '%s' database [y/N] ",db);
fflush(stdout);
- if (fgets(buf,sizeof(buf)-1,stdin) == 0 ||
- (*buf != 'y') && (*buf != 'Y'))
+ input= fgets(buf, sizeof(buf)-1, stdin);
+ if (!input || ((*input != 'y') && (*input != 'Y')))
{
puts("\nOK, aborting database drop!");
return -1;
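
The restructured main loop in mysqladmin.cc above is easier to follow in isolation. What follows is a minimal C sketch (not the actual mysqladmin code) of the control flow the new comments describe, with hypothetical stand-ins try_connect() and run_commands() in place of the real connection and command logic: --wait retries the initial connection, --sleep repeats command execution, --count bounds the number of iterations, and --force allows retrying after server-side failures.

#include <stdbool.h>
#include <unistd.h>

/* Hypothetical stand-ins for the real connection and command code. */
static bool try_connect(void);   /* one connection attempt; true on success        */
static int  run_commands(void);  /* 0 = ok, >0 = malformed (fatal), <0 = retryable */

/* wait               = --wait  : extra connection retries (0 = fail at once)
   interval           = --sleep : seconds between iterations (0 = run once)
   count_given, count = --count : iteration limit (count 0 = abort right away)
   force              = --force : keep going after retryable server errors */
static int admin_loop(unsigned wait, unsigned interval,
                      bool count_given, unsigned count, bool force)
{
  while (!try_connect())           /* --wait loops the connection attempts */
  {
    if (wait == 0)
      return 1;
    wait--;
    sleep(1);                      /* don't retry too rapidly */
  }

  while (!count_given || count)    /* --sleep loops the command execution */
  {
    int err= run_commands();
    if (err > 0)                   /* malformed command: never retried */
      return 1;
    if (err < 0 && !force)         /* failed on server: retrying needs --force */
      return 1;
    if (!interval)                 /* no --sleep given: exactly one iteration */
      break;
    if (count_given)
      count--;
    sleep(interval);
  }
  return 0;
}

/* Trivial stubs so the sketch compiles and runs on its own. */
static int attempts= 0;
static bool try_connect(void) { return ++attempts >= 3; }  /* succeed on try 3 */
static int  run_commands(void) { return 0; }               /* pretend commands pass */

int main(void)
{
  /* roughly "mysqladmin --wait=5 --sleep=1 --count=3 status" */
  return admin_loop(5, 1, true, 3, false);
}
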
=== modified file 'client/mysqldump.c'
--- a/client/mysqldump.c 2009-09-07 20:50:10 +0000
+++ b/client/mysqldump.c 2009-10-15 21:38:29 +0000
@@ -5008,7 +5008,7 @@ int main(int argc, char **argv)
exit_code= get_options(&argc, &argv);
if (exit_code)
{
- free_resources(0);
+ free_resources();
exit(exit_code);
}
@@ -5016,14 +5016,14 @@ int main(int argc, char **argv)
{
if(!(stderror_file= freopen(log_error_file, "a+", stderr)))
{
- free_resources(0);
+ free_resources();
exit(EX_MYSQLERR);
}
}
if (connect_to_db(current_host, current_user, opt_password))
{
- free_resources(0);
+ free_resources();
exit(EX_MYSQLERR);
}
if (!path)
=== modified file 'client/mysqltest.cc'
--- a/client/mysqltest.cc 2009-10-09 08:09:24 +0000
+++ b/client/mysqltest.cc 2009-10-15 21:52:31 +0000
@@ -443,10 +443,12 @@ DYNAMIC_STRING ds_res;
char builtin_echo[FN_REFLEN];
+static void cleanup_and_exit(int exit_code) __attribute__((noreturn));
+
void die(const char *fmt, ...)
- ATTRIBUTE_FORMAT(printf, 1, 2);
+ ATTRIBUTE_FORMAT(printf, 1, 2) __attribute__((noreturn));
void abort_not_supported_test(const char *fmt, ...)
- ATTRIBUTE_FORMAT(printf, 1, 2);
+ ATTRIBUTE_FORMAT(printf, 1, 2) __attribute__((noreturn));
void verbose_msg(const char *fmt, ...)
ATTRIBUTE_FORMAT(printf, 1, 2);
void log_msg(const char *fmt, ...)
@@ -3737,10 +3739,9 @@ void do_wait_for_slave_to_stop(struct st
MYSQL* mysql = &cur_con->mysql;
for (;;)
{
- MYSQL_RES *res;
+ MYSQL_RES *UNINIT_VAR(res);
MYSQL_ROW row;
int done;
- LINT_INIT(res);
if (mysql_query(mysql,"show status like 'Slave_running'") ||
!(res=mysql_store_result(mysql)))
@@ -5275,13 +5276,12 @@ my_bool end_of_query(int c)
int read_line(char *buf, int size)
{
- char c, last_quote;
+ char c, UNINIT_VAR(last_quote);
char *p= buf, *buf_end= buf + size - 1;
int skip_char= 0;
enum {R_NORMAL, R_Q, R_SLASH_IN_Q,
R_COMMENT, R_LINE_START} state= R_LINE_START;
DBUG_ENTER("read_line");
- LINT_INIT(last_quote);
start_lineno= cur_file->lineno;
DBUG_PRINT("info", ("Starting to read at lineno: %d", start_lineno));
@@ -6530,8 +6530,7 @@ void run_query_normal(struct st_connecti
if (!disable_result_log)
{
- ulonglong affected_rows; /* Ok to be undef if 'disable_info' is set */
- LINT_INIT(affected_rows);
+ ulonglong UNINIT_VAR(affected_rows); /* Ok to be undef if 'disable_info' is set */
if (res)
{
=== modified file 'cmd-line-utils/readline/bind.c'
--- a/cmd-line-utils/readline/bind.c 2009-09-07 20:50:10 +0000
+++ b/cmd-line-utils/readline/bind.c 2009-10-15 21:38:29 +0000
@@ -339,9 +339,7 @@ rl_generic_bind (type, keyseq, data, map
char *keys;
int keys_len;
register int i;
- KEYMAP_ENTRY k;
-
- k.function = 0;
+ KEYMAP_ENTRY k= { 0, NULL };
/* If no keys to bind to, exit right away. */
if (keyseq == 0 || *keyseq == 0)
@@ -776,7 +774,7 @@ _rl_read_file (filename, sizep)
file_size = (size_t)finfo.st_size;
/* check for overflow on very large files */
-if ((sizeof(off_t) > sizeof(size_t) && finfo.st_size > (off_t)(size_t)~0) ||
+ if ((sizeof(off_t) > sizeof(size_t) && finfo.st_size > (off_t)(size_t)~0) ||
file_size + 1 < file_size)
{
if (file >= 0)
=== modified file 'cmd-line-utils/readline/histfile.c'
--- a/cmd-line-utils/readline/histfile.c 2009-06-29 14:00:47 +0000
+++ b/cmd-line-utils/readline/histfile.c 2009-08-28 16:21:54 +0000
@@ -186,7 +186,7 @@ read_history_range (filename, from, to)
file_size = (size_t)finfo.st_size;
/* check for overflow on very large files */
-if ((sizeof(off_t) > sizeof(size_t) && finfo.st_size > (off_t)(size_t)~0) ||
+ if ((sizeof(off_t) > sizeof(size_t) && finfo.st_size > (off_t)(size_t)~0) ||
file_size + 1 < file_size)
{
errno = overflow_errno;
@@ -311,6 +311,7 @@ history_truncate_file (fname, lines)
int file, chars_read, rv;
struct stat finfo;
size_t file_size;
+ size_t bytes_written;
buffer = (char *)NULL;
filename = history_filename (fname);
@@ -340,7 +341,7 @@ history_truncate_file (fname, lines)
file_size = (size_t)finfo.st_size;
/* check for overflow on very large files */
-if ((sizeof(off_t) > sizeof(size_t) && finfo.st_size > (off_t)(size_t)~0) ||
+ if ((sizeof(off_t) > sizeof(size_t) && finfo.st_size > (off_t)(size_t)~0) ||
file_size + 1 < file_size)
{
close (file);
@@ -400,7 +401,7 @@ if ((sizeof(off_t) > sizeof(size_t) && f
truncate to. */
if (bp > buffer && ((file = open (filename, O_WRONLY|O_TRUNC|O_BINARY, 0600)) != -1))
{
- write (file, bp, chars_read - (bp - buffer));
+ bytes_written= write (file, bp, chars_read - (bp - buffer));
#if defined (__BEOS__)
/* BeOS ignores O_TRUNC. */
=== modified file 'cmd-line-utils/readline/undo.c'
--- a/cmd-line-utils/readline/undo.c 2009-06-29 14:00:47 +0000
+++ b/cmd-line-utils/readline/undo.c 2009-08-28 16:21:54 +0000
@@ -137,7 +137,8 @@ UNDO_LIST *
_rl_copy_undo_list (head)
UNDO_LIST *head;
{
- UNDO_LIST *list, *new, *roving, *c;
+ UNDO_LIST *list, *new, *c;
+ UNDO_LIST *roving= NULL;
list = head;
new = 0;
=== modified file 'extra/libevent/event-internal.h'
--- a/extra/libevent/event-internal.h 2009-03-12 22:27:35 +0000
+++ b/extra/libevent/event-internal.h 2009-10-31 05:29:16 +0000
@@ -70,6 +70,11 @@ struct event_base {
/* Internal use only: Functions that might be missing from <sys/queue.h> */
#ifndef HAVE_TAILQFOREACH
+/* These following macros are copied from BSD sys/queue.h
+ Copyright (c) 1991, 1993, The Regents of the University of California.
+ All rights reserved.
+*/
+#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_END(head) NULL
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
=== modified file 'extra/yassl/taocrypt/src/twofish.cpp'
--- a/extra/yassl/taocrypt/src/twofish.cpp 2007-01-29 15:54:40 +0000
+++ b/extra/yassl/taocrypt/src/twofish.cpp 2009-10-30 18:50:56 +0000
@@ -55,6 +55,7 @@ void Twofish::Process(byte* out, const b
in += BLOCK_SIZE;
}
else if (mode_ == CBC)
+ {
if (dir_ == ENCRYPTION)
while (blocks--) {
r_[0] ^= *(word32*)in;
@@ -82,6 +83,7 @@ void Twofish::Process(byte* out, const b
out += BLOCK_SIZE;
in += BLOCK_SIZE;
}
+ }
}
#endif // DO_TWOFISH_ASM
=== modified file 'include/my_global.h'
--- a/include/my_global.h 2009-09-16 19:05:03 +0000
+++ b/include/my_global.h 2009-10-15 21:38:29 +0000
@@ -578,6 +578,25 @@ int __void__;
#define IF_VALGRIND(A,B) (B)
#endif
+/*
+ Suppress uninitialized variable warning without generating code.
+
+ The _cplusplus is a temporary workaround for C++ code pending a fix
+ for a g++ bug (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34772).
+*/
+#if defined(_lint) || defined(FORCE_INIT_OF_VARS) || defined(__cplusplus) || \
+ !defined(__GNUC__)
+#define UNINIT_VAR(x) x= 0
+#else
+#define UNINIT_VAR(x) x= x
+#endif
+
+/* Define some useful general macros */
+#if !defined(max)
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
#if !defined(HAVE_UINT)
#undef HAVE_UINT
#define HAVE_UINT
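
For illustration, a minimal C sketch of how the UNINIT_VAR() and min/max macros added above behave; it uses a simplified form of the same definitions, and the test program itself is not part of the patch. UNINIT_VAR() replaces the old LINT_INIT() pattern used in the mysqltest.cc and libmysql.c hunks of this merge.

#include <stdio.h>

/* Simplified form of the my_global.h definitions above: on gcc C code the
   self-assignment silences the "may be used uninitialized" warning without
   generating any code; everywhere else the variable is zero-initialized. */
#if defined(__GNUC__) && !defined(__cplusplus)
#define UNINIT_VAR(x) x= x
#else
#define UNINIT_VAR(x) x= 0
#endif

#if !defined(max)
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

int main(void)
{
  /* Declared the same way as the converted spots in this merge, e.g.
     "char *UNINIT_VAR(end);" in libmysql.c: the variable is only read
     on paths that assign it first, so the compiler warning is noise. */
  int UNINIT_VAR(value);
  int have_value= 1;

  if (have_value)
    value= 42;
  if (have_value)
    printf("value=%d min=%d max=%d\n", value, min(value, 7), max(value, 7));
  return 0;
}

Unlike LINT_INIT(), the macro keeps the declaration and the suppression on one line, while the zero-initializing branch remains in effect for lint builds, FORCE_INIT_OF_VARS, and C++ (pending the g++ bug referenced in the comment).
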
=== modified file 'libmysql/libmysql.c'
--- a/libmysql/libmysql.c 2009-10-23 16:48:54 +0000
+++ b/libmysql/libmysql.c 2009-11-06 17:22:32 +0000
@@ -1648,8 +1648,7 @@ myodbc_remove_escape(MYSQL *mysql,char *
char *to;
#ifdef USE_MB
my_bool use_mb_flag=use_mb(mysql->charset);
- char *end;
- LINT_INIT(end);
+ char *UNINIT_VAR(end);
if (use_mb_flag)
for (end=name; *end ; end++) ;
#endif
=== modified file 'libmysqld/Makefile.am'
--- a/libmysqld/Makefile.am 2009-09-15 10:46:35 +0000
+++ b/libmysqld/Makefile.am 2009-10-30 18:50:56 +0000
@@ -124,7 +124,7 @@ handler.o: handler.cc
# found to append fileslists that collected by configure
# to the sources list
-ha_federated.o:ha_federated.cc
+ha_federatedx.o:ha_federatedx.cc
$(CXXCOMPILE) $(LM_CFLAGS) -c $<
ha_heap.o:ha_heap.cc
=== modified file 'mysql-test/collections/default.experimental'
--- a/mysql-test/collections/default.experimental 2009-07-08 07:31:49 +0000
+++ b/mysql-test/collections/default.experimental 2009-08-13 20:45:01 +0000
@@ -2,12 +2,5 @@ funcs_1.charset_collation_1
binlog.binlog_tmp_table # Bug#45578: Test binlog_tmp_table fails ramdonly on PB2: Unknown table 't2'
main.ctype_gbk_binlog # Bug#46010: main.ctype_gbk_binlog fails sporadically : Table 't2' already exists
rpl.rpl_row_create_table # Bug#45576: rpl_row_create_table fails on PB2
-rpl.rpl_extraColmaster_myisam # Bug#46013: rpl_extraColmaster_myisam fails on pb2
-rpl.rpl_stm_reset_slave # Bug#46014: rpl_stm_reset_slave crashes the server sporadically in pb2
-rpl.rpl_extraCol_myisam # Bug#40796
-rpl.rpl_extraColmaster_innodb # Bug#40796
-rpl.rpl_extraCol_innodb # Bug#40796
rpl_ndb.rpl_ndb_log # Bug#38998
rpl.rpl_innodb_bug28430 # Bug#46029
-rpl.rpl_row_basic_3innodb # Bug#45243
-rpl.rpl_truncate_3innodb # Bug#46030
=== added file 'mysql-test/extra/rpl_tests/rpl_mixing_engines.test'
--- a/mysql-test/extra/rpl_tests/rpl_mixing_engines.test 1970-01-01 00:00:00 +0000
+++ b/mysql-test/extra/rpl_tests/rpl_mixing_engines.test 2009-08-27 12:46:29 +0000
@@ -0,0 +1,710 @@
+###################################################################################
+# This test checks if transactions that mixes transactional and non-transactional
+# tables are correctly handled in statement mode. In an nutshell, we have what
+# follows:
+#
+# 1) "B T T C" generates in binlog the "B T T C" entries.
+#
+# 2) "B T T R" generates in binlog an "empty" entry.
+#
+# 3) "B T N C" generates in binlog the "B T N C" entries.
+#
+# 4) "B T N R" generates in binlog the "B T N R" entries.
+#
+# 5) "T" generates in binlog the "B T C" entry.
+#
+# 6) "N" generates in binlog the "N" entry.
+#
+# 7) "M" generates in binglog the "B M C" entries.
+#
+# 8) "B N N T C" generates in binglog the "N N B T C" entries.
+#
+# 9) "B N N T R" generates in binlog the "N N B T R" entries.
+#
+# 10) "B N N C" generates in binglog the "N N" entries.
+#
+# 11) "B N N R" generates in binlog the "N N" entries.
+#
+# 12) "B M T C" generates in the binlog the "B M T C" entries.
+#
+# 13) "B M T R" generates in the binlog the "B M T R" entries.
+###################################################################################
+
+--echo ###################################################################################
+--echo # CONFIGURATION
+--echo ###################################################################################
+connection master;
+
+SET SQL_LOG_BIN=0;
+CREATE TABLE nt_1 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_2 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_3 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_4 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE tt_1 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_2 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_3 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_4 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+SET SQL_LOG_BIN=1;
+
+connection slave;
+
+SET SQL_LOG_BIN=0;
+CREATE TABLE nt_1 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_2 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_3 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_4 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE tt_1 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_2 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_3 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_4 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+SET SQL_LOG_BIN=1;
+
+connection master;
+
+DELIMITER |;
+
+CREATE FUNCTION f1 () RETURNS VARCHAR(64)
+BEGIN
+ RETURN "Testing...";
+END|
+
+CREATE FUNCTION f2 () RETURNS VARCHAR(64)
+BEGIN
+ RETURN f1();
+END|
+
+CREATE PROCEDURE pc_i_tt_3 (IN x INT, IN y VARCHAR(64))
+BEGIN
+ INSERT INTO tt_3 VALUES (y,x,x);
+END|
+
+CREATE TRIGGER tr_i_tt_3_to_nt_3 BEFORE INSERT ON tt_3 FOR EACH ROW
+BEGIN
+ INSERT INTO nt_3 VALUES (NEW.a, NEW.b, NEW.c);
+END|
+
+CREATE TRIGGER tr_i_nt_4_to_tt_4 BEFORE INSERT ON nt_4 FOR EACH ROW
+BEGIN
+ INSERT INTO tt_4 VALUES (NEW.a, NEW.b, NEW.c);
+END|
+
+DELIMITER ;|
+
+--echo ###################################################################################
+--echo # MIXING TRANSACTIONAL and NON-TRANSACTIONAL TABLES
+--echo ###################################################################################
+connection master;
+
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #1) "B T T C" generates in binlog the "B T T C" entries.
+--echo #
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text 4", 4, "new text 4");
+INSERT INTO tt_2 VALUES ("new text 4", 4, "new text 4");
+COMMIT;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #1.e) "B T T C" with error in T generates in binlog the "B T T C" entries.
+--echo #
+INSERT INTO tt_1 VALUES ("new text -2", -2, "new text -2");
+BEGIN;
+--error ER_DUP_ENTRY
+INSERT INTO tt_1 VALUES ("new text -1", -1, "new text -1"), ("new text -2", -2, "new text -2");
+INSERT INTO tt_2 VALUES ("new text -3", -3, "new text -3");
+COMMIT;
+
+BEGIN;
+INSERT INTO tt_2 VALUES ("new text -5", -5, "new text -5");
+--error ER_DUP_ENTRY
+INSERT INTO tt_2 VALUES ("new text -4", -4, "new text -4"), ("new text -5", -5, "new text -5");
+COMMIT;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #2) "B T T R" generates in binlog an "empty" entry.
+--echo #
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text 5", 5, "new text 5");
+INSERT INTO tt_2 VALUES ("new text 5", 5, "new text 5");
+ROLLBACK;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #2.e) "B T T R" with error in T generates in binlog an "empty" entry.
+--echo #
+INSERT INTO tt_1 VALUES ("new text -7", -7, "new text -7");
+BEGIN;
+--error ER_DUP_ENTRY
+INSERT INTO tt_1 VALUES ("new text -6", -6, "new text -6"), ("new text -7", -7, "new text -7");
+INSERT INTO tt_2 VALUES ("new text -8", -8, "new text -8");
+ROLLBACK;
+
+BEGIN;
+INSERT INTO tt_2 VALUES ("new text -10", -10, "new text -10");
+--error ER_DUP_ENTRY
+INSERT INTO tt_2 VALUES ("new text -9", -9, "new text -9"), ("new text -10", -10, "new text -10");
+ROLLBACK;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #3) "B T N C" generates in binlog the "B T N C" entries.
+--echo #
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text 6", 6, "new text 6");
+INSERT INTO nt_1 VALUES ("new text 6", 6, "new text 6");
+COMMIT;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #3.e) "B T N C" with error in either T or N generates in binlog the "B T N C" entries.
+--echo #
+INSERT INTO tt_1 VALUES ("new text -12", -12, "new text -12");
+BEGIN;
+--error ER_DUP_ENTRY
+INSERT INTO tt_1 VALUES ("new text -11", -11, "new text -11"), ("new text -12", -12, "new text -12");
+INSERT INTO nt_1 VALUES ("new text -13", -13, "new text -13");
+COMMIT;
+
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text -14", -14, "new text -14");
+INSERT INTO nt_1 VALUES ("new text -16", -16, "new text -16");
+--error ER_DUP_ENTRY
+INSERT INTO nt_1 VALUES ("new text -15", -15, "new text -15"), ("new text -16", -16, "new text -16");
+COMMIT;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #4) "B T N R" generates in binlog the "B T N R" entries.
+--echo #
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text 7", 7, "new text 7");
+INSERT INTO nt_1 VALUES ("new text 7", 7, "new text 7");
+ROLLBACK;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #4.e) "B T N R" with error in either T or N generates in binlog the "B T N R" entries.
+--echo #
+INSERT INTO tt_1 VALUES ("new text -17", -17, "new text -17");
+BEGIN;
+--error ER_DUP_ENTRY
+INSERT INTO tt_1 VALUES ("new text -16", -16, "new text -16"), ("new text -17", -17, "new text -17");
+INSERT INTO nt_1 VALUES ("new text -18", -18, "new text -18");
+ROLLBACK;
+
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text -19", -19, "new text -19");
+INSERT INTO nt_1 VALUES ("new text -21", -21, "new text -21");
+--error ER_DUP_ENTRY
+INSERT INTO nt_1 VALUES ("new text -20", -20, "new text -20"), ("new text -21", -21, "new text -21");
+ROLLBACK;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #5) "T" generates in binlog the "B T C" entry.
+--echo #
+INSERT INTO tt_1 VALUES ("new text 8", 8, "new text 8");
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #5.e) "T" with error in T generates in binlog an "empty" entry.
+--echo #
+INSERT INTO tt_1 VALUES ("new text -1", -1, "new text -1");
+--error ER_DUP_ENTRY
+INSERT INTO tt_1 VALUES ("new text -1", -1, "new text -1"), ("new text -22", -22, "new text -22");
+--error ER_DUP_ENTRY
+INSERT INTO tt_1 VALUES ("new text -23", -23, "new text -23"), ("new text -1", -1, "new text -1");
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #6) "N" generates in binlog the "N" entry.
+--echo #
+INSERT INTO nt_1 VALUES ("new text 9", 9, "new text 9");
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #6.e) "N" with error in N generates in binlog an empty entry if the error
+--echo # happens in the first tuple. Otherwise, generates the "N" entry and
+--echo # the error is appended.
+--echo #
+INSERT INTO nt_1 VALUES ("new text -1", -1, "new text -1");
+--error ER_DUP_ENTRY
+INSERT INTO nt_1 VALUES ("new text -1", -1, "new text -1");
+--error ER_DUP_ENTRY
+INSERT INTO nt_1 VALUES ("new text -24", -24, "new text -24"), ("new text -1", -1, "new text -1");
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #7) "M" generates in binglog the "B M C" entries.
+--echo #
+
+DELETE FROM nt_1;
+
+INSERT INTO nt_1 SELECT * FROM tt_1;
+
+DELETE FROM tt_1;
+
+INSERT INTO tt_1 SELECT * FROM nt_1;
+
+INSERT INTO tt_3 VALUES ("new text 000", 000, '');
+
+INSERT INTO tt_3 VALUES("new text 100", 100, f1());
+
+INSERT INTO nt_4 VALUES("new text 100", 100, f1());
+
+INSERT INTO tt_3 VALUES("new text 200", 200, f2());
+
+INSERT INTO nt_4 VALUES ("new text 300", 300, '');
+
+INSERT INTO nt_4 VALUES ("new text 400", 400, f1());
+
+INSERT INTO nt_4 VALUES ("new text 500", 500, f2());
+
+CALL pc_i_tt_3(600, "Testing...");
+
+UPDATE nt_3, nt_4, tt_3, tt_4 SET nt_3.a= "new text 1", nt_4.a= "new text 1", tt_3.a= "new text 1", tt_4.a= "new text 1" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+
+UPDATE tt_3, tt_4, nt_3, nt_4 SET tt_3.a= "new text 2", tt_4.a= "new text 2", nt_3.a= "new text 2", nt_4.a = "new text 2" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 3", nt_3.a= "new text 3", nt_4.a= "new text 3", tt_4.a = "new text 3" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 4", nt_3.a= "new text 4", nt_4.a= "new text 4", tt_4.a = "new text 4" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #7.e) "M" with error in M generates in binglog the "B M R" entries.
+--echo #
+
+INSERT INTO nt_3 VALUES ("new text -26", -26, '');
+SELECT * FROM tt_3;
+--error ER_DUP_ENTRY
+INSERT INTO tt_3 VALUES ("new text -25", -25, ''), ("new text -26", -26, '');
+SELECT * FROM tt_3;
+
+INSERT INTO tt_4 VALUES ("new text -26", -26, '');
+SELECT * FROM nt_4;
+--error ER_DUP_ENTRY
+INSERT INTO nt_4 VALUES ("new text -25", -25, ''), ("new text -26", -26, '');
+SELECT * FROM nt_4;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #8) "B N N T C" generates in binglog the "N N B T C" entries.
+--echo #
+BEGIN;
+INSERT INTO nt_1 VALUES ("new text 10", 10, "new text 10");
+INSERT INTO nt_2 VALUES ("new text 10", 10, "new text 10");
+INSERT INTO tt_1 VALUES ("new text 10", 10, "new text 10");
+COMMIT;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+--echo #
+--echo #8.e) "B N N T R" See 6.e and 9.e.
+--echo #
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #9) "B N N T R" generates in binlog the "N N B T R" entries.
+--echo #
+BEGIN;
+INSERT INTO nt_1 VALUES ("new text 11", 11, "new text 11");
+INSERT INTO nt_2 VALUES ("new text 11", 11, "new text 11");
+INSERT INTO tt_1 VALUES ("new text 11", 11, "new text 11");
+ROLLBACK;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #9.e) "B N N T R" with error in N generates in binlog the "N N B T R" entries.
+--echo #
+BEGIN;
+INSERT INTO nt_1 VALUES ("new text -25", -25, "new text -25");
+INSERT INTO nt_2 VALUES ("new text -25", -25, "new text -25");
+--error ER_DUP_ENTRY
+INSERT INTO nt_2 VALUES ("new text -26", -26, "new text -26"), ("new text -25", -25, "new text -25");
+INSERT INTO tt_1 VALUES ("new text -27", -27, "new text -27");
+ROLLBACK;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #10) "B N N C" generates in binglog the "N N" entries.
+--echo #
+BEGIN;
+INSERT INTO nt_1 VALUES ("new text 12", 12, "new text 12");
+INSERT INTO nt_2 VALUES ("new text 12", 12, "new text 12");
+COMMIT;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+--echo #
+--echo #10.e) "B N N C" See 6.e and 9.e.
+--echo #
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #11) "B N N R" generates in binlog the "N N" entries.
+--echo #
+BEGIN;
+INSERT INTO nt_1 VALUES ("new text 13", 13, "new text 13");
+INSERT INTO nt_2 VALUES ("new text 13", 13, "new text 13");
+ROLLBACK;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+--echo #
+--echo #11.e) "B N N R" See 6.e and 9.e.
+--echo #
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #12) "B M T C" generates in the binlog the "B M T C" entries.
+--echo #
+DELETE FROM nt_1;
+BEGIN;
+INSERT INTO nt_1 SELECT * FROM tt_1;
+INSERT INTO tt_2 VALUES ("new text 14", 14, "new text 14");
+COMMIT;
+
+DELETE FROM tt_1;
+BEGIN;
+INSERT INTO tt_1 SELECT * FROM nt_1;
+INSERT INTO tt_2 VALUES ("new text 15", 15, "new text 15");
+COMMIT;
+
+BEGIN;
+INSERT INTO tt_3 VALUES ("new text 700", 700, '');
+INSERT INTO tt_1 VALUES ("new text 800", 800, '');
+COMMIT;
+
+BEGIN;
+INSERT INTO tt_3 VALUES("new text 900", 900, f1());
+INSERT INTO tt_1 VALUES ("new text 1000", 1000, '');
+COMMIT;
+
+BEGIN;
+INSERT INTO tt_3 VALUES(1100, 1100, f2());
+INSERT INTO tt_1 VALUES ("new text 1200", 1200, '');
+COMMIT;
+
+BEGIN;
+INSERT INTO nt_4 VALUES ("new text 1300", 1300, '');
+INSERT INTO tt_1 VALUES ("new text 1400", 1400, '');
+COMMIT;
+
+BEGIN;
+INSERT INTO nt_4 VALUES("new text 1500", 1500, f1());
+INSERT INTO tt_1 VALUES ("new text 1600", 1600, '');
+COMMIT;
+
+BEGIN;
+INSERT INTO nt_4 VALUES("new text 1700", 1700, f2());
+INSERT INTO tt_1 VALUES ("new text 1800", 1800, '');
+COMMIT;
+
+BEGIN;
+CALL pc_i_tt_3(1900, "Testing...");
+INSERT INTO tt_1 VALUES ("new text 2000", 2000, '');
+COMMIT;
+
+BEGIN;
+UPDATE nt_3, nt_4, tt_3, tt_4 SET nt_3.a= "new text 5", nt_4.a= "new text 5", tt_3.a= "new text 5", tt_4.a= "new text 5" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 2100", 2100, '');
+COMMIT;
+
+BEGIN;
+UPDATE tt_3, tt_4, nt_3, nt_4 SET tt_3.a= "new text 6", tt_4.a= "new text 6", nt_3.a= "new text 6", nt_4.a = "new text 6" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 2200", 2200, '');
+COMMIT;
+
+BEGIN;
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 7", nt_3.a= "new text 7", nt_4.a= "new text 7", tt_4.a = "new text 7" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 2300", 2300, '');
+COMMIT;
+
+BEGIN;
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 8", nt_3.a= "new text 8", nt_4.a= "new text 8", tt_4.a = "new text 8" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 2400", 2400, '');
+COMMIT;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #12.e) "B M T C" with error in M generates in the binlog the "B M T C" entries.
+--echo #
+
+--echo # There is a bug in the slave that needs to be fixed before enabling
+--echo # this part of the test. A bug report will be filed referencing this
+--echo # test case.
+
+BEGIN;
+INSERT INTO nt_3 VALUES ("new text -28", -28, '');
+--error ER_DUP_ENTRY
+INSERT INTO tt_3 VALUES ("new text -27", -27, ''), ("new text -28", -28, '');
+INSERT INTO tt_1 VALUES ("new text -27", -27, '');
+COMMIT;
+
+BEGIN;
+INSERT INTO tt_4 VALUES ("new text -28", -28, '');
+--error ER_DUP_ENTRY
+INSERT INTO nt_4 VALUES ("new text -27", -27, ''), ("new text -28", -28, '');
+INSERT INTO tt_1 VALUES ("new text -28", -28, '');
+COMMIT;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #13) "B M T R" generates in the binlog the "B M T R" entries
+--echo #
+
+DELETE FROM nt_1;
+BEGIN;
+INSERT INTO nt_1 SELECT * FROM tt_1;
+INSERT INTO tt_2 VALUES ("new text 17", 17, "new text 17");
+ROLLBACK;
+
+DELETE FROM tt_1;
+BEGIN;
+INSERT INTO tt_1 SELECT * FROM nt_1;
+INSERT INTO tt_2 VALUES ("new text 18", 18, "new text 18");
+ROLLBACK;
+INSERT INTO tt_1 SELECT * FROM nt_1;
+
+BEGIN;
+INSERT INTO tt_3 VALUES ("new text 2500", 2500, '');
+INSERT INTO tt_1 VALUES ("new text 2600", 2600, '');
+ROLLBACK;
+
+BEGIN;
+INSERT INTO tt_3 VALUES("new text 2700", 2700, f1());
+INSERT INTO tt_1 VALUES ("new text 2800", 2800, '');
+ROLLBACK;
+
+BEGIN;
+INSERT INTO tt_3 VALUES(2900, 2900, f2());
+INSERT INTO tt_1 VALUES ("new text 3000", 3000, '');
+ROLLBACK;
+
+BEGIN;
+INSERT INTO nt_4 VALUES ("new text 3100", 3100, '');
+INSERT INTO tt_1 VALUES ("new text 3200", 3200, '');
+ROLLBACK;
+
+BEGIN;
+INSERT INTO nt_4 VALUES("new text 3300", 3300, f1());
+INSERT INTO tt_1 VALUES ("new text 3400", 3400, '');
+ROLLBACK;
+
+BEGIN;
+INSERT INTO nt_4 VALUES("new text 3500", 3500, f2());
+INSERT INTO tt_1 VALUES ("new text 3600", 3600, '');
+ROLLBACK;
+
+BEGIN;
+CALL pc_i_tt_3(3700, "Testing...");
+INSERT INTO tt_1 VALUES ("new text 3700", 3700, '');
+ROLLBACK;
+
+BEGIN;
+UPDATE nt_3, nt_4, tt_3, tt_4 SET nt_3.a= "new text 9", nt_4.a= "new text 9", tt_3.a= "new text 9", tt_4.a= "new text 9" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 3800", 3800, '');
+ROLLBACK;
+
+BEGIN;
+UPDATE tt_3, tt_4, nt_3, nt_4 SET tt_3.a= "new text 10", tt_4.a= "new text 10", nt_3.a= "new text 10", nt_4.a = "new text 10" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 3900", 3900, '');
+ROLLBACK;
+
+BEGIN;
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 11", nt_3.a= "new text 11", nt_4.a= "new text 11", tt_4.a = "new text 11" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 4000", 4000, '');
+ROLLBACK;
+
+BEGIN;
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 12", nt_3.a= "new text 12", nt_4.a= "new text 12", tt_4.a = "new text 12" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 4100", 4100, '');
+ROLLBACK;
+
+--source include/show_binlog_events.inc
+
+--echo
+--echo
+--echo
+--echo
+let $binlog_start= query_get_value("SHOW MASTER STATUS", Position, 1);
+--echo #
+--echo #13.e) "B M T R" with error in M generates in the binlog the "B M T R" entries.
+--echo #
+
+BEGIN;
+INSERT INTO nt_3 VALUES ("new text -30", -30, '');
+--error ER_DUP_ENTRY
+INSERT INTO tt_3 VALUES ("new text -29", -29, ''), ("new text -30", -30, '');
+INSERT INTO tt_1 VALUES ("new text -30", -30, '');
+ROLLBACK;
+
+BEGIN;
+INSERT INTO tt_4 VALUES ("new text -30", -30, '');
+--error ER_DUP_ENTRY
+INSERT INTO nt_4 VALUES ("new text -29", -29, ''), ("new text -30", -30, '');
+INSERT INTO tt_1 VALUES ("new text -31", -31, '');
+ROLLBACK;
+
+--source include/show_binlog_events.inc
+
+connection master;
+sync_slave_with_master;
+
+--exec $MYSQL_DUMP --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/test-master.sql
+--exec $MYSQL_DUMP_SLAVE --compact --order-by-primary --skip-extended-insert --no-create-info test > $MYSQLTEST_VARDIR/tmp/test-slave.sql
+--diff_files $MYSQLTEST_VARDIR/tmp/test-master.sql $MYSQLTEST_VARDIR/tmp/test-slave.sql
+
+--echo ###################################################################################
+--echo # CLEAN
+--echo ###################################################################################
+
+connection master;
+DROP TABLE tt_1;
+DROP TABLE tt_2;
+DROP TABLE tt_3;
+DROP TABLE tt_4;
+DROP TABLE nt_1;
+DROP TABLE nt_2;
+DROP TABLE nt_3;
+DROP TABLE nt_4;
+DROP PROCEDURE pc_i_tt_3;
+DROP FUNCTION f1;
+DROP FUNCTION f2;
+
+sync_slave_with_master;
=== modified file 'mysql-test/include/commit.inc'
--- a/mysql-test/include/commit.inc 2009-06-19 11:27:24 +0000
+++ b/mysql-test/include/commit.inc 2009-08-26 23:13:03 +0000
@@ -725,9 +725,9 @@ call p_verify_status_increment(4, 4, 4,
alter table t3 add column (b int);
call p_verify_status_increment(2, 0, 2, 0);
alter table t3 rename t4;
-call p_verify_status_increment(1, 0, 1, 0);
+call p_verify_status_increment(2, 2, 2, 2);
rename table t4 to t3;
-call p_verify_status_increment(1, 0, 1, 0);
+call p_verify_status_increment(2, 2, 2, 2);
truncate table t3;
call p_verify_status_increment(4, 4, 4, 4);
create view v1 as select * from t2;
=== added file 'mysql-test/include/partition_date_range.inc'
--- a/mysql-test/include/partition_date_range.inc 1970-01-01 00:00:00 +0000
+++ b/mysql-test/include/partition_date_range.inc 2009-09-01 12:53:27 +0000
@@ -0,0 +1,69 @@
+# Created for verifying bug#20577.
+# expects TABLE t1 (... , a DATE, ...)
+
+--sorted_result
+SELECT * FROM t1 WHERE a < '1001-01-01';
+--sorted_result
+SELECT * FROM t1 WHERE a <= '1001-01-01';
+--sorted_result
+SELECT * FROM t1 WHERE a >= '1001-01-01';
+--sorted_result
+SELECT * FROM t1 WHERE a > '1001-01-01';
+--sorted_result
+SELECT * FROM t1 WHERE a = '1001-01-01';
+--sorted_result
+SELECT * FROM t1 WHERE a < '1001-00-00';
+--sorted_result
+SELECT * FROM t1 WHERE a <= '1001-00-00';
+--sorted_result
+SELECT * FROM t1 WHERE a >= '1001-00-00';
+--sorted_result
+SELECT * FROM t1 WHERE a > '1001-00-00';
+--sorted_result
+SELECT * FROM t1 WHERE a = '1001-00-00';
+--echo # Disabling warnings for the invalid date
+--disable_warnings
+--sorted_result
+SELECT * FROM t1 WHERE a < '1999-02-31';
+--sorted_result
+SELECT * FROM t1 WHERE a <= '1999-02-31';
+--sorted_result
+SELECT * FROM t1 WHERE a >= '1999-02-31';
+--sorted_result
+SELECT * FROM t1 WHERE a > '1999-02-31';
+--sorted_result
+SELECT * FROM t1 WHERE a = '1999-02-31';
+--enable_warnings
+--sorted_result
+SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
+--sorted_result
+SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
+--sorted_result
+SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
+--sorted_result
+SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
+if ($explain_partitions)
+{
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-01-01';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-01-01';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-01-01';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-01-01';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-01-01';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-00-00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-00-00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-00-00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-00-00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-00-00';
+--echo # Disabling warnings for the invalid date
+--disable_warnings
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1999-02-31';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1999-02-31';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1999-02-31';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1999-02-31';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1999-02-31';
+--enable_warnings
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
+}
=== added file 'mysql-test/include/rpl_loaddata_charset.inc'
--- a/mysql-test/include/rpl_loaddata_charset.inc 1970-01-01 00:00:00 +0000
+++ b/mysql-test/include/rpl_loaddata_charset.inc 2009-08-12 05:31:56 +0000
@@ -0,0 +1,35 @@
+connection master;
+--disable_warnings
+DROP DATABASE IF EXISTS mysqltest;
+--enable_warnings
+
+CREATE DATABASE mysqltest CHARSET UTF8;
+USE mysqltest;
+CREATE TABLE t (cl varchar(100)) CHARSET UTF8;
+
+if (!$LOAD_LOCAL)
+{
+ LOAD DATA INFILE '../../std_data/loaddata_utf8.dat' INTO TABLE t
+ FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';
+}
+if ($LOAD_LOCAL)
+{
+ LOAD DATA LOCAL INFILE './std_data/loaddata_utf8.dat' INTO TABLE t
+ FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';
+}
+
+save_master_pos;
+echo ----------content on master----------;
+SELECT hex(cl) FROM t;
+
+connection slave;
+sync_with_master;
+echo ----------content on slave----------;
+USE mysqltest;
+SELECT hex(cl) FROM t;
+
+connection master;
+DROP DATABASE mysqltest;
+save_master_pos;
+connection slave;
+sync_with_master;
=== modified file 'mysql-test/lib/mtr_cases.pm'
--- a/mysql-test/lib/mtr_cases.pm 2009-10-14 08:09:56 +0000
+++ b/mysql-test/lib/mtr_cases.pm 2009-11-06 17:24:38 +0000
@@ -249,6 +249,10 @@ sub collect_one_suite($)
$suitedir= my_find_dir($::basedir,
["mysql-test/suite",
"mysql-test",
+ "share/mysql-test/suite",
+ "share/mysql-test",
+ "share/mysql/mysql-test/suite",
+ "share/mysql/mysql-test",
# Look in storage engine specific suite dirs
"storage/*/mysql-test-suites"
],
=== modified file 'mysql-test/mysql-test-run.pl'
--- a/mysql-test/mysql-test-run.pl 2009-10-26 11:35:42 +0000
+++ b/mysql-test/mysql-test-run.pl 2009-11-06 17:24:38 +0000
@@ -127,7 +127,6 @@ my $path_config_file; # The ge
our $opt_vs_config = $ENV{'MTR_VS_CONFIG'};
my $DEFAULT_SUITES= "binlog,federated,main,maria,rpl,innodb,parts";
-my $opt_suites;
our $opt_usage;
our $opt_list_options;
@@ -1001,6 +1000,12 @@ sub command_line_setup {
{
$basedir= dirname($basedir);
}
+ # For .deb, it's like RPM, but installed in /usr/share/mysql/mysql-test.
+ # So move up one more directory level yet.
+ if ( ! $source_dist and ! -d "$basedir/bin" )
+ {
+ $basedir= dirname($basedir);
+ }
# Look for the client binaries directory
if ($path_client_bindir)
=== modified file 'mysql-test/r/alter_table.result'
--- a/mysql-test/r/alter_table.result 2009-06-07 10:05:19 +0000
+++ b/mysql-test/r/alter_table.result 2009-10-28 07:52:34 +0000
@@ -143,16 +143,6 @@ t1 1 n4 1 n4 A NULL NULL NULL YES BTREE
t1 1 n4 2 n1 A NULL NULL NULL BTREE disabled
t1 1 n4 3 n2 A NULL NULL NULL YES BTREE disabled
t1 1 n4 4 n3 A NULL NULL NULL YES BTREE disabled
-insert into t1 values(10,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(9,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(8,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(7,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(6,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(5,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(4,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(3,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(2,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(1,RAND()*1000,RAND()*1000,RAND());
alter table t1 enable keys;
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
@@ -183,106 +173,6 @@ i int(10) unsigned NO PRI NULL auto_incr
c char(10) YES NULL
drop table t1;
create table t1 (a int, b int);
-insert into t1 values(1,100), (2,100), (3, 100);
-insert into t1 values(1,99), (2,99), (3, 99);
-insert into t1 values(1,98), (2,98), (3, 98);
-insert into t1 values(1,97), (2,97), (3, 97);
-insert into t1 values(1,96), (2,96), (3, 96);
-insert into t1 values(1,95), (2,95), (3, 95);
-insert into t1 values(1,94), (2,94), (3, 94);
-insert into t1 values(1,93), (2,93), (3, 93);
-insert into t1 values(1,92), (2,92), (3, 92);
-insert into t1 values(1,91), (2,91), (3, 91);
-insert into t1 values(1,90), (2,90), (3, 90);
-insert into t1 values(1,89), (2,89), (3, 89);
-insert into t1 values(1,88), (2,88), (3, 88);
-insert into t1 values(1,87), (2,87), (3, 87);
-insert into t1 values(1,86), (2,86), (3, 86);
-insert into t1 values(1,85), (2,85), (3, 85);
-insert into t1 values(1,84), (2,84), (3, 84);
-insert into t1 values(1,83), (2,83), (3, 83);
-insert into t1 values(1,82), (2,82), (3, 82);
-insert into t1 values(1,81), (2,81), (3, 81);
-insert into t1 values(1,80), (2,80), (3, 80);
-insert into t1 values(1,79), (2,79), (3, 79);
-insert into t1 values(1,78), (2,78), (3, 78);
-insert into t1 values(1,77), (2,77), (3, 77);
-insert into t1 values(1,76), (2,76), (3, 76);
-insert into t1 values(1,75), (2,75), (3, 75);
-insert into t1 values(1,74), (2,74), (3, 74);
-insert into t1 values(1,73), (2,73), (3, 73);
-insert into t1 values(1,72), (2,72), (3, 72);
-insert into t1 values(1,71), (2,71), (3, 71);
-insert into t1 values(1,70), (2,70), (3, 70);
-insert into t1 values(1,69), (2,69), (3, 69);
-insert into t1 values(1,68), (2,68), (3, 68);
-insert into t1 values(1,67), (2,67), (3, 67);
-insert into t1 values(1,66), (2,66), (3, 66);
-insert into t1 values(1,65), (2,65), (3, 65);
-insert into t1 values(1,64), (2,64), (3, 64);
-insert into t1 values(1,63), (2,63), (3, 63);
-insert into t1 values(1,62), (2,62), (3, 62);
-insert into t1 values(1,61), (2,61), (3, 61);
-insert into t1 values(1,60), (2,60), (3, 60);
-insert into t1 values(1,59), (2,59), (3, 59);
-insert into t1 values(1,58), (2,58), (3, 58);
-insert into t1 values(1,57), (2,57), (3, 57);
-insert into t1 values(1,56), (2,56), (3, 56);
-insert into t1 values(1,55), (2,55), (3, 55);
-insert into t1 values(1,54), (2,54), (3, 54);
-insert into t1 values(1,53), (2,53), (3, 53);
-insert into t1 values(1,52), (2,52), (3, 52);
-insert into t1 values(1,51), (2,51), (3, 51);
-insert into t1 values(1,50), (2,50), (3, 50);
-insert into t1 values(1,49), (2,49), (3, 49);
-insert into t1 values(1,48), (2,48), (3, 48);
-insert into t1 values(1,47), (2,47), (3, 47);
-insert into t1 values(1,46), (2,46), (3, 46);
-insert into t1 values(1,45), (2,45), (3, 45);
-insert into t1 values(1,44), (2,44), (3, 44);
-insert into t1 values(1,43), (2,43), (3, 43);
-insert into t1 values(1,42), (2,42), (3, 42);
-insert into t1 values(1,41), (2,41), (3, 41);
-insert into t1 values(1,40), (2,40), (3, 40);
-insert into t1 values(1,39), (2,39), (3, 39);
-insert into t1 values(1,38), (2,38), (3, 38);
-insert into t1 values(1,37), (2,37), (3, 37);
-insert into t1 values(1,36), (2,36), (3, 36);
-insert into t1 values(1,35), (2,35), (3, 35);
-insert into t1 values(1,34), (2,34), (3, 34);
-insert into t1 values(1,33), (2,33), (3, 33);
-insert into t1 values(1,32), (2,32), (3, 32);
-insert into t1 values(1,31), (2,31), (3, 31);
-insert into t1 values(1,30), (2,30), (3, 30);
-insert into t1 values(1,29), (2,29), (3, 29);
-insert into t1 values(1,28), (2,28), (3, 28);
-insert into t1 values(1,27), (2,27), (3, 27);
-insert into t1 values(1,26), (2,26), (3, 26);
-insert into t1 values(1,25), (2,25), (3, 25);
-insert into t1 values(1,24), (2,24), (3, 24);
-insert into t1 values(1,23), (2,23), (3, 23);
-insert into t1 values(1,22), (2,22), (3, 22);
-insert into t1 values(1,21), (2,21), (3, 21);
-insert into t1 values(1,20), (2,20), (3, 20);
-insert into t1 values(1,19), (2,19), (3, 19);
-insert into t1 values(1,18), (2,18), (3, 18);
-insert into t1 values(1,17), (2,17), (3, 17);
-insert into t1 values(1,16), (2,16), (3, 16);
-insert into t1 values(1,15), (2,15), (3, 15);
-insert into t1 values(1,14), (2,14), (3, 14);
-insert into t1 values(1,13), (2,13), (3, 13);
-insert into t1 values(1,12), (2,12), (3, 12);
-insert into t1 values(1,11), (2,11), (3, 11);
-insert into t1 values(1,10), (2,10), (3, 10);
-insert into t1 values(1,9), (2,9), (3, 9);
-insert into t1 values(1,8), (2,8), (3, 8);
-insert into t1 values(1,7), (2,7), (3, 7);
-insert into t1 values(1,6), (2,6), (3, 6);
-insert into t1 values(1,5), (2,5), (3, 5);
-insert into t1 values(1,4), (2,4), (3, 4);
-insert into t1 values(1,3), (2,3), (3, 3);
-insert into t1 values(1,2), (2,2), (3, 2);
-insert into t1 values(1,1), (2,1), (3, 1);
alter table t1 add unique (a,b), add key (b);
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
=== modified file 'mysql-test/r/analyse.result'
--- a/mysql-test/r/analyse.result 2007-05-30 16:25:16 +0000
+++ b/mysql-test/r/analyse.result 2009-08-27 10:59:25 +0000
@@ -28,9 +28,7 @@ test.t1.bool N Y 1 1 0 0 1.0000 NULL ENU
test.t1.d 2002-03-03 2002-03-05 10 10 0 0 10.0000 NULL ENUM('2002-03-03','2002-03-04','2002-03-05') NOT NULL
drop table t1,t2;
EXPLAIN SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE();
-id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> system NULL NULL NULL NULL 1
-2 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
+ERROR HY000: Incorrect usage of PROCEDURE and subquery
create table t1 (a int not null);
create table t2 select * from t1 where 0=1 procedure analyse();
show create table t2;
@@ -153,4 +151,9 @@ select f3 from t1 procedure analyse(1, 1
Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype
test.t1.f3 5.99999 9.55555 7 7 0 0 7.77777 1.77778 FLOAT(6,5) NOT NULL
drop table t1;
+CREATE TABLE t1(a INT,b INT,c INT,d INT,e INT,f INT,g INT,h INT,i INT,j INT,k INT);
+INSERT INTO t1 VALUES ();
+SELECT * FROM (SELECT * FROM t1) d PROCEDURE ANALYSE();
+ERROR HY000: Incorrect usage of PROCEDURE and subquery
+DROP TABLE t1;
End of 4.1 tests
=== modified file 'mysql-test/r/auto_increment.result'
--- a/mysql-test/r/auto_increment.result 2009-02-05 09:49:32 +0000
+++ b/mysql-test/r/auto_increment.result 2009-08-20 12:30:59 +0000
@@ -462,3 +462,17 @@ select last_insert_id();
last_insert_id()
3
drop table t1;
+#
+# Bug#46616: Assertion `!table->auto_increment_field_not_null' on view
+# manipulations
+#
+CREATE TABLE t1 ( a INT );
+INSERT INTO t1 VALUES (1), (1);
+CREATE TABLE t2 ( a INT AUTO_INCREMENT KEY );
+CREATE TABLE IF NOT EXISTS t2 AS SELECT a FROM t1;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+UPDATE t2 SET a = 2;
+SELECT a FROM t2;
+a
+2
+DROP TABLE t1, t2;
=== modified file 'mysql-test/r/commit_1innodb.result'
--- a/mysql-test/r/commit_1innodb.result 2009-06-19 11:27:24 +0000
+++ b/mysql-test/r/commit_1innodb.result 2009-08-26 23:13:03 +0000
@@ -841,17 +841,17 @@ call p_verify_status_increment(2, 0, 2,
SUCCESS
alter table t3 rename t4;
-call p_verify_status_increment(1, 0, 1, 0);
+call p_verify_status_increment(2, 2, 2, 2);
SUCCESS
rename table t4 to t3;
-call p_verify_status_increment(1, 0, 1, 0);
+call p_verify_status_increment(2, 2, 2, 2);
SUCCESS
truncate table t3;
call p_verify_status_increment(4, 4, 4, 4);
-ERROR
-Expected commit increment: 4 actual: 2
+SUCCESS
+
create view v1 as select * from t2;
call p_verify_status_increment(1, 0, 1, 0);
SUCCESS
=== added file 'mysql-test/r/disabled_partition.require'
--- a/mysql-test/r/disabled_partition.require 1970-01-01 00:00:00 +0000
+++ b/mysql-test/r/disabled_partition.require 2009-01-08 14:16:44 +0000
@@ -0,0 +1,2 @@
+Variable_name Value
+have_partitioning DISABLED
=== modified file 'mysql-test/r/func_misc.result'
--- a/mysql-test/r/func_misc.result 2009-06-11 16:21:32 +0000
+++ b/mysql-test/r/func_misc.result 2009-10-28 07:52:34 +0000
@@ -116,10 +116,6 @@ CREATE TEMPORARY TABLE t_history (attemp
start_ts DATETIME, end_ts DATETIME,
start_cached INTEGER, end_cached INTEGER);
CREATE TABLE t1 (f1 BIGINT);
-INSERT INTO t1 VALUES (1);
-INSERT INTO t1 VALUES (1);
-INSERT INTO t1 VALUES (1);
-INSERT INTO t1 VALUES (1);
INSERT INTO t_history
SET attempt = 4 - 4 + 1, start_ts = NOW(),
start_cached = 0;
=== modified file 'mysql-test/r/gis-rtree.result'
--- a/mysql-test/r/gis-rtree.result 2009-07-10 23:12:13 +0000
+++ b/mysql-test/r/gis-rtree.result 2009-10-28 07:52:34 +0000
@@ -12,156 +12,6 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`fid`),
SPATIAL KEY `g` (`g`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(150 150, 150 150)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(149 149, 151 151)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(148 148, 152 152)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(147 147, 153 153)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(146 146, 154 154)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(145 145, 155 155)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(144 144, 156 156)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(143 143, 157 157)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(142 142, 158 158)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(141 141, 159 159)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(140 140, 160 160)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(139 139, 161 161)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(138 138, 162 162)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(137 137, 163 163)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(136 136, 164 164)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(135 135, 165 165)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(134 134, 166 166)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(133 133, 167 167)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(132 132, 168 168)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(131 131, 169 169)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(130 130, 170 170)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(129 129, 171 171)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(128 128, 172 172)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(127 127, 173 173)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(126 126, 174 174)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(125 125, 175 175)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(124 124, 176 176)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(123 123, 177 177)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(122 122, 178 178)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(121 121, 179 179)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(120 120, 180 180)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(119 119, 181 181)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(118 118, 182 182)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(117 117, 183 183)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(116 116, 184 184)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(115 115, 185 185)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(114 114, 186 186)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(113 113, 187 187)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(112 112, 188 188)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(111 111, 189 189)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(110 110, 190 190)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(109 109, 191 191)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(108 108, 192 192)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(107 107, 193 193)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(106 106, 194 194)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(105 105, 195 195)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(104 104, 196 196)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(103 103, 197 197)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(102 102, 198 198)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(101 101, 199 199)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(100 100, 200 200)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(99 99, 201 201)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(98 98, 202 202)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(97 97, 203 203)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(96 96, 204 204)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(95 95, 205 205)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(94 94, 206 206)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(93 93, 207 207)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(92 92, 208 208)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(91 91, 209 209)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(90 90, 210 210)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(89 89, 211 211)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(88 88, 212 212)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(87 87, 213 213)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(86 86, 214 214)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(85 85, 215 215)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(84 84, 216 216)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(83 83, 217 217)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(82 82, 218 218)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(81 81, 219 219)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(80 80, 220 220)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(79 79, 221 221)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(78 78, 222 222)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(77 77, 223 223)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(76 76, 224 224)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(75 75, 225 225)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(74 74, 226 226)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(73 73, 227 227)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(72 72, 228 228)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(71 71, 229 229)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(70 70, 230 230)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(69 69, 231 231)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(68 68, 232 232)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(67 67, 233 233)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(66 66, 234 234)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(65 65, 235 235)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(64 64, 236 236)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(63 63, 237 237)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(62 62, 238 238)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(61 61, 239 239)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(60 60, 240 240)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(59 59, 241 241)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(58 58, 242 242)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(57 57, 243 243)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(56 56, 244 244)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(55 55, 245 245)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(54 54, 246 246)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(53 53, 247 247)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(52 52, 248 248)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(51 51, 249 249)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(50 50, 250 250)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(49 49, 251 251)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(48 48, 252 252)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(47 47, 253 253)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(46 46, 254 254)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(45 45, 255 255)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(44 44, 256 256)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(43 43, 257 257)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(42 42, 258 258)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(41 41, 259 259)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(40 40, 260 260)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(39 39, 261 261)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(38 38, 262 262)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(37 37, 263 263)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(36 36, 264 264)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(35 35, 265 265)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(34 34, 266 266)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(33 33, 267 267)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(32 32, 268 268)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(31 31, 269 269)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(30 30, 270 270)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(29 29, 271 271)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(28 28, 272 272)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(27 27, 273 273)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(26 26, 274 274)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(25 25, 275 275)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(24 24, 276 276)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(23 23, 277 277)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(22 22, 278 278)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(21 21, 279 279)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(20 20, 280 280)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(19 19, 281 281)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(18 18, 282 282)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(17 17, 283 283)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(16 16, 284 284)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(15 15, 285 285)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(14 14, 286 286)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(13 13, 287 287)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(12 12, 288 288)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(11 11, 289 289)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(10 10, 290 290)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(9 9, 291 291)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(8 8, 292 292)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(7 7, 293 293)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(6 6, 294 294)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(5 5, 295 295)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(4 4, 296 296)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(3 3, 297 297)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(2 2, 298 298)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(1 1, 299 299)'));
SELECT count(*) FROM t1;
count(*)
150
@@ -186,106 +36,6 @@ CREATE TABLE t2 (
fid INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
g GEOMETRY NOT NULL
) ENGINE=MyISAM;
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 10 * 10 - 9), Point(10 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 9 * 10 - 9), Point(10 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 8 * 10 - 9), Point(10 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 7 * 10 - 9), Point(10 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 6 * 10 - 9), Point(10 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 5 * 10 - 9), Point(10 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 4 * 10 - 9), Point(10 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 3 * 10 - 9), Point(10 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 2 * 10 - 9), Point(10 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 1 * 10 - 9), Point(10 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 10 * 10 - 9), Point(9 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 9 * 10 - 9), Point(9 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 8 * 10 - 9), Point(9 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 7 * 10 - 9), Point(9 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 6 * 10 - 9), Point(9 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 5 * 10 - 9), Point(9 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 4 * 10 - 9), Point(9 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 3 * 10 - 9), Point(9 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 2 * 10 - 9), Point(9 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 1 * 10 - 9), Point(9 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 10 * 10 - 9), Point(8 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 9 * 10 - 9), Point(8 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 8 * 10 - 9), Point(8 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 7 * 10 - 9), Point(8 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 6 * 10 - 9), Point(8 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 5 * 10 - 9), Point(8 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 4 * 10 - 9), Point(8 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 3 * 10 - 9), Point(8 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 2 * 10 - 9), Point(8 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 1 * 10 - 9), Point(8 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 10 * 10 - 9), Point(7 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 9 * 10 - 9), Point(7 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 8 * 10 - 9), Point(7 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 7 * 10 - 9), Point(7 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 6 * 10 - 9), Point(7 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 5 * 10 - 9), Point(7 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 4 * 10 - 9), Point(7 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 3 * 10 - 9), Point(7 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 2 * 10 - 9), Point(7 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 1 * 10 - 9), Point(7 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 10 * 10 - 9), Point(6 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 9 * 10 - 9), Point(6 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 8 * 10 - 9), Point(6 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 7 * 10 - 9), Point(6 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 6 * 10 - 9), Point(6 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 5 * 10 - 9), Point(6 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 4 * 10 - 9), Point(6 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 3 * 10 - 9), Point(6 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 2 * 10 - 9), Point(6 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 1 * 10 - 9), Point(6 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 10 * 10 - 9), Point(5 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 9 * 10 - 9), Point(5 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 8 * 10 - 9), Point(5 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 7 * 10 - 9), Point(5 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 6 * 10 - 9), Point(5 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 5 * 10 - 9), Point(5 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 4 * 10 - 9), Point(5 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 3 * 10 - 9), Point(5 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 2 * 10 - 9), Point(5 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 1 * 10 - 9), Point(5 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 10 * 10 - 9), Point(4 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 9 * 10 - 9), Point(4 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 8 * 10 - 9), Point(4 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 7 * 10 - 9), Point(4 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 6 * 10 - 9), Point(4 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 5 * 10 - 9), Point(4 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 4 * 10 - 9), Point(4 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 3 * 10 - 9), Point(4 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 2 * 10 - 9), Point(4 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 1 * 10 - 9), Point(4 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 10 * 10 - 9), Point(3 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 9 * 10 - 9), Point(3 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 8 * 10 - 9), Point(3 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 7 * 10 - 9), Point(3 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 6 * 10 - 9), Point(3 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 5 * 10 - 9), Point(3 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 4 * 10 - 9), Point(3 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 3 * 10 - 9), Point(3 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 2 * 10 - 9), Point(3 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 1 * 10 - 9), Point(3 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 10 * 10 - 9), Point(2 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 9 * 10 - 9), Point(2 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 8 * 10 - 9), Point(2 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 7 * 10 - 9), Point(2 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 6 * 10 - 9), Point(2 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 5 * 10 - 9), Point(2 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 4 * 10 - 9), Point(2 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 3 * 10 - 9), Point(2 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 2 * 10 - 9), Point(2 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 1 * 10 - 9), Point(2 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 10 * 10 - 9), Point(1 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 9 * 10 - 9), Point(1 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 8 * 10 - 9), Point(1 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 7 * 10 - 9), Point(1 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 6 * 10 - 9), Point(1 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 5 * 10 - 9), Point(1 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 4 * 10 - 9), Point(1 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 3 * 10 - 9), Point(1 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 2 * 10 - 9), Point(1 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 1 * 10 - 9), Point(1 * 10, 1 * 10)));
ALTER TABLE t2 ADD SPATIAL KEY(g);
SHOW CREATE TABLE t2;
Table Create Table
@@ -309,404 +59,204 @@ fid AsText(g)
56 LINESTRING(41 41,50 50)
45 LINESTRING(51 51,60 60)
55 LINESTRING(41 51,50 60)
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 10 * 10 - 9), Point(10 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 9 * 10 - 9), Point(10 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 8 * 10 - 9), Point(10 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 7 * 10 - 9), Point(10 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 6 * 10 - 9), Point(10 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 5 * 10 - 9), Point(10 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 4 * 10 - 9), Point(10 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 3 * 10 - 9), Point(10 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 2 * 10 - 9), Point(10 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 1 * 10 - 9), Point(10 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 10 * 10 - 9), Point(9 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 9 * 10 - 9), Point(9 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 8 * 10 - 9), Point(9 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 7 * 10 - 9), Point(9 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 6 * 10 - 9), Point(9 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 5 * 10 - 9), Point(9 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 4 * 10 - 9), Point(9 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 3 * 10 - 9), Point(9 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 2 * 10 - 9), Point(9 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 1 * 10 - 9), Point(9 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 10 * 10 - 9), Point(8 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 9 * 10 - 9), Point(8 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 8 * 10 - 9), Point(8 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 7 * 10 - 9), Point(8 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 6 * 10 - 9), Point(8 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 5 * 10 - 9), Point(8 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 4 * 10 - 9), Point(8 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 3 * 10 - 9), Point(8 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 2 * 10 - 9), Point(8 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 1 * 10 - 9), Point(8 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 10 * 10 - 9), Point(7 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 9 * 10 - 9), Point(7 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 8 * 10 - 9), Point(7 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 7 * 10 - 9), Point(7 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 6 * 10 - 9), Point(7 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 5 * 10 - 9), Point(7 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 4 * 10 - 9), Point(7 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 3 * 10 - 9), Point(7 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 2 * 10 - 9), Point(7 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 1 * 10 - 9), Point(7 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 10 * 10 - 9), Point(6 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 9 * 10 - 9), Point(6 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 8 * 10 - 9), Point(6 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 7 * 10 - 9), Point(6 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 6 * 10 - 9), Point(6 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 5 * 10 - 9), Point(6 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 4 * 10 - 9), Point(6 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 3 * 10 - 9), Point(6 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 2 * 10 - 9), Point(6 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 1 * 10 - 9), Point(6 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 10 * 10 - 9), Point(5 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 9 * 10 - 9), Point(5 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 8 * 10 - 9), Point(5 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 7 * 10 - 9), Point(5 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 6 * 10 - 9), Point(5 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 5 * 10 - 9), Point(5 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 4 * 10 - 9), Point(5 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 3 * 10 - 9), Point(5 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 2 * 10 - 9), Point(5 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 1 * 10 - 9), Point(5 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 10 * 10 - 9), Point(4 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 9 * 10 - 9), Point(4 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 8 * 10 - 9), Point(4 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 7 * 10 - 9), Point(4 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 6 * 10 - 9), Point(4 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 5 * 10 - 9), Point(4 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 4 * 10 - 9), Point(4 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 3 * 10 - 9), Point(4 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 2 * 10 - 9), Point(4 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 1 * 10 - 9), Point(4 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 10 * 10 - 9), Point(3 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 9 * 10 - 9), Point(3 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 8 * 10 - 9), Point(3 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 7 * 10 - 9), Point(3 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 6 * 10 - 9), Point(3 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 5 * 10 - 9), Point(3 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 4 * 10 - 9), Point(3 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 3 * 10 - 9), Point(3 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 2 * 10 - 9), Point(3 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 1 * 10 - 9), Point(3 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 10 * 10 - 9), Point(2 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 9 * 10 - 9), Point(2 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 8 * 10 - 9), Point(2 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 7 * 10 - 9), Point(2 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 6 * 10 - 9), Point(2 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 5 * 10 - 9), Point(2 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 4 * 10 - 9), Point(2 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 3 * 10 - 9), Point(2 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 2 * 10 - 9), Point(2 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 1 * 10 - 9), Point(2 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 10 * 10 - 9), Point(1 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 9 * 10 - 9), Point(1 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 8 * 10 - 9), Point(1 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 7 * 10 - 9), Point(1 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 6 * 10 - 9), Point(1 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 5 * 10 - 9), Point(1 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 4 * 10 - 9), Point(1 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 3 * 10 - 9), Point(1 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 2 * 10 - 9), Point(1 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 1 * 10 - 9), Point(1 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
count(*)
100
DROP TABLE t2;
=== modified file 'mysql-test/r/group_min_max.result'
--- a/mysql-test/r/group_min_max.result 2009-07-13 17:36:54 +0000
+++ b/mysql-test/r/group_min_max.result 2009-08-30 07:03:37 +0000
@@ -2502,3 +2502,15 @@ a MAX(b)
2 1
DROP TABLE t;
End of 5.0 tests
+#
+# Bug #46607: Assertion failed: (cond_type == Item::FUNC_ITEM) results in
+# server crash
+#
+CREATE TABLE t (a INT, b INT, INDEX (a,b));
+INSERT INTO t VALUES (2,0), (2,0), (2,1), (2,1);
+INSERT INTO t SELECT * FROM t;
+SELECT a, MAX(b) FROM t WHERE b GROUP BY a;
+a MAX(b)
+2 1
+DROP TABLE t;
+End of 5.1 tests
=== modified file 'mysql-test/r/handler_myisam.result'
--- a/mysql-test/r/handler_myisam.result 2009-07-10 23:12:13 +0000
+++ b/mysql-test/r/handler_myisam.result 2009-08-21 05:55:35 +0000
@@ -741,3 +741,19 @@ USE information_schema;
HANDLER COLUMNS OPEN;
ERROR HY000: Incorrect usage of HANDLER OPEN and information_schema
USE test;
+#
+# BUG #46456: HANDLER OPEN + TRUNCATE + DROP (temporary) TABLE, crash
+#
+CREATE TABLE t1 AS SELECT 1 AS f1;
+HANDLER t1 OPEN;
+TRUNCATE t1;
+HANDLER t1 READ FIRST;
+ERROR 42S02: Unknown table 't1' in HANDLER
+DROP TABLE t1;
+CREATE TEMPORARY TABLE t1 AS SELECT 1 AS f1;
+HANDLER t1 OPEN;
+TRUNCATE t1;
+HANDLER t1 READ FIRST;
+ERROR 42S02: Unknown table 't1' in HANDLER
+DROP TABLE t1;
+End of 5.1 tests
=== modified file 'mysql-test/r/innodb_xtradb_bug317074.result'
--- a/mysql-test/r/innodb_xtradb_bug317074.result 2009-09-08 16:04:58 +0000
+++ b/mysql-test/r/innodb_xtradb_bug317074.result 2009-10-28 07:52:34 +0000
@@ -3,3 +3,30 @@ SET @old_innodb_file_per_table=@@innodb_
SET @old_innodb_file_format_check=@@innodb_file_format_check;
SET GLOBAL innodb_file_format='Barracuda';
SET GLOBAL innodb_file_per_table=ON;
+DROP TABLE IF EXISTS `test1`;
+CREATE TABLE IF NOT EXISTS `test1` (
+`a` int primary key auto_increment,
+`b` int default 0,
+`c` char(100) default 'testtest'
+) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
+set autocommit=0;
+CREATE PROCEDURE insert_many(p1 int)
+BEGIN
+SET @x = 0;
+SET @y = 0;
+REPEAT
+insert into test1 set b=1;
+SET @x = @x + 1;
+SET @y = @y + 1;
+IF @y >= 1000 THEN
+commit;
+SET @y = 0;
+END IF;
+UNTIL @x >= p1 END REPEAT;
+END|
+DROP PROCEDURE insert_many;
+ALTER TABLE test1 ENGINE=MyISAM;
+DROP TABLE test1;
+SET GLOBAL innodb_file_format=@old_innodb_file_format;
+SET GLOBAL innodb_file_per_table=@old_innodb_file_per_table;
+SET GLOBAL innodb_file_format_check=@old_innodb_file_format_check;
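(Note: insert_many above is a simple batch-commit loop — with autocommit=0 it issues a COMMIT after every 1,000 inserted rows. The recorded result does not show the call itself; a hypothetical invocation would look roughly like:

    -- insert 10000 rows, committing every 1000; final COMMIT covers the tail
    CALL insert_many(10000);
    COMMIT;
    SELECT COUNT(*) FROM test1;
)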
=== modified file 'mysql-test/r/lock_multi_bug38499.result'
--- a/mysql-test/r/lock_multi_bug38499.result 2009-03-23 14:22:31 +0000
+++ b/mysql-test/r/lock_multi_bug38499.result 2009-08-28 21:49:16 +0000
@@ -1,3 +1,5 @@
+SET @odl_sync_frm = @@global.sync_frm;
+SET @@global.sync_frm = OFF;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1( a INT, b INT );
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4);
@@ -17,3 +19,4 @@ ALTER TABLE t1 ADD COLUMN a INT;
# 2.2.1. normal mode
# 2.2.2. PS mode
DROP TABLE t1;
+SET @@global.sync_frm = @odl_sync_frm;
=== modified file 'mysql-test/r/lock_multi_bug38691.result'
--- a/mysql-test/r/lock_multi_bug38691.result 2009-03-23 14:22:31 +0000
+++ b/mysql-test/r/lock_multi_bug38691.result 2009-08-28 21:49:16 +0000
@@ -1,3 +1,5 @@
+SET @odl_sync_frm = @@global.sync_frm;
+SET @@global.sync_frm = OFF;
DROP TABLE IF EXISTS t1,t2,t3;
CREATE TABLE t1 (
a int(11) unsigned default NULL,
@@ -15,3 +17,4 @@ CREATE TABLE t3 SELECT * FROM t1;
# normal mode
# PS mode
DROP TABLE t1, t2, t3;
+SET @@global.sync_frm = @odl_sync_frm;
=== modified file 'mysql-test/r/merge.result'
--- a/mysql-test/r/merge.result 2009-09-07 20:50:10 +0000
+++ b/mysql-test/r/merge.result 2009-11-06 17:22:32 +0000
@@ -1846,56 +1846,6 @@ c1
DROP TABLE t1, t2, t3;
CREATE TABLE t1 (id INTEGER, grp TINYINT, id_rev INTEGER);
SET @rnd_max= 2147483647;
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
set @@read_buffer_size=2*1024*1024;
CREATE TABLE t2 SELECT * FROM t1;
INSERT INTO t1 (id, grp, id_rev) SELECT id, grp, id_rev FROM t2;
@@ -2209,4 +2159,16 @@ ERROR HY000: Table storage engine for 'm
DROP TABLE m1,t1,t2,t3,t4,t5,t6,t7;
SELECT 1 FROM m1;
ERROR 42S02: Table 'test.m1' doesn't exist
+#
+# Bug #46614: Assertion in show_create_trigger()
+#
+CREATE TABLE t1(a int);
+CREATE TABLE t2(a int);
+CREATE TABLE t3(a int) ENGINE = MERGE UNION(t1, t2);
+CREATE TRIGGER tr1 AFTER INSERT ON t3 FOR EACH ROW CALL foo();
+SHOW CREATE TRIGGER tr1;
+Trigger sql_mode SQL Original Statement character_set_client collation_connection Database Collation
+tr1 CREATE DEFINER=`root`@`localhost` TRIGGER tr1 AFTER INSERT ON t3 FOR EACH ROW CALL foo() latin1 latin1_swedish_ci latin1_swedish_ci
+DROP TRIGGER tr1;
+DROP TABLE t1, t2, t3;
End of 5.1 tests
=== modified file 'mysql-test/r/myisam_debug.result'
--- a/mysql-test/r/myisam_debug.result 2009-04-30 11:03:44 +0000
+++ b/mysql-test/r/myisam_debug.result 2009-10-28 07:52:34 +0000
@@ -12,16 +12,6 @@ CREATE TABLE `t2` (
KEY (id1), KEY(id)
) ENGINE=MyISAM;
INSERT INTO t2 (id) VALUES (123);
-INSERT INTO t2 (id) SELECT id FROM t2;
-INSERT INTO t2 (id) SELECT id FROM t2;
-INSERT INTO t2 (id) SELECT id FROM t2;
-INSERT INTO t2 (id) SELECT id FROM t2;
-INSERT INTO t2 (id) SELECT id FROM t2;
-INSERT INTO t2 (id) SELECT id FROM t2;
-INSERT INTO t2 (id) SELECT id FROM t2;
-INSERT INTO t2 (id) SELECT id FROM t2;
-INSERT INTO t2 (id) SELECT id FROM t2;
-INSERT INTO t2 (id) SELECT id FROM t2;
# Switch to insert Connection
SET SESSION debug='+d,wait_in_enable_indexes';
# Send insert data
=== modified file 'mysql-test/r/mysqlbinlog_row_trans.result'
--- a/mysql-test/r/mysqlbinlog_row_trans.result 2009-02-10 21:26:37 +0000
+++ b/mysql-test/r/mysqlbinlog_row_trans.result 2009-08-27 09:32:27 +0000
@@ -215,14 +215,30 @@ COMMIT/*!*/;
# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
+BEGIN
+/*!*/;
+# at #
+#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
+SET TIMESTAMP=1000000000/*!*/;
TRUNCATE TABLE t1
/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Xid = #
+COMMIT/*!*/;
+# at #
+#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
+SET TIMESTAMP=1000000000/*!*/;
+BEGIN
+/*!*/;
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
TRUNCATE TABLE t1
/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Xid = #
+COMMIT/*!*/;
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
BEGIN
@@ -331,9 +347,17 @@ COMMIT/*!*/;
# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
+BEGIN
+/*!*/;
+# at #
+#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
+SET TIMESTAMP=1000000000/*!*/;
TRUNCATE TABLE t1
/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Xid = #
+COMMIT/*!*/;
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
TRUNCATE TABLE t2
@@ -449,9 +473,17 @@ ROLLBACK
# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
+BEGIN
+/*!*/;
+# at #
+#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
+SET TIMESTAMP=1000000000/*!*/;
TRUNCATE TABLE t1
/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Xid = #
+COMMIT/*!*/;
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
TRUNCATE TABLE t2
=== modified file 'mysql-test/r/not_partition.result'
--- a/mysql-test/r/not_partition.result 2006-10-26 17:11:09 +0000
+++ b/mysql-test/r/not_partition.result 2009-01-08 14:16:44 +0000
@@ -1,3 +1,48 @@
+DROP TABLE IF EXISTS t1;
+FLUSH TABLES;
+SELECT * FROM t1;
+ERROR 42000: Unknown table engine 'partition'
+TRUNCATE TABLE t1;
+ERROR 42000: Unknown table engine 'partition'
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze Error Unknown table engine 'partition'
+test.t1 analyze error Corrupt
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check Error Unknown table engine 'partition'
+test.t1 check error Corrupt
+OPTIMIZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 optimize Error Unknown table engine 'partition'
+test.t1 optimize error Corrupt
+REPAIR TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 repair Error Unknown table engine 'partition'
+test.t1 repair error Corrupt
+ALTER TABLE t1 REPAIR PARTITION ALL;
+Table Op Msg_type Msg_text
+test.t1 repair Error Unknown table engine 'partition'
+test.t1 repair error Corrupt
+ALTER TABLE t1 CHECK PARTITION ALL;
+Table Op Msg_type Msg_text
+test.t1 check Error Unknown table engine 'partition'
+test.t1 check error Corrupt
+ALTER TABLE t1 OPTIMIZE PARTITION ALL;
+Table Op Msg_type Msg_text
+test.t1 optimize Error Unknown table engine 'partition'
+test.t1 optimize error Corrupt
+ALTER TABLE t1 ANALYZE PARTITION ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze Error Unknown table engine 'partition'
+test.t1 analyze error Corrupt
+ALTER TABLE t1 REBUILD PARTITION ALL;
+ERROR 42000: Unknown table engine 'partition'
+ALTER TABLE t1 ENGINE Memory;
+ERROR 42000: Unknown table engine 'partition'
+ALTER TABLE t1 ADD (new INT);
+ERROR 42000: Unknown table engine 'partition'
+DROP TABLE t1;
CREATE TABLE t1 (
firstname VARCHAR(25) NOT NULL,
lastname VARCHAR(25) NOT NULL,
=== modified file 'mysql-test/r/partition.result'
--- a/mysql-test/r/partition.result 2009-09-07 20:50:10 +0000
+++ b/mysql-test/r/partition.result 2009-10-15 21:38:29 +0000
@@ -1,4 +1,55 @@
drop table if exists t1, t2;
+CREATE TABLE t1 (
+a int NOT NULL,
+b int NOT NULL);
+CREATE TABLE t2 (
+a int NOT NULL,
+b int NOT NULL,
+INDEX(b)
+)
+PARTITION BY HASH(a) PARTITIONS 2;
+INSERT INTO t1 VALUES (399, 22);
+INSERT INTO t2 VALUES (1, 22), (1, 42);
+INSERT INTO t2 SELECT 1, 399 FROM t2, t1
+WHERE t1.b = t2.b;
+DROP TABLE t1, t2;
+CREATE TABLE t1 (
+a timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+b varchar(10),
+PRIMARY KEY (a)
+)
+PARTITION BY RANGE (to_days(a)) (
+PARTITION p1 VALUES LESS THAN (733407),
+PARTITION pmax VALUES LESS THAN MAXVALUE
+);
+INSERT INTO t1 VALUES ('2007-07-30 17:35:48', 'p1');
+INSERT INTO t1 VALUES ('2009-07-14 17:35:55', 'pmax');
+INSERT INTO t1 VALUES ('2009-09-21 17:31:42', 'pmax');
+SELECT * FROM t1;
+a b
+2007-07-30 17:35:48 p1
+2009-07-14 17:35:55 pmax
+2009-09-21 17:31:42 pmax
+ALTER TABLE t1 REORGANIZE PARTITION pmax INTO (
+PARTITION p3 VALUES LESS THAN (733969),
+PARTITION pmax VALUES LESS THAN MAXVALUE);
+SELECT * FROM t1;
+a b
+2007-07-30 17:35:48 p1
+2009-07-14 17:35:55 pmax
+2009-09-21 17:31:42 pmax
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `b` varchar(10) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (to_days(a))
+(PARTITION p1 VALUES LESS THAN (733407) ENGINE = MyISAM,
+ PARTITION p3 VALUES LESS THAN (733969) ENGINE = MyISAM,
+ PARTITION pmax VALUES LESS THAN MAXVALUE ENGINE = MyISAM) */
+DROP TABLE t1;
CREATE TABLE t1 (a INT, FOREIGN KEY (a) REFERENCES t0 (a))
ENGINE=MyISAM
PARTITION BY HASH (a);
=== added file 'mysql-test/r/partition_disabled.result'
--- a/mysql-test/r/partition_disabled.result 1970-01-01 00:00:00 +0000
+++ b/mysql-test/r/partition_disabled.result 2009-01-08 14:16:44 +0000
@@ -0,0 +1,93 @@
+DROP TABLE IF EXISTS t1;
+FLUSH TABLES;
+SELECT * FROM t1;
+ERROR HY000: The MySQL server is running with the --skip-partition option so it cannot execute this statement
+TRUNCATE TABLE t1;
+ERROR HY000: The MySQL server is running with the --skip-partition option so it cannot execute this statement
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze Error The MySQL server is running with the --skip-partition option so it cannot execute this statement
+test.t1 analyze error Corrupt
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check Error The MySQL server is running with the --skip-partition option so it cannot execute this statement
+test.t1 check error Corrupt
+OPTIMIZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 optimize Error The MySQL server is running with the --skip-partition option so it cannot execute this statement
+test.t1 optimize error Corrupt
+REPAIR TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 repair Error The MySQL server is running with the --skip-partition option so it cannot execute this statement
+test.t1 repair error Corrupt
+ALTER TABLE t1 REPAIR PARTITION ALL;
+Table Op Msg_type Msg_text
+test.t1 repair Error The MySQL server is running with the --skip-partition option so it cannot execute this statement
+test.t1 repair error Corrupt
+ALTER TABLE t1 CHECK PARTITION ALL;
+Table Op Msg_type Msg_text
+test.t1 check Error The MySQL server is running with the --skip-partition option so it cannot execute this statement
+test.t1 check error Corrupt
+ALTER TABLE t1 OPTIMIZE PARTITION ALL;
+Table Op Msg_type Msg_text
+test.t1 optimize Error The MySQL server is running with the --skip-partition option so it cannot execute this statement
+test.t1 optimize error Corrupt
+ALTER TABLE t1 ANALYZE PARTITION ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze Error The MySQL server is running with the --skip-partition option so it cannot execute this statement
+test.t1 analyze error Corrupt
+ALTER TABLE t1 REBUILD PARTITION ALL;
+ERROR HY000: The MySQL server is running with the --skip-partition option so it cannot execute this statement
+ALTER TABLE t1 ENGINE Memory;
+ERROR HY000: The MySQL server is running with the --skip-partition option so it cannot execute this statement
+ALTER TABLE t1 ADD (new INT);
+ERROR HY000: The MySQL server is running with the --skip-partition option so it cannot execute this statement
+DROP TABLE t1;
+CREATE TABLE t1 (
+firstname VARCHAR(25) NOT NULL,
+lastname VARCHAR(25) NOT NULL,
+username VARCHAR(16) NOT NULL,
+email VARCHAR(35),
+joined DATE NOT NULL
+)
+PARTITION BY KEY(joined)
+PARTITIONS 6;
+ERROR HY000: The MySQL server is running with the --skip-partition option so it cannot execute this statement
+ALTER TABLE t1 PARTITION BY KEY(joined) PARTITIONS 2;
+ERROR HY000: The MySQL server is running with the --skip-partition option so it cannot execute this statement
+drop table t1;
+ERROR 42S02: Unknown table 't1'
+CREATE TABLE t1 (
+firstname VARCHAR(25) NOT NULL,
+lastname VARCHAR(25) NOT NULL,
+username VARCHAR(16) NOT NULL,
+email VARCHAR(35),
+joined DATE NOT NULL
+)
+PARTITION BY RANGE( YEAR(joined) ) (
+PARTITION p0 VALUES LESS THAN (1960),
+PARTITION p1 VALUES LESS THAN (1970),
+PARTITION p2 VALUES LESS THAN (1980),
+PARTITION p3 VALUES LESS THAN (1990),
+PARTITION p4 VALUES LESS THAN MAXVALUE
+);
+ERROR HY000: The MySQL server is running with the --skip-partition option so it cannot execute this statement
+drop table t1;
+ERROR 42S02: Unknown table 't1'
+CREATE TABLE t1 (id INT, purchased DATE)
+PARTITION BY RANGE( YEAR(purchased) )
+SUBPARTITION BY HASH( TO_DAYS(purchased) )
+SUBPARTITIONS 2 (
+PARTITION p0 VALUES LESS THAN (1990),
+PARTITION p1 VALUES LESS THAN (2000),
+PARTITION p2 VALUES LESS THAN MAXVALUE
+);
+ERROR HY000: The MySQL server is running with the --skip-partition option so it cannot execute this statement
+drop table t1;
+ERROR 42S02: Unknown table 't1'
+create table t1 (a varchar(10) charset latin1 collate latin1_bin);
+insert into t1 values (''),(' '),('a'),('a '),('a ');
+explain partitions select * from t1 where a='a ' OR a='a';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 5 Using where
+drop table t1;
=== modified file 'mysql-test/r/partition_pruning.result'
--- a/mysql-test/r/partition_pruning.result 2008-12-28 11:33:49 +0000
+++ b/mysql-test/r/partition_pruning.result 2009-09-01 12:53:27 +0000
@@ -1,4 +1,1282 @@
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+# test of RANGE and index
+CREATE TABLE t1 (a DATE, KEY(a))
+PARTITION BY RANGE (TO_DAYS(a))
+(PARTITION `pNULL` VALUES LESS THAN (0),
+PARTITION `p0001-01-01` VALUES LESS THAN (366 + 1),
+PARTITION `p1001-01-01` VALUES LESS THAN (TO_DAYS('1001-01-01') + 1),
+PARTITION `p2001-01-01` VALUES LESS THAN (TO_DAYS('2001-01-01') + 1));
+INSERT INTO t1 VALUES ('0000-00-00'), ('0000-01-02'), ('0001-01-01'),
+('1001-00-00'), ('1001-01-01'), ('1002-00-00'), ('2001-01-01');
+SELECT * FROM t1 WHERE a < '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+SELECT * FROM t1 WHERE a <= '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+SELECT * FROM t1 WHERE a >= '1001-01-01';
+a
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a > '1001-01-01';
+a
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a = '1001-01-01';
+a
+1001-01-01
+SELECT * FROM t1 WHERE a < '1001-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+SELECT * FROM t1 WHERE a <= '1001-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+SELECT * FROM t1 WHERE a >= '1001-00-00';
+a
+1001-00-00
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a > '1001-00-00';
+a
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a = '1001-00-00';
+a
+1001-00-00
+# Disabling warnings for the invalid date
+SELECT * FROM t1 WHERE a < '1999-02-31';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a <= '1999-02-31';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a >= '1999-02-31';
+a
+2001-01-01
+SELECT * FROM t1 WHERE a > '1999-02-31';
+a
+2001-01-01
+SELECT * FROM t1 WHERE a = '1999-02-31';
+a
+SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
+a
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
+a
+0001-01-01
+1001-00-00
+1001-01-01
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p2001-01-01 range a a 4 NULL 3 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1001-01-01 system a NULL NULL NULL 1
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL ref a a 4 const 1 Using where; Using index
+# Disabling warnings for the invalid date
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 5 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 5 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p2001-01-01 range a a 4 NULL 2 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p2001-01-01 range a a 4 NULL 2 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL ref a a 4 const 1 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 5 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 3 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+# test without index
+ALTER TABLE t1 DROP KEY a;
+SELECT * FROM t1 WHERE a < '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+SELECT * FROM t1 WHERE a <= '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+SELECT * FROM t1 WHERE a >= '1001-01-01';
+a
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a > '1001-01-01';
+a
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a = '1001-01-01';
+a
+1001-01-01
+SELECT * FROM t1 WHERE a < '1001-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+SELECT * FROM t1 WHERE a <= '1001-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+SELECT * FROM t1 WHERE a >= '1001-00-00';
+a
+1001-00-00
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a > '1001-00-00';
+a
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a = '1001-00-00';
+a
+1001-00-00
+# Disabling warnings for the invalid date
+SELECT * FROM t1 WHERE a < '1999-02-31';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a <= '1999-02-31';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a >= '1999-02-31';
+a
+2001-01-01
+SELECT * FROM t1 WHERE a > '1999-02-31';
+a
+2001-01-01
+SELECT * FROM t1 WHERE a = '1999-02-31';
+a
+SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
+a
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
+a
+0001-01-01
+1001-00-00
+1001-01-01
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p2001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1001-01-01 system NULL NULL NULL NULL 1
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL ALL NULL NULL NULL NULL 7 Using where
+# Disabling warnings for the invalid date
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p2001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p2001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+DROP TABLE t1;
+# test of LIST and index
+CREATE TABLE t1 (a DATE, KEY(a))
+PARTITION BY LIST (TO_DAYS(a))
+(PARTITION `p0001-01-01` VALUES IN (TO_DAYS('0001-01-01')),
+PARTITION `p2001-01-01` VALUES IN (TO_DAYS('2001-01-01')),
+PARTITION `pNULL` VALUES IN (NULL),
+PARTITION `p0000-01-02` VALUES IN (TO_DAYS('0000-01-02')),
+PARTITION `p1001-01-01` VALUES IN (TO_DAYS('1001-01-01')));
+INSERT INTO t1 VALUES ('0000-00-00'), ('0000-01-02'), ('0001-01-01'),
+('1001-00-00'), ('1001-01-01'), ('1002-00-00'), ('2001-01-01');
+SELECT * FROM t1 WHERE a < '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+SELECT * FROM t1 WHERE a <= '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+SELECT * FROM t1 WHERE a >= '1001-01-01';
+a
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a > '1001-01-01';
+a
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a = '1001-01-01';
+a
+1001-01-01
+SELECT * FROM t1 WHERE a < '1001-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+SELECT * FROM t1 WHERE a <= '1001-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+SELECT * FROM t1 WHERE a >= '1001-00-00';
+a
+1001-00-00
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a > '1001-00-00';
+a
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a = '1001-00-00';
+a
+1001-00-00
+# Disabling warnings for the invalid date
+SELECT * FROM t1 WHERE a < '1999-02-31';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a <= '1999-02-31';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a >= '1999-02-31';
+a
+2001-01-01
+SELECT * FROM t1 WHERE a > '1999-02-31';
+a
+2001-01-01
+SELECT * FROM t1 WHERE a = '1999-02-31';
+a
+SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
+a
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
+a
+0001-01-01
+1001-00-00
+1001-01-01
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 range a a 4 NULL 3 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL range a a 4 NULL 3 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1001-01-01 system a NULL NULL NULL 1
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 range a a 4 NULL 3 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 range a a 4 NULL 3 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL ref a a 4 const 1 Using where; Using index
+# Disabling warnings for the invalid date
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL range a a 4 NULL 2 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL range a a 4 NULL 2 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL ref a a 4 const 1 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p1001-01-01 range a a 4 NULL 2 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+# test without index
+ALTER TABLE t1 DROP KEY a;
+SELECT * FROM t1 WHERE a < '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+SELECT * FROM t1 WHERE a <= '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+SELECT * FROM t1 WHERE a >= '1001-01-01';
+a
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a > '1001-01-01';
+a
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a = '1001-01-01';
+a
+1001-01-01
+SELECT * FROM t1 WHERE a < '1001-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+SELECT * FROM t1 WHERE a <= '1001-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+SELECT * FROM t1 WHERE a >= '1001-00-00';
+a
+1001-00-00
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a > '1001-00-00';
+a
+1001-01-01
+1002-00-00
+2001-01-01
+SELECT * FROM t1 WHERE a = '1001-00-00';
+a
+1001-00-00
+# Disabling warnings for the invalid date
+SELECT * FROM t1 WHERE a < '1999-02-31';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a <= '1999-02-31';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a >= '1999-02-31';
+a
+2001-01-01
+SELECT * FROM t1 WHERE a > '1999-02-31';
+a
+2001-01-01
+SELECT * FROM t1 WHERE a = '1999-02-31';
+a
+SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
+a
+0000-00-00
+0000-01-02
+0001-01-01
+1001-00-00
+1001-01-01
+SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
+a
+1001-00-00
+1001-01-01
+1002-00-00
+SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
+a
+0001-01-01
+1001-00-00
+1001-01-01
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1001-01-01 system NULL NULL NULL NULL 1
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL ALL NULL NULL NULL NULL 7 Using where
+# Disabling warnings for the invalid date
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2001-01-01,pNULL ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1999-02-31';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pNULL,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0001-01-01,pNULL,p1001-01-01 ALL NULL NULL NULL NULL 7 Using where
+DROP TABLE t1;
+# Test with DATETIME column NOT NULL
+CREATE TABLE t1 (
+a int(10) unsigned NOT NULL,
+b DATETIME NOT NULL,
+PRIMARY KEY (a, b)
+) PARTITION BY RANGE (TO_DAYS(b))
+(PARTITION p20090401 VALUES LESS THAN (TO_DAYS('2009-04-02')),
+PARTITION p20090402 VALUES LESS THAN (TO_DAYS('2009-04-03')),
+PARTITION p20090403 VALUES LESS THAN (TO_DAYS('2009-04-04')),
+PARTITION p20090404 VALUES LESS THAN (TO_DAYS('2009-04-05')),
+PARTITION p20090405 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, '2009-01-01'), (1, '2009-04-01'), (2, '2009-04-01'),
+(1, '2009-04-02'), (2, '2009-04-02'), (1, '2009-04-02 23:59:59'),
+(1, '2009-04-03'), (2, '2009-04-03'), (1, '2009-04-04'), (2, '2009-04-04'),
+(1, '2009-04-05'), (1, '2009-04-06'), (1, '2009-04-07');
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 12 NULL 6 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 12 NULL 8 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 index NULL PRIMARY 12 NULL 8 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090402 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403,p20090404,p20090405 index NULL PRIMARY 12 NULL 13 Using where; Using index
+DROP TABLE t1;
+# Test with DATE column NOT NULL
+CREATE TABLE t1 (
+a int(10) unsigned NOT NULL,
+b DATE NOT NULL,
+PRIMARY KEY (a, b)
+) PARTITION BY RANGE (TO_DAYS(b))
+(PARTITION p20090401 VALUES LESS THAN (TO_DAYS('2009-04-02')),
+PARTITION p20090402 VALUES LESS THAN (TO_DAYS('2009-04-03')),
+PARTITION p20090403 VALUES LESS THAN (TO_DAYS('2009-04-04')),
+PARTITION p20090404 VALUES LESS THAN (TO_DAYS('2009-04-05')),
+PARTITION p20090405 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, '2009-01-01'), (1, '2009-04-01'), (2, '2009-04-01'),
+(1, '2009-04-02'), (2, '2009-04-02'), (1, '2009-04-03'), (2, '2009-04-03'),
+(1, '2009-04-04'), (2, '2009-04-04'), (1, '2009-04-05'), (1, '2009-04-06'),
+(1, '2009-04-07');
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 7 NULL 5 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 7 NULL 7 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 index NULL PRIMARY 7 NULL 7 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 index NULL PRIMARY 7 NULL 12 Using where; Using index
+DROP TABLE t1;
+# Test with DATETIME column NULL
+CREATE TABLE t1 (
+a int(10) unsigned NOT NULL,
+b DATETIME NULL
+) PARTITION BY RANGE (TO_DAYS(b))
+(PARTITION p20090401 VALUES LESS THAN (TO_DAYS('2009-04-02')),
+PARTITION p20090402 VALUES LESS THAN (TO_DAYS('2009-04-03')),
+PARTITION p20090403 VALUES LESS THAN (TO_DAYS('2009-04-04')),
+PARTITION p20090404 VALUES LESS THAN (TO_DAYS('2009-04-05')),
+PARTITION p20090405 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, '2009-01-01'), (1, '2009-04-01'), (2, '2009-04-01'),
+(1, '2009-04-02'), (2, '2009-04-02'), (1, '2009-04-02 23:59:59'),
+(1, '2009-04-03'), (2, '2009-04-03'), (1, '2009-04-04'), (2, '2009-04-04'),
+(1, '2009-04-05'), (1, '2009-04-06'), (1, '2009-04-07');
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 6 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 8 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 ALL NULL NULL NULL NULL 8 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090402 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 13 Using where
+DROP TABLE t1;
+# Test with DATE column NULL
+CREATE TABLE t1 (
+a int(10) unsigned NOT NULL,
+b DATE NULL
+) PARTITION BY RANGE (TO_DAYS(b))
+(PARTITION p20090401 VALUES LESS THAN (TO_DAYS('2009-04-02')),
+PARTITION p20090402 VALUES LESS THAN (TO_DAYS('2009-04-03')),
+PARTITION p20090403 VALUES LESS THAN (TO_DAYS('2009-04-04')),
+PARTITION p20090404 VALUES LESS THAN (TO_DAYS('2009-04-05')),
+PARTITION p20090405 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, '2009-01-01'), (1, '2009-04-01'), (2, '2009-04-01'),
+(1, '2009-04-02'), (2, '2009-04-02'), (1, '2009-04-03'), (2, '2009-04-03'),
+(1, '2009-04-04'), (2, '2009-04-04'), (1, '2009-04-05'), (1, '2009-04-06'),
+(1, '2009-04-07');
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 5 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 ALL NULL NULL NULL NULL 7 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-02 23:59:59' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATE);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03 00:00:00';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-02 23:59:59';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090403 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03';
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402,p20090403 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-03 00:00:01' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b < CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b <= CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090402 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b = CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b >= CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN PARTITIONS SELECT * FROM t1
+WHERE b > CAST('2009-04-02 23:59:58' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401,p20090403,p20090404,p20090405 ALL NULL NULL NULL NULL 12 Using where
+DROP TABLE t1;
+# For better code coverage of the patch
+CREATE TABLE t1 (
+a int(10) unsigned NOT NULL,
+b DATE
+) PARTITION BY RANGE ( TO_DAYS(b) )
+(PARTITION p20090401 VALUES LESS THAN (TO_DAYS('2009-04-02')),
+PARTITION p20090402 VALUES LESS THAN (TO_DAYS('2009-04-03')),
+PARTITION p20090403 VALUES LESS THAN (TO_DAYS('2009-04-04')),
+PARTITION p20090404 VALUES LESS THAN (TO_DAYS('2009-04-05')),
+PARTITION p20090405 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, '2009-01-01'), (2, NULL);
+# test with an invalid date, which leads to item->null_value being set.
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-99' AS DATETIME);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p20090401 ALL NULL NULL NULL NULL 2 Using where
+Warnings:
+Warning 1292 Incorrect datetime value: '2009-04-99'
+Warning 1292 Incorrect datetime value: '2009-04-99'
+DROP TABLE t1;
CREATE TABLE t1
(a INT NOT NULL AUTO_INCREMENT,
b DATETIME,
=== modified file 'mysql-test/r/partition_range.result'
--- a/mysql-test/r/partition_range.result 2008-11-04 07:43:21 +0000
+++ b/mysql-test/r/partition_range.result 2009-08-26 10:59:49 +0000
@@ -745,7 +745,7 @@ a
EXPLAIN PARTITIONS SELECT * FROM t1
WHERE a >= '2004-07-01' AND a <= '2004-09-30';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p407,p408,p409 ALL NULL NULL NULL NULL 9 Using where
+1 SIMPLE t1 p3xx,p407,p408,p409 ALL NULL NULL NULL NULL 18 Using where
SELECT * from t1
WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR
(a >= '2005-07-01' AND a <= '2005-09-30');
@@ -772,7 +772,7 @@ EXPLAIN PARTITIONS SELECT * from t1
WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR
(a >= '2005-07-01' AND a <= '2005-09-30');
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p407,p408,p409,p507,p508,p509 ALL NULL NULL NULL NULL 18 Using where
+1 SIMPLE t1 p3xx,p407,p408,p409,p507,p508,p509 ALL NULL NULL NULL NULL 27 Using where
DROP TABLE t1;
create table t1 (a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
=== modified file 'mysql-test/r/subselect.result'
--- a/mysql-test/r/subselect.result 2009-09-15 10:46:35 +0000
+++ b/mysql-test/r/subselect.result 2009-10-15 21:38:29 +0000
@@ -75,7 +75,7 @@ SELECT 1 FROM (SELECT 1 as a) b WHERE 1
select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1));
ERROR HY000: Incorrect usage of PROCEDURE and subquery
SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1));
-ERROR HY000: Incorrect parameters to procedure 'ANALYSE'
+ERROR HY000: Incorrect usage of PROCEDURE and subquery
SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL;
ERROR 42S22: Unknown column 'a' in 'field list'
SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NOT NULL;
@@ -4383,6 +4383,34 @@ id select_type table type possible_keys
1 PRIMARY C ALL NULL NULL NULL NULL 20 100.00 Using where
DROP TABLE C;
# End of test for bug#45061.
+#
+# Bug #46749: Segfault in add_key_fields() with outer subquery level
+# field references
+#
+CREATE TABLE t1 (
+a int,
+b int,
+UNIQUE (a), KEY (b)
+);
+INSERT INTO t1 VALUES (1,1), (2,1);
+CREATE TABLE st1 like t1;
+INSERT INTO st1 VALUES (1,1), (2,1);
+CREATE TABLE st2 like t1;
+INSERT INTO st2 VALUES (1,1), (2,1);
+EXPLAIN
+SELECT MAX(b), (SELECT COUNT(*) FROM st1,st2 WHERE st2.b <= t1.b)
+FROM t1
+WHERE a = 230;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+2 DEPENDENT SUBQUERY st1 index NULL a 5 NULL 2 Using index
+2 DEPENDENT SUBQUERY st2 index b b 5 NULL 2 Using where; Using index; Using join buffer
+SELECT MAX(b), (SELECT COUNT(*) FROM st1,st2 WHERE st2.b <= t1.b)
+FROM t1
+WHERE a = 230;
+MAX(b) (SELECT COUNT(*) FROM st1,st2 WHERE st2.b <= t1.b)
+NULL 0
+DROP TABLE t1, st1, st2;
End of 5.0 tests.
CREATE TABLE t1 (a INT, b INT);
INSERT INTO t1 VALUES (2,22),(1,11),(2,22);
=== added file 'mysql-test/r/table_elim_debug.result'
--- a/mysql-test/r/table_elim_debug.result 1970-01-01 00:00:00 +0000
+++ b/mysql-test/r/table_elim_debug.result 2009-10-29 17:50:33 +0000
@@ -0,0 +1,22 @@
+drop table if exists t1, t2;
+create table t1 (a int);
+insert into t1 values (0),(1),(2),(3);
+create table t2 (a int primary key, b int)
+as select a, a as b from t1 where a in (1,2);
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4
+set optimizer_switch='table_elimination=off';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 Using index
+set optimizer_switch='table_elimination=on';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4
+set optimizer_switch='table_elimination=default';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4
+drop table t1, t2;
=== modified file 'mysql-test/r/type_newdecimal.result'
--- a/mysql-test/r/type_newdecimal.result 2009-07-03 10:36:04 +0000
+++ b/mysql-test/r/type_newdecimal.result 2009-08-24 19:47:08 +0000
@@ -1495,9 +1495,9 @@ CREATE TABLE t1 (a int DEFAULT NULL, b i
INSERT INTO t1 VALUES (3,30), (1,10), (2,10);
SELECT a+CAST(1 AS decimal(65,30)) AS aa, SUM(b) FROM t1 GROUP BY aa;
aa SUM(b)
-2.000000000000000000000000000000 10
-3.000000000000000000000000000000 10
-4.000000000000000000000000000000 30
+2.00000000000000000000000000000 10
+3.00000000000000000000000000000 10
+4.00000000000000000000000000000 30
SELECT a+CAST(1 AS decimal(65,31)) AS aa, SUM(b) FROM t1 GROUP BY aa;
ERROR 42000: Too big scale 31 specified for column '1'. Maximum is 30.
DROP TABLE t1;
@@ -1521,13 +1521,13 @@ f1
DROP TABLE t1;
CREATE TABLE t1 SELECT 123451234512345123451234512345123451234512345.678906789067890678906789067890678906789067890 AS f1;
Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
+Note 1265 Data truncated for column 'f1' at row 1
DESC t1;
Field Type Null Key Default Extra
-f1 decimal(65,30) NO 0.000000000000000000000000000000
+f1 decimal(65,20) NO 0.00000000000000000000
SELECT f1 FROM t1;
f1
-99999999999999999999999999999999999.999999999999999999999999999999
+123451234512345123451234512345123451234512345.67890678906789067891
DROP TABLE t1;
select (1.20396873 * 0.89550000 * 0.68000000 * 1.08721696 * 0.99500000 *
1.01500000 * 1.01500000 * 0.99500000);
@@ -1595,7 +1595,7 @@ Warnings:
Note 1265 Data truncated for column 'my_col' at row 1
DESCRIBE t1;
Field Type Null Key Default Extra
-my_col decimal(65,30) NO 0.000000000000000000000000000000
+my_col decimal(32,30) NO 0.000000000000000000000000000000
SELECT my_col FROM t1;
my_col
1.123456789123456789123456789123
@@ -1625,8 +1625,212 @@ Warnings:
Note 1265 Data truncated for column 'my_col' at row 1
DESCRIBE t1;
Field Type Null Key Default Extra
-my_col decimal(65,30) YES NULL
+my_col decimal(30,30) YES NULL
SELECT my_col FROM t1;
my_col
0.012345687012345687012345687012
DROP TABLE t1;
+#
+# Bug#45261: Crash, stored procedure + decimal
+#
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 SELECT
+/* 81 */ 100000000000000000000000000000000000000000000000000000000000000000000000000000001
+AS c1;
+Warnings:
+Warning 1264 Out of range value for column 'c1' at row 1
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(65,0) NO 0
+SELECT * FROM t1;
+c1
+99999999999999999999999999999999999999999999999999999999999999999
+DROP TABLE t1;
+CREATE TABLE t1 SELECT
+/* 81 */ 100000000000000000000000000000000000000000000000000000000000000000000000000000001.
+AS c1;
+Warnings:
+Warning 1264 Out of range value for column 'c1' at row 1
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(65,0) NO 0
+SELECT * FROM t1;
+c1
+99999999999999999999999999999999999999999999999999999999999999999
+DROP TABLE t1;
+CREATE TABLE t1 SELECT
+/* 81 */ 100000000000000000000000000000000000000000000000000000000000000000000000000000001.1 /* 1 */
+AS c1;
+Warnings:
+Warning 1264 Out of range value for column 'c1' at row 1
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(65,0) NO 0
+SELECT * FROM t1;
+c1
+99999999999999999999999999999999999999999999999999999999999999999
+DROP TABLE t1;
+CREATE TABLE t1 SELECT
+/* 82 */ 1000000000000000000000000000000000000000000000000000000000000000000000000000000001
+AS c1;
+Warnings:
+Error 1292 Truncated incorrect DECIMAL value: ''
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(65,0) NO 0
+SELECT * FROM t1;
+c1
+99999999999999999999999999999999999999999999999999999999999999999
+DROP TABLE t1;
+CREATE TABLE t1 SELECT
+/* 40 */ 1000000000000000000000000000000000000001.1000000000000000000000000000000000000001 /* 40 */
+AS c1;
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(65,25) NO 0.0000000000000000000000000
+SELECT * FROM t1;
+c1
+1000000000000000000000000000000000000001.1000000000000000000000000
+DROP TABLE t1;
+CREATE TABLE t1 SELECT
+/* 1 */ 1.10000000000000000000000000000000000000000000000000000000000000000000000000000001 /* 80 */
+AS c1;
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(31,30) NO 0.000000000000000000000000000000
+SELECT * FROM t1;
+c1
+1.100000000000000000000000000000
+DROP TABLE t1;
+CREATE TABLE t1 SELECT
+/* 1 */ 1.100000000000000000000000000000000000000000000000000000000000000000000000000000001 /* 81 */
+AS c1;
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(31,30) NO 0.000000000000000000000000000000
+SELECT * FROM t1;
+c1
+1.100000000000000000000000000000
+DROP TABLE t1;
+CREATE TABLE t1 SELECT
+.100000000000000000000000000000000000000000000000000000000000000000000000000000001 /* 81 */
+AS c1;
+Warnings:
+Note 1265 Data truncated for column 'c1' at row 1
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(30,30) NO 0.000000000000000000000000000000
+SELECT * FROM t1;
+c1
+0.100000000000000000000000000000
+DROP TABLE t1;
+CREATE TABLE t1 SELECT
+/* 45 */ 123456789012345678901234567890123456789012345.123456789012345678901234567890123456789012345 /* 45 */
+AS c1;
+Warnings:
+Note 1265 Data truncated for column 'c1' at row 1
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(65,20) NO 0.00000000000000000000
+SELECT * FROM t1;
+c1
+123456789012345678901234567890123456789012345.12345678901234567890
+DROP TABLE t1;
+CREATE TABLE t1 SELECT
+/* 65 */ 12345678901234567890123456789012345678901234567890123456789012345.1 /* 1 */
+AS c1;
+Warnings:
+Note 1265 Data truncated for column 'c1' at row 1
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(65,0) NO 0
+SELECT * FROM t1;
+c1
+12345678901234567890123456789012345678901234567890123456789012345
+DROP TABLE t1;
+CREATE TABLE t1 SELECT
+/* 66 */ 123456789012345678901234567890123456789012345678901234567890123456.1 /* 1 */
+AS c1;
+Warnings:
+Warning 1264 Out of range value for column 'c1' at row 1
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(65,0) NO 0
+SELECT * FROM t1;
+c1
+99999999999999999999999999999999999999999999999999999999999999999
+DROP TABLE t1;
+CREATE TABLE t1 SELECT
+.123456789012345678901234567890123456789012345678901234567890123456 /* 66 */
+AS c1;
+Warnings:
+Note 1265 Data truncated for column 'c1' at row 1
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(30,30) NO 0.000000000000000000000000000000
+SELECT * FROM t1;
+c1
+0.123456789012345678901234567890
+DROP TABLE t1;
+CREATE TABLE t1 AS SELECT 123.1234567890123456789012345678901 /* 31 */ AS c1;
+Warnings:
+Note 1265 Data truncated for column 'c1' at row 1
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(33,30) NO 0.000000000000000000000000000000
+SELECT * FROM t1;
+c1
+123.123456789012345678901234567890
+DROP TABLE t1;
+CREATE TABLE t1 SELECT 1.1 + CAST(1 AS DECIMAL(65,30)) AS c1;
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(65,29) NO 0.00000000000000000000000000000
+SELECT * FROM t1;
+c1
+2.10000000000000000000000000000
+DROP TABLE t1;
+#
+# Test that the integer and decimal parts are properly calculated.
+#
+CREATE TABLE t1 (a DECIMAL(30,30));
+INSERT INTO t1 VALUES (0.1),(0.2),(0.3);
+CREATE TABLE t2 SELECT MIN(a + 0.0000000000000000000000000000001) AS c1 FROM t1;
+Warnings:
+Note 1265 Data truncated for column 'c1' at row 3
+DESC t2;
+Field Type Null Key Default Extra
+c1 decimal(32,30) YES NULL
+DROP TABLE t1,t2;
+CREATE TABLE t1 (a DECIMAL(30,30));
+INSERT INTO t1 VALUES (0.1),(0.2),(0.3);
+CREATE TABLE t2 SELECT IFNULL(a + 0.0000000000000000000000000000001, NULL) AS c1 FROM t1;
+Warnings:
+Note 1265 Data truncated for column 'c1' at row 1
+Note 1265 Data truncated for column 'c1' at row 2
+Note 1265 Data truncated for column 'c1' at row 3
+DESC t2;
+Field Type Null Key Default Extra
+c1 decimal(32,30) YES NULL
+DROP TABLE t1,t2;
+CREATE TABLE t1 (a DECIMAL(30,30));
+INSERT INTO t1 VALUES (0.1),(0.2),(0.3);
+CREATE TABLE t2 SELECT CASE a WHEN 0.1 THEN 0.0000000000000000000000000000000000000000000000000000000000000000001 END AS c1 FROM t1;
+Warnings:
+Note 1265 Data truncated for column 'c1' at row 1
+DESC t2;
+Field Type Null Key Default Extra
+c1 decimal(31,30) YES NULL
+DROP TABLE t1,t2;
+#
+# Test that variables get maximum precision.
+#
+SET @decimal= 1.1;
+CREATE TABLE t1 SELECT @decimal AS c1;
+DESC t1;
+Field Type Null Key Default Extra
+c1 decimal(65,30) YES NULL
+SELECT * FROM t1;
+c1
+1.100000000000000000000000000000
+DROP TABLE t1;
=== modified file 'mysql-test/r/view.result'
--- a/mysql-test/r/view.result 2009-09-07 20:50:10 +0000
+++ b/mysql-test/r/view.result 2009-10-15 21:38:29 +0000
@@ -3718,117 +3718,6 @@ DROP TABLE t1;
# -- End of test case for Bug#40825
-#
-# Bug #45806 crash when replacing into a view with a join!
-#
-CREATE TABLE t1(a INT UNIQUE);
-CREATE VIEW v1 AS SELECT t1.a FROM t1, t1 AS a;
-INSERT INTO t1 VALUES (1), (2);
-REPLACE INTO v1(a) SELECT 1 FROM t1,t1 AS c;
-SELECT * FROM v1;
-a
-1
-2
-1
-2
-REPLACE INTO v1(a) SELECT 3 FROM t1,t1 AS c;
-SELECT * FROM v1;
-a
-1
-2
-3
-1
-2
-3
-1
-2
-3
-DELETE FROM t1 WHERE a=3;
-INSERT INTO v1(a) SELECT 1 FROM t1,t1 AS c
-ON DUPLICATE KEY UPDATE `v1`.`a`= 1;
-SELECT * FROM v1;
-a
-1
-2
-1
-2
-CREATE VIEW v2 AS SELECT t1.a FROM t1, v1 AS a;
-REPLACE INTO v2(a) SELECT 1 FROM t1,t1 AS c;
-SELECT * FROM v2;
-a
-1
-2
-1
-2
-1
-2
-1
-2
-REPLACE INTO v2(a) SELECT 3 FROM t1,t1 AS c;
-SELECT * FROM v2;
-a
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-INSERT INTO v2(a) SELECT 1 FROM t1,t1 AS c
-ON DUPLICATE KEY UPDATE `v2`.`a`= 1;
-SELECT * FROM v2;
-a
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-1
-2
-3
-DROP VIEW v1;
-DROP VIEW v2;
-DROP TABLE t1;
-# -- End of test case for Bug#45806
# -----------------------------------------------------------------
# -- End of 5.0 tests.
# -----------------------------------------------------------------
=== added file 'mysql-test/std_data/loaddata_utf8.dat'
--- a/mysql-test/std_data/loaddata_utf8.dat 1970-01-01 00:00:00 +0000
+++ b/mysql-test/std_data/loaddata_utf8.dat 2009-08-12 03:54:05 +0000
@@ -0,0 +1,3 @@
+一二三
+四五六
+七八九
=== added file 'mysql-test/std_data/parts/t1.frm'
Files a/mysql-test/std_data/parts/t1.frm 1970-01-01 00:00:00 +0000 and b/mysql-test/std_data/parts/t1.frm 2009-01-08 14:16:44 +0000 differ
=== modified file 'mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result'
--- a/mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result 2009-05-31 05:44:41 +0000
+++ b/mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result 2009-08-26 23:13:03 +0000
@@ -379,7 +379,9 @@ master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
+master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; TRUNCATE table t2
+master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
@@ -838,8 +840,10 @@ UPDATE t4,t3 SET t4.a=t3.a + bug27417(1)
ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
master-bin.000001 # Intvar # # INSERT_ID=6
master-bin.000001 # Query # # use `test`; UPDATE t4,t3 SET t4.a=t3.a + bug27417(1) /* top level non-ta table */
+master-bin.000001 # Query # # ROLLBACK
select count(*) from t1 /* must be 4 */;
count(*)
4
=== modified file 'mysql-test/suite/binlog/r/binlog_stm_drop_tmp_tbl.result'
--- a/mysql-test/suite/binlog/r/binlog_stm_drop_tmp_tbl.result 2007-06-27 12:28:02 +0000
+++ b/mysql-test/suite/binlog/r/binlog_stm_drop_tmp_tbl.result 2009-08-28 09:45:57 +0000
@@ -17,5 +17,5 @@ master-bin.000001 # Query # # create dat
master-bin.000001 # Query # # use `drop-temp+table-test`; create temporary table shortn1 (a int)
master-bin.000001 # Query # # use `drop-temp+table-test`; create temporary table `table:name` (a int)
master-bin.000001 # Query # # use `drop-temp+table-test`; create temporary table shortn2 (a int)
-master-bin.000001 # Query # # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`shortn2`,`drop-temp+table-test`.`table:name`,`drop-temp+table-test`.`shortn1`
+master-bin.000001 # Query # # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `shortn2`,`table:name`,`shortn1`
drop database `drop-temp+table-test`;
=== modified file 'mysql-test/suite/binlog/r/binlog_stm_mix_innodb_myisam.result'
--- a/mysql-test/suite/binlog/r/binlog_stm_mix_innodb_myisam.result 2009-05-31 05:44:41 +0000
+++ b/mysql-test/suite/binlog/r/binlog_stm_mix_innodb_myisam.result 2009-08-28 09:45:57 +0000
@@ -258,7 +258,7 @@ master-bin.000001 # Query # # use `test`
master-bin.000001 # Query # # use `test`; insert t0 select * from t1
master-bin.000001 # Query # # use `test`; insert into t0 select GET_LOCK("lock1",null)
master-bin.000001 # Query # # use `test`; create table t2 (n int) engine=innodb
-master-bin.000001 # Query # # use `test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `test`.`t1`,`test`.`ti`
+master-bin.000001 # Query # # use `test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `t1`,`ti`
do release_lock("lock1");
drop table t0,t2;
set autocommit=0;
@@ -346,7 +346,9 @@ master-bin.000001 # Query # # use `test`
master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS t2
master-bin.000001 # Query # # use `test`; CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb
master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (4,4)
+master-bin.000001 # Query # # BEGIN
master-bin.000001 # Query # # use `test`; TRUNCATE table t2
+master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (5,5)
master-bin.000001 # Query # # use `test`; DROP TABLE t2
master-bin.000001 # Query # # use `test`; INSERT INTO t1 values (6,6)
@@ -545,8 +547,10 @@ UPDATE t4,t3 SET t4.a=t3.a + bug27417(1)
ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
master-bin.000001 # Intvar # # INSERT_ID=6
master-bin.000001 # Query # # use `test`; UPDATE t4,t3 SET t4.a=t3.a + bug27417(1) /* top level non-ta table */
+master-bin.000001 # Query # # ROLLBACK
/* the output must denote there is the query */;
select count(*) from t1 /* must be 4 */;
count(*)
@@ -782,8 +786,10 @@ UPDATE t4,t3 SET t4.a=t3.a + bug27417(1)
ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
master-bin.000001 # Intvar # # INSERT_ID=6
master-bin.000001 # Query # # use `test`; UPDATE t4,t3 SET t4.a=t3.a + bug27417(1) /* top level non-ta table */
+master-bin.000001 # Query # # ROLLBACK
select count(*) from t1 /* must be 4 */;
count(*)
4
=== modified file 'mysql-test/suite/federated/disabled.def'
--- a/mysql-test/suite/federated/disabled.def 2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/federated/disabled.def 2009-10-30 18:50:56 +0000
@@ -9,4 +9,5 @@
# Do not use any TAB characters for whitespace.
#
##############################################################################
-federated_transactions : Bug#29523 Transactions do not work
+federated_server : needs fixup
+
=== modified file 'mysql-test/suite/federated/federated.result'
--- a/mysql-test/suite/federated/federated.result 2009-03-19 08:49:51 +0000
+++ b/mysql-test/suite/federated/federated.result 2009-10-30 18:50:56 +0000
@@ -47,9 +47,10 @@ CREATE TABLE federated.t1 (
)
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t3';
-SELECT * FROM federated.t1;
-ERROR HY000: The foreign data source you are trying to reference does not exist. Data source error: error: 1146 'Table 'federated.t3' doesn't exist'
-DROP TABLE federated.t1;
+ERROR HY000: Can't create federated table. Foreign data src error: database: 'federated' username: 'root' hostname: '127.0.0.1'
+DROP TABLE IF EXISTS federated.t1;
+Warnings:
+Note 1051 Unknown table 't1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`group` int NOT NULL default 0,
@@ -59,9 +60,10 @@ CREATE TABLE federated.t1 (
)
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
CONNECTION='mysql://user:pass@127.0.0.1:SLAVE_PORT/federated/t1';
-SELECT * FROM federated.t1;
-ERROR HY000: Unable to connect to foreign data source: Access denied for user 'user'@'localhost' (using password: YES)
-DROP TABLE federated.t1;
+ERROR HY000: Can't create federated table. Foreign data src error: database: 'federated' username: 'user' hostname: '127.0.0.1'
+DROP TABLE IF EXISTS federated.t1;
+Warnings:
+Note 1051 Unknown table 't1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`group` int NOT NULL default 0,
@@ -1944,15 +1946,7 @@ Bug#18287 create federated table always
Test that self-references work
-create table federated.t1 (a int primary key);
-create table federated.t2 (a int primary key)
-ENGINE=FEDERATED
-connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
-insert into federated.t1 (a) values (1);
-select * from federated.t2;
-a
-1
-drop table federated.t1, federated.t2;
+fix LOCK_open before reenabling test for Bug#18287
CREATE TABLE federated.t1 (a INT PRIMARY KEY) DEFAULT CHARSET=utf8;
CREATE TABLE federated.t1 (a INT PRIMARY KEY)
ENGINE=FEDERATED
@@ -1960,13 +1954,11 @@ CONNECTION='mysql://root@127.0.0.1:SLAVE
DEFAULT CHARSET=utf8;
SELECT transactions FROM information_schema.engines WHERE engine="FEDERATED";
transactions
-NO
+YES
INSERT INTO federated.t1 VALUES (1);
SET autocommit=0;
INSERT INTO federated.t1 VALUES (2);
ROLLBACK;
-Warnings:
-Warning 1196 Some non-transactional changed tables couldn't be rolled back
SET autocommit=1;
SELECT * FROM federated.t1;
a
@@ -2157,6 +2149,6 @@ End of 5.1 tests
SET @@GLOBAL.CONCURRENT_INSERT= @OLD_MASTER_CONCURRENT_INSERT;
SET @@GLOBAL.CONCURRENT_INSERT= @OLD_SLAVE_CONCURRENT_INSERT;
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
=== modified file 'mysql-test/suite/federated/federated.test'
--- a/mysql-test/suite/federated/federated.test 2009-03-19 08:49:51 +0000
+++ b/mysql-test/suite/federated/federated.test 2009-10-30 18:50:56 +0000
@@ -57,6 +57,7 @@ CREATE TABLE federated.t1 (
# test non-existant table
--replace_result $SLAVE_MYPORT SLAVE_PORT
+--error ER_CANT_CREATE_FEDERATED_TABLE
eval CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`group` int NOT NULL default 0,
@@ -66,12 +67,11 @@ eval CREATE TABLE federated.t1 (
)
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t3';
---error ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST
-SELECT * FROM federated.t1;
-DROP TABLE federated.t1;
+DROP TABLE IF EXISTS federated.t1;
# test bad user/password
--replace_result $SLAVE_MYPORT SLAVE_PORT
+--error ER_CANT_CREATE_FEDERATED_TABLE
eval CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`group` int NOT NULL default 0,
@@ -81,9 +81,7 @@ eval CREATE TABLE federated.t1 (
)
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
CONNECTION='mysql://user:pass@127.0.0.1:$SLAVE_MYPORT/federated/t1';
---error ER_CONNECT_TO_FOREIGN_DATA_SOURCE
-SELECT * FROM federated.t1;
-DROP TABLE federated.t1;
+DROP TABLE IF EXISTS federated.t1;
# # correct connection, same named tables
--replace_result $SLAVE_MYPORT SLAVE_PORT
@@ -1806,6 +1804,8 @@ drop table federated.t1;
--echo
--echo Test that self-references work
--echo
+--echo fix LOCK_open before reenabling test for Bug#18287
+--disable_parsing
connection slave;
create table federated.t1 (a int primary key);
--replace_result $SLAVE_MYPORT SLAVE_PORT
@@ -1815,7 +1815,7 @@ eval create table federated.t2 (a int pr
insert into federated.t1 (a) values (1);
select * from federated.t2;
drop table federated.t1, federated.t2;
-
+--enable_parsing
#
# BUG#29875 Disable support for transactions
#
=== modified file 'mysql-test/suite/federated/federated_archive.result'
--- a/mysql-test/suite/federated/federated_archive.result 2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_archive.result 2009-10-30 18:50:56 +0000
@@ -34,6 +34,6 @@ id name
DROP TABLE federated.t1;
DROP TABLE federated.archive_table;
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
=== modified file 'mysql-test/suite/federated/federated_bug_13118.result'
--- a/mysql-test/suite/federated/federated_bug_13118.result 2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_bug_13118.result 2009-10-30 18:50:56 +0000
@@ -25,6 +25,6 @@ foo bar
DROP TABLE federated.t1;
DROP TABLE federated.bug_13118_table;
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
=== modified file 'mysql-test/suite/federated/federated_bug_25714.result'
--- a/mysql-test/suite/federated/federated_bug_25714.result 2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_bug_25714.result 2009-10-30 18:50:56 +0000
@@ -48,6 +48,6 @@ SET @@GLOBAL.CONCURRENT_INSERT= @OLD_MAS
DROP TABLE federated.t1;
SET @@GLOBAL.CONCURRENT_INSERT= @OLD_SLAVE_CONCURRENT_INSERT;
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
=== modified file 'mysql-test/suite/federated/federated_cleanup.inc'
--- a/mysql-test/suite/federated/federated_cleanup.inc 2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_cleanup.inc 2009-10-30 18:50:56 +0000
@@ -1,9 +1,9 @@
connection master;
--disable_warnings
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
connection slave;
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
--enable_warnings
=== modified file 'mysql-test/suite/federated/federated_innodb.result'
--- a/mysql-test/suite/federated/federated_innodb.result 2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_innodb.result 2009-10-30 18:50:56 +0000
@@ -20,6 +20,6 @@ a b
drop table federated.t1;
drop table federated.t1;
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
=== modified file 'mysql-test/suite/federated/federated_server.result'
--- a/mysql-test/suite/federated/federated_server.result 2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_server.result 2009-10-30 18:50:56 +0000
@@ -178,8 +178,8 @@ INSERT INTO db_bogus.t1 VALUES ('2','thi
create server 's1' foreign data wrapper 'mysql' options
(HOST '127.0.0.1',
DATABASE 'db_legitimate',
-USER 'root',
-PASSWORD '',
+USER 'test_fed',
+PASSWORD 'foo',
PORT SLAVE_PORT,
SOCKET '',
OWNER 'root');
=== modified file 'mysql-test/suite/federated/federated_server.test'
--- a/mysql-test/suite/federated/federated_server.test 2009-06-05 15:35:22 +0000
+++ b/mysql-test/suite/federated/federated_server.test 2009-10-30 18:50:56 +0000
@@ -3,6 +3,7 @@
# Slow test, don't run during staging part
-- source include/not_staging.inc
+-- source include/big_test.inc
-- source federated.inc
connection slave;
@@ -182,13 +183,17 @@ CREATE TABLE db_bogus.t1 (
;
INSERT INTO db_bogus.t1 VALUES ('2','this is bogus');
+connection slave;
+create user test_fed@localhost identified by 'foo';
+grant all on db_legitimate.* to test_fed@localhost;
+
connection master;
--replace_result $SLAVE_MYPORT SLAVE_PORT
eval create server 's1' foreign data wrapper 'mysql' options
(HOST '127.0.0.1',
DATABASE 'db_legitimate',
- USER 'root',
- PASSWORD '',
+ USER 'test_fed',
+ PASSWORD 'foo',
PORT $SLAVE_MYPORT,
SOCKET '',
OWNER 'root');
=== modified file 'mysql-test/suite/federated/federated_transactions.result'
--- a/mysql-test/suite/federated/federated_transactions.result 2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/federated/federated_transactions.result 2009-10-30 18:50:56 +0000
@@ -1,13 +1,4 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-stop slave;
-DROP DATABASE IF EXISTS federated;
CREATE DATABASE federated;
-DROP DATABASE IF EXISTS federated;
CREATE DATABASE federated;
DROP TABLE IF EXISTS federated.t1;
Warnings:
=== modified file 'mysql-test/suite/funcs_1/r/is_engines_federated.result'
--- a/mysql-test/suite/funcs_1/r/is_engines_federated.result 2008-03-07 19:18:14 +0000
+++ b/mysql-test/suite/funcs_1/r/is_engines_federated.result 2009-11-07 06:29:10 +0000
@@ -2,7 +2,7 @@ SELECT * FROM information_schema.engines
WHERE ENGINE = 'FEDERATED';
ENGINE FEDERATED
SUPPORT YES
-COMMENT Federated MySQL storage engine
-TRANSACTIONS NO
+COMMENT FederatedX pluggable storage engine
+TRANSACTIONS YES
XA NO
-SAVEPOINTS NO
+SAVEPOINTS YES
=== modified file 'mysql-test/suite/parts/inc/partition.pre'
--- a/mysql-test/suite/parts/inc/partition.pre 2008-01-10 15:50:37 +0000
+++ b/mysql-test/suite/parts/inc/partition.pre 2009-10-28 07:52:34 +0000
@@ -152,6 +152,7 @@ ENGINE = MEMORY;
--echo # Logging of <max_row> INSERTs into t0_template suppressed
--disable_query_log
let $num= `SELECT @max_row`;
+begin;
while ($num)
{
eval INSERT INTO t0_template
@@ -160,6 +161,7 @@ f_charbig = '===$num===';
dec $num;
}
+commit;
--enable_query_log
# Auxiliary table used for comparisons of table definitions and file lists
=== modified file 'mysql-test/suite/parts/inc/partition_bigint.inc'
--- a/mysql-test/suite/parts/inc/partition_bigint.inc 2008-07-01 18:38:15 +0000
+++ b/mysql-test/suite/parts/inc/partition_bigint.inc 2009-10-28 07:52:34 +0000
@@ -32,11 +32,13 @@ delete from t2;
let $count=$maxrows;
--echo $maxrows inserts;
--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values ($count);
dec $count;
}
+commit;
--enable_query_log
select count(*) from t2;
drop table t2;
=== modified file 'mysql-test/suite/parts/inc/partition_binary.inc'
--- a/mysql-test/suite/parts/inc/partition_binary.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_binary.inc 2009-10-28 07:52:34 +0000
@@ -22,13 +22,16 @@ show create table t2;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values (repeat(char(ascii('a')+$letter),$count+54));
dec $count;
inc $letter;
}
+commit;
+--enable_query_log
select count(*) from t2;
select hex(a) from t2;
drop table t2;
@@ -48,13 +51,16 @@ show create table t3;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values (repeat(char(ascii('a')+$letter),$count+54));
dec $count;
inc $letter;
}
+commit;
+--enable_query_log
select count(*) from t3;
select hex(a) from t3;
drop table t3;
@@ -73,14 +79,16 @@ show create table t4;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values (repeat(char(ascii('a')+$letter),$count+54));
dec $count;
inc $letter;
}
-
+commit;
+--enable_query_log
select count(*) from t4;
select hex(a) from t4;
drop table t4;
=== modified file 'mysql-test/suite/parts/inc/partition_bit.inc'
--- a/mysql-test/suite/parts/inc/partition_bit.inc 2008-02-07 15:26:22 +0000
+++ b/mysql-test/suite/parts/inc/partition_bit.inc 2009-10-28 07:52:34 +0000
@@ -74,11 +74,13 @@ show create table t3;
let $count=255;
--echo $count inserts;
--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values ($count);
dec $count;
}
+commit;
--enable_query_log
select hex(a) from t3 where a=b'01010101';
delete from t3 where a=b'01010101';
@@ -96,11 +98,13 @@ show create table t4;
let $count=32;
--echo $count inserts;
--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values ($count);
dec $count;
}
+commit;
--enable_query_log
select hex(a) from t4 where a=b'00000001';
delete from t4 where a=b'00000001';
=== modified file 'mysql-test/suite/parts/inc/partition_char.inc'
--- a/mysql-test/suite/parts/inc/partition_char.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_char.inc 2009-10-28 07:52:34 +0000
@@ -21,13 +21,16 @@ show create table t2;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values (repeat(char(ascii('a')+$letter),$count+54));
dec $count;
inc $letter;
}
+commit;
+--enable_query_log
select count(*) from t2;
select * from t2;
drop table t2;
@@ -47,13 +50,16 @@ show create table t3;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values (repeat(char(ascii('a')+$letter),$count+54));
dec $count;
inc $letter;
}
+commit;
+--enable_query_log
select count(*) from t3;
select a from t3;
drop table t3;
@@ -71,13 +77,16 @@ show create table t4;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values (repeat(char(ascii('a')+$letter),$count+54));
dec $count;
inc $letter;
}
+commit;
+--enable_query_log
select count(*) from t4;
select a from t4;
drop table t4;
=== modified file 'mysql-test/suite/parts/inc/partition_date.inc'
--- a/mysql-test/suite/parts/inc/partition_date.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_date.inc 2009-10-28 07:52:34 +0000
@@ -23,7 +23,8 @@ select * from t2;
delete from t2;
let $count=28;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values (19700101+$count-1);
@@ -31,7 +32,8 @@ eval insert into t2 values (19700201+$co
eval insert into t2 values (19700301+$count-1);
dec $count;
}
-#--enable_query_log
+commit;
+--enable_query_log
select count(*) from t2;
select * from t2;
drop table t2;
@@ -47,11 +49,15 @@ partition quarter4 values less than (13)
show create table t3;
let $count=12;
--echo $count inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values (adddate(19700101,interval $count-1 month));
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t3;
select * from t3;
drop table t3;
@@ -67,11 +73,15 @@ partition quarter4 values in (10,11,12)
show create table t4;
let $count=12;
--echo $count inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values (adddate(19700101,interval $count-1 month));
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t4;
select * from t4;
drop table t4;
=== modified file 'mysql-test/suite/parts/inc/partition_datetime.inc'
--- a/mysql-test/suite/parts/inc/partition_datetime.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_datetime.inc 2009-10-28 07:52:34 +0000
@@ -23,12 +23,15 @@ select * from t2;
delete from t2;
let $count=59;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values (19700101000000+$count);
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t2;
select * from t2;
drop table t2;
@@ -44,11 +47,15 @@ partition quarter4 values less than (13)
show create table t3;
let $count=12;
--echo $count inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values (adddate(19700101000000,interval $count-1 month));
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t3;
select * from t3;
drop table t3;
@@ -64,11 +71,15 @@ partition quarter4 values in (10,11,12)
show create table t4;
let $count=12;
--echo $count inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values (adddate(19700101000000,interval $count-1 month));
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t4;
select * from t4;
drop table t4;
=== modified file 'mysql-test/suite/parts/inc/partition_decimal.inc'
--- a/mysql-test/suite/parts/inc/partition_decimal.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_decimal.inc 2009-10-28 07:52:34 +0000
@@ -24,6 +24,7 @@ delete from t2;
let $count=$maxrows;
--echo $count*3 inserts;
--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values ($count);
@@ -31,6 +32,7 @@ eval insert into t2 values ($count+0.333
eval insert into t2 values ($count+0.755555555);
dec $count;
}
+commit;
--enable_query_log
select count(*) from t2;
drop table t2;
@@ -53,6 +55,8 @@ partition pa10 values less than (10)
show create table t3;
let $count=9;
--echo $count*3 inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values ($count);
@@ -60,6 +64,7 @@ eval insert into t3 values ($count+0.333
eval insert into t3 values ($count+0.755555555);
dec $count;
}
+commit;
--enable_query_log
select count(*) from t3;
drop table t3;
@@ -75,6 +80,8 @@ partition pa10 values in (9,10)
show create table t4;
let $count=9;
--echo $count*3 inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values ($count);
@@ -82,6 +89,7 @@ eval insert into t4 values ($count+0.333
eval insert into t4 values ($count+0.755555555);
dec $count;
}
+commit;
--enable_query_log
select count(*) from t4;
drop table t4;
=== modified file 'mysql-test/suite/parts/inc/partition_double.inc'
--- a/mysql-test/suite/parts/inc/partition_double.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_double.inc 2009-10-28 07:52:34 +0000
@@ -24,6 +24,7 @@ delete from t2;
let $count=$maxrows;
--echo $maxrows*3 inserts;
--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values ($count);
@@ -31,6 +32,7 @@ eval insert into t2 values ($count+0.33)
eval insert into t2 values ($count+0.75);
dec $count;
}
+commit;
--enable_query_log
select count(*) from t2;
drop table t2;
@@ -52,6 +54,8 @@ partition pa10 values less than (10)
show create table t3;
let $count=9;
--echo $count*3 inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values ($count);
@@ -59,6 +63,8 @@ eval insert into t3 values ($count+0.33)
eval insert into t3 values ($count+0.75);
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t3;
select * from t3;
drop table t3;
@@ -72,6 +78,8 @@ partition pa10 values in (7,8,9,10)
show create table t4;
let $count=9;
--echo $count*3 inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values ($count);
@@ -79,6 +87,8 @@ eval insert into t4 values ($count+0.33)
eval insert into t4 values ($count+0.75);
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t4;
select * from t4;
drop table t4;
=== modified file 'mysql-test/suite/parts/inc/partition_enum.inc'
--- a/mysql-test/suite/parts/inc/partition_enum.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_enum.inc 2009-10-28 07:52:34 +0000
@@ -26,12 +26,15 @@ partition by key (a) partitions 27;
show create table t2;
let $letter=26;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($letter)
{
eval insert into t2 values (char(ascii('A')+$letter));
dec $letter;
}
+commit;
+--enable_query_log
insert into t2 values ('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'),('0');
select count(*) from t2;
select * from t2;
@@ -55,12 +58,15 @@ partition pa36 values less than (37)
show create table t3;
let $letter=36;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($letter)
{
#eval insert into t3 values ($letter);
dec $letter;
}
+commit;
+--enable_query_log
select count(*) from t3;
select * from t3;
drop table t3;
=== modified file 'mysql-test/suite/parts/inc/partition_float.inc'
--- a/mysql-test/suite/parts/inc/partition_float.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_float.inc 2009-10-28 07:52:34 +0000
@@ -28,6 +28,7 @@ delete from t2;
let $count=$maxrows;
--echo $maxrows*3 inserts;
--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values ($count);
@@ -35,6 +36,7 @@ eval insert into t2 values ($count+0.33)
eval insert into t2 values ($count+0.75);
dec $count;
}
+commit;
--enable_query_log
select count(*) from t2;
drop table t2;
@@ -55,6 +57,8 @@ partition pa10 values less than (10)
show create table t3;
let $count=9;
--echo $count*3 inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values ($count);
@@ -62,6 +66,8 @@ eval insert into t3 values ($count+0.33)
eval insert into t3 values ($count+0.75);
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t3;
select * from t3;
drop table t3;
@@ -75,6 +81,8 @@ partition pa10 values in (7,8,9,10)
show create table t4;
let $count=9;
--echo $count*3 inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values ($count);
@@ -82,6 +90,8 @@ eval insert into t4 values ($count+0.33)
eval insert into t4 values ($count+0.75);
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t4;
select * from t4;
drop table t4;
=== modified file 'mysql-test/suite/parts/inc/partition_int.inc'
--- a/mysql-test/suite/parts/inc/partition_int.inc 2008-07-01 18:38:15 +0000
+++ b/mysql-test/suite/parts/inc/partition_int.inc 2009-10-28 07:52:34 +0000
@@ -28,11 +28,13 @@ delete from t2;
let $count=$maxrows;
--echo $count inserts;
--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values ($count);
dec $count;
}
+commit;
--enable_query_log
select count(*) from t2;
drop table t2;
=== modified file 'mysql-test/suite/parts/inc/partition_mediumint.inc'
--- a/mysql-test/suite/parts/inc/partition_mediumint.inc 2008-07-01 18:38:15 +0000
+++ b/mysql-test/suite/parts/inc/partition_mediumint.inc 2009-10-28 07:52:34 +0000
@@ -28,11 +28,13 @@ delete from t2;
let $count=$maxrows;
--echo $maxrows inserts;
--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values ($count);
dec $count;
}
+commit;
--enable_query_log
select count(*) from t2;
drop table t2;
=== modified file 'mysql-test/suite/parts/inc/partition_smallint.inc'
--- a/mysql-test/suite/parts/inc/partition_smallint.inc 2008-07-01 18:38:15 +0000
+++ b/mysql-test/suite/parts/inc/partition_smallint.inc 2009-10-28 07:52:34 +0000
@@ -28,11 +28,13 @@ delete from t2;
let $count=$maxrows;
--echo $count inserts;
--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values ($count);
dec $count;
}
+commit;
--enable_query_log
select count(*) from t2;
drop table t2;
=== modified file 'mysql-test/suite/parts/inc/partition_time.inc'
--- a/mysql-test/suite/parts/inc/partition_time.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_time.inc 2009-10-28 07:52:34 +0000
@@ -23,12 +23,15 @@ select * from t2;
delete from t2;
let $count=59;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values (000100+$count);
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t2;
select * from t2;
drop table t2;
@@ -44,11 +47,15 @@ partition quarter4 values less than (61)
show create table t3;
let $count=59;
--echo $count inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values (100000+$count);
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t3;
select * from t3;
drop table t3;
@@ -64,11 +71,15 @@ partition quarter4 values in (46,47,48,4
show create table t4;
let $count=59;
--echo $count inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values (100000+$count);
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t4;
select * from t4;
drop table t4;
=== modified file 'mysql-test/suite/parts/inc/partition_timestamp.inc'
--- a/mysql-test/suite/parts/inc/partition_timestamp.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_timestamp.inc 2009-10-28 07:52:34 +0000
@@ -23,12 +23,15 @@ select * from t2;
delete from t2;
let $count=59;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values (19710101000000+$count);
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t2;
select * from t2;
drop table t2;
@@ -44,11 +47,15 @@ partition quarter4 values less than (13)
show create table t3;
let $count=12;
--echo $count inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values (date_add('1970-01-01 00:00:00',interval $count-1 month));
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t3;
select * from t3;
drop table t3;
@@ -64,11 +71,15 @@ partition quarter4 values in (10,11,12)
show create table t4;
let $count=12;
--echo $count inserts;
+--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values (date_add('1970-01-01 00:00:00',interval $count-1 month));
dec $count;
}
+commit;
+--enable_query_log
select count(*) from t4;
select * from t4;
drop table t4;
=== modified file 'mysql-test/suite/parts/inc/partition_tinyint.inc'
--- a/mysql-test/suite/parts/inc/partition_tinyint.inc 2008-07-01 18:38:15 +0000
+++ b/mysql-test/suite/parts/inc/partition_tinyint.inc 2009-10-28 07:52:34 +0000
@@ -28,11 +28,13 @@ delete from t2;
let $count=255;
--echo 255 inserts;
--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values ($count);
dec $count;
}
+commit;
--enable_query_log
select count(*) from t2;
drop table t2;
=== modified file 'mysql-test/suite/parts/inc/partition_varbinary.inc'
--- a/mysql-test/suite/parts/inc/partition_varbinary.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_varbinary.inc 2009-10-28 07:52:34 +0000
@@ -21,13 +21,16 @@ show create table t2;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values (repeat(char(ascii('a')+$letter),$count*$count));
dec $count;
inc $letter;
}
+commit;
+--enable_query_log
select count(*) from t2;
select * from t2;
drop table t2;
@@ -47,13 +50,16 @@ show create table t3;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values (repeat(char(ascii('a')+$letter),$count+54));
dec $count;
inc $letter;
}
+commit;
+--enable_query_log
select count(*) from t3;
select hex(a) from t3;
drop table t3;
@@ -71,13 +77,16 @@ show create table t4;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values (repeat(char(ascii('a')+$letter),$count+54));
dec $count;
inc $letter;
}
+commit;
+--enable_query_log
select count(*) from t4;
select hex(a) from t4;
drop table t4;
=== modified file 'mysql-test/suite/parts/inc/partition_varchar.inc'
--- a/mysql-test/suite/parts/inc/partition_varchar.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_varchar.inc 2009-10-28 07:52:34 +0000
@@ -21,13 +21,16 @@ show create table t2;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values (repeat(char(ascii('a')+$letter),$count*$count));
dec $count;
inc $letter;
}
+commit;
+--enable_query_log
select count(*) from t2;
select * from t2;
drop table t2;
@@ -46,13 +49,16 @@ show create table t3;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t3 values (repeat(char(ascii('a')+$letter),$count+54));
dec $count;
inc $letter;
}
+commit;
+--enable_query_log
select count(*) from t3;
select * from t3;
drop table t3;
@@ -70,13 +76,16 @@ show create table t4;
let $count=26;
let $letter=0;
--echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
while ($count)
{
eval insert into t4 values (repeat(char(ascii('a')+$letter),$count+54));
dec $count;
inc $letter;
}
+commit;
+--enable_query_log
select count(*) from t4;
select * from t4;
drop table t4;
=== modified file 'mysql-test/suite/parts/inc/partition_year.inc'
--- a/mysql-test/suite/parts/inc/partition_year.inc 2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_year.inc 2009-10-28 07:52:34 +0000
@@ -24,11 +24,13 @@ delete from t2;
let $count=255;
--echo $count inserts;
--disable_query_log
+begin;
while ($count)
{
eval insert into t2 values (1901+$count);
dec $count;
}
+commit;
--enable_query_log
select count(*) from t2;
select * from t2;
=== modified file 'mysql-test/suite/parts/r/partition_char_innodb.result'
--- a/mysql-test/suite/parts/r/partition_char_innodb.result 2008-11-04 07:43:21 +0000
+++ b/mysql-test/suite/parts/r/partition_char_innodb.result 2009-10-28 07:52:34 +0000
@@ -45,32 +45,6 @@ t2 CREATE TABLE `t2` (
/*!50100 PARTITION BY KEY (a)
PARTITIONS 27 */
26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26+54));
-insert into t2 values (repeat(char(ascii('a')+1),25+54));
-insert into t2 values (repeat(char(ascii('a')+2),24+54));
-insert into t2 values (repeat(char(ascii('a')+3),23+54));
-insert into t2 values (repeat(char(ascii('a')+4),22+54));
-insert into t2 values (repeat(char(ascii('a')+5),21+54));
-insert into t2 values (repeat(char(ascii('a')+6),20+54));
-insert into t2 values (repeat(char(ascii('a')+7),19+54));
-insert into t2 values (repeat(char(ascii('a')+8),18+54));
-insert into t2 values (repeat(char(ascii('a')+9),17+54));
-insert into t2 values (repeat(char(ascii('a')+10),16+54));
-insert into t2 values (repeat(char(ascii('a')+11),15+54));
-insert into t2 values (repeat(char(ascii('a')+12),14+54));
-insert into t2 values (repeat(char(ascii('a')+13),13+54));
-insert into t2 values (repeat(char(ascii('a')+14),12+54));
-insert into t2 values (repeat(char(ascii('a')+15),11+54));
-insert into t2 values (repeat(char(ascii('a')+16),10+54));
-insert into t2 values (repeat(char(ascii('a')+17),9+54));
-insert into t2 values (repeat(char(ascii('a')+18),8+54));
-insert into t2 values (repeat(char(ascii('a')+19),7+54));
-insert into t2 values (repeat(char(ascii('a')+20),6+54));
-insert into t2 values (repeat(char(ascii('a')+21),5+54));
-insert into t2 values (repeat(char(ascii('a')+22),4+54));
-insert into t2 values (repeat(char(ascii('a')+23),3+54));
-insert into t2 values (repeat(char(ascii('a')+24),2+54));
-insert into t2 values (repeat(char(ascii('a')+25),1+54));
select count(*) from t2;
count(*)
26
@@ -153,32 +127,6 @@ t2 CREATE TABLE `t2` (
/*!50100 PARTITION BY KEY (a)
PARTITIONS 27 */
26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26+54));
-insert into t2 values (repeat(char(ascii('a')+1),25+54));
-insert into t2 values (repeat(char(ascii('a')+2),24+54));
-insert into t2 values (repeat(char(ascii('a')+3),23+54));
-insert into t2 values (repeat(char(ascii('a')+4),22+54));
-insert into t2 values (repeat(char(ascii('a')+5),21+54));
-insert into t2 values (repeat(char(ascii('a')+6),20+54));
-insert into t2 values (repeat(char(ascii('a')+7),19+54));
-insert into t2 values (repeat(char(ascii('a')+8),18+54));
-insert into t2 values (repeat(char(ascii('a')+9),17+54));
-insert into t2 values (repeat(char(ascii('a')+10),16+54));
-insert into t2 values (repeat(char(ascii('a')+11),15+54));
-insert into t2 values (repeat(char(ascii('a')+12),14+54));
-insert into t2 values (repeat(char(ascii('a')+13),13+54));
-insert into t2 values (repeat(char(ascii('a')+14),12+54));
-insert into t2 values (repeat(char(ascii('a')+15),11+54));
-insert into t2 values (repeat(char(ascii('a')+16),10+54));
-insert into t2 values (repeat(char(ascii('a')+17),9+54));
-insert into t2 values (repeat(char(ascii('a')+18),8+54));
-insert into t2 values (repeat(char(ascii('a')+19),7+54));
-insert into t2 values (repeat(char(ascii('a')+20),6+54));
-insert into t2 values (repeat(char(ascii('a')+21),5+54));
-insert into t2 values (repeat(char(ascii('a')+22),4+54));
-insert into t2 values (repeat(char(ascii('a')+23),3+54));
-insert into t2 values (repeat(char(ascii('a')+24),2+54));
-insert into t2 values (repeat(char(ascii('a')+25),1+54));
select count(*) from t2;
count(*)
26
@@ -258,32 +206,6 @@ t2 CREATE TABLE `t2` (
/*!50100 PARTITION BY KEY (a)
PARTITIONS 27 */
26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26*26));
-insert into t2 values (repeat(char(ascii('a')+1),25*25));
-insert into t2 values (repeat(char(ascii('a')+2),24*24));
-insert into t2 values (repeat(char(ascii('a')+3),23*23));
-insert into t2 values (repeat(char(ascii('a')+4),22*22));
-insert into t2 values (repeat(char(ascii('a')+5),21*21));
-insert into t2 values (repeat(char(ascii('a')+6),20*20));
-insert into t2 values (repeat(char(ascii('a')+7),19*19));
-insert into t2 values (repeat(char(ascii('a')+8),18*18));
-insert into t2 values (repeat(char(ascii('a')+9),17*17));
-insert into t2 values (repeat(char(ascii('a')+10),16*16));
-insert into t2 values (repeat(char(ascii('a')+11),15*15));
-insert into t2 values (repeat(char(ascii('a')+12),14*14));
-insert into t2 values (repeat(char(ascii('a')+13),13*13));
-insert into t2 values (repeat(char(ascii('a')+14),12*12));
-insert into t2 values (repeat(char(ascii('a')+15),11*11));
-insert into t2 values (repeat(char(ascii('a')+16),10*10));
-insert into t2 values (repeat(char(ascii('a')+17),9*9));
-insert into t2 values (repeat(char(ascii('a')+18),8*8));
-insert into t2 values (repeat(char(ascii('a')+19),7*7));
-insert into t2 values (repeat(char(ascii('a')+20),6*6));
-insert into t2 values (repeat(char(ascii('a')+21),5*5));
-insert into t2 values (repeat(char(ascii('a')+22),4*4));
-insert into t2 values (repeat(char(ascii('a')+23),3*3));
-insert into t2 values (repeat(char(ascii('a')+24),2*2));
-insert into t2 values (repeat(char(ascii('a')+25),1*1));
select count(*) from t2;
count(*)
26
@@ -363,32 +285,6 @@ t2 CREATE TABLE `t2` (
/*!50100 PARTITION BY KEY (a)
PARTITIONS 30 */
26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26*26));
-insert into t2 values (repeat(char(ascii('a')+1),25*25));
-insert into t2 values (repeat(char(ascii('a')+2),24*24));
-insert into t2 values (repeat(char(ascii('a')+3),23*23));
-insert into t2 values (repeat(char(ascii('a')+4),22*22));
-insert into t2 values (repeat(char(ascii('a')+5),21*21));
-insert into t2 values (repeat(char(ascii('a')+6),20*20));
-insert into t2 values (repeat(char(ascii('a')+7),19*19));
-insert into t2 values (repeat(char(ascii('a')+8),18*18));
-insert into t2 values (repeat(char(ascii('a')+9),17*17));
-insert into t2 values (repeat(char(ascii('a')+10),16*16));
-insert into t2 values (repeat(char(ascii('a')+11),15*15));
-insert into t2 values (repeat(char(ascii('a')+12),14*14));
-insert into t2 values (repeat(char(ascii('a')+13),13*13));
-insert into t2 values (repeat(char(ascii('a')+14),12*12));
-insert into t2 values (repeat(char(ascii('a')+15),11*11));
-insert into t2 values (repeat(char(ascii('a')+16),10*10));
-insert into t2 values (repeat(char(ascii('a')+17),9*9));
-insert into t2 values (repeat(char(ascii('a')+18),8*8));
-insert into t2 values (repeat(char(ascii('a')+19),7*7));
-insert into t2 values (repeat(char(ascii('a')+20),6*6));
-insert into t2 values (repeat(char(ascii('a')+21),5*5));
-insert into t2 values (repeat(char(ascii('a')+22),4*4));
-insert into t2 values (repeat(char(ascii('a')+23),3*3));
-insert into t2 values (repeat(char(ascii('a')+24),2*2));
-insert into t2 values (repeat(char(ascii('a')+25),1*1));
select count(*) from t2;
count(*)
26
@@ -479,34 +375,8 @@ t2 CREATE TABLE `t2` (
/*!50100 PARTITION BY KEY (a)
PARTITIONS 27 */
0 inserts;
-insert into t2 values (char(ascii('A')+26));
Warnings:
Warning 1265 Data truncated for column 'a' at row 1
-insert into t2 values (char(ascii('A')+25));
-insert into t2 values (char(ascii('A')+24));
-insert into t2 values (char(ascii('A')+23));
-insert into t2 values (char(ascii('A')+22));
-insert into t2 values (char(ascii('A')+21));
-insert into t2 values (char(ascii('A')+20));
-insert into t2 values (char(ascii('A')+19));
-insert into t2 values (char(ascii('A')+18));
-insert into t2 values (char(ascii('A')+17));
-insert into t2 values (char(ascii('A')+16));
-insert into t2 values (char(ascii('A')+15));
-insert into t2 values (char(ascii('A')+14));
-insert into t2 values (char(ascii('A')+13));
-insert into t2 values (char(ascii('A')+12));
-insert into t2 values (char(ascii('A')+11));
-insert into t2 values (char(ascii('A')+10));
-insert into t2 values (char(ascii('A')+9));
-insert into t2 values (char(ascii('A')+8));
-insert into t2 values (char(ascii('A')+7));
-insert into t2 values (char(ascii('A')+6));
-insert into t2 values (char(ascii('A')+5));
-insert into t2 values (char(ascii('A')+4));
-insert into t2 values (char(ascii('A')+3));
-insert into t2 values (char(ascii('A')+2));
-insert into t2 values (char(ascii('A')+1));
insert into t2 values ('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'),('0');
select count(*) from t2;
count(*)
=== modified file 'mysql-test/suite/parts/r/partition_char_myisam.result'
--- a/mysql-test/suite/parts/r/partition_char_myisam.result 2008-11-04 07:43:21 +0000
+++ b/mysql-test/suite/parts/r/partition_char_myisam.result 2009-10-28 07:52:34 +0000
@@ -45,32 +45,6 @@ t2 CREATE TABLE `t2` (
/*!50100 PARTITION BY KEY (a)
PARTITIONS 27 */
26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26+54));
-insert into t2 values (repeat(char(ascii('a')+1),25+54));
-insert into t2 values (repeat(char(ascii('a')+2),24+54));
-insert into t2 values (repeat(char(ascii('a')+3),23+54));
-insert into t2 values (repeat(char(ascii('a')+4),22+54));
-insert into t2 values (repeat(char(ascii('a')+5),21+54));
-insert into t2 values (repeat(char(ascii('a')+6),20+54));
-insert into t2 values (repeat(char(ascii('a')+7),19+54));
-insert into t2 values (repeat(char(ascii('a')+8),18+54));
-insert into t2 values (repeat(char(ascii('a')+9),17+54));
-insert into t2 values (repeat(char(ascii('a')+10),16+54));
-insert into t2 values (repeat(char(ascii('a')+11),15+54));
-insert into t2 values (repeat(char(ascii('a')+12),14+54));
-insert into t2 values (repeat(char(ascii('a')+13),13+54));
-insert into t2 values (repeat(char(ascii('a')+14),12+54));
-insert into t2 values (repeat(char(ascii('a')+15),11+54));
-insert into t2 values (repeat(char(ascii('a')+16),10+54));
-insert into t2 values (repeat(char(ascii('a')+17),9+54));
-insert into t2 values (repeat(char(ascii('a')+18),8+54));
-insert into t2 values (repeat(char(ascii('a')+19),7+54));
-insert into t2 values (repeat(char(ascii('a')+20),6+54));
-insert into t2 values (repeat(char(ascii('a')+21),5+54));
-insert into t2 values (repeat(char(ascii('a')+22),4+54));
-insert into t2 values (repeat(char(ascii('a')+23),3+54));
-insert into t2 values (repeat(char(ascii('a')+24),2+54));
-insert into t2 values (repeat(char(ascii('a')+25),1+54));
select count(*) from t2;
count(*)
26
@@ -153,32 +127,6 @@ t2 CREATE TABLE `t2` (
/*!50100 PARTITION BY KEY (a)
PARTITIONS 27 */
26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26+54));
-insert into t2 values (repeat(char(ascii('a')+1),25+54));
-insert into t2 values (repeat(char(ascii('a')+2),24+54));
-insert into t2 values (repeat(char(ascii('a')+3),23+54));
-insert into t2 values (repeat(char(ascii('a')+4),22+54));
-insert into t2 values (repeat(char(ascii('a')+5),21+54));
-insert into t2 values (repeat(char(ascii('a')+6),20+54));
-insert into t2 values (repeat(char(ascii('a')+7),19+54));
-insert into t2 values (repeat(char(ascii('a')+8),18+54));
-insert into t2 values (repeat(char(ascii('a')+9),17+54));
-insert into t2 values (repeat(char(ascii('a')+10),16+54));
-insert into t2 values (repeat(char(ascii('a')+11),15+54));
-insert into t2 values (repeat(char(ascii('a')+12),14+54));
-insert into t2 values (repeat(char(ascii('a')+13),13+54));
-insert into t2 values (repeat(char(ascii('a')+14),12+54));
-insert into t2 values (repeat(char(ascii('a')+15),11+54));
-insert into t2 values (repeat(char(ascii('a')+16),10+54));
-insert into t2 values (repeat(char(ascii('a')+17),9+54));
-insert into t2 values (repeat(char(ascii('a')+18),8+54));
-insert into t2 values (repeat(char(ascii('a')+19),7+54));
-insert into t2 values (repeat(char(ascii('a')+20),6+54));
-insert into t2 values (repeat(char(ascii('a')+21),5+54));
-insert into t2 values (repeat(char(ascii('a')+22),4+54));
-insert into t2 values (repeat(char(ascii('a')+23),3+54));
-insert into t2 values (repeat(char(ascii('a')+24),2+54));
-insert into t2 values (repeat(char(ascii('a')+25),1+54));
select count(*) from t2;
count(*)
26
@@ -258,32 +206,6 @@ t2 CREATE TABLE `t2` (
/*!50100 PARTITION BY KEY (a)
PARTITIONS 27 */
26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26*26));
-insert into t2 values (repeat(char(ascii('a')+1),25*25));
-insert into t2 values (repeat(char(ascii('a')+2),24*24));
-insert into t2 values (repeat(char(ascii('a')+3),23*23));
-insert into t2 values (repeat(char(ascii('a')+4),22*22));
-insert into t2 values (repeat(char(ascii('a')+5),21*21));
-insert into t2 values (repeat(char(ascii('a')+6),20*20));
-insert into t2 values (repeat(char(ascii('a')+7),19*19));
-insert into t2 values (repeat(char(ascii('a')+8),18*18));
-insert into t2 values (repeat(char(ascii('a')+9),17*17));
-insert into t2 values (repeat(char(ascii('a')+10),16*16));
-insert into t2 values (repeat(char(ascii('a')+11),15*15));
-insert into t2 values (repeat(char(ascii('a')+12),14*14));
-insert into t2 values (repeat(char(ascii('a')+13),13*13));
-insert into t2 values (repeat(char(ascii('a')+14),12*12));
-insert into t2 values (repeat(char(ascii('a')+15),11*11));
-insert into t2 values (repeat(char(ascii('a')+16),10*10));
-insert into t2 values (repeat(char(ascii('a')+17),9*9));
-insert into t2 values (repeat(char(ascii('a')+18),8*8));
-insert into t2 values (repeat(char(ascii('a')+19),7*7));
-insert into t2 values (repeat(char(ascii('a')+20),6*6));
-insert into t2 values (repeat(char(ascii('a')+21),5*5));
-insert into t2 values (repeat(char(ascii('a')+22),4*4));
-insert into t2 values (repeat(char(ascii('a')+23),3*3));
-insert into t2 values (repeat(char(ascii('a')+24),2*2));
-insert into t2 values (repeat(char(ascii('a')+25),1*1));
select count(*) from t2;
count(*)
26
@@ -363,32 +285,6 @@ t2 CREATE TABLE `t2` (
/*!50100 PARTITION BY KEY (a)
PARTITIONS 30 */
26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26*26));
-insert into t2 values (repeat(char(ascii('a')+1),25*25));
-insert into t2 values (repeat(char(ascii('a')+2),24*24));
-insert into t2 values (repeat(char(ascii('a')+3),23*23));
-insert into t2 values (repeat(char(ascii('a')+4),22*22));
-insert into t2 values (repeat(char(ascii('a')+5),21*21));
-insert into t2 values (repeat(char(ascii('a')+6),20*20));
-insert into t2 values (repeat(char(ascii('a')+7),19*19));
-insert into t2 values (repeat(char(ascii('a')+8),18*18));
-insert into t2 values (repeat(char(ascii('a')+9),17*17));
-insert into t2 values (repeat(char(ascii('a')+10),16*16));
-insert into t2 values (repeat(char(ascii('a')+11),15*15));
-insert into t2 values (repeat(char(ascii('a')+12),14*14));
-insert into t2 values (repeat(char(ascii('a')+13),13*13));
-insert into t2 values (repeat(char(ascii('a')+14),12*12));
-insert into t2 values (repeat(char(ascii('a')+15),11*11));
-insert into t2 values (repeat(char(ascii('a')+16),10*10));
-insert into t2 values (repeat(char(ascii('a')+17),9*9));
-insert into t2 values (repeat(char(ascii('a')+18),8*8));
-insert into t2 values (repeat(char(ascii('a')+19),7*7));
-insert into t2 values (repeat(char(ascii('a')+20),6*6));
-insert into t2 values (repeat(char(ascii('a')+21),5*5));
-insert into t2 values (repeat(char(ascii('a')+22),4*4));
-insert into t2 values (repeat(char(ascii('a')+23),3*3));
-insert into t2 values (repeat(char(ascii('a')+24),2*2));
-insert into t2 values (repeat(char(ascii('a')+25),1*1));
select count(*) from t2;
count(*)
26
@@ -479,34 +375,8 @@ t2 CREATE TABLE `t2` (
/*!50100 PARTITION BY KEY (a)
PARTITIONS 27 */
0 inserts;
-insert into t2 values (char(ascii('A')+26));
Warnings:
Warning 1265 Data truncated for column 'a' at row 1
-insert into t2 values (char(ascii('A')+25));
-insert into t2 values (char(ascii('A')+24));
-insert into t2 values (char(ascii('A')+23));
-insert into t2 values (char(ascii('A')+22));
-insert into t2 values (char(ascii('A')+21));
-insert into t2 values (char(ascii('A')+20));
-insert into t2 values (char(ascii('A')+19));
-insert into t2 values (char(ascii('A')+18));
-insert into t2 values (char(ascii('A')+17));
-insert into t2 values (char(ascii('A')+16));
-insert into t2 values (char(ascii('A')+15));
-insert into t2 values (char(ascii('A')+14));
-insert into t2 values (char(ascii('A')+13));
-insert into t2 values (char(ascii('A')+12));
-insert into t2 values (char(ascii('A')+11));
-insert into t2 values (char(ascii('A')+10));
-insert into t2 values (char(ascii('A')+9));
-insert into t2 values (char(ascii('A')+8));
-insert into t2 values (char(ascii('A')+7));
-insert into t2 values (char(ascii('A')+6));
-insert into t2 values (char(ascii('A')+5));
-insert into t2 values (char(ascii('A')+4));
-insert into t2 values (char(ascii('A')+3));
-insert into t2 values (char(ascii('A')+2));
-insert into t2 values (char(ascii('A')+1));
insert into t2 values ('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'),('0');
select count(*) from t2;
count(*)
=== modified file 'mysql-test/suite/parts/r/partition_datetime_innodb.result'
--- a/mysql-test/suite/parts/r/partition_datetime_innodb.result 2008-11-04 07:43:21 +0000
+++ b/mysql-test/suite/parts/r/partition_datetime_innodb.result 2009-10-28 07:52:34 +0000
@@ -60,65 +60,6 @@ a
2020-12-31 10:11:12
delete from t2;
59 inserts;
-insert into t2 values (19710101000000+59);
-insert into t2 values (19710101000000+58);
-insert into t2 values (19710101000000+57);
-insert into t2 values (19710101000000+56);
-insert into t2 values (19710101000000+55);
-insert into t2 values (19710101000000+54);
-insert into t2 values (19710101000000+53);
-insert into t2 values (19710101000000+52);
-insert into t2 values (19710101000000+51);
-insert into t2 values (19710101000000+50);
-insert into t2 values (19710101000000+49);
-insert into t2 values (19710101000000+48);
-insert into t2 values (19710101000000+47);
-insert into t2 values (19710101000000+46);
-insert into t2 values (19710101000000+45);
-insert into t2 values (19710101000000+44);
-insert into t2 values (19710101000000+43);
-insert into t2 values (19710101000000+42);
-insert into t2 values (19710101000000+41);
-insert into t2 values (19710101000000+40);
-insert into t2 values (19710101000000+39);
-insert into t2 values (19710101000000+38);
-insert into t2 values (19710101000000+37);
-insert into t2 values (19710101000000+36);
-insert into t2 values (19710101000000+35);
-insert into t2 values (19710101000000+34);
-insert into t2 values (19710101000000+33);
-insert into t2 values (19710101000000+32);
-insert into t2 values (19710101000000+31);
-insert into t2 values (19710101000000+30);
-insert into t2 values (19710101000000+29);
-insert into t2 values (19710101000000+28);
-insert into t2 values (19710101000000+27);
-insert into t2 values (19710101000000+26);
-insert into t2 values (19710101000000+25);
-insert into t2 values (19710101000000+24);
-insert into t2 values (19710101000000+23);
-insert into t2 values (19710101000000+22);
-insert into t2 values (19710101000000+21);
-insert into t2 values (19710101000000+20);
-insert into t2 values (19710101000000+19);
-insert into t2 values (19710101000000+18);
-insert into t2 values (19710101000000+17);
-insert into t2 values (19710101000000+16);
-insert into t2 values (19710101000000+15);
-insert into t2 values (19710101000000+14);
-insert into t2 values (19710101000000+13);
-insert into t2 values (19710101000000+12);
-insert into t2 values (19710101000000+11);
-insert into t2 values (19710101000000+10);
-insert into t2 values (19710101000000+9);
-insert into t2 values (19710101000000+8);
-insert into t2 values (19710101000000+7);
-insert into t2 values (19710101000000+6);
-insert into t2 values (19710101000000+5);
-insert into t2 values (19710101000000+4);
-insert into t2 values (19710101000000+3);
-insert into t2 values (19710101000000+2);
-insert into t2 values (19710101000000+1);
select count(*) from t2;
count(*)
59
@@ -206,18 +147,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION quarter4 VALUES LESS THAN (13) ENGINE = InnoDB) */
12 inserts;
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 12-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 11-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 10-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 9-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 8-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 7-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 6-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 5-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 4-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 3-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 2-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 1-1 month));
Warnings:
Warning 1264 Out of range value for column 'a' at row 1
select count(*) from t3;
@@ -260,18 +189,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES IN (7,8,9) ENGINE = InnoDB,
PARTITION quarter4 VALUES IN (10,11,12) ENGINE = InnoDB) */
12 inserts;
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 12-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 11-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 10-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 9-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 8-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 7-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 6-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 5-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 4-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 3-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 2-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 1-1 month));
Warnings:
Warning 1264 Out of range value for column 'a' at row 1
select count(*) from t4;
@@ -354,90 +271,6 @@ a
2020-12-31
delete from t2;
28 inserts;
-insert into t2 values (19700101+28-1);
-insert into t2 values (19700201+28-1);
-insert into t2 values (19700301+28-1);
-insert into t2 values (19700101+27-1);
-insert into t2 values (19700201+27-1);
-insert into t2 values (19700301+27-1);
-insert into t2 values (19700101+26-1);
-insert into t2 values (19700201+26-1);
-insert into t2 values (19700301+26-1);
-insert into t2 values (19700101+25-1);
-insert into t2 values (19700201+25-1);
-insert into t2 values (19700301+25-1);
-insert into t2 values (19700101+24-1);
-insert into t2 values (19700201+24-1);
-insert into t2 values (19700301+24-1);
-insert into t2 values (19700101+23-1);
-insert into t2 values (19700201+23-1);
-insert into t2 values (19700301+23-1);
-insert into t2 values (19700101+22-1);
-insert into t2 values (19700201+22-1);
-insert into t2 values (19700301+22-1);
-insert into t2 values (19700101+21-1);
-insert into t2 values (19700201+21-1);
-insert into t2 values (19700301+21-1);
-insert into t2 values (19700101+20-1);
-insert into t2 values (19700201+20-1);
-insert into t2 values (19700301+20-1);
-insert into t2 values (19700101+19-1);
-insert into t2 values (19700201+19-1);
-insert into t2 values (19700301+19-1);
-insert into t2 values (19700101+18-1);
-insert into t2 values (19700201+18-1);
-insert into t2 values (19700301+18-1);
-insert into t2 values (19700101+17-1);
-insert into t2 values (19700201+17-1);
-insert into t2 values (19700301+17-1);
-insert into t2 values (19700101+16-1);
-insert into t2 values (19700201+16-1);
-insert into t2 values (19700301+16-1);
-insert into t2 values (19700101+15-1);
-insert into t2 values (19700201+15-1);
-insert into t2 values (19700301+15-1);
-insert into t2 values (19700101+14-1);
-insert into t2 values (19700201+14-1);
-insert into t2 values (19700301+14-1);
-insert into t2 values (19700101+13-1);
-insert into t2 values (19700201+13-1);
-insert into t2 values (19700301+13-1);
-insert into t2 values (19700101+12-1);
-insert into t2 values (19700201+12-1);
-insert into t2 values (19700301+12-1);
-insert into t2 values (19700101+11-1);
-insert into t2 values (19700201+11-1);
-insert into t2 values (19700301+11-1);
-insert into t2 values (19700101+10-1);
-insert into t2 values (19700201+10-1);
-insert into t2 values (19700301+10-1);
-insert into t2 values (19700101+9-1);
-insert into t2 values (19700201+9-1);
-insert into t2 values (19700301+9-1);
-insert into t2 values (19700101+8-1);
-insert into t2 values (19700201+8-1);
-insert into t2 values (19700301+8-1);
-insert into t2 values (19700101+7-1);
-insert into t2 values (19700201+7-1);
-insert into t2 values (19700301+7-1);
-insert into t2 values (19700101+6-1);
-insert into t2 values (19700201+6-1);
-insert into t2 values (19700301+6-1);
-insert into t2 values (19700101+5-1);
-insert into t2 values (19700201+5-1);
-insert into t2 values (19700301+5-1);
-insert into t2 values (19700101+4-1);
-insert into t2 values (19700201+4-1);
-insert into t2 values (19700301+4-1);
-insert into t2 values (19700101+3-1);
-insert into t2 values (19700201+3-1);
-insert into t2 values (19700301+3-1);
-insert into t2 values (19700101+2-1);
-insert into t2 values (19700201+2-1);
-insert into t2 values (19700301+2-1);
-insert into t2 values (19700101+1-1);
-insert into t2 values (19700201+1-1);
-insert into t2 values (19700301+1-1);
select count(*) from t2;
count(*)
84
@@ -550,18 +383,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION quarter4 VALUES LESS THAN (13) ENGINE = InnoDB) */
12 inserts;
-insert into t3 values (adddate(19700101,interval 12-1 month));
-insert into t3 values (adddate(19700101,interval 11-1 month));
-insert into t3 values (adddate(19700101,interval 10-1 month));
-insert into t3 values (adddate(19700101,interval 9-1 month));
-insert into t3 values (adddate(19700101,interval 8-1 month));
-insert into t3 values (adddate(19700101,interval 7-1 month));
-insert into t3 values (adddate(19700101,interval 6-1 month));
-insert into t3 values (adddate(19700101,interval 5-1 month));
-insert into t3 values (adddate(19700101,interval 4-1 month));
-insert into t3 values (adddate(19700101,interval 3-1 month));
-insert into t3 values (adddate(19700101,interval 2-1 month));
-insert into t3 values (adddate(19700101,interval 1-1 month));
select count(*) from t3;
count(*)
12
@@ -602,18 +423,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES IN (7,8,9) ENGINE = InnoDB,
PARTITION quarter4 VALUES IN (10,11,12) ENGINE = InnoDB) */
12 inserts;
-insert into t4 values (adddate(19700101,interval 12-1 month));
-insert into t4 values (adddate(19700101,interval 11-1 month));
-insert into t4 values (adddate(19700101,interval 10-1 month));
-insert into t4 values (adddate(19700101,interval 9-1 month));
-insert into t4 values (adddate(19700101,interval 8-1 month));
-insert into t4 values (adddate(19700101,interval 7-1 month));
-insert into t4 values (adddate(19700101,interval 6-1 month));
-insert into t4 values (adddate(19700101,interval 5-1 month));
-insert into t4 values (adddate(19700101,interval 4-1 month));
-insert into t4 values (adddate(19700101,interval 3-1 month));
-insert into t4 values (adddate(19700101,interval 2-1 month));
-insert into t4 values (adddate(19700101,interval 1-1 month));
select count(*) from t4;
count(*)
12
@@ -694,65 +503,6 @@ a
14:15:16
delete from t2;
59 inserts;
-insert into t2 values (000100+59);
-insert into t2 values (000100+58);
-insert into t2 values (000100+57);
-insert into t2 values (000100+56);
-insert into t2 values (000100+55);
-insert into t2 values (000100+54);
-insert into t2 values (000100+53);
-insert into t2 values (000100+52);
-insert into t2 values (000100+51);
-insert into t2 values (000100+50);
-insert into t2 values (000100+49);
-insert into t2 values (000100+48);
-insert into t2 values (000100+47);
-insert into t2 values (000100+46);
-insert into t2 values (000100+45);
-insert into t2 values (000100+44);
-insert into t2 values (000100+43);
-insert into t2 values (000100+42);
-insert into t2 values (000100+41);
-insert into t2 values (000100+40);
-insert into t2 values (000100+39);
-insert into t2 values (000100+38);
-insert into t2 values (000100+37);
-insert into t2 values (000100+36);
-insert into t2 values (000100+35);
-insert into t2 values (000100+34);
-insert into t2 values (000100+33);
-insert into t2 values (000100+32);
-insert into t2 values (000100+31);
-insert into t2 values (000100+30);
-insert into t2 values (000100+29);
-insert into t2 values (000100+28);
-insert into t2 values (000100+27);
-insert into t2 values (000100+26);
-insert into t2 values (000100+25);
-insert into t2 values (000100+24);
-insert into t2 values (000100+23);
-insert into t2 values (000100+22);
-insert into t2 values (000100+21);
-insert into t2 values (000100+20);
-insert into t2 values (000100+19);
-insert into t2 values (000100+18);
-insert into t2 values (000100+17);
-insert into t2 values (000100+16);
-insert into t2 values (000100+15);
-insert into t2 values (000100+14);
-insert into t2 values (000100+13);
-insert into t2 values (000100+12);
-insert into t2 values (000100+11);
-insert into t2 values (000100+10);
-insert into t2 values (000100+9);
-insert into t2 values (000100+8);
-insert into t2 values (000100+7);
-insert into t2 values (000100+6);
-insert into t2 values (000100+5);
-insert into t2 values (000100+4);
-insert into t2 values (000100+3);
-insert into t2 values (000100+2);
-insert into t2 values (000100+1);
select count(*) from t2;
count(*)
59
@@ -840,65 +590,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES LESS THAN (46) ENGINE = InnoDB,
PARTITION quarter4 VALUES LESS THAN (61) ENGINE = InnoDB) */
59 inserts;
-insert into t3 values (100000+59);
-insert into t3 values (100000+58);
-insert into t3 values (100000+57);
-insert into t3 values (100000+56);
-insert into t3 values (100000+55);
-insert into t3 values (100000+54);
-insert into t3 values (100000+53);
-insert into t3 values (100000+52);
-insert into t3 values (100000+51);
-insert into t3 values (100000+50);
-insert into t3 values (100000+49);
-insert into t3 values (100000+48);
-insert into t3 values (100000+47);
-insert into t3 values (100000+46);
-insert into t3 values (100000+45);
-insert into t3 values (100000+44);
-insert into t3 values (100000+43);
-insert into t3 values (100000+42);
-insert into t3 values (100000+41);
-insert into t3 values (100000+40);
-insert into t3 values (100000+39);
-insert into t3 values (100000+38);
-insert into t3 values (100000+37);
-insert into t3 values (100000+36);
-insert into t3 values (100000+35);
-insert into t3 values (100000+34);
-insert into t3 values (100000+33);
-insert into t3 values (100000+32);
-insert into t3 values (100000+31);
-insert into t3 values (100000+30);
-insert into t3 values (100000+29);
-insert into t3 values (100000+28);
-insert into t3 values (100000+27);
-insert into t3 values (100000+26);
-insert into t3 values (100000+25);
-insert into t3 values (100000+24);
-insert into t3 values (100000+23);
-insert into t3 values (100000+22);
-insert into t3 values (100000+21);
-insert into t3 values (100000+20);
-insert into t3 values (100000+19);
-insert into t3 values (100000+18);
-insert into t3 values (100000+17);
-insert into t3 values (100000+16);
-insert into t3 values (100000+15);
-insert into t3 values (100000+14);
-insert into t3 values (100000+13);
-insert into t3 values (100000+12);
-insert into t3 values (100000+11);
-insert into t3 values (100000+10);
-insert into t3 values (100000+9);
-insert into t3 values (100000+8);
-insert into t3 values (100000+7);
-insert into t3 values (100000+6);
-insert into t3 values (100000+5);
-insert into t3 values (100000+4);
-insert into t3 values (100000+3);
-insert into t3 values (100000+2);
-insert into t3 values (100000+1);
select count(*) from t3;
count(*)
59
@@ -986,65 +677,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES IN (31,32,33,34,35,36,37,38,39,40,41,42,43,44,45) ENGINE = InnoDB,
PARTITION quarter4 VALUES IN (46,47,48,49,50,51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */
59 inserts;
-insert into t4 values (100000+59);
-insert into t4 values (100000+58);
-insert into t4 values (100000+57);
-insert into t4 values (100000+56);
-insert into t4 values (100000+55);
-insert into t4 values (100000+54);
-insert into t4 values (100000+53);
-insert into t4 values (100000+52);
-insert into t4 values (100000+51);
-insert into t4 values (100000+50);
-insert into t4 values (100000+49);
-insert into t4 values (100000+48);
-insert into t4 values (100000+47);
-insert into t4 values (100000+46);
-insert into t4 values (100000+45);
-insert into t4 values (100000+44);
-insert into t4 values (100000+43);
-insert into t4 values (100000+42);
-insert into t4 values (100000+41);
-insert into t4 values (100000+40);
-insert into t4 values (100000+39);
-insert into t4 values (100000+38);
-insert into t4 values (100000+37);
-insert into t4 values (100000+36);
-insert into t4 values (100000+35);
-insert into t4 values (100000+34);
-insert into t4 values (100000+33);
-insert into t4 values (100000+32);
-insert into t4 values (100000+31);
-insert into t4 values (100000+30);
-insert into t4 values (100000+29);
-insert into t4 values (100000+28);
-insert into t4 values (100000+27);
-insert into t4 values (100000+26);
-insert into t4 values (100000+25);
-insert into t4 values (100000+24);
-insert into t4 values (100000+23);
-insert into t4 values (100000+22);
-insert into t4 values (100000+21);
-insert into t4 values (100000+20);
-insert into t4 values (100000+19);
-insert into t4 values (100000+18);
-insert into t4 values (100000+17);
-insert into t4 values (100000+16);
-insert into t4 values (100000+15);
-insert into t4 values (100000+14);
-insert into t4 values (100000+13);
-insert into t4 values (100000+12);
-insert into t4 values (100000+11);
-insert into t4 values (100000+10);
-insert into t4 values (100000+9);
-insert into t4 values (100000+8);
-insert into t4 values (100000+7);
-insert into t4 values (100000+6);
-insert into t4 values (100000+5);
-insert into t4 values (100000+4);
-insert into t4 values (100000+3);
-insert into t4 values (100000+2);
-insert into t4 values (100000+1);
select count(*) from t4;
count(*)
59
@@ -1172,65 +804,6 @@ a
2020-12-31 10:11:12
delete from t2;
59 inserts;
-insert into t2 values (19700101000000+59);
-insert into t2 values (19700101000000+58);
-insert into t2 values (19700101000000+57);
-insert into t2 values (19700101000000+56);
-insert into t2 values (19700101000000+55);
-insert into t2 values (19700101000000+54);
-insert into t2 values (19700101000000+53);
-insert into t2 values (19700101000000+52);
-insert into t2 values (19700101000000+51);
-insert into t2 values (19700101000000+50);
-insert into t2 values (19700101000000+49);
-insert into t2 values (19700101000000+48);
-insert into t2 values (19700101000000+47);
-insert into t2 values (19700101000000+46);
-insert into t2 values (19700101000000+45);
-insert into t2 values (19700101000000+44);
-insert into t2 values (19700101000000+43);
-insert into t2 values (19700101000000+42);
-insert into t2 values (19700101000000+41);
-insert into t2 values (19700101000000+40);
-insert into t2 values (19700101000000+39);
-insert into t2 values (19700101000000+38);
-insert into t2 values (19700101000000+37);
-insert into t2 values (19700101000000+36);
-insert into t2 values (19700101000000+35);
-insert into t2 values (19700101000000+34);
-insert into t2 values (19700101000000+33);
-insert into t2 values (19700101000000+32);
-insert into t2 values (19700101000000+31);
-insert into t2 values (19700101000000+30);
-insert into t2 values (19700101000000+29);
-insert into t2 values (19700101000000+28);
-insert into t2 values (19700101000000+27);
-insert into t2 values (19700101000000+26);
-insert into t2 values (19700101000000+25);
-insert into t2 values (19700101000000+24);
-insert into t2 values (19700101000000+23);
-insert into t2 values (19700101000000+22);
-insert into t2 values (19700101000000+21);
-insert into t2 values (19700101000000+20);
-insert into t2 values (19700101000000+19);
-insert into t2 values (19700101000000+18);
-insert into t2 values (19700101000000+17);
-insert into t2 values (19700101000000+16);
-insert into t2 values (19700101000000+15);
-insert into t2 values (19700101000000+14);
-insert into t2 values (19700101000000+13);
-insert into t2 values (19700101000000+12);
-insert into t2 values (19700101000000+11);
-insert into t2 values (19700101000000+10);
-insert into t2 values (19700101000000+9);
-insert into t2 values (19700101000000+8);
-insert into t2 values (19700101000000+7);
-insert into t2 values (19700101000000+6);
-insert into t2 values (19700101000000+5);
-insert into t2 values (19700101000000+4);
-insert into t2 values (19700101000000+3);
-insert into t2 values (19700101000000+2);
-insert into t2 values (19700101000000+1);
select count(*) from t2;
count(*)
59
@@ -1318,18 +891,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION quarter4 VALUES LESS THAN (13) ENGINE = InnoDB) */
12 inserts;
-insert into t3 values (adddate(19700101000000,interval 12-1 month));
-insert into t3 values (adddate(19700101000000,interval 11-1 month));
-insert into t3 values (adddate(19700101000000,interval 10-1 month));
-insert into t3 values (adddate(19700101000000,interval 9-1 month));
-insert into t3 values (adddate(19700101000000,interval 8-1 month));
-insert into t3 values (adddate(19700101000000,interval 7-1 month));
-insert into t3 values (adddate(19700101000000,interval 6-1 month));
-insert into t3 values (adddate(19700101000000,interval 5-1 month));
-insert into t3 values (adddate(19700101000000,interval 4-1 month));
-insert into t3 values (adddate(19700101000000,interval 3-1 month));
-insert into t3 values (adddate(19700101000000,interval 2-1 month));
-insert into t3 values (adddate(19700101000000,interval 1-1 month));
select count(*) from t3;
count(*)
12
@@ -1370,18 +931,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES IN (7,8,9) ENGINE = InnoDB,
PARTITION quarter4 VALUES IN (10,11,12) ENGINE = InnoDB) */
12 inserts;
-insert into t4 values (adddate(19700101000000,interval 12-1 month));
-insert into t4 values (adddate(19700101000000,interval 11-1 month));
-insert into t4 values (adddate(19700101000000,interval 10-1 month));
-insert into t4 values (adddate(19700101000000,interval 9-1 month));
-insert into t4 values (adddate(19700101000000,interval 8-1 month));
-insert into t4 values (adddate(19700101000000,interval 7-1 month));
-insert into t4 values (adddate(19700101000000,interval 6-1 month));
-insert into t4 values (adddate(19700101000000,interval 5-1 month));
-insert into t4 values (adddate(19700101000000,interval 4-1 month));
-insert into t4 values (adddate(19700101000000,interval 3-1 month));
-insert into t4 values (adddate(19700101000000,interval 2-1 month));
-insert into t4 values (adddate(19700101000000,interval 1-1 month));
select count(*) from t4;
count(*)
12
=== modified file 'mysql-test/suite/parts/r/partition_datetime_myisam.result'
--- a/mysql-test/suite/parts/r/partition_datetime_myisam.result 2008-11-04 07:43:21 +0000
+++ b/mysql-test/suite/parts/r/partition_datetime_myisam.result 2009-10-28 07:52:34 +0000
@@ -60,65 +60,6 @@ a
2020-12-31 10:11:12
delete from t2;
59 inserts;
-insert into t2 values (19710101000000+59);
-insert into t2 values (19710101000000+58);
-insert into t2 values (19710101000000+57);
-insert into t2 values (19710101000000+56);
-insert into t2 values (19710101000000+55);
-insert into t2 values (19710101000000+54);
-insert into t2 values (19710101000000+53);
-insert into t2 values (19710101000000+52);
-insert into t2 values (19710101000000+51);
-insert into t2 values (19710101000000+50);
-insert into t2 values (19710101000000+49);
-insert into t2 values (19710101000000+48);
-insert into t2 values (19710101000000+47);
-insert into t2 values (19710101000000+46);
-insert into t2 values (19710101000000+45);
-insert into t2 values (19710101000000+44);
-insert into t2 values (19710101000000+43);
-insert into t2 values (19710101000000+42);
-insert into t2 values (19710101000000+41);
-insert into t2 values (19710101000000+40);
-insert into t2 values (19710101000000+39);
-insert into t2 values (19710101000000+38);
-insert into t2 values (19710101000000+37);
-insert into t2 values (19710101000000+36);
-insert into t2 values (19710101000000+35);
-insert into t2 values (19710101000000+34);
-insert into t2 values (19710101000000+33);
-insert into t2 values (19710101000000+32);
-insert into t2 values (19710101000000+31);
-insert into t2 values (19710101000000+30);
-insert into t2 values (19710101000000+29);
-insert into t2 values (19710101000000+28);
-insert into t2 values (19710101000000+27);
-insert into t2 values (19710101000000+26);
-insert into t2 values (19710101000000+25);
-insert into t2 values (19710101000000+24);
-insert into t2 values (19710101000000+23);
-insert into t2 values (19710101000000+22);
-insert into t2 values (19710101000000+21);
-insert into t2 values (19710101000000+20);
-insert into t2 values (19710101000000+19);
-insert into t2 values (19710101000000+18);
-insert into t2 values (19710101000000+17);
-insert into t2 values (19710101000000+16);
-insert into t2 values (19710101000000+15);
-insert into t2 values (19710101000000+14);
-insert into t2 values (19710101000000+13);
-insert into t2 values (19710101000000+12);
-insert into t2 values (19710101000000+11);
-insert into t2 values (19710101000000+10);
-insert into t2 values (19710101000000+9);
-insert into t2 values (19710101000000+8);
-insert into t2 values (19710101000000+7);
-insert into t2 values (19710101000000+6);
-insert into t2 values (19710101000000+5);
-insert into t2 values (19710101000000+4);
-insert into t2 values (19710101000000+3);
-insert into t2 values (19710101000000+2);
-insert into t2 values (19710101000000+1);
select count(*) from t2;
count(*)
59
@@ -206,18 +147,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES LESS THAN (10) ENGINE = MyISAM,
PARTITION quarter4 VALUES LESS THAN (13) ENGINE = MyISAM) */
12 inserts;
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 12-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 11-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 10-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 9-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 8-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 7-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 6-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 5-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 4-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 3-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 2-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 1-1 month));
Warnings:
Warning 1264 Out of range value for column 'a' at row 1
select count(*) from t3;
@@ -260,18 +189,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES IN (7,8,9) ENGINE = MyISAM,
PARTITION quarter4 VALUES IN (10,11,12) ENGINE = MyISAM) */
12 inserts;
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 12-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 11-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 10-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 9-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 8-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 7-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 6-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 5-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 4-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 3-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 2-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 1-1 month));
Warnings:
Warning 1264 Out of range value for column 'a' at row 1
select count(*) from t4;
@@ -354,90 +271,6 @@ a
2020-12-31
delete from t2;
28 inserts;
-insert into t2 values (19700101+28-1);
-insert into t2 values (19700201+28-1);
-insert into t2 values (19700301+28-1);
-insert into t2 values (19700101+27-1);
-insert into t2 values (19700201+27-1);
-insert into t2 values (19700301+27-1);
-insert into t2 values (19700101+26-1);
-insert into t2 values (19700201+26-1);
-insert into t2 values (19700301+26-1);
-insert into t2 values (19700101+25-1);
-insert into t2 values (19700201+25-1);
-insert into t2 values (19700301+25-1);
-insert into t2 values (19700101+24-1);
-insert into t2 values (19700201+24-1);
-insert into t2 values (19700301+24-1);
-insert into t2 values (19700101+23-1);
-insert into t2 values (19700201+23-1);
-insert into t2 values (19700301+23-1);
-insert into t2 values (19700101+22-1);
-insert into t2 values (19700201+22-1);
-insert into t2 values (19700301+22-1);
-insert into t2 values (19700101+21-1);
-insert into t2 values (19700201+21-1);
-insert into t2 values (19700301+21-1);
-insert into t2 values (19700101+20-1);
-insert into t2 values (19700201+20-1);
-insert into t2 values (19700301+20-1);
-insert into t2 values (19700101+19-1);
-insert into t2 values (19700201+19-1);
-insert into t2 values (19700301+19-1);
-insert into t2 values (19700101+18-1);
-insert into t2 values (19700201+18-1);
-insert into t2 values (19700301+18-1);
-insert into t2 values (19700101+17-1);
-insert into t2 values (19700201+17-1);
-insert into t2 values (19700301+17-1);
-insert into t2 values (19700101+16-1);
-insert into t2 values (19700201+16-1);
-insert into t2 values (19700301+16-1);
-insert into t2 values (19700101+15-1);
-insert into t2 values (19700201+15-1);
-insert into t2 values (19700301+15-1);
-insert into t2 values (19700101+14-1);
-insert into t2 values (19700201+14-1);
-insert into t2 values (19700301+14-1);
-insert into t2 values (19700101+13-1);
-insert into t2 values (19700201+13-1);
-insert into t2 values (19700301+13-1);
-insert into t2 values (19700101+12-1);
-insert into t2 values (19700201+12-1);
-insert into t2 values (19700301+12-1);
-insert into t2 values (19700101+11-1);
-insert into t2 values (19700201+11-1);
-insert into t2 values (19700301+11-1);
-insert into t2 values (19700101+10-1);
-insert into t2 values (19700201+10-1);
-insert into t2 values (19700301+10-1);
-insert into t2 values (19700101+9-1);
-insert into t2 values (19700201+9-1);
-insert into t2 values (19700301+9-1);
-insert into t2 values (19700101+8-1);
-insert into t2 values (19700201+8-1);
-insert into t2 values (19700301+8-1);
-insert into t2 values (19700101+7-1);
-insert into t2 values (19700201+7-1);
-insert into t2 values (19700301+7-1);
-insert into t2 values (19700101+6-1);
-insert into t2 values (19700201+6-1);
-insert into t2 values (19700301+6-1);
-insert into t2 values (19700101+5-1);
-insert into t2 values (19700201+5-1);
-insert into t2 values (19700301+5-1);
-insert into t2 values (19700101+4-1);
-insert into t2 values (19700201+4-1);
-insert into t2 values (19700301+4-1);
-insert into t2 values (19700101+3-1);
-insert into t2 values (19700201+3-1);
-insert into t2 values (19700301+3-1);
-insert into t2 values (19700101+2-1);
-insert into t2 values (19700201+2-1);
-insert into t2 values (19700301+2-1);
-insert into t2 values (19700101+1-1);
-insert into t2 values (19700201+1-1);
-insert into t2 values (19700301+1-1);
select count(*) from t2;
count(*)
84
@@ -550,18 +383,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES LESS THAN (10) ENGINE = MyISAM,
PARTITION quarter4 VALUES LESS THAN (13) ENGINE = MyISAM) */
12 inserts;
-insert into t3 values (adddate(19700101,interval 12-1 month));
-insert into t3 values (adddate(19700101,interval 11-1 month));
-insert into t3 values (adddate(19700101,interval 10-1 month));
-insert into t3 values (adddate(19700101,interval 9-1 month));
-insert into t3 values (adddate(19700101,interval 8-1 month));
-insert into t3 values (adddate(19700101,interval 7-1 month));
-insert into t3 values (adddate(19700101,interval 6-1 month));
-insert into t3 values (adddate(19700101,interval 5-1 month));
-insert into t3 values (adddate(19700101,interval 4-1 month));
-insert into t3 values (adddate(19700101,interval 3-1 month));
-insert into t3 values (adddate(19700101,interval 2-1 month));
-insert into t3 values (adddate(19700101,interval 1-1 month));
select count(*) from t3;
count(*)
12
@@ -602,18 +423,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES IN (7,8,9) ENGINE = MyISAM,
PARTITION quarter4 VALUES IN (10,11,12) ENGINE = MyISAM) */
12 inserts;
-insert into t4 values (adddate(19700101,interval 12-1 month));
-insert into t4 values (adddate(19700101,interval 11-1 month));
-insert into t4 values (adddate(19700101,interval 10-1 month));
-insert into t4 values (adddate(19700101,interval 9-1 month));
-insert into t4 values (adddate(19700101,interval 8-1 month));
-insert into t4 values (adddate(19700101,interval 7-1 month));
-insert into t4 values (adddate(19700101,interval 6-1 month));
-insert into t4 values (adddate(19700101,interval 5-1 month));
-insert into t4 values (adddate(19700101,interval 4-1 month));
-insert into t4 values (adddate(19700101,interval 3-1 month));
-insert into t4 values (adddate(19700101,interval 2-1 month));
-insert into t4 values (adddate(19700101,interval 1-1 month));
select count(*) from t4;
count(*)
12
@@ -694,65 +503,6 @@ a
14:15:16
delete from t2;
59 inserts;
-insert into t2 values (000100+59);
-insert into t2 values (000100+58);
-insert into t2 values (000100+57);
-insert into t2 values (000100+56);
-insert into t2 values (000100+55);
-insert into t2 values (000100+54);
-insert into t2 values (000100+53);
-insert into t2 values (000100+52);
-insert into t2 values (000100+51);
-insert into t2 values (000100+50);
-insert into t2 values (000100+49);
-insert into t2 values (000100+48);
-insert into t2 values (000100+47);
-insert into t2 values (000100+46);
-insert into t2 values (000100+45);
-insert into t2 values (000100+44);
-insert into t2 values (000100+43);
-insert into t2 values (000100+42);
-insert into t2 values (000100+41);
-insert into t2 values (000100+40);
-insert into t2 values (000100+39);
-insert into t2 values (000100+38);
-insert into t2 values (000100+37);
-insert into t2 values (000100+36);
-insert into t2 values (000100+35);
-insert into t2 values (000100+34);
-insert into t2 values (000100+33);
-insert into t2 values (000100+32);
-insert into t2 values (000100+31);
-insert into t2 values (000100+30);
-insert into t2 values (000100+29);
-insert into t2 values (000100+28);
-insert into t2 values (000100+27);
-insert into t2 values (000100+26);
-insert into t2 values (000100+25);
-insert into t2 values (000100+24);
-insert into t2 values (000100+23);
-insert into t2 values (000100+22);
-insert into t2 values (000100+21);
-insert into t2 values (000100+20);
-insert into t2 values (000100+19);
-insert into t2 values (000100+18);
-insert into t2 values (000100+17);
-insert into t2 values (000100+16);
-insert into t2 values (000100+15);
-insert into t2 values (000100+14);
-insert into t2 values (000100+13);
-insert into t2 values (000100+12);
-insert into t2 values (000100+11);
-insert into t2 values (000100+10);
-insert into t2 values (000100+9);
-insert into t2 values (000100+8);
-insert into t2 values (000100+7);
-insert into t2 values (000100+6);
-insert into t2 values (000100+5);
-insert into t2 values (000100+4);
-insert into t2 values (000100+3);
-insert into t2 values (000100+2);
-insert into t2 values (000100+1);
select count(*) from t2;
count(*)
59
@@ -840,65 +590,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES LESS THAN (46) ENGINE = MyISAM,
PARTITION quarter4 VALUES LESS THAN (61) ENGINE = MyISAM) */
59 inserts;
-insert into t3 values (100000+59);
-insert into t3 values (100000+58);
-insert into t3 values (100000+57);
-insert into t3 values (100000+56);
-insert into t3 values (100000+55);
-insert into t3 values (100000+54);
-insert into t3 values (100000+53);
-insert into t3 values (100000+52);
-insert into t3 values (100000+51);
-insert into t3 values (100000+50);
-insert into t3 values (100000+49);
-insert into t3 values (100000+48);
-insert into t3 values (100000+47);
-insert into t3 values (100000+46);
-insert into t3 values (100000+45);
-insert into t3 values (100000+44);
-insert into t3 values (100000+43);
-insert into t3 values (100000+42);
-insert into t3 values (100000+41);
-insert into t3 values (100000+40);
-insert into t3 values (100000+39);
-insert into t3 values (100000+38);
-insert into t3 values (100000+37);
-insert into t3 values (100000+36);
-insert into t3 values (100000+35);
-insert into t3 values (100000+34);
-insert into t3 values (100000+33);
-insert into t3 values (100000+32);
-insert into t3 values (100000+31);
-insert into t3 values (100000+30);
-insert into t3 values (100000+29);
-insert into t3 values (100000+28);
-insert into t3 values (100000+27);
-insert into t3 values (100000+26);
-insert into t3 values (100000+25);
-insert into t3 values (100000+24);
-insert into t3 values (100000+23);
-insert into t3 values (100000+22);
-insert into t3 values (100000+21);
-insert into t3 values (100000+20);
-insert into t3 values (100000+19);
-insert into t3 values (100000+18);
-insert into t3 values (100000+17);
-insert into t3 values (100000+16);
-insert into t3 values (100000+15);
-insert into t3 values (100000+14);
-insert into t3 values (100000+13);
-insert into t3 values (100000+12);
-insert into t3 values (100000+11);
-insert into t3 values (100000+10);
-insert into t3 values (100000+9);
-insert into t3 values (100000+8);
-insert into t3 values (100000+7);
-insert into t3 values (100000+6);
-insert into t3 values (100000+5);
-insert into t3 values (100000+4);
-insert into t3 values (100000+3);
-insert into t3 values (100000+2);
-insert into t3 values (100000+1);
select count(*) from t3;
count(*)
59
@@ -986,65 +677,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES IN (31,32,33,34,35,36,37,38,39,40,41,42,43,44,45) ENGINE = MyISAM,
PARTITION quarter4 VALUES IN (46,47,48,49,50,51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */
59 inserts;
-insert into t4 values (100000+59);
-insert into t4 values (100000+58);
-insert into t4 values (100000+57);
-insert into t4 values (100000+56);
-insert into t4 values (100000+55);
-insert into t4 values (100000+54);
-insert into t4 values (100000+53);
-insert into t4 values (100000+52);
-insert into t4 values (100000+51);
-insert into t4 values (100000+50);
-insert into t4 values (100000+49);
-insert into t4 values (100000+48);
-insert into t4 values (100000+47);
-insert into t4 values (100000+46);
-insert into t4 values (100000+45);
-insert into t4 values (100000+44);
-insert into t4 values (100000+43);
-insert into t4 values (100000+42);
-insert into t4 values (100000+41);
-insert into t4 values (100000+40);
-insert into t4 values (100000+39);
-insert into t4 values (100000+38);
-insert into t4 values (100000+37);
-insert into t4 values (100000+36);
-insert into t4 values (100000+35);
-insert into t4 values (100000+34);
-insert into t4 values (100000+33);
-insert into t4 values (100000+32);
-insert into t4 values (100000+31);
-insert into t4 values (100000+30);
-insert into t4 values (100000+29);
-insert into t4 values (100000+28);
-insert into t4 values (100000+27);
-insert into t4 values (100000+26);
-insert into t4 values (100000+25);
-insert into t4 values (100000+24);
-insert into t4 values (100000+23);
-insert into t4 values (100000+22);
-insert into t4 values (100000+21);
-insert into t4 values (100000+20);
-insert into t4 values (100000+19);
-insert into t4 values (100000+18);
-insert into t4 values (100000+17);
-insert into t4 values (100000+16);
-insert into t4 values (100000+15);
-insert into t4 values (100000+14);
-insert into t4 values (100000+13);
-insert into t4 values (100000+12);
-insert into t4 values (100000+11);
-insert into t4 values (100000+10);
-insert into t4 values (100000+9);
-insert into t4 values (100000+8);
-insert into t4 values (100000+7);
-insert into t4 values (100000+6);
-insert into t4 values (100000+5);
-insert into t4 values (100000+4);
-insert into t4 values (100000+3);
-insert into t4 values (100000+2);
-insert into t4 values (100000+1);
select count(*) from t4;
count(*)
59
@@ -1172,65 +804,6 @@ a
2020-12-31 10:11:12
delete from t2;
59 inserts;
-insert into t2 values (19700101000000+59);
-insert into t2 values (19700101000000+58);
-insert into t2 values (19700101000000+57);
-insert into t2 values (19700101000000+56);
-insert into t2 values (19700101000000+55);
-insert into t2 values (19700101000000+54);
-insert into t2 values (19700101000000+53);
-insert into t2 values (19700101000000+52);
-insert into t2 values (19700101000000+51);
-insert into t2 values (19700101000000+50);
-insert into t2 values (19700101000000+49);
-insert into t2 values (19700101000000+48);
-insert into t2 values (19700101000000+47);
-insert into t2 values (19700101000000+46);
-insert into t2 values (19700101000000+45);
-insert into t2 values (19700101000000+44);
-insert into t2 values (19700101000000+43);
-insert into t2 values (19700101000000+42);
-insert into t2 values (19700101000000+41);
-insert into t2 values (19700101000000+40);
-insert into t2 values (19700101000000+39);
-insert into t2 values (19700101000000+38);
-insert into t2 values (19700101000000+37);
-insert into t2 values (19700101000000+36);
-insert into t2 values (19700101000000+35);
-insert into t2 values (19700101000000+34);
-insert into t2 values (19700101000000+33);
-insert into t2 values (19700101000000+32);
-insert into t2 values (19700101000000+31);
-insert into t2 values (19700101000000+30);
-insert into t2 values (19700101000000+29);
-insert into t2 values (19700101000000+28);
-insert into t2 values (19700101000000+27);
-insert into t2 values (19700101000000+26);
-insert into t2 values (19700101000000+25);
-insert into t2 values (19700101000000+24);
-insert into t2 values (19700101000000+23);
-insert into t2 values (19700101000000+22);
-insert into t2 values (19700101000000+21);
-insert into t2 values (19700101000000+20);
-insert into t2 values (19700101000000+19);
-insert into t2 values (19700101000000+18);
-insert into t2 values (19700101000000+17);
-insert into t2 values (19700101000000+16);
-insert into t2 values (19700101000000+15);
-insert into t2 values (19700101000000+14);
-insert into t2 values (19700101000000+13);
-insert into t2 values (19700101000000+12);
-insert into t2 values (19700101000000+11);
-insert into t2 values (19700101000000+10);
-insert into t2 values (19700101000000+9);
-insert into t2 values (19700101000000+8);
-insert into t2 values (19700101000000+7);
-insert into t2 values (19700101000000+6);
-insert into t2 values (19700101000000+5);
-insert into t2 values (19700101000000+4);
-insert into t2 values (19700101000000+3);
-insert into t2 values (19700101000000+2);
-insert into t2 values (19700101000000+1);
select count(*) from t2;
count(*)
59
@@ -1318,18 +891,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES LESS THAN (10) ENGINE = MyISAM,
PARTITION quarter4 VALUES LESS THAN (13) ENGINE = MyISAM) */
12 inserts;
-insert into t3 values (adddate(19700101000000,interval 12-1 month));
-insert into t3 values (adddate(19700101000000,interval 11-1 month));
-insert into t3 values (adddate(19700101000000,interval 10-1 month));
-insert into t3 values (adddate(19700101000000,interval 9-1 month));
-insert into t3 values (adddate(19700101000000,interval 8-1 month));
-insert into t3 values (adddate(19700101000000,interval 7-1 month));
-insert into t3 values (adddate(19700101000000,interval 6-1 month));
-insert into t3 values (adddate(19700101000000,interval 5-1 month));
-insert into t3 values (adddate(19700101000000,interval 4-1 month));
-insert into t3 values (adddate(19700101000000,interval 3-1 month));
-insert into t3 values (adddate(19700101000000,interval 2-1 month));
-insert into t3 values (adddate(19700101000000,interval 1-1 month));
select count(*) from t3;
count(*)
12
@@ -1370,18 +931,6 @@ SUBPARTITIONS 3
PARTITION quarter3 VALUES IN (7,8,9) ENGINE = MyISAM,
PARTITION quarter4 VALUES IN (10,11,12) ENGINE = MyISAM) */
12 inserts;
-insert into t4 values (adddate(19700101000000,interval 12-1 month));
-insert into t4 values (adddate(19700101000000,interval 11-1 month));
-insert into t4 values (adddate(19700101000000,interval 10-1 month));
-insert into t4 values (adddate(19700101000000,interval 9-1 month));
-insert into t4 values (adddate(19700101000000,interval 8-1 month));
-insert into t4 values (adddate(19700101000000,interval 7-1 month));
-insert into t4 values (adddate(19700101000000,interval 6-1 month));
-insert into t4 values (adddate(19700101000000,interval 5-1 month));
-insert into t4 values (adddate(19700101000000,interval 4-1 month));
-insert into t4 values (adddate(19700101000000,interval 3-1 month));
-insert into t4 values (adddate(19700101000000,interval 2-1 month));
-insert into t4 values (adddate(19700101000000,interval 1-1 month));
select count(*) from t4;
count(*)
12
=== modified file 'mysql-test/suite/parts/t/part_supported_sql_func_innodb.test'
--- a/mysql-test/suite/parts/t/part_supported_sql_func_innodb.test 2007-11-20 15:04:07 +0000
+++ b/mysql-test/suite/parts/t/part_supported_sql_func_innodb.test 2009-10-28 08:08:54 +0000
@@ -25,6 +25,8 @@
let $debug= 0;
let $do_long_tests= 1;
+#
+--source include/big_test.inc
# The server must support partitioning.
--source include/have_partition.inc
=== modified file 'mysql-test/suite/parts/t/partition_alter1_1_2_innodb.test'
--- a/mysql-test/suite/parts/t/partition_alter1_1_2_innodb.test 2009-10-09 13:08:09 +0000
+++ b/mysql-test/suite/parts/t/partition_alter1_1_2_innodb.test 2009-10-28 08:08:54 +0000
@@ -43,6 +43,8 @@ SET @max_row = 20;
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
+# Slow running test
+--source include/big_test.inc
# This test relies on connecting externally from mysqltest, doesn't
# work with embedded.
--source include/not_embedded.inc
=== modified file 'mysql-test/suite/parts/t/partition_alter4_myisam.test'
--- a/mysql-test/suite/parts/t/partition_alter4_myisam.test 2009-10-09 13:08:09 +0000
+++ b/mysql-test/suite/parts/t/partition_alter4_myisam.test 2009-10-28 08:08:54 +0000
@@ -40,6 +40,8 @@ SET @max_row = 20;
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
+# Slow running test
+--source include/big_test.inc
# This test relies on connecting externally from mysqltest, doesn't
# work with embedded.
--source include/not_embedded.inc
=== modified file 'mysql-test/suite/pbxt/r/partition_range.result'
--- a/mysql-test/suite/pbxt/r/partition_range.result 2009-04-02 10:03:14 +0000
+++ b/mysql-test/suite/pbxt/r/partition_range.result 2009-10-16 12:45:42 +0000
@@ -710,7 +710,7 @@ a
EXPLAIN PARTITIONS SELECT * FROM t1
WHERE a >= '2004-07-01' AND a <= '2004-09-30';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p407,p408,p409 ALL NULL NULL NULL NULL 9 Using where
+1 SIMPLE t1 p3xx,p407,p408,p409 ALL NULL NULL NULL NULL 18 Using where
SELECT * from t1
WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR
(a >= '2005-07-01' AND a <= '2005-09-30');
@@ -737,5 +737,5 @@ EXPLAIN PARTITIONS SELECT * from t1
WHERE (a >= '2004-07-01' AND a <= '2004-09-30') OR
(a >= '2005-07-01' AND a <= '2005-09-30');
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p407,p408,p409,p507,p508,p509 ALL NULL NULL NULL NULL 18 Using where
+1 SIMPLE t1 p3xx,p407,p408,p409,p507,p508,p509 ALL NULL NULL NULL NULL 27 Using where
DROP TABLE t1;
=== modified file 'mysql-test/suite/pbxt/r/subselect.result'
--- a/mysql-test/suite/pbxt/r/subselect.result 2009-08-17 15:57:58 +0000
+++ b/mysql-test/suite/pbxt/r/subselect.result 2009-10-16 12:45:42 +0000
@@ -75,7 +75,7 @@ SELECT 1 FROM (SELECT 1 as a) b WHERE 1
select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1));
ERROR HY000: Incorrect usage of PROCEDURE and subquery
SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1));
-ERROR HY000: Incorrect parameters to procedure 'ANALYSE'
+ERROR HY000: Incorrect usage of PROCEDURE and subquery
SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL;
ERROR 42S22: Unknown column 'a' in 'field list'
SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NOT NULL;
=== modified file 'mysql-test/suite/pbxt/t/subselect.test'
--- a/mysql-test/suite/pbxt/t/subselect.test 2009-10-26 11:35:42 +0000
+++ b/mysql-test/suite/pbxt/t/subselect.test 2009-11-06 17:22:32 +0000
@@ -30,7 +30,7 @@ SELECT 1 IN (SELECT 1);
SELECT 1 FROM (SELECT 1 as a) b WHERE 1 IN (SELECT (SELECT a));
-- error 1221
select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1));
--- error 1108
+-- error ER_WRONG_USAGE
SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1));
-- error ER_BAD_FIELD_ERROR
SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL;
=== modified file 'mysql-test/suite/rpl/r/rpl_concurrency_error.result'
--- a/mysql-test/suite/rpl/r/rpl_concurrency_error.result 2009-07-18 20:07:56 +0000
+++ b/mysql-test/suite/rpl/r/rpl_concurrency_error.result 2009-08-13 16:21:01 +0000
@@ -101,6 +101,8 @@ master-bin.000001 # Query # # use `test`
master-bin.000001 # Query # # use `test`; INSERT INTO t VALUES (6 + (1 * 10),"brown")
master-bin.000001 # Query # # use `test`; INSERT INTO n VALUES (now(),"brown")
master-bin.000001 # Xid # # COMMIT /* XID */
+source include/diff_master_slave.inc;
+source include/diff_master_slave.inc;
########################################################################
# Cleanup
########################################################################
=== added file 'mysql-test/suite/rpl/r/rpl_create_if_not_exists.result'
--- a/mysql-test/suite/rpl/r/rpl_create_if_not_exists.result 1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/r/rpl_create_if_not_exists.result 2009-08-29 08:52:22 +0000
@@ -0,0 +1,33 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+DROP DATABASE IF EXISTS mysqltest;
+CREATE DATABASE IF NOT EXISTS mysqltest;
+USE mysqltest;
+CREATE TABLE IF NOT EXISTS t(c1 int);
+CREATE TABLE IF NOT EXISTS t1 LIKE t;
+CREATE TABLE IF NOT EXISTS t2 SELECT * FROM t;
+CREATE EVENT IF NOT EXISTS e
+ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR
+DO SELECT now();
+DROP DATABASE mysqltest;
+CREATE DATABASE IF NOT EXISTS mysqltest;
+USE mysqltest;
+CREATE TABLE IF NOT EXISTS t(c1 int);
+CREATE TABLE IF NOT EXISTS t1 LIKE t;
+CREATE TABLE IF NOT EXISTS t2 SELECT * FROM t;
+CREATE EVENT IF NOT EXISTS e
+ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR
+DO SELECT now();
+SHOW TABLES in mysqltest;
+Tables_in_mysqltest
+t
+t1
+t2
+SHOW EVENTS in mysqltest;
+Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
+mysqltest e root@localhost SYSTEM ONE TIME # NULL NULL NULL NULL SLAVESIDE_DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
+DROP DATABASE IF EXISTS mysqltest;
=== added file 'mysql-test/suite/rpl/r/rpl_create_tmp_table_if_not_exists.result'
--- a/mysql-test/suite/rpl/r/rpl_create_tmp_table_if_not_exists.result 1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/r/rpl_create_tmp_table_if_not_exists.result 2009-08-13 02:48:57 +0000
@@ -0,0 +1,22 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+DROP DATABASE IF EXISTS mysqltest;
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp(c1 int);
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp(c1 int);
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp1 LIKE tmp;
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp1 LIKE tmp;
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp2 SELECT * FROM tmp;
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp2 SELECT * FROM tmp;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # DROP DATABASE IF EXISTS mysqltest
+master-bin.000001 # Query # # use `test`; CREATE TEMPORARY TABLE IF NOT EXISTS tmp(c1 int)
+master-bin.000001 # Query # # use `test`; CREATE TEMPORARY TABLE IF NOT EXISTS tmp(c1 int)
+master-bin.000001 # Query # # use `test`; CREATE TEMPORARY TABLE IF NOT EXISTS tmp1 LIKE tmp
+master-bin.000001 # Query # # use `test`; CREATE TEMPORARY TABLE IF NOT EXISTS tmp1 LIKE tmp
+master-bin.000001 # Query # # use `test`; CREATE TEMPORARY TABLE IF NOT EXISTS tmp2 SELECT * FROM tmp
+master-bin.000001 # Query # # use `test`; CREATE TEMPORARY TABLE IF NOT EXISTS tmp2 SELECT * FROM tmp
=== modified file 'mysql-test/suite/rpl/r/rpl_drop_if_exists.result'
--- a/mysql-test/suite/rpl/r/rpl_drop_if_exists.result 2009-02-11 17:46:43 +0000
+++ b/mysql-test/suite/rpl/r/rpl_drop_if_exists.result 2009-08-29 08:52:22 +0000
@@ -43,7 +43,7 @@ master-bin.000001 # Query # # use `test`
master-bin.000001 # Query # # DROP DATABASE IF EXISTS db_bug_13684
master-bin.000001 # Query # # CREATE DATABASE db_bug_13684
master-bin.000001 # Query # # use `test`; CREATE TABLE db_bug_13684.t (a int)
-master-bin.000001 # Query # # use `test`; CREATE EVENT db_bug_13684.e
+master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` EVENT db_bug_13684.e
ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR
DO
UPDATE db_bug_13684.t SET a = a + 1
@@ -75,7 +75,7 @@ master-bin.000001 # Query # # use `test`
master-bin.000001 # Query # # DROP DATABASE IF EXISTS db_bug_13684
master-bin.000001 # Query # # CREATE DATABASE db_bug_13684
master-bin.000001 # Query # # use `test`; CREATE TABLE db_bug_13684.t (a int)
-master-bin.000001 # Query # # use `test`; CREATE EVENT db_bug_13684.e
+master-bin.000001 # Query # # use `test`; CREATE DEFINER=`root`@`localhost` EVENT db_bug_13684.e
ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR
DO
UPDATE db_bug_13684.t SET a = a + 1
=== modified file 'mysql-test/suite/rpl/r/rpl_drop_temp.result'
--- a/mysql-test/suite/rpl/r/rpl_drop_temp.result 2007-06-27 12:28:02 +0000
+++ b/mysql-test/suite/rpl/r/rpl_drop_temp.result 2009-08-28 09:45:57 +0000
@@ -5,6 +5,7 @@ reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
create database if not exists mysqltest;
+use mysqltest;
create temporary table mysqltest.t1 (n int)ENGINE=MyISAM;
create temporary table mysqltest.t2 (n int)ENGINE=MyISAM;
show status like 'Slave_open_temp_tables';
=== modified file 'mysql-test/suite/rpl/r/rpl_events.result'
--- a/mysql-test/suite/rpl/r/rpl_events.result 2009-02-19 09:01:25 +0000
+++ b/mysql-test/suite/rpl/r/rpl_events.result 2009-10-15 21:38:29 +0000
@@ -191,5 +191,63 @@ select * from t28953;
END;|
ALTER EVENT event1 RENAME TO event2;
DROP EVENT event2;
+CREATE TABLE test.t1(details CHAR(30));
+CREATE EVENT /*!50000 event44331_1 */
+ON SCHEDULE AT CURRENT_TIMESTAMP
+ON COMPLETION PRESERVE DISABLE
+DO INSERT INTO test.t1 VALUES('event event44331_1 fired - no definer');
+CREATE DEFINER=CURRENT_USER /*!50000 EVENT event44331_2 */
+ON SCHEDULE AT CURRENT_TIMESTAMP
+ON COMPLETION PRESERVE DISABLE
+DO INSERT INTO test.t1 VALUES('event event44331_2 fired - DEFINER=CURRENT_USER');
+CREATE DEFINER=CURRENT_USER() EVENT event44331_3
+ON SCHEDULE AT CURRENT_TIMESTAMP
+ON COMPLETION PRESERVE DISABLE
+DO INSERT INTO test.t1 VALUES('event event44331_3 fired - DEFINER=CURRENT_USER() function');
+CREATE /*!50000 DEFINER='user44331' */ EVENT event44331_4
+ON SCHEDULE AT CURRENT_TIMESTAMP
+ON COMPLETION PRESERVE DISABLE
+DO INSERT INTO test.t1 VALUES('event event44331_4 fired - DEFINER=user1');
+Warnings:
+Note 1449 The user specified as a definer ('user44331'@'%') does not exist
+#on master
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+where EVENT_NAME='event44331_1';
+EVENT_SCHEMA EVENT_NAME DEFINER
+test event44331_1 root@localhost
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+where EVENT_NAME='event44331_2';
+EVENT_SCHEMA EVENT_NAME DEFINER
+test event44331_2 root@localhost
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+where EVENT_NAME='event44331_3';
+EVENT_SCHEMA EVENT_NAME DEFINER
+test event44331_3 root@localhost
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+where EVENT_NAME='event44331_4';
+EVENT_SCHEMA EVENT_NAME DEFINER
+test event44331_4 user44331@%
+#on slave
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+where EVENT_NAME='event44331_1';
+EVENT_SCHEMA EVENT_NAME DEFINER
+test event44331_1 root@localhost
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+where EVENT_NAME='event44331_2';
+EVENT_SCHEMA EVENT_NAME DEFINER
+test event44331_2 root@localhost
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+where EVENT_NAME='event44331_3';
+EVENT_SCHEMA EVENT_NAME DEFINER
+test event44331_3 root@localhost
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+where EVENT_NAME='event44331_4';
+EVENT_SCHEMA EVENT_NAME DEFINER
+test event44331_4 user44331@%
SET @@global.event_scheduler= @old_event_scheduler;
DROP TABLE t28953;
+DROP TABLE t1;
+DROP EVENT event44331_1;
+DROP EVENT event44331_2;
+DROP EVENT event44331_3;
+DROP EVENT event44331_4;
=== modified file 'mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result'
--- a/mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result 2009-05-31 05:44:41 +0000
+++ b/mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result 2009-08-29 08:52:22 +0000
@@ -690,7 +690,7 @@ test_rpl e1 root@localhost SYSTEM RECURR
USE test_rpl;
SHOW EVENTS;
Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
-test_rpl e1 @ SYSTEM RECURRING NULL 1 # # NULL SLAVESIDE_DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
+test_rpl e1 root@localhost SYSTEM RECURRING NULL 1 # # NULL SLAVESIDE_DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
==========MASTER==========
SELECT COUNT(*) FROM t1;
COUNT(*)
@@ -963,7 +963,9 @@ master-bin.000001 # Xid 1 # #
master-bin.000001 # Query 1 # BEGIN
master-bin.000001 # Query 1 # use `test_rpl`; INSERT INTO t1 VALUES(1, 't1, text 1')
master-bin.000001 # Xid 1 # #
+master-bin.000001 # Query 1 # BEGIN
master-bin.000001 # Query 1 # use `test_rpl`; TRUNCATE t1
+master-bin.000001 # Xid 1 # #
master-bin.000001 # Query 1 # BEGIN
master-bin.000001 # Query 1 # use `test_rpl`; DELETE FROM t1
master-bin.000001 # Xid 1 # #
@@ -1076,7 +1078,7 @@ master-bin.000001 # Query 1 # use `test_
master-bin.000001 # Query 1 # BEGIN
master-bin.000001 # Query 1 # use `test_rpl`; INSERT INTO t1 VALUES(1, 'test1')
master-bin.000001 # Xid 1 # #
-master-bin.000001 # Query 1 # use `test_rpl`; CREATE EVENT e1 ON SCHEDULE EVERY '1' SECOND COMMENT 'e_second_comment' DO DELETE FROM t1
+master-bin.000001 # Query 1 # use `test_rpl`; CREATE DEFINER=`root`@`localhost` EVENT e1 ON SCHEDULE EVERY '1' SECOND COMMENT 'e_second_comment' DO DELETE FROM t1
master-bin.000001 # Query 1 # use `test_rpl`; ALTER EVENT e1 RENAME TO e2
master-bin.000001 # Query 1 # use `test_rpl`; DROP EVENT e2
master-bin.000001 # Query 1 # BEGIN
=== modified file 'mysql-test/suite/rpl/r/rpl_loaddata_charset.result'
--- a/mysql-test/suite/rpl/r/rpl_loaddata_charset.result 2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/rpl/r/rpl_loaddata_charset.result 2009-08-12 05:31:56 +0000
@@ -35,3 +35,44 @@ C3BF
D0AA
D0AA
drop table t1;
+-------------test bug#45516------------------
+DROP DATABASE IF EXISTS mysqltest;
+CREATE DATABASE mysqltest CHARSET UTF8;
+USE mysqltest;
+CREATE TABLE t (cl varchar(100)) CHARSET UTF8;
+LOAD DATA LOCAL INFILE './std_data/loaddata_utf8.dat' INTO TABLE t
+FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';
+----------content on master----------
+SELECT hex(cl) FROM t;
+hex(cl)
+E4B880E4BA8CE4B889
+E59B9BE4BA94E585AD
+E4B883E585ABE4B99D
+----------content on slave----------
+USE mysqltest;
+SELECT hex(cl) FROM t;
+hex(cl)
+E4B880E4BA8CE4B889
+E59B9BE4BA94E585AD
+E4B883E585ABE4B99D
+DROP DATABASE mysqltest;
+DROP DATABASE IF EXISTS mysqltest;
+CREATE DATABASE mysqltest CHARSET UTF8;
+USE mysqltest;
+CREATE TABLE t (cl varchar(100)) CHARSET UTF8;
+LOAD DATA INFILE '../../std_data/loaddata_utf8.dat' INTO TABLE t
+FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';
+----------content on master----------
+SELECT hex(cl) FROM t;
+hex(cl)
+E4B880E4BA8CE4B889
+E59B9BE4BA94E585AD
+E4B883E585ABE4B99D
+----------content on slave----------
+USE mysqltest;
+SELECT hex(cl) FROM t;
+hex(cl)
+E4B880E4BA8CE4B889
+E59B9BE4BA94E585AD
+E4B883E585ABE4B99D
+DROP DATABASE mysqltest;
=== modified file 'mysql-test/suite/rpl/r/rpl_rewrt_db.result'
--- a/mysql-test/suite/rpl/r/rpl_rewrt_db.result 2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/rpl/r/rpl_rewrt_db.result 2009-08-28 09:45:57 +0000
@@ -90,5 +90,132 @@ a b
2 row 2
3 row 3
0
+set sql_log_bin= 0;
drop database rewrite;
+set sql_log_bin= 1;
+set sql_log_bin= 0;
drop table t1;
+set sql_log_bin= 1;
+
+****
+**** Bug #46861 Auto-closing of temporary tables broken by replicate-rewrite-db
+****
+
+****
+**** Preparing the environment
+****
+SET sql_log_bin= 0;
+CREATE DATABASE database_master_temp_01;
+CREATE DATABASE database_master_temp_02;
+CREATE DATABASE database_master_temp_03;
+SET sql_log_bin= 1;
+SET sql_log_bin= 0;
+CREATE DATABASE database_slave_temp_01;
+CREATE DATABASE database_slave_temp_02;
+CREATE DATABASE database_slave_temp_03;
+SET sql_log_bin= 1;
+
+****
+**** Creating temporary tables on different databases with different connections
+****
+**** con_temp_01 --> creates
+**** t_01_01_temp on database_master_temp_01
+****
+**** con_temp_02 --> creates
+**** t_01_01_temp on database_master_temp_01
+**** t_02_01_temp, t_02_02_temp on database_master_temp_02
+****
+**** con_temp_03 --> creates
+**** t_01_01_temp on database_master_temp_01
+**** t_02_01_temp, t_02_02_temp on database_master_temp_02
+**** t_03_01_temp, t_03_02_temp, t_03_03_temp on database_master_temp_03
+****
+
+con_temp_01
+
+USE database_master_temp_01;
+CREATE TEMPORARY TABLE t_01_01_temp(a int);
+INSERT INTO t_01_01_temp VALUES(1);
+
+con_temp_02
+
+USE database_master_temp_01;
+CREATE TEMPORARY TABLE t_01_01_temp(a int);
+INSERT INTO t_01_01_temp VALUES(1);
+USE database_master_temp_02;
+CREATE TEMPORARY TABLE t_02_01_temp(a int);
+INSERT INTO t_02_01_temp VALUES(1);
+CREATE TEMPORARY TABLE t_02_02_temp(a int);
+INSERT INTO t_02_02_temp VALUES(1);
+
+con_temp_03
+
+USE database_master_temp_01;
+CREATE TEMPORARY TABLE t_01_01_temp(a int);
+INSERT INTO t_01_01_temp VALUES(1);
+USE database_master_temp_02;
+CREATE TEMPORARY TABLE t_02_01_temp(a int);
+INSERT INTO t_02_01_temp VALUES(1);
+CREATE TEMPORARY TABLE t_02_02_temp(a int);
+INSERT INTO t_02_02_temp VALUES(1);
+USE database_master_temp_03;
+CREATE TEMPORARY TABLE t_03_01_temp(a int);
+INSERT INTO t_03_01_temp VALUES(1);
+CREATE TEMPORARY TABLE t_03_02_temp(a int);
+INSERT INTO t_03_02_temp VALUES(1);
+CREATE TEMPORARY TABLE t_03_03_temp(a int);
+INSERT INTO t_03_03_temp VALUES(1);
+
+**** Dropping the connections
+**** We want to SHOW BINLOG EVENTS, to know what was logged. But there is no
+**** guarantee that logging of the terminated con1 has been done yet. To be
+**** sure that logging has been done, we use a user lock.
+
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 10
+select get_lock("con_01",10);
+get_lock("con_01",10)
+1
+select get_lock("con_01",10);
+get_lock("con_01",10)
+1
+select get_lock("con_02",10);
+get_lock("con_02",10)
+1
+select get_lock("con_02",10);
+get_lock("con_02",10)
+1
+select get_lock("con_03",10);
+get_lock("con_03",10)
+1
+select get_lock("con_03",10);
+get_lock("con_03",10)
+1
+
+**** Checking the binary log and temporary tables
+
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 0
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `database_master_temp_01`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `t_01_01_temp`
+master-bin.000001 # Query # # use `database_master_temp_02`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `t_02_02_temp`,`t_02_01_temp`
+master-bin.000001 # Query # # use `database_master_temp_01`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `t_01_01_temp`
+master-bin.000001 # Query # # use `database_master_temp_03`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `t_03_03_temp`,`t_03_02_temp`,`t_03_01_temp`
+master-bin.000001 # Query # # use `database_master_temp_02`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `t_02_02_temp`,`t_02_01_temp`
+master-bin.000001 # Query # # use `database_master_temp_01`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `t_01_01_temp`
+****
+**** Cleaning up the test case
+****
+SET sql_log_bin= 0;
+DROP DATABASE database_master_temp_01;
+DROP DATABASE database_master_temp_02;
+DROP DATABASE database_master_temp_03;
+SET sql_log_bin= 1;
+SET sql_log_bin= 0;
+DROP DATABASE database_slave_temp_01;
+DROP DATABASE database_slave_temp_02;
+DROP DATABASE database_slave_temp_03;
+SET sql_log_bin= 1;
=== added file 'mysql-test/suite/rpl/r/rpl_stm_mixing_engines.result'
--- a/mysql-test/suite/rpl/r/rpl_stm_mixing_engines.result 1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/r/rpl_stm_mixing_engines.result 2009-08-27 12:46:29 +0000
@@ -0,0 +1,870 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+###################################################################################
+# CONFIGURATION
+###################################################################################
+SET SQL_LOG_BIN=0;
+CREATE TABLE nt_1 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_2 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_3 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_4 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE tt_1 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_2 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_3 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_4 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+SET SQL_LOG_BIN=1;
+SET SQL_LOG_BIN=0;
+CREATE TABLE nt_1 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_2 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_3 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE nt_4 (a text, b int PRIMARY KEY, c text) ENGINE = MyISAM;
+CREATE TABLE tt_1 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_2 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_3 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+CREATE TABLE tt_4 (a text, b int PRIMARY KEY, c text) ENGINE = Innodb;
+SET SQL_LOG_BIN=1;
+CREATE FUNCTION f1 () RETURNS VARCHAR(64)
+BEGIN
+RETURN "Testing...";
+END|
+CREATE FUNCTION f2 () RETURNS VARCHAR(64)
+BEGIN
+RETURN f1();
+END|
+CREATE PROCEDURE pc_i_tt_3 (IN x INT, IN y VARCHAR(64))
+BEGIN
+INSERT INTO tt_3 VALUES (y,x,x);
+END|
+CREATE TRIGGER tr_i_tt_3_to_nt_3 BEFORE INSERT ON tt_3 FOR EACH ROW
+BEGIN
+INSERT INTO nt_3 VALUES (NEW.a, NEW.b, NEW.c);
+END|
+CREATE TRIGGER tr_i_nt_4_to_tt_4 BEFORE INSERT ON nt_4 FOR EACH ROW
+BEGIN
+INSERT INTO tt_4 VALUES (NEW.a, NEW.b, NEW.c);
+END|
+###################################################################################
+# MIXING TRANSACTIONAL and NON-TRANSACTIONAL TABLES
+###################################################################################
+#
+#1) "B T T C" generates in binlog the "B T T C" entries.
+#
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text 4", 4, "new text 4");
+INSERT INTO tt_2 VALUES ("new text 4", 4, "new text 4");
+COMMIT;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 4", 4, "new text 4")
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 4", 4, "new text 4")
+master-bin.000001 # Xid # # COMMIT /* XID */
+
+
+
+
+#
+#1.e) "B T T C" with error in T generates in binlog the "B T T C" entries.
+#
+INSERT INTO tt_1 VALUES ("new text -2", -2, "new text -2");
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text -1", -1, "new text -1"), ("new text -2", -2, "new text -2");
+ERROR 23000: Duplicate entry '-2' for key 'PRIMARY'
+INSERT INTO tt_2 VALUES ("new text -3", -3, "new text -3");
+COMMIT;
+BEGIN;
+INSERT INTO tt_2 VALUES ("new text -5", -5, "new text -5");
+INSERT INTO tt_2 VALUES ("new text -4", -4, "new text -4"), ("new text -5", -5, "new text -5");
+ERROR 23000: Duplicate entry '-5' for key 'PRIMARY'
+COMMIT;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -2", -2, "new text -2")
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text -3", -3, "new text -3")
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text -5", -5, "new text -5")
+master-bin.000001 # Xid # # COMMIT /* XID */
+
+
+
+
+#
+#2) "B T T R" generates in binlog an "empty" entry.
+#
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text 5", 5, "new text 5");
+INSERT INTO tt_2 VALUES ("new text 5", 5, "new text 5");
+ROLLBACK;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+
+
+
+
+#
+#2.e) "B T T R" with error in T generates in binlog an "empty" entry.
+#
+INSERT INTO tt_1 VALUES ("new text -7", -7, "new text -7");
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text -6", -6, "new text -6"), ("new text -7", -7, "new text -7");
+ERROR 23000: Duplicate entry '-7' for key 'PRIMARY'
+INSERT INTO tt_2 VALUES ("new text -8", -8, "new text -8");
+ROLLBACK;
+BEGIN;
+INSERT INTO tt_2 VALUES ("new text -10", -10, "new text -10");
+INSERT INTO tt_2 VALUES ("new text -9", -9, "new text -9"), ("new text -10", -10, "new text -10");
+ERROR 23000: Duplicate entry '-10' for key 'PRIMARY'
+ROLLBACK;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -7", -7, "new text -7")
+master-bin.000001 # Xid # # COMMIT /* XID */
+
+
+
+
+#
+#3) "B T N C" generates in binlog the "B T N C" entries.
+#
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text 6", 6, "new text 6");
+INSERT INTO nt_1 VALUES ("new text 6", 6, "new text 6");
+COMMIT;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 6", 6, "new text 6")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text 6", 6, "new text 6")
+master-bin.000001 # Xid # # COMMIT /* XID */
+
+
+
+
+#
+#3.e) "B T N C" with error in either T or N generates in binlog the "B T N C" entries.
+#
+INSERT INTO tt_1 VALUES ("new text -12", -12, "new text -12");
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text -11", -11, "new text -11"), ("new text -12", -12, "new text -12");
+ERROR 23000: Duplicate entry '-12' for key 'PRIMARY'
+INSERT INTO nt_1 VALUES ("new text -13", -13, "new text -13");
+COMMIT;
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text -14", -14, "new text -14");
+INSERT INTO nt_1 VALUES ("new text -16", -16, "new text -16");
+INSERT INTO nt_1 VALUES ("new text -15", -15, "new text -15"), ("new text -16", -16, "new text -16");
+ERROR 23000: Duplicate entry '-16' for key 'PRIMARY'
+COMMIT;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -12", -12, "new text -12")
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text -13", -13, "new text -13")
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -14", -14, "new text -14")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text -16", -16, "new text -16")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text -15", -15, "new text -15"), ("new text -16", -16, "new text -16")
+master-bin.000001 # Xid # # COMMIT /* XID */
+
+
+
+
+#
+#4) "B T N R" generates in binlog the "B T N R" entries.
+#
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text 7", 7, "new text 7");
+INSERT INTO nt_1 VALUES ("new text 7", 7, "new text 7");
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 7", 7, "new text 7")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text 7", 7, "new text 7")
+master-bin.000001 # Query # # ROLLBACK
+
+
+
+
+#
+#4.e) "B T N R" with error in either T or N generates in binlog the "B T N R" entries.
+#
+INSERT INTO tt_1 VALUES ("new text -17", -17, "new text -17");
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text -16", -16, "new text -16"), ("new text -17", -17, "new text -17");
+ERROR 23000: Duplicate entry '-17' for key 'PRIMARY'
+INSERT INTO nt_1 VALUES ("new text -18", -18, "new text -18");
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+INSERT INTO tt_1 VALUES ("new text -19", -19, "new text -19");
+INSERT INTO nt_1 VALUES ("new text -21", -21, "new text -21");
+INSERT INTO nt_1 VALUES ("new text -20", -20, "new text -20"), ("new text -21", -21, "new text -21");
+ERROR 23000: Duplicate entry '-21' for key 'PRIMARY'
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -17", -17, "new text -17")
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text -18", -18, "new text -18")
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -19", -19, "new text -19")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text -21", -21, "new text -21")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text -20", -20, "new text -20"), ("new text -21", -21, "new text -21")
+master-bin.000001 # Query # # ROLLBACK
+
+
+
+
+#
+#5) "T" generates in binlog the "B T C" entry.
+#
+INSERT INTO tt_1 VALUES ("new text 8", 8, "new text 8");
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 8", 8, "new text 8")
+master-bin.000001 # Xid # # COMMIT /* XID */
+
+
+
+
+#
+#5.e) "T" with error in T generates in binlog an "empty" entry.
+#
+INSERT INTO tt_1 VALUES ("new text -1", -1, "new text -1");
+INSERT INTO tt_1 VALUES ("new text -1", -1, "new text -1"), ("new text -22", -22, "new text -22");
+ERROR 23000: Duplicate entry '-1' for key 'PRIMARY'
+INSERT INTO tt_1 VALUES ("new text -23", -23, "new text -23"), ("new text -1", -1, "new text -1");
+ERROR 23000: Duplicate entry '-1' for key 'PRIMARY'
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -1", -1, "new text -1")
+master-bin.000001 # Xid # # COMMIT /* XID */
+
+
+
+
+#
+#6) "N" generates in binlog the "N" entry.
+#
+INSERT INTO nt_1 VALUES ("new text 9", 9, "new text 9");
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text 9", 9, "new text 9")
+
+
+
+
+#
+#6.e) "N" with error in N generates in binlog an empty entry if the error
+# happens in the first tuple. Otherwise, generates the "N" entry and
+# the error is appended.
+#
+INSERT INTO nt_1 VALUES ("new text -1", -1, "new text -1");
+INSERT INTO nt_1 VALUES ("new text -1", -1, "new text -1");
+ERROR 23000: Duplicate entry '-1' for key 'PRIMARY'
+INSERT INTO nt_1 VALUES ("new text -24", -24, "new text -24"), ("new text -1", -1, "new text -1");
+ERROR 23000: Duplicate entry '-1' for key 'PRIMARY'
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text -1", -1, "new text -1")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text -24", -24, "new text -24"), ("new text -1", -1, "new text -1")
+
+
+
+
+#
+#7) "M" generates in binlog the "B M C" entries.
+#
+DELETE FROM nt_1;
+INSERT INTO nt_1 SELECT * FROM tt_1;
+DELETE FROM tt_1;
+INSERT INTO tt_1 SELECT * FROM nt_1;
+INSERT INTO tt_3 VALUES ("new text 000", 000, '');
+INSERT INTO tt_3 VALUES("new text 100", 100, f1());
+INSERT INTO nt_4 VALUES("new text 100", 100, f1());
+INSERT INTO tt_3 VALUES("new text 200", 200, f2());
+INSERT INTO nt_4 VALUES ("new text 300", 300, '');
+INSERT INTO nt_4 VALUES ("new text 400", 400, f1());
+INSERT INTO nt_4 VALUES ("new text 500", 500, f2());
+CALL pc_i_tt_3(600, "Testing...");
+UPDATE nt_3, nt_4, tt_3, tt_4 SET nt_3.a= "new text 1", nt_4.a= "new text 1", tt_3.a= "new text 1", tt_4.a= "new text 1" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+UPDATE tt_3, tt_4, nt_3, nt_4 SET tt_3.a= "new text 2", tt_4.a= "new text 2", nt_3.a= "new text 2", nt_4.a = "new text 2" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 3", nt_3.a= "new text 3", nt_4.a= "new text 3", tt_4.a = "new text 3" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 4", nt_3.a= "new text 4", nt_4.a= "new text 4", tt_4.a = "new text 4" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; DELETE FROM nt_1
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 SELECT * FROM tt_1
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; DELETE FROM tt_1
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 SELECT * FROM nt_1
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES ("new text 000", 000, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES("new text 100", 100, f1())
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES("new text 100", 100, f1())
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES("new text 200", 200, f2())
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES ("new text 300", 300, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES ("new text 400", 400, f1())
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES ("new text 500", 500, f2())
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES ( NAME_CONST('y',_latin1'Testing...' COLLATE 'latin1_swedish_ci'), NAME_CONST('x',600), NAME_CONST('x',600))
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE nt_3, nt_4, tt_3, tt_4 SET nt_3.a= "new text 1", nt_4.a= "new text 1", tt_3.a= "new text 1", tt_4.a= "new text 1" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE tt_3, tt_4, nt_3, nt_4 SET tt_3.a= "new text 2", tt_4.a= "new text 2", nt_3.a= "new text 2", nt_4.a = "new text 2" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 3", nt_3.a= "new text 3", nt_4.a= "new text 3", tt_4.a = "new text 3" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 4", nt_3.a= "new text 4", nt_4.a= "new text 4", tt_4.a = "new text 4" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Xid # # COMMIT /* XID */
+
+
+
+
+#
+#7.e) "M" with error in M generates in binlog the "B M R" entries.
+#
+INSERT INTO nt_3 VALUES ("new text -26", -26, '');
+SELECT * FROM tt_3;
+a b c
+new text 000 0
+new text 4 100 Testing...
+new text 200 200 Testing...
+Testing... 600 600
+INSERT INTO tt_3 VALUES ("new text -25", -25, ''), ("new text -26", -26, '');
+ERROR 23000: Duplicate entry '-26' for key 'PRIMARY'
+SELECT * FROM tt_3;
+a b c
+new text 000 0
+new text 4 100 Testing...
+new text 200 200 Testing...
+Testing... 600 600
+INSERT INTO tt_4 VALUES ("new text -26", -26, '');
+SELECT * FROM nt_4;
+a b c
+new text 4 100 Testing...
+new text 300 300
+new text 400 400 Testing...
+new text 500 500 Testing...
+INSERT INTO nt_4 VALUES ("new text -25", -25, ''), ("new text -26", -26, '');
+ERROR 23000: Duplicate entry '-26' for key 'PRIMARY'
+SELECT * FROM nt_4;
+a b c
+new text 4 100 Testing...
+new text 300 300
+new text 400 400 Testing...
+new text 500 500 Testing...
+new text -25 -25
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_3 VALUES ("new text -26", -26, '')
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES ("new text -25", -25, ''), ("new text -26", -26, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_4 VALUES ("new text -26", -26, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES ("new text -25", -25, ''), ("new text -26", -26, '')
+master-bin.000001 # Query # # ROLLBACK
+
+
+
+
+#
+#8) "B N N T C" generates in binlog the "N N B T C" entries.
+#
+BEGIN;
+INSERT INTO nt_1 VALUES ("new text 10", 10, "new text 10");
+INSERT INTO nt_2 VALUES ("new text 10", 10, "new text 10");
+INSERT INTO tt_1 VALUES ("new text 10", 10, "new text 10");
+COMMIT;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text 10", 10, "new text 10")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_2 VALUES ("new text 10", 10, "new text 10")
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 10", 10, "new text 10")
+master-bin.000001 # Xid # # COMMIT /* XID */
+
+
+
+
+#
+#8.e) "B N N T R" See 6.e and 9.e.
+#
+
+
+
+
+#
+#9) "B N N T R" generates in binlog the "N N B T R" entries.
+#
+BEGIN;
+INSERT INTO nt_1 VALUES ("new text 11", 11, "new text 11");
+INSERT INTO nt_2 VALUES ("new text 11", 11, "new text 11");
+INSERT INTO tt_1 VALUES ("new text 11", 11, "new text 11");
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text 11", 11, "new text 11")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_2 VALUES ("new text 11", 11, "new text 11")
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 11", 11, "new text 11")
+master-bin.000001 # Query # # ROLLBACK
+
+
+
+
+#
+#9.e) "B N N T R" with error in N generates in binlog the "N N B T R" entries.
+#
+BEGIN;
+INSERT INTO nt_1 VALUES ("new text -25", -25, "new text -25");
+INSERT INTO nt_2 VALUES ("new text -25", -25, "new text -25");
+INSERT INTO nt_2 VALUES ("new text -26", -26, "new text -26"), ("new text -25", -25, "new text -25");
+ERROR 23000: Duplicate entry '-25' for key 'PRIMARY'
+INSERT INTO tt_1 VALUES ("new text -27", -27, "new text -27");
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text -25", -25, "new text -25")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_2 VALUES ("new text -25", -25, "new text -25")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_2 VALUES ("new text -26", -26, "new text -26"), ("new text -25", -25, "new text -25")
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -27", -27, "new text -27")
+master-bin.000001 # Query # # ROLLBACK
+
+
+
+
+#
+#10) "B N N C" generates in binglog the "N N" entries.
+#
+BEGIN;
+INSERT INTO nt_1 VALUES ("new text 12", 12, "new text 12");
+INSERT INTO nt_2 VALUES ("new text 12", 12, "new text 12");
+COMMIT;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text 12", 12, "new text 12")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_2 VALUES ("new text 12", 12, "new text 12")
+
+
+
+
+#
+#10.e) "B N N C" See 6.e and 9.e.
+#
+
+
+
+
+#
+#11) "B N N R" generates in binlog the "N N" entries.
+#
+BEGIN;
+INSERT INTO nt_1 VALUES ("new text 13", 13, "new text 13");
+INSERT INTO nt_2 VALUES ("new text 13", 13, "new text 13");
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 VALUES ("new text 13", 13, "new text 13")
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_2 VALUES ("new text 13", 13, "new text 13")
+
+
+
+
+#
+#11.e) "B N N R" See 6.e and 9.e.
+#
+
+
+
+
+#
+#12) "B M T C" generates in the binlog the "B M T C" entries.
+#
+DELETE FROM nt_1;
+BEGIN;
+INSERT INTO nt_1 SELECT * FROM tt_1;
+INSERT INTO tt_2 VALUES ("new text 14", 14, "new text 14");
+COMMIT;
+DELETE FROM tt_1;
+BEGIN;
+INSERT INTO tt_1 SELECT * FROM nt_1;
+INSERT INTO tt_2 VALUES ("new text 15", 15, "new text 15");
+COMMIT;
+BEGIN;
+INSERT INTO tt_3 VALUES ("new text 700", 700, '');
+INSERT INTO tt_1 VALUES ("new text 800", 800, '');
+COMMIT;
+BEGIN;
+INSERT INTO tt_3 VALUES("new text 900", 900, f1());
+INSERT INTO tt_1 VALUES ("new text 1000", 1000, '');
+COMMIT;
+BEGIN;
+INSERT INTO tt_3 VALUES(1100, 1100, f2());
+INSERT INTO tt_1 VALUES ("new text 1200", 1200, '');
+COMMIT;
+BEGIN;
+INSERT INTO nt_4 VALUES ("new text 1300", 1300, '');
+INSERT INTO tt_1 VALUES ("new text 1400", 1400, '');
+COMMIT;
+BEGIN;
+INSERT INTO nt_4 VALUES("new text 1500", 1500, f1());
+INSERT INTO tt_1 VALUES ("new text 1600", 1600, '');
+COMMIT;
+BEGIN;
+INSERT INTO nt_4 VALUES("new text 1700", 1700, f2());
+INSERT INTO tt_1 VALUES ("new text 1800", 1800, '');
+COMMIT;
+BEGIN;
+CALL pc_i_tt_3(1900, "Testing...");
+INSERT INTO tt_1 VALUES ("new text 2000", 2000, '');
+COMMIT;
+BEGIN;
+UPDATE nt_3, nt_4, tt_3, tt_4 SET nt_3.a= "new text 5", nt_4.a= "new text 5", tt_3.a= "new text 5", tt_4.a= "new text 5" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 2100", 2100, '');
+COMMIT;
+BEGIN;
+UPDATE tt_3, tt_4, nt_3, nt_4 SET tt_3.a= "new text 6", tt_4.a= "new text 6", nt_3.a= "new text 6", nt_4.a = "new text 6" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 2200", 2200, '');
+COMMIT;
+BEGIN;
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 7", nt_3.a= "new text 7", nt_4.a= "new text 7", tt_4.a = "new text 7" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 2300", 2300, '');
+COMMIT;
+BEGIN;
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 8", nt_3.a= "new text 8", nt_4.a= "new text 8", tt_4.a = "new text 8" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 2400", 2400, '');
+COMMIT;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; DELETE FROM nt_1
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 SELECT * FROM tt_1
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 14", 14, "new text 14")
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; DELETE FROM tt_1
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 SELECT * FROM nt_1
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 15", 15, "new text 15")
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES ("new text 700", 700, '')
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 800", 800, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES("new text 900", 900, f1())
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 1000", 1000, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES(1100, 1100, f2())
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 1200", 1200, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES ("new text 1300", 1300, '')
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 1400", 1400, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES("new text 1500", 1500, f1())
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 1600", 1600, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES("new text 1700", 1700, f2())
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 1800", 1800, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES ( NAME_CONST('y',_latin1'Testing...' COLLATE 'latin1_swedish_ci'), NAME_CONST('x',1900), NAME_CONST('x',1900))
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 2000", 2000, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE nt_3, nt_4, tt_3, tt_4 SET nt_3.a= "new text 5", nt_4.a= "new text 5", tt_3.a= "new text 5", tt_4.a= "new text 5" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 2100", 2100, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE tt_3, tt_4, nt_3, nt_4 SET tt_3.a= "new text 6", tt_4.a= "new text 6", nt_3.a= "new text 6", nt_4.a = "new text 6" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 2200", 2200, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 7", nt_3.a= "new text 7", nt_4.a= "new text 7", tt_4.a = "new text 7" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 2300", 2300, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 8", nt_3.a= "new text 8", nt_4.a= "new text 8", tt_4.a = "new text 8" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 2400", 2400, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+
+
+
+
+#
+#12.e) "B M T C" with error in M generates in the binlog the "B M T C" entries.
+#
+# There is a bug in the slave that needs to be fixed before enabling
+# this part of the test. A bug report will be filed referencing this
+# test case.
+BEGIN;
+INSERT INTO nt_3 VALUES ("new text -28", -28, '');
+INSERT INTO tt_3 VALUES ("new text -27", -27, ''), ("new text -28", -28, '');
+ERROR 23000: Duplicate entry '-28' for key 'PRIMARY'
+INSERT INTO tt_1 VALUES ("new text -27", -27, '');
+COMMIT;
+BEGIN;
+INSERT INTO tt_4 VALUES ("new text -28", -28, '');
+INSERT INTO nt_4 VALUES ("new text -27", -27, ''), ("new text -28", -28, '');
+ERROR 23000: Duplicate entry '-28' for key 'PRIMARY'
+INSERT INTO tt_1 VALUES ("new text -28", -28, '');
+COMMIT;
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_3 VALUES ("new text -28", -28, '')
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES ("new text -27", -27, ''), ("new text -28", -28, '')
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -27", -27, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_4 VALUES ("new text -28", -28, '')
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES ("new text -27", -27, ''), ("new text -28", -28, '')
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -28", -28, '')
+master-bin.000001 # Xid # # COMMIT /* XID */
+
+
+
+
+#
+#13) "B M T R" generates in the binlog the "B M T R" entries
+#
+DELETE FROM nt_1;
+BEGIN;
+INSERT INTO nt_1 SELECT * FROM tt_1;
+INSERT INTO tt_2 VALUES ("new text 17", 17, "new text 17");
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+DELETE FROM tt_1;
+BEGIN;
+INSERT INTO tt_1 SELECT * FROM nt_1;
+INSERT INTO tt_2 VALUES ("new text 18", 18, "new text 18");
+ROLLBACK;
+INSERT INTO tt_1 SELECT * FROM nt_1;
+BEGIN;
+INSERT INTO tt_3 VALUES ("new text 2500", 2500, '');
+INSERT INTO tt_1 VALUES ("new text 2600", 2600, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+INSERT INTO tt_3 VALUES("new text 2700", 2700, f1());
+INSERT INTO tt_1 VALUES ("new text 2800", 2800, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+INSERT INTO tt_3 VALUES(2900, 2900, f2());
+INSERT INTO tt_1 VALUES ("new text 3000", 3000, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+INSERT INTO nt_4 VALUES ("new text 3100", 3100, '');
+INSERT INTO tt_1 VALUES ("new text 3200", 3200, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+INSERT INTO nt_4 VALUES("new text 3300", 3300, f1());
+INSERT INTO tt_1 VALUES ("new text 3400", 3400, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+INSERT INTO nt_4 VALUES("new text 3500", 3500, f2());
+INSERT INTO tt_1 VALUES ("new text 3600", 3600, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+CALL pc_i_tt_3(3700, "Testing...");
+INSERT INTO tt_1 VALUES ("new text 3700", 3700, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+UPDATE nt_3, nt_4, tt_3, tt_4 SET nt_3.a= "new text 9", nt_4.a= "new text 9", tt_3.a= "new text 9", tt_4.a= "new text 9" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 3800", 3800, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+UPDATE tt_3, tt_4, nt_3, nt_4 SET tt_3.a= "new text 10", tt_4.a= "new text 10", nt_3.a= "new text 10", nt_4.a = "new text 10" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 3900", 3900, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 11", nt_3.a= "new text 11", nt_4.a= "new text 11", tt_4.a = "new text 11" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 4000", 4000, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 12", nt_3.a= "new text 12", nt_4.a= "new text 12", tt_4.a = "new text 12" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100;
+INSERT INTO tt_1 VALUES ("new text 4100", 4100, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; DELETE FROM nt_1
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_1 SELECT * FROM tt_1
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_2 VALUES ("new text 17", 17, "new text 17")
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; DELETE FROM tt_1
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 SELECT * FROM nt_1
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES ("new text 2500", 2500, '')
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 2600", 2600, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES("new text 2700", 2700, f1())
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 2800", 2800, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES(2900, 2900, f2())
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 3000", 3000, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES ("new text 3100", 3100, '')
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 3200", 3200, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES("new text 3300", 3300, f1())
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 3400", 3400, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES("new text 3500", 3500, f2())
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 3600", 3600, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES ( NAME_CONST('y',_latin1'Testing...' COLLATE 'latin1_swedish_ci'), NAME_CONST('x',3700), NAME_CONST('x',3700))
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 3700", 3700, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE nt_3, nt_4, tt_3, tt_4 SET nt_3.a= "new text 9", nt_4.a= "new text 9", tt_3.a= "new text 9", tt_4.a= "new text 9" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 3800", 3800, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE tt_3, tt_4, nt_3, nt_4 SET tt_3.a= "new text 10", tt_4.a= "new text 10", nt_3.a= "new text 10", nt_4.a = "new text 10" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 3900", 3900, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 11", nt_3.a= "new text 11", nt_4.a= "new text 11", tt_4.a = "new text 11" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 4000", 4000, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; UPDATE tt_3, nt_3, nt_4, tt_4 SET tt_3.a= "new text 12", nt_3.a= "new text 12", nt_4.a= "new text 12", tt_4.a = "new text 12" where nt_3.b = nt_4.b and nt_4.b = tt_3.b and tt_3.b = tt_4.b and tt_4.b = 100
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text 4100", 4100, '')
+master-bin.000001 # Query # # ROLLBACK
+
+
+
+
+#
+#13.e) "B M T R" with error in M generates in the binlog the "B M T R" entries.
+#
+BEGIN;
+INSERT INTO nt_3 VALUES ("new text -30", -30, '');
+INSERT INTO tt_3 VALUES ("new text -29", -29, ''), ("new text -30", -30, '');
+ERROR 23000: Duplicate entry '-30' for key 'PRIMARY'
+INSERT INTO tt_1 VALUES ("new text -30", -30, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+BEGIN;
+INSERT INTO tt_4 VALUES ("new text -30", -30, '');
+INSERT INTO nt_4 VALUES ("new text -29", -29, ''), ("new text -30", -30, '');
+ERROR 23000: Duplicate entry '-30' for key 'PRIMARY'
+INSERT INTO tt_1 VALUES ("new text -31", -31, '');
+ROLLBACK;
+Warnings:
+Warning 1196 Some non-transactional changed tables couldn't be rolled back
+show binlog events from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_3 VALUES ("new text -30", -30, '')
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_3 VALUES ("new text -29", -29, ''), ("new text -30", -30, '')
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -30", -30, '')
+master-bin.000001 # Query # # ROLLBACK
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_4 VALUES ("new text -30", -30, '')
+master-bin.000001 # Query # # use `test`; INSERT INTO nt_4 VALUES ("new text -29", -29, ''), ("new text -30", -30, '')
+master-bin.000001 # Query # # use `test`; INSERT INTO tt_1 VALUES ("new text -31", -31, '')
+master-bin.000001 # Query # # ROLLBACK
+###################################################################################
+# CLEAN
+###################################################################################
+DROP TABLE tt_1;
+DROP TABLE tt_2;
+DROP TABLE tt_3;
+DROP TABLE tt_4;
+DROP TABLE nt_1;
+DROP TABLE nt_2;
+DROP TABLE nt_3;
+DROP TABLE nt_4;
+DROP PROCEDURE pc_i_tt_3;
+DROP FUNCTION f1;
+DROP FUNCTION f2;
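
The result file above exercises how, under statement-based binary logging, statements against the non-transactional nt_* tables issued inside a transaction are written ahead of the BEGIN block, while the transactional tt_* statements stay inside it. As a minimal sketch of case 9 ("B N N T R"), assuming tt_1 is transactional (InnoDB) and nt_1/nt_2 are non-transactional (MyISAM) as set up earlier in the test:

    BEGIN;
    INSERT INTO nt_1 VALUES ("x", 1, "x");  -- non-transactional: logged outside the BEGIN block
    INSERT INTO nt_2 VALUES ("x", 1, "x");  -- non-transactional: logged outside the BEGIN block
    INSERT INTO tt_1 VALUES ("x", 1, "x");  -- transactional: cached until COMMIT/ROLLBACK
    ROLLBACK;                               -- raises warning 1196; the binlog shows the
                                            -- "N N B T R" ordering listed in case 9 above
    SHOW BINLOG EVENTS;
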
=== modified file 'mysql-test/suite/rpl/t/rpl_concurrency_error.test'
--- a/mysql-test/suite/rpl/t/rpl_concurrency_error.test 2009-07-18 20:07:56 +0000
+++ b/mysql-test/suite/rpl/t/rpl_concurrency_error.test 2009-08-13 16:21:01 +0000
@@ -125,14 +125,13 @@ while ($type)
connection master;
sync_slave_with_master;
-# Re-enable this after fixing BUG#46130
-#connection master;
-#let $diff_statement= SELECT * FROM t order by i;
-#source include/diff_master_slave.inc;
-
-#connection master;
-#let $diff_statement= SELECT * FROM n order by d, f;
-#source include/diff_master_slave.inc;
+connection master;
+let $diff_statement= SELECT * FROM t order by i;
+source include/diff_master_slave.inc;
+
+connection master;
+let $diff_statement= SELECT * FROM n order by d, f;
+source include/diff_master_slave.inc;
--echo ########################################################################
--echo # Cleanup
=== added file 'mysql-test/suite/rpl/t/rpl_create_if_not_exists.test'
--- a/mysql-test/suite/rpl/t/rpl_create_if_not_exists.test 1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/t/rpl_create_if_not_exists.test 2009-08-13 02:48:57 +0000
@@ -0,0 +1,70 @@
+# BUG#45574:
+# SP: CREATE DATABASE|TABLE IF NOT EXISTS not binlogged if routine exists.
+#
+# There is an inconsistency between DROP DATABASE|TABLE|EVENT IF EXISTS and
+# CREATE DATABASE|TABLE|EVENT IF NOT EXISTS. DROP IF EXISTS statements are
+# binlogged even if the DB, TABLE or EVENT does not exist. In contrast,
+# only CREATE EVENT IF NOT EXISTS is binlogged when the EVENT already
+# exists.
+#
+# This problem caused some of the tests to fail randomly on PB or PB2.
+#
+# Description:
+# Fixed this bug by adding calls to write_bin_log in:
+# mysql_create_db
+# mysql_create_table_no_lock
+# mysql_create_like_table
+# create_table_from_items
+#
+# Test is implemented as follows:
+#  i) test each "CREATE IF NOT EXISTS" (DDL) found in the MySQL 5.1 manual,
+#     excluding CREATE TEMPORARY TABLE, on existing objects;
+#
+# Note:
+# rpl_create_tmp_table_if_not_exists.test tests CREATE TEMPORARY TABLE cases.
+#
+# References:
+# http://dev.mysql.com/doc/refman/5.1/en/sql-syntax-data-definition.html
+#
+
+source include/master-slave.inc;
+disable_warnings;
+DROP DATABASE IF EXISTS mysqltest;
+
+CREATE DATABASE IF NOT EXISTS mysqltest;
+USE mysqltest;
+CREATE TABLE IF NOT EXISTS t(c1 int);
+CREATE TABLE IF NOT EXISTS t1 LIKE t;
+CREATE TABLE IF NOT EXISTS t2 SELECT * FROM t;
+CREATE EVENT IF NOT EXISTS e
+ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR
+DO SELECT now();
+sync_slave_with_master;
+
+connection slave;
+#Drop the database from the slave.
+#The database and all its tables can be recreated on the slave only if
+#the binlog of the second CREATE command is recorded and sent from master to slave.
+DROP DATABASE mysqltest;
+
+connection master;
+CREATE DATABASE IF NOT EXISTS mysqltest;
+USE mysqltest;
+CREATE TABLE IF NOT EXISTS t(c1 int);
+CREATE TABLE IF NOT EXISTS t1 LIKE t;
+CREATE TABLE IF NOT EXISTS t2 SELECT * FROM t;
+CREATE EVENT IF NOT EXISTS e
+ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR
+DO SELECT now();
+sync_slave_with_master;
+
+connection slave;
+SHOW TABLES in mysqltest;
+#Execution time changes in each run. So we disregard it by calling replace_column.
+replace_column 6 #;
+SHOW EVENTS in mysqltest;
+
+
+connection master;
+DROP DATABASE IF EXISTS mysqltest;
+source include/master-slave-end.inc;
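
For reference, the behaviour the test above depends on (introduced by the fix for BUG#45574) is that CREATE ... IF NOT EXISTS is written to the binary log even when the object already exists on the master, so a slave that has lost the object can recreate it. A minimal sketch, assuming statement-based logging and an illustrative table name:

    CREATE TABLE IF NOT EXISTS t (c1 INT);  -- table created, statement binlogged
    CREATE TABLE IF NOT EXISTS t (c1 INT);  -- table exists: raises a "table already exists"
                                            -- warning, but with the fix the statement is
                                            -- still written to the binlog
    SHOW BINLOG EVENTS;                     -- both CREATE statements should appear
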
=== added file 'mysql-test/suite/rpl/t/rpl_create_tmp_table_if_not_exists.test'
--- a/mysql-test/suite/rpl/t/rpl_create_tmp_table_if_not_exists.test 1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/t/rpl_create_tmp_table_if_not_exists.test 2009-08-13 02:48:57 +0000
@@ -0,0 +1,41 @@
+# BUG#45574:
+# SP: CREATE DATABASE|TABLE IF NOT EXISTS not binlogged if routine exists.
+#
+# There is an inconsistency between DROP DATABASE|TABLE|EVENT IF EXISTS and
+# CREATE DATABASE|TABLE|EVENT IF NOT EXISTS. DROP IF EXISTS statements are
+# binlogged even if the DB, TABLE or EVENT does not exist. In contrast,
+# only CREATE EVENT IF NOT EXISTS is binlogged when the EVENT already
+# exists.
+#
+# This problem caused some of the tests to fail randomly on PB or PB2.
+#
+# Test is implemented as follows:
+#
+# i) test each "CREATE TEMPORARY TABLE IF NOT EXISTS" (DDL) found in the
+#    MySQL 5.1 manual, on existing objects;
+# ii) show binlog events;
+#
+# Note:
+# rpl_create_if_not_exists.test tests other cases.
+#
+# References:
+# http://dev.mysql.com/doc/refman/5.1/en/sql-syntax-data-definition.html
+#
+
+source include/master-slave.inc;
+#CREATE TEMPORARY TABLE statements are not binlogged in row mode,
+#so they must be tested separately.
+source include/have_binlog_format_mixed_or_statement.inc;
+disable_warnings;
+
+DROP DATABASE IF EXISTS mysqltest;
+
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp(c1 int);
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp(c1 int);
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp1 LIKE tmp;
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp1 LIKE tmp;
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp2 SELECT * FROM tmp;
+CREATE TEMPORARY TABLE IF NOT EXISTS tmp2 SELECT * FROM tmp;
+source include/show_binlog_events.inc;
+
+source include/master-slave-end.inc;
=== modified file 'mysql-test/suite/rpl/t/rpl_drop_temp.test'
--- a/mysql-test/suite/rpl/t/rpl_drop_temp.test 2008-02-29 21:05:23 +0000
+++ b/mysql-test/suite/rpl/t/rpl_drop_temp.test 2009-08-28 09:45:57 +0000
@@ -12,21 +12,23 @@ source include/have_binlog_format_mixed_
create database if not exists mysqltest;
--enable_warnings
+connect (con_temp,127.0.0.1,root,,test,$MASTER_MYPORT,);
+
+connection con_temp;
+use mysqltest;
create temporary table mysqltest.t1 (n int)ENGINE=MyISAM;
create temporary table mysqltest.t2 (n int)ENGINE=MyISAM;
-connection master;
-disconnect master;
-
-connection master1;
-# Wait until drop of temp tables appears in binlog
-let $wait_binlog_event= DROP;
-source include/wait_for_binlog_event.inc;
+disconnect con_temp;
+--source include/wait_until_disconnected.inc
+connection master;
sync_slave_with_master;
+
+connection slave;
show status like 'Slave_open_temp_tables';
# Cleanup
-connection default;
+connection master;
drop database mysqltest;
sync_slave_with_master;
=== modified file 'mysql-test/suite/rpl/t/rpl_events.test'
--- a/mysql-test/suite/rpl/t/rpl_events.test 2009-01-30 13:44:49 +0000
+++ b/mysql-test/suite/rpl/t/rpl_events.test 2009-08-31 02:26:01 +0000
@@ -46,12 +46,62 @@ connection master;
DROP EVENT event2;
-sync_slave_with_master;
+#
+# BUG#44331
+# This test verifies if the definer is consistent between master and slave,
+# when the event is created without the DEFINER clause set explicitly or the
+# DEFINER is set to CURRENT_USER
+#
+CREATE TABLE test.t1(details CHAR(30));
+
+CREATE EVENT /*!50000 event44331_1 */
+ ON SCHEDULE AT CURRENT_TIMESTAMP
+ ON COMPLETION PRESERVE DISABLE
+ DO INSERT INTO test.t1 VALUES('event event44331_1 fired - no definer');
+
+CREATE DEFINER=CURRENT_USER /*!50000 EVENT event44331_2 */
+ ON SCHEDULE AT CURRENT_TIMESTAMP
+ ON COMPLETION PRESERVE DISABLE
+ DO INSERT INTO test.t1 VALUES('event event44331_2 fired - DEFINER=CURRENT_USER');
+
+CREATE DEFINER=CURRENT_USER() EVENT event44331_3
+ ON SCHEDULE AT CURRENT_TIMESTAMP
+ ON COMPLETION PRESERVE DISABLE
+ DO INSERT INTO test.t1 VALUES('event event44331_3 fired - DEFINER=CURRENT_USER() function');
-# Doing cleanup of the table referred to in the event to guarantee
-# that there is no bad timing cauing it to try to access the table.
+CREATE /*!50000 DEFINER='user44331' */ EVENT event44331_4
+ ON SCHEDULE AT CURRENT_TIMESTAMP
+ ON COMPLETION PRESERVE DISABLE
+ DO INSERT INTO test.t1 VALUES('event event44331_4 fired - DEFINER=user1');
+
+--echo #on master
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+ where EVENT_NAME='event44331_1';
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+ where EVENT_NAME='event44331_2';
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+ where EVENT_NAME='event44331_3';
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+ where EVENT_NAME='event44331_4';
+
+sync_slave_with_master;
+connection slave;
+--echo #on slave
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+ where EVENT_NAME='event44331_1';
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+ where EVENT_NAME='event44331_2';
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+ where EVENT_NAME='event44331_3';
+select EVENT_SCHEMA, EVENT_NAME, DEFINER from information_schema.events
+ where EVENT_NAME='event44331_4';
connection master;
SET @@global.event_scheduler= @old_event_scheduler;
DROP TABLE t28953;
+DROP TABLE t1;
+DROP EVENT event44331_1;
+DROP EVENT event44331_2;
+DROP EVENT event44331_3;
+DROP EVENT event44331_4;
sync_slave_with_master;
=== modified file 'mysql-test/suite/rpl/t/rpl_loaddata_charset.test'
--- a/mysql-test/suite/rpl/t/rpl_loaddata_charset.test 2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/rpl/t/rpl_loaddata_charset.test 2009-08-12 05:31:56 +0000
@@ -31,3 +31,20 @@ select hex(a) from t1;
connection master;
drop table t1;
sync_slave_with_master;
+
+#
+# Bug#45516
+# When the slave SQL thread executed a LOAD DATA command,
+# thd->variables.collation_database was not set properly to the default
+# database charset.
+#
+
+echo -------------test bug#45516------------------;
+
+# LOAD DATA INFILE
+let $LOAD_LOCAL=1;
+source include/rpl_loaddata_charset.inc;
+
+# LOAD DATA LOCAL INFILE
+let $LOAD_LOCAL=0;
+source include/rpl_loaddata_charset.inc;
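
The actual steps live in include/rpl_loaddata_charset.inc, which is not part of this hunk, so the following is only a sketch of the Bug#45516 scenario; the database, table and data file names are placeholders:

    CREATE DATABASE d CHARACTER SET utf8;
    USE d;
    CREATE TABLE t1 (a VARCHAR(20));
    LOAD DATA INFILE 'data.dat' INTO TABLE t1;  -- file name is illustrative only
    SELECT hex(a) FROM t1;  -- compare master and slave: before the fix the slave SQL
                            -- thread replayed LOAD DATA without the database default
                            -- charset, so the stored bytes could differ
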
=== modified file 'mysql-test/suite/rpl/t/rpl_rewrt_db-slave.opt'
--- a/mysql-test/suite/rpl/t/rpl_rewrt_db-slave.opt 2007-06-27 12:28:02 +0000
+++ b/mysql-test/suite/rpl/t/rpl_rewrt_db-slave.opt 2009-08-28 09:45:57 +0000
@@ -1 +1 @@
-"--replicate-rewrite-db=test->rewrite" "--replicate-rewrite-db=mysqltest1->test"
+"--replicate-rewrite-db=test->rewrite" "--replicate-rewrite-db=mysqltest1->test" "--replicate-rewrite-db=database_master_temp_01->database_slave_temp_01" "--replicate-rewrite-db=database_master_temp_02->database_slave_temp_02" "--replicate-rewrite-db=database_master_temp_03->database_slave_temp_03"
=== modified file 'mysql-test/suite/rpl/t/rpl_rewrt_db.test'
--- a/mysql-test/suite/rpl/t/rpl_rewrt_db.test 2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/rpl/t/rpl_rewrt_db.test 2009-08-28 09:45:57 +0000
@@ -76,9 +76,164 @@ connection slave;
# The empty line last comes from the end line field in the file
select * from rewrite.t1;
+set sql_log_bin= 0;
drop database rewrite;
+set sql_log_bin= 1;
connection master;
+set sql_log_bin= 0;
drop table t1;
+set sql_log_bin= 1;
# End of 4.1 tests
+
+--echo
+--echo ****
+--echo **** Bug #46861 Auto-closing of temporary tables broken by replicate-rewrite-db
+--echo ****
+--echo
+
+--echo ****
+--echo **** Preparing the environment
+--echo ****
+connection master;
+
+connect (con_temp_03,127.0.0.1,root,,test,$MASTER_MYPORT,);
+connect (con_temp_02,127.0.0.1,root,,test,$MASTER_MYPORT,);
+connect (con_temp_01,127.0.0.1,root,,test,$MASTER_MYPORT,);
+
+connection master;
+SET sql_log_bin= 0;
+CREATE DATABASE database_master_temp_01;
+CREATE DATABASE database_master_temp_02;
+CREATE DATABASE database_master_temp_03;
+SET sql_log_bin= 1;
+
+connection slave;
+SET sql_log_bin= 0;
+CREATE DATABASE database_slave_temp_01;
+CREATE DATABASE database_slave_temp_02;
+CREATE DATABASE database_slave_temp_03;
+SET sql_log_bin= 1;
+
+--echo
+--echo ****
+--echo **** Creating temporary tables on different databases with different connections
+--echo ****
+--echo **** con_temp_01 --> creates
+--echo **** t_01_01_temp on database_master_temp_01
+--echo ****
+--echo **** con_temp_02 --> creates
+--echo **** t_01_01_temp on database_master_temp_01
+--echo **** t_02_01_temp, t_02_02_temp on database_master_temp_02
+--echo ****
+--echo **** con_temp_03 --> creates
+--echo **** t_01_01_temp on database_master_temp_01
+--echo **** t_02_01_temp, t_02_02_temp on database_master_temp_02
+--echo **** t_03_01_temp, t_03_02_temp, t_03_03_temp on database_master_temp_03
+--echo ****
+
+--echo
+--echo con_temp_01
+--echo
+connection con_temp_01;
+USE database_master_temp_01;
+CREATE TEMPORARY TABLE t_01_01_temp(a int);
+INSERT INTO t_01_01_temp VALUES(1);
+
+--echo
+--echo con_temp_02
+--echo
+connection con_temp_02;
+USE database_master_temp_01;
+CREATE TEMPORARY TABLE t_01_01_temp(a int);
+INSERT INTO t_01_01_temp VALUES(1);
+USE database_master_temp_02;
+CREATE TEMPORARY TABLE t_02_01_temp(a int);
+INSERT INTO t_02_01_temp VALUES(1);
+CREATE TEMPORARY TABLE t_02_02_temp(a int);
+INSERT INTO t_02_02_temp VALUES(1);
+
+--echo
+--echo con_temp_03
+--echo
+connection con_temp_03;
+USE database_master_temp_01;
+CREATE TEMPORARY TABLE t_01_01_temp(a int);
+INSERT INTO t_01_01_temp VALUES(1);
+USE database_master_temp_02;
+CREATE TEMPORARY TABLE t_02_01_temp(a int);
+INSERT INTO t_02_01_temp VALUES(1);
+CREATE TEMPORARY TABLE t_02_02_temp(a int);
+INSERT INTO t_02_02_temp VALUES(1);
+USE database_master_temp_03;
+CREATE TEMPORARY TABLE t_03_01_temp(a int);
+INSERT INTO t_03_01_temp VALUES(1);
+CREATE TEMPORARY TABLE t_03_02_temp(a int);
+INSERT INTO t_03_02_temp VALUES(1);
+CREATE TEMPORARY TABLE t_03_03_temp(a int);
+INSERT INTO t_03_03_temp VALUES(1);
+
+--echo
+--echo **** Dropping the connections
+--echo **** We want to SHOW BINLOG EVENTS, to know what was logged. But there is no
+--echo **** guarantee that logging of the terminated connections has been done yet. To be
+--echo **** sure that logging has been done, we use a user lock.
+--echo
+connection master;
+sync_slave_with_master;
+connection slave;
+show status like 'Slave_open_temp_tables';
+
+connection master;
+let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1);
+connection con_temp_01;
+select get_lock("con_01",10);
+connection master;
+disconnect con_temp_01;
+select get_lock("con_01",10);
+
+connection con_temp_02;
+select get_lock("con_02",10);
+connection master;
+disconnect con_temp_02;
+select get_lock("con_02",10);
+
+connection con_temp_03;
+select get_lock("con_03",10);
+connection master;
+disconnect con_temp_03;
+select get_lock("con_03",10);
+
+--echo
+--echo **** Checking the binary log and temporary tables
+--echo
+connection master;
+sync_slave_with_master;
+connection slave;
+show status like 'Slave_open_temp_tables';
+
+connection master;
+--source include/show_binlog_events.inc
+
+--echo ****
+--echo **** Cleaning up the test case
+--echo ****
+connection master;
+SET sql_log_bin= 0;
+DROP DATABASE database_master_temp_01;
+DROP DATABASE database_master_temp_02;
+DROP DATABASE database_master_temp_03;
+SET sql_log_bin= 1;
+
+connection slave;
+SET sql_log_bin= 0;
+DROP DATABASE database_slave_temp_01;
+DROP DATABASE database_slave_temp_02;
+DROP DATABASE database_slave_temp_03;
+SET sql_log_bin= 1;
+
+connection master;
+sync_slave_with_master;
+
+# end of 5.0 tests
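
The get_lock()/disconnect pairs above are a synchronization idiom rather than part of the scenario under test: GET_LOCK on a name held by another session blocks until that session releases it, and a user lock is released when its session terminates, so the second GET_LOCK only returns once the disconnecting session's cleanup, including the implicit logging of DROP TEMPORARY TABLE, has completed. A sketch of the idiom, with an illustrative lock name:

    # connection that owns the temporary tables
    SELECT GET_LOCK("sync", 10);  -- take the lock, then disconnect this session
    # surviving connection
    SELECT GET_LOCK("sync", 10);  -- blocks until the disconnect has released the lock,
                                  -- i.e. until its cleanup (and the implicit
                                  -- DROP TEMPORARY TABLE binlog entries) is done
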
=== added file 'mysql-test/suite/rpl/t/rpl_stm_mixing_engines.test'
--- a/mysql-test/suite/rpl/t/rpl_stm_mixing_engines.test 1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/t/rpl_stm_mixing_engines.test 2009-08-26 23:13:03 +0000
@@ -0,0 +1,5 @@
+--source include/have_binlog_format_statement.inc
+--source include/master-slave.inc
+--source include/have_innodb.inc
+
+--source extra/rpl_tests/rpl_mixing_engines.test
=== modified file 'mysql-test/t/almost_full.test'
--- a/mysql-test/t/almost_full.test 2007-11-12 09:00:22 +0000
+++ b/mysql-test/t/almost_full.test 2009-10-28 07:52:34 +0000
@@ -11,11 +11,13 @@ CREATE TABLE t1 (a int auto_increment pr
--disable_query_log
let $1= 303;
+begin;
while ($1)
{
INSERT INTO t1 SET b=repeat('a',200);
dec $1;
}
+commit;
--enable_query_log
DELETE FROM t1 WHERE a=1 or a=5;
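
Many of the test-file hunks that follow apply the same recurring change: mysqltest while-loops that populate tables row by row are wrapped in begin/commit, usually with the query log disabled, so the rows go in as a single transaction instead of as thousands of individually committed statements, which keeps the test run time down. The pattern, sketched in mysqltest syntax with an illustrative table and row count:

    --disable_query_log
    begin;
    let $1= 1000;
    while ($1)
    {
      eval insert into t1 values ($1);
      dec $1;
    }
    commit;
    --enable_query_log
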
=== modified file 'mysql-test/t/alter_table.test'
--- a/mysql-test/t/alter_table.test 2009-06-07 10:05:19 +0000
+++ b/mysql-test/t/alter_table.test 2009-10-28 07:52:34 +0000
@@ -121,11 +121,15 @@ alter table t1 disable keys;
show keys from t1;
#let $1=10000;
let $1=10;
+--disable_query_log
+begin;
while ($1)
{
eval insert into t1 values($1,RAND()*1000,RAND()*1000,RAND());
dec $1;
}
+commit;
+--enable_query_log
alter table t1 enable keys;
show keys from t1;
drop table t1;
@@ -144,11 +148,15 @@ drop table t1;
create table t1 (a int, b int);
let $1=100;
+--disable_query_log
+begin;
while ($1)
{
eval insert into t1 values(1,$1), (2,$1), (3, $1);
dec $1;
}
+commit;
+--enable_query_log
alter table t1 add unique (a,b), add key (b);
show keys from t1;
analyze table t1;
@@ -966,12 +974,14 @@ DROP TABLE t1;
create table t1(f1 int not null, f2 int not null, key (f1), key (f2));
let $count= 50;
--disable_query_log
+begin;
while ($count)
{
EVAL insert into t1 values (1,1),(1,1),(1,1),(1,1),(1,1);
EVAL insert into t1 values (2,2),(2,2),(2,2),(2,2),(2,2);
dec $count ;
}
+commit;
--enable_query_log
select index_length into @unpaked_keys_size from
=== modified file 'mysql-test/t/analyse.test'
--- a/mysql-test/t/analyse.test 2006-09-28 18:32:30 +0000
+++ b/mysql-test/t/analyse.test 2009-08-27 10:22:19 +0000
@@ -14,6 +14,7 @@ create table t2 select * from t1 procedu
select * from t2;
drop table t1,t2;
+--error ER_WRONG_USAGE
EXPLAIN SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE();
#
@@ -102,4 +103,13 @@ select f2 from t1 procedure analyse(1, 1
select f3 from t1 procedure analyse(1, 1);
drop table t1;
+#
+# Bug#46184 Crash, SELECT ... FROM derived table procedure analyze
+#
+CREATE TABLE t1(a INT,b INT,c INT,d INT,e INT,f INT,g INT,h INT,i INT,j INT,k INT);
+INSERT INTO t1 VALUES ();
+--error ER_WRONG_USAGE
+SELECT * FROM (SELECT * FROM t1) d PROCEDURE ANALYSE();
+DROP TABLE t1;
+
--echo End of 4.1 tests
=== modified file 'mysql-test/t/archive.test'
--- a/mysql-test/t/archive.test 2009-03-26 14:27:34 +0000
+++ b/mysql-test/t/archive.test 2009-10-28 07:52:34 +0000
@@ -1576,11 +1576,13 @@ CREATE TABLE t1(a VARCHAR(510)) ENGINE =
let $bug31036=41;
--disable_query_log
+begin;
while($bug31036)
{
INSERT INTO t1(a) VALUES (REPEAT('a', 510));
dec $bug31036;
}
+commit;
--enable_query_log
INSERT INTO t1(a) VALUES ('');
=== modified file 'mysql-test/t/auto_increment.test'
--- a/mysql-test/t/auto_increment.test 2009-02-05 09:49:32 +0000
+++ b/mysql-test/t/auto_increment.test 2009-08-20 12:30:59 +0000
@@ -324,3 +324,21 @@ insert into t1 values(null,0,0,0,null);
replace into t1 values(null,1,0,2,null);
select last_insert_id();
drop table t1;
+
+--echo #
+--echo # Bug#46616: Assertion `!table->auto_increment_field_not_null' on view
+--echo # manipulations
+--echo #
+CREATE TABLE t1 ( a INT );
+INSERT INTO t1 VALUES (1), (1);
+
+CREATE TABLE t2 ( a INT AUTO_INCREMENT KEY );
+--error ER_DUP_ENTRY
+CREATE TABLE IF NOT EXISTS t2 AS SELECT a FROM t1;
+
+UPDATE t2 SET a = 2;
+
+SELECT a FROM t2;
+
+DROP TABLE t1, t2;
+
=== modified file 'mysql-test/t/bench_count_distinct.test'
--- a/mysql-test/t/bench_count_distinct.test 2005-07-28 00:22:47 +0000
+++ b/mysql-test/t/bench_count_distinct.test 2009-10-28 07:52:34 +0000
@@ -7,14 +7,16 @@ drop table if exists t1;
--enable_warnings
create table t1(n int not null, key(n)) delay_key_write = 1;
let $1=100;
-disable_query_log;
+--disable_query_log
+begin;
while ($1)
{
eval insert into t1 values($1);
eval insert into t1 values($1);
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
select count(distinct n) from t1;
explain extended select count(distinct n) from t1;
drop table t1;
=== modified file 'mysql-test/t/change_user.test'
--- a/mysql-test/t/change_user.test 2009-02-12 14:08:56 +0000
+++ b/mysql-test/t/change_user.test 2009-10-28 07:52:34 +0000
@@ -57,13 +57,13 @@ FLUSH STATUS;
--disable_query_log
let $i = 100;
-
+begin;
while ($i)
{
dec $i;
-
SELECT 1;
}
+commit;
--enable_query_log
--enable_result_log
=== modified file 'mysql-test/t/check.test'
--- a/mysql-test/t/check.test 2009-02-09 21:00:15 +0000
+++ b/mysql-test/t/check.test 2009-10-28 07:52:34 +0000
@@ -12,13 +12,15 @@ drop view if exists v1;
# Add a lot of keys to slow down check
create table t1(n int not null, key(n), key(n), key(n), key(n));
let $1=10000;
-disable_query_log;
+--disable_query_log
+begin;
while ($1)
{
eval insert into t1 values ($1);
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
send check table t1 extended;
connection con2;
insert into t1 values (200000);
=== modified file 'mysql-test/t/count_distinct2.test'
--- a/mysql-test/t/count_distinct2.test 2005-07-28 14:09:54 +0000
+++ b/mysql-test/t/count_distinct2.test 2009-10-28 07:52:34 +0000
@@ -51,13 +51,15 @@ drop table t1;
# test the conversion from tree to MyISAM
create table t1 (n int default NULL);
let $1=5000;
-disable_query_log;
+--disable_query_log
+begin;
while ($1)
{
eval insert into t1 values($1);
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
flush status;
select count(distinct n) from t1;
@@ -67,13 +69,15 @@ drop table t1;
# Test use of MyISAM tmp tables
create table t1 (s text);
let $1=5000;
-disable_query_log;
+--disable_query_log
+begin;
while ($1)
{
eval insert into t1 values('$1');
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
flush status;
select count(distinct s) from t1;
show status like 'Created_tmp_disk_tables';
=== modified file 'mysql-test/t/count_distinct3.test'
--- a/mysql-test/t/count_distinct3.test 2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/count_distinct3.test 2009-10-28 07:52:34 +0000
@@ -14,6 +14,7 @@ CREATE TABLE t1 (id INTEGER, grp TINYINT
--disable_query_log
SET @rnd_max= 2147483647;
let $1 = 1000;
+begin;
while ($1)
{
SET @rnd= RAND();
@@ -23,7 +24,7 @@ while ($1)
INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
dec $1;
}
-
+commit;
# We increase the size of t1 here.
SET @orig_myisam_sort_buffer_size = @@session.myisam_sort_buffer_size;
SET session myisam_sort_buffer_size=20000000;
=== modified file 'mysql-test/t/ctype_euckr.test'
--- a/mysql-test/t/ctype_euckr.test 2009-07-24 06:27:23 +0000
+++ b/mysql-test/t/ctype_euckr.test 2009-10-28 07:52:34 +0000
@@ -77,11 +77,13 @@ DROP TABLE t1;
CREATE TABLE t1 (a binary(1), key(a));
--disable_query_log
let $1=255;
+begin;
while($1)
{
eval INSERT INTO t1 VALUES (unhex(hex($1)));
dec $1;
}
+commit;
--enable_query_log
CREATE TABLE t2 (s VARCHAR(4), a VARCHAR(1) CHARACTER SET euckr);
=== modified file 'mysql-test/t/derived.test'
--- a/mysql-test/t/derived.test 2009-07-11 18:44:29 +0000
+++ b/mysql-test/t/derived.test 2009-10-28 07:52:34 +0000
@@ -45,14 +45,16 @@ select * from (select * from t1 where t1
explain select * from (select t1.*, t2.a as t2a from t1,t2 where t1.a=t2.a) t1;
drop table t1, t2;
create table t1(a int not null, t char(8), index(a));
-disable_query_log;
+--disable_query_log
+begin;
let $1 = 10000;
while ($1)
{
eval insert into t1 values ($1,'$1');
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
SELECT * FROM (SELECT * FROM t1) as b ORDER BY a ASC LIMIT 0,20;
explain select count(*) from t1 as tt1, (select * from t1) as tt2;
drop table t1;
=== modified file 'mysql-test/t/events_time_zone.test'
--- a/mysql-test/t/events_time_zone.test 2009-10-06 18:15:09 +0000
+++ b/mysql-test/t/events_time_zone.test 2009-10-28 07:52:34 +0000
@@ -118,6 +118,7 @@ INSERT INTO mysql.time_zone_transition_t
let $transition_unix_time= `SELECT @unix_time`;
let $count= 30;
--disable_query_log
+begin;
while ($count)
{
eval INSERT INTO mysql.time_zone_transition
@@ -126,6 +127,7 @@ while ($count)
let $transition_unix_time= `SELECT $transition_unix_time + @step3`;
dec $count;
}
+commit;
--enable_query_log
let $tz_name = `SELECT CONCAT('b16420_a',UNIX_TIMESTAMP())`;
--replace_result $tz_name <TZ_NAME_1>
=== modified file 'mysql-test/t/fulltext2.test'
--- a/mysql-test/t/fulltext2.test 2007-07-06 18:39:55 +0000
+++ b/mysql-test/t/fulltext2.test 2009-10-28 07:52:34 +0000
@@ -18,6 +18,7 @@ CREATE TABLE t1 (
# two-level entry, second-level tree with depth 2
--disable_query_log
+begin;
let $1=260;
while ($1)
{
@@ -40,6 +41,7 @@ while ($1)
eval insert t1 (a) values ('aaayyy');
dec $1;
}
+commit;
--enable_query_log
# converting to two-level
@@ -113,6 +115,7 @@ CREATE TABLE t1 (
# two-level entry, second-level tree with depth 2
--disable_query_log
let $1=260;
+begin;
while ($1)
{
eval insert t1 (a) values ('aaaxxx');
@@ -130,6 +133,7 @@ while ($1)
eval insert t1 (a) values ('aaayyy');
dec $1;
}
+commit;
--enable_query_log
select count(*) from t1 where match a against ('aaaxxx');
=== modified file 'mysql-test/t/func_misc.test'
--- a/mysql-test/t/func_misc.test 2009-06-11 16:21:32 +0000
+++ b/mysql-test/t/func_misc.test 2009-10-28 07:52:34 +0000
@@ -213,11 +213,15 @@ start_ts DATETIME, end_ts DATETIME,
start_cached INTEGER, end_cached INTEGER);
CREATE TABLE t1 (f1 BIGINT);
let $num = `SELECT @row_count`;
+--disable_query_log
+begin;
while ($num)
{
INSERT INTO t1 VALUES (1);
dec $num;
}
+commit;
+--enable_query_log
let $loops = 4;
let $num = $loops;
=== modified file 'mysql-test/t/gis-rtree.test'
--- a/mysql-test/t/gis-rtree.test 2009-07-10 23:12:13 +0000
+++ b/mysql-test/t/gis-rtree.test 2009-10-28 07:52:34 +0000
@@ -17,12 +17,16 @@ SHOW CREATE TABLE t1;
let $1=150;
let $2=150;
+--disable_query_log
+begin;
while ($1)
{
eval INSERT INTO t1 (g) VALUES (GeomFromText('LineString($1 $1, $2 $2)'));
dec $1;
inc $2;
}
+commit;
+--enable_query_log
SELECT count(*) FROM t1;
EXPLAIN SELECT fid, AsText(g) FROM t1 WHERE Within(g, GeomFromText('Polygon((140 140,160 140,160 160,140 160,140 140))'));
@@ -35,6 +39,8 @@ CREATE TABLE t2 (
g GEOMETRY NOT NULL
) ENGINE=MyISAM;
+--disable_query_log
+begin;
let $1=10;
while ($1)
{
@@ -46,6 +52,8 @@ while ($1)
}
dec $1;
}
+commit;
+--enable_query_log
ALTER TABLE t2 ADD SPATIAL KEY(g);
SHOW CREATE TABLE t2;
@@ -55,6 +63,8 @@ EXPLAIN SELECT fid, AsText(g) FROM t2 WH
SELECT fid, AsText(g) FROM t2 WHERE Within(g,
GeomFromText('Polygon((40 40,60 40,60 60,40 60,40 40))'));
+--disable_query_log
+begin;
let $1=10;
while ($1)
{
@@ -67,6 +77,8 @@ while ($1)
}
dec $1;
}
+commit;
+--enable_query_log
DROP TABLE t2;
=== modified file 'mysql-test/t/group_min_max.test'
--- a/mysql-test/t/group_min_max.test 2009-07-13 17:36:54 +0000
+++ b/mysql-test/t/group_min_max.test 2009-08-30 07:03:37 +0000
@@ -1018,3 +1018,18 @@ DROP TABLE t;
--echo End of 5.0 tests
+
+--echo #
+--echo # Bug #46607: Assertion failed: (cond_type == Item::FUNC_ITEM) results in
+--echo # server crash
+--echo #
+
+CREATE TABLE t (a INT, b INT, INDEX (a,b));
+INSERT INTO t VALUES (2,0), (2,0), (2,1), (2,1);
+INSERT INTO t SELECT * FROM t;
+
+SELECT a, MAX(b) FROM t WHERE b GROUP BY a;
+
+DROP TABLE t;
+
+--echo End of 5.1 tests
=== modified file 'mysql-test/t/handler_myisam.test'
--- a/mysql-test/t/handler_myisam.test 2006-08-16 17:29:49 +0000
+++ b/mysql-test/t/handler_myisam.test 2009-08-21 05:55:35 +0000
@@ -19,3 +19,22 @@ let $other_engine_type= MEMORY;
let $other_handler_engine_type= MyISAM;
--source include/handler.inc
+
+--echo #
+--echo # BUG #46456: HANDLER OPEN + TRUNCATE + DROP (temporary) TABLE, crash
+--echo #
+CREATE TABLE t1 AS SELECT 1 AS f1;
+HANDLER t1 OPEN;
+TRUNCATE t1;
+--error ER_UNKNOWN_TABLE
+HANDLER t1 READ FIRST;
+DROP TABLE t1;
+
+CREATE TEMPORARY TABLE t1 AS SELECT 1 AS f1;
+HANDLER t1 OPEN;
+TRUNCATE t1;
+--error ER_UNKNOWN_TABLE
+HANDLER t1 READ FIRST;
+DROP TABLE t1;
+
+--echo End of 5.1 tests
=== modified file 'mysql-test/t/heap.test'
--- a/mysql-test/t/heap.test 2007-06-06 17:57:07 +0000
+++ b/mysql-test/t/heap.test 2009-10-28 07:52:34 +0000
@@ -234,7 +234,8 @@ drop table t1,t2,t3;
#
create table t1 (v varchar(10), c char(10), t varchar(50), key(v), key(c), key(t(10)));
show create table t1;
-disable_query_log;
+--disable_query_log
+begin;
let $1=10;
while ($1)
{
@@ -248,7 +249,9 @@ while ($1)
}
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
+
select count(*) from t1;
insert into t1 values(concat('a',char(1)),concat('a',char(1)),concat('a',char(1)));
select count(*) from t1 where v='a';
@@ -318,7 +321,8 @@ drop table t1;
create table t1 (v varchar(10), c char(10), t varchar(50), key using btree (v), key using btree (c), key using btree (t(10)));
show create table t1;
-disable_query_log;
+--disable_query_log
+begin;
let $1=10;
while ($1)
{
@@ -332,7 +336,8 @@ while ($1)
}
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
select count(*) from t1;
insert into t1 values(concat('a',char(1)),concat('a',char(1)),concat('a',char(1)));
select count(*) from t1 where v='a';
=== modified file 'mysql-test/t/innodb_xtradb_bug317074.test'
--- a/mysql-test/t/innodb_xtradb_bug317074.test 2009-09-18 19:27:04 +0000
+++ b/mysql-test/t/innodb_xtradb_bug317074.test 2009-10-28 07:52:34 +0000
@@ -1,4 +1,3 @@
---source include/big_test.inc
--source include/have_innodb.inc
SET @old_innodb_file_format=@@innodb_file_format;
@@ -7,16 +6,16 @@ SET @old_innodb_file_format_check=@@inno
SET GLOBAL innodb_file_format='Barracuda';
SET GLOBAL innodb_file_per_table=ON;
--- disable_query_log
--- disable_result_log
-
+--disable_warnings
DROP TABLE IF EXISTS `test1`;
+--enable_warnings
CREATE TABLE IF NOT EXISTS `test1` (
`a` int primary key auto_increment,
`b` int default 0,
`c` char(100) default 'testtest'
) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
+set autocommit=0;
delimiter |;
CREATE PROCEDURE insert_many(p1 int)
BEGIN
@@ -26,14 +25,18 @@ REPEAT
insert into test1 set b=1;
SET @x = @x + 1;
SET @y = @y + 1;
- IF @y >= 100 THEN
+ IF @y >= 1000 THEN
commit;
SET @y = 0;
END IF;
UNTIL @x >= p1 END REPEAT;
END|
delimiter ;|
+--disable_query_log
+--disable_result_log
call insert_many(100000);
+--enable_query_log
+--enable_result_log
DROP PROCEDURE insert_many;
# The bug is hangup at the following statement
=== modified file 'mysql-test/t/insert.test'
--- a/mysql-test/t/insert.test 2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/insert.test 2009-10-28 07:52:34 +0000
@@ -151,7 +151,8 @@ drop table t1;
create table t1(id1 int not null auto_increment primary key, t char(12));
create table t2(id2 int not null, t char(12));
create table t3(id3 int not null, t char(12), index(id3));
-disable_query_log;
+--disable_query_log
+begin;
let $1 = 100;
while ($1)
{
@@ -170,7 +171,9 @@ while ($1)
}
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
+
select count(*) from t2;
insert into t2 select t1.* from t1, t2 t, t3 where t1.id1 = t.id2 and t.id2 = t3.id3;
select count(*) from t2;
=== modified file 'mysql-test/t/kill.test'
--- a/mysql-test/t/kill.test 2008-03-13 17:54:29 +0000
+++ b/mysql-test/t/kill.test 2009-10-28 07:52:34 +0000
@@ -67,12 +67,14 @@ connection conn1;
-- disable_result_log
-- disable_query_log
+begin;
let $1 = 4096;
while ($1)
{
eval insert into t1 values ($1);
dec $1;
}
+commit;
-- enable_query_log
-- enable_result_log
@@ -265,6 +267,8 @@ connection con1;
let $ID= `select connection_id()`;
let $tab_count= 40;
+--disable_query_log
+begin;
let $i= $tab_count;
while ($i)
{
@@ -272,6 +276,8 @@ while ($i)
eval INSERT INTO t$i VALUES (1),(2),(3),(4),(5),(6),(7);
dec $i ;
}
+commit;
+--enable_query_log
set session optimizer_search_depth=0;
let $i=$tab_count;
=== modified file 'mysql-test/t/lock_multi_bug38499.test'
--- a/mysql-test/t/lock_multi_bug38499.test 2009-07-06 10:30:25 +0000
+++ b/mysql-test/t/lock_multi_bug38499.test 2009-08-28 21:49:16 +0000
@@ -5,6 +5,9 @@
# Save the initial number of concurrent sessions
--source include/count_sessions.inc
+SET @odl_sync_frm = @@global.sync_frm;
+SET @@global.sync_frm = OFF;
+
connect (locker,localhost,root,,);
connect (writer,localhost,root,,);
@@ -214,6 +217,8 @@ DROP TABLE t1;
--disconnect locker
--disconnect writer
+SET @@global.sync_frm = @odl_sync_frm;
+
# End of 5.0 tests
# Wait till all disconnects are completed
=== modified file 'mysql-test/t/lock_multi_bug38691.test'
--- a/mysql-test/t/lock_multi_bug38691.test 2009-03-23 14:22:31 +0000
+++ b/mysql-test/t/lock_multi_bug38691.test 2009-08-28 21:49:16 +0000
@@ -8,6 +8,9 @@
# Save the initial number of concurrent sessions
--source include/count_sessions.inc
+SET @odl_sync_frm = @@global.sync_frm;
+SET @@global.sync_frm = OFF;
+
# Test to see if select will get the lock ahead of low priority update
connect (locker,localhost,root,,);
@@ -136,6 +139,8 @@ DROP TABLE t1, t2, t3;
--disconnect locker
--disconnect writer
+SET @@global.sync_frm = @odl_sync_frm;
+
# Wait till all disconnects are completed
--source include/wait_until_count_sessions.inc
=== modified file 'mysql-test/t/merge.test'
--- a/mysql-test/t/merge.test 2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/merge.test 2009-11-06 17:22:32 +0000
@@ -1274,6 +1274,8 @@ DROP TABLE t1, t2, t3;
CREATE TABLE t1 (id INTEGER, grp TINYINT, id_rev INTEGER);
SET @rnd_max= 2147483647;
let $1 = 10;
+--disable_query_log
+begin;
while ($1)
{
SET @rnd= RAND();
@@ -1283,6 +1285,8 @@ while ($1)
INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
dec $1;
}
+commit;
+--enable_query_log
set @@read_buffer_size=2*1024*1024;
CREATE TABLE t2 SELECT * FROM t1;
INSERT INTO t1 (id, grp, id_rev) SELECT id, grp, id_rev FROM t2;
@@ -1628,6 +1632,17 @@ DROP TABLE m1,t1,t2,t3,t4,t5,t6,t7;
--error ER_NO_SUCH_TABLE
SELECT 1 FROM m1; # Should not hang!
+--echo #
+--echo # Bug #46614: Assertion in show_create_trigger()
+--echo #
+CREATE TABLE t1(a int);
+CREATE TABLE t2(a int);
+CREATE TABLE t3(a int) ENGINE = MERGE UNION(t1, t2);
+CREATE TRIGGER tr1 AFTER INSERT ON t3 FOR EACH ROW CALL foo();
+SHOW CREATE TRIGGER tr1;
+DROP TRIGGER tr1;
+DROP TABLE t1, t2, t3;
+
--echo End of 5.1 tests
--disable_result_log
=== modified file 'mysql-test/t/multi_update.test'
--- a/mysql-test/t/multi_update.test 2009-02-09 21:00:15 +0000
+++ b/mysql-test/t/multi_update.test 2009-10-28 07:52:34 +0000
@@ -20,7 +20,8 @@ delete from mysql.user where user=_binar
create table t1(id1 int not null auto_increment primary key, t char(12));
create table t2(id2 int not null, t char(12));
create table t3(id3 int not null, t char(12), index(id3));
-disable_query_log;
+--disable_query_log
+begin;
let $1 = 100;
while ($1)
{
@@ -39,7 +40,8 @@ while ($1)
}
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
select count(*) from t1 where id1 > 95;
select count(*) from t2 where id2 > 95;
@@ -75,7 +77,8 @@ drop table t1,t2,t3;
create table t1(id1 int not null primary key, t varchar(100)) pack_keys = 1;
create table t2(id2 int not null, t varchar(100), index(id2)) pack_keys = 1;
-disable_query_log;
+--disable_query_log
+begin;
let $1 = 1000;
while ($1)
{
@@ -88,7 +91,8 @@ while ($1)
}
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
delete t1 from t1,t2 where t1.id1 = t2.id2 and t1.id1 > 500;
drop table t1,t2;
=== modified file 'mysql-test/t/multi_update2.test'
--- a/mysql-test/t/multi_update2.test 2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/multi_update2.test 2009-10-28 07:52:34 +0000
@@ -48,13 +48,14 @@ CREATE TABLE t1 ( a INT NOT NULL, b INT
INSERT INTO t1 VALUES (1,1),(2,2),(3,3),(4,4);
let $1=19;
set @d=4;
+begin;
while ($1)
{
eval INSERT INTO t1 SELECT a+@d,b+@d FROM t1;
eval SET @d=@d*2;
dec $1;
}
-
+commit;
--enable_query_log
ALTER TABLE t1 ADD INDEX i1(a);
DELETE FROM t1 WHERE a > 2000000;
=== modified file 'mysql-test/t/myisam.test'
--- a/mysql-test/t/myisam.test 2009-09-18 01:04:43 +0000
+++ b/mysql-test/t/myisam.test 2009-10-28 07:52:34 +0000
@@ -33,7 +33,8 @@ drop table t1;
create table t1 (a tinyint not null auto_increment, b blob not null, primary key (a));
let $1=100;
-disable_query_log;
+--disable_query_log
+begin;
--disable_warnings
SET SQL_WARNINGS=0;
while ($1)
@@ -41,9 +42,10 @@ while ($1)
eval insert into t1 (b) values(repeat(char(65+$1),65550-$1));
dec $1;
}
+commit;
SET SQL_WARNINGS=1;
--enable_warnings
-enable_query_log;
+--enable_query_log
check table t1;
repair table t1;
delete from t1 where (a & 1);
@@ -380,14 +382,16 @@ check table t1;
# check updating with keys
#
-disable_query_log;
+--disable_query_log
+begin;
let $1 = 100;
while ($1)
{
eval insert into t1 (b) values (repeat(char(($1 & 32)+65), $1));
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
update t1 set b=repeat(left(b,1),255) where a between 1 and 5;
update t1 set b=repeat(left(b,1),10) where a between 32 and 43;
update t1 set b=repeat(left(b,1),2) where a between 64 and 66;
@@ -551,11 +555,13 @@ create table t2 (a int);
let $i=1000;
set @@rand_seed1=31415926,@@rand_seed2=2718281828;
--disable_query_log
+begin;
while ($i)
{
dec $i;
insert t2 values (rand()*100000);
}
+commit;
--enable_query_log
insert t1 select * from t2;
show keys from t1;
@@ -1360,11 +1366,13 @@ CREATE TABLE t1 (
--disable_query_log
let $count= 100;
--echo # Insert $count rows. Query log disabled.
+begin;
while ($count)
{
INSERT INTO t1 VALUES ('a', 'b');
dec $count;
}
+commit;
--enable_query_log
#
# Change most of the rows into long character values with > 127 characters.
@@ -1444,11 +1452,13 @@ CREATE TABLE t1 (
--disable_query_log
let $count= 100;
--echo # Insert $count rows. Query log disabled.
+begin;
while ($count)
{
INSERT INTO t1 VALUES ('a', 'b');
dec $count;
}
+commit;
--enable_query_log
#
# Change most of the rows into long character values with > 42 characters.
=== modified file 'mysql-test/t/myisam_debug.test'
--- a/mysql-test/t/myisam_debug.test 2009-05-04 09:05:16 +0000
+++ b/mysql-test/t/myisam_debug.test 2009-10-28 07:52:34 +0000
@@ -24,11 +24,15 @@ CREATE TABLE `t2` (
INSERT INTO t2 (id) VALUES (123);
let $i = 10;
+--disable_query_log
+begin;
while ($i)
{
INSERT INTO t2 (id) SELECT id FROM t2;
dec $i;
}
+commit;
+--enable_query_log
--echo # Switch to insert Connection
CONNECTION insertConn;
=== modified file 'mysql-test/t/myisampack.test'
--- a/mysql-test/t/myisampack.test 2009-04-07 11:36:15 +0000
+++ b/mysql-test/t/myisampack.test 2009-10-28 07:52:34 +0000
@@ -69,11 +69,13 @@ CREATE TABLE t1(f1 VARCHAR(200), f2 TEX
INSERT INTO t1 VALUES ('foo', 'foo1'), ('bar', 'bar1');
let $i=9;
--disable_query_log
+begin;
while ($i)
{
INSERT INTO t1 SELECT * FROM t1;
dec $i;
}
+commit;
--enable_query_log
FLUSH TABLE t1;
--echo # Compress the table using MYISAMPACK tool
=== modified file 'mysql-test/t/not_partition.test'
--- a/mysql-test/t/not_partition.test 2006-10-26 17:11:09 +0000
+++ b/mysql-test/t/not_partition.test 2009-01-08 14:16:44 +0000
@@ -1,12 +1,35 @@
--disable_abort_on_error
-# Run this tets only when mysqld don't has partitioning
+# Run this test only when mysqld doesn't have partitioning (not compiled in);
# the statements are not expected to work, just check that we
# can't crash the server
-- require r/not_partition.require
disable_query_log;
show variables like "have_partitioning";
enable_query_log;
-
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+
+#
+# Bug#39893: Crash if select on a partitioned table,
+# when partitioning is disabled
+FLUSH TABLES;
+--copy_file $MYSQLTEST_VARDIR/std_data_ln/parts/t1.frm $MYSQLD_DATADIR/test/t1.frm
+SELECT * FROM t1;
+TRUNCATE TABLE t1;
+ANALYZE TABLE t1;
+CHECK TABLE t1;
+OPTIMIZE TABLE t1;
+REPAIR TABLE t1;
+ALTER TABLE t1 REPAIR PARTITION ALL;
+ALTER TABLE t1 CHECK PARTITION ALL;
+ALTER TABLE t1 OPTIMIZE PARTITION ALL;
+ALTER TABLE t1 ANALYZE PARTITION ALL;
+ALTER TABLE t1 REBUILD PARTITION ALL;
+ALTER TABLE t1 ENGINE Memory;
+ALTER TABLE t1 ADD (new INT);
+DROP TABLE t1;
--error ER_FEATURE_DISABLED
CREATE TABLE t1 (
=== modified file 'mysql-test/t/order_by.test'
--- a/mysql-test/t/order_by.test 2009-08-07 11:51:40 +0000
+++ b/mysql-test/t/order_by.test 2009-10-28 07:52:34 +0000
@@ -374,14 +374,16 @@ DROP TABLE t1;
#
create table t1(id int not null auto_increment primary key, t char(12));
-disable_query_log;
+--disable_query_log
+begin;
let $1 = 1000;
while ($1)
{
eval insert into t1(t) values ('$1');
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
explain select id,t from t1 order by id;
explain select id,t from t1 force index (primary) order by id;
drop table t1;
=== modified file 'mysql-test/t/order_fill_sortbuf.test'
--- a/mysql-test/t/order_fill_sortbuf.test 2005-07-28 00:22:47 +0000
+++ b/mysql-test/t/order_fill_sortbuf.test 2009-10-28 07:52:34 +0000
@@ -12,13 +12,15 @@ CREATE TABLE `t1` (
`id2` int(11) NOT NULL default '0',
`id3` int(11) NOT NULL default '0');
let $1=4000;
-disable_query_log;
+--disable_query_log
+begin;
while ($1)
{
eval insert into t1 (id,id2,id3) values ($1,$1,$1);
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
create table t2 select id2 from t1 order by id3;
select count(*) from t2;
drop table t1,t2;
=== modified file 'mysql-test/t/partition.test'
--- a/mysql-test/t/partition.test 2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/partition.test 2009-11-06 17:22:32 +0000
@@ -15,6 +15,53 @@ drop table if exists t1, t2;
--enable_warnings
#
+# Bug#46639: 1030 (HY000): Got error 124 from storage engine on
+# INSERT ... SELECT ...
+CREATE TABLE t1 (
+ a int NOT NULL,
+ b int NOT NULL);
+
+CREATE TABLE t2 (
+ a int NOT NULL,
+ b int NOT NULL,
+ INDEX(b)
+)
+PARTITION BY HASH(a) PARTITIONS 2;
+
+INSERT INTO t1 VALUES (399, 22);
+INSERT INTO t2 VALUES (1, 22), (1, 42);
+
+INSERT INTO t2 SELECT 1, 399 FROM t2, t1
+WHERE t1.b = t2.b;
+
+DROP TABLE t1, t2;
+
+#
+# Bug#46478: timestamp field incorrectly defaulted when partition is reorganized
+#
+CREATE TABLE t1 (
+ a timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ b varchar(10),
+ PRIMARY KEY (a)
+)
+PARTITION BY RANGE (to_days(a)) (
+ PARTITION p1 VALUES LESS THAN (733407),
+ PARTITION pmax VALUES LESS THAN MAXVALUE
+);
+
+INSERT INTO t1 VALUES ('2007-07-30 17:35:48', 'p1');
+INSERT INTO t1 VALUES ('2009-07-14 17:35:55', 'pmax');
+INSERT INTO t1 VALUES ('2009-09-21 17:31:42', 'pmax');
+
+SELECT * FROM t1;
+ALTER TABLE t1 REORGANIZE PARTITION pmax INTO (
+ PARTITION p3 VALUES LESS THAN (733969),
+ PARTITION pmax VALUES LESS THAN MAXVALUE);
+SELECT * FROM t1;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+#
# Bug#36001: Partitions: spelling and using some error messages
#
--error ER_FOREIGN_KEY_ON_PARTITIONED
@@ -1680,6 +1727,7 @@ create table t1
insert into t1 values (null,null);
--disable_query_log
+begin;
let $cnt= 1000;
while ($cnt)
{
@@ -1687,6 +1735,7 @@ while ($cnt)
update t1 set s2 = 2;
dec $cnt;
}
+commit;
--enable_query_log
drop table t1;
@@ -1804,11 +1853,13 @@ CREATE TABLE t1(id MEDIUMINT NOT NULL AU
PARTITION pa11 values less than MAXVALUE);
--disable_query_log
let $n= 15;
+begin;
while ($n)
{
insert into t1 (user) values ('mysql');
dec $n;
}
+commit;
--enable_query_log
show create table t1;
drop table t1;
=== modified file 'mysql-test/t/partition_archive.test'
--- a/mysql-test/t/partition_archive.test 2007-12-06 18:17:42 +0000
+++ b/mysql-test/t/partition_archive.test 2009-10-28 07:52:34 +0000
@@ -94,11 +94,13 @@ CREATE TABLE t1(id MEDIUMINT NOT NULL AU
--disable_query_log
let $n= 100;
+begin;
while ($n)
{
insert into t1 (f1) values (repeat('a',25));
dec $n;
}
+commit;
--enable_query_log
show create table t1;
=== added file 'mysql-test/t/partition_disabled-master.opt'
--- a/mysql-test/t/partition_disabled-master.opt 1970-01-01 00:00:00 +0000
+++ b/mysql-test/t/partition_disabled-master.opt 2009-01-08 14:16:44 +0000
@@ -0,0 +1 @@
+--loose-skip-partition
=== added file 'mysql-test/t/partition_disabled.test'
--- a/mysql-test/t/partition_disabled.test 1970-01-01 00:00:00 +0000
+++ b/mysql-test/t/partition_disabled.test 2009-08-12 10:03:05 +0000
@@ -0,0 +1,85 @@
+--disable_abort_on_error
+# Run this test only when mysqld has partitioning, but it is disabled.
+# The statements are not expected to work, just check that we
+# can't crash the server.
+--require r/disabled_partition.require
+--disable_query_log
+show variables like "have_partitioning";
+--enable_query_log
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+
+#
+# Bug#39893: Crash if select on a partitioned table,
+# when partitioning is disabled
+FLUSH TABLES;
+--copy_file $MYSQLTEST_VARDIR/std_data/parts/t1.frm $MYSQLD_DATADIR/test/t1.frm
+SELECT * FROM t1;
+TRUNCATE TABLE t1;
+ANALYZE TABLE t1;
+CHECK TABLE t1;
+OPTIMIZE TABLE t1;
+REPAIR TABLE t1;
+ALTER TABLE t1 REPAIR PARTITION ALL;
+ALTER TABLE t1 CHECK PARTITION ALL;
+ALTER TABLE t1 OPTIMIZE PARTITION ALL;
+ALTER TABLE t1 ANALYZE PARTITION ALL;
+ALTER TABLE t1 REBUILD PARTITION ALL;
+ALTER TABLE t1 ENGINE Memory;
+ALTER TABLE t1 ADD (new INT);
+DROP TABLE t1;
+
+--error ER_OPTION_PREVENTS_STATEMENT
+CREATE TABLE t1 (
+ firstname VARCHAR(25) NOT NULL,
+ lastname VARCHAR(25) NOT NULL,
+ username VARCHAR(16) NOT NULL,
+ email VARCHAR(35),
+ joined DATE NOT NULL
+)
+PARTITION BY KEY(joined)
+PARTITIONS 6;
+
+--error ER_OPTION_PREVENTS_STATEMENT
+ALTER TABLE t1 PARTITION BY KEY(joined) PARTITIONS 2;
+
+--error ER_BAD_TABLE_ERROR
+drop table t1;
+
+--error ER_OPTION_PREVENTS_STATEMENT
+CREATE TABLE t1 (
+ firstname VARCHAR(25) NOT NULL,
+ lastname VARCHAR(25) NOT NULL,
+ username VARCHAR(16) NOT NULL,
+ email VARCHAR(35),
+ joined DATE NOT NULL
+)
+PARTITION BY RANGE( YEAR(joined) ) (
+ PARTITION p0 VALUES LESS THAN (1960),
+ PARTITION p1 VALUES LESS THAN (1970),
+ PARTITION p2 VALUES LESS THAN (1980),
+ PARTITION p3 VALUES LESS THAN (1990),
+ PARTITION p4 VALUES LESS THAN MAXVALUE
+);
+--error ER_BAD_TABLE_ERROR
+drop table t1;
+
+--error ER_OPTION_PREVENTS_STATEMENT
+CREATE TABLE t1 (id INT, purchased DATE)
+ PARTITION BY RANGE( YEAR(purchased) )
+ SUBPARTITION BY HASH( TO_DAYS(purchased) )
+ SUBPARTITIONS 2 (
+ PARTITION p0 VALUES LESS THAN (1990),
+ PARTITION p1 VALUES LESS THAN (2000),
+ PARTITION p2 VALUES LESS THAN MAXVALUE
+ );
+--error ER_BAD_TABLE_ERROR
+drop table t1;
+
+# Create a table without partitions to test "EXPLAIN PARTITIONS"
+create table t1 (a varchar(10) charset latin1 collate latin1_bin);
+insert into t1 values (''),(' '),('a'),('a '),('a ');
+explain partitions select * from t1 where a='a ' OR a='a';
+drop table t1;
=== modified file 'mysql-test/t/partition_pruning.test'
--- a/mysql-test/t/partition_pruning.test 2008-12-28 11:33:49 +0000
+++ b/mysql-test/t/partition_pruning.test 2009-08-28 10:55:59 +0000
@@ -9,6 +9,357 @@ drop table if exists t1,t2,t3,t4,t5,t6,t
--enable_warnings
#
+# Bug#20577: Partitions: use of to_days() function leads to selection failures
+#
+--let $explain_partitions= 1;
+--let $verify_without_partitions= 0;
+--echo # test of RANGE and index
+CREATE TABLE t1 (a DATE, KEY(a))
+PARTITION BY RANGE (TO_DAYS(a))
+(PARTITION `pNULL` VALUES LESS THAN (0),
+ PARTITION `p0001-01-01` VALUES LESS THAN (366 + 1),
+ PARTITION `p1001-01-01` VALUES LESS THAN (TO_DAYS('1001-01-01') + 1),
+ PARTITION `p2001-01-01` VALUES LESS THAN (TO_DAYS('2001-01-01') + 1));
+if ($verify_without_partitions)
+{
+ALTER TABLE t1 REMOVE PARTITIONING;
+}
+INSERT INTO t1 VALUES ('0000-00-00'), ('0000-01-02'), ('0001-01-01'),
+ ('1001-00-00'), ('1001-01-01'), ('1002-00-00'), ('2001-01-01');
+--source include/partition_date_range.inc
+--echo # test without index
+ALTER TABLE t1 DROP KEY a;
+--source include/partition_date_range.inc
+DROP TABLE t1;
+
+--echo # test of LIST and index
+CREATE TABLE t1 (a DATE, KEY(a))
+PARTITION BY LIST (TO_DAYS(a))
+(PARTITION `p0001-01-01` VALUES IN (TO_DAYS('0001-01-01')),
+ PARTITION `p2001-01-01` VALUES IN (TO_DAYS('2001-01-01')),
+ PARTITION `pNULL` VALUES IN (NULL),
+ PARTITION `p0000-01-02` VALUES IN (TO_DAYS('0000-01-02')),
+ PARTITION `p1001-01-01` VALUES IN (TO_DAYS('1001-01-01')));
+if ($verify_without_partitions)
+{
+ALTER TABLE t1 REMOVE PARTITIONING;
+}
+INSERT INTO t1 VALUES ('0000-00-00'), ('0000-01-02'), ('0001-01-01'),
+ ('1001-00-00'), ('1001-01-01'), ('1002-00-00'), ('2001-01-01');
+--source include/partition_date_range.inc
+--echo # test without index
+ALTER TABLE t1 DROP KEY a;
+--source include/partition_date_range.inc
+DROP TABLE t1;
+
+#
+# Bug#46362: Endpoint should be set to false for TO_DAYS(DATE)
+# There is a problem when comparing DATE with DATETIME.
+# In pruning it is converted into the field type
+# and in row evaluation it is converted to longlong
+# (like a DATETIME).
+--echo # Test with DATETIME column NOT NULL
+CREATE TABLE t1 (
+ a int(10) unsigned NOT NULL,
+ b DATETIME NOT NULL,
+ PRIMARY KEY (a, b)
+) PARTITION BY RANGE (TO_DAYS(b))
+(PARTITION p20090401 VALUES LESS THAN (TO_DAYS('2009-04-02')),
+ PARTITION p20090402 VALUES LESS THAN (TO_DAYS('2009-04-03')),
+ PARTITION p20090403 VALUES LESS THAN (TO_DAYS('2009-04-04')),
+ PARTITION p20090404 VALUES LESS THAN (TO_DAYS('2009-04-05')),
+ PARTITION p20090405 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, '2009-01-01'), (1, '2009-04-01'), (2, '2009-04-01'),
+ (1, '2009-04-02'), (2, '2009-04-02'), (1, '2009-04-02 23:59:59'),
+ (1, '2009-04-03'), (2, '2009-04-03'), (1, '2009-04-04'), (2, '2009-04-04'),
+ (1, '2009-04-05'), (1, '2009-04-06'), (1, '2009-04-07');
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-02 23:59:58' AS DATETIME);
+DROP TABLE t1;
+
+--echo # Test with DATE column NOT NULL
+CREATE TABLE t1 (
+ a int(10) unsigned NOT NULL,
+ b DATE NOT NULL,
+ PRIMARY KEY (a, b)
+) PARTITION BY RANGE (TO_DAYS(b))
+(PARTITION p20090401 VALUES LESS THAN (TO_DAYS('2009-04-02')),
+ PARTITION p20090402 VALUES LESS THAN (TO_DAYS('2009-04-03')),
+ PARTITION p20090403 VALUES LESS THAN (TO_DAYS('2009-04-04')),
+ PARTITION p20090404 VALUES LESS THAN (TO_DAYS('2009-04-05')),
+ PARTITION p20090405 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, '2009-01-01'), (1, '2009-04-01'), (2, '2009-04-01'),
+ (1, '2009-04-02'), (2, '2009-04-02'), (1, '2009-04-03'), (2, '2009-04-03'),
+ (1, '2009-04-04'), (2, '2009-04-04'), (1, '2009-04-05'), (1, '2009-04-06'),
+ (1, '2009-04-07');
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-02 23:59:58' AS DATETIME);
+DROP TABLE t1;
+
+--echo # Test with DATETIME column NULL
+CREATE TABLE t1 (
+ a int(10) unsigned NOT NULL,
+ b DATETIME NULL
+) PARTITION BY RANGE (TO_DAYS(b))
+(PARTITION p20090401 VALUES LESS THAN (TO_DAYS('2009-04-02')),
+ PARTITION p20090402 VALUES LESS THAN (TO_DAYS('2009-04-03')),
+ PARTITION p20090403 VALUES LESS THAN (TO_DAYS('2009-04-04')),
+ PARTITION p20090404 VALUES LESS THAN (TO_DAYS('2009-04-05')),
+ PARTITION p20090405 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, '2009-01-01'), (1, '2009-04-01'), (2, '2009-04-01'),
+ (1, '2009-04-02'), (2, '2009-04-02'), (1, '2009-04-02 23:59:59'),
+ (1, '2009-04-03'), (2, '2009-04-03'), (1, '2009-04-04'), (2, '2009-04-04'),
+ (1, '2009-04-05'), (1, '2009-04-06'), (1, '2009-04-07');
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-02 23:59:58' AS DATETIME);
+DROP TABLE t1;
+
+--echo # Test with DATE column NULL
+CREATE TABLE t1 (
+ a int(10) unsigned NOT NULL,
+ b DATE NULL
+) PARTITION BY RANGE (TO_DAYS(b))
+(PARTITION p20090401 VALUES LESS THAN (TO_DAYS('2009-04-02')),
+ PARTITION p20090402 VALUES LESS THAN (TO_DAYS('2009-04-03')),
+ PARTITION p20090403 VALUES LESS THAN (TO_DAYS('2009-04-04')),
+ PARTITION p20090404 VALUES LESS THAN (TO_DAYS('2009-04-05')),
+ PARTITION p20090405 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, '2009-01-01'), (1, '2009-04-01'), (2, '2009-04-01'),
+ (1, '2009-04-02'), (2, '2009-04-02'), (1, '2009-04-03'), (2, '2009-04-03'),
+ (1, '2009-04-04'), (2, '2009-04-04'), (1, '2009-04-05'), (1, '2009-04-06'),
+ (1, '2009-04-07');
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-02 23:59:59' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > CAST('2009-04-03' AS DATE);
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03 00:00:00';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-02 23:59:59';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b <= '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b = '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b >= '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b > '2009-04-03';
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-03 00:00:01' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b < CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b <= CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b = CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b >= CAST('2009-04-02 23:59:58' AS DATETIME);
+EXPLAIN PARTITIONS SELECT * FROM t1
+ WHERE b > CAST('2009-04-02 23:59:58' AS DATETIME);
+DROP TABLE t1;
+
+--echo # For better code coverage of the patch
+CREATE TABLE t1 (
+ a int(10) unsigned NOT NULL,
+ b DATE
+) PARTITION BY RANGE ( TO_DAYS(b) )
+(PARTITION p20090401 VALUES LESS THAN (TO_DAYS('2009-04-02')),
+ PARTITION p20090402 VALUES LESS THAN (TO_DAYS('2009-04-03')),
+ PARTITION p20090403 VALUES LESS THAN (TO_DAYS('2009-04-04')),
+ PARTITION p20090404 VALUES LESS THAN (TO_DAYS('2009-04-05')),
+ PARTITION p20090405 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, '2009-01-01'), (2, NULL);
+--echo # test with an invalid date, which lead to item->null_value is set.
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE b < CAST('2009-04-99' AS DATETIME);
+DROP TABLE t1;
+
+#
# Bug#40972: some sql execution lead the whole database crashing
#
# Setup so the start is at partition pX and end is at p1
=== modified file 'mysql-test/t/select_found.test'
--- a/mysql-test/t/select_found.test 2005-07-28 00:22:47 +0000
+++ b/mysql-test/t/select_found.test 2009-10-28 07:52:34 +0000
@@ -54,7 +54,8 @@ CREATE TABLE t2 (
UNIQUE KEY e_n (email,name)
);
-disable_query_log;
+--disable_query_log
+begin;
let $1=200;
let $2=0;
while ($1)
@@ -63,7 +64,8 @@ while ($1)
eval INSERT INTO t2 VALUES ($2,'name$2','email$2');
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
EXPLAIN SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10;
SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10;
=== modified file 'mysql-test/t/sp-big.test'
--- a/mysql-test/t/sp-big.test 2005-12-07 14:01:17 +0000
+++ b/mysql-test/t/sp-big.test 2009-10-28 07:52:34 +0000
@@ -43,11 +43,13 @@ create table t2 like t1;
let $1=8;
--disable_query_log
--disable_result_log
+begin;
while ($1)
{
eval insert into t1 select * from t1;
dec $1;
}
+commit;
--enable_result_log
--enable_query_log
select count(*) from t1;
=== modified file 'mysql-test/t/subselect.test'
--- a/mysql-test/t/subselect.test 2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/subselect.test 2009-11-06 17:22:32 +0000
@@ -30,7 +30,7 @@ SELECT 1 IN (SELECT 1);
SELECT 1 FROM (SELECT 1 as a) b WHERE 1 IN (SELECT (SELECT a));
-- error ER_WRONG_USAGE
select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1));
--- error ER_WRONG_PARAMETERS_TO_PROCEDURE
+-- error ER_WRONG_USAGE
SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1));
-- error ER_BAD_FIELD_ERROR
SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL;
@@ -811,7 +811,8 @@ create table t1 (a int, b int, index a (
create table t2 (a int, index a (a));
create table t3 (a int, b int, index a (a));
insert into t1 values (1,10), (2,20), (3,30), (4,40);
-disable_query_log;
+--disable_query_log
+begin;
# making table large enough
let $1 = 10000;
while ($1)
@@ -819,7 +820,8 @@ while ($1)
eval insert into t1 values (rand()*100000+200,rand()*100000);
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
insert into t2 values (2), (3), (4), (5);
insert into t3 values (10,3), (20,4), (30,5);
select * from t2 where t2.a in (select a from t1);
@@ -2607,7 +2609,8 @@ CREATE TABLE t1 (a int, b int auto_incre
CREATE TABLE t2 (x int auto_increment, y int, z int,
PRIMARY KEY (x), FOREIGN KEY (y) REFERENCES t1 (b));
-disable_query_log;
+--disable_query_log
+begin;
let $1=3000;
while ($1)
{
@@ -2621,7 +2624,8 @@ while ($1)
}
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
SET SESSION sort_buffer_size = 32 * 1024;
SELECT SQL_NO_CACHE COUNT(*)
@@ -3222,11 +3226,13 @@ insert into t1 values(1,1),(2,2), (3, 3)
let $i=10000;
--disable_query_log
--disable_warnings
+begin;
while ($i)
{
eval insert into t2 values (-1 , $i/5000 + 1, '$i');
dec $i;
}
+commit;
--enable_warnings
--enable_query_log
set session sort_buffer_size= 33*1024;
@@ -3336,6 +3342,38 @@ EXPLAIN EXTENDED SELECT * FROM C WHERE `
DROP TABLE C;
--echo # End of test for bug#45061.
+
+--echo #
+--echo # Bug #46749: Segfault in add_key_fields() with outer subquery level
+--echo # field references
+--echo #
+
+CREATE TABLE t1 (
+ a int,
+ b int,
+ UNIQUE (a), KEY (b)
+);
+INSERT INTO t1 VALUES (1,1), (2,1);
+
+CREATE TABLE st1 like t1;
+INSERT INTO st1 VALUES (1,1), (2,1);
+
+CREATE TABLE st2 like t1;
+INSERT INTO st2 VALUES (1,1), (2,1);
+
+# should have "impossible where"
+EXPLAIN
+SELECT MAX(b), (SELECT COUNT(*) FROM st1,st2 WHERE st2.b <= t1.b)
+FROM t1
+WHERE a = 230;
+
+# should not crash
+SELECT MAX(b), (SELECT COUNT(*) FROM st1,st2 WHERE st2.b <= t1.b)
+FROM t1
+WHERE a = 230;
+
+DROP TABLE t1, st1, st2;
+
--echo End of 5.0 tests.
#
=== added file 'mysql-test/t/table_elim_debug.test'
--- a/mysql-test/t/table_elim_debug.test 1970-01-01 00:00:00 +0000
+++ b/mysql-test/t/table_elim_debug.test 2009-10-29 17:50:33 +0000
@@ -0,0 +1,27 @@
+#
+# Table elimination (MWL#17) tests that need debug build
+#
+--source include/have_debug.inc
+
+--disable_warnings
+drop table if exists t1, t2;
+--enable_warnings
+
+# Check if optimizer_switch works
+
+create table t1 (a int);
+insert into t1 values (0),(1),(2),(3);
+
+create table t2 (a int primary key, b int)
+ as select a, a as b from t1 where a in (1,2);
+
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+
+set optimizer_switch='table_elimination=off';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+set optimizer_switch='table_elimination=on';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+set optimizer_switch='table_elimination=default';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+
+drop table t1, t2;
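The test above infers the state of the table_elimination flag from the EXPLAIN output. The flag value itself can also be checked directly through the optimizer_switch variable; a small sketch, assuming (as the test above does) a MariaDB build whose optimizer_switch string contains the table_elimination flag:

set optimizer_switch='table_elimination=off';
select @@optimizer_switch like '%table_elimination=off%' as elimination_off;
set optimizer_switch='table_elimination=default';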
=== modified file 'mysql-test/t/type_newdecimal.test'
--- a/mysql-test/t/type_newdecimal.test 2009-07-03 10:36:04 +0000
+++ b/mysql-test/t/type_newdecimal.test 2009-08-24 19:47:08 +0000
@@ -1286,3 +1286,137 @@ CREATE TABLE t1 SELECT 1 % .123456789123
DESCRIBE t1;
SELECT my_col FROM t1;
DROP TABLE t1;
+
+--echo #
+--echo # Bug#45261: Crash, stored procedure + decimal
+--echo #
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 SELECT
+ /* 81 */ 100000000000000000000000000000000000000000000000000000000000000000000000000000001
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT
+ /* 81 */ 100000000000000000000000000000000000000000000000000000000000000000000000000000001.
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT
+ /* 81 */ 100000000000000000000000000000000000000000000000000000000000000000000000000000001.1 /* 1 */
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT
+ /* 82 */ 1000000000000000000000000000000000000000000000000000000000000000000000000000000001
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT
+ /* 40 */ 1000000000000000000000000000000000000001.1000000000000000000000000000000000000001 /* 40 */
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT
+ /* 1 */ 1.10000000000000000000000000000000000000000000000000000000000000000000000000000001 /* 80 */
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT
+ /* 1 */ 1.100000000000000000000000000000000000000000000000000000000000000000000000000000001 /* 81 */
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT
+ .100000000000000000000000000000000000000000000000000000000000000000000000000000001 /* 81 */
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT
+ /* 45 */ 123456789012345678901234567890123456789012345.123456789012345678901234567890123456789012345 /* 45 */
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT
+ /* 65 */ 12345678901234567890123456789012345678901234567890123456789012345.1 /* 1 */
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT
+ /* 66 */ 123456789012345678901234567890123456789012345678901234567890123456.1 /* 1 */
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT
+ .123456789012345678901234567890123456789012345678901234567890123456 /* 66 */
+ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 AS SELECT 123.1234567890123456789012345678901 /* 31 */ AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 SELECT 1.1 + CAST(1 AS DECIMAL(65,30)) AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Test that the integer and decimal parts are properly calculated.
+--echo #
+
+CREATE TABLE t1 (a DECIMAL(30,30));
+INSERT INTO t1 VALUES (0.1),(0.2),(0.3);
+CREATE TABLE t2 SELECT MIN(a + 0.0000000000000000000000000000001) AS c1 FROM t1;
+DESC t2;
+DROP TABLE t1,t2;
+
+CREATE TABLE t1 (a DECIMAL(30,30));
+INSERT INTO t1 VALUES (0.1),(0.2),(0.3);
+CREATE TABLE t2 SELECT IFNULL(a + 0.0000000000000000000000000000001, NULL) AS c1 FROM t1;
+DESC t2;
+DROP TABLE t1,t2;
+
+CREATE TABLE t1 (a DECIMAL(30,30));
+INSERT INTO t1 VALUES (0.1),(0.2),(0.3);
+CREATE TABLE t2 SELECT CASE a WHEN 0.1 THEN 0.0000000000000000000000000000000000000000000000000000000000000000001 END AS c1 FROM t1;
+DESC t2;
+DROP TABLE t1,t2;
+
+--echo #
+--echo # Test that variables get maximum precision.
+--echo #
+
+SET @decimal= 1.1;
+CREATE TABLE t1 SELECT @decimal AS c1;
+DESC t1;
+SELECT * FROM t1;
+DROP TABLE t1;
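The DESC checks above are driven by the precision/scale capping rule this patch adds in sql/field.cc (Field_new_decimal::new_decimal_field): precision is capped at DECIMAL_MAX_PRECISION (65), scale at DECIMAL_MAX_SCALE (30), and when the integer part alone reaches the maximum precision the scale collapses to zero. A minimal worked example of that rule, reusing the 66-digit literal from the test but with a hypothetical table name:

CREATE TABLE t_cap SELECT
  /* 66 */ 123456789012345678901234567890123456789012345678901234567890123456.1 /* 1 */
  AS c1;
-- The integer part (66 digits) already exceeds DECIMAL_MAX_PRECISION, so
-- under the capping rule the expected column type is decimal(65,0).
DESC t_cap;
DROP TABLE t_cap;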
=== modified file 'mysql-test/t/view.test'
--- a/mysql-test/t/view.test 2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/view.test 2009-10-15 21:38:29 +0000
@@ -3703,38 +3703,6 @@ DROP TABLE t1;
--echo # -- End of test case for Bug#40825
--echo
---echo #
---echo # Bug #45806 crash when replacing into a view with a join!
---echo #
-CREATE TABLE t1(a INT UNIQUE);
-CREATE VIEW v1 AS SELECT t1.a FROM t1, t1 AS a;
-INSERT INTO t1 VALUES (1), (2);
-
-REPLACE INTO v1(a) SELECT 1 FROM t1,t1 AS c;
-SELECT * FROM v1;
-REPLACE INTO v1(a) SELECT 3 FROM t1,t1 AS c;
-SELECT * FROM v1;
-DELETE FROM t1 WHERE a=3;
-INSERT INTO v1(a) SELECT 1 FROM t1,t1 AS c
-ON DUPLICATE KEY UPDATE `v1`.`a`= 1;
-SELECT * FROM v1;
-
-CREATE VIEW v2 AS SELECT t1.a FROM t1, v1 AS a;
-
-REPLACE INTO v2(a) SELECT 1 FROM t1,t1 AS c;
-SELECT * FROM v2;
-REPLACE INTO v2(a) SELECT 3 FROM t1,t1 AS c;
-SELECT * FROM v2;
-INSERT INTO v2(a) SELECT 1 FROM t1,t1 AS c
-ON DUPLICATE KEY UPDATE `v2`.`a`= 1;
-SELECT * FROM v2;
-
-DROP VIEW v1;
-DROP VIEW v2;
-DROP TABLE t1;
-
---echo # -- End of test case for Bug#45806
-
--echo # -----------------------------------------------------------------
--echo # -- End of 5.0 tests.
--echo # -----------------------------------------------------------------
=== modified file 'mysql-test/t/warnings.test'
--- a/mysql-test/t/warnings.test 2009-07-06 06:55:53 +0000
+++ b/mysql-test/t/warnings.test 2009-10-28 07:52:34 +0000
@@ -82,13 +82,15 @@ drop table t1, t2;
create table t1(a char(10));
let $1=50;
-disable_query_log;
+--disable_query_log
+begin;
while ($1)
{
eval insert into t1 values('mysql ab');
dec $1;
}
-enable_query_log;
+commit;
+--enable_query_log
alter table t1 add b char;
set max_error_count=10;
update t1 set b=a;
=== modified file 'mysql-test/valgrind.supp'
--- a/mysql-test/valgrind.supp 2009-09-15 10:46:35 +0000
+++ b/mysql-test/valgrind.supp 2009-10-30 18:50:56 +0000
@@ -880,3 +880,17 @@
fun:nptl_pthread_exit_hack_handler
fun:start_thread
}
+
+#
+# Problem with glibc and gethostbyaddr_r
+#
+
+{
+ libc_res_nsend: Conditional jump or move depends on uninitialised value
+ Memcheck:Cond
+ fun:__libc_res_nsend
+ fun:__libc_res_nquery
+ obj:/lib64/libnss_dns-*so
+ obj:/lib64/libnss_dns-*so
+ fun:gethostbyaddr_r
+}
=== modified file 'mysys/array.c'
--- a/mysys/array.c 2009-09-07 20:50:10 +0000
+++ b/mysys/array.c 2009-10-15 21:38:29 +0000
@@ -61,7 +61,7 @@ my_bool init_dynamic_array2(DYNAMIC_ARRA
Since the dynamic array is usable even if allocation fails here malloc
should not throw an error
*/
- if (!(array->buffer= (char*) my_malloc_ci(element_size*init_alloc, MYF(0))))
+ if (!(array->buffer= (uchar*) my_malloc_ci(element_size*init_alloc, MYF(0))))
array->max_element=0;
DBUG_RETURN(FALSE);
}
=== modified file 'mysys/mf_iocache.c'
--- a/mysys/mf_iocache.c 2008-04-28 16:24:05 +0000
+++ b/mysys/mf_iocache.c 2009-10-15 21:38:29 +0000
@@ -227,15 +227,21 @@ int init_io_cache(IO_CACHE *info, File f
for (;;)
{
size_t buffer_block;
+ /*
+ Unset MY_WAIT_IF_FULL bit if it is set, to prevent conflict with
+ MY_ZEROFILL.
+ */
+ myf flags= (myf) (cache_myflags & ~(MY_WME | MY_WAIT_IF_FULL));
+
if (cachesize < min_cache)
cachesize = min_cache;
buffer_block= cachesize;
if (type == SEQ_READ_APPEND)
buffer_block *= 2;
- if ((info->buffer=
- (uchar*) my_malloc(buffer_block,
- MYF((cache_myflags & ~ MY_WME) |
- (cachesize == min_cache ? MY_WME : 0)))) != 0)
+ if (cachesize == min_cache)
+ flags|= (myf) MY_WME;
+
+ if ((info->buffer= (uchar*) my_malloc(buffer_block, flags)) != 0)
{
info->write_buffer=info->buffer;
if (type == SEQ_READ_APPEND)
=== modified file 'mysys/mf_pack.c'
--- a/mysys/mf_pack.c 2008-08-18 17:11:55 +0000
+++ b/mysys/mf_pack.c 2009-08-28 16:21:54 +0000
@@ -33,12 +33,11 @@ static char * NEAR_F expand_tilde(char *
void pack_dirname(char * to, const char *from)
{
int cwd_err;
- size_t d_length,length,buff_length;
+ size_t d_length,length,UNINIT_VAR(buff_length);
char * start;
char buff[FN_REFLEN];
DBUG_ENTER("pack_dirname");
- LINT_INIT(buff_length);
(void) intern_filename(to,from); /* Change to intern name */
#ifdef FN_DEVCHAR
=== modified file 'mysys/my_copy.c'
--- a/mysys/my_copy.c 2009-10-26 11:35:42 +0000
+++ b/mysys/my_copy.c 2009-11-06 17:22:32 +0000
@@ -57,6 +57,7 @@ int my_copy(const char *from, const char
File from_file,to_file;
uchar buff[IO_SIZE];
MY_STAT stat_buff,new_stat_buff;
+ int res;
DBUG_ENTER("my_copy");
DBUG_PRINT("my",("from %s to %s MyFlags %d", from, to, MyFlags));
@@ -95,7 +96,7 @@ int my_copy(const char *from, const char
if (MyFlags & MY_HOLD_ORIGINAL_MODES && !new_file_stat)
DBUG_RETURN(0); /* File copyed but not stat */
- VOID(chmod(to, stat_buff.st_mode & 07777)); /* Copy modes */
+ res= chmod(to, stat_buff.st_mode & 07777); /* Copy modes */
#if !defined(__WIN__) && !defined(__NETWARE__)
if (chown(to, stat_buff.st_uid,stat_buff.st_gid))
{
=== modified file 'mysys/my_getopt.c'
--- a/mysys/my_getopt.c 2009-09-15 12:53:07 +0000
+++ b/mysys/my_getopt.c 2009-10-15 21:38:29 +0000
@@ -115,7 +115,7 @@ int handle_options(int *argc, char ***ar
uint opt_found, argvpos= 0, length;
my_bool end_of_options= 0, must_be_var, set_maximum_value,
option_is_loose;
- char **pos, **pos_end, *optend, *prev_found,
+ char **pos, **pos_end, *optend, *UNINIT_VAR(prev_found),
*opt_str, key_name[FN_REFLEN];
const struct my_option *optp;
uchar* *value;
=== modified file 'mysys/my_redel.c'
--- a/mysys/my_redel.c 2009-10-26 11:35:42 +0000
+++ b/mysys/my_redel.c 2009-11-06 17:22:32 +0000
@@ -77,6 +77,9 @@ end:
int my_copystat(const char *from, const char *to, int MyFlags)
{
struct stat statbuf;
+#if !defined(__WIN__) && !defined(__NETWARE__)
+ int res;
+#endif
if (stat((char*) from, &statbuf))
{
=== modified file 'mysys/safemalloc.c'
--- a/mysys/safemalloc.c 2009-10-26 11:38:17 +0000
+++ b/mysys/safemalloc.c 2009-11-06 17:22:32 +0000
@@ -272,6 +272,9 @@ void _myfree(void *ptr, const char *file
irem= (struct st_irem *) ((char*) ptr- ALIGN_SIZE(sizeof(struct st_irem))-
sf_malloc_prehunc);
+ if (sf_malloc_quick)
+ (void) _checkchunk(irem, filename, lineno);
+
/*
Check to make sure that we have a real remember structure.
Note: this test could fail for four reasons:
=== modified file 'mysys/typelib.c'
--- a/mysys/typelib.c 2009-03-12 22:27:35 +0000
+++ b/mysys/typelib.c 2009-10-15 21:38:29 +0000
@@ -78,7 +78,8 @@ uint find_type_or_exit(const char *x, TY
int find_type(char *x, const TYPELIB *typelib, uint full_name)
{
- int find,pos,findpos;
+ int find,pos;
+ int UNINIT_VAR(findpos); /* guarded by find */
reg1 char * i;
reg2 const char *j;
DBUG_ENTER("find_type");
@@ -89,7 +90,6 @@ int find_type(char *x, const TYPELIB *ty
DBUG_PRINT("exit",("no count"));
DBUG_RETURN(0);
}
- LINT_INIT(findpos);
find=0;
for (pos=0 ; (j=typelib->type_names[pos]) ; pos++)
{
=== modified file 'regex/regcomp.c'
--- a/regex/regcomp.c 2007-08-13 13:11:25 +0000
+++ b/regex/regcomp.c 2009-08-28 16:21:54 +0000
@@ -213,11 +213,11 @@ register struct parse *p;
int stop; /* character this ERE should end at */
{
register char c;
- register sopno prevback;
- register sopno prevfwd;
+ register sopno UNINIT_VAR(prevback);
+ register sopno UNINIT_VAR(prevfwd);
register sopno conc;
register int first = 1; /* is this the first alternative? */
- LINT_INIT(prevback); LINT_INIT(prevfwd);
+
for (;;) {
/* do a bunch of concatenated expressions */
conc = HERE();
=== modified file 'scripts/make_binary_distribution.sh'
--- a/scripts/make_binary_distribution.sh 2009-10-23 16:48:54 +0000
+++ b/scripts/make_binary_distribution.sh 2009-10-30 20:28:11 +0000
@@ -231,6 +231,18 @@ if [ x"$BASE_SYSTEM" != x"netware" ] ; t
# ----------------------------------------------------------------------
set -e
+ #
+ # Check that the client is compiled with libmysqlclient.a
+ #
+ if test -f ./client/.libs/mysql
+ then
+ echo ""
+ echo "The MySQL clients are compiled dynamicly, which is not allowed for"
+ echo "a MySQL binary tar file. Please configure with"
+ echo "--with-client-ldflags=-all-static and try again"
+ exit 1;
+ fi
+
# ----------------------------------------------------------------------
# Really ugly, one script, "mysql_install_db", needs prefix set to ".",
# i.e. makes access relative the current directory. This matches
@@ -293,11 +305,6 @@ if [ x"$BASE_SYSTEM" != x"netware" ] ; t
fi
fi
- # FIXME let this script be in "bin/", where it is in the RPMs?
- # http://dev.mysql.com/doc/refman/5.1/en/mysql-install-db-problems.html
- mkdir $DEST/scripts
- mv $DEST/bin/mysql_install_db $DEST/scripts/
-
# Note, no legacy "safe_mysqld" link to "mysqld_safe" in 5.1
# Copy readme and license files
@@ -330,18 +337,25 @@ if [ x"$BASE_SYSTEM" != x"netware" ] ; t
#
# Move things to make them easier to find in tar installation
#
- mv $DEST/libexec/* $DEST/bin
+
+ # The following test is needed if the original configure was done with
+ # something like --libexecdir=/usr/local/mysql/bin
+ if test -f $DEST/libexec/mysqld
+ then
+ mv $DEST/libexec/* $DEST/bin
+ rmdir $DEST/libexec
+ fi
mv $DEST/share/man $DEST
mv $DEST/share/mysql/binary-configure $DEST/configure
mv $DEST/share/mysql/*.sql $DEST/share
mv $DEST/share/mysql/*.cnf $DEST/share/mysql/*.server $DEST/share/mysql/mysql-log-rotate $DEST/support-files
- rmdir $DEST/libexec
#
# Move some scripts that are only run once to 'scripts' directory
# but add symbolic links instead to old place for compatibility
#
- for i in mysql_secure_installation mysql_fix_extensions mysql_fix_privilege_tables
+ mkdir $DEST/scripts
+ for i in mysql_secure_installation mysql_fix_extensions mysql_fix_privilege_tables mysql_install_db
do
mv $DEST/bin/$i $DEST/scripts
ln -s "../scripts/$i" $DEST/bin/$i
=== modified file 'server-tools/instance-manager/mysql_connection.cc'
--- a/server-tools/instance-manager/mysql_connection.cc 2009-04-25 10:05:32 +0000
+++ b/server-tools/instance-manager/mysql_connection.cc 2009-10-15 21:38:29 +0000
@@ -69,6 +69,18 @@ void my_net_local_init(NET *net)
C_MODE_END
+/*
+ Unused stub hook required for linking the client API.
+*/
+
+C_MODE_START
+
+void slave_io_thread_detach_vio()
+{
+}
+
+C_MODE_END
+
/*
Every resource, which we can fail to acquire, is allocated in init().
=== modified file 'sql-common/client.c'
--- a/sql-common/client.c 2009-09-07 20:50:10 +0000
+++ b/sql-common/client.c 2009-10-15 21:38:29 +0000
@@ -482,6 +482,15 @@ HANDLE create_shared_memory(MYSQL *mysql
int i;
/*
+ If this is NULL, somebody freed the MYSQL* options. mysql_close()
+ is a good candidate. We don't just silently (re)set it to
+ def_shared_memory_base_name as that would create really confusing/buggy
+ behavior if the user passed in a different name on the command-line or
+ in a my.cnf.
+ */
+ DBUG_ASSERT(shared_memory_base_name != NULL);
+
+ /*
get enough space base-name + '_' + longest suffix we might ever send
*/
if (!(tmp= (char *)my_malloc(strlen(shared_memory_base_name) + 32L, MYF(MY_FAE))))
@@ -933,6 +942,9 @@ void end_server(MYSQL *mysql)
{
init_sigpipe_variables
DBUG_PRINT("info",("Net: %s", vio_description(mysql->net.vio)));
+#ifdef MYSQL_SERVER
+ slave_io_thread_detach_vio();
+#endif
set_sigpipe(mysql);
vio_delete(mysql->net.vio);
reset_sigpipe(mysql);
@@ -1851,7 +1863,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,cons
uint port, const char *unix_socket,ulong client_flag)
{
char buff[NAME_LEN+USERNAME_LENGTH+100];
- char *end,*host_info;
+ char *end,*host_info= NULL;
my_socket sock;
in_addr_t ip_addr;
struct sockaddr_in sock_addr;
@@ -1869,7 +1881,6 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,cons
#endif
init_sigpipe_variables
DBUG_ENTER("mysql_real_connect");
- LINT_INIT(host_info);
DBUG_PRINT("enter",("host: %s db: %s user: %s",
host ? host : "(Null)",
=== modified file 'sql-common/my_time.c'
--- a/sql-common/my_time.c 2009-06-11 16:21:32 +0000
+++ b/sql-common/my_time.c 2009-09-01 11:04:56 +0000
@@ -160,7 +160,7 @@ enum enum_mysql_timestamp_type
str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time,
uint flags, int *was_cut)
{
- uint field_length, year_length, digits, i, number_of_fields;
+ uint field_length, UNINIT_VAR(year_length), digits, i, number_of_fields;
uint date[MAX_DATE_PARTS], date_len[MAX_DATE_PARTS];
uint add_hours= 0, start_loop;
ulong not_zero_date, allow_space;
@@ -174,7 +174,6 @@ str_to_datetime(const char *str, uint le
DBUG_PRINT("ENTER",("str: %.*s",length,str));
LINT_INIT(field_length);
- LINT_INIT(year_length);
LINT_INIT(last_field_pos);
*was_cut= 0;
@@ -450,9 +449,7 @@ str_to_datetime(const char *str, uint le
}
}
- DBUG_RETURN(l_time->time_type=
- (number_of_fields <= 3 ? MYSQL_TIMESTAMP_DATE :
- MYSQL_TIMESTAMP_DATETIME));
+ DBUG_RETURN(l_time->time_type);
err:
bzero((char*) l_time, sizeof(*l_time));
@@ -778,11 +775,12 @@ long calc_daynr(uint year,uint month,uin
if (y == 0 && month == 0 && day == 0)
DBUG_RETURN(0); /* Skip errors */
- delsum= (long) (365L * y+ 31*(month-1) +day);
+ /* Cast to int to be able to handle month == 0 */
+ delsum= (long) (365 * y + 31 *((int) month - 1) + (int) day);
if (month <= 2)
y--;
else
- delsum-= (long) (month*4+23)/10;
+ delsum-= (long) ((int) month * 4 + 23) / 10;
temp=(int) ((y/100+1)*3)/4;
DBUG_PRINT("exit",("year: %d month: %d day: %d -> daynr: %ld",
y+(month <= 2),month,day,delsum+y/4-temp));
=== modified file 'sql/client_settings.h'
--- a/sql/client_settings.h 2006-12-31 00:02:27 +0000
+++ b/sql/client_settings.h 2009-08-13 20:07:20 +0000
@@ -33,3 +33,11 @@
#define mysql_server_init(a,b,c) 0
+#ifdef HAVE_REPLICATION
+C_MODE_START
+void slave_io_thread_detach_vio();
+C_MODE_END
+#else
+#define slave_io_thread_detach_vio()
+#endif
+
=== modified file 'sql/events.cc'
--- a/sql/events.cc 2009-09-15 10:46:35 +0000
+++ b/sql/events.cc 2009-10-15 21:38:29 +0000
@@ -341,6 +341,33 @@ common_1_lev_code:
/**
+ Create a new query string with the executable comments removed, to
+ avoid leaks and to keep the execution consistent on master and slave.
+
+ @param[in] thd Thread handler
+ @param[in] buf Query string
+
+ @return
+ 0 ok
+ 1 error
+*/
+static int
+create_query_string(THD *thd, String *buf)
+{
+ /* Append the "CREATE" part of the query */
+ if (buf->append(STRING_WITH_LEN("CREATE ")))
+ return 1;
+ /* Append definer */
+ append_definer(thd, buf, &(thd->lex->definer->user), &(thd->lex->definer->host));
+ /* Append the part of thd->query that follows the DEFINER clause */
+ if (buf->append(thd->lex->stmt_definition_begin))
+ return 1;
+
+ return 0;
+}
+
+/**
Create a new event.
@param[in,out] thd THD
@@ -438,7 +465,16 @@ Events::create_event(THD *thd, Event_par
{
/* Binlog the create event. */
DBUG_ASSERT(thd->query && thd->query_length);
- write_bin_log(thd, TRUE, thd->query, thd->query_length);
+ String log_query;
+ if (create_query_string(thd, &log_query))
+ {
+ sql_print_error("Event Error: An error occurred while creating query string, "
+ "before writing it into binary log.");
+ DBUG_RETURN(TRUE);
+ }
+ /* If the definer is not set or set to CURRENT_USER, the value of CURRENT_USER
+ will be written into the binary log as the definer for the SQL thread. */
+ write_bin_log(thd, TRUE, log_query.c_ptr(), log_query.length());
}
}
pthread_mutex_unlock(&LOCK_event_metadata);
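The observable effect of create_query_string() is on what gets written to the binary log for CREATE EVENT: instead of logging thd->query verbatim, the server rebuilds the statement with an explicit definer (CURRENT_USER is expanded). A rough before/after sketch with a hypothetical event and table name; the exact quoting of the logged statement is not taken from this patch:

-- Executed on the master (definer defaults to CURRENT_USER):
CREATE EVENT ev_purge
  ON SCHEDULE EVERY 1 DAY
  DO DELETE FROM t_log WHERE created < NOW() - INTERVAL 7 DAY;

-- Roughly what is expected in the binary log after this change:
-- CREATE DEFINER=`user`@`host` EVENT ev_purge ON SCHEDULE EVERY 1 DAY
--   DO DELETE FROM t_log WHERE created < NOW() - INTERVAL 7 DAY;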
=== modified file 'sql/field.cc'
--- a/sql/field.cc 2009-10-16 22:57:48 +0000
+++ b/sql/field.cc 2009-11-09 23:49:25 +0000
@@ -1924,16 +1924,16 @@ int Field_decimal::store(const char *fro
Pointers used when digits move from the left of the '.' to the
right of the '.' (explained below)
*/
- const uchar *int_digits_tail_from;
+ const uchar *UNINIT_VAR(int_digits_tail_from);
/* Number of 0 that need to be added at the left of the '.' (1E3: 3 zeros) */
- uint int_digits_added_zeros;
+ uint UNINIT_VAR(int_digits_added_zeros);
/*
Pointer used when digits move from the right of the '.' to the left
of the '.'
*/
- const uchar *frac_digits_head_end;
+ const uchar *UNINIT_VAR(frac_digits_head_end);
/* Number of 0 that need to be added at the right of the '.' (for 1E-3) */
- uint frac_digits_added_zeros;
+ uint UNINIT_VAR(frac_digits_added_zeros);
uchar *pos,*tmp_left_pos,*tmp_right_pos;
/* Pointers that are used as limits (begin and end of the field buffer) */
uchar *left_wall,*right_wall;
@@ -1944,11 +1944,6 @@ int Field_decimal::store(const char *fro
*/
bool is_cuted_fields_incr=0;
- LINT_INIT(int_digits_tail_from);
- LINT_INIT(int_digits_added_zeros);
- LINT_INIT(frac_digits_head_end);
- LINT_INIT(frac_digits_added_zeros);
-
/*
There are three steps in this function :
- parse the input string
@@ -2487,12 +2482,97 @@ Field_new_decimal::Field_new_decimal(uin
{
precision= my_decimal_length_to_precision(len_arg, dec_arg, unsigned_arg);
set_if_smaller(precision, DECIMAL_MAX_PRECISION);
+ DBUG_ASSERT(precision >= dec);
DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION) &&
(dec <= DECIMAL_MAX_SCALE));
bin_size= my_decimal_get_binary_size(precision, dec);
}
+/**
+ Create a field to hold a decimal value from an item.
+
+ @remark The MySQL DECIMAL data type has a characteristic that needs to be
+ taken into account when deducing the type from an Item_decimal.
+
+ But first, let's briefly recap what is the new MySQL DECIMAL type:
+
+ The declaration syntax for a decimal is DECIMAL(M,D), where:
+
+ * M is the maximum number of digits (the precision).
+ It has a range of 1 to 65.
+ * D is the number of digits to the right of the decimal separator (the scale).
+ It has a range of 0 to 30 and must be no larger than M.
+
+ D and M are used to determine the storage requirements for the integer
+ and fractional parts of each value. The integer part is to the left of
+ the decimal separator and to the right is the fractional part. Hence:
+
+ M is the number of digits for the integer and fractional part.
+ D is the number of digits for the fractional part.
+
+ Consequently, M - D is the number of digits for the integer part. For
+ example, a DECIMAL(20,10) column has ten digits on either side of
+ the decimal separator.
+
+ The characteristic that needs to be taken into account is that the
+ backing type for Item_decimal is a my_decimal that has a higher
+ precision (DECIMAL_MAX_POSSIBLE_PRECISION, see my_decimal.h) than
+ DECIMAL.
+
+ Drawing a comparison between my_decimal and DECIMAL:
+
+ * M has a range of 1 to 81.
+ * D has a range of 0 to 81.
+
+ There can be a difference in range if the decimal contains an integer
+ part. This is because the fractional part must always be on a group
+ boundary, leaving at least one group for the integer part. Since each
+ group is 9 (DIG_PER_DEC1) digits and there are 9 (DECIMAL_BUFF_LENGTH)
+ groups, the fractional part is limited to 72 digits if there is at
+ least one digit in the integral part.
+
+ Although the backing type for a DECIMAL is also my_decimal, every
+ time a my_decimal is stored in a DECIMAL field, the precision and
+ scale are explicitly capped at 65 (DECIMAL_MAX_PRECISION) and 30
+ (DECIMAL_MAX_SCALE) digits, following my_decimal truncation procedure
+ (FIX_INTG_FRAC_ERROR).
+*/
+
+Field_new_decimal *
+Field_new_decimal::new_decimal_field(const Item *item)
+{
+ uint32 len;
+ uint intg= item->decimal_int_part(), scale= item->decimals;
+
+ DBUG_ASSERT(item->decimal_precision() >= item->decimals);
+
+ /*
+ Employ a procedure along the lines of the my_decimal truncation process:
+ - If the integer part is equal to or bigger than the maximum precision:
+ Truncate integer part to fit and the fractional becomes zero.
+ - Otherwise:
+ Truncate fractional part to fit.
+ */
+ if (intg >= DECIMAL_MAX_PRECISION)
+ {
+ intg= DECIMAL_MAX_PRECISION;
+ scale= 0;
+ }
+ else
+ {
+ uint room= min(DECIMAL_MAX_PRECISION - intg, DECIMAL_MAX_SCALE);
+ if (scale > room)
+ scale= room;
+ }
+
+ len= my_decimal_precision_to_length(intg + scale, scale, item->unsigned_flag);
+
+ return new Field_new_decimal(len, item->maybe_null, item->name, scale,
+ item->unsigned_flag);
+}
+
+
int Field_new_decimal::reset(void)
{
store_value(&decimal_zero);
@@ -9954,10 +10034,8 @@ Field *make_field(TABLE_SHARE *share, uc
TYPELIB *interval,
const char *field_name)
{
- uchar *bit_ptr;
- uchar bit_offset;
- LINT_INIT(bit_ptr);
- LINT_INIT(bit_offset);
+ uchar *UNINIT_VAR(bit_ptr);
+ uchar UNINIT_VAR(bit_offset);
if (field_type == MYSQL_TYPE_BIT && !f_bit_as_char(pack_flag))
{
bit_ptr= null_pos;
=== modified file 'sql/field.h'
--- a/sql/field.h 2009-10-16 22:57:48 +0000
+++ b/sql/field.h 2009-11-09 23:49:25 +0000
@@ -695,6 +695,10 @@ protected:
class Field_num :public Field {
public:
+ /**
+ The scale of the Field's value, i.e. the number of digits to the right
+ of the decimal point.
+ */
const uint8 dec;
bool zerofill,unsigned_flag; // Purify cannot handle bit fields
Field_num(uchar *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
@@ -853,6 +857,11 @@ public:
Field_new_decimal(uint32 len_arg, bool maybe_null_arg,
const char *field_name_arg, uint8 dec_arg,
bool unsigned_arg);
+ /*
+ Create a field to hold a decimal value from an item.
+ Truncates the precision and/or scale if necessary.
+ */
+ static Field_new_decimal *new_decimal_field(const Item *item);
enum_field_types type() const { return MYSQL_TYPE_NEWDECIMAL;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
Item_result result_type () const { return DECIMAL_RESULT; }
=== modified file 'sql/ha_partition.cc'
--- a/sql/ha_partition.cc 2009-10-26 20:48:20 +0000
+++ b/sql/ha_partition.cc 2009-11-09 23:49:25 +0000
@@ -4415,17 +4415,6 @@ int ha_partition::handle_unordered_scan_
break;
case partition_index_first:
DBUG_PRINT("info", ("index_first on partition %d", i));
- /* MyISAM engine can fail if we call index_first() when indexes disabled */
- /* that happens if the table is empty. */
- /* Here we use file->stats.records instead of file->records() because */
- /* file->records() is supposed to return an EXACT count, and it can be */
- /* possibly slow. We don't need an exact number, an approximate one- from*/
- /* the last ::info() call - is sufficient. */
- if (file->stats.records == 0)
- {
- error= HA_ERR_END_OF_FILE;
- break;
- }
error= file->index_first(buf);
break;
case partition_index_first_unordered:
@@ -4513,32 +4502,10 @@ int ha_partition::handle_ordered_index_s
m_start_key.flag);
break;
case partition_index_first:
- /* MyISAM engine can fail if we call index_first() when indexes disabled */
- /* that happens if the table is empty. */
- /* Here we use file->stats.records instead of file->records() because */
- /* file->records() is supposed to return an EXACT count, and it can be */
- /* possibly slow. We don't need an exact number, an approximate one- from*/
- /* the last ::info() call - is sufficient. */
- if (file->stats.records == 0)
- {
- error= HA_ERR_END_OF_FILE;
- break;
- }
error= file->index_first(rec_buf_ptr);
reverse_order= FALSE;
break;
case partition_index_last:
- /* MyISAM engine can fail if we call index_last() when indexes disabled */
- /* that happens if the table is empty. */
- /* Here we use file->stats.records instead of file->records() because */
- /* file->records() is supposed to return an EXACT count, and it can be */
- /* possibly slow. We don't need an exact number, an approximate one- from*/
- /* the last ::info() call - is sufficient. */
- if (file->stats.records == 0)
- {
- error= HA_ERR_END_OF_FILE;
- break;
- }
error= file->index_last(rec_buf_ptr);
reverse_order= TRUE;
break;
=== modified file 'sql/item.cc'
--- a/sql/item.cc 2009-10-16 22:57:48 +0000
+++ b/sql/item.cc 2009-11-09 23:49:25 +0000
@@ -433,17 +433,26 @@ Item::Item(THD *thd, Item *item):
}
+/**
+ Decimal precision of the item.
+
+ @remark The precision must not be capped as it can be used in conjunction
+ with Item::decimals to determine the size of the integer part when
+ constructing a decimal data type.
+
+ @see Item::decimal_int_part()
+ @see Item::decimals
+*/
+
uint Item::decimal_precision() const
{
+ uint precision= max_length;
Item_result restype= result_type();
if ((restype == DECIMAL_RESULT) || (restype == INT_RESULT))
- {
- uint prec=
- my_decimal_length_to_precision(max_length, decimals, unsigned_flag);
- return min(prec, DECIMAL_MAX_PRECISION);
- }
- return min(max_length, DECIMAL_MAX_PRECISION);
+ precision= my_decimal_length_to_precision(max_length, decimals, unsigned_flag);
+
+ return precision;
}
@@ -1679,7 +1688,7 @@ bool agg_item_collations_for_comparison(
bool agg_item_set_converter(DTCollation &coll, const char *fname,
Item **args, uint nargs, uint flags, int item_sep)
{
- Item **arg, *safe_args[2];
+ Item **arg, *safe_args[2]= {NULL, NULL};
/*
For better error reporting: save the first and the second argument.
@@ -1688,8 +1697,6 @@ bool agg_item_set_converter(DTCollation
doesn't display each argument's characteristics.
- if nargs is 1, then this error cannot happen.
*/
- LINT_INIT(safe_args[0]);
- LINT_INIT(safe_args[1]);
if (nargs >=2 && nargs <= 3)
{
safe_args[0]= args[0];
@@ -3320,8 +3327,7 @@ Item_copy *Item_copy::create (Item *item
new Item_copy_uint (item) : new Item_copy_int (item);
case DECIMAL_RESULT:
return new Item_copy_decimal (item);
- case IMPOSSIBLE_RESULT:
- case ROW_RESULT:
+ default:
DBUG_ASSERT (0);
}
/* should not happen */
@@ -4941,9 +4947,7 @@ Field *Item::tmp_table_field_from_field_
switch (field_type()) {
case MYSQL_TYPE_DECIMAL:
case MYSQL_TYPE_NEWDECIMAL:
- field= new Field_new_decimal((uchar*) 0, max_length, null_ptr, 0,
- Field::NONE, name, decimals, 0,
- unsigned_flag);
+ field= Field_new_decimal::new_decimal_field(this);
break;
case MYSQL_TYPE_TINY:
field= new Field_tiny((uchar*) 0, max_length, null_ptr, 0, Field::NONE,
@@ -5540,9 +5544,8 @@ bool Item_null::send(Protocol *protocol,
bool Item::send(Protocol *protocol, String *buffer)
{
- bool result;
+ bool UNINIT_VAR(result); // Will be set if null_value == 0
enum_field_types f_type;
- LINT_INIT(result); // Will be set if null_value == 0
switch ((f_type=field_type())) {
default:
@@ -6885,14 +6888,21 @@ void resolve_const_item(THD *thd, Item *
}
/**
- Return true if the value stored in the field is equal to the const
- item.
+ Compare the value stored in field, with the original item.
+
+ @param field field into which the item is converted and stored
+ @param item original item
- We need to use this on the range optimizer because in some cases
- we can't store the value in the field without some precision/character loss.
+ @return Return an integer greater than, equal to, or less than 0 if
+ the value stored in the field is greater than, equal to,
+ or less than the original item
+
+ @note We only use this in the range optimizer/partition pruning,
+ because in some cases we can't store the value in the field
+ without some precision/character loss.
*/
-bool field_is_equal_to_item(Field *field,Item *item)
+int stored_field_cmp_to_item(Field *field, Item *item)
{
Item_result res_type=item_cmp_type(field->result_type(),
@@ -6903,28 +6913,49 @@ bool field_is_equal_to_item(Field *field
char field_buff[MAX_FIELD_WIDTH];
String item_tmp(item_buff,sizeof(item_buff),&my_charset_bin),*item_result;
String field_tmp(field_buff,sizeof(field_buff),&my_charset_bin);
+ enum_field_types field_type;
item_result=item->val_str(&item_tmp);
if (item->null_value)
- return 1; // This must be true
+ return 0;
field->val_str(&field_tmp);
- return !stringcmp(&field_tmp,item_result);
+
+ /*
+ If comparing DATE with DATETIME, append the time-part to the DATE.
+ So that the strings are equally formatted.
+ A DATE converted to string is 10 characters, and a DATETIME converted
+ to string is 19 characters.
+ */
+ field_type= field->type();
+ if (field_type == MYSQL_TYPE_DATE &&
+ item_result->length() == 19)
+ field_tmp.append(" 00:00:00");
+ else if (field_type == MYSQL_TYPE_DATETIME &&
+ item_result->length() == 10)
+ item_result->append(" 00:00:00");
+
+ return stringcmp(&field_tmp,item_result);
}
if (res_type == INT_RESULT)
- return 1; // Both where of type int
+ return 0; // Both are of type int
if (res_type == DECIMAL_RESULT)
{
my_decimal item_buf, *item_val,
field_buf, *field_val;
item_val= item->val_decimal(&item_buf);
if (item->null_value)
- return 1; // This must be true
+ return 0;
field_val= field->val_decimal(&field_buf);
- return !my_decimal_cmp(item_val, field_val);
+ return my_decimal_cmp(item_val, field_val);
}
double result= item->val_real();
if (item->null_value)
+ return 0;
+ double field_result= field->val_real();
+ if (field_result < result)
+ return -1;
+ else if (field_result > result)
return 1;
- return result == field->val_real();
+ return 0;
}
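
The DATE/DATETIME handling above boils down to padding the shorter string before the byte-wise comparison; a minimal standalone sketch, with hypothetical literal values and no MySQL types:

#include <cstring>
#include <string>

/* Pad a 10-character DATE string with " 00:00:00" so it can be compared
   against a 19-character DATETIME string in the same format. */
static int cmp_date_vs_datetime(std::string field_val, std::string item_val)
{
  if (field_val.length() == 10 && item_val.length() == 19)
    field_val.append(" 00:00:00");
  return std::strcmp(field_val.c_str(), item_val.c_str());
}

/* cmp_date_vs_datetime("2007-09-15", "2007-09-15 00:00:00") == 0
   cmp_date_vs_datetime("2007-09-15", "2007-09-15 12:34:56") < 0  */
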
Item_cache* Item_cache::get_cache(const Item *item)
=== modified file 'sql/item.h'
--- a/sql/item.h 2009-10-16 22:57:48 +0000
+++ b/sql/item.h 2009-11-09 23:49:25 +0000
@@ -415,13 +415,20 @@ public:
from INT_RESULT, may be NULL, or are unsigned.
It will be possible to address this issue once the related partitioning bugs
(BUG#16002, BUG#15447, BUG#13436) are fixed.
+
+ The NOT_NULL enums are used in TO_DAYS, since TO_DAYS('2001-00-00') returns
+ NULL which puts those rows into the NULL partition, but
+ '2000-12-31' < '2001-00-00' < '2001-01-01'. So special handling is needed
+ for this (see Bug#20577).
*/
typedef enum monotonicity_info
{
NON_MONOTONIC, /* none of the below holds */
MONOTONIC_INCREASING, /* F() is unary and (x < y) => (F(x) <= F(y)) */
- MONOTONIC_STRICT_INCREASING /* F() is unary and (x < y) => (F(x) < F(y)) */
+ MONOTONIC_INCREASING_NOT_NULL, /* But only for valid/real x and y */
+ MONOTONIC_STRICT_INCREASING,/* F() is unary and (x < y) => (F(x) < F(y)) */
+ MONOTONIC_STRICT_INCREASING_NOT_NULL /* But only for valid/real x and y */
} enum_monotonicity_info;
/*************************************************************************/
@@ -594,8 +601,8 @@ public:
left_endp FALSE <=> The interval is "x < const" or "x <= const"
TRUE <=> The interval is "x > const" or "x >= const"
- incl_endp IN TRUE <=> the comparison is '<' or '>'
- FALSE <=> the comparison is '<=' or '>='
+ incl_endp IN FALSE <=> the comparison is '<' or '>'
+ TRUE <=> the comparison is '<=' or '>='
OUT The same but for the "F(x) $CMP$ F(const)" comparison
DESCRIPTION
@@ -777,9 +784,10 @@ public:
virtual cond_result eq_cmp_result() const { return COND_OK; }
inline uint float_length(uint decimals_par) const
{ return decimals != NOT_FIXED_DEC ? (DBL_DIG+2+decimals_par) : DBL_DIG+8;}
+ /** Returns the uncapped decimal precision of this item. */
virtual uint decimal_precision() const;
inline int decimal_int_part() const
- { return my_decimal_int_part(decimal_precision(), decimals); }
+ { return decimal_precision() - decimals; }
/*
Returns true if this is constant (during query execution, i.e. its value
will not change until next fix_fields) and its value is known.
@@ -3255,4 +3263,4 @@ void mark_select_range_as_dependent(THD
extern Cached_item *new_Cached_item(THD *thd, Item *item);
extern Item_result item_cmp_type(Item_result a,Item_result b);
extern void resolve_const_item(THD *thd, Item **ref, Item *cmp_item);
-extern bool field_is_equal_to_item(Field *field,Item *item);
+extern int stored_field_cmp_to_item(Field *field, Item *item);
=== modified file 'sql/item_cmpfunc.cc'
--- a/sql/item_cmpfunc.cc 2009-09-15 10:46:35 +0000
+++ b/sql/item_cmpfunc.cc 2009-10-15 21:38:29 +0000
@@ -395,11 +395,10 @@ static bool convert_constant_item(THD *t
ulong orig_sql_mode= thd->variables.sql_mode;
enum_check_fields orig_count_cuted_fields= thd->count_cuted_fields;
my_bitmap_map *old_maps[2];
- ulonglong orig_field_val; /* original field value if valid */
+ ulonglong UNINIT_VAR(orig_field_val); /* original field value if valid */
LINT_INIT(old_maps[0]);
LINT_INIT(old_maps[1]);
- LINT_INIT(orig_field_val);
if (table)
dbug_tmp_use_all_columns(table, old_maps,
@@ -2182,7 +2181,7 @@ uint Item_func_ifnull::decimal_precision
int arg1_int_part= args[1]->decimal_int_part();
int max_int_part= max(arg0_int_part, arg1_int_part);
int precision= max_int_part + decimals;
- return min(precision, DECIMAL_MAX_PRECISION);
+ return precision;
}
@@ -2366,7 +2365,7 @@ uint Item_func_if::decimal_precision() c
int arg1_prec= args[1]->decimal_int_part();
int arg2_prec= args[2]->decimal_int_part();
int precision=max(arg1_prec,arg2_prec) + decimals;
- return min(precision, DECIMAL_MAX_PRECISION);
+ return precision;
}
@@ -2783,7 +2782,7 @@ uint Item_func_case::decimal_precision()
if (else_expr_num != -1)
set_if_bigger(max_int_part, args[else_expr_num]->decimal_int_part());
- return min(max_int_part + decimals, DECIMAL_MAX_PRECISION);
+ return max_int_part + decimals;
}
=== modified file 'sql/item_create.cc'
--- a/sql/item_create.cc 2009-04-25 10:05:32 +0000
+++ b/sql/item_create.cc 2009-10-15 21:38:29 +0000
@@ -5032,10 +5032,9 @@ create_func_cast(THD *thd, Item *a, Cast
const char *c_len, const char *c_dec,
CHARSET_INFO *cs)
{
- Item *res;
+ Item *UNINIT_VAR(res);
ulong len;
uint dec;
- LINT_INIT(res);
switch (cast_type) {
case ITEM_CAST_BINARY:
=== modified file 'sql/item_func.cc'
--- a/sql/item_func.cc 2009-10-16 22:57:48 +0000
+++ b/sql/item_func.cc 2009-11-09 23:49:25 +0000
@@ -451,45 +451,8 @@ Field *Item_func::tmp_table_field(TABLE
case STRING_RESULT:
return make_string_field(table);
case DECIMAL_RESULT:
- {
- uint8 dec= decimals;
- uint8 intg= decimal_precision() - dec;
- uint32 len= max_length;
-
- /*
- Trying to put too many digits overall in a DECIMAL(prec,dec)
- will always throw a warning. We must limit dec to
- DECIMAL_MAX_SCALE however to prevent an assert() later.
- */
-
- if (dec > 0)
- {
- int overflow;
-
- dec= min(dec, DECIMAL_MAX_SCALE);
-
- /*
- If the value still overflows the field with the corrected dec,
- we'll throw out decimals rather than integers. This is still
- bad and of course throws a truncation warning.
- */
-
- const int required_length=
- my_decimal_precision_to_length(intg + dec, dec,
- unsigned_flag);
-
- overflow= required_length - len;
-
- if (overflow > 0)
- dec= max(0, dec - overflow); // too long, discard fract
- else
- /* Corrected value fits. */
- len= required_length;
- }
-
- field= new Field_new_decimal(len, maybe_null, name, dec, unsigned_flag);
+ field= Field_new_decimal::new_decimal_field(this);
break;
- }
case ROW_RESULT:
default:
// This case should never be chosen
@@ -2304,9 +2267,8 @@ void Item_func_min_max::fix_length_and_d
uint Item_func_min_max::cmp_datetimes(ulonglong *value)
{
- longlong min_max;
+ longlong UNINIT_VAR(min_max);
uint min_max_idx= 0;
- LINT_INIT(min_max);
for (uint i=0; i < arg_count ; i++)
{
@@ -2371,8 +2333,7 @@ String *Item_func_min_max::val_str(Strin
}
case STRING_RESULT:
{
- String *res;
- LINT_INIT(res);
+ String *UNINIT_VAR(res);
for (uint i=0; i < arg_count ; i++)
{
if (i == 0)
@@ -2461,8 +2422,7 @@ longlong Item_func_min_max::val_int()
my_decimal *Item_func_min_max::val_decimal(my_decimal *dec)
{
DBUG_ASSERT(fixed == 1);
- my_decimal tmp_buf, *tmp, *res;
- LINT_INIT(res);
+ my_decimal tmp_buf, *tmp, *UNINIT_VAR(res);
if (compare_as_dates)
{
@@ -4804,6 +4764,19 @@ void Item_func_get_user_var::fix_length_
}
+uint Item_func_get_user_var::decimal_precision() const
+{
+ uint precision= max_length;
+ Item_result restype= result_type();
+
+ /* Default to maximum as the precision is unknown a priori. */
+ if ((restype == DECIMAL_RESULT) || (restype == INT_RESULT))
+ precision= DECIMAL_MAX_PRECISION;
+
+ return precision;
+}
+
+
bool Item_func_get_user_var::const_item() const
{
return (!var_entry || current_thd->query_id != var_entry->update_query_id);
@@ -5467,8 +5440,7 @@ void Item_func_match::init_search(bool n
bool Item_func_match::fix_fields(THD *thd, Item **ref)
{
DBUG_ASSERT(fixed == 0);
- Item *item;
- LINT_INIT(item); // Safe as arg_count is > 1
+ Item *UNINIT_VAR(item); // Safe as arg_count is > 1
maybe_null=1;
join_key=0;
=== modified file 'sql/item_func.h'
--- a/sql/item_func.h 2009-10-16 22:57:48 +0000
+++ b/sql/item_func.h 2009-11-09 23:49:25 +0000
@@ -1431,6 +1431,7 @@ public:
table_map used_tables() const
{ return const_item() ? 0 : RAND_TABLE_BIT; }
bool eq(const Item *item, bool binary_cmp) const;
+ uint decimal_precision() const;
private:
bool set_value(THD *thd, sp_rcontext *ctx, Item **it);
=== modified file 'sql/item_sum.cc'
--- a/sql/item_sum.cc 2009-09-15 10:46:35 +0000
+++ b/sql/item_sum.cc 2009-10-15 21:38:29 +0000
@@ -517,8 +517,7 @@ Field *Item_sum::create_tmp_field(bool g
name, table->s, collation.collation);
break;
case DECIMAL_RESULT:
- field= new Field_new_decimal(max_length, maybe_null, name,
- decimals, unsigned_flag);
+ field= Field_new_decimal::new_decimal_field(this);
break;
case ROW_RESULT:
default:
=== modified file 'sql/item_timefunc.cc'
--- a/sql/item_timefunc.cc 2009-09-07 20:50:10 +0000
+++ b/sql/item_timefunc.cc 2009-10-15 21:38:29 +0000
@@ -961,9 +961,9 @@ enum_monotonicity_info Item_func_to_days
if (args[0]->type() == Item::FIELD_ITEM)
{
if (args[0]->field_type() == MYSQL_TYPE_DATE)
- return MONOTONIC_STRICT_INCREASING;
+ return MONOTONIC_STRICT_INCREASING_NOT_NULL;
if (args[0]->field_type() == MYSQL_TYPE_DATETIME)
- return MONOTONIC_INCREASING;
+ return MONOTONIC_INCREASING_NOT_NULL;
}
return NON_MONOTONIC;
}
@@ -974,12 +974,27 @@ longlong Item_func_to_days::val_int_endp
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
longlong res;
- if (get_arg0_date(<ime, TIME_NO_ZERO_DATE))
+ int dummy; /* unused */
+ if (get_arg0_date(<ime, TIME_FUZZY_DATE))
{
/* got NULL, leave the incl_endp intact */
return LONGLONG_MIN;
}
res=(longlong) calc_daynr(ltime.year,ltime.month,ltime.day);
+ /* Set to NULL if invalid date, but keep the value */
+ null_value= check_date(<ime,
+ (ltime.year || ltime.month || ltime.day),
+ (TIME_NO_ZERO_IN_DATE | TIME_NO_ZERO_DATE),
+ &dummy);
+ if (null_value)
+ {
+ /*
+ Even if the evaluation returns NULL, the calc_daynr() result is useful for pruning
+ */
+ if (args[0]->field_type() != MYSQL_TYPE_DATE)
+ *incl_endp= TRUE;
+ return res;
+ }
if (args[0]->field_type() == MYSQL_TYPE_DATE)
{
@@ -992,15 +1007,19 @@ longlong Item_func_to_days::val_int_endp
point to day bound ("strictly less" comparison stays intact):
col < '2007-09-15 00:00:00' -> TO_DAYS(col) < TO_DAYS('2007-09-15')
+ col > '2007-09-15 23:59:59' -> TO_DAYS(col) > TO_DAYS('2007-09-15')
which is different from the general case ("strictly less" changes to
"less or equal"):
col < '2007-09-15 12:34:56' -> TO_DAYS(col) <= TO_DAYS('2007-09-15')
*/
- if (!left_endp && !(ltime.hour || ltime.minute || ltime.second ||
- ltime.second_part))
- ; /* do nothing */
+ if ((!left_endp && !(ltime.hour || ltime.minute || ltime.second ||
+ ltime.second_part)) ||
+ (left_endp && ltime.hour == 23 && ltime.minute == 59 &&
+ ltime.second == 59))
+ /* do nothing */
+ ;
else
*incl_endp= TRUE;
return res;
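
The endpoint rule in the comment above can be condensed into a small standalone predicate; the struct and function are illustrative assumptions, not code from the patch:

/* Decide whether a strict comparison against a DATETIME constant must be
   turned into a non-strict one after mapping through TO_DAYS(). left_endp
   follows the convention used above: FALSE for "x < const"/"x <= const",
   TRUE for "x > const"/"x >= const". */
struct time_part { unsigned hour, minute, second, second_part; };

static bool becomes_inclusive(bool left_endp, const time_part &t)
{
  bool at_day_start= !(t.hour || t.minute || t.second || t.second_part);
  bool at_day_end= (t.hour == 23 && t.minute == 59 && t.second == 59);
  /* '<' stays strict only at 00:00:00, '>' only at 23:59:59 */
  if ((!left_endp && at_day_start) || (left_endp && at_day_end))
    return false;
  return true;            /* otherwise '<' -> '<=' and '>' -> '>=' */
}
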
=== modified file 'sql/lock.cc'
--- a/sql/lock.cc 2009-09-07 20:50:10 +0000
+++ b/sql/lock.cc 2009-10-15 21:38:29 +0000
@@ -1472,11 +1472,10 @@ void unlock_global_read_lock(THD *thd)
bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh,
bool is_not_commit)
{
- const char *old_message;
+ const char *UNINIT_VAR(old_message);
bool result= 0, need_exit_cond;
DBUG_ENTER("wait_if_global_read_lock");
- LINT_INIT(old_message);
/*
Assert that we do not own LOCK_open. If we would own it, other
threads could not close their tables. This would make a pretty
=== modified file 'sql/log.cc'
--- a/sql/log.cc 2009-10-26 11:35:42 +0000
+++ b/sql/log.cc 2009-11-06 17:22:32 +0000
@@ -1275,6 +1275,25 @@ int LOGGER::set_handlers(uint error_log_
return 0;
}
+/**
+ This function checks if a transactional table was updated by the
+ current statement.
+
+ @param thd The client thread that executed the current statement.
+ @return
+ @c true if a transactional table was updated, @c false otherwise.
+*/
+static bool stmt_has_updated_trans_table(THD *thd)
+{
+ Ha_trx_info *ha_info;
+
+ for (ha_info= thd->transaction.stmt.ha_list; ha_info; ha_info= ha_info->next())
+ {
+ if (ha_info->is_trx_read_write() && ha_info->ht() != binlog_hton)
+ return (TRUE);
+ }
+ return (FALSE);
+}
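
For context, the helper above feeds the cache selection in MYSQL_BIN_LOG::write() shown in the next hunk; a minimal sketch of that decision, with the booleans as hypothetical stand-ins for the real checks:

/* Use the transactional cache when the event is flagged transactional,
   the transaction cache already has content, or the statement updated a
   transactional table. */
static bool use_trans_cache(bool event_is_transactional,
                            bool trans_log_not_empty,
                            bool stmt_updated_trans_table)
{
  return event_is_transactional || trans_log_not_empty ||
         stmt_updated_trans_table;
}
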
/*
Save position of binary log transaction cache.
@@ -4104,7 +4123,8 @@ bool MYSQL_BIN_LOG::write(Log_event *eve
(binlog_trx_data*) thd_get_ha_data(thd, binlog_hton);
IO_CACHE *trans_log= &trx_data->trans_log;
my_off_t trans_log_pos= my_b_tell(trans_log);
- if (event_info->get_cache_stmt() || trans_log_pos != 0)
+ if (event_info->get_cache_stmt() || trans_log_pos != 0 ||
+ stmt_has_updated_trans_table(thd))
{
DBUG_PRINT("info", ("Using trans_log: cache: %d, trans_log_pos: %lu",
event_info->get_cache_stmt(),
@@ -4856,7 +4876,8 @@ bool flush_error_log()
my_rename(log_error_file,err_renamed,MYF(0));
if (freopen(log_error_file,"a+",stdout))
{
- freopen(log_error_file,"a+",stderr);
+ FILE *reopen;
+ reopen= freopen(log_error_file,"a+",stderr);
setbuf(stderr, NULL);
}
else
=== modified file 'sql/log_event.cc'
--- a/sql/log_event.cc 2009-09-07 20:50:10 +0000
+++ b/sql/log_event.cc 2009-10-15 21:38:29 +0000
@@ -2948,6 +2948,8 @@ int Query_log_event::do_apply_event(Rela
{
LEX_STRING new_db;
int expected_error,actual_error= 0;
+ HA_CREATE_INFO db_options;
+
/*
Colleagues: please never free(thd->catalog) in MySQL. This would
lead to bugs as here thd->catalog is a part of an alloced block,
@@ -2959,6 +2961,13 @@ int Query_log_event::do_apply_event(Rela
new_db.length= db_len;
new_db.str= (char *) rpl_filter->get_rewrite_db(db, &new_db.length);
thd->set_db(new_db.str, new_db.length); /* allocates a copy of 'db' */
+
+ /*
+ Setting the character set and collation of the current database thd->db.
+ */
+ load_db_opt_by_name(thd, thd->db, &db_options);
+ if (db_options.default_table_charset)
+ thd->db_charset= db_options.default_table_charset;
thd->variables.auto_increment_increment= auto_increment_increment;
thd->variables.auto_increment_offset= auto_increment_offset;
@@ -3159,7 +3168,7 @@ compare_errors:
/*
If we expected a non-zero error code, and we don't get the same error
- code, and none of them should be ignored.
+ code, and it should be ignored or is related to a concurrency issue.
*/
actual_error= thd->is_error() ? thd->main_da.sql_errno() : 0;
DBUG_PRINT("info",("expected_error: %d sql_errno: %d",
@@ -3182,7 +3191,8 @@ Default database: '%s'. Query: '%s'",
thd->is_slave_error= 1;
}
/*
- If we get the same error code as expected, or they should be ignored.
+ If we get the same error code as expected and it is not a concurrency
+ issue, or should be ignored.
*/
else if ((expected_error == actual_error &&
!concurrency_error_code(expected_error)) ||
@@ -3191,8 +3201,25 @@ Default database: '%s'. Query: '%s'",
DBUG_PRINT("info",("error ignored"));
clear_all_errors(thd, const_cast<Relay_log_info*>(rli));
thd->killed= THD::NOT_KILLED;
+ /*
+ When an error is expected and matches the actual error, the
+ slave does not report any error and, as a consequence, changes
+ on transactional tables are not rolled back in the function
+ close_thread_tables(). For that reason, we explicitly roll
+ them back here.
+ */
+ if (expected_error && expected_error == actual_error)
+ ha_autocommit_or_rollback(thd, TRUE);
}
/*
+ If we expected a non-zero error code and got nothing, and it is a concurrency
+ issue or should be ignored.
+ */
+ else if (expected_error && !actual_error &&
+ (concurrency_error_code(expected_error) ||
+ ignored_error_code(expected_error)))
+ ha_autocommit_or_rollback(thd, TRUE);
+ /*
Other cases: mostly we expected no error and get one.
*/
else if (thd->is_slave_error || thd->is_fatal_error)
=== modified file 'sql/my_decimal.h'
--- a/sql/my_decimal.h 2009-09-07 20:50:10 +0000
+++ b/sql/my_decimal.h 2009-10-15 21:38:29 +0000
@@ -48,10 +48,12 @@ C_MODE_END
digits * number of decimal digits in one our big digit - number of decimal
digits in one our big digit decreased by 1 (because we always put decimal
point on the border of our big digits))
+
+ This value is 65 for historical reasons, partly because it is used
+ as the maximum allowed precision and not the actual maximum precision.
*/
#define DECIMAL_MAX_PRECISION (DECIMAL_MAX_POSSIBLE_PRECISION - 8*2)
#define DECIMAL_MAX_SCALE 30
-#define DECIMAL_NOT_SPECIFIED 31
/**
maximum length of string representation (number of maximum decimal
@@ -75,12 +77,6 @@ inline uint my_decimal_size(uint precisi
}
-inline int my_decimal_int_part(uint precision, uint decimals)
-{
- return precision - ((decimals == DECIMAL_NOT_SPECIFIED) ? 0 : decimals);
-}
-
-
/**
my_decimal class limits 'decimal_t' type to what we need in MySQL.
@@ -184,7 +180,7 @@ inline uint my_decimal_length_to_precisi
}
inline uint32 my_decimal_precision_to_length_no_truncation(uint precision,
- uint8 scale,
+ uint scale,
bool unsigned_flag)
{
/*
@@ -196,7 +192,7 @@ inline uint32 my_decimal_precision_to_le
(unsigned_flag || !precision ? 0 : 1));
}
-inline uint32 my_decimal_precision_to_length(uint precision, uint8 scale,
+inline uint32 my_decimal_precision_to_length(uint precision, uint scale,
bool unsigned_flag)
{
/*
=== modified file 'sql/mysql_priv.h'
--- a/sql/mysql_priv.h 2009-10-17 19:12:28 +0000
+++ b/sql/mysql_priv.h 2009-11-09 23:49:25 +0000
@@ -2306,7 +2306,8 @@ enum enum_explain_filename_mode
{
EXPLAIN_ALL_VERBOSE= 0,
EXPLAIN_PARTITIONS_VERBOSE,
- EXPLAIN_PARTITIONS_AS_COMMENT
+ EXPLAIN_PARTITIONS_AS_COMMENT,
+ EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING
};
uint explain_filename(const char *from, char *to, uint to_length,
enum_explain_filename_mode explain_mode);
=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc 2009-10-26 11:38:17 +0000
+++ b/sql/mysqld.cc 2009-10-31 19:22:50 +0000
@@ -1889,7 +1889,9 @@ void close_connection(THD *thd, uint err
extern "C" sig_handler end_mysqld_signal(int sig __attribute__((unused)))
{
DBUG_ENTER("end_mysqld_signal");
- kill_mysql(); // Take down mysqld nicely
+ /* Don't call kill_mysql() if signal thread is not running */
+ if (signal_thread_in_use)
+ kill_mysql(); // Take down mysqld nicely
DBUG_VOID_RETURN; /* purecov: deadcode */
}
@@ -8082,7 +8084,7 @@ mysqld_get_one_option(int optid,
switch(optid) {
#ifndef DBUG_OFF
case OPT_DEBUG_FLUSH:
- argument= IF_WIN(default_dbug_option, (char*) "d:t:i:O,/tmp/mysqld.trace");
+ argument= IF_WIN((char*) default_dbug_option, (char*) "d:t:i:O,/tmp/mysqld.trace");
/* fall through */
case '#':
if (!argument)
=== modified file 'sql/opt_range.cc'
--- a/sql/opt_range.cc 2009-09-09 21:59:28 +0000
+++ b/sql/opt_range.cc 2009-10-15 21:38:29 +0000
@@ -2243,7 +2243,7 @@ int SQL_SELECT::test_quick_select(THD *t
KEY *key_info;
PARAM param;
- if (check_stack_overrun(thd, 2*STACK_MIN_SIZE, buff))
+ if (check_stack_overrun(thd, 2*STACK_MIN_SIZE + sizeof(PARAM), buff))
DBUG_RETURN(0); // Fatal error flag is set
/* set up parameter that is passed to all functions */
@@ -4839,11 +4839,10 @@ static TRP_RANGE *get_key_scans_params(P
{
int idx;
SEL_ARG **key,**end, **key_to_read= NULL;
- ha_rows best_records;
+ ha_rows UNINIT_VAR(best_records); /* protected by key_to_read */
TRP_RANGE* read_plan= NULL;
bool pk_is_clustered= param->table->file->primary_key_is_clustered();
DBUG_ENTER("get_key_scans_params");
- LINT_INIT(best_records); /* protected by key_to_read */
/*
Note that there may be trees that have type SEL_TREE::KEY but contain no
key reads at all, e.g. tree for expression "key1 is not null" where key1
@@ -5827,6 +5826,7 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND
{
tree= new (alloc) SEL_ARG(field, 0, 0);
tree->type= SEL_ARG::IMPOSSIBLE;
+ field->table->in_use->variables.sql_mode= orig_sql_mode;
goto end;
}
else
@@ -5856,11 +5856,14 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND
but we'll need to convert '>' to '>=' and '<' to '<='. This will
be done together with other types at the end of this function
- (grep for field_is_equal_to_item)
+ (grep for stored_field_cmp_to_item)
*/
}
else
+ {
+ field->table->in_use->variables.sql_mode= orig_sql_mode;
goto end;
+ }
}
}
@@ -5931,7 +5934,7 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND
switch (type) {
case Item_func::LT_FUNC:
- if (field_is_equal_to_item(field,value))
+ if (stored_field_cmp_to_item(field,value) == 0)
tree->max_flag=NEAR_MAX;
/* fall through */
case Item_func::LE_FUNC:
@@ -5945,11 +5948,16 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND
break;
case Item_func::GT_FUNC:
/* Don't use open ranges for partial key_segments */
- if (field_is_equal_to_item(field,value) &&
- !(key_part->flag & HA_PART_KEY_SEG))
+ if ((!(key_part->flag & HA_PART_KEY_SEG)) &&
+ (stored_field_cmp_to_item(field, value) <= 0))
tree->min_flag=NEAR_MIN;
- /* fall through */
+ tree->max_flag= NO_MAX_RANGE;
+ break;
case Item_func::GE_FUNC:
+ /* Don't use open ranges for partial key_segments */
+ if ((!(key_part->flag & HA_PART_KEY_SEG)) &&
+ (stored_field_cmp_to_item(field,value) < 0))
+ tree->min_flag= NEAR_MIN;
tree->max_flag=NO_MAX_RANGE;
break;
case Item_func::SP_EQUALS_FUNC:
@@ -6440,13 +6448,6 @@ key_and(RANGE_OPT_PARAM *param, SEL_ARG
return 0; // Can't optimize this
}
- if ((key1->min_flag | key2->min_flag) & GEOM_FLAG)
- {
- key1->free_tree();
- key2->free_tree();
- return 0; // Can't optimize this
- }
-
key1->use_count--;
key2->use_count--;
SEL_ARG *e1=key1->first(), *e2=key2->first(), *new_tree=0;
@@ -6797,9 +6798,7 @@ static bool eq_tree(SEL_ARG* a,SEL_ARG *
SEL_ARG *
SEL_ARG::insert(SEL_ARG *key)
{
- SEL_ARG *element,**par,*last_element;
- LINT_INIT(par);
- LINT_INIT(last_element);
+ SEL_ARG *element,**UNINIT_VAR(par),*UNINIT_VAR(last_element);
for (element= this; element != &null_element ; )
{
@@ -8066,7 +8065,10 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_
if (cur_quick->file->inited != handler::NONE)
cur_quick->file->ha_index_end();
if (cur_quick->init() || cur_quick->reset())
+ {
+ delete unique;
DBUG_RETURN(1);
+ }
}
if (result)
@@ -8074,13 +8076,17 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_
if (result != HA_ERR_END_OF_FILE)
{
cur_quick->range_end();
+ delete unique;
DBUG_RETURN(result);
}
break;
}
if (thd->killed)
+ {
+ delete unique;
DBUG_RETURN(1);
+ }
/* skip row if it will be retrieved by clustered PK scan */
if (pk_quick_select && pk_quick_select->row_in_ranges())
@@ -8089,8 +8095,10 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_
cur_quick->file->position(cur_quick->record);
result= unique->unique_add((char*)cur_quick->file->ref);
if (result)
+ {
+ delete unique;
DBUG_RETURN(1);
-
+ }
}
/*
@@ -9395,7 +9403,9 @@ get_best_group_min_max(PARAM *param, SEL
goto next_index;
}
else
+ {
DBUG_ASSERT(FALSE);
+ }
/* Check (SA2). */
if (min_max_arg_item)
@@ -9629,7 +9639,17 @@ check_group_min_max_predicates(COND *con
*/
if (cond_type == Item::SUBSELECT_ITEM)
DBUG_RETURN(FALSE);
-
+
+ /*
+ Condition of the form 'field' is equivalent to 'field <> 0' and thus
+ satisfies the SA3 condition.
+ */
+ if (cond_type == Item::FIELD_ITEM)
+ {
+ DBUG_PRINT("info", ("Analyzing: %s", cond->full_name()));
+ DBUG_RETURN(TRUE);
+ }
+
/* We presume that at this point there are no other Items than functions. */
DBUG_ASSERT(cond_type == Item::FUNC_ITEM);
@@ -9787,11 +9807,22 @@ get_constant_key_infix(KEY *index_info,
return FALSE;
uint field_length= cur_part->store_length;
- if ((cur_range->maybe_null &&
- cur_range->min_value[0] && cur_range->max_value[0]) ||
- !memcmp(cur_range->min_value, cur_range->max_value, field_length))
- {
- /* cur_range specifies 'IS NULL' or an equality condition. */
+ if (cur_range->maybe_null &&
+ cur_range->min_value[0] && cur_range->max_value[0])
+ {
+ /*
+ cur_range specifies 'IS NULL'. In this case the argument points
+ to a "null value" (is_null_string) that may not always be long
+ enough for a direct memcpy to a field.
+ */
+ DBUG_ASSERT (field_length > 0);
+ *key_ptr= 1;
+ bzero(key_ptr+1,field_length-1);
+ key_ptr+= field_length;
+ *key_infix_len+= field_length;
+ }
+ else if (memcmp(cur_range->min_value, cur_range->max_value, field_length) == 0)
+ { /* cur_range specifies an equality condition. */
memcpy(key_ptr, cur_range->min_value, field_length);
key_ptr+= field_length;
*key_infix_len+= field_length;
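
The 'IS NULL' branch added above writes the key image by hand instead of copying from is_null_string; a standalone sketch of that encoding, with the buffer and length as assumed example parameters:

#include <cstring>

/* Encode a "col IS NULL" key part: a one-byte NULL indicator set to 1
   followed by a zero-filled value. Assumes field_length > 0, as the
   DBUG_ASSERT above does. */
static unsigned char *append_is_null_infix(unsigned char *key_ptr,
                                           unsigned field_length)
{
  key_ptr[0]= 1;                                  /* NULL indicator    */
  std::memset(key_ptr + 1, 0, field_length - 1);  /* zero value bytes  */
  return key_ptr + field_length;                  /* next key part     */
}
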
=== modified file 'sql/partition_info.h'
--- a/sql/partition_info.h 2008-11-10 20:21:49 +0000
+++ b/sql/partition_info.h 2009-09-02 15:42:08 +0000
@@ -300,6 +300,7 @@ static inline void init_single_partition
{
part_iter->part_nums.start= part_iter->part_nums.cur= part_id;
part_iter->part_nums.end= part_id+1;
+ part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
part_iter->get_next= get_next_partition_id_range;
}
@@ -310,5 +311,6 @@ void init_all_partitions_iterator(partit
{
part_iter->part_nums.start= part_iter->part_nums.cur= 0;
part_iter->part_nums.end= part_info->no_parts;
+ part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
part_iter->get_next= get_next_partition_id_range;
}
=== modified file 'sql/share/errmsg.txt'
--- a/sql/share/errmsg.txt 2009-10-16 22:57:48 +0000
+++ b/sql/share/errmsg.txt 2009-11-09 23:49:25 +0000
@@ -6184,17 +6184,17 @@ ER_FUNC_INEXISTENT_NAME_COLLISION 42000
# When updating these, please update EXPLAIN_FILENAME_MAX_EXTRA_LENGTH in
# mysql_priv.h with the new maximal additional length for explain_filename.
ER_DATABASE_NAME
- eng "Database `%s`"
- swe "Databas `%s`"
+ eng "Database"
+ swe "Databas"
ER_TABLE_NAME
- eng "Table `%s`"
- swe "Tabell `%s`"
+ eng "Table"
+ swe "Tabell"
ER_PARTITION_NAME
- eng "Partition `%s`"
- swe "Partition `%s`"
+ eng "Partition"
+ swe "Partition"
ER_SUBPARTITION_NAME
- eng "Subpartition `%s`"
- swe "Subpartition `%s`"
+ eng "Subpartition"
+ swe "Subpartition"
ER_TEMPORARY_NAME
eng "Temporary"
swe "Tempor�
=== modified file 'sql/slave.cc'
--- a/sql/slave.cc 2009-10-26 11:35:42 +0000
+++ b/sql/slave.cc 2009-11-06 17:22:32 +0000
@@ -1961,9 +1961,6 @@ static int has_temporary_error(THD *thd)
{
DBUG_ENTER("has_temporary_error");
- if (thd->is_fatal_error)
- DBUG_RETURN(0);
-
DBUG_EXECUTE_IF("all_errors_are_temporary_errors",
if (thd->main_da.is_error())
{
@@ -3689,6 +3686,31 @@ void end_relay_log_info(Relay_log_info*
DBUG_VOID_RETURN;
}
+
+/**
+ Hook to detach the active VIO before closing a connection handle.
+
+ The client API might close the connection (and associated data)
+ in case it encounters an unrecoverable (network) error. This hook
+ is called from the client code before the VIO handle is deleted and
+ allows the thread to detach the active vio so that it does not point
+ to freed memory.
+
+ Other calls to THD::clear_active_vio throughout this module are
+ redundant due to the hook but are left in place for illustrative
+ purposes.
+*/
+
+extern "C" void slave_io_thread_detach_vio()
+{
+#ifdef SIGNAL_WITH_VIO_CLOSE
+ THD *thd= current_thd;
+ if (thd->slave_thread)
+ thd->clear_active_vio();
+#endif
+}
+
+
/*
Try to connect until successful or slave killed
=== modified file 'sql/spatial.cc'
--- a/sql/spatial.cc 2009-04-25 10:05:32 +0000
+++ b/sql/spatial.cc 2009-10-15 21:38:29 +0000
@@ -935,13 +935,10 @@ int Gis_polygon::interior_ring_n(uint32
int Gis_polygon::centroid_xy(double *x, double *y) const
{
uint32 n_linear_rings;
- double res_area;
- double res_cx, res_cy;
+ double UNINIT_VAR(res_area);
+ double UNINIT_VAR(res_cx), UNINIT_VAR(res_cy);
const char *data= m_data;
bool first_loop= 1;
- LINT_INIT(res_area);
- LINT_INIT(res_cx);
- LINT_INIT(res_cy);
if (no_data(data, 4))
return 1;
@@ -1636,14 +1633,10 @@ int Gis_multi_polygon::centroid(String *
uint32 n_polygons;
bool first_loop= 1;
Gis_polygon p;
- double res_area, res_cx, res_cy;
+ double UNINIT_VAR(res_area), UNINIT_VAR(res_cx), UNINIT_VAR(res_cy);
double cur_area, cur_cx, cur_cy;
const char *data= m_data;
- LINT_INIT(res_area);
- LINT_INIT(res_cx);
- LINT_INIT(res_cy);
-
if (no_data(data, 4))
return 1;
n_polygons= uint4korr(data);
=== modified file 'sql/sql_base.cc'
--- a/sql/sql_base.cc 2009-10-16 22:57:48 +0000
+++ b/sql/sql_base.cc 2009-11-09 23:49:25 +0000
@@ -1515,21 +1515,23 @@ void close_temporary_tables(THD *thd)
my_thread_id save_pseudo_thread_id= thd->variables.pseudo_thread_id;
/* Set pseudo_thread_id to be that of the processed table */
thd->variables.pseudo_thread_id= tmpkeyval(thd, table);
- /*
- Loop forward through all tables within the sublist of
- common pseudo_thread_id to create single DROP query.
+ String db;
+ db.append(table->s->db.str);
+ /* Loop forward through all tables that belong to a common database
+ within the sublist of common pseudo_thread_id to create a single
+ DROP query.
*/
for (s_query.length(stub_len);
table && is_user_table(table) &&
- tmpkeyval(thd, table) == thd->variables.pseudo_thread_id;
+ tmpkeyval(thd, table) == thd->variables.pseudo_thread_id &&
+ table->s->db.length == db.length() &&
+ strcmp(table->s->db.str, db.ptr()) == 0;
table= next)
{
/*
- We are going to add 4 ` around the db/table names and possible more
- due to special characters in the names
+ We are going to add ` around the table names and possibly more
+ due to special characters in the names
*/
- append_identifier(thd, &s_query, table->s->db.str, strlen(table->s->db.str));
- s_query.append('.');
append_identifier(thd, &s_query, table->s->table_name.str,
strlen(table->s->table_name.str));
s_query.append(',');
@@ -1542,6 +1544,7 @@ void close_temporary_tables(THD *thd)
Query_log_event qinfo(thd, s_query.ptr(),
s_query.length() - 1 /* to remove trailing ',' */,
0, FALSE, 0);
+ qinfo.db= db.ptr();
thd->variables.character_set_client= cs_save;
mysql_bin_log.write(&qinfo);
thd->variables.pseudo_thread_id= save_pseudo_thread_id;
=== modified file 'sql/sql_db.cc'
--- a/sql/sql_db.cc 2009-09-07 20:50:10 +0000
+++ b/sql/sql_db.cc 2009-10-15 21:38:29 +0000
@@ -658,10 +658,8 @@ int mysql_create_db(THD *thd, char *db,
}
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_DB_CREATE_EXISTS, ER(ER_DB_CREATE_EXISTS), db);
- if (!silent)
- my_ok(thd);
error= 0;
- goto exit;
+ goto not_silent;
}
else
{
@@ -698,7 +696,8 @@ int mysql_create_db(THD *thd, char *db,
happened. (This is a very unlikely senario)
*/
}
-
+
+not_silent:
if (!silent)
{
char *query;
=== modified file 'sql/sql_delete.cc'
--- a/sql/sql_delete.cc 2009-09-07 20:50:10 +0000
+++ b/sql/sql_delete.cc 2009-10-15 21:38:29 +0000
@@ -1067,6 +1067,9 @@ bool mysql_truncate(THD *thd, TABLE_LIST
bzero((char*) &create_info,sizeof(create_info));
+ /* Remove tables from the HANDLER's hash. */
+ mysql_ha_rm_tables(thd, table_list, FALSE);
+
/* If it is a temporary table, close and regenerate it */
if (!dont_send_ok && (table= find_temporary_table(thd, table_list)))
{
=== modified file 'sql/sql_insert.cc'
--- a/sql/sql_insert.cc 2009-10-16 22:57:48 +0000
+++ b/sql/sql_insert.cc 2009-11-09 23:49:25 +0000
@@ -3423,25 +3423,6 @@ static TABLE *create_table_from_items(TH
bool not_used;
DBUG_ENTER("create_table_from_items");
- DBUG_EXECUTE_IF("sleep_create_select_before_check_if_exists", my_sleep(6000000););
-
- if (!(create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
- create_table->table->db_stat)
- {
- /* Table already exists and was open at open_and_lock_tables() stage. */
- if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
- {
- create_info->table_existed= 1; // Mark that table existed
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
- ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
- create_table->table_name);
- DBUG_RETURN(create_table->table);
- }
-
- my_error(ER_TABLE_EXISTS_ERROR, MYF(0), create_table->table_name);
- DBUG_RETURN(0);
- }
-
tmp_table.alias= 0;
tmp_table.timestamp_field= 0;
tmp_table.s= &share;
@@ -3650,10 +3631,35 @@ select_create::prepare(List<Item> &value
thd->binlog_start_trans_and_stmt();
}
- if (!(table= create_table_from_items(thd, create_info, create_table,
- alter_info, &values,
- &extra_lock, hook_ptr)))
- DBUG_RETURN(-1); // abort() deletes table
+ DBUG_EXECUTE_IF("sleep_create_select_before_check_if_exists", my_sleep(6000000););
+
+ if (!(create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
+ create_table->table->db_stat)
+ {
+ /* Table already exists and was open at open_and_lock_tables() stage. */
+ if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
+ {
+ /* Mark that table existed */
+ create_info->table_existed= 1;
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
+ create_table->table_name);
+ if (thd->current_stmt_binlog_row_based)
+ binlog_show_create_table(&(create_table->table), 1);
+ table= create_table->table;
+ }
+ else
+ {
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), create_table->table_name);
+ DBUG_RETURN(-1);
+ }
+ }
+ else
+ if (!(table= create_table_from_items(thd, create_info, create_table,
+ alter_info, &values,
+ &extra_lock, hook_ptr)))
+ /* abort() deletes table */
+ DBUG_RETURN(-1);
if (extra_lock)
{
=== modified file 'sql/sql_parse.cc'
--- a/sql/sql_parse.cc 2009-10-26 20:48:20 +0000
+++ b/sql/sql_parse.cc 2009-11-09 23:49:25 +0000
@@ -464,7 +464,7 @@ pthread_handler_t handle_bootstrap(void
thd->init_for_queries();
while (fgets(buff, thd->net.max_packet, file))
{
- char *query;
+ char *query, *res;
/* strlen() can't be deleted because fgets() doesn't return length */
ulong length= (ulong) strlen(buff);
while (buff[length-1] != '\n' && !feof(file))
=== modified file 'sql/sql_partition.cc'
--- a/sql/sql_partition.cc 2009-10-16 22:57:48 +0000
+++ b/sql/sql_partition.cc 2009-11-09 23:49:25 +0000
@@ -2767,8 +2767,24 @@ uint32 get_list_array_idx_for_endpoint(p
if (part_info->part_expr->null_value)
{
- DBUG_RETURN(0);
+ /*
+ Special handling for MONOTONIC functions that can return NULL for
+ values that are comparable. I.e.
+ '2000-00-00' can be compared to '2000-01-01' but TO_DAYS('2000-00-00')
+ returns NULL which cannot be compared using <, >, <=, >= etc.
+
+ Otherwise, just return the first index (lowest value).
+ */
+ enum_monotonicity_info monotonic;
+ monotonic= part_info->part_expr->get_monotonicity_info();
+ if (monotonic != MONOTONIC_INCREASING_NOT_NULL &&
+ monotonic != MONOTONIC_STRICT_INCREASING_NOT_NULL)
+ {
+ /* F(col) can not return NULL, return index with lowest value */
+ DBUG_RETURN(0);
+ }
}
+
if (unsigned_flag)
part_func_value-= 0x8000000000000000ULL;
DBUG_ASSERT(part_info->no_list_values);
@@ -2917,11 +2933,29 @@ uint32 get_partition_id_range_for_endpoi
if (part_info->part_expr->null_value)
{
- uint32 ret_part_id= 0;
- if (!left_endpoint && include_endpoint)
- ret_part_id= 1;
- DBUG_RETURN(ret_part_id);
+ /*
+ Special handling for MONOTONIC functions that can return NULL for
+ values that are comparable. I.e.
+ '2000-00-00' can be compared to '2000-01-01' but TO_DAYS('2000-00-00')
+ returns NULL which cannot be compared using <, >, <=, >= etc.
+
+ Otherwise, just return the first partition
+ (may be included if not left endpoint)
+ */
+ enum_monotonicity_info monotonic;
+ monotonic= part_info->part_expr->get_monotonicity_info();
+ if (monotonic != MONOTONIC_INCREASING_NOT_NULL &&
+ monotonic != MONOTONIC_STRICT_INCREASING_NOT_NULL)
+ {
+ /* F(col) can not return NULL, return partition with lowest value */
+ if (!left_endpoint && include_endpoint)
+ DBUG_RETURN(1);
+ DBUG_RETURN(0);
+
+ }
}
+
+
if (unsigned_flag)
part_func_value-= 0x8000000000000000ULL;
if (left_endpoint && !include_endpoint)
@@ -6078,6 +6112,9 @@ uint fast_alter_partition_table(THD *thd
lpt->pack_frm_len= 0;
thd->work_part_info= part_info;
+ /* Never update timestamp columns during ALTER TABLE */
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
+
if (fast_alter_partition & HA_PARTITION_ONE_PHASE)
{
/*
@@ -6694,6 +6731,7 @@ int get_part_iter_for_interval_via_mappi
Field *field= part_info->part_field_array[0];
uint32 max_endpoint_val;
get_endpoint_func get_endpoint;
+ bool can_match_multiple_values; /* is not '=' */
uint field_len= field->pack_length_in_rec();
part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
@@ -6731,6 +6769,23 @@ int get_part_iter_for_interval_via_mappi
}
else
assert(0);
+
+ can_match_multiple_values= (flags || !min_value || !max_value ||
+ memcmp(min_value, max_value, field_len));
+ if (can_match_multiple_values &&
+ (part_info->part_type == RANGE_PARTITION ||
+ part_info->has_null_value))
+ {
+ /* Range scan on RANGE or LIST partitioned table */
+ enum_monotonicity_info monotonic;
+ monotonic= part_info->part_expr->get_monotonicity_info();
+ if (monotonic == MONOTONIC_INCREASING_NOT_NULL ||
+ monotonic == MONOTONIC_STRICT_INCREASING_NOT_NULL)
+ {
+ /* col is NOT NULL, but F(col) can return NULL, add NULL partition */
+ part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
+ }
+ }
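
The decision added above can be summarised in a small standalone predicate; the enum mirrors enum_monotonicity_info and the boolean parameters are simplified assumptions:

/* The NULL partition must be scanned when the interval can match more than
   one value and the partitioning function is monotonic only for valid
   input, i.e. F(col) may evaluate to NULL for a non-NULL column value. */
enum monotonicity
{
  NON_MONOTONIC,
  MONOTONIC_INCREASING,
  MONOTONIC_INCREASING_NOT_NULL,
  MONOTONIC_STRICT_INCREASING,
  MONOTONIC_STRICT_INCREASING_NOT_NULL
};

static bool must_scan_null_partition(bool can_match_multiple_values,
                                     bool range_or_list_with_null,
                                     monotonicity m)
{
  return can_match_multiple_values && range_or_list_with_null &&
         (m == MONOTONIC_INCREASING_NOT_NULL ||
          m == MONOTONIC_STRICT_INCREASING_NOT_NULL);
}
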
/*
Find minimum: Do special handling if the interval has left bound in form
@@ -6763,6 +6818,14 @@ int get_part_iter_for_interval_via_mappi
store_key_image_to_rec(field, min_value, field_len);
bool include_endp= !test(flags & NEAR_MIN);
part_iter->part_nums.start= get_endpoint(part_info, 1, include_endp);
+ if (!can_match_multiple_values && part_info->part_expr->null_value)
+ {
+ /* col = x and F(x) = NULL -> only search NULL partition */
+ part_iter->part_nums.cur= part_iter->part_nums.start= 0;
+ part_iter->part_nums.end= 0;
+ part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
+ return 1;
+ }
part_iter->part_nums.cur= part_iter->part_nums.start;
if (part_iter->part_nums.start == max_endpoint_val)
return 0; /* No partitions */
@@ -6847,6 +6910,7 @@ int get_part_iter_for_interval_via_walki
Field *field;
uint total_parts;
partition_iter_func get_next_func;
+ part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
if (is_subpart)
{
field= part_info->subpart_field_array[0];
@@ -6957,7 +7021,13 @@ uint32 get_next_partition_id_range(PARTI
{
if (part_iter->part_nums.cur >= part_iter->part_nums.end)
{
+ if (part_iter->ret_null_part)
+ {
+ part_iter->ret_null_part= FALSE;
+ return 0; /* NULL always in first range partition */
+ }
part_iter->part_nums.cur= part_iter->part_nums.start;
+ part_iter->ret_null_part= part_iter->ret_null_part_orig;
return NOT_A_PARTITION_ID;
}
else
@@ -6985,7 +7055,7 @@ uint32 get_next_partition_id_range(PARTI
uint32 get_next_partition_id_list(PARTITION_ITERATOR *part_iter)
{
- if (part_iter->part_nums.cur == part_iter->part_nums.end)
+ if (part_iter->part_nums.cur >= part_iter->part_nums.end)
{
if (part_iter->ret_null_part)
{
=== modified file 'sql/sql_plugin.cc'
--- a/sql/sql_plugin.cc 2009-10-26 11:35:42 +0000
+++ b/sql/sql_plugin.cc 2009-10-29 00:04:56 +0000
@@ -3226,7 +3226,6 @@ static int test_plugin_options(MEM_ROOT
my_bool can_disable;
bool disable_plugin;
enum_plugin_load_policy plugin_load_policy= PLUGIN_ON;
-
MEM_ROOT *mem_root= alloc_root_inited(&tmp->mem_root) ?
&tmp->mem_root : &plugin_mem_root;
st_mysql_sys_var **opt;
@@ -3240,13 +3239,13 @@ static int test_plugin_options(MEM_ROOT
DBUG_ENTER("test_plugin_options");
DBUG_ASSERT(tmp->plugin && tmp->name.str);
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
/*
- The 'federated' and 'ndbcluster' storage engines are always disabled by
- default.
+ The 'ndbcluster' storage engine is always disabled by default.
*/
- if (!(my_strcasecmp(&my_charset_latin1, tmp->name.str, "federated") &&
- my_strcasecmp(&my_charset_latin1, tmp->name.str, "ndbcluster")))
+ if (!my_strcasecmp(&my_charset_latin1, tmp->name.str, "ndbcluster"))
plugin_load_policy= PLUGIN_OFF;
+#endif
for (opt= tmp->plugin->system_vars; opt && *opt; opt++)
count+= 2; /* --{plugin}-{optname} and --plugin-{plugin}-{optname} */
@@ -3295,6 +3294,11 @@ static int test_plugin_options(MEM_ROOT
can_disable=
my_strcasecmp(&my_charset_latin1, tmp->name.str, "MyISAM") &&
my_strcasecmp(&my_charset_latin1, tmp->name.str, "MEMORY");
+#ifdef USE_MARIA_FOR_TMP_TABLES
+ if (!can_disable)
+ can_disable= (my_strcasecmp(&my_charset_latin1, tmp->name.str, "Maria")
+ != 0);
+#endif
tmp->is_mandatory= (plugin_load_policy == PLUGIN_FORCE) || !can_disable;
=== modified file 'sql/sql_profile.cc'
--- a/sql/sql_profile.cc 2009-09-07 20:50:10 +0000
+++ b/sql/sql_profile.cc 2009-10-15 21:38:29 +0000
@@ -33,7 +33,7 @@
#include "my_sys.h"
#define TIME_FLOAT_DIGITS 9
-/** two vals encoded: (dec*100)+len */
+/** two vals encoded: (len*100)+dec */
#define TIME_I_S_DECIMAL_SIZE (TIME_FLOAT_DIGITS*100)+(TIME_FLOAT_DIGITS-3)
#define MAX_QUERY_LENGTH 300
=== modified file 'sql/sql_select.cc'
--- a/sql/sql_select.cc 2009-10-26 20:48:20 +0000
+++ b/sql/sql_select.cc 2009-11-09 23:49:25 +0000
@@ -9493,43 +9493,8 @@ static Field *create_tmp_field_from_item
new_field->set_derivation(item->collation.derivation);
break;
case DECIMAL_RESULT:
- {
- uint8 dec= item->decimals;
- uint8 intg= ((Item_decimal *) item)->decimal_precision() - dec;
- uint32 len= item->max_length;
-
- /*
- Trying to put too many digits overall in a DECIMAL(prec,dec)
- will always throw a warning. We must limit dec to
- DECIMAL_MAX_SCALE however to prevent an assert() later.
- */
-
- if (dec > 0)
- {
- signed int overflow;
-
- dec= min(dec, DECIMAL_MAX_SCALE);
-
- /*
- If the value still overflows the field with the corrected dec,
- we'll throw out decimals rather than integers. This is still
- bad and of course throws a truncation warning.
- +1: for decimal point
- */
-
- overflow= my_decimal_precision_to_length(intg + dec, dec,
- item->unsigned_flag) - len;
-
- if (overflow > 0)
- dec= max(0, dec - overflow); // too long, discard fract
- else
- len -= item->decimals - dec; // corrected value fits
- }
-
- new_field= new Field_new_decimal(len, maybe_null, item->name,
- dec, item->unsigned_flag);
+ new_field= Field_new_decimal::new_decimal_field(item);
break;
- }
case ROW_RESULT:
default:
// This case should never be choosen
=== modified file 'sql/sql_show.cc'
--- a/sql/sql_show.cc 2009-10-16 22:57:48 +0000
+++ b/sql/sql_show.cc 2009-11-09 23:49:25 +0000
@@ -5624,6 +5624,12 @@ TABLE *create_schema_table(THD *thd, TAB
{
DBUG_RETURN(0);
}
+ /*
+ Create a type holder, as we want the type of the item to define
+ the type of the object, not the value.
+ */
+ if (!(item= new Item_type_holder(thd, item)))
+ DBUG_RETURN(0);
item->unsigned_flag= (fields_info->field_flags & MY_I_S_UNSIGNED);
item->decimals= fields_info->field_length%10;
item->max_length= (fields_info->field_length/100)%100;
@@ -7111,8 +7117,6 @@ bool show_create_trigger(THD *thd, const
/* Perform closing actions and return error status. */
}
- DBUG_ASSERT(num_tables == 1);
-
Table_triggers_list *triggers= lst->table->triggers;
if (!triggers)
=== modified file 'sql/sql_table.cc'
--- a/sql/sql_table.cc 2009-10-16 22:57:48 +0000
+++ b/sql/sql_table.cc 2009-11-09 23:49:25 +0000
@@ -72,7 +72,7 @@ static void wait_for_kill_signal(THD *th
@brief Helper function for explain_filename
*/
static char* add_identifier(char *to_p, const char * end_p,
- const char* name, uint name_len, int errcode)
+ const char* name, uint name_len, bool add_quotes)
{
uint res;
uint errors;
@@ -92,18 +92,44 @@ static char* add_identifier(char *to_p,
res= strconvert(&my_charset_filename, conv_name, system_charset_info,
conv_string, FN_REFLEN, &errors);
if (!res || errors)
+ {
+ DBUG_PRINT("error", ("strconvert of '%s' failed with %u (errors: %u)", conv_name, res, errors));
conv_name= name;
+ }
else
{
DBUG_PRINT("info", ("conv '%s' -> '%s'", conv_name, conv_string));
conv_name= conv_string;
}
- if (errcode)
- to_p+= my_snprintf(to_p, end_p - to_p, ER(errcode), conv_name);
+ if (add_quotes && (end_p - to_p > 2))
+ {
+ *(to_p++)= '`';
+ while (*conv_name && (end_p - to_p - 1) > 0)
+ {
+ uint length= my_mbcharlen(system_charset_info, *conv_name);
+ if (!length)
+ length= 1;
+ if (length == 1 && *conv_name == '`')
+ {
+ if ((end_p - to_p) < 3)
+ break;
+ *(to_p++)= '`';
+ *(to_p++)= *(conv_name++);
+ }
+ else if (((long) length) < (end_p - to_p))
+ {
+ to_p= strnmov(to_p, conv_name, length);
+ conv_name+= length;
+ }
+ else
+ break; /* string already filled */
+ }
+ to_p= strnmov(to_p, "`", end_p - to_p);
+ }
else
- to_p+= my_snprintf(to_p, end_p - to_p, "`%s`", conv_name);
- return to_p;
+ to_p= strnmov(to_p, conv_name, end_p - to_p);
+ DBUG_RETURN(to_p);
}
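
The quoting behaviour implemented above (wrap in backticks, double any embedded backtick) can be shown with a short standalone sketch; quote_identifier and the sample name are hypothetical, and the length/charset handling of the real function is omitted:

#include <string>

/* Quote an identifier the way add_identifier() does when add_quotes is
   true: surround with backticks and escape embedded backticks by
   doubling them, e.g. p`1 becomes `p``1`. */
static std::string quote_identifier(const std::string &name, bool add_quotes)
{
  if (!add_quotes)
    return name;
  std::string out= "`";
  for (char c : name)
  {
    if (c == '`')
      out+= '`';          /* escape by doubling */
    out+= c;
  }
  out+= '`';
  return out;
}
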
@@ -135,6 +161,8 @@ static char* add_identifier(char *to_p,
[,[ Temporary| Renamed] Partition `p`
[, Subpartition `sp`]] *|
(| is really a /, and it is all in one line)
+ EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING ->
+ same as above but no quotes are added.
@retval Length of returned string
*/
@@ -245,28 +273,39 @@ uint explain_filename(const char *from,
part_name_len-= 5;
}
}
+ else
+ table_name_len= strlen(table_name);
if (db_name)
{
if (explain_mode == EXPLAIN_ALL_VERBOSE)
{
- to_p= add_identifier(to_p, end_p, db_name, db_name_len,
- ER_DATABASE_NAME);
+ to_p= strnmov(to_p, ER(ER_DATABASE_NAME), end_p - to_p);
+ *(to_p++)= ' ';
+ to_p= add_identifier(to_p, end_p, db_name, db_name_len, 1);
to_p= strnmov(to_p, ", ", end_p - to_p);
}
else
{
- to_p= add_identifier(to_p, end_p, db_name, db_name_len, 0);
+ to_p= add_identifier(to_p, end_p, db_name, db_name_len,
+ (explain_mode !=
+ EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING));
to_p= strnmov(to_p, ".", end_p - to_p);
}
}
if (explain_mode == EXPLAIN_ALL_VERBOSE)
- to_p= add_identifier(to_p, end_p, table_name, table_name_len,
- ER_TABLE_NAME);
+ {
+ to_p= strnmov(to_p, ER(ER_TABLE_NAME), end_p - to_p);
+ *(to_p++)= ' ';
+ to_p= add_identifier(to_p, end_p, table_name, table_name_len, 1);
+ }
else
- to_p= add_identifier(to_p, end_p, table_name, table_name_len, 0);
+ to_p= add_identifier(to_p, end_p, table_name, table_name_len,
+ (explain_mode !=
+ EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING));
if (part_name)
{
- if (explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT)
+ if (explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT ||
+ explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING)
to_p= strnmov(to_p, " /* ", end_p - to_p);
else if (explain_mode == EXPLAIN_PARTITIONS_VERBOSE)
to_p= strnmov(to_p, " ", end_p - to_p);
@@ -280,15 +319,22 @@ uint explain_filename(const char *from,
to_p= strnmov(to_p, ER(ER_RENAMED_NAME), end_p - to_p);
to_p= strnmov(to_p, " ", end_p - to_p);
}
+ to_p= strnmov(to_p, ER(ER_PARTITION_NAME), end_p - to_p);
+ *(to_p++)= ' ';
to_p= add_identifier(to_p, end_p, part_name, part_name_len,
- ER_PARTITION_NAME);
+ (explain_mode !=
+ EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING));
if (subpart_name)
{
to_p= strnmov(to_p, ", ", end_p - to_p);
+ to_p= strnmov(to_p, ER(ER_SUBPARTITION_NAME), end_p - to_p);
+ *(to_p++)= ' ';
to_p= add_identifier(to_p, end_p, subpart_name, subpart_name_len,
- ER_SUBPARTITION_NAME);
+ (explain_mode !=
+ EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING));
}
- if (explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT)
+ if (explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT ||
+ explain_mode == EXPLAIN_PARTITIONS_AS_COMMENT_NO_QUOTING)
to_p= strnmov(to_p, " */", end_p - to_p);
}
DBUG_PRINT("exit", ("to '%s'", to));
@@ -3508,6 +3554,41 @@ void sp_prepare_create_field(THD *thd, C
/*
+ Write CREATE TABLE binlog
+
+ SYNOPSIS
+ write_create_table_bin_log()
+ thd Thread object
+ create_info Create information
+ internal_tmp_table Set to 1 if this is an internal temporary table
+
+ DESCRIPTION
+ This function is only called from mysql_create_table_no_lock and
+ mysql_create_table
+
+ RETURN VALUES
+ NONE
+ */
+static inline void write_create_table_bin_log(THD *thd,
+ const HA_CREATE_INFO *create_info,
+ bool internal_tmp_table)
+{
+ /*
+ Don't write statement if:
+ - It is an internal temporary table,
+ - Row-based logging is used and we are creating a temporary table, or
+ - The binary log is not open.
+ Otherwise, the statement shall be binlogged.
+ */
+ if (!internal_tmp_table &&
+ (!thd->current_stmt_binlog_row_based ||
+ (thd->current_stmt_binlog_row_based &&
+ !(create_info->options & HA_LEX_CREATE_TMP_TABLE))))
+ write_bin_log(thd, TRUE, thd->query, thd->query_length);
+}
+
+
+/*
Create a table
SYNOPSIS
@@ -3771,6 +3852,7 @@ bool mysql_create_table_no_lock(THD *thd
ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
alias);
error= 0;
+ write_create_table_bin_log(thd, create_info, internal_tmp_table);
goto err;
}
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
@@ -3898,18 +3980,7 @@ bool mysql_create_table_no_lock(THD *thd
thd->thread_specific_used= TRUE;
}
- /*
- Don't write statement if:
- - It is an internal temporary table,
- - Row-based logging is used and it we are creating a temporary table, or
- - The binary log is not open.
- Otherwise, the statement shall be binlogged.
- */
- if (!internal_tmp_table &&
- (!thd->current_stmt_binlog_row_based ||
- (thd->current_stmt_binlog_row_based &&
- !(create_info->options & HA_LEX_CREATE_TMP_TABLE))))
- write_bin_log(thd, TRUE, thd->query, thd->query_length);
+ write_create_table_bin_log(thd, create_info, internal_tmp_table);
error= FALSE;
unlock_and_end:
VOID(pthread_mutex_unlock(&LOCK_open));
@@ -3925,6 +3996,7 @@ warn:
ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
alias);
create_info->table_existed= 1; // Mark that table existed
+ write_create_table_bin_log(thd, create_info, internal_tmp_table);
goto unlock_and_end;
}
@@ -3976,6 +4048,7 @@ bool mysql_create_table(THD *thd, const
table_name);
create_info->table_existed= 1;
result= FALSE;
+ write_create_table_bin_log(thd, create_info, internal_tmp_table);
}
else
{
@@ -5317,6 +5390,24 @@ bool mysql_create_like_table(THD* thd, T
goto err; /* purecov: inspected */
}
+goto binlog;
+
+table_exists:
+ if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
+ {
+ char warn_buff[MYSQL_ERRMSG_SIZE];
+ my_snprintf(warn_buff, sizeof(warn_buff),
+ ER(ER_TABLE_EXISTS_ERROR), table_name);
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_TABLE_EXISTS_ERROR,warn_buff);
+ }
+ else
+ {
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name);
+ goto err;
+ }
+
+binlog:
DBUG_EXECUTE_IF("sleep_create_like_before_binlogging", my_sleep(6000000););
/*
@@ -5380,20 +5471,6 @@ bool mysql_create_like_table(THD* thd, T
write_bin_log(thd, TRUE, thd->query, thd->query_length);
res= FALSE;
- goto err;
-
-table_exists:
- if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
- {
- char warn_buff[MYSQL_ERRMSG_SIZE];
- my_snprintf(warn_buff, sizeof(warn_buff),
- ER(ER_TABLE_EXISTS_ERROR), table_name);
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
- ER_TABLE_EXISTS_ERROR,warn_buff);
- res= FALSE;
- }
- else
- my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name);
err:
if (name_lock)
=== modified file 'sql/sql_update.cc'
--- a/sql/sql_update.cc 2009-09-07 20:50:10 +0000
+++ b/sql/sql_update.cc 2009-10-15 21:38:29 +0000
@@ -730,6 +730,7 @@ int mysql_update(THD *thd,
break;
}
}
+ table->auto_increment_field_not_null= FALSE;
dup_key_found= 0;
/*
Caching the killed status to pass as the arg to query event constuctor;
=== modified file 'sql/sql_view.cc'
--- a/sql/sql_view.cc 2009-09-07 20:50:10 +0000
+++ b/sql/sql_view.cc 2009-10-15 21:38:29 +0000
@@ -1032,7 +1032,8 @@ bool mysql_make_view(THD *thd, File_pars
TABLE_LIST *top_view= table->top_table();
bool parse_status;
bool result, view_is_mergeable;
- TABLE_LIST *view_main_select_tables;
+ TABLE_LIST *UNINIT_VAR(view_main_select_tables);
+
DBUG_ENTER("mysql_make_view");
DBUG_PRINT("info", ("table: 0x%lx (%s)", (ulong) table, table->table_name));
@@ -1309,7 +1310,6 @@ bool mysql_make_view(THD *thd, File_pars
old_lex->set_stmt_unsafe();
view_is_mergeable= (table->algorithm != VIEW_ALGORITHM_TMPTABLE &&
lex->can_be_merged());
- LINT_INIT(view_main_select_tables);
if (view_is_mergeable)
{
=== modified file 'sql/sql_yacc.yy'
--- a/sql/sql_yacc.yy 2009-10-16 22:57:48 +0000
+++ b/sql/sql_yacc.yy 2009-11-09 23:49:25 +0000
@@ -1822,15 +1822,16 @@ server_option:
;
event_tail:
- EVENT_SYM opt_if_not_exists sp_name
+ remember_name EVENT_SYM opt_if_not_exists sp_name
{
THD *thd= YYTHD;
LEX *lex=Lex;
- lex->create_info.options= $2;
+ lex->stmt_definition_begin= $1;
+ lex->create_info.options= $3;
if (!(lex->event_parse_data= Event_parse_data::new_instance(thd)))
MYSQL_YYABORT;
- lex->event_parse_data->identifier= $3;
+ lex->event_parse_data->identifier= $4;
lex->event_parse_data->on_completion=
Event_parse_data::ON_COMPLETION_DROP;
@@ -3767,8 +3768,8 @@ partitioning:
LEX_STRING partition_name={C_STRING_WITH_LEN("partition")};
if (!plugin_is_ready(&partition_name, MYSQL_STORAGE_ENGINE_PLUGIN))
{
- my_error(ER_FEATURE_DISABLED, MYF(0),
- "partitioning", "--with-partition");
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0),
+ "--skip-partition");
MYSQL_YYABORT;
}
lex->part_info= new partition_info();
@@ -9197,7 +9198,8 @@ procedure_clause:
MYSQL_YYABORT;
}
- if (&lex->select_lex != lex->current_select)
+ if (&lex->select_lex != lex->current_select ||
+ lex->select_lex.get_table_list()->derived)
{
my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "subquery");
MYSQL_YYABORT;
=== modified file 'sql/table.cc'
--- a/sql/table.cc 2009-10-16 22:57:48 +0000
+++ b/sql/table.cc 2009-11-09 23:49:25 +0000
@@ -922,6 +922,15 @@ static int open_binary_frm(THD *thd, TAB
we unlock the old value of share->db_plugin before
replacing it with a globally locked version of tmp_plugin
*/
+ /* Check if the partitioning engine is ready */
+ if (!plugin_is_ready(&name, MYSQL_STORAGE_ENGINE_PLUGIN))
+ {
+ error= 8;
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0),
+ "--skip-partition");
+ my_free(buff, MYF(0));
+ goto err;
+ }
plugin_unlock(NULL, share->db_plugin);
share->db_plugin= ha_lock_engine(NULL, partition_hton);
DBUG_PRINT("info", ("setting dbtype to '%.*s' (%d)",
=== added file 'storage/federated/README'
--- a/storage/federated/README 1970-01-01 00:00:00 +0000
+++ b/storage/federated/README 2009-10-30 18:50:56 +0000
@@ -0,0 +1,7 @@
+The files in this directory are not used by MariaDB
+
+MariaDB uses the new federated storage engine that can be found in the
+federatedx directory.
+
+This directory is only kept around to make it easy to merge code from the
+MySQL source repositories that uses the old and disabled federated code.
=== renamed file 'storage/federated/plug.in' => 'storage/federated/plug.in.disabled'
=== added directory 'storage/federatedx'
=== added file 'storage/federatedx/AUTHORS'
--- a/storage/federatedx/AUTHORS 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/AUTHORS 2009-10-30 18:50:56 +0000
@@ -0,0 +1,11 @@
+FederatedX
+
+Patrick Galbraith <patg@xxxxxxxx> - Federated
+
+Pluggable Storage Engine Skeleton setup
+
+Brian Aker <brian@xxxxxxxxx> | <brian@xxxxxxxxxxx> - Original Design
+Calvin Sun - Windows Support
+Brian Miezejewski - Bug fixes
+Antony T Curtis - Help in initial development, transactions and various help
+Michael Widenius - Bug fixes and some simple early optimizations
=== added file 'storage/federatedx/CMakeLists.txt'
--- a/storage/federatedx/CMakeLists.txt 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/CMakeLists.txt 2009-11-03 14:39:54 +0000
@@ -0,0 +1,3 @@
+INCLUDE("${PROJECT_SOURCE_DIR}/storage/mysql_storage_engine.cmake")
+SET(FEDERATEDX_SOURCES ha_federatedx.cc federatedx_txn.cc federatedx_io.cc federatedx_io_null.cc federatedx_io_mysql.cc)
+MYSQL_STORAGE_ENGINE(FEDERATEDX)
=== added file 'storage/federatedx/ChangeLog'
--- a/storage/federatedx/ChangeLog 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/ChangeLog 2009-10-30 18:50:56 +0000
@@ -0,0 +1,18 @@
+0.2 - Thu March 8 00:00:00 EST 2008
+
+ - Fixed bug #30051 "CREATE TABLE does not connect and check existence of remote table"
+ Modified "real_connect" to take a share and create flag to in order to not rely
+ on any settings that are later instantiated and/or set by get_share
+ Also, put logic in the code to not attempt this if a localhost. There's an annoying
+ functionality that if federated tries to connect to itself during creater table, you
+ get 1159 error (timeout) - only when local. This prevents having this functionality
+ and is probably part of the reason it was removed.
+
+0.1 - Thu Feb 1 00:00:00 EST 2008
+
+ - This is the FederatedX Storage Engine,
+ first release.
+ - Added documentation
+ - Added simple test and README file to explain
+ how to run the test
+ - Added FAQ
=== added file 'storage/federatedx/FAQ'
--- a/storage/federatedx/FAQ 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/FAQ 2009-10-30 18:50:56 +0000
@@ -0,0 +1,40 @@
+Q. What is the FederatedX pluggable storage engine?
+
+A. It is a fork of the Federated Storage Engine that Brian Aker and I
+(Patrick Galbraith) developed originally. It is a storage engine that
+uses a client connection to a remote MySQL data source as its data
+source instead of a local file on disk.
+
+Q. Why did you fork from Federated?
+
+A. To enhance the storage engine independently of the
+MySQL Server release schedule. Many people have been
+mentioning their dissatisfaction with the limitations
+of Federated. I think the engine is a great concept and
+have a sense of obligation to continue to improve it.
+There are some patches already that are in dire need
+of being applied and tested.
+
+Q. What do you plan to do with FederatedX?
+
+A. Many things need addressing:
+
+- Outstanding bugs
+- How to deal with huge result sets
+- Pushdown conditions (being able to pass things like LIMIT
+ to the remote connection to keep from returning huge
+ result sets).
+- Better transactional support
+- Other connection mechanisms (ODBC, JDBC, native drivers
+ of other RDBMSs)
+
+Q. What is FederatedX, and what is it not?
+
+A. FederatedX is not yet a complete "federated" solution in
+ the sense that other vendors have developed (IBM, etc.). It
+ is essentially a networked storage engine. It is my hope
+ to make it a real federated solution.
+
+Q. In which MySQL distributions/forks/branches can I find FederatedX?
+
+A. MariaDB (http://www.mariadb.com)
=== added file 'storage/federatedx/Makefile.am'
--- a/storage/federatedx/Makefile.am 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/Makefile.am 2009-11-01 15:09:55 +0000
@@ -0,0 +1,64 @@
+# Used to build Makefile.in
+
+MYSQLDATAdir = $(localstatedir)
+MYSQLSHAREdir = $(pkgdatadir)
+MYSQLBASEdir= $(prefix)
+MYSQLLIBdir= $(pkglibdir)
+pkgplugindir = $(pkglibdir)/plugin
+INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/include \
+ -I$(top_srcdir)/regex \
+ -I$(top_srcdir)/sql \
+ -I$(srcdir)
+WRAPLIBS=
+
+LDADD =
+
+DEFS = @DEFS@
+
+noinst_HEADERS = ha_federatedx.h federatedx_probes.h
+
+EXTRA_LTLIBRARIES = ha_federatedx.la
+pkgplugin_LTLIBRARIES = @plugin_federated_shared_target@
+ha_federatedx_la_LDFLAGS = -module -rpath $(pkgplugindir)
+ha_federatedx_la_CXXFLAGS= $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
+ha_federatedx_la_CFLAGS = $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
+ha_federatedx_la_SOURCES = ha_federatedx.cc
+
+
+EXTRA_LIBRARIES = libfederatedx.a
+noinst_LIBRARIES = @plugin_federated_static_target@
+libfederatedx_a_CXXFLAGS = $(AM_CFLAGS)
+libfederatedx_a_CFLAGS = $(AM_CFLAGS)
+libfederatedx_a_SOURCES= ha_federatedx.cc federatedx_txn.cc \
+ federatedx_io.cc federatedx_io_null.cc \
+ federatedx_io_mysql.cc
+
+EXTRA_DIST = CMakeLists.txt plug.in ha_federatedx.h \
+ federatedx_probes.h
+
+ha_federatedx_la_SOURCES = ha_federatedx.cc federatedx_txn.cc \
+ federatedx_io.cc federatedx_io_null.cc \
+ federatedx_io_mysql.cc $(top_srcdir)/mysys/string.c
+ha_federatedx_la_LIBADD =
+
+#DTRACE = @DTRACE@
+#DTRACEFLAGS = @DTRACEFLAGS@
+#DTRACEFILES = .libs/libfederatedx_engine_la-ha_federatedx.o
+
+# #if HAVE_DTRACE
+# # libfederatedx_engine_la_LIBADD += federatedx_probes.o
+# #endif
+
+# federatedx_probes.h: federatedx_probes.d
+# $(DTRACE) $(DTRACEFLAGS) -h -s federatedx_probes.d
+# mv federatedx_probes.h federatedx_probes.h.bak
+# sed "s/#include <unistd.h>//g" federatedx_probes.h.bak > federatedx_probes.h
+# rm federatedx_probes.h.bak
+
+#federatedx_probes.o:
+# $(DTRACE) $(DTRACEFLAGS) -G -s federatedx_probes.d $(DTRACEFILES)
+
+# End
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
=== added file 'storage/federatedx/README'
--- a/storage/federatedx/README 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/README 2009-10-30 18:50:56 +0000
@@ -0,0 +1,33 @@
+This is the FederatedX Storage Engine, developed as an external storage engine.
+
+NOTE:
+
+The following is only relevant if you use it for MySQL. MariaDB already comes
+with the latest version of FederatedX.
+
+To install, grab a copy of the mysql source code and run this:
+
+./configure --with-mysql=/path/to/src/mysql-5.x --libdir=/usr/local/lib/mysql/
+
+make install
+
+And then inside of MySQL:
+
+mysql> INSTALL PLUGIN federatedx SONAME 'libfederatedx_engine.so';
+
+mysql> CREATE TABLE `d` (`a` varchar(125), b text, primary key(a)) ENGINE=FEDERATEDX CONNECTION="mysql://root@host/schema/table"
+
+or
+
+mysql> CREATE TABLE `d` (`a` varchar(125), b text, primary key(a)) ENGINE=FEDERATEDX CONNECTION="server" CHARSET=latin1;
+
+You will probably need to edit the Makefile.am in the src/ tree if you want
+to build on anything other than Linux (and the Makefile assumes that the
+server was not compiled for debug). The reason for the two possible
+configure lines is that libdir depends on where MySQL was installed. If
+you run the "INSTALL PLUGIN ..." and get a file-not-found error, check that
+you configured this directory correctly.
+
+On Solaris you can enable DTrace probes by adding --enable-dtrace to
+configure.
+
=== added file 'storage/federatedx/README.windows'
--- a/storage/federatedx/README.windows 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/README.windows 2009-10-30 18:50:56 +0000
@@ -0,0 +1,23 @@
+The following files are changed in order to build a new engine on Windows:
+
+- Update win\configure.js with
+case "WITH_FEDERATEDX_STORAGE_ENGINE":
+to make sure it will pass WITH_FEDERATEDX_STORAGE_ENGINE in.
+
+- Update CMakeLists.txt under the mysql root:
+ IF(WITH_FEDERATEDX_STORAGE_ENGINE)
+ ADD_DEFINITIONS(-D WITH_FEDERATEDX_STORAGE_ENGINE)
+ SET (mysql_plugin_defs
+ "${mysql_plugin_defs},builtin_skeleton_plugin")
+ ENDIF(WITH_FEDERATEDX_STORAGE_ENGINE)
+
+ and,
+
+ IF(WITH_FEDERATEDX_STORAGE_ENGINE)
+ ADD_SUBDIRECTORY(storage/federatedx)
+ ENDIF(WITH_FEDERATEDX_STORAGE_ENGINE)
+
+ - Update CMakeLists.txt under sql:
+ IF(WITH_FEDERATEDX_STORAGE_ENGINE)
+ TARGET_LINK_LIBRARIES(mysqld federatedx)
+ ENDIF(WITH_FEDERATEDX_STORAGE_ENGINE)
=== added file 'storage/federatedx/TODO'
--- a/storage/federatedx/TODO 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/TODO 2009-10-30 18:50:56 +0000
@@ -0,0 +1,30 @@
+Features
+
+* Add Pushdown conditions
+* Add other network driver interfaces
+* Handle large result sets
+* Auto-discovery of tables on foreign data sources
+
+Bugs (http://bugs.mysql.com)
+
+20026 2006-05-23 FEDERATED lacks support for auto_increment_increment and auto_increment_offset
+20724 2006-06-27 FEDERATED does not honour SET INSERT_ID
+28269 2007-05-06 Any FEDERATED engine fails to quote reserved words for field names
+25509 2007-01-10 Federated: Failure with non-ASCII characters
+26697 2007-02-27 Every query to a federated table results in a full scan of MyISAM table.
+21360 2006-07-31 Microsoft Windows (Windows/Linux) mysqldump error on federated tables
+34189 2008-01-31 Any ALTER TABLE t1 ENGINE=FEDERATED CONNECTION='connectionString' on MyISAM fails
+31757 2007-10-22 Any Federated tables break replication Antony Curtis
+33953 2008-01-21 Any mysqld dies on search federated table using nullable index with < or <= operator
+34015 2008-01-23 Linux Problems with float fields using federated tables
+21583 2006-08-11 Linux (Linux) Federated table returns broken strings.
+33702 2008-01-05 Accessing a federated table with a non existing server returns random error code
+25512 2007-01-10 Federated: CREATE failures
+32426 2007-11-16 Any FEDERATED query returns corrupt results for ORDER BY on a TEXT field
+25510 2007-01-10 Federated: double trigger activation
+33250 2007-12-14 SELECT * FROM really_big_federated_table eats lots of virtual memory (OOM)
+14874 2005-11-11 Error 2013: Lost connection to MySQL server with Federated table
+25508 2007-01-10 Federated: Failure to Remove Partitioning
+27180 2007-03-15 #1030 - Got error 1 from storage engine with big tables
+33947 2008-01-20 Any Join on Federated tables with Unique index and IS NOT NULL crashes server
+30051 (fixed) CREATE TABLE does not connect and check existence of remote table
=== added file 'storage/federatedx/federatedx_io.cc'
--- a/storage/federatedx/federatedx_io.cc 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/federatedx_io.cc 2009-10-30 18:50:56 +0000
@@ -0,0 +1,103 @@
+/*
+Copyright (c) 2007, Antony T Curtis
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+ * Neither the name of FederatedX nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+/*#define MYSQL_SERVER 1*/
+#include "mysql_priv.h"
+#include <mysql/plugin.h>
+
+#include "ha_federatedx.h"
+
+#include "m_string.h"
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+typedef federatedx_io *(*instantiate_io_type)(MEM_ROOT *server_root,
+ FEDERATEDX_SERVER *server);
+struct io_schemes_st
+{
+ const char *scheme;
+ instantiate_io_type instantiate;
+};
+
+
+static const io_schemes_st federated_io_schemes[] =
+{
+ { "mysql", &instantiate_io_mysql },
+ { "null", instantiate_io_null } /* must be last element */
+};
+
+const uint federated_io_schemes_count= array_elements(federated_io_schemes);
+
+federatedx_io::federatedx_io(FEDERATEDX_SERVER *aserver)
+ : server(aserver), owner_ptr(0), txn_next(0), idle_next(0),
+ active(FALSE), busy(FALSE), readonly(TRUE)
+{
+ DBUG_ENTER("federatedx_io::federatedx_io");
+ DBUG_ASSERT(server);
+
+ safe_mutex_assert_owner(&server->mutex);
+ server->io_count++;
+
+ DBUG_VOID_RETURN;
+}
+
+
+federatedx_io::~federatedx_io()
+{
+ DBUG_ENTER("federatedx_io::~federatedx_io");
+
+ server->io_count--;
+
+ DBUG_VOID_RETURN;
+}
+
+
+bool federatedx_io::handles_scheme(const char *scheme)
+{
+ const io_schemes_st *ptr = federated_io_schemes;
+ const io_schemes_st *end = ptr + array_elements(federated_io_schemes);
+ while (ptr != end && strcasecmp(scheme, ptr->scheme))
+ ++ptr;
+ return ptr != end;
+}
+
+
+federatedx_io *federatedx_io::construct(MEM_ROOT *server_root,
+ FEDERATEDX_SERVER *server)
+{
+ const io_schemes_st *ptr = federated_io_schemes;
+ const io_schemes_st *end = ptr + (array_elements(federated_io_schemes) - 1);
+ while (ptr != end && strcasecmp(server->scheme, ptr->scheme))
+ ++ptr;
+ return ptr->instantiate(server_root, server);
+}
+
+
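
The federated_io_schemes[] table above maps a connection scheme name to a constructor, with the "null" backend deliberately kept as the last entry: construct() stops its search one element before the end, so an unknown scheme falls back to the null implementation, while handles_scheme() scans the full table and can be used to reject unknown schemes up front. A hypothetical caller (illustrative only; the real call site is federatedx_txn::acquire() later in this patch) might combine the two like this:

  /* Hypothetical helper, not part of the patch: obtain an io object for a
     parsed server definition, refusing schemes the engine does not know. */
  static federatedx_io *open_io_for_server(MEM_ROOT *root,
                                           FEDERATEDX_SERVER *server)
  {
    federatedx_io *io;

    if (!federatedx_io::handles_scheme(server->scheme))
      return NULL;              /* e.g. "odbc" - not in federated_io_schemes */

    /* federatedx_io's constructor asserts that server->mutex is held */
    pthread_mutex_lock(&server->mutex);
    /* matches "mysql", or falls back to the trailing "null" backend */
    io= federatedx_io::construct(root, server);
    pthread_mutex_unlock(&server->mutex);
    return io;
  }
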
=== added file 'storage/federatedx/federatedx_io_mysql.cc'
--- a/storage/federatedx/federatedx_io_mysql.cc 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/federatedx_io_mysql.cc 2009-10-30 18:50:56 +0000
@@ -0,0 +1,592 @@
+/*
+Copyright (c) 2007, Antony T Curtis
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+ * Neither the name of FederatedX nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+/*#define MYSQL_SERVER 1*/
+#include "mysql_priv.h"
+#include <mysql/plugin.h>
+
+#include "ha_federatedx.h"
+
+#include "m_string.h"
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+
+#define SAVEPOINT_REALIZED 1
+#define SAVEPOINT_RESTRICT 2
+#define SAVEPOINT_EMITTED 4
+
+
+typedef struct federatedx_savepoint
+{
+ ulong level;
+ uint flags;
+} SAVEPT;
+
+
+class federatedx_io_mysql :public federatedx_io
+{
+ MYSQL mysql; /* MySQL connection */
+ DYNAMIC_ARRAY savepoints;
+ bool requested_autocommit;
+ bool actual_autocommit;
+
+ int actual_query(const char *buffer, uint length);
+ bool test_all_restrict() const;
+public:
+ federatedx_io_mysql(FEDERATEDX_SERVER *);
+ ~federatedx_io_mysql();
+
+ int simple_query(const char *fmt, ...);
+ int query(const char *buffer, uint length);
+ virtual FEDERATEDX_IO_RESULT *store_result();
+
+ virtual size_t max_query_size() const;
+
+ virtual my_ulonglong affected_rows() const;
+ virtual my_ulonglong last_insert_id() const;
+
+ virtual int error_code();
+ virtual const char *error_str();
+
+ void reset();
+ int commit();
+ int rollback();
+
+ int savepoint_set(ulong sp);
+ ulong savepoint_release(ulong sp);
+ ulong savepoint_rollback(ulong sp);
+ void savepoint_restrict(ulong sp);
+
+ ulong last_savepoint() const;
+ ulong actual_savepoint() const;
+ bool is_autocommit() const;
+
+ bool table_metadata(ha_statistics *stats, const char *table_name,
+ uint table_name_length, uint flag);
+
+ /* resultset operations */
+
+ virtual void free_result(FEDERATEDX_IO_RESULT *io_result);
+ virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result);
+ virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result);
+ virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result);
+ virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result);
+ virtual const char *get_column_data(FEDERATEDX_IO_ROW *row,
+ unsigned int column);
+ virtual bool is_column_null(const FEDERATEDX_IO_ROW *row,
+ unsigned int column) const;
+};
+
+
+federatedx_io *instantiate_io_mysql(MEM_ROOT *server_root,
+ FEDERATEDX_SERVER *server)
+{
+ return new (server_root) federatedx_io_mysql(server);
+}
+
+
+federatedx_io_mysql::federatedx_io_mysql(FEDERATEDX_SERVER *aserver)
+ : federatedx_io(aserver),
+ requested_autocommit(TRUE), actual_autocommit(TRUE)
+{
+ DBUG_ENTER("federatedx_io_mysql::federatedx_io_mysql");
+
+ bzero(&mysql, sizeof(MYSQL));
+ bzero(&savepoints, sizeof(DYNAMIC_ARRAY));
+
+ my_init_dynamic_array(&savepoints, sizeof(SAVEPT), 16, 16);
+
+ DBUG_VOID_RETURN;
+}
+
+
+federatedx_io_mysql::~federatedx_io_mysql()
+{
+ DBUG_ENTER("federatedx_io_mysql::~federatedx_io_mysql");
+
+ mysql_close(&mysql);
+ delete_dynamic(&savepoints);
+
+ DBUG_VOID_RETURN;
+}
+
+
+void federatedx_io_mysql::reset()
+{
+ reset_dynamic(&savepoints);
+ set_active(FALSE);
+
+ requested_autocommit= TRUE;
+ mysql.reconnect= 1;
+}
+
+
+int federatedx_io_mysql::commit()
+{
+ int error= 0;
+ DBUG_ENTER("federatedx_io_mysql::commit");
+
+ if (!actual_autocommit && (error= actual_query("COMMIT", 6)))
+ rollback();
+
+ reset();
+
+ DBUG_RETURN(error);
+}
+
+int federatedx_io_mysql::rollback()
+{
+ int error= 0;
+ DBUG_ENTER("federatedx_io_mysql::rollback");
+
+ if (!actual_autocommit)
+ error= actual_query("ROLLBACK", 8);
+ else
+ error= ER_WARNING_NOT_COMPLETE_ROLLBACK;
+
+ reset();
+
+ DBUG_RETURN(error);
+}
+
+
+ulong federatedx_io_mysql::last_savepoint() const
+{
+ SAVEPT *savept= NULL;
+ DBUG_ENTER("federatedx_io_mysql::last_savepoint");
+
+ if (savepoints.elements)
+ savept= dynamic_element(&savepoints, savepoints.elements - 1, SAVEPT *);
+
+ DBUG_RETURN(savept ? savept->level : 0);
+}
+
+
+ulong federatedx_io_mysql::actual_savepoint() const
+{
+ SAVEPT *savept= NULL;
+ uint index= savepoints.elements;
+ DBUG_ENTER("federatedx_io_mysql::last_savepoint");
+
+ while (index)
+ {
+ savept= dynamic_element(&savepoints, --index, SAVEPT *);
+ if (savept->flags & SAVEPOINT_REALIZED)
+ break;
+ savept= NULL;
+ }
+
+ DBUG_RETURN(savept ? savept->level : 0);
+}
+
+bool federatedx_io_mysql::is_autocommit() const
+{
+ return actual_autocommit;
+}
+
+
+int federatedx_io_mysql::savepoint_set(ulong sp)
+{
+ int error;
+ SAVEPT savept;
+ DBUG_ENTER("federatedx_io_mysql::savepoint_set");
+ DBUG_PRINT("info",("savepoint=%lu", sp));
+ DBUG_ASSERT(sp > last_savepoint());
+
+ savept.level= sp;
+ savept.flags= 0;
+
+ if ((error= insert_dynamic(&savepoints, (uchar*) &savept) ? -1 : 0))
+ goto err;
+
+ set_active(TRUE);
+ mysql.reconnect= 0;
+ requested_autocommit= FALSE;
+
+err:
+ DBUG_RETURN(error);
+}
+
+
+ulong federatedx_io_mysql::savepoint_release(ulong sp)
+{
+ SAVEPT *savept, *last= NULL;
+ DBUG_ENTER("federatedx_io_mysql::savepoint_release");
+ DBUG_PRINT("info",("savepoint=%lu", sp));
+
+ while (savepoints.elements)
+ {
+ savept= dynamic_element(&savepoints, savepoints.elements - 1, SAVEPT *);
+ if (savept->level < sp)
+ break;
+ if ((savept->flags & (SAVEPOINT_REALIZED |
+ SAVEPOINT_RESTRICT)) == SAVEPOINT_REALIZED)
+ last= savept;
+ savepoints.elements--;
+ }
+
+ if (last)
+ {
+ char buffer[STRING_BUFFER_USUAL_SIZE];
+ int length= my_snprintf(buffer, sizeof(buffer),
+ "RELEASE SAVEPOINT save%lu", last->level);
+ actual_query(buffer, length);
+ }
+
+ DBUG_RETURN(last_savepoint());
+}
+
+
+ulong federatedx_io_mysql::savepoint_rollback(ulong sp)
+{
+ SAVEPT *savept;
+ uint index;
+ DBUG_ENTER("federatedx_io_mysql::savepoint_release");
+ DBUG_PRINT("info",("savepoint=%lu", sp));
+
+ while (savepoints.elements)
+ {
+ savept= dynamic_element(&savepoints, savepoints.elements - 1, SAVEPT *);
+ if (savept->level <= sp)
+ break;
+ savepoints.elements--;
+ }
+
+ for (index= savepoints.elements, savept= NULL; index;)
+ {
+ savept= dynamic_element(&savepoints, --index, SAVEPT *);
+ if (savept->flags & SAVEPOINT_REALIZED)
+ break;
+ savept= NULL;
+ }
+
+ if (savept && !(savept->flags & SAVEPOINT_RESTRICT))
+ {
+ char buffer[STRING_BUFFER_USUAL_SIZE];
+ int length= my_snprintf(buffer, sizeof(buffer),
+ "ROLLBACK TO SAVEPOINT save%lu", savept->level);
+ actual_query(buffer, length);
+ }
+
+ DBUG_RETURN(last_savepoint());
+}
+
+
+void federatedx_io_mysql::savepoint_restrict(ulong sp)
+{
+ SAVEPT *savept;
+ uint index= savepoints.elements;
+ DBUG_ENTER("federatedx_io_mysql::savepoint_restrict");
+
+ while (index)
+ {
+ savept= dynamic_element(&savepoints, --index, SAVEPT *);
+ if (savept->level > sp)
+ continue;
+ if (savept->level < sp)
+ break;
+ savept->flags|= SAVEPOINT_RESTRICT;
+ break;
+ }
+
+ DBUG_VOID_RETURN;
+}
+
+
+int federatedx_io_mysql::simple_query(const char *fmt, ...)
+{
+ char buffer[STRING_BUFFER_USUAL_SIZE];
+ int length, error;
+ va_list arg;
+ DBUG_ENTER("federatedx_io_mysql::simple_query");
+
+ va_start(arg, fmt);
+ length= my_vsnprintf(buffer, sizeof(buffer), fmt, arg);
+ va_end(arg);
+
+ error= query(buffer, length);
+
+ DBUG_RETURN(error);
+}
+
+
+bool federatedx_io_mysql::test_all_restrict() const
+{
+ bool result= FALSE;
+ SAVEPT *savept;
+ uint index= savepoints.elements;
+ DBUG_ENTER("federatedx_io_mysql::test_all_restrict");
+
+ while (index)
+ {
+ savept= dynamic_element(&savepoints, --index, SAVEPT *);
+ if ((savept->flags & (SAVEPOINT_REALIZED |
+ SAVEPOINT_RESTRICT)) == SAVEPOINT_REALIZED ||
+ (savept->flags & SAVEPOINT_EMITTED))
+ DBUG_RETURN(FALSE);
+ if (savept->flags & SAVEPOINT_RESTRICT)
+ result= TRUE;
+ }
+
+ DBUG_RETURN(result);
+}
+
+
+int federatedx_io_mysql::query(const char *buffer, uint length)
+{
+ int error;
+ bool wants_autocommit= requested_autocommit | is_readonly();
+ DBUG_ENTER("federatedx_io_mysql::query");
+
+ if (!wants_autocommit && test_all_restrict())
+ wants_autocommit= TRUE;
+
+ if (wants_autocommit != actual_autocommit)
+ {
+ if ((error= actual_query(wants_autocommit ? "SET AUTOCOMMIT=1"
+ : "SET AUTOCOMMIT=0", 16)))
+ DBUG_RETURN(error);
+ mysql.reconnect= wants_autocommit ? 1 : 0;
+ actual_autocommit= wants_autocommit;
+ }
+
+ if (!actual_autocommit && last_savepoint() != actual_savepoint())
+ {
+ SAVEPT *savept= dynamic_element(&savepoints, savepoints.elements - 1,
+ SAVEPT *);
+ if (!(savept->flags & SAVEPOINT_RESTRICT))
+ {
+ char buf[STRING_BUFFER_USUAL_SIZE];
+ int len= my_snprintf(buf, sizeof(buf),
+ "SAVEPOINT save%lu", savept->level);
+ if ((error= actual_query(buf, len)))
+ DBUG_RETURN(error);
+ set_active(TRUE);
+ savept->flags|= SAVEPOINT_EMITTED;
+ }
+ savept->flags|= SAVEPOINT_REALIZED;
+ }
+
+ if (!(error= actual_query(buffer, length)))
+ set_active(is_active() || !actual_autocommit);
+
+ DBUG_RETURN(error);
+}
+
+
+int federatedx_io_mysql::actual_query(const char *buffer, uint length)
+{
+ int error;
+ DBUG_ENTER("federatedx_io_mysql::actual_query");
+
+ if (!mysql.master)
+ {
+ if (!(mysql_init(&mysql)))
+ DBUG_RETURN(-1);
+
+ /*
+ BUG# 17044 Federated Storage Engine is not UTF8 clean
+ Add set names to whatever charset the table is at open
+ of table
+ */
+ /* this sets the csname like 'set names utf8' */
+ mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, get_charsetname());
+
+ if (!mysql_real_connect(&mysql,
+ get_hostname(),
+ get_username(),
+ get_password(),
+ get_database(),
+ get_port(),
+ get_socket(), 0))
+ DBUG_RETURN(ER_CONNECT_TO_FOREIGN_DATA_SOURCE);
+ mysql.reconnect= 1;
+ }
+
+ error= mysql_real_query(&mysql, buffer, length);
+
+ DBUG_RETURN(error);
+}
+
+size_t federatedx_io_mysql::max_query_size() const
+{
+ return mysql.net.max_packet_size;
+}
+
+
+my_ulonglong federatedx_io_mysql::affected_rows() const
+{
+ return mysql.affected_rows;
+}
+
+
+my_ulonglong federatedx_io_mysql::last_insert_id() const
+{
+ return mysql.last_used_con->insert_id;
+}
+
+
+int federatedx_io_mysql::error_code()
+{
+ return mysql_errno(&mysql);
+}
+
+
+const char *federatedx_io_mysql::error_str()
+{
+ return mysql_error(&mysql);
+}
+
+
+FEDERATEDX_IO_RESULT *federatedx_io_mysql::store_result()
+{
+ FEDERATEDX_IO_RESULT *result;
+ DBUG_ENTER("federatedx_io_mysql::store_result");
+
+ result= (FEDERATEDX_IO_RESULT *) mysql_store_result(&mysql);
+
+ DBUG_RETURN(result);
+}
+
+
+void federatedx_io_mysql::free_result(FEDERATEDX_IO_RESULT *io_result)
+{
+ mysql_free_result((MYSQL_RES *) io_result);
+}
+
+
+unsigned int federatedx_io_mysql::get_num_fields(FEDERATEDX_IO_RESULT *io_result)
+{
+ return mysql_num_fields((MYSQL_RES *) io_result);
+}
+
+
+my_ulonglong federatedx_io_mysql::get_num_rows(FEDERATEDX_IO_RESULT *io_result)
+{
+ return mysql_num_rows((MYSQL_RES *) io_result);
+}
+
+
+FEDERATEDX_IO_ROW *federatedx_io_mysql::fetch_row(FEDERATEDX_IO_RESULT *io_result)
+{
+ return (FEDERATEDX_IO_ROW *) mysql_fetch_row((MYSQL_RES *) io_result);
+}
+
+
+ulong *federatedx_io_mysql::fetch_lengths(FEDERATEDX_IO_RESULT *io_result)
+{
+ return mysql_fetch_lengths((MYSQL_RES *) io_result);
+}
+
+
+const char *federatedx_io_mysql::get_column_data(FEDERATEDX_IO_ROW *row,
+ unsigned int column)
+{
+ return ((MYSQL_ROW)row)[column];
+}
+
+
+bool federatedx_io_mysql::is_column_null(const FEDERATEDX_IO_ROW *row,
+ unsigned int column) const
+{
+ return !((MYSQL_ROW)row)[column];
+}
+
+bool federatedx_io_mysql::table_metadata(ha_statistics *stats,
+ const char *table_name,
+ uint table_name_length, uint flag)
+{
+ char status_buf[FEDERATEDX_QUERY_BUFFER_SIZE];
+ FEDERATEDX_IO_RESULT *result= 0;
+ FEDERATEDX_IO_ROW *row;
+ String status_query_string(status_buf, sizeof(status_buf), &my_charset_bin);
+ int error;
+
+ status_query_string.length(0);
+ status_query_string.append(STRING_WITH_LEN("SHOW TABLE STATUS LIKE "));
+ append_ident(&status_query_string, table_name,
+ table_name_length, value_quote_char);
+
+ if (query(status_query_string.ptr(), status_query_string.length()))
+ goto error;
+
+ status_query_string.length(0);
+
+ result= store_result();
+
+ /*
+ We're going to use fields num. 4, 12 and 13 of the resultset,
+ so make sure we have these fields.
+ */
+ if (!result || (get_num_fields(result) < 14))
+ goto error;
+
+ if (!get_num_rows(result))
+ goto error;
+
+ if (!(row= fetch_row(result)))
+ goto error;
+
+ /*
+ deleted is set in ha_federatedx::info
+ */
+ /*
+ need to figure out what this means as far as federatedx is concerned,
+ since we don't have a "file"
+
+ data_file_length = ?
+ index_file_length = ?
+ delete_length = ?
+ */
+ if (!is_column_null(row, 4))
+ stats->records= (ha_rows) my_strtoll10(get_column_data(row, 4),
+ (char**) 0, &error);
+ if (!is_column_null(row, 5))
+ stats->mean_rec_length= (ulong) my_strtoll10(get_column_data(row, 5),
+ (char**) 0, &error);
+
+ stats->data_file_length= stats->records * stats->mean_rec_length;
+
+ if (!is_column_null(row, 12))
+ stats->update_time= (time_t) my_strtoll10(get_column_data(row, 12),
+ (char**) 0, &error);
+ if (!is_column_null(row, 13))
+ stats->check_time= (time_t) my_strtoll10(get_column_data(row, 13),
+ (char**) 0, &error);
+
+ free_result(result);
+ return 0;
+
+error:
+ free_result(result);
+ return 1;
+}
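
The class above wraps the MySQL client library behind the generic federatedx_io result-set interface (query(), store_result(), fetch_row(), get_column_data(), free_result() and friends). A hypothetical consumer of that interface, shown only to illustrate the calling sequence (the real consumers are in ha_federatedx.cc), could look like this:

  /* Hypothetical, illustration only: run one statement on the remote server
     and print the first column of every row of its result set. */
  static int dump_first_column(federatedx_io *io, const char *sql, uint length)
  {
    FEDERATEDX_IO_RESULT *res;
    FEDERATEDX_IO_ROW *row;

    if (io->query(sql, length))
      return io->error_code();          /* remote or connection error */

    if (!(res= io->store_result()))
      return io->error_code();          /* statement returned no result set */

    while ((row= io->fetch_row(res)))
    {
      const char *val= io->is_column_null(row, 0) ? "NULL"
                                                  : io->get_column_data(row, 0);
      fprintf(stderr, "%s\n", val);
    }
    io->free_result(res);
    return 0;
  }
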
=== added file 'storage/federatedx/federatedx_io_null.cc'
--- a/storage/federatedx/federatedx_io_null.cc 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/federatedx_io_null.cc 2009-10-30 18:50:56 +0000
@@ -0,0 +1,277 @@
+/*
+Copyright (c) 2007, Antony T Curtis
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+ * Neither the name of FederatedX nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+/*#define MYSQL_SERVER 1*/
+#include "mysql_priv.h"
+#include <mysql/plugin.h>
+
+#include "ha_federatedx.h"
+
+#include "m_string.h"
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+
+#define SAVEPOINT_REALIZED 1
+#define SAVEPOINT_RESTRICT 2
+#define SAVEPOINT_EMITTED 4
+
+
+typedef struct federatedx_savepoint
+{
+ ulong level;
+ uint flags;
+} SAVEPT;
+
+
+class federatedx_io_null :public federatedx_io
+{
+public:
+ federatedx_io_null(FEDERATEDX_SERVER *);
+ ~federatedx_io_null();
+
+ int query(const char *buffer, uint length);
+ virtual FEDERATEDX_IO_RESULT *store_result();
+
+ virtual size_t max_query_size() const;
+
+ virtual my_ulonglong affected_rows() const;
+ virtual my_ulonglong last_insert_id() const;
+
+ virtual int error_code();
+ virtual const char *error_str();
+
+ void reset();
+ int commit();
+ int rollback();
+
+ int savepoint_set(ulong sp);
+ ulong savepoint_release(ulong sp);
+ ulong savepoint_rollback(ulong sp);
+ void savepoint_restrict(ulong sp);
+
+ ulong last_savepoint() const;
+ ulong actual_savepoint() const;
+ bool is_autocommit() const;
+
+ bool table_metadata(ha_statistics *stats, const char *table_name,
+ uint table_name_length, uint flag);
+
+ /* resultset operations */
+
+ virtual void free_result(FEDERATEDX_IO_RESULT *io_result);
+ virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result);
+ virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result);
+ virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result);
+ virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result);
+ virtual const char *get_column_data(FEDERATEDX_IO_ROW *row,
+ unsigned int column);
+ virtual bool is_column_null(const FEDERATEDX_IO_ROW *row,
+ unsigned int column) const;
+};
+
+
+federatedx_io *instantiate_io_null(MEM_ROOT *server_root,
+ FEDERATEDX_SERVER *server)
+{
+ return new (server_root) federatedx_io_null(server);
+}
+
+
+federatedx_io_null::federatedx_io_null(FEDERATEDX_SERVER *aserver)
+ : federatedx_io(aserver)
+{
+}
+
+
+federatedx_io_null::~federatedx_io_null()
+{
+}
+
+
+void federatedx_io_null::reset()
+{
+}
+
+
+int federatedx_io_null::commit()
+{
+ return 0;
+}
+
+int federatedx_io_null::rollback()
+{
+ return 0;
+}
+
+
+ulong federatedx_io_null::last_savepoint() const
+{
+ return 0;
+}
+
+
+ulong federatedx_io_null::actual_savepoint() const
+{
+ return 0;
+}
+
+bool federatedx_io_null::is_autocommit() const
+{
+ return 0;
+}
+
+
+int federatedx_io_null::savepoint_set(ulong sp)
+{
+ return 0;
+}
+
+
+ulong federatedx_io_null::savepoint_release(ulong sp)
+{
+ return 0;
+}
+
+
+ulong federatedx_io_null::savepoint_rollback(ulong sp)
+{
+ return 0;
+}
+
+
+void federatedx_io_null::savepoint_restrict(ulong sp)
+{
+}
+
+
+int federatedx_io_null::query(const char *buffer, uint length)
+{
+ return 0;
+}
+
+
+size_t federatedx_io_null::max_query_size() const
+{
+ return INT_MAX;
+}
+
+
+my_ulonglong federatedx_io_null::affected_rows() const
+{
+ return 0;
+}
+
+
+my_ulonglong federatedx_io_null::last_insert_id() const
+{
+ return 0;
+}
+
+
+int federatedx_io_null::error_code()
+{
+ return 0;
+}
+
+
+const char *federatedx_io_null::error_str()
+{
+ return "";
+}
+
+
+FEDERATEDX_IO_RESULT *federatedx_io_null::store_result()
+{
+ FEDERATEDX_IO_RESULT *result;
+ DBUG_ENTER("federatedx_io_null::store_result");
+
+ result= NULL;
+
+ DBUG_RETURN(result);
+}
+
+
+void federatedx_io_null::free_result(FEDERATEDX_IO_RESULT *)
+{
+}
+
+
+unsigned int federatedx_io_null::get_num_fields(FEDERATEDX_IO_RESULT *)
+{
+ return 0;
+}
+
+
+my_ulonglong federatedx_io_null::get_num_rows(FEDERATEDX_IO_RESULT *)
+{
+ return 0;
+}
+
+
+FEDERATEDX_IO_ROW *federatedx_io_null::fetch_row(FEDERATEDX_IO_RESULT *)
+{
+ return NULL;
+}
+
+
+ulong *federatedx_io_null::fetch_lengths(FEDERATEDX_IO_RESULT *)
+{
+ return NULL;
+}
+
+
+const char *federatedx_io_null::get_column_data(FEDERATEDX_IO_ROW *,
+ unsigned int)
+{
+ return "";
+}
+
+
+bool federatedx_io_null::is_column_null(const FEDERATEDX_IO_ROW *,
+ unsigned int) const
+{
+ return true;
+}
+
+bool federatedx_io_null::table_metadata(ha_statistics *stats,
+ const char *table_name,
+ uint table_name_length, uint flag)
+{
+ stats->records= (ha_rows) 0;
+ stats->mean_rec_length= (ulong) 0;
+ stats->data_file_length= 0;
+
+ stats->update_time= (time_t) 0;
+ stats->check_time= (time_t) 0;
+
+ return 0;
+}
=== added file 'storage/federatedx/federatedx_probes.h'
--- a/storage/federatedx/federatedx_probes.h 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/federatedx_probes.h 2009-10-30 18:50:56 +0000
@@ -0,0 +1,45 @@
+/*
+ * Generated by dtrace(1M).
+ */
+
+#ifndef _FEDERATED_PROBES_H
+#define _FEDERATED_PROBES_H
+
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if _DTRACE_VERSION
+
+#define FEDERATED_CLOSE() \
+ __dtrace_federated___close()
+#define FEDERATED_CLOSE_ENABLED() \
+ __dtraceenabled_federated___close()
+#define FEDERATED_OPEN() \
+ __dtrace_federated___open()
+#define FEDERATED_OPEN_ENABLED() \
+ __dtraceenabled_federated___open()
+
+
+extern void __dtrace_federated___close(void);
+extern int __dtraceenabled_federated___close(void);
+extern void __dtrace_federated___open(void);
+extern int __dtraceenabled_federated___open(void);
+
+#else
+
+#define FEDERATED_CLOSE()
+#define FEDERATED_CLOSE_ENABLED() (0)
+#define FEDERATED_OPEN()
+#define FEDERATED_OPEN_ENABLED() (0)
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FEDERATED_PROBES_H */
=== added file 'storage/federatedx/federatedx_txn.cc'
--- a/storage/federatedx/federatedx_txn.cc 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/federatedx_txn.cc 2009-10-30 18:50:56 +0000
@@ -0,0 +1,424 @@
+/*
+Copyright (c) 2007, Antony T Curtis
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+ * Neither the name of FederatedX nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+/*#define MYSQL_SERVER 1*/
+#include "mysql_priv.h"
+#include <mysql/plugin.h>
+
+#include "ha_federatedx.h"
+
+#include "m_string.h"
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+
+federatedx_txn::federatedx_txn()
+ : txn_list(0), savepoint_level(0), savepoint_stmt(0), savepoint_next(0)
+{
+ DBUG_ENTER("federatedx_txn::federatedx_txn");
+ DBUG_VOID_RETURN;
+}
+
+federatedx_txn::~federatedx_txn()
+{
+ DBUG_ENTER("federatedx_txn::~federatedx_txn");
+ DBUG_ASSERT(!txn_list);
+ DBUG_VOID_RETURN;
+}
+
+
+void federatedx_txn::close(FEDERATEDX_SERVER *server)
+{
+ uint count= 0;
+ federatedx_io *io, **iop;
+ DBUG_ENTER("federatedx_txn::close");
+
+ DBUG_ASSERT(!server->use_count);
+ DBUG_PRINT("info",("use count: %u connections: %u",
+ server->use_count, server->io_count));
+
+ for (iop= &txn_list; (io= *iop);)
+ {
+ if (io->server != server)
+ iop= &io->txn_next;
+ else
+ {
+ *iop= io->txn_next;
+ io->txn_next= NULL;
+ io->busy= FALSE;
+
+ io->idle_next= server->idle_list;
+ server->idle_list= io;
+ }
+ }
+
+ while ((io= server->idle_list))
+ {
+ server->idle_list= io->idle_next;
+ delete io;
+ count++;
+ }
+
+ DBUG_PRINT("info",("closed %u connections, txn_list: %s", count,
+ txn_list ? "active": "empty"));
+ DBUG_VOID_RETURN;
+}
+
+
+int federatedx_txn::acquire(FEDERATEDX_SHARE *share, bool readonly,
+ federatedx_io **ioptr)
+{
+ federatedx_io *io;
+ FEDERATEDX_SERVER *server= share->s;
+ DBUG_ENTER("federatedx_txn::acquire");
+ DBUG_ASSERT(ioptr && server);
+
+ if (!(io= *ioptr))
+ {
+ /* check to see if we have an available IO connection */
+ for (io= txn_list; io; io= io->txn_next)
+ if (io->server == server)
+ break;
+
+ if (!io)
+ {
+ /* check to see if there are any unowned IO connections */
+ pthread_mutex_lock(&server->mutex);
+ if ((io= server->idle_list))
+ {
+ server->idle_list= io->idle_next;
+ io->idle_next= NULL;
+ }
+ else
+ io= federatedx_io::construct(&server->mem_root, server);
+
+ io->txn_next= txn_list;
+ txn_list= io;
+
+ pthread_mutex_unlock(&server->mutex);
+ }
+
+ if (io->busy)
+ *io->owner_ptr= NULL;
+
+ io->busy= TRUE;
+ io->owner_ptr= ioptr;
+ }
+
+ DBUG_ASSERT(io->busy && io->server == server);
+
+ io->readonly&= readonly;
+
+ DBUG_RETURN((*ioptr= io) ? 0 : -1);
+}
+
+
+void federatedx_txn::release(federatedx_io **ioptr)
+{
+ federatedx_io *io;
+ DBUG_ENTER("federatedx_txn::release");
+ DBUG_ASSERT(ioptr);
+
+ if ((io= *ioptr))
+ {
+ /* mark as available for reuse in this transaction */
+ io->busy= FALSE;
+ *ioptr= NULL;
+
+ DBUG_PRINT("info", ("active: %d autocommit: %d",
+ io->active, io->is_autocommit()));
+
+ if (io->is_autocommit())
+ io->active= FALSE;
+ }
+
+ release_scan();
+
+ DBUG_VOID_RETURN;
+}
+
+
+void federatedx_txn::release_scan()
+{
+ uint count= 0, returned= 0;
+ federatedx_io *io, **pio;
+ DBUG_ENTER("federatedx_txn::release_scan");
+
+ /* return any inactive and idle connections to the server */
+ for (pio= &txn_list; (io= *pio); count++)
+ {
+ if (io->active || io->busy)
+ pio= &io->txn_next;
+ else
+ {
+ FEDERATEDX_SERVER *server= io->server;
+
+ /* unlink from list of connections bound to the transaction */
+ *pio= io->txn_next;
+ io->txn_next= NULL;
+
+ /* reset some values */
+ io->readonly= TRUE;
+
+ pthread_mutex_lock(&server->mutex);
+ io->idle_next= server->idle_list;
+ server->idle_list= io;
+ pthread_mutex_unlock(&server->mutex);
+ returned++;
+ }
+ }
+ DBUG_PRINT("info",("returned %u of %u connections(s)", returned, count));
+
+ DBUG_VOID_RETURN;
+}
+
+
+bool federatedx_txn::txn_begin()
+{
+ ulong level= 0;
+ DBUG_ENTER("federatedx_txn::txn_begin");
+
+ if (savepoint_next == 0)
+ {
+ savepoint_next++;
+ savepoint_level= savepoint_stmt= 0;
+ sp_acquire(&level);
+ }
+
+ DBUG_RETURN(level == 1);
+}
+
+
+int federatedx_txn::txn_commit()
+{
+ int error= 0;
+ federatedx_io *io;
+ DBUG_ENTER("federatedx_txn::txn_commit");
+
+ if (savepoint_next)
+ {
+ DBUG_ASSERT(savepoint_stmt != 1);
+
+ for (io= txn_list; io; io= io->txn_next)
+ {
+ int rc= 0;
+
+ if (io->active)
+ rc= io->commit();
+ else
+ io->rollback();
+
+ if (io->active && rc)
+ error= -1;
+
+ io->reset();
+ }
+
+ release_scan();
+
+ savepoint_next= savepoint_stmt= savepoint_level= 0;
+ }
+
+ DBUG_RETURN(error);
+}
+
+
+int federatedx_txn::txn_rollback()
+{
+ int error= 0;
+ federatedx_io *io;
+ DBUG_ENTER("federatedx_txn::txn_commit");
+
+ if (savepoint_next)
+ {
+ DBUG_ASSERT(savepoint_stmt != 1);
+
+ for (io= txn_list; io; io= io->txn_next)
+ {
+ int rc= io->rollback();
+
+ if (io->active && rc)
+ error= -1;
+
+ io->reset();
+ }
+
+ release_scan();
+
+ savepoint_next= savepoint_stmt= savepoint_level= 0;
+ }
+
+ DBUG_RETURN(error);
+}
+
+
+bool federatedx_txn::sp_acquire(ulong *sp)
+{
+ bool rc= FALSE;
+ federatedx_io *io;
+ DBUG_ENTER("federatedx_txn::sp_acquire");
+ DBUG_ASSERT(sp && savepoint_next);
+
+ *sp= savepoint_level= savepoint_next++;
+
+ for (io= txn_list; io; io= io->txn_next)
+ {
+ if (io->readonly)
+ continue;
+
+ io->savepoint_set(savepoint_level);
+ rc= TRUE;
+ }
+
+ DBUG_RETURN(rc);
+}
+
+
+int federatedx_txn::sp_rollback(ulong *sp)
+{
+ ulong level, new_level= savepoint_level;
+ federatedx_io *io;
+ DBUG_ENTER("federatedx_txn::sp_rollback");
+ DBUG_ASSERT(sp && savepoint_next && *sp && *sp <= savepoint_level);
+
+ for (io= txn_list; io; io= io->txn_next)
+ {
+ if (io->readonly)
+ continue;
+
+ if ((level= io->savepoint_rollback(*sp)) < new_level)
+ new_level= level;
+ }
+
+ savepoint_level= new_level;
+
+ DBUG_RETURN(0);
+}
+
+
+int federatedx_txn::sp_release(ulong *sp)
+{
+ ulong level, new_level= savepoint_level;
+ federatedx_io *io;
+ DBUG_ENTER("federatedx_txn::sp_release");
+ DBUG_ASSERT(sp && savepoint_next && *sp && *sp <= savepoint_level);
+
+ for (io= txn_list; io; io= io->txn_next)
+ {
+ if (io->readonly)
+ continue;
+
+ if ((level= io->savepoint_release(*sp)) < new_level)
+ new_level= level;
+ }
+
+ savepoint_level= new_level;
+ *sp= 0;
+
+ DBUG_RETURN(0);
+}
+
+
+bool federatedx_txn::stmt_begin()
+{
+ bool result= FALSE;
+ DBUG_ENTER("federatedx_txn::stmt_begin");
+
+ if (!savepoint_stmt)
+ {
+ if (!savepoint_next)
+ {
+ savepoint_next++;
+ savepoint_level= savepoint_stmt= 0;
+ }
+ result= sp_acquire(&savepoint_stmt);
+ }
+
+ DBUG_RETURN(result);
+}
+
+
+int federatedx_txn::stmt_commit()
+{
+ int result= 0;
+ DBUG_ENTER("federatedx_txn::stmt_commit");
+
+ if (savepoint_stmt == 1)
+ {
+ savepoint_stmt= 0;
+ result= txn_commit();
+ }
+ else
+ if (savepoint_stmt)
+ result= sp_release(&savepoint_stmt);
+
+ DBUG_RETURN(result);
+}
+
+
+int federatedx_txn::stmt_rollback()
+{
+ int result= 0;
+ DBUG_ENTER("federated:txn::stmt_rollback");
+
+ if (savepoint_stmt == 1)
+ {
+ savepoint_stmt= 0;
+ result= txn_rollback();
+ }
+ else
+ if (savepoint_stmt)
+ {
+ result= sp_rollback(&savepoint_stmt);
+ sp_release(&savepoint_stmt);
+ }
+
+ DBUG_RETURN(result);
+}
+
+
+void federatedx_txn::stmt_autocommit()
+{
+ federatedx_io *io;
+ DBUG_ENTER("federatedx_txn::stmt_autocommit");
+
+ for (io= txn_list; savepoint_stmt && io; io= io->txn_next)
+ {
+ if (io->readonly)
+ continue;
+
+ io->savepoint_restrict(savepoint_stmt);
+ }
+
+ DBUG_VOID_RETURN;
+}
+
+
=== added file 'storage/federatedx/ha_federatedx.cc'
--- a/storage/federatedx/ha_federatedx.cc 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/ha_federatedx.cc 2009-11-03 11:08:09 +0000
@@ -0,0 +1,3493 @@
+/*
+Copyright (c) 2008, Patrick Galbraith
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+ * Neither the name of Patrick Galbraith nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/*
+
+ FederatedX Pluggable Storage Engine
+
+ ha_federatedx.cc - FederatedX Pluggable Storage Engine
+ Patrick Galbraith, 2008
+
+ This is a handler which uses a foreign database as the data file, as
+ opposed to a handler like MyISAM, which uses .MYD files locally.
+
+ How this handler works
+ ----------------------------------
+ Normal database files are local: you create a table called 'users' and a
+ file such as 'users.MYD' is created. A handler reads, inserts, deletes and
+ updates data in this file. The data is stored in a particular format, so to
+ read, that data has to be parsed into fields, and to write, fields have to
+ be stored in this format in the data file.
+
+ With FederatedX storage engine, there will be no local files
+ for each table's data (such as .MYD). A foreign database will store
+ the data that would normally be in this file. This will necessitate
+ the use of the MySQL client API to read, delete, update and insert this
+ data. The data will have to be retrieved via an SQL call "SELECT *
+ FROM users". Then, to read this data, it will have to be fetched
+ via mysql_fetch_row one row at a time, and then converted from the
+ columns in this select into the format that the handler expects.
+
+ The create table will simply create the .frm file, and within the
+ "CREATE TABLE" SQL, there SHALL be any of the following :
+
+ connection=scheme://username:password@hostname:port/database/tablename
+ connection=scheme://username@hostname/database/tablename
+ connection=scheme://username:password@hostname/database/tablename
+ connection=scheme://username:password@hostname/database/tablename
+
+ - OR -
+
+ As of 5.1 federatedx now allows you to use a non-url
+ format, taking advantage of mysql.servers:
+
+ connection="connection_one"
+ connection="connection_one/table_foo"
+
+ An example would be:
+
+ connection=mysql://username:password@hostname:port/database/tablename
+
+ or, if we had:
+
+ create server 'server_one' foreign data wrapper 'mysql' options
+ (HOST '127.0.0.1',
+ DATABASE 'db1',
+ USER 'root',
+ PASSWORD '',
+ PORT 3306,
+ SOCKET '',
+ OWNER 'root');
+
+ CREATE TABLE federatedx.t1 (
+ `id` int(20) NOT NULL,
+ `name` varchar(64) NOT NULL default ''
+ )
+ ENGINE="FEDERATEDX" DEFAULT CHARSET=latin1
+ CONNECTION='server_one';
+
+ So, this will be the equivalent of
+
+ CONNECTION="mysql://root@127.0.0.1:3306/db1/t1"
+
+ Then, we can also change the server to point to a new schema:
+
+ ALTER SERVER 'server_one' options(DATABASE 'db2');
+
+ All subsequent calls will now be against db2.t1! Guess what? You don't
+ have to perform an alter table!
+
+ This connection="connection string" is necessary for the handler to be
+ able to connect to the foreign server, either by URL, or by server
+ name.
+
+
+ The basic flow is this:
+
+ SQL calls issued locally ->
+ mysql handler API (data in handler format) ->
+ mysql client API (data converted to SQL calls) ->
+ foreign database -> mysql client API ->
+ convert result sets (if any) to handler format ->
+ handler API -> results or rows affected to local
+
+ What this handler does and doesn't support
+ ------------------------------------------
+ * Tables MUST be created on the foreign server prior to any action on those
+ tables via the handler (first version). IMPORTANT: IF you MUST use the
+ federatedx storage engine type on the REMOTE end, MAKE SURE [ :) ] that
+ the table you connect to IS NOT a table pointing BACK to your ORIGINAL
+ table! You know the screeching of audio feedback? You know how, when you
+ put two mirrors in front of each other, the reflection continues for
+ eternity? Well, need I say more?!
+ * There will not be support for transactions.
+ * There is no way for the handler to know if the foreign database or table
+ has changed. The reason for this is that this database has to work like a
+ data file that would never be written to by anything other than the
+ database. The integrity of the data in the local table could be breached
+ if there was any change to the foreign database.
+ * Support for SELECT, INSERT, UPDATE, DELETE and indexes.
+ * No ALTER TABLE, DROP TABLE or any other Data Definition Language calls.
+ * Prepared statements will not be used in the first implementation; it
+ remains to be seen whether the limited subset of the client API for the
+ server supports this.
+ * This uses SELECT, INSERT, UPDATE, DELETE and not HANDLER for its
+ implementation.
+ * This will not work with the query cache.
+
+ Method calls
+
+ A two column table, with one record:
+
+ (SELECT)
+
+ "SELECT * FROM foo"
+ ha_federatedx::info
+ ha_federatedx::scan_time:
+ ha_federatedx::rnd_init: share->select_query SELECT * FROM foo
+ ha_federatedx::extra
+
+ <for every row of data retrieved>
+ ha_federatedx::rnd_next
+ ha_federatedx::convert_row_to_internal_format
+ ha_federatedx::rnd_next
+ </for every row of data retrieved>
+
+ ha_federatedx::rnd_end
+ ha_federatedx::extra
+ ha_federatedx::reset
+
+ (INSERT)
+
+ "INSERT INTO foo (id, ts) VALUES (2, now());"
+
+ ha_federatedx::write_row
+
+ ha_federatedx::reset
+
+ (UPDATE)
+
+ "UPDATE foo SET ts = now() WHERE id = 1;"
+
+ ha_federatedx::index_init
+ ha_federatedx::index_read
+ ha_federatedx::index_read_idx
+ ha_federatedx::rnd_next
+ ha_federatedx::convert_row_to_internal_format
+ ha_federatedx::update_row
+
+ ha_federatedx::extra
+ ha_federatedx::extra
+ ha_federatedx::extra
+ ha_federatedx::external_lock
+ ha_federatedx::reset
+
+
+ How do I use this handler?
+ --------------------------
+
+ <insert text about plugin storage engine>
+
+ Next, to use this handler, it's very simple. You must
+ have two databases running, either both on the same host, or
+ on different hosts.
+
+ On the server that will be connecting to the foreign
+ host (client), you create your table as such:
+
+ CREATE TABLE test_table (
+ id int(20) NOT NULL auto_increment,
+ name varchar(32) NOT NULL default '',
+ other int(20) NOT NULL default '0',
+ PRIMARY KEY (id),
+ KEY name (name),
+ KEY other_key (other))
+ ENGINE="FEDERATEDX"
+ DEFAULT CHARSET=latin1
+ CONNECTION='mysql://root@127.0.0.1:9306/federatedx/test_federatedx';
+
+ Notice the "COMMENT" and "ENGINE" field? This is where you
+ respectively set the engine type, "FEDERATEDX" and foreign
+ host information, this being the database your 'client' database
+ will connect to and use as the "data file". Obviously, the foreign
+ database is running on port 9306, so you want to start up your other
+ database so that it is indeed on port 9306, and your federatedx
+ database on a port other than that. In my setup, I use port 5554
+ for federatedx, and port 5555 for the foreign database.
+
+ Then, on the foreign database:
+
+ CREATE TABLE test_table (
+ id int(20) NOT NULL auto_increment,
+ name varchar(32) NOT NULL default '',
+ other int(20) NOT NULL default '0',
+ PRIMARY KEY (id),
+ KEY name (name),
+ KEY other_key (other))
+ ENGINE="<NAME>" <-- whatever you want, or not specify
+ DEFAULT CHARSET=latin1 ;
+
+ This table is exactly the same (and must be exactly the same),
+ except that it is not using the federatedx handler and does
+ not need the URL.
+
+
+ How to see the handler in action
+ --------------------------------
+
+ When developing this handler, I compiled the federatedx database with
+ debugging:
+
+ ./configure --with-federatedx-storage-engine
+ --prefix=/home/mysql/mysql-build/federatedx/ --with-debug
+
+ Once compiled, I did a 'make install' (not for the purpose of installing
+ the binary, but to install all the files the binary expects to see in the
+ directory I specified in the build with --prefix,
+ "/home/mysql/mysql-build/federatedx").
+
+ Then, I started the foreign server:
+
+ /usr/local/mysql/bin/mysqld_safe
+ --user=mysql --log=/tmp/mysqld.5555.log -P 5555
+
+ Then, I went back to the directory containing the newly compiled mysqld,
+ <builddir>/sql/, started up gdb:
+
+ gdb ./mysqld
+
+ Then, within the (gdb) prompt:
+ (gdb) run --gdb --port=5554 --socket=/tmp/mysqld.5554 --skip-innodb --debug
+
+ Next, I open several windows for each:
+
+ 1. Tail the debug trace: tail -f /tmp/mysqld.trace|grep ha_fed
+ 2. Tail the SQL calls to the foreign database: tail -f /tmp/mysqld.5555.log
+ 3. A window with a client open to the federatedx server on port 5554
+ 4. A window with a client open to the federatedx server on port 5555
+
+ I would create a table on the client to the foreign server on port
+ 5555, and then to the federatedx server on port 5554. At this point,
+ I would run whatever queries I wanted to on the federatedx server,
+ just always remembering that whatever changes I wanted to make on
+ the table, or if I created new tables, that I would have to do that
+ on the foreign server.
+
+ Another thing to look for is 'show variables' to show you that you have
+ support for the federatedx handler:
+
+ show variables like '%federat%'
+
+ and:
+
+ show storage engines;
+
+ Both should display the federatedx storage handler.
+
+
+ Testing
+ -------
+
+ Testing FederatedX as a pluggable storage engine is for
+ now a manual process; I intend to build a test
+ suite that works for all pluggable storage engines.
+
+ How to test
+
+ 1. cp fed.dat /tmp
+ (make sure you have access to "test". Use a user that has
+ super privileges for now)
+ 2. mysql -f -u root test < federated.test > federated.myresult 2>&1
+ 3. diff federated.result federated.myresult (there _should_ be no differences)
+
+
+*/
+
+
+#define MYSQL_SERVER 1
+#include "mysql_priv.h"
+#include <mysql/plugin.h>
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+#include "ha_federatedx.h"
+
+#include "m_string.h"
+
+#include <mysql/plugin.h>
+
+/* Variables for federatedx share methods */
+static HASH federatedx_open_tables; // To track open tables
+static HASH federatedx_open_servers; // To track open servers
+pthread_mutex_t federatedx_mutex; // To init the hash
+const char ident_quote_char= '`'; // Character for quoting
+ // identifiers
+const char value_quote_char= '\''; // Character for quoting
+ // literals
+static const int bulk_padding= 64; // bytes "overhead" in packet
+
+/* Variables used when chopping off trailing characters */
+static const uint sizeof_trailing_comma= sizeof(", ") - 1;
+static const uint sizeof_trailing_closeparen= sizeof(") ") - 1;
+static const uint sizeof_trailing_and= sizeof(" AND ") - 1;
+static const uint sizeof_trailing_where= sizeof(" WHERE ") - 1;
+
+/* Static declaration for handlerton */
+static handler *federatedx_create_handler(handlerton *hton,
+ TABLE_SHARE *table,
+ MEM_ROOT *mem_root);
+
+/* FederatedX storage engine handlerton */
+
+static handler *federatedx_create_handler(handlerton *hton,
+ TABLE_SHARE *table,
+ MEM_ROOT *mem_root)
+{
+ return new (mem_root) ha_federatedx(hton, table);
+}
+
+
+/* Function we use in the creation of our hash to get key */
+
+static uchar *
+federatedx_share_get_key(FEDERATEDX_SHARE *share, size_t *length,
+ my_bool not_used __attribute__ ((unused)))
+{
+ *length= share->share_key_length;
+ return (uchar*) share->share_key;
+}
+
+
+static uchar *
+federatedx_server_get_key(FEDERATEDX_SERVER *server, size_t *length,
+ my_bool not_used __attribute__ ((unused)))
+{
+ *length= server->key_length;
+ return server->key;
+}
+
+
+/*
+ Initialize the federatedx handler.
+
+ SYNOPSIS
+ federatedx_db_init()
+ p Handlerton
+
+ RETURN
+ FALSE OK
+ TRUE Error
+*/
+
+int federatedx_db_init(void *p)
+{
+ DBUG_ENTER("federatedx_db_init");
+ handlerton *federatedx_hton= (handlerton *)p;
+ federatedx_hton->state= SHOW_OPTION_YES;
+ /* This is no longer needed for plugin storage engines */
+ federatedx_hton->db_type= DB_TYPE_DEFAULT;
+ federatedx_hton->savepoint_offset= sizeof(ulong);
+ federatedx_hton->close_connection= ha_federatedx::disconnect;
+ federatedx_hton->savepoint_set= ha_federatedx::savepoint_set;
+ federatedx_hton->savepoint_rollback= ha_federatedx::savepoint_rollback;
+ federatedx_hton->savepoint_release= ha_federatedx::savepoint_release;
+ federatedx_hton->commit= ha_federatedx::commit;
+ federatedx_hton->rollback= ha_federatedx::rollback;
+ federatedx_hton->create= federatedx_create_handler;
+ federatedx_hton->flags= HTON_ALTER_NOT_SUPPORTED | HTON_NO_PARTITION;
+
+ if (pthread_mutex_init(&federatedx_mutex, MY_MUTEX_INIT_FAST))
+ goto error;
+ if (!hash_init(&federatedx_open_tables, &my_charset_bin, 32, 0, 0,
+ (hash_get_key) federatedx_share_get_key, 0, 0) &&
+ !hash_init(&federatedx_open_servers, &my_charset_bin, 32, 0, 0,
+ (hash_get_key) federatedx_server_get_key, 0, 0))
+ {
+ DBUG_RETURN(FALSE);
+ }
+
+ VOID(pthread_mutex_destroy(&federatedx_mutex));
+error:
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Release the federatedx handler.
+
+ SYNOPSIS
+ federatedx_db_end()
+
+ RETURN
+ FALSE OK
+*/
+
+int federatedx_done(void *p)
+{
+ hash_free(&federatedx_open_tables);
+ hash_free(&federatedx_open_servers);
+ VOID(pthread_mutex_destroy(&federatedx_mutex));
+
+ return 0;
+}
+
+/**
+ @brief Append identifiers to the string.
+
+ @param[in,out] string The target string.
+ @param[in] name Identifier name
+ @param[in] length Length of identifier name in bytes
+ @param[in] quote_char Quote char to use for quoting identifier.
+
+ @return Operation Status
+ @retval FALSE OK
+ @retval TRUE There was an error appending to the string.
+
+ @note This function is based upon the append_identifier() function
+ in sql_show.cc except that quoting always occurs.
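+
+  @note Illustrative example (assumed, not taken from a trace): calling
+        append_ident(&query, "foo`bar", 7, '`') appends `foo``bar`,
+        i.e. the identifier is wrapped in quote characters and any
+        embedded quote character is doubled.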
+*/
+
+bool append_ident(String *string, const char *name, uint length,
+ const char quote_char)
+{
+ bool result;
+ uint clen;
+ const char *name_end;
+ DBUG_ENTER("append_ident");
+
+ if (quote_char)
+ {
+ string->reserve(length * 2 + 2);
+    if ((result= string->append(&quote_char, 1, system_charset_info)))
+ goto err;
+
+ for (name_end= name+length; name < name_end; name+= clen)
+ {
+ uchar c= *(uchar *) name;
+ if (!(clen= my_mbcharlen(system_charset_info, c)))
+ clen= 1;
+ if (clen == 1 && c == (uchar) quote_char &&
+          (result= string->append(&quote_char, 1, system_charset_info)))
+ goto err;
+ if ((result= string->append(name, clen, string->charset())))
+ goto err;
+ }
+    result= string->append(&quote_char, 1, system_charset_info);
+ }
+ else
+ result= string->append(name, length, system_charset_info);
+
+err:
+ DBUG_RETURN(result);
+}
+
+
+static int parse_url_error(FEDERATEDX_SHARE *share, TABLE *table, int error_num)
+{
+ char buf[FEDERATEDX_QUERY_BUFFER_SIZE];
+ int buf_len;
+ DBUG_ENTER("ha_federatedx parse_url_error");
+
+ buf_len= min(table->s->connect_string.length,
+ FEDERATEDX_QUERY_BUFFER_SIZE-1);
+ strmake(buf, table->s->connect_string.str, buf_len);
+ my_error(error_num, MYF(0), buf);
+ DBUG_RETURN(error_num);
+}
+
+/*
+ retrieve server object which contains server meta-data
+ from the system table given a server's name, set share
+ connection parameter members
+*/
+int get_connection(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share)
+{
+ int error_num= ER_FOREIGN_SERVER_DOESNT_EXIST;
+ char error_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ FOREIGN_SERVER *server, server_buffer;
+ DBUG_ENTER("ha_federatedx::get_connection");
+
+ /*
+ get_server_by_name() clones the server if exists and allocates
+ copies of strings in the supplied mem_root
+ */
+ if (!(server=
+ get_server_by_name(mem_root, share->connection_string, &server_buffer)))
+ {
+ DBUG_PRINT("info", ("get_server_by_name returned > 0 error condition!"));
+ /* need to come up with error handling */
+ error_num=1;
+ goto error;
+ }
+ DBUG_PRINT("info", ("get_server_by_name returned server at %lx",
+ (long unsigned int) server));
+
+ /*
+ Most of these should never be empty strings, error handling will
+ need to be implemented. Also, is this the best way to set the share
+ members? Is there some allocation needed? In running this code, it works
+ except there are errors in the trace file of the share being overrun
+ at the address of the share.
+ */
+ share->server_name_length= server->server_name_length;
+ share->server_name= server->server_name;
+ share->username= server->username;
+ share->password= server->password;
+ share->database= server->db;
+#ifndef I_AM_PARANOID
+ share->port= server->port > 0 && server->port < 65536 ?
+#else
+ share->port= server->port > 1023 && server->port < 65536 ?
+#endif
+ (ushort) server->port : MYSQL_PORT;
+ share->hostname= server->host;
+ if (!(share->socket= server->socket) &&
+ !strcmp(share->hostname, my_localhost))
+ share->socket= (char *) MYSQL_UNIX_ADDR;
+ share->scheme= server->scheme;
+
+ DBUG_PRINT("info", ("share->username: %s", share->username));
+ DBUG_PRINT("info", ("share->password: %s", share->password));
+ DBUG_PRINT("info", ("share->hostname: %s", share->hostname));
+ DBUG_PRINT("info", ("share->database: %s", share->database));
+ DBUG_PRINT("info", ("share->port: %d", share->port));
+ DBUG_PRINT("info", ("share->socket: %s", share->socket));
+ DBUG_RETURN(0);
+
+error:
+ my_sprintf(error_buffer,
+ (error_buffer, "server name: '%s' doesn't exist!",
+ share->connection_string));
+ my_error(error_num, MYF(0), error_buffer);
+ DBUG_RETURN(error_num);
+}
+
+/*
+ Parse connection info from table->s->connect_string
+
+ SYNOPSIS
+ parse_url()
+ mem_root MEM_ROOT pointer for memory allocation
+ share pointer to FEDERATEDX share
+ table pointer to current TABLE class
+ table_create_flag determines what error to throw
+
+ DESCRIPTION
+ Populates the share with information about the connection
+ to the foreign database that will serve as the data source.
+ This string must be specified (currently) in the "CONNECTION" field,
+ listed in the CREATE TABLE statement.
+
+ This string MUST be in the format of any of these:
+
+ CONNECTION="scheme://username:password@hostname:port/database/table"
+ CONNECTION="scheme://username@hostname/database/table"
+ CONNECTION="scheme://username@hostname:port/database/table"
+ CONNECTION="scheme://username:password@hostname/database/table"
+
+ _OR_
+
+ CONNECTION="connection name"
+
+
+
+ An Example:
+
+ CREATE TABLE t1 (id int(32))
+ ENGINE="FEDERATEDX"
+ CONNECTION="mysql://joe:joespass@192.168.1.111:9308/federatedx/testtable";
+
+ CREATE TABLE t2 (
+ id int(4) NOT NULL auto_increment,
+ name varchar(32) NOT NULL,
+ PRIMARY KEY(id)
+ ) ENGINE="FEDERATEDX" CONNECTION="my_conn";
+
+ ***IMPORTANT***
+ Currently, the FederatedX Storage Engine only supports connecting to another
+ Database ("scheme" of "mysql"). Connections using JDBC as well as
+ other connectors are in the planning stage.
+
+
+ 'password' and 'port' are both optional.
+
+ RETURN VALUE
+ 0 success
+ error_num particular error code
+
+*/
+
+static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share, TABLE *table,
+ uint table_create_flag)
+{
+ uint error_num= (table_create_flag ?
+ ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE :
+ ER_FOREIGN_DATA_STRING_INVALID);
+ DBUG_ENTER("ha_federatedx::parse_url");
+
+ share->port= 0;
+ share->socket= 0;
+ DBUG_PRINT("info", ("share at %lx", (long unsigned int) share));
+ DBUG_PRINT("info", ("Length: %u", (uint) table->s->connect_string.length));
+ DBUG_PRINT("info", ("String: '%.*s'", (int) table->s->connect_string.length,
+ table->s->connect_string.str));
+ share->connection_string= strmake_root(mem_root, table->s->connect_string.str,
+ table->s->connect_string.length);
+
+ DBUG_PRINT("info",("parse_url alloced share->connection_string %lx",
+ (long unsigned int) share->connection_string));
+
+ DBUG_PRINT("info",("share->connection_string: %s",share->connection_string));
+ /*
+ No :// or @ in connection string. Must be a straight connection name of
+ either "servername" or "servername/tablename"
+ */
+ if ((!strstr(share->connection_string, "://") &&
+ (!strchr(share->connection_string, '@'))))
+ {
+
+ DBUG_PRINT("info",
+ ("share->connection_string: %s internal format "
+ "share->connection_string: %lx",
+ share->connection_string,
+ (ulong) share->connection_string));
+
+ /* ok, so we do a little parsing, but not completely! */
+ share->parsed= FALSE;
+ /*
+ If there is a single '/' in the connection string, this means the user is
+ specifying a table name
+ */
+
+ if ((share->table_name= strchr(share->connection_string, '/')))
+ {
+ *share->table_name++= '\0';
+ share->table_name_length= strlen(share->table_name);
+
+ DBUG_PRINT("info",
+ ("internal format, parsed table_name "
+ "share->connection_string: %s share->table_name: %s",
+ share->connection_string, share->table_name));
+
+ /*
+ there better not be any more '/'s !
+ */
+ if (strchr(share->table_name, '/'))
+ goto error;
+ }
+ /*
+ Otherwise, straight server name, use tablename of federatedx table
+ as remote table name
+ */
+ else
+ {
+ /*
+        Connection specifies everything but the table name; resort to
+        expecting the local and remote table names to match
+ */
+ share->table_name= strmake_root(mem_root, table->s->table_name.str,
+ (share->table_name_length=
+ table->s->table_name.length));
+ DBUG_PRINT("info",
+ ("internal format, default table_name "
+ "share->connection_string: %s share->table_name: %s",
+ share->connection_string, share->table_name));
+ }
+
+ if ((error_num= get_connection(mem_root, share)))
+ goto error;
+ }
+ else
+ {
+ share->parsed= TRUE;
+ // Add a null for later termination of table name
+ share->connection_string[table->s->connect_string.length]= 0;
+ share->scheme= share->connection_string;
+ DBUG_PRINT("info",("parse_url alloced share->scheme: %lx",
+ (ulong) share->scheme));
+
+ /*
+ Remove addition of null terminator and store length
+ for each string in share
+ */
+ if (!(share->username= strstr(share->scheme, "://")))
+ goto error;
+ share->scheme[share->username - share->scheme]= '\0';
+
+ if (!federatedx_io::handles_scheme(share->scheme))
+ goto error;
+
+ share->username+= 3;
+
+ if (!(share->hostname= strchr(share->username, '@')))
+ goto error;
+ *share->hostname++= '\0'; // End username
+
+ if ((share->password= strchr(share->username, ':')))
+ {
+ *share->password++= '\0'; // End username
+
+ /* make sure there isn't an extra / or @ */
+ if ((strchr(share->password, '/') || strchr(share->hostname, '@')))
+ goto error;
+ /*
+ Found that if the string is:
+ user:@hostname:port/db/table
+ Then password is a null string, so set to NULL
+ */
+ if ((share->password[0] == '\0'))
+ share->password= NULL;
+ }
+
+ /* make sure there isn't an extra / or @ */
+ if ((strchr(share->username, '/')) || (strchr(share->hostname, '@')))
+ goto error;
+
+ if (!(share->database= strchr(share->hostname, '/')))
+ goto error;
+ *share->database++= '\0';
+
+ if ((share->sport= strchr(share->hostname, ':')))
+ {
+ *share->sport++= '\0';
+ if (share->sport[0] == '\0')
+ share->sport= NULL;
+ else
+ share->port= atoi(share->sport);
+ }
+
+ if (!(share->table_name= strchr(share->database, '/')))
+ goto error;
+ *share->table_name++= '\0';
+
+ share->table_name_length= strlen(share->table_name);
+
+ /* make sure there's not an extra / */
+ if ((strchr(share->table_name, '/')))
+ goto error;
+
+ if (share->hostname[0] == '\0')
+ share->hostname= NULL;
+
+ }
+ if (!share->port)
+ {
+ if (!share->hostname || strcmp(share->hostname, my_localhost) == 0)
+ share->socket= (char *) MYSQL_UNIX_ADDR;
+ else
+ share->port= MYSQL_PORT;
+ }
+
+ DBUG_PRINT("info",
+ ("scheme: %s username: %s password: %s hostname: %s "
+ "port: %d db: %s tablename: %s",
+ share->scheme, share->username, share->password,
+ share->hostname, share->port, share->database,
+ share->table_name));
+
+ DBUG_RETURN(0);
+
+error:
+ DBUG_RETURN(parse_url_error(share, table, error_num));
+}
+
+/*****************************************************************************
+** FEDERATEDX tables
+*****************************************************************************/
+
+ha_federatedx::ha_federatedx(handlerton *hton,
+ TABLE_SHARE *table_arg)
+ :handler(hton, table_arg),
+ txn(0), io(0), stored_result(0)
+{
+ bzero(&bulk_insert, sizeof(bulk_insert));
+}
+
+
+/*
+ Convert MySQL result set row to handler internal format
+
+ SYNOPSIS
+ convert_row_to_internal_format()
+ record Byte pointer to record
+ row MySQL result set row from fetchrow()
+ result Result set to use
+
+ DESCRIPTION
+ This method simply iterates through a row returned via fetchrow with
+ values from a successful SELECT , and then stores each column's value
+ in the field object via the field object pointer (pointing to the table's
+ array of field object pointers). This is how the handler needs the data
+ to be stored to then return results back to the user
+
+ RETURN VALUE
+ 0 After fields have had field values stored from record
+*/
+
+uint ha_federatedx::convert_row_to_internal_format(uchar *record,
+ FEDERATEDX_IO_ROW *row,
+ FEDERATEDX_IO_RESULT *result)
+{
+ ulong *lengths;
+ Field **field;
+ int column= 0;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ DBUG_ENTER("ha_federatedx::convert_row_to_internal_format");
+
+ lengths= io->fetch_lengths(result);
+
+ for (field= table->field; *field; field++, column++)
+ {
+ /*
+ index variable to move us through the row at the
+ same iterative step as the field
+ */
+ my_ptrdiff_t old_ptr;
+ old_ptr= (my_ptrdiff_t) (record - table->record[0]);
+ (*field)->move_field_offset(old_ptr);
+ if (io->is_column_null(row, column))
+ (*field)->set_null();
+ else
+ {
+ if (bitmap_is_set(table->read_set, (*field)->field_index))
+ {
+ (*field)->set_notnull();
+ (*field)->store(io->get_column_data(row, column), lengths[column], &my_charset_bin);
+ }
+ }
+ (*field)->move_field_offset(-old_ptr);
+ }
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+ DBUG_RETURN(0);
+}
+
+static bool emit_key_part_name(String *to, KEY_PART_INFO *part)
+{
+ DBUG_ENTER("emit_key_part_name");
+ if (append_ident(to, part->field->field_name,
+ strlen(part->field->field_name), ident_quote_char))
+ DBUG_RETURN(1); // Out of memory
+ DBUG_RETURN(0);
+}
+
+static bool emit_key_part_element(String *to, KEY_PART_INFO *part,
+ bool needs_quotes, bool is_like,
+ const uchar *ptr, uint len)
+{
+ Field *field= part->field;
+ DBUG_ENTER("emit_key_part_element");
+
+ if (needs_quotes && to->append(STRING_WITH_LEN("'")))
+ DBUG_RETURN(1);
+
+ if (part->type == HA_KEYTYPE_BIT)
+ {
+ char buff[STRING_BUFFER_USUAL_SIZE], *buf= buff;
+
+ *buf++= '0';
+ *buf++= 'x';
+ buf= octet2hex(buf, (char*) ptr, len);
+ if (to->append((char*) buff, (uint)(buf - buff)))
+ DBUG_RETURN(1);
+ }
+ else if (part->key_part_flag & HA_BLOB_PART)
+ {
+ String blob;
+ uint blob_length= uint2korr(ptr);
+ blob.set_quick((char*) ptr+HA_KEY_BLOB_LENGTH,
+ blob_length, &my_charset_bin);
+ if (append_escaped(to, &blob))
+ DBUG_RETURN(1);
+ }
+ else if (part->key_part_flag & HA_VAR_LENGTH_PART)
+ {
+ String varchar;
+ uint var_length= uint2korr(ptr);
+ varchar.set_quick((char*) ptr+HA_KEY_BLOB_LENGTH,
+ var_length, &my_charset_bin);
+ if (append_escaped(to, &varchar))
+ DBUG_RETURN(1);
+ }
+ else
+ {
+ char strbuff[MAX_FIELD_WIDTH];
+ String str(strbuff, sizeof(strbuff), part->field->charset()), *res;
+
+ res= field->val_str(&str, ptr);
+
+ if (field->result_type() == STRING_RESULT)
+ {
+ if (append_escaped(to, res))
+ DBUG_RETURN(1);
+ }
+ else if (to->append(res->ptr(), res->length()))
+ DBUG_RETURN(1);
+ }
+
+ if (is_like && to->append(STRING_WITH_LEN("%")))
+ DBUG_RETURN(1);
+
+ if (needs_quotes && to->append(STRING_WITH_LEN("'")))
+ DBUG_RETURN(1);
+
+ DBUG_RETURN(0);
+}
+
+/*
+  Create a WHERE clause based on values in keys
+ Note: This code was inspired by key_copy from key.cc
+
+ SYNOPSIS
+ create_where_from_key ()
+ to String object to store WHERE clause
+ key_info KEY struct pointer
+ key byte pointer containing key
+ key_length length of key
+ range_type 0 - no range, 1 - min range, 2 - max range
+ (see enum range_operation)
+
+ DESCRIPTION
+    Iterating through all the keys via a KEY_PART_INFO pointer,
+    this method 'extracts' the value of each key from the byte pointer
+    *key and, for each key found, constructs an appropriate WHERE clause
+
+ RETURN VALUE
+ 0 After all keys have been accounted for to create the WHERE clause
+ 1 No keys found
+
+ Range flags Table per Timour:
+
+ -----------------
+ - start_key:
+ * ">" -> HA_READ_AFTER_KEY
+ * ">=" -> HA_READ_KEY_OR_NEXT
+ * "=" -> HA_READ_KEY_EXACT
+
+ - end_key:
+ * "<" -> HA_READ_BEFORE_KEY
+ * "<=" -> HA_READ_AFTER_KEY
+
+ records_in_range:
+ -----------------
+ - start_key:
+ * ">" -> HA_READ_AFTER_KEY
+ * ">=" -> HA_READ_KEY_EXACT
+ * "=" -> HA_READ_KEY_EXACT
+
+ - end_key:
+ * "<" -> HA_READ_BEFORE_KEY
+ * "<=" -> HA_READ_AFTER_KEY
+ * "=" -> HA_READ_AFTER_KEY
+
+0 HA_READ_KEY_EXACT, Find first record else error
+1 HA_READ_KEY_OR_NEXT, Record or next record
+2 HA_READ_KEY_OR_PREV, Record or previous
+3 HA_READ_AFTER_KEY, Find next rec. after key-record
+4 HA_READ_BEFORE_KEY, Find next rec. before key-record
+5 HA_READ_PREFIX, Key which has same prefix
+6 HA_READ_PREFIX_LAST, Last key with the same prefix
+7 HA_READ_PREFIX_LAST_OR_PREV, Last or prev key with the same prefix
+
+Flags that I've found:
+
+id, primary key, varchar
+
+id = 'ccccc'
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 0 end_key NULL
+
+id > 'ccccc'
+records_in_range: start_key 3 end_key NULL
+read_range_first: start_key 3 end_key NULL
+
+id < 'ccccc'
+records_in_range: start_key NULL end_key 4
+read_range_first: start_key NULL end_key 4
+
+id <= 'ccccc'
+records_in_range: start_key NULL end_key 3
+read_range_first: start_key NULL end_key 3
+
+id >= 'ccccc'
+records_in_range: start_key 0 end_key NULL
+read_range_first: start_key 1 end_key NULL
+
+id like 'cc%cc'
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 'aaaaa' and id < 'ccccc'
+records_in_range: start_key 3 end_key 4
+read_range_first: start_key 3 end_key 4
+
+id >= 'aaaaa' and id < 'ccccc';
+records_in_range: start_key 0 end_key 4
+read_range_first: start_key 1 end_key 4
+
+id >= 'aaaaa' and id <= 'ccccc';
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 'aaaaa' and id <= 'ccccc';
+records_in_range: start_key 3 end_key 3
+read_range_first: start_key 3 end_key 3
+
+numeric keys:
+
+id = 4
+index_read_idx: start_key 0 end_key NULL
+
+id > 4
+records_in_range: start_key 3 end_key NULL
+read_range_first: start_key 3 end_key NULL
+
+id >= 4
+records_in_range: start_key 0 end_key NULL
+read_range_first: start_key 1 end_key NULL
+
+id < 4
+records_in_range: start_key NULL end_key 4
+read_range_first: start_key NULL end_key 4
+
+id <= 4
+records_in_range: start_key NULL end_key 3
+read_range_first: start_key NULL end_key 3
+
+id like 4
+full table scan, select * from
+
+id > 2 and id < 8
+records_in_range: start_key 3 end_key 4
+read_range_first: start_key 3 end_key 4
+
+id >= 2 and id < 8
+records_in_range: start_key 0 end_key 4
+read_range_first: start_key 1 end_key 4
+
+id >= 2 and id <= 8
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 2 and id <= 8
+records_in_range: start_key 3 end_key 3
+read_range_first: start_key 3 end_key 3
+
+multi keys (id int, name varchar, other varchar)
+
+id = 1;
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 0 end_key NULL
+
+id > 4;
+id > 2 and name = '333'; remote: id > 2
+id > 2 and name > '333'; remote: id > 2
+id > 2 and name > '333' and other < 'ddd'; remote: id > 2 no results
+id > 2 and name >= '333' and other < 'ddd'; remote: id > 2 1 result
+id >= 4 and name = 'eric was here' and other > 'eeee';
+records_in_range: start_key 3 end_key NULL
+read_range_first: start_key 3 end_key NULL
+
+id >= 4;
+id >= 2 and name = '333' and other < 'ddd';
+remote: `id` >= 2 AND `name` >= '333';
+records_in_range: start_key 0 end_key NULL
+read_range_first: start_key 1 end_key NULL
+
+id < 4;
+id < 3 and name = '222' and other <= 'ccc'; remote: id < 3
+records_in_range: start_key NULL end_key 4
+read_range_first: start_key NULL end_key 4
+
+id <= 4;
+records_in_range: start_key NULL end_key 3
+read_range_first: start_key NULL end_key 3
+
+id like 4;
+full table scan
+
+id > 2 and id < 4;
+records_in_range: start_key 3 end_key 4
+read_range_first: start_key 3 end_key 4
+
+id >= 2 and id < 4;
+records_in_range: start_key 0 end_key 4
+read_range_first: start_key 1 end_key 4
+
+id >= 2 and id <= 4;
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 2 and id <= 4;
+id = 6 and name = 'eric was here' and other > 'eeee';
+remote: (`id` > 6 AND `name` > 'eric was here' AND `other` > 'eeee')
+AND (`id` <= 6) AND ( AND `name` <= 'eric was here')
+no results
+records_in_range: start_key 3 end_key 3
+read_range_first: start_key 3 end_key 3
+
+Summary:
+
+* If the start key flag is 0 the max key flag shouldn't even be set,
+ and if it is, the query produced would be invalid.
+* Multipart keys, even if containing some or all numeric columns,
+ are treated the same as non-numeric keys
+
+ If the query is " = " (quotes or not):
+ - records in range start key flag HA_READ_KEY_EXACT,
+ end key flag HA_READ_AFTER_KEY (incorrect)
+ - any other: start key flag HA_READ_KEY_OR_NEXT,
+ end key flag HA_READ_AFTER_KEY (correct)
+
+* 'like' queries (of key)
+ - Numeric, full table scan
+ - Non-numeric
+ records_in_range: start_key 0 end_key 3
+ other : start_key 1 end_key 3
+
+* If the key flag is HA_READ_AFTER_KEY:
+ if start_key, append >
+ if end_key, append <=
+
+* If create_where_key was called by records_in_range:
+
+ - if the key is numeric:
+ start key flag is 0 when end key is NULL, end key flag is 3 or 4
+ - if create_where_key was called by any other function:
+ start key flag is 1 when end key is NULL, end key flag is 3 or 4
+ - if the key is non-numeric, or multipart
+ When the query is an exact match, the start key flag is 0,
+ end key flag is 3 for what should be a no-range condition where
+ you should have 0 and max key NULL, which it is if called by
+ read_range_first
+
+Conclusion:
+
+1. Need logic to determine if a key is min or max when the flag is
+HA_READ_AFTER_KEY, and handle appending the correct operator accordingly
+
+2. Need a boolean flag to pass to create_where_from_key, used in the
+switch statement. Add 1 to the flag if:
+ - start key flag is HA_READ_KEY_EXACT and the end key is NULL
+
+*/
+
+bool ha_federatedx::create_where_from_key(String *to,
+ KEY *key_info,
+ const key_range *start_key,
+ const key_range *end_key,
+ bool from_records_in_range,
+ bool eq_range)
+{
+ bool both_not_null=
+ (start_key != NULL && end_key != NULL) ? TRUE : FALSE;
+ const uchar *ptr;
+ uint remainder, length;
+ char tmpbuff[FEDERATEDX_QUERY_BUFFER_SIZE];
+ String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info);
+ const key_range *ranges[2]= { start_key, end_key };
+ my_bitmap_map *old_map;
+ DBUG_ENTER("ha_federatedx::create_where_from_key");
+
+ tmp.length(0);
+ if (start_key == NULL && end_key == NULL)
+ DBUG_RETURN(1);
+
+ old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ for (uint i= 0; i <= 1; i++)
+ {
+ bool needs_quotes;
+ KEY_PART_INFO *key_part;
+ if (ranges[i] == NULL)
+ continue;
+
+ if (both_not_null)
+ {
+ if (i > 0)
+ tmp.append(STRING_WITH_LEN(") AND ("));
+ else
+ tmp.append(STRING_WITH_LEN(" ("));
+ }
+
+ for (key_part= key_info->key_part,
+ remainder= key_info->key_parts,
+ length= ranges[i]->length,
+ ptr= ranges[i]->key; ;
+ remainder--,
+ key_part++)
+ {
+ Field *field= key_part->field;
+ uint store_length= key_part->store_length;
+ uint part_length= min(store_length, length);
+ needs_quotes= field->str_needs_quotes();
+ DBUG_DUMP("key, start of loop", ptr, length);
+
+ if (key_part->null_bit)
+ {
+ if (*ptr++)
+ {
+ /*
+ We got "IS [NOT] NULL" condition against nullable column. We
+ distinguish between "IS NOT NULL" and "IS NULL" by flag. For
+ "IS NULL", flag is set to HA_READ_KEY_EXACT.
+ */
+ if (emit_key_part_name(&tmp, key_part) ||
+ tmp.append(ranges[i]->flag == HA_READ_KEY_EXACT ?
+ " IS NULL " : " IS NOT NULL "))
+ goto err;
+ /*
+ We need to adjust pointer and length to be prepared for next
+ key part. As well as check if this was last key part.
+ */
+ goto prepare_for_next_key_part;
+ }
+ }
+
+ if (tmp.append(STRING_WITH_LEN(" (")))
+ goto err;
+
+ switch (ranges[i]->flag) {
+ case HA_READ_KEY_EXACT:
+ DBUG_PRINT("info", ("federatedx HA_READ_KEY_EXACT %d", i));
+ if (store_length >= length ||
+ !needs_quotes ||
+ key_part->type == HA_KEYTYPE_BIT ||
+ field->result_type() != STRING_RESULT)
+ {
+ if (emit_key_part_name(&tmp, key_part))
+ goto err;
+
+ if (from_records_in_range)
+ {
+ if (tmp.append(STRING_WITH_LEN(" >= ")))
+ goto err;
+ }
+ else
+ {
+ if (tmp.append(STRING_WITH_LEN(" = ")))
+ goto err;
+ }
+
+ if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+ part_length))
+ goto err;
+ }
+ else
+ {
+ /* LIKE */
+ if (emit_key_part_name(&tmp, key_part) ||
+ tmp.append(STRING_WITH_LEN(" LIKE ")) ||
+ emit_key_part_element(&tmp, key_part, needs_quotes, 1, ptr,
+ part_length))
+ goto err;
+ }
+ break;
+ case HA_READ_AFTER_KEY:
+ if (eq_range)
+ {
+ if (tmp.append("1=1")) // Dummy
+ goto err;
+ break;
+ }
+ DBUG_PRINT("info", ("federatedx HA_READ_AFTER_KEY %d", i));
+ if (store_length >= length) /* end key */
+ {
+ if (emit_key_part_name(&tmp, key_part))
+ goto err;
+
+ if (i > 0) /* end key */
+ {
+ if (tmp.append(STRING_WITH_LEN(" <= ")))
+ goto err;
+ }
+ else /* start key */
+ {
+ if (tmp.append(STRING_WITH_LEN(" > ")))
+ goto err;
+ }
+
+ if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+ part_length))
+ {
+ goto err;
+ }
+ break;
+ }
+ case HA_READ_KEY_OR_NEXT:
+ DBUG_PRINT("info", ("federatedx HA_READ_KEY_OR_NEXT %d", i));
+ if (emit_key_part_name(&tmp, key_part) ||
+ tmp.append(STRING_WITH_LEN(" >= ")) ||
+ emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+ part_length))
+ goto err;
+ break;
+ case HA_READ_BEFORE_KEY:
+ DBUG_PRINT("info", ("federatedx HA_READ_BEFORE_KEY %d", i));
+ if (store_length >= length)
+ {
+ if (emit_key_part_name(&tmp, key_part) ||
+ tmp.append(STRING_WITH_LEN(" < ")) ||
+ emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+ part_length))
+ goto err;
+ break;
+ }
+ case HA_READ_KEY_OR_PREV:
+ DBUG_PRINT("info", ("federatedx HA_READ_KEY_OR_PREV %d", i));
+ if (emit_key_part_name(&tmp, key_part) ||
+ tmp.append(STRING_WITH_LEN(" <= ")) ||
+ emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+ part_length))
+ goto err;
+ break;
+ default:
+ DBUG_PRINT("info",("cannot handle flag %d", ranges[i]->flag));
+ goto err;
+ }
+ if (tmp.append(STRING_WITH_LEN(") ")))
+ goto err;
+
+prepare_for_next_key_part:
+ if (store_length >= length)
+ break;
+ DBUG_PRINT("info", ("remainder %d", remainder));
+ DBUG_ASSERT(remainder > 1);
+ length-= store_length;
+ /*
+ For nullable columns, null-byte is already skipped before, that is
+ ptr was incremented by 1. Since store_length still counts null-byte,
+ we need to subtract 1 from store_length.
+ */
+ ptr+= store_length - test(key_part->null_bit);
+ if (tmp.append(STRING_WITH_LEN(" AND ")))
+ goto err;
+
+ DBUG_PRINT("info",
+ ("create_where_from_key WHERE clause: %s",
+ tmp.c_ptr_quick()));
+ }
+ }
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+
+ if (both_not_null)
+ if (tmp.append(STRING_WITH_LEN(") ")))
+ DBUG_RETURN(1);
+
+ if (to->append(STRING_WITH_LEN(" WHERE ")))
+ DBUG_RETURN(1);
+
+ if (to->append(tmp))
+ DBUG_RETURN(1);
+
+ DBUG_RETURN(0);
+
+err:
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+ DBUG_RETURN(1);
+}
+
+static void fill_server(MEM_ROOT *mem_root, FEDERATEDX_SERVER *server,
+ FEDERATEDX_SHARE *share, CHARSET_INFO *table_charset)
+{
+ char buffer[STRING_BUFFER_USUAL_SIZE];
+ String key(buffer, sizeof(buffer), &my_charset_bin);
+ String scheme(share->scheme, &my_charset_latin1);
+ String hostname(share->hostname, &my_charset_latin1);
+ String database(share->database, system_charset_info);
+ String username(share->username, system_charset_info);
+ String socket(share->socket ? share->socket : "", files_charset_info);
+ String password(share->password ? share->password : "", &my_charset_bin);
+ DBUG_ENTER("fill_server");
+
+ /* Do some case conversions */
+ scheme.reserve(scheme.length());
+ scheme.length(my_casedn_str(&my_charset_latin1, scheme.c_ptr_safe()));
+
+ hostname.reserve(hostname.length());
+ hostname.length(my_casedn_str(&my_charset_latin1, hostname.c_ptr_safe()));
+
+ if (lower_case_table_names)
+ {
+ database.reserve(database.length());
+ database.length(my_casedn_str(system_charset_info, database.c_ptr_safe()));
+ }
+
+#ifndef __WIN__
+ /*
+    TODO: there are no unix sockets under Windows, so the engine should be
+    revised regarding its use of sockets in such an environment.
+ */
+ if (lower_case_file_system && socket.length())
+ {
+ socket.reserve(socket.length());
+ socket.length(my_casedn_str(files_charset_info, socket.c_ptr_safe()));
+ }
+#endif
+
+ /* start with all bytes zeroed */
+ bzero(server, sizeof(*server));
+
+ key.length(0);
+ key.reserve(scheme.length() + hostname.length() + database.length() +
+ socket.length() + username.length() + password.length() +
+ sizeof(int) + 8);
+ key.append(scheme);
+ key.q_append('\0');
+ server->hostname= (const char *) (intptr) key.length();
+ key.append(hostname);
+ key.q_append('\0');
+ server->database= (const char *) (intptr) key.length();
+ key.append(database);
+ key.q_append('\0');
+ key.q_append((uint32) share->port);
+ server->socket= (const char *) (intptr) key.length();
+ key.append(socket);
+ key.q_append('\0');
+ server->username= (const char *) (intptr) key.length();
+ key.append(username);
+ key.q_append('\0');
+ server->password= (const char *) (intptr) key.length();
+ key.append(password);
+
+ server->key_length= key.length();
+ server->key= (uchar *) memdup_root(mem_root, key.ptr(), key.length()+1);
+
+ /* pointer magic */
+ server->scheme+= (intptr) server->key;
+ server->hostname+= (intptr) server->key;
+ server->database+= (intptr) server->key;
+ server->username+= (intptr) server->key;
+ server->password+= (intptr) server->key;
+ server->socket+= (intptr) server->key;
+ server->port= share->port;
+
+ if (!share->socket)
+ server->socket= NULL;
+ if (!share->password)
+ server->password= NULL;
+
+ if (table_charset)
+ server->csname= strdup_root(mem_root, table_charset->csname);
+
+ DBUG_VOID_RETURN;
+}
+
+
+static FEDERATEDX_SERVER *get_server(FEDERATEDX_SHARE *share, TABLE *table)
+{
+ FEDERATEDX_SERVER *server= NULL, tmp_server;
+ MEM_ROOT mem_root;
+ char buffer[STRING_BUFFER_USUAL_SIZE];
+ String key(buffer, sizeof(buffer), &my_charset_bin);
+ String scheme(share->scheme, &my_charset_latin1);
+ String hostname(share->hostname, &my_charset_latin1);
+ String database(share->database, system_charset_info);
+ String username(share->username, system_charset_info);
+ String socket(share->socket ? share->socket : "", files_charset_info);
+ String password(share->password ? share->password : "", &my_charset_bin);
+ DBUG_ENTER("ha_federated.cc::get_server");
+
+ safe_mutex_assert_owner(&federatedx_mutex);
+
+ init_alloc_root(&mem_root, 4096, 4096);
+
+ fill_server(&mem_root, &tmp_server, share, table ? table->s->table_charset : 0);
+
+ if (!(server= (FEDERATEDX_SERVER *) hash_search(&federatedx_open_servers,
+ tmp_server.key,
+ tmp_server.key_length)))
+ {
+ if (!table || !tmp_server.csname)
+ goto error;
+
+ if (!(server= (FEDERATEDX_SERVER *) memdup_root(&mem_root,
+ (char *) &tmp_server,
+ sizeof(*server))))
+ goto error;
+
+ server->mem_root= mem_root;
+
+ if (my_hash_insert(&federatedx_open_servers, (uchar*) server))
+ goto error;
+
+ pthread_mutex_init(&server->mutex, MY_MUTEX_INIT_FAST);
+ }
+ else
+ free_root(&mem_root, MYF(0)); /* prevents memory leak */
+
+ server->use_count++;
+
+ DBUG_RETURN(server);
+error:
+ free_root(&mem_root, MYF(0));
+ DBUG_RETURN(NULL);
+}
+
+
+/*
+  Example of simple lock controls. The "share" it creates is a structure we will
+ pass to each federatedx handler. Do you have to have one of these? Well, you
+ have pieces that are used for locking, and they are needed to function.
+*/
+
+static FEDERATEDX_SHARE *get_share(const char *table_name, TABLE *table)
+{
+ char query_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ Field **field;
+ String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+ FEDERATEDX_SHARE *share= NULL, tmp_share;
+ MEM_ROOT mem_root;
+ DBUG_ENTER("ha_federatedx.cc::get_share");
+
+ /*
+    In order to use this string, we must first zero its length,
+ or it will contain garbage
+ */
+ query.length(0);
+
+ bzero(&tmp_share, sizeof(tmp_share));
+ init_alloc_root(&mem_root, 256, 0);
+
+ pthread_mutex_lock(&federatedx_mutex);
+
+ tmp_share.share_key= table_name;
+ tmp_share.share_key_length= strlen(table_name);
+ if (parse_url(&mem_root, &tmp_share, table, 0))
+ goto error;
+
+ /* TODO: change tmp_share.scheme to LEX_STRING object */
+ if (!(share= (FEDERATEDX_SHARE *) hash_search(&federatedx_open_tables,
+ (uchar*) tmp_share.share_key,
+ tmp_share.
+ share_key_length)))
+ {
+ query.set_charset(system_charset_info);
+ query.append(STRING_WITH_LEN("SELECT "));
+ for (field= table->field; *field; field++)
+ {
+ append_ident(&query, (*field)->field_name,
+ strlen((*field)->field_name), ident_quote_char);
+ query.append(STRING_WITH_LEN(", "));
+ }
+ /* chops off trailing comma */
+ query.length(query.length() - sizeof_trailing_comma);
+
+ query.append(STRING_WITH_LEN(" FROM "));
+
+ append_ident(&query, tmp_share.table_name,
+ tmp_share.table_name_length, ident_quote_char);
+
+ if (!(share= (FEDERATEDX_SHARE *) memdup_root(&mem_root, (char*)&tmp_share, sizeof(*share))) ||
+ !(share->select_query= (char*) strmake_root(&mem_root, query.ptr(), query.length() + 1)))
+ goto error;
+
+ share->mem_root= mem_root;
+
+ DBUG_PRINT("info",
+ ("share->select_query %s", share->select_query));
+
+ if (!(share->s= get_server(share, table)))
+ goto error;
+
+ if (my_hash_insert(&federatedx_open_tables, (uchar*) share))
+ goto error;
+ thr_lock_init(&share->lock);
+ }
+ else
+ free_root(&mem_root, MYF(0)); /* prevents memory leak */
+
+ share->use_count++;
+ pthread_mutex_unlock(&federatedx_mutex);
+
+ DBUG_RETURN(share);
+
+error:
+ pthread_mutex_unlock(&federatedx_mutex);
+ free_root(&mem_root, MYF(0));
+ DBUG_RETURN(NULL);
+}
+
+
+static int free_server(federatedx_txn *txn, FEDERATEDX_SERVER *server)
+{
+ bool destroy;
+ DBUG_ENTER("free_server");
+
+ pthread_mutex_lock(&federatedx_mutex);
+ if ((destroy= !--server->use_count))
+ hash_delete(&federatedx_open_servers, (uchar*) server);
+ pthread_mutex_unlock(&federatedx_mutex);
+
+ if (destroy)
+ {
+ MEM_ROOT mem_root;
+
+ txn->close(server);
+
+ DBUG_ASSERT(server->io_count == 0);
+
+ pthread_mutex_destroy(&server->mutex);
+ mem_root= server->mem_root;
+ free_root(&mem_root, MYF(0));
+ }
+
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Free lock controls. We call this whenever we close a table.
+ If the table had the last reference to the share then we
+ free memory associated with it.
+*/
+
+static int free_share(federatedx_txn *txn, FEDERATEDX_SHARE *share)
+{
+ bool destroy;
+ DBUG_ENTER("free_share");
+
+ pthread_mutex_lock(&federatedx_mutex);
+ if ((destroy= !--share->use_count))
+ hash_delete(&federatedx_open_tables, (uchar*) share);
+ pthread_mutex_unlock(&federatedx_mutex);
+
+ if (destroy)
+ {
+ MEM_ROOT mem_root;
+ FEDERATEDX_SERVER *server= share->s;
+
+ thr_lock_delete(&share->lock);
+
+ mem_root= share->mem_root;
+ free_root(&mem_root, MYF(0));
+
+ free_server(txn, server);
+ }
+
+ DBUG_RETURN(0);
+}
+
+
+ha_rows ha_federatedx::records_in_range(uint inx, key_range *start_key,
+ key_range *end_key)
+{
+  /*
+    We really want indexes to be used as often as possible, therefore
+    we just need to hard-code the return value to a very low number to
+    force the issue
+  */
+ DBUG_ENTER("ha_federatedx::records_in_range");
+ DBUG_RETURN(FEDERATEDX_RECORDS_IN_RANGE);
+}
+/*
+  If frm_error() is called then we will use this to find out
+  what file extensions exist for the storage engine. This is
+ also used by the default rename_table and delete_table method
+ in handler.cc.
+*/
+
+const char **ha_federatedx::bas_ext() const
+{
+ static const char *ext[]=
+ {
+ NullS
+ };
+ return ext;
+}
+
+
+federatedx_txn *ha_federatedx::get_txn(THD *thd, bool no_create)
+{
+ federatedx_txn **txnp= (federatedx_txn **) ha_data(thd);
+ if (!*txnp && !no_create)
+ *txnp= new federatedx_txn();
+ return *txnp;
+}
+
+
+int ha_federatedx::disconnect(handlerton *hton, MYSQL_THD thd)
+{
+ federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+ delete txn;
+ return 0;
+}
+
+
+/*
+ Used for opening tables. The name will be the name of the file.
+ A table is opened when it needs to be opened. For instance
+ when a request comes in for a select on the table (tables are not
+ open and closed for each request, they are cached).
+
+ Called from handler.cc by handler::ha_open(). The server opens
+ all tables by calling ha_open() which then calls the handler
+ specific open().
+*/
+
+int ha_federatedx::open(const char *name, int mode, uint test_if_locked)
+{
+ int error;
+ THD *thd= current_thd;
+ DBUG_ENTER("ha_federatedx::open");
+
+ if (!(share= get_share(name, table)))
+ DBUG_RETURN(1);
+ thr_lock_data_init(&share->lock, &lock, NULL);
+
+ DBUG_ASSERT(io == NULL);
+
+ txn= get_txn(thd);
+
+ if ((error= txn->acquire(share, TRUE, &io)))
+ {
+ free_share(txn, share);
+ DBUG_RETURN(error);
+ }
+
+ txn->release(&io);
+
+ ref_length= (table->s->primary_key != MAX_KEY ?
+ table->key_info[table->s->primary_key].key_length :
+ table->s->reclength);
+ DBUG_PRINT("info", ("ref_length: %u", ref_length));
+
+ reset();
+
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Closes a table. We call the free_share() function to free any resources
+ that we have allocated in the "shared" structure.
+
+ Called from sql_base.cc, sql_select.cc, and table.cc.
+ In sql_select.cc it is only used to close up temporary tables or during
+ the process where a temporary table is converted over to being a
+ myisam table.
+ For sql_base.cc look at close_data_tables().
+*/
+
+int ha_federatedx::close(void)
+{
+  int retval= 0, error;
+ THD *thd= current_thd;
+ DBUG_ENTER("ha_federatedx::close");
+
+ /* free the result set */
+ if (stored_result)
+ retval= free_result();
+
+ /* Disconnect from mysql */
+ if ((txn= get_txn(thd, true)))
+ txn->release(&io);
+
+ DBUG_ASSERT(io == NULL);
+
+ if ((error= free_share(txn, share)))
+ retval= error;
+ DBUG_RETURN(retval);
+}
+
+/*
+
+ Checks if a field in a record is SQL NULL.
+
+ SYNOPSIS
+ field_in_record_is_null()
+ table TABLE pointer, MySQL table object
+ field Field pointer, MySQL field object
+ record char pointer, contains record
+
+ DESCRIPTION
+ This method uses the record format information in table to track
+ the null bit in record.
+
+ RETURN VALUE
+ 1 if NULL
+ 0 otherwise
+*/
+
+static inline uint field_in_record_is_null(TABLE *table,
+ Field *field,
+ char *record)
+{
+ int null_offset;
+ DBUG_ENTER("ha_federatedx::field_in_record_is_null");
+
+ if (!field->null_ptr)
+ DBUG_RETURN(0);
+
+ null_offset= (uint) ((char*)field->null_ptr - (char*)table->record[0]);
+
+ if (record[null_offset] & field->null_bit)
+ DBUG_RETURN(1);
+
+ DBUG_RETURN(0);
+}
+
+
+/**
+ @brief Construct the INSERT statement.
+
+ @details This method will construct the INSERT statement and appends it to
+ the supplied query string buffer.
+
+ @return
+ @retval FALSE No error
+ @retval TRUE Failure
+*/
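+
+/*
+  Illustrative only (assumed, not taken from an actual trace): for a table
+  t1 with columns a and b both present in the write set, the prefix
+  appended by this method looks like:
+
+    INSERT INTO `t1` (`a`, `b`)  VALUES
+*/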
+
+bool ha_federatedx::append_stmt_insert(String *query)
+{
+ char insert_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ Field **field;
+ uint tmp_length;
+ bool added_field= FALSE;
+
+ /* The main insert query string */
+ String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin);
+ DBUG_ENTER("ha_federatedx::append_stmt_insert");
+
+ insert_string.length(0);
+
+ if (replace_duplicates)
+ insert_string.append(STRING_WITH_LEN("REPLACE INTO "));
+ else if (ignore_duplicates && !insert_dup_update)
+ insert_string.append(STRING_WITH_LEN("INSERT IGNORE INTO "));
+ else
+ insert_string.append(STRING_WITH_LEN("INSERT INTO "));
+ append_ident(&insert_string, share->table_name, share->table_name_length,
+ ident_quote_char);
+ tmp_length= insert_string.length();
+ insert_string.append(STRING_WITH_LEN(" ("));
+
+ /*
+ loop through the field pointer array, add any fields to both the values
+ list and the fields list that match the current query id
+ */
+ for (field= table->field; *field; field++)
+ {
+ if (bitmap_is_set(table->write_set, (*field)->field_index))
+ {
+ /* append the field name */
+ append_ident(&insert_string, (*field)->field_name,
+ strlen((*field)->field_name), ident_quote_char);
+
+ /* append commas between both fields and fieldnames */
+ /*
+ unfortunately, we can't use the logic if *(fields + 1) to
+ make the following appends conditional as we don't know if the
+ next field is in the write set
+ */
+ insert_string.append(STRING_WITH_LEN(", "));
+ added_field= TRUE;
+ }
+ }
+
+ if (added_field)
+ {
+ /* Remove trailing comma. */
+ insert_string.length(insert_string.length() - sizeof_trailing_comma);
+ insert_string.append(STRING_WITH_LEN(") "));
+ }
+ else
+ {
+ /* If there were no fields, we don't want to add a closing paren. */
+ insert_string.length(tmp_length);
+ }
+
+ insert_string.append(STRING_WITH_LEN(" VALUES "));
+
+ DBUG_RETURN(query->append(insert_string));
+}
+
+
+/*
+ write_row() inserts a row. No extra() hint is given currently if a bulk load
+  is happening. buf() is a byte array of data. You can use the field
+ information to extract the data from the native byte array type.
+ Example of this would be:
+ for (Field **field=table->field ; *field ; field++)
+ {
+ ...
+ }
+
+ Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
+ sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+*/
+
+int ha_federatedx::write_row(uchar *buf)
+{
+ char values_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ char insert_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
+ Field **field;
+ uint tmp_length;
+ int error= 0;
+ bool use_bulk_insert;
+ bool auto_increment_update_required= (table->next_number_field != NULL);
+
+ /* The string containing the values to be added to the insert */
+ String values_string(values_buffer, sizeof(values_buffer), &my_charset_bin);
+ /* The actual value of the field, to be added to the values_string */
+ String insert_field_value_string(insert_field_value_buffer,
+ sizeof(insert_field_value_buffer),
+ &my_charset_bin);
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ DBUG_ENTER("ha_federatedx::write_row");
+
+ values_string.length(0);
+ insert_field_value_string.length(0);
+ ha_statistic_increment(&SSV::ha_write_count);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+ table->timestamp_field->set_time();
+
+ /*
+ start both our field and field values strings
+ We must disable multi-row insert for "INSERT...ON DUPLICATE KEY UPDATE"
+ Ignore duplicates is always true when insert_dup_update is true.
+ When replace_duplicates == TRUE, we can safely enable multi-row insert.
+ When performing multi-row insert, we only collect the columns values for
+ the row. The start of the statement is only created when the first
+    row is copied into the bulk_insert string.
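+    For illustration (placeholder values, not from an actual trace), the
+    accumulated statement ends up looking like:
+      INSERT INTO `t1` (`a`, `b`)  VALUES  (1, 'x') , (2, 'y') , (3, 'z')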
+ */
+ if (!(use_bulk_insert= bulk_insert.str &&
+ (!insert_dup_update || replace_duplicates)))
+ append_stmt_insert(&values_string);
+
+ values_string.append(STRING_WITH_LEN(" ("));
+ tmp_length= values_string.length();
+
+ /*
+ loop through the field pointer array, add any fields to both the values
+ list and the fields list that is part of the write set
+ */
+ for (field= table->field; *field; field++)
+ {
+ if (bitmap_is_set(table->write_set, (*field)->field_index))
+ {
+ if ((*field)->is_null())
+ values_string.append(STRING_WITH_LEN(" NULL "));
+ else
+ {
+ bool needs_quote= (*field)->str_needs_quotes();
+ (*field)->val_str(&insert_field_value_string);
+ if (needs_quote)
+ values_string.append(value_quote_char);
+ insert_field_value_string.print(&values_string);
+ if (needs_quote)
+ values_string.append(value_quote_char);
+
+ insert_field_value_string.length(0);
+ }
+
+ /* append commas between both fields and fieldnames */
+ /*
+ unfortunately, we can't use the logic if *(fields + 1) to
+ make the following appends conditional as we don't know if the
+ next field is in the write set
+ */
+ values_string.append(STRING_WITH_LEN(", "));
+ }
+ }
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+
+ /*
+ if there were no fields, we don't want to add a closing paren
+ AND, we don't want to chop off the last char '('
+ insert will be "INSERT INTO t1 VALUES ();"
+ */
+ if (values_string.length() > tmp_length)
+ {
+ /* chops off trailing comma */
+ values_string.length(values_string.length() - sizeof_trailing_comma);
+ }
+ /* we always want to append this, even if there aren't any fields */
+ values_string.append(STRING_WITH_LEN(") "));
+
+ if ((error= txn->acquire(share, FALSE, &io)))
+ DBUG_RETURN(error);
+
+ if (use_bulk_insert)
+ {
+ /*
+ Send the current bulk insert out if appending the current row would
+ cause the statement to overflow the packet size, otherwise set
+ auto_increment_update_required to FALSE as no query was executed.
+ */
+ if (bulk_insert.length + values_string.length() + bulk_padding >
+ io->max_query_size() && bulk_insert.length)
+ {
+ error= io->query(bulk_insert.str, bulk_insert.length);
+ bulk_insert.length= 0;
+ }
+ else
+ auto_increment_update_required= FALSE;
+
+ if (bulk_insert.length == 0)
+ {
+ char insert_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ String insert_string(insert_buffer, sizeof(insert_buffer),
+ &my_charset_bin);
+ insert_string.length(0);
+ append_stmt_insert(&insert_string);
+ dynstr_append_mem(&bulk_insert, insert_string.ptr(),
+ insert_string.length());
+ }
+ else
+ dynstr_append_mem(&bulk_insert, ",", 1);
+
+ dynstr_append_mem(&bulk_insert, values_string.ptr(),
+ values_string.length());
+ }
+ else
+ {
+ error= io->query(values_string.ptr(), values_string.length());
+ }
+
+ if (error)
+ {
+ DBUG_RETURN(stash_remote_error());
+ }
+ /*
+ If the table we've just written a record to contains an auto_increment
+ field, then store the last_insert_id() value from the foreign server
+ */
+ if (auto_increment_update_required)
+ {
+ update_auto_increment();
+
+ /* mysql_insert() uses this for protocol return value */
+ table->next_number_field->store(stats.auto_increment_value, 1);
+ }
+
+ DBUG_RETURN(0);
+}
+
+
+/**
+ @brief Prepares the storage engine for bulk inserts.
+
+ @param[in] rows estimated number of rows in bulk insert
+ or 0 if unknown.
+
+ @details Initializes memory structures required for bulk insert.
+*/
+
+void ha_federatedx::start_bulk_insert(ha_rows rows)
+{
+ uint page_size;
+ DBUG_ENTER("ha_federatedx::start_bulk_insert");
+
+ dynstr_free(&bulk_insert);
+
+ /**
+    We don't bother with bulk-insert semantics when the estimated rows == 1.
+ The rows value will be 0 if the server does not know how many rows
+ would be inserted. This can occur when performing INSERT...SELECT
+ */
+
+ if (rows == 1)
+ DBUG_VOID_RETURN;
+
+ /*
+ Make sure we have an open connection so that we know the
+ maximum packet size.
+ */
+ if (txn->acquire(share, FALSE, &io))
+ DBUG_VOID_RETURN;
+
+ page_size= (uint) my_getpagesize();
+
+ if (init_dynamic_string(&bulk_insert, NULL, page_size, page_size))
+ DBUG_VOID_RETURN;
+
+ bulk_insert.length= 0;
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ @brief End bulk insert.
+
+ @details This method will send any remaining rows to the remote server.
+ Finally, it will deinitialize the bulk insert data structure.
+
+ @return Operation status
+ @retval 0 No error
+  @retval != 0    Error occurred at remote server. Also sets my_errno.
+*/
+
+int ha_federatedx::end_bulk_insert(bool abort)
+{
+ int error= 0;
+ DBUG_ENTER("ha_federatedx::end_bulk_insert");
+
+ if (bulk_insert.str && bulk_insert.length && !abort)
+ {
+ if ((error= txn->acquire(share, FALSE, &io)))
+ DBUG_RETURN(error);
+ if (io->query(bulk_insert.str, bulk_insert.length))
+ error= stash_remote_error();
+ else
+ if (table->next_number_field)
+ update_auto_increment();
+ }
+
+ dynstr_free(&bulk_insert);
+
+ DBUG_RETURN(my_errno= error);
+}
+
+
+/*
+ ha_federatedx::update_auto_increment
+
+  This method ensures that last_insert_id() works properly. What it simply does
+  is call last_insert_id() on the foreign database immediately after the insert
+  (if the table has an auto_increment field) and set the insert id via
+  thd->insert_id(ID).
+*/
+void ha_federatedx::update_auto_increment(void)
+{
+ THD *thd= current_thd;
+ DBUG_ENTER("ha_federatedx::update_auto_increment");
+
+ ha_federatedx::info(HA_STATUS_AUTO);
+ thd->first_successful_insert_id_in_cur_stmt=
+ stats.auto_increment_value;
+ DBUG_PRINT("info",("last_insert_id: %ld", (long) stats.auto_increment_value));
+
+ DBUG_VOID_RETURN;
+}
+
+int ha_federatedx::optimize(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ int error= 0;
+ char query_buffer[STRING_BUFFER_USUAL_SIZE];
+ String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+ DBUG_ENTER("ha_federatedx::optimize");
+
+ query.length(0);
+
+ query.set_charset(system_charset_info);
+ query.append(STRING_WITH_LEN("OPTIMIZE TABLE "));
+ append_ident(&query, share->table_name, share->table_name_length,
+ ident_quote_char);
+
+ DBUG_ASSERT(txn == get_txn(thd));
+
+ if ((error= txn->acquire(share, FALSE, &io)))
+ DBUG_RETURN(error);
+
+ if (io->query(query.ptr(), query.length()))
+ error= stash_remote_error();
+
+ DBUG_RETURN(error);
+}
+
+
+int ha_federatedx::repair(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ int error= 0;
+ char query_buffer[STRING_BUFFER_USUAL_SIZE];
+ String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+ DBUG_ENTER("ha_federatedx::repair");
+
+ query.length(0);
+
+ query.set_charset(system_charset_info);
+ query.append(STRING_WITH_LEN("REPAIR TABLE "));
+ append_ident(&query, share->table_name, share->table_name_length,
+ ident_quote_char);
+ if (check_opt->flags & T_QUICK)
+ query.append(STRING_WITH_LEN(" QUICK"));
+ if (check_opt->flags & T_EXTEND)
+ query.append(STRING_WITH_LEN(" EXTENDED"));
+ if (check_opt->sql_flags & TT_USEFRM)
+ query.append(STRING_WITH_LEN(" USE_FRM"));
+
+ DBUG_ASSERT(txn == get_txn(thd));
+
+ if ((error= txn->acquire(share, FALSE, &io)))
+ DBUG_RETURN(error);
+
+ if (io->query(query.ptr(), query.length()))
+ error= stash_remote_error();
+
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Yes, update_row() does what you expect, it updates a row. old_data will have
+ the previous row record in it, while new_data will have the newest data in
+ it.
+
+ Keep in mind that the server can do updates based on ordering if an ORDER BY
+ clause was used. Consecutive ordering is not guaranteed.
+  Currently new_data will not have an updated auto_increment record, or
+  an updated timestamp field. You can handle these for federatedx as follows:
+ if (table->timestamp_on_update_now)
+ update_timestamp(new_row+table->timestamp_on_update_now-1);
+ if (table->next_number_field && record == table->record[0])
+ update_auto_increment();
+
+ Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
+*/
+
+int ha_federatedx::update_row(const uchar *old_data, uchar *new_data)
+{
+ /*
+ This used to control how the query was built. If there was a
+ primary key, the query would be built such that there was a where
+ clause with only that column as the condition. This is flawed,
+ because if we have a multi-part primary key, it would only use the
+ first part! We don't need to do this anyway, because
+ read_range_first will retrieve the correct record, which is what
+ is used to build the WHERE clause. We can however use this to
+ append a LIMIT to the end if there is NOT a primary key. Why do
+    this? Because we are only updating one record, and LIMIT enforces
+    this.
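+
+    For illustration (placeholder table and values): without a primary
+    key the generated statement looks roughly like
+      UPDATE `t1` SET `a` = 2, `b` = 'y' WHERE `a` = 1 AND `b` = 'x' LIMIT 1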
+ */
+ bool has_a_primary_key= test(table->s->primary_key != MAX_KEY);
+
+ /*
+ buffers for following strings
+ */
+ char field_value_buffer[STRING_BUFFER_USUAL_SIZE];
+ char update_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ char where_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+
+ /* Work area for field values */
+ String field_value(field_value_buffer, sizeof(field_value_buffer),
+ &my_charset_bin);
+ /* stores the update query */
+ String update_string(update_buffer,
+ sizeof(update_buffer),
+ &my_charset_bin);
+ /* stores the WHERE clause */
+ String where_string(where_buffer,
+ sizeof(where_buffer),
+ &my_charset_bin);
+ uchar *record= table->record[0];
+ int error;
+ DBUG_ENTER("ha_federatedx::update_row");
+ /*
+ set string lengths to 0 to avoid misc chars in string
+ */
+ field_value.length(0);
+ update_string.length(0);
+ where_string.length(0);
+
+ if (ignore_duplicates)
+ update_string.append(STRING_WITH_LEN("UPDATE IGNORE "));
+ else
+ update_string.append(STRING_WITH_LEN("UPDATE "));
+ append_ident(&update_string, share->table_name,
+ share->table_name_length, ident_quote_char);
+ update_string.append(STRING_WITH_LEN(" SET "));
+
+ /*
+ In this loop, we want to match column names to values being inserted
+ (while building INSERT statement).
+
+ Iterate through table->field (new data) and share->old_field (old_data)
+ using the same index to create an SQL UPDATE statement. New data is
+ used to create SET field=value and old data is used to create WHERE
+ field=oldvalue
+ */
+
+ for (Field **field= table->field; *field; field++)
+ {
+ if (bitmap_is_set(table->write_set, (*field)->field_index))
+ {
+ uint field_name_length= strlen((*field)->field_name);
+ append_ident(&update_string, (*field)->field_name, field_name_length,
+ ident_quote_char);
+ update_string.append(STRING_WITH_LEN(" = "));
+
+ if ((*field)->is_null())
+ update_string.append(STRING_WITH_LEN(" NULL "));
+ else
+ {
+ /* otherwise = */
+ my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
+ bool needs_quote= (*field)->str_needs_quotes();
+ (*field)->val_str(&field_value);
+ if (needs_quote)
+ update_string.append(value_quote_char);
+ field_value.print(&update_string);
+ if (needs_quote)
+ update_string.append(value_quote_char);
+ field_value.length(0);
+ tmp_restore_column_map(table->read_set, old_map);
+ }
+ update_string.append(STRING_WITH_LEN(", "));
+ }
+
+ if (bitmap_is_set(table->read_set, (*field)->field_index))
+ {
+ uint field_name_length= strlen((*field)->field_name);
+ append_ident(&where_string, (*field)->field_name, field_name_length,
+ ident_quote_char);
+ if (field_in_record_is_null(table, *field, (char*) old_data))
+ where_string.append(STRING_WITH_LEN(" IS NULL "));
+ else
+ {
+ bool needs_quote= (*field)->str_needs_quotes();
+ where_string.append(STRING_WITH_LEN(" = "));
+ (*field)->val_str(&field_value,
+ (old_data + (*field)->offset(record)));
+ if (needs_quote)
+ where_string.append(value_quote_char);
+ field_value.print(&where_string);
+ if (needs_quote)
+ where_string.append(value_quote_char);
+ field_value.length(0);
+ }
+ where_string.append(STRING_WITH_LEN(" AND "));
+ }
+ }
+
+ /* Remove last ', '. This works as there must be at least one updated field */
+ update_string.length(update_string.length() - sizeof_trailing_comma);
+
+ if (where_string.length())
+ {
+ /* chop off trailing AND */
+ where_string.length(where_string.length() - sizeof_trailing_and);
+ update_string.append(STRING_WITH_LEN(" WHERE "));
+ update_string.append(where_string);
+ }
+
+ /*
+ If this table does not have a primary key, then we could possibly
+ update multiple rows. We want to make sure to only update one!
+ */
+ if (!has_a_primary_key)
+ update_string.append(STRING_WITH_LEN(" LIMIT 1"));
+
+ if ((error= txn->acquire(share, FALSE, &io)))
+ DBUG_RETURN(error);
+
+ if (io->query(update_string.ptr(), update_string.length()))
+ {
+ DBUG_RETURN(stash_remote_error());
+ }
+ DBUG_RETURN(0);
+}
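+
+/*
+  Illustrative sketch of the statement this method generates; the table,
+  column names and values below are hypothetical. For a remote table t1
+  with columns a and b, no primary key, and an old row (1,'old'), a local
+  "UPDATE t1 SET b='new' WHERE a=1" is sent to the remote server roughly as
+
+    UPDATE `t1` SET `b` = 'new' WHERE `a` = 1 AND `b` = 'old' LIMIT 1
+
+  The SET list is built from the write set, the WHERE clause from the old
+  values of the read set, and LIMIT 1 is appended only because the table
+  has no primary key.
+*/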
+
+/*
+ This will delete a row. 'buf' will contain a copy of the row to be deleted.
+ The server will call this right after the current row has been read (from
+ either a previous rnd_next() or index call).
+ If you keep a pointer to the last row or can access a primary key it will
+ make doing the deletion quite a bit easier.
+ Keep in mind that the server does not guarantee consecutive deletions.
+ ORDER BY clauses can be used.
+
+ Called in sql_acl.cc and sql_udf.cc to manage internal table information.
+ Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
+ it is used for removing duplicates while in insert it is used for REPLACE
+ calls.
+*/
+
+int ha_federatedx::delete_row(const uchar *buf)
+{
+ char delete_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ char data_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ String delete_string(delete_buffer, sizeof(delete_buffer), &my_charset_bin);
+ String data_string(data_buffer, sizeof(data_buffer), &my_charset_bin);
+ uint found= 0;
+ int error;
+ DBUG_ENTER("ha_federatedx::delete_row");
+
+ delete_string.length(0);
+ delete_string.append(STRING_WITH_LEN("DELETE FROM "));
+ append_ident(&delete_string, share->table_name,
+ share->table_name_length, ident_quote_char);
+ delete_string.append(STRING_WITH_LEN(" WHERE "));
+
+ for (Field **field= table->field; *field; field++)
+ {
+ Field *cur_field= *field;
+ found++;
+ if (bitmap_is_set(table->read_set, cur_field->field_index))
+ {
+ append_ident(&delete_string, (*field)->field_name,
+ strlen((*field)->field_name), ident_quote_char);
+ data_string.length(0);
+ if (cur_field->is_null())
+ {
+ delete_string.append(STRING_WITH_LEN(" IS NULL "));
+ }
+ else
+ {
+ bool needs_quote= cur_field->str_needs_quotes();
+ delete_string.append(STRING_WITH_LEN(" = "));
+ cur_field->val_str(&data_string);
+ if (needs_quote)
+ delete_string.append(value_quote_char);
+ data_string.print(&delete_string);
+ if (needs_quote)
+ delete_string.append(value_quote_char);
+ }
+ delete_string.append(STRING_WITH_LEN(" AND "));
+ }
+ }
+
+ // Remove trailing AND
+ delete_string.length(delete_string.length() - sizeof_trailing_and);
+ if (!found)
+ delete_string.length(delete_string.length() - sizeof_trailing_where);
+
+ delete_string.append(STRING_WITH_LEN(" LIMIT 1"));
+ DBUG_PRINT("info",
+ ("Delete sql: %s", delete_string.c_ptr_quick()));
+
+ if ((error= txn->acquire(share, FALSE, &io)))
+ DBUG_RETURN(error);
+
+ if (io->query(delete_string.ptr(), delete_string.length()))
+ {
+ DBUG_RETURN(stash_remote_error());
+ }
+ stats.deleted+= (ha_rows) io->affected_rows();
+ stats.records-= (ha_rows) io->affected_rows();
+ DBUG_PRINT("info",
+ ("rows deleted %ld rows deleted for all time %ld",
+ (long) io->affected_rows(), (long) stats.deleted));
+
+ DBUG_RETURN(0);
+}
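+
+/*
+  Illustrative sketch of the generated statement (hypothetical names and
+  values): deleting the row (1,'old') from a remote table t1 with columns
+  a and b results in roughly
+
+    DELETE FROM `t1` WHERE `a` = 1 AND `b` = 'old' LIMIT 1
+
+  Every column in the read set contributes one "= value" or "IS NULL"
+  predicate, and LIMIT 1 ensures that at most one of possibly several
+  identical rows is removed.
+*/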
+
+
+/*
+ Positions an index cursor to the index specified in the handle. Fetches the
+ row if available. If the key value is null, begin at the first key of the
+ index. This method, which is called in the case of an SQL statement having
+ a WHERE clause on a non-primary key index, simply delegates to
+ index_read_idx_with_result_set().
+*/
+
+int ha_federatedx::index_read(uchar *buf, const uchar *key,
+ uint key_len, ha_rkey_function find_flag)
+{
+ DBUG_ENTER("ha_federatedx::index_read");
+
+ if (stored_result)
+ (void) free_result();
+ DBUG_RETURN(index_read_idx_with_result_set(buf, active_index, key,
+ key_len, find_flag,
+ &stored_result));
+}
+
+
+/*
+ Positions an index cursor to the index specified in key. Fetches the
+ row if any. This is only used to read whole keys.
+
+ This method is called via index_read in the case of a WHERE clause using
+ a non-primary key index, or directly when the WHERE clause uses a
+ PRIMARY KEY index.
+
+ NOTES
+ This uses an internal result set that is freed before the function
+ returns. It needs to remain callable from ha_rnd_pos().
+*/
+
+int ha_federatedx::index_read_idx(uchar *buf, uint index, const uchar *key,
+ uint key_len, enum ha_rkey_function find_flag)
+{
+ int retval;
+ FEDERATEDX_IO_RESULT *io_result;
+ DBUG_ENTER("ha_federatedx::index_read_idx");
+
+ if ((retval= index_read_idx_with_result_set(buf, index, key,
+ key_len, find_flag,
+ &io_result)))
+ DBUG_RETURN(retval);
+ /* io is correct, as index_read_idx_with_result_set was ok */
+ io->free_result(io_result);
+ DBUG_RETURN(retval);
+}
+
+
+/*
+ Create result set for rows matching query and return first row
+
+ RESULT
+ 0 ok In this case *result will contain the result set
+ table->status == 0
+ # error In this case *result will contain 0
+ table->status == STATUS_NOT_FOUND
+*/
+
+int ha_federatedx::index_read_idx_with_result_set(uchar *buf, uint index,
+ const uchar *key,
+ uint key_len,
+ ha_rkey_function find_flag,
+ FEDERATEDX_IO_RESULT **result)
+{
+ int retval;
+ char error_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ char index_value[STRING_BUFFER_USUAL_SIZE];
+ char sql_query_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ String index_string(index_value,
+ sizeof(index_value),
+ &my_charset_bin);
+ String sql_query(sql_query_buffer,
+ sizeof(sql_query_buffer),
+ &my_charset_bin);
+ key_range range;
+ DBUG_ENTER("ha_federatedx::index_read_idx_with_result_set");
+
+ *result= 0; // In case of errors
+ index_string.length(0);
+ sql_query.length(0);
+ ha_statistic_increment(&SSV::ha_read_key_count);
+
+ sql_query.append(share->select_query);
+
+ range.key= key;
+ range.length= key_len;
+ range.flag= find_flag;
+ create_where_from_key(&index_string,
+ &table->key_info[index],
+ &range,
+ NULL, 0, 0);
+ sql_query.append(index_string);
+
+ if ((retval= txn->acquire(share, TRUE, &io)))
+ DBUG_RETURN(retval);
+
+ if (io->query(sql_query.ptr(), sql_query.length()))
+ {
+ my_sprintf(error_buffer, (error_buffer, "error: %d '%s'",
+ io->error_code(), io->error_str()));
+ retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+ goto error;
+ }
+ if (!(*result= io->store_result()))
+ {
+ retval= HA_ERR_END_OF_FILE;
+ goto error;
+ }
+ if (!(retval= read_next(buf, *result)))
+ DBUG_RETURN(retval);
+
+ io->free_result(*result);
+ *result= 0;
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(retval);
+
+error:
+ table->status= STATUS_NOT_FOUND;
+ my_error(retval, MYF(0), error_buffer);
+ DBUG_RETURN(retval);
+}
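+
+/*
+  Illustrative sketch (hypothetical schema): if share->select_query is
+  "SELECT `a`, `b` FROM `t1`" and the lookup is an exact match on an index
+  over column a with key value 42, create_where_from_key() appends the key
+  condition and the remote query sent above is roughly
+
+    SELECT `a`, `b` FROM `t1` WHERE (`a` = 42)
+
+  The exact form of the condition depends on the key parts and on find_flag.
+*/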
+
+
+/*
+ This method is used exclusively by filesort() to check whether it can
+ create sorting buffers of the necessary size.
+ If the handler returns more records than it declares here,
+ the server can crash in filesort().
+ We cannot guarantee that will not happen with
+ the FEDERATEDX engine, as we always have records==0 if the
+ client is a VIEW, and for a table the number of
+ records can change unpredictably during execution.
+ So we return the maximum possible value here.
+*/
+
+ha_rows ha_federatedx::estimate_rows_upper_bound()
+{
+ return HA_POS_ERROR;
+}
+
+
+/* Initialized at each key walk (called multiple times unlike rnd_init()) */
+
+int ha_federatedx::index_init(uint keynr, bool sorted)
+{
+ DBUG_ENTER("ha_federatedx::index_init");
+ DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name.str, keynr));
+ active_index= keynr;
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Read first range
+*/
+
+int ha_federatedx::read_range_first(const key_range *start_key,
+ const key_range *end_key,
+ bool eq_range_arg, bool sorted)
+{
+ char sql_query_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ int retval;
+ String sql_query(sql_query_buffer,
+ sizeof(sql_query_buffer),
+ &my_charset_bin);
+ DBUG_ENTER("ha_federatedx::read_range_first");
+
+ DBUG_ASSERT(!(start_key == NULL && end_key == NULL));
+
+ sql_query.length(0);
+ sql_query.append(share->select_query);
+ create_where_from_key(&sql_query,
+ &table->key_info[active_index],
+ start_key, end_key, 0, eq_range_arg);
+
+ if ((retval= txn->acquire(share, TRUE, &io)))
+ DBUG_RETURN(retval);
+
+ if (stored_result)
+ {
+ io->free_result(stored_result);
+ stored_result= 0;
+ }
+
+ if (io->query(sql_query.ptr(), sql_query.length()))
+ {
+ retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+ goto error;
+ }
+ sql_query.length(0);
+
+ if (!(stored_result= io->store_result()))
+ {
+ retval= HA_ERR_END_OF_FILE;
+ goto error;
+ }
+
+ retval= read_next(table->record[0], stored_result);
+ DBUG_RETURN(retval);
+
+error:
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(retval);
+}
+
+
+int ha_federatedx::read_range_next()
+{
+ int retval;
+ DBUG_ENTER("ha_federatedx::read_range_next");
+ retval= rnd_next(table->record[0]);
+ DBUG_RETURN(retval);
+}
+
+
+/* Used to read forward through the index. */
+int ha_federatedx::index_next(uchar *buf)
+{
+ DBUG_ENTER("ha_federatedx::index_next");
+ ha_statistic_increment(&SSV::ha_read_next_count);
+ DBUG_RETURN(read_next(buf, stored_result));
+}
+
+
+/*
+ rnd_init() is called when the system wants the storage engine to do a table
+ scan.
+
+ This is the method that gets data for the SELECT calls.
+
+ See the description in the introduction at the top of this file to see when
+ rnd_init() is called.
+
+ Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+ sql_table.cc, and sql_update.cc.
+*/
+
+int ha_federatedx::rnd_init(bool scan)
+{
+ DBUG_ENTER("ha_federatedx::rnd_init");
+ /*
+ The use of the 'scan' flag is incredibly important for this handler
+ to work properly, especially with updates containing WHERE clauses
+ using indexed columns.
+
+ When the initial query contains a WHERE clause using an
+ indexed column, it's index_read_idx that selects the exact record from
+ the foreign database.
+
+ When there is NO index in the query, either due to not having a WHERE
+ clause, or because the WHERE clause uses columns that are not indexed, a
+ 'full table scan' is done by rnd_init, which in this situation simply means
+ a 'select * from ...' on the foreign table.
+
+ In other words, this 'scan' flag gives us the means to ensure that if
+ there is an index involved in the query, we want index_read_idx to
+ retrieve the exact record (scan flag is 0), and do not want rnd_init
+ to do a 'full table scan' and wipe out that result set.
+
+ Prior to using this flag, the problem was most apparent with updates.
+
+ For an initial query like 'UPDATE tablename SET anything = whatever WHERE
+ indexedcol = someval', index_read_idx would get called, using a query
+ constructed with a WHERE clause built from the values of the index
+ ('indexedcol' in this case, having a value of 'someval'). mysql_store_result
+ would then get called (this is the result set we want to use).
+
+ After this, rnd_init (from sql_update.cc) would be called; it would then
+ unnecessarily call "select * from table" on the foreign table and call
+ mysql_store_result again, wiping out the previous result set from
+ index_read_idx that contained the correct record, and hence update the
+ wrong row!
+
+ */
+
+ if (scan)
+ {
+ int error;
+
+ if ((error= txn->acquire(share, TRUE, &io)))
+ DBUG_RETURN(error);
+
+ if (stored_result)
+ {
+ io->free_result(stored_result);
+ stored_result= 0;
+ }
+
+ if (io->query(share->select_query,
+ strlen(share->select_query)))
+ goto error;
+
+ stored_result= io->store_result();
+ if (!stored_result)
+ goto error;
+ }
+ DBUG_RETURN(0);
+
+error:
+ DBUG_RETURN(stash_remote_error());
+}
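+
+/*
+  Condensed example of the two paths described above (hypothetical
+  statement): for "UPDATE t1 SET b='x' WHERE a=1" with an index on a,
+  index_read_idx fetches the matching row through a keyed SELECT and
+  rnd_init() is then called with scan == 0, leaving that result set
+  untouched. Without a usable index, rnd_init() is called with scan == 1
+  and simply sends share->select_query (a plain SELECT of all columns) to
+  the remote table.
+*/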
+
+
+int ha_federatedx::rnd_end()
+{
+ DBUG_ENTER("ha_federatedx::rnd_end");
+ DBUG_RETURN(index_end());
+}
+
+
+int ha_federatedx::free_result()
+{
+ int error;
+ DBUG_ASSERT(stored_result);
+ if ((error= txn->acquire(share, FALSE, &io)))
+ {
+ DBUG_ASSERT(0); // Fail when testing
+ return error;
+ }
+ io->free_result(stored_result);
+ stored_result= 0;
+ return 0;
+}
+
+int ha_federatedx::index_end(void)
+{
+ int error= 0;
+ DBUG_ENTER("ha_federatedx::index_end");
+ if (stored_result)
+ error= free_result();
+ active_index= MAX_KEY;
+ DBUG_RETURN(error);
+}
+
+
+/*
+ This is called for each row of the table scan. When you run out of records
+ you should return HA_ERR_END_OF_FILE. Fill buff up with the row information.
+ The Field structure for the table is the key to getting data into buf
+ in a manner that will allow the server to understand it.
+
+ Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+ sql_table.cc, and sql_update.cc.
+*/
+
+int ha_federatedx::rnd_next(uchar *buf)
+{
+ DBUG_ENTER("ha_federatedx::rnd_next");
+
+ if (stored_result == 0)
+ {
+ /*
+ Return value of rnd_init is not always checked (see records.cc),
+ so we can get here _even_ if there is _no_ pre-fetched result-set!
+ TODO: fix it. We can delete this in 5.1 when rnd_init() is checked.
+ */
+ DBUG_RETURN(1);
+ }
+ DBUG_RETURN(read_next(buf, stored_result));
+}
+
+
+/*
+ ha_federatedx::read_next
+
+ reads from a result set and converts to mysql internal
+ format
+
+ SYNOPSIS
+ read_next()
+ buf byte pointer to record
+ result mysql result set
+
+ DESCRIPTION
+ This method is a wrapper method that reads one record from a result
+ set and converts it to the internal table format
+
+ RETURN VALUE
+ 1 error
+ 0 no error
+*/
+
+int ha_federatedx::read_next(uchar *buf, FEDERATEDX_IO_RESULT *result)
+{
+ int retval;
+ FEDERATEDX_IO_ROW *row;
+ DBUG_ENTER("ha_federatedx::read_next");
+
+ table->status= STATUS_NOT_FOUND; // For easier return
+
+ if ((retval= txn->acquire(share, TRUE, &io)))
+ DBUG_RETURN(retval);
+
+ /* Fetch a row, insert it back in a row format. */
+ if (!(row= io->fetch_row(result)))
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+ if (!(retval= convert_row_to_internal_format(buf, row, result)))
+ table->status= 0;
+
+ DBUG_RETURN(retval);
+}
+
+
+/*
+ store reference to current row so that we can later find it for
+ a re-read, update or delete.
+
+ In case of federatedx, a reference is either a primary key or
+ the whole record.
+
+ Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
+*/
+
+void ha_federatedx::position(const uchar *record)
+{
+ DBUG_ENTER("ha_federatedx::position");
+ if (table->s->primary_key != MAX_KEY)
+ key_copy(ref, (uchar *)record, table->key_info + table->s->primary_key,
+ ref_length);
+ else
+ memcpy(ref, record, ref_length);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ This is like rnd_next, but you are given a position to use to determine the
+ row. The position will be of the type that you stored in ref.
+
+ This method is required for an ORDER BY
+
+ Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc.
+*/
+
+int ha_federatedx::rnd_pos(uchar *buf, uchar *pos)
+{
+ int result;
+ DBUG_ENTER("ha_federatedx::rnd_pos");
+ ha_statistic_increment(&SSV::ha_read_rnd_count);
+ if (table->s->primary_key != MAX_KEY)
+ {
+ /* We have a primary key, so use index_read_idx to find row */
+ result= index_read_idx(buf, table->s->primary_key, pos,
+ ref_length, HA_READ_KEY_EXACT);
+ }
+ else
+ {
+ /* otherwise, get the old record ref as obtained in ::position */
+ memcpy(buf, pos, ref_length);
+ result= 0;
+ }
+ table->status= result ? STATUS_NOT_FOUND : 0;
+ DBUG_RETURN(result);
+}
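+
+/*
+  Sketch of the position()/rnd_pos() round trip: with a primary key,
+  position() copies the key columns of the current row into 'ref' and
+  rnd_pos() later re-reads exactly that row from the remote server via
+  index_read_idx(). Without a primary key, 'ref' holds the whole record
+  image and rnd_pos() simply copies it back into 'buf'.
+*/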
+
+
+/*
+ ::info() is used to return information to the optimizer.
+ Currently this table handler doesn't implement most of the fields
+ really needed. SHOW also makes use of this data.
+ Another note: you will probably want to have the following in your
+ code:
+ if (records < 2)
+ records = 2;
+ The reason is that the server will optimize for cases of only a single
+ record. If in a table scan you don't know the number of records
+ it will probably be better to set records to two so you can return
+ as many records as you need.
+ Along with records, a few more variables you may wish to set are:
+ records
+ deleted
+ data_file_length
+ index_file_length
+ delete_length
+ check_time
+ Take a look at the public variables in handler.h for more information.
+
+ Called in:
+ filesort.cc
+ ha_heap.cc
+ item_sum.cc
+ opt_sum.cc
+ sql_delete.cc
+ sql_delete.cc
+ sql_derived.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_show.cc
+ sql_show.cc
+ sql_show.cc
+ sql_show.cc
+ sql_table.cc
+ sql_union.cc
+ sql_update.cc
+
+*/
+
+int ha_federatedx::info(uint flag)
+{
+ char error_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ uint error_code;
+ federatedx_io *tmp_io= 0;
+ DBUG_ENTER("ha_federatedx::info");
+
+ error_code= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+
+ /* we do not want to show table status if it is not needed */
+ if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST | HA_STATUS_AUTO))
+ {
+ if ((error_code= txn->acquire(share, TRUE, &tmp_io)))
+ goto fail;
+ }
+
+ if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST))
+ {
+ /*
+ size of IO operations (This is based on a good guess, no high science
+ involved)
+ */
+ if (flag & HA_STATUS_CONST)
+ stats.block_size= 4096;
+
+ if (tmp_io->table_metadata(&stats, share->table_name,
+ share->table_name_length, flag))
+ goto error;
+ }
+
+ if (flag & HA_STATUS_AUTO)
+ stats.auto_increment_value= tmp_io->last_insert_id();
+
+ /*
+ If ::info created its own transaction, close it. This happens in the case
+ of SHOW TABLE STATUS.
+ */
+ txn->release(&tmp_io);
+
+ DBUG_RETURN(0);
+
+error:
+ if (tmp_io)
+ {
+ my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+ tmp_io->error_code(), tmp_io->error_str()));
+ my_error(error_code, MYF(0), error_buffer);
+ }
+ else
+ if (remote_error_number != -1 /* error already reported */)
+ {
+ error_code= remote_error_number;
+ my_error(error_code, MYF(0), ER(error_code));
+ }
+fail:
+ txn->release(&tmp_io);
+ DBUG_RETURN(error_code);
+}
+
+
+/**
+ @brief Handles extra signals from MySQL server
+
+ @param[in] operation Hint for storage engine
+
+ @return Operation Status
+ @retval 0 OK
+ */
+int ha_federatedx::extra(ha_extra_function operation)
+{
+ DBUG_ENTER("ha_federatedx::extra");
+ switch (operation) {
+ case HA_EXTRA_IGNORE_DUP_KEY:
+ ignore_duplicates= TRUE;
+ break;
+ case HA_EXTRA_NO_IGNORE_DUP_KEY:
+ insert_dup_update= FALSE;
+ ignore_duplicates= FALSE;
+ break;
+ case HA_EXTRA_WRITE_CAN_REPLACE:
+ replace_duplicates= TRUE;
+ break;
+ case HA_EXTRA_WRITE_CANNOT_REPLACE:
+ /*
+ We use this flag to ensure that we do not create an "INSERT IGNORE"
+ statement when inserting new rows into the remote table.
+ */
+ replace_duplicates= FALSE;
+ break;
+ case HA_EXTRA_INSERT_WITH_UPDATE:
+ insert_dup_update= TRUE;
+ break;
+ default:
+ /* do nothing */
+ DBUG_PRINT("info",("unhandled operation: %d", (uint) operation));
+ }
+ DBUG_RETURN(0);
+}
+
+
+/**
+ @brief Reset state of file to after 'open'.
+
+ @detail This function is called after every statement for all tables
+ used by that statement.
+
+ @return Operation status
+ @retval 0 OK
+*/
+
+int ha_federatedx::reset(void)
+{
+ insert_dup_update= FALSE;
+ ignore_duplicates= FALSE;
+ replace_duplicates= FALSE;
+ return 0;
+}
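+
+/*
+  Illustrative example of how these flags are driven: an INSERT IGNORE
+  statement leads to extra(HA_EXTRA_IGNORE_DUP_KEY), so update_row() above
+  emits UPDATE IGNORE (and the insert path emits INSERT IGNORE); REPLACE
+  leads to HA_EXTRA_WRITE_CAN_REPLACE; INSERT ... ON DUPLICATE KEY UPDATE
+  leads to HA_EXTRA_INSERT_WITH_UPDATE. reset() clears all three flags
+  after each statement.
+*/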
+
+
+/*
+ Used to delete all rows in a table. Both for cases of truncate and
+ for cases where the optimizer realizes that all rows will be
+ removed as a result of a SQL statement.
+
+ Called from item_sum.cc by Item_func_group_concat::clear(),
+ Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
+ Called from sql_delete.cc by mysql_delete().
+ Called from sql_select.cc by JOIN::reinit().
+ Called from sql_union.cc by st_select_lex_unit::exec().
+*/
+
+int ha_federatedx::delete_all_rows()
+{
+ char query_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+ int error;
+ DBUG_ENTER("ha_federatedx::delete_all_rows");
+
+ query.length(0);
+
+ query.set_charset(system_charset_info);
+ query.append(STRING_WITH_LEN("TRUNCATE "));
+ append_ident(&query, share->table_name, share->table_name_length,
+ ident_quote_char);
+
+ /* no need for savepoint in autocommit mode */
+ if (!(ha_thd()->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
+ txn->stmt_autocommit();
+
+ /*
+ TRUNCATE won't return anything in mysql_affected_rows
+ */
+
+ if ((error= txn->acquire(share, FALSE, &io)))
+ DBUG_RETURN(error);
+
+ if (io->query(query.ptr(), query.length()))
+ {
+ DBUG_RETURN(stash_remote_error());
+ }
+ stats.deleted+= stats.records;
+ stats.records= 0;
+ DBUG_RETURN(0);
+}
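+
+/*
+  Illustrative example (hypothetical table name): for a remote table t1
+  this method sends
+
+    TRUNCATE `t1`
+
+  Because TRUNCATE does not report affected rows, the local statistics are
+  adjusted from the cached stats.records value instead.
+*/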
+
+
+/*
+ The idea with handler::store_lock() is the following:
+
+ The statement decides which locks we should need for the table.
+ For updates/deletes/inserts we get WRITE locks, for SELECT... we get
+ read locks.
+
+ Before adding the lock into the table lock handler (see thr_lock.c)
+ mysqld calls store lock with the requested locks. Store lock can now
+ modify a write lock to a read lock (or some other lock), ignore the
+ lock (if we don't want to use MySQL table locks at all) or add locks
+ for many tables (like we do when we are using a MERGE handler).
+
+ Berkeley DB, for example, changes all WRITE locks to TL_WRITE_ALLOW_WRITE
+ (which signals that we are doing WRITES, but we are still allowing other
+ readers and writers).
+
+ When releasing locks, store_lock() is also called. In this case one
+ usually doesn't have to do anything.
+
+ In some exceptional cases MySQL may send a request for a TL_IGNORE;
+ this means that we are requesting the same lock as last time and this
+ should also be ignored. (This may happen when someone does a FLUSH
+ TABLES when we have opened a part of the tables, in which case mysqld
+ closes and reopens the tables and tries to get the same locks as last
+ time.) In the future we will probably try to remove this.
+
+ Called from lock.cc by get_lock_data().
+*/
+
+THR_LOCK_DATA **ha_federatedx::store_lock(THD *thd,
+ THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+{
+ DBUG_ENTER("ha_federatedx::store_lock");
+ if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+ {
+ /*
+ Here is where we get into the guts of a row level lock.
+ If TL_UNLOCK is set and we are not doing a LOCK TABLE or DISCARD/IMPORT
+ TABLESPACE, then allow multiple writers.
+ */
+
+ if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
+ lock_type <= TL_WRITE) && !thd->in_lock_tables)
+ lock_type= TL_WRITE_ALLOW_WRITE;
+
+ /*
+ In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
+ MySQL would use the lock TL_READ_NO_INSERT on t2, and that
+ would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
+ to t2. Convert the lock to a normal read lock to allow
+ concurrent inserts to t2.
+ */
+
+ if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
+ lock_type= TL_READ;
+
+ lock.type= lock_type;
+ }
+
+ *to++= &lock;
+
+ DBUG_RETURN(to);
+}
+
+
+static int test_connection(MYSQL_THD thd, federatedx_io *io,
+ FEDERATEDX_SHARE *share)
+{
+ char buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+ String str(buffer, sizeof(buffer), &my_charset_bin);
+ FEDERATEDX_IO_RESULT *resultset= NULL;
+ int retval;
+
+ str.length(0);
+ str.append(STRING_WITH_LEN("SELECT * FROM "));
+ append_identifier(thd, &str, share->table_name,
+ share->table_name_length);
+ str.append(STRING_WITH_LEN(" WHERE 1=0"));
+
+ if ((retval= io->query(str.ptr(), str.length())))
+ {
+ my_sprintf(buffer, (buffer,
+ "database: '%s' username: '%s' hostname: '%s'",
+ share->database, share->username, share->hostname));
+ DBUG_PRINT("info", ("error-code: %d", io->error_code()));
+ my_error(ER_CANT_CREATE_FEDERATED_TABLE, MYF(0), buffer);
+ }
+ else
+ resultset= io->store_result();
+
+ io->free_result(resultset);
+
+ return retval;
+}
+
+/*
+ create() does nothing, since we have no local setup of our own.
+ FUTURE: We should potentially connect to the foreign database and
+*/
+
+int ha_federatedx::create(const char *name, TABLE *table_arg,
+ HA_CREATE_INFO *create_info)
+{
+ int retval;
+ THD *thd= current_thd;
+ FEDERATEDX_SHARE tmp_share; // Only a temporary share, to test the url
+ federatedx_txn *tmp_txn;
+ federatedx_io *tmp_io= NULL;
+ DBUG_ENTER("ha_federatedx::create");
+
+ if ((retval= parse_url(thd->mem_root, &tmp_share, table_arg, 1)))
+ goto error;
+
+ /* loopback socket connections hang due to LOCK_open mutex */
+ if ((!tmp_share.hostname || !strcmp(tmp_share.hostname,my_localhost)) &&
+ !tmp_share.port)
+ goto error;
+
+ /*
+ If possible, we try to use an existing network connection to
+ the remote server. To ensure that no new FEDERATEDX_SERVER
+ instance is created, we pass NULL in get_server() TABLE arg.
+ */
+ pthread_mutex_lock(&federatedx_mutex);
+ tmp_share.s= get_server(&tmp_share, NULL);
+ pthread_mutex_unlock(&federatedx_mutex);
+
+ if (tmp_share.s)
+ {
+ tmp_txn= get_txn(thd);
+ if (!(retval= tmp_txn->acquire(&tmp_share, TRUE, &tmp_io)))
+ {
+ retval= test_connection(thd, tmp_io, &tmp_share);
+ tmp_txn->release(&tmp_io);
+ }
+ free_server(tmp_txn, tmp_share.s);
+ }
+ else
+ {
+ FEDERATEDX_SERVER server;
+
+#ifdef NOT_YET
+ /*
+ Bug#25679
+ Ensure that we do not hold the LOCK_open mutex while attempting
+ to establish a FederatedX connection, to guard against a trivial
+ Denial of Service scenario.
+ */
+ safe_mutex_assert_not_owner(&LOCK_open);
+#endif
+
+ fill_server(thd->mem_root, &server, &tmp_share, create_info->table_charset);
+
+#ifndef DBUG_OFF
+ pthread_mutex_init(&server.mutex, MY_MUTEX_INIT_FAST);
+ pthread_mutex_lock(&server.mutex);
+#endif
+
+ tmp_io= federatedx_io::construct(thd->mem_root, &server);
+
+ retval= test_connection(thd, tmp_io, &tmp_share);
+
+#ifndef DBUG_OFF
+ pthread_mutex_unlock(&server.mutex);
+ pthread_mutex_destroy(&server.mutex);
+#endif
+
+ delete tmp_io;
+ }
+
+error:
+ DBUG_RETURN(retval);
+
+}
+
+
+int ha_federatedx::stash_remote_error()
+{
+ DBUG_ENTER("ha_federatedx::stash_remote_error()");
+ if (!io)
+ DBUG_RETURN(remote_error_number);
+ remote_error_number= io->error_code();
+ strmake(remote_error_buf, io->error_str(), sizeof(remote_error_buf)-1);
+ if (remote_error_number == ER_DUP_ENTRY ||
+ remote_error_number == ER_DUP_KEY)
+ DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
+ DBUG_RETURN(HA_FEDERATEDX_ERROR_WITH_REMOTE_SYSTEM);
+}
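+
+/*
+  Example of the mapping performed above: a remote duplicate-key failure
+  (ER_DUP_ENTRY/ER_DUP_KEY) is converted to HA_ERR_FOUND_DUPP_KEY so the
+  server reports an ordinary duplicate-key error locally; every other
+  remote error is returned as HA_FEDERATEDX_ERROR_WITH_REMOTE_SYSTEM and
+  the saved error text is rendered by get_error_message() below.
+*/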
+
+
+bool ha_federatedx::get_error_message(int error, String* buf)
+{
+ DBUG_ENTER("ha_federatedx::get_error_message");
+ DBUG_PRINT("enter", ("error: %d", error));
+ if (error == HA_FEDERATEDX_ERROR_WITH_REMOTE_SYSTEM)
+ {
+ buf->append(STRING_WITH_LEN("Error on remote system: "));
+ buf->qs_append(remote_error_number);
+ buf->append(STRING_WITH_LEN(": "));
+ buf->append(remote_error_buf);
+
+ remote_error_number= 0;
+ remote_error_buf[0]= '\0';
+ }
+ DBUG_PRINT("exit", ("message: %s", buf->ptr()));
+ DBUG_RETURN(FALSE);
+}
+
+
+int ha_federatedx::start_stmt(MYSQL_THD thd, thr_lock_type lock_type)
+{
+ DBUG_ENTER("ha_federatedx::start_stmt");
+ DBUG_ASSERT(txn == get_txn(thd));
+
+ if (!txn->in_transaction())
+ {
+ txn->stmt_begin();
+ trans_register_ha(thd, FALSE, ht);
+ }
+ DBUG_RETURN(0);
+}
+
+
+int ha_federatedx::external_lock(MYSQL_THD thd, int lock_type)
+{
+ int error= 0;
+ DBUG_ENTER("ha_federatedx::external_lock");
+
+ if (lock_type == F_UNLCK)
+ txn->release(&io);
+ else
+ {
+ txn= get_txn(thd);
+ if (!(error= txn->acquire(share, lock_type == F_RDLCK, &io)) &&
+ (lock_type == F_WRLCK || !io->is_autocommit()))
+ {
+ if (!thd_test_options(thd, (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
+ {
+ txn->stmt_begin();
+ trans_register_ha(thd, FALSE, ht);
+ }
+ else
+ {
+ txn->txn_begin();
+ trans_register_ha(thd, TRUE, ht);
+ }
+ }
+ }
+
+ DBUG_RETURN(error);
+}
+
+
+int ha_federatedx::savepoint_set(handlerton *hton, MYSQL_THD thd, void *sv)
+{
+ int error= 0;
+ federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+ DBUG_ENTER("ha_federatedx::savepoint_set");
+
+ if (txn && txn->has_connections())
+ {
+ if (txn->txn_begin())
+ trans_register_ha(thd, TRUE, hton);
+
+ txn->sp_acquire((ulong *) sv);
+
+ DBUG_ASSERT(1 < *(ulong *) sv);
+ }
+
+ DBUG_RETURN(error);
+}
+
+
+int ha_federatedx::savepoint_rollback(handlerton *hton, MYSQL_THD thd, void *sv)
+ {
+ int error= 0;
+ federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+ DBUG_ENTER("ha_federatedx::savepoint_rollback");
+
+ if (txn)
+ error= txn->sp_rollback((ulong *) sv);
+
+ DBUG_RETURN(error);
+}
+
+
+int ha_federatedx::savepoint_release(handlerton *hton, MYSQL_THD thd, void *sv)
+{
+ int error= 0;
+ federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+ DBUG_ENTER("ha_federatedx::savepoint_release");
+
+ if (txn)
+ error= txn->sp_release((ulong *) sv);
+
+ DBUG_RETURN(error);
+}
+
+
+int ha_federatedx::commit(handlerton *hton, MYSQL_THD thd, bool all)
+{
+ int return_val;
+ federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+ DBUG_ENTER("ha_federatedx::commit");
+
+ if (all)
+ return_val= txn->txn_commit();
+ else
+ return_val= txn->stmt_commit();
+
+ DBUG_PRINT("info", ("error val: %d", return_val));
+ DBUG_RETURN(return_val);
+}
+
+
+int ha_federatedx::rollback(handlerton *hton, MYSQL_THD thd, bool all)
+{
+ int return_val;
+ federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+ DBUG_ENTER("ha_federatedx::rollback");
+
+ if (all)
+ return_val= txn->txn_rollback();
+ else
+ return_val= txn->stmt_rollback();
+
+ DBUG_PRINT("info", ("error val: %d", return_val));
+ DBUG_RETURN(return_val);
+}
+
+struct st_mysql_storage_engine federatedx_storage_engine=
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+
+mysql_declare_plugin(federated)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &federatedx_storage_engine,
+ "FEDERATED",
+ "Patrick Galbraith",
+ "FederatedX pluggable storage engine",
+ PLUGIN_LICENSE_GPL,
+ federatedx_db_init, /* Plugin Init */
+ federatedx_done, /* Plugin Deinit */
+ 0x0100 /* 1.0 */,
+ NULL, /* status variables */
+ NULL, /* system variables */
+ NULL /* config options */
+}
+mysql_declare_plugin_end;
=== added file 'storage/federatedx/ha_federatedx.h'
--- a/storage/federatedx/ha_federatedx.h 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/ha_federatedx.h 2009-10-30 18:50:56 +0000
@@ -0,0 +1,446 @@
+/*
+Copyright (c) 2008, Patrick Galbraith
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+ * Neither the name of Patrick Galbraith nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+class federatedx_io;
+
+/*
+ FEDERATEDX_SERVER will eventually be a structure that will be shared among
+ all FEDERATEDX_SHARE instances so that the federated server can minimise
+ the number of open connections. This will eventually lead to the support
+ of reliable XA federated tables.
+*/
+typedef struct st_fedrated_server {
+ MEM_ROOT mem_root;
+ uint use_count, io_count;
+
+ uchar *key;
+ uint key_length;
+
+ const char *scheme;
+ const char *hostname;
+ const char *username;
+ const char *password;
+ const char *database;
+ const char *socket;
+ ushort port;
+
+ const char *csname;
+
+ pthread_mutex_t mutex;
+ federatedx_io *idle_list;
+} FEDERATEDX_SERVER;
+
+/*
+ Please read ha_example.cc before reading this file.
+ Please keep in mind that the federatedx storage engine implements all methods
+ that are required to be implemented. handler.h has a full list of methods
+ that you can implement.
+*/
+
+#ifdef USE_PRAGMA_INTERFACE
+#pragma interface /* gcc class implementation */
+#endif
+
+#include <mysql.h>
+
+/*
+ handler::print_error has a case statement for error numbers.
+ This value (10000) is far out of range and will invoke the
+ default: case.
+ (Current error range is 120-159 from include/my_base.h)
+*/
+#define HA_FEDERATEDX_ERROR_WITH_REMOTE_SYSTEM 10000
+
+#define FEDERATEDX_QUERY_BUFFER_SIZE STRING_BUFFER_USUAL_SIZE * 5
+#define FEDERATEDX_RECORDS_IN_RANGE 2
+#define FEDERATEDX_MAX_KEY_LENGTH 3500 // Same as innodb
+
+/*
+ FEDERATEDX_SHARE is a structure that will be shared among all open handlers.
+ The example implements the minimum of what you will probably need.
+*/
+typedef struct st_federatedx_share {
+ MEM_ROOT mem_root;
+
+ bool parsed;
+ /* this key is unique db/tablename */
+ const char *share_key;
+ /*
+ the primary select query to be used in rnd_init
+ */
+ char *select_query;
+ /*
+ remote host info, parse_url supplies
+ */
+ char *server_name;
+ char *connection_string;
+ char *scheme;
+ char *hostname;
+ char *username;
+ char *password;
+ char *database;
+ char *table_name;
+ char *table;
+ char *socket;
+ char *sport;
+ int share_key_length;
+ ushort port;
+
+ uint table_name_length, server_name_length, connect_string_length;
+ uint use_count;
+ THR_LOCK lock;
+ FEDERATEDX_SERVER *s;
+} FEDERATEDX_SHARE;
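+
+/*
+  Illustrative example of how these members are filled (hypothetical
+  values): a connection string such as
+
+    mysql://user:pwd@remote_host:3306/db1/t1
+
+  is split by parse_url() into scheme "mysql", username "user", password
+  "pwd", hostname "remote_host", port 3306, database "db1" and table_name
+  "t1", and select_query is built as a SELECT of all columns of that
+  remote table.
+*/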
+
+
+typedef struct st_federatedx_result FEDERATEDX_IO_RESULT;
+typedef struct st_federatedx_row FEDERATEDX_IO_ROW;
+typedef ptrdiff_t FEDERATEDX_IO_OFFSET;
+
+class federatedx_io
+{
+ friend class federatedx_txn;
+ FEDERATEDX_SERVER * const server;
+ federatedx_io **owner_ptr;
+ federatedx_io *txn_next;
+ federatedx_io *idle_next;
+ bool active; /* currently participating in a transaction */
+ bool busy; /* in use by a ha_federated instance */
+ bool readonly;/* indicates that no updates have occurred */
+
+protected:
+ void set_active(bool new_active)
+ { active= new_active; }
+public:
+ federatedx_io(FEDERATEDX_SERVER *);
+ virtual ~federatedx_io();
+
+ bool is_readonly() const { return readonly; }
+ bool is_active() const { return active; }
+
+ const char * get_charsetname() const
+ { return server->csname ? server->csname : "latin1"; }
+
+ const char * get_hostname() const { return server->hostname; }
+ const char * get_username() const { return server->username; }
+ const char * get_password() const { return server->password; }
+ const char * get_database() const { return server->database; }
+ ushort get_port() const { return server->port; }
+ const char * get_socket() const { return server->socket; }
+
+ static bool handles_scheme(const char *scheme);
+ static federatedx_io *construct(MEM_ROOT *server_root,
+ FEDERATEDX_SERVER *server);
+
+ static void *operator new(size_t size, MEM_ROOT *mem_root) throw ()
+ { return alloc_root(mem_root, size); }
+ static void operator delete(void *ptr, size_t size)
+ { TRASH(ptr, size); }
+
+ virtual int query(const char *buffer, uint length)=0;
+ virtual FEDERATEDX_IO_RESULT *store_result()=0;
+
+ virtual size_t max_query_size() const=0;
+
+ virtual my_ulonglong affected_rows() const=0;
+ virtual my_ulonglong last_insert_id() const=0;
+
+ virtual int error_code()=0;
+ virtual const char *error_str()=0;
+
+ virtual void reset()=0;
+ virtual int commit()=0;
+ virtual int rollback()=0;
+
+ virtual int savepoint_set(ulong sp)=0;
+ virtual ulong savepoint_release(ulong sp)=0;
+ virtual ulong savepoint_rollback(ulong sp)=0;
+ virtual void savepoint_restrict(ulong sp)=0;
+
+ virtual ulong last_savepoint() const=0;
+ virtual ulong actual_savepoint() const=0;
+ virtual bool is_autocommit() const=0;
+
+ virtual bool table_metadata(ha_statistics *stats, const char *table_name,
+ uint table_name_length, uint flag) = 0;
+
+ /* resultset operations */
+
+ virtual void free_result(FEDERATEDX_IO_RESULT *io_result)=0;
+ virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result)=0;
+ virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result)=0;
+ virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result)=0;
+ virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result)=0;
+ virtual const char *get_column_data(FEDERATEDX_IO_ROW *row,
+ unsigned int column)=0;
+ virtual bool is_column_null(const FEDERATEDX_IO_ROW *row,
+ unsigned int column) const=0;
+};
+
+
+class federatedx_txn
+{
+ federatedx_io *txn_list;
+ ulong savepoint_level;
+ ulong savepoint_stmt;
+ ulong savepoint_next;
+
+ void release_scan();
+public:
+ federatedx_txn();
+ ~federatedx_txn();
+
+ bool has_connections() const { return txn_list != NULL; }
+ bool in_transaction() const { return savepoint_next != 0; }
+ int acquire(FEDERATEDX_SHARE *share, bool readonly, federatedx_io **io);
+ void release(federatedx_io **io);
+ void close(FEDERATEDX_SERVER *);
+
+ bool txn_begin();
+ int txn_commit();
+ int txn_rollback();
+
+ bool sp_acquire(ulong *save);
+ int sp_rollback(ulong *save);
+ int sp_release(ulong *save);
+
+ bool stmt_begin();
+ int stmt_commit();
+ int stmt_rollback();
+ void stmt_autocommit();
+};
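+
+/*
+  Condensed usage sketch (error handling trimmed; 'sql' is a hypothetical
+  String holding the remote statement): the handler borrows a connection
+  from its transaction object, runs one remote statement on it and hands
+  it back:
+
+    federatedx_io *tmp_io= 0;
+    if (!(error= txn->acquire(share, TRUE, &tmp_io)))   // read-only use
+    {
+      error= tmp_io->query(sql.ptr(), sql.length());
+      txn->release(&tmp_io);
+    }
+*/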
+
+
+/*
+ Class definition for the storage engine
+*/
+class ha_federatedx: public handler
+{
+ friend int federatedx_db_init(void *p);
+
+ THR_LOCK_DATA lock; /* MySQL lock */
+ FEDERATEDX_SHARE *share; /* Shared lock info */
+ federatedx_txn *txn;
+ federatedx_io *io;
+ FEDERATEDX_IO_RESULT *stored_result;
+ uint fetch_num; // stores the fetch num
+ FEDERATEDX_IO_OFFSET current_position; // Current position used by ::position()
+ int remote_error_number;
+ char remote_error_buf[FEDERATEDX_QUERY_BUFFER_SIZE];
+ bool ignore_duplicates, replace_duplicates;
+ bool insert_dup_update;
+ DYNAMIC_STRING bulk_insert;
+
+private:
+ /*
+ return 0 on success
+ return errorcode otherwise
+ */
+ uint convert_row_to_internal_format(uchar *buf, FEDERATEDX_IO_ROW *row,
+ FEDERATEDX_IO_RESULT *result);
+ bool create_where_from_key(String *to, KEY *key_info,
+ const key_range *start_key,
+ const key_range *end_key,
+ bool records_in_range, bool eq_range);
+ int stash_remote_error();
+
+ federatedx_txn *get_txn(THD *thd, bool no_create= FALSE);
+
+ static int disconnect(handlerton *hton, MYSQL_THD thd);
+ static int savepoint_set(handlerton *hton, MYSQL_THD thd, void *sv);
+ static int savepoint_rollback(handlerton *hton, MYSQL_THD thd, void *sv);
+ static int savepoint_release(handlerton *hton, MYSQL_THD thd, void *sv);
+ static int commit(handlerton *hton, MYSQL_THD thd, bool all);
+ static int rollback(handlerton *hton, MYSQL_THD thd, bool all);
+
+ bool append_stmt_insert(String *query);
+
+ int read_next(uchar *buf, FEDERATEDX_IO_RESULT *result);
+ int index_read_idx_with_result_set(uchar *buf, uint index,
+ const uchar *key,
+ uint key_len,
+ ha_rkey_function find_flag,
+ FEDERATEDX_IO_RESULT **result);
+ int real_query(const char *query, uint length);
+ int real_connect(FEDERATEDX_SHARE *my_share, uint create_flag);
+public:
+ ha_federatedx(handlerton *hton, TABLE_SHARE *table_arg);
+ ~ha_federatedx() {}
+ /* The name that will be used for display purposes */
+ const char *table_type() const { return "FEDERATED"; }
+ /*
+ The name of the index type that will be used for display.
+ Don't implement this method unless you really have indexes.
+ */
+ // perhaps get index type
+ const char *index_type(uint inx) { return "REMOTE"; }
+ const char **bas_ext() const;
+ /*
+ This is a list of flags that says what the storage engine
+ implements. The current table flags are documented in
+ handler.h
+ */
+ ulonglong table_flags() const
+ {
+ /* fix server to be able to get remote server table flags */
+ return (HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED
+ | HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS |
+ HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
+ HA_NO_PREFIX_CHAR_KEYS | HA_PRIMARY_KEY_REQUIRED_FOR_DELETE |
+ HA_PARTIAL_COLUMN_READ | HA_NULL_IN_KEY);
+ }
+ /*
+ This is a bitmap of flags that says how the storage engine
+ implements indexes. The current index flags are documented in
+ handler.h. If you do not implement indexes, just return zero
+ here.
+
+ part is the key part to check. The first key part is 0.
+ If all_parts is set, MySQL wants to know the flags for the combined
+ index up to and including 'part'.
+ */
+ /* fix server to be able to get remote server index flags */
+ ulong index_flags(uint inx, uint part, bool all_parts) const
+ {
+ return (HA_READ_NEXT | HA_READ_RANGE | HA_READ_AFTER_KEY);
+ }
+ uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
+ uint max_supported_keys() const { return MAX_KEY; }
+ uint max_supported_key_parts() const { return MAX_REF_PARTS; }
+ uint max_supported_key_length() const { return FEDERATEDX_MAX_KEY_LENGTH; }
+ uint max_supported_key_part_length() const { return FEDERATEDX_MAX_KEY_LENGTH; }
+ /*
+ Called in test_quick_select to determine if indexes should be used.
+ Normally, we need to know the number of blocks. For federatedx we need to
+ know the number of blocks on the remote side, and the number of packets and
+ blocks on the network side (?)
+ Talk to Kostja about this - how to get the
+ number of rows * ...
+ disk scan time on the other side (block size, size of the row) + network time ...
+ The reason for "records * 1000" is that such a large number forces
+ this to use indexes.
+ */
+ double scan_time()
+ {
+ DBUG_PRINT("info", ("records %lu", (ulong) stats.records));
+ return (double)(stats.records*1000);
+ }
+ /*
+ The next method will never be called if you do not implement indexes.
+ */
+ double read_time(uint index, uint ranges, ha_rows rows)
+ {
+ /*
+ Per Brian, this number is bogus, but this method must be implemented,
+ and at a later date, he intends to document this issue for handler code
+ */
+ return (double) rows / 20.0+1;
+ }
+
+ const key_map *keys_to_use_for_scanning() { return &key_map_full; }
+ /*
+ Everything below are methods that we implement in ha_federatedx.cc.
+
+ Most of these methods are not obligatory; skip them and
+ MySQL will treat them as not implemented.
+ */
+ int open(const char *name, int mode, uint test_if_locked); // required
+ int close(void); // required
+
+ void start_bulk_insert(ha_rows rows);
+ int end_bulk_insert(bool abort);
+ int write_row(uchar *buf);
+ int update_row(const uchar *old_data, uchar *new_data);
+ int delete_row(const uchar *buf);
+ int index_init(uint keynr, bool sorted);
+ ha_rows estimate_rows_upper_bound();
+ int index_read(uchar *buf, const uchar *key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_read_idx(uchar *buf, uint idx, const uchar *key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_next(uchar *buf);
+ int index_end();
+ int read_range_first(const key_range *start_key,
+ const key_range *end_key,
+ bool eq_range, bool sorted);
+ int read_range_next();
+ /*
+ Unlike index_init(), rnd_init() can be called two times
+ without rnd_end() in between (it only makes sense if scan=1).
+ Then the second call should prepare for a new table scan
+ (e.g. if rnd_init allocates the cursor, the second call should
+ position it at the start of the table; there is no need to deallocate
+ and allocate it again).
+ */
+ int rnd_init(bool scan); //required
+ int rnd_end();
+ int rnd_next(uchar *buf); //required
+ int rnd_pos(uchar *buf, uchar *pos); //required
+ void position(const uchar *record); //required
+ int info(uint); //required
+ int extra(ha_extra_function operation);
+
+ void update_auto_increment(void);
+ int repair(THD* thd, HA_CHECK_OPT* check_opt);
+ int optimize(THD* thd, HA_CHECK_OPT* check_opt);
+
+ int delete_all_rows(void);
+ int create(const char *name, TABLE *form,
+ HA_CREATE_INFO *create_info); //required
+ ha_rows records_in_range(uint inx, key_range *start_key,
+ key_range *end_key);
+ uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; }
+
+ THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type); //required
+ bool get_error_message(int error, String *buf);
+ int start_stmt(THD *thd, thr_lock_type lock_type);
+ int external_lock(THD *thd, int lock_type);
+ int reset(void);
+ int free_result(void);
+};
+
+extern const char ident_quote_char; // Character for quoting
+ // identifiers
+extern const char value_quote_char; // Character for quoting
+ // literals
+
+extern bool append_ident(String *string, const char *name, uint length,
+ const char quote_char);
+
+
+extern federatedx_io *instantiate_io_mysql(MEM_ROOT *server_root,
+ FEDERATEDX_SERVER *server);
+extern federatedx_io *instantiate_io_null(MEM_ROOT *server_root,
+ FEDERATEDX_SERVER *server);
=== added file 'storage/federatedx/plug.in'
--- a/storage/federatedx/plug.in 1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/plug.in 2009-10-30 18:50:56 +0000
@@ -0,0 +1,5 @@
+MYSQL_STORAGE_ENGINE(federated,,[FederatedX Storage Engine],
+ [FederatedX Storage Engine], [max,max-no-ndb])
+MYSQL_PLUGIN_DYNAMIC(federated, [ha_federatedx.la])
+MYSQL_PLUGIN_STATIC(federated, [libfederatedx.a])
+MYSQL_PLUGIN_DEPENDS_ON_MYSQL_INTERNALS(federated, [ha_federatedx.cc])
=== modified file 'storage/heap/hp_test2.c'
--- a/storage/heap/hp_test2.c 2008-03-29 08:02:54 +0000
+++ b/storage/heap/hp_test2.c 2009-08-28 16:21:54 +0000
@@ -62,11 +62,10 @@ int main(int argc, char *argv[])
HP_SHARE *tmp_share;
HP_KEYDEF keyinfo[MAX_KEYS];
HA_KEYSEG keyseg[MAX_KEYS*5];
- HEAP_PTR position;
+ HEAP_PTR UNINIT_VAR(position);
HP_CREATE_INFO hp_create_info;
CHARSET_INFO *cs= &my_charset_latin1;
MY_INIT(argv[0]); /* init my_sys library & pthreads */
- LINT_INIT(position);
filename= "test2";
filename2= "test2_2";
=== modified file 'storage/myisam/ft_boolean_search.c'
--- a/storage/myisam/ft_boolean_search.c 2009-09-07 20:50:10 +0000
+++ b/storage/myisam/ft_boolean_search.c 2009-10-15 21:38:29 +0000
@@ -358,9 +358,8 @@ static int _ft2_search(FTB *ftb, FTB_WOR
int subkeys=1;
my_bool can_go_down;
MI_INFO *info=ftb->info;
- uint off, extra=HA_FT_WLEN+info->s->base.rec_reflength;
+ uint UNINIT_VAR(off), extra=HA_FT_WLEN+info->s->base.rec_reflength;
uchar *lastkey_buf=ftbw->word+ftbw->off;
- LINT_INIT(off);
if (ftbw->flags & FTB_FLAG_TRUNC)
lastkey_buf+=ftbw->len;
=== modified file 'storage/myisam/mi_check.c'
--- a/storage/myisam/mi_check.c 2009-06-29 21:03:30 +0000
+++ b/storage/myisam/mi_check.c 2009-10-15 21:38:29 +0000
@@ -140,11 +140,10 @@ int chk_del(HA_CHECK *param, register MI
{
reg2 ha_rows i;
uint delete_link_length;
- my_off_t empty,next_link,old_link;
+ my_off_t empty,next_link,UNINIT_VAR(old_link);
char buff[22],buff2[22];
DBUG_ENTER("chk_del");
- LINT_INIT(old_link);
param->record_checksum=0;
delete_link_length=((info->s->options & HA_OPTION_PACK_RECORD) ? 20 :
info->s->rec_reflength+1);
@@ -940,11 +939,11 @@ static uint isam_key_length(MI_INFO *inf
int chk_data_link(HA_CHECK *param, MI_INFO *info, my_bool extend)
{
int error,got_error,flag;
- uint key,left_length,b_type,field;
+ uint key,UNINIT_VAR(left_length),b_type,field;
ha_rows records,del_blocks;
- my_off_t used,empty,pos,splits,start_recpos,
+ my_off_t used,empty,pos,splits,UNINIT_VAR(start_recpos),
del_length,link_used,start_block;
- uchar *record= 0, *to;
+ uchar *record= 0, *UNINIT_VAR(to);
char llbuff[22],llbuff2[22],llbuff3[22];
ha_checksum intern_record_checksum;
ha_checksum key_checksum[HA_MAX_POSSIBLE_KEY];
@@ -969,7 +968,6 @@ int chk_data_link(HA_CHECK *param, MI_IN
records=del_blocks=0;
used=link_used=splits=del_length=0;
intern_record_checksum=param->glob_crc=0;
- LINT_INIT(left_length); LINT_INIT(start_recpos); LINT_INIT(to);
got_error=error=0;
empty=info->s->pack.header_length;
@@ -1550,7 +1548,7 @@ int mi_repair(HA_CHECK *param, register
if (!param->using_global_keycache)
VOID(init_key_cache(dflt_key_cache, param->key_cache_block_size,
- param->use_buffers, 0, 0));
+ (size_t) param->use_buffers, 0, 0));
if (init_io_cache(¶m->read_cache,info->dfile,
(uint) param->read_buffer_length,
@@ -2227,9 +2225,8 @@ int mi_repair_by_sort(HA_CHECK *param, r
ulong *rec_per_key_part;
char llbuff[22];
MI_SORT_INFO sort_info;
- ulonglong key_map;
+ ulonglong UNINIT_VAR(key_map);
DBUG_ENTER("mi_repair_by_sort");
- LINT_INIT(key_map);
start_records=info->state->records;
got_error=1;
@@ -2651,11 +2648,10 @@ int mi_repair_parallel(HA_CHECK *param,
IO_CACHE new_data_cache; /* For non-quick repair. */
IO_CACHE_SHARE io_share;
MI_SORT_INFO sort_info;
- ulonglong key_map;
+ ulonglong UNINIT_VAR(key_map);
pthread_attr_t thr_attr;
ulong max_pack_reclength;
DBUG_ENTER("mi_repair_parallel");
- LINT_INIT(key_map);
start_records=info->state->records;
got_error=1;
@@ -3242,7 +3238,7 @@ static int sort_get_next_record(MI_SORT_
int parallel_flag;
uint found_record,b_type,left_length;
my_off_t pos;
- uchar *to;
+ uchar *UNINIT_VAR(to);
MI_BLOCK_INFO block_info;
MI_SORT_INFO *sort_info=sort_param->sort_info;
HA_CHECK *param=sort_info->param;
=== modified file 'storage/myisam/mi_create.c'
--- a/storage/myisam/mi_create.c 2009-01-15 22:25:53 +0000
+++ b/storage/myisam/mi_create.c 2009-10-15 21:38:29 +0000
@@ -38,7 +38,7 @@ int mi_create(const char *name,uint keys
MI_CREATE_INFO *ci,uint flags)
{
register uint i,j;
- File dfile,file;
+ File UNINIT_VAR(dfile),file;
int errpos,save_errno, create_mode= O_RDWR | O_TRUNC;
myf create_flag;
uint fields,length,max_key_length,packed,pack_bytes,pointer,real_length_diff,
=== modified file 'storage/myisam/mi_delete.c'
--- a/storage/myisam/mi_delete.c 2009-09-07 20:50:10 +0000
+++ b/storage/myisam/mi_delete.c 2009-10-15 21:38:29 +0000
@@ -221,7 +221,7 @@ static int d_search(register MI_INFO *in
uint length,nod_flag,search_key_length;
my_bool last_key;
uchar *leaf_buff,*keypos;
- my_off_t leaf_page,next_block;
+ my_off_t UNINIT_VAR(leaf_page),next_block;
uchar lastkey[HA_MAX_KEY_BUFF];
DBUG_ENTER("d_search");
DBUG_DUMP("page",(uchar*) anc_buff,mi_getint(anc_buff));
=== modified file 'storage/myisam/mi_dynrec.c'
--- a/storage/myisam/mi_dynrec.c 2009-09-07 20:50:10 +0000
+++ b/storage/myisam/mi_dynrec.c 2009-10-15 21:38:29 +0000
@@ -1419,16 +1419,14 @@ void _mi_store_blob_length(uchar *pos,ui
int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, uchar *buf)
{
int block_of_record;
- uint b_type,left_length;
- uchar *to;
+ uint b_type,UNINIT_VAR(left_length);
+ uchar *UNINIT_VAR(to);
MI_BLOCK_INFO block_info;
File file;
DBUG_ENTER("mi_read_dynamic_record");
if (filepos != HA_OFFSET_ERROR)
{
- LINT_INIT(to);
- LINT_INIT(left_length);
file=info->dfile;
block_of_record= 0; /* First block of record is numbered as zero. */
block_info.second_read= 0;
@@ -1697,13 +1695,12 @@ int _mi_read_rnd_dynamic_record(MI_INFO
{
int block_of_record, info_read, save_errno;
uint left_len,b_type;
- uchar *to;
+ uchar *UNINIT_VAR(to);
MI_BLOCK_INFO block_info;
MYISAM_SHARE *share=info->s;
DBUG_ENTER("_mi_read_rnd_dynamic_record");
info_read=0;
- LINT_INIT(to);
if (info->lock_type == F_UNLCK)
{
=== modified file 'storage/myisam/mi_open.c'
--- a/storage/myisam/mi_open.c 2009-09-07 20:50:10 +0000
+++ b/storage/myisam/mi_open.c 2009-10-15 21:38:29 +0000
@@ -709,7 +709,7 @@ err:
uchar *mi_alloc_rec_buff(MI_INFO *info, ulong length, uchar **buf)
{
uint extra;
- uint32 old_length;
+ uint32 UNINIT_VAR(old_length);
LINT_INIT(old_length);
if (! *buf || length > (old_length=mi_get_rec_buff_len(info, *buf)))
=== modified file 'storage/myisam/mi_packrec.c'
--- a/storage/myisam/mi_packrec.c 2009-09-07 20:50:10 +0000
+++ b/storage/myisam/mi_packrec.c 2009-10-15 21:38:29 +0000
@@ -1364,7 +1364,7 @@ uint _mi_pack_get_block_info(MI_INFO *my
File file, my_off_t filepos)
{
uchar *header=info->header;
- uint head_length,ref_length;
+ uint head_length, UNINIT_VAR(ref_length);
LINT_INIT(ref_length);
if (file >= 0)
=== modified file 'storage/myisam/mi_search.c'
--- a/storage/myisam/mi_search.c 2009-05-19 09:28:05 +0000
+++ b/storage/myisam/mi_search.c 2009-10-15 21:38:29 +0000
@@ -28,9 +28,15 @@ int _mi_check_index(MI_INFO *info, int i
{
if (inx == -1) /* Use last index */
inx=info->lastinx;
- if (inx < 0 || ! mi_is_key_active(info->s->state.key_map, inx))
+ if (inx < 0)
{
- my_errno=HA_ERR_WRONG_INDEX;
+ my_errno= HA_ERR_WRONG_INDEX;
+ return -1;
+ }
+ if (!mi_is_key_active(info->s->state.key_map, inx))
+ {
+ my_errno= info->s->state.state.records ? HA_ERR_WRONG_INDEX :
+ HA_ERR_END_OF_FILE;
return -1;
}
if (info->lastinx != inx) /* Index changed */
@@ -240,12 +246,11 @@ int _mi_seq_search(MI_INFO *info, regist
uchar *key, uint key_len, uint comp_flag, uchar **ret_pos,
uchar *buff, my_bool *last_key)
{
- int flag;
- uint nod_flag,length,not_used[2];
+ int UNINIT_VAR(flag);
+ uint nod_flag,UNINIT_VAR(length),not_used[2];
uchar t_buff[HA_MAX_KEY_BUFF],*end;
DBUG_ENTER("_mi_seq_search");
- LINT_INIT(flag); LINT_INIT(length);
end= page+mi_getint(page);
nod_flag=mi_test_if_nod(page);
page+=2+nod_flag;
=== modified file 'storage/myisam/mi_update.c'
--- a/storage/myisam/mi_update.c 2008-02-18 22:35:17 +0000
+++ b/storage/myisam/mi_update.c 2009-10-15 21:38:29 +0000
@@ -27,11 +27,8 @@ int mi_update(register MI_INFO *info, co
my_bool auto_key_changed=0;
ulonglong changed;
MYISAM_SHARE *share=info->s;
- ha_checksum old_checksum;
+ ha_checksum UNINIT_VAR(old_checksum);
DBUG_ENTER("mi_update");
- LINT_INIT(new_key);
- LINT_INIT(changed);
- LINT_INIT(old_checksum);
DBUG_EXECUTE_IF("myisam_pretend_crashed_table_on_usage",
mi_print_error(info->s, HA_ERR_CRASHED);
=== modified file 'storage/myisam/sort.c'
--- a/storage/myisam/sort.c 2008-04-28 16:24:05 +0000
+++ b/storage/myisam/sort.c 2009-10-15 21:38:29 +0000
@@ -489,7 +489,7 @@ int thr_write_keys(MI_SORT_PARAM *sort_p
{
MI_SORT_INFO *sort_info=sort_param->sort_info;
HA_CHECK *param=sort_info->param;
- ulong length, keys;
+ ulong UNINIT_VAR(length), keys;
ulong *rec_per_key_part=param->rec_per_key_part;
int got_error=sort_info->got_error;
uint i;
@@ -896,7 +896,7 @@ merge_buffers(MI_SORT_PARAM *info, uint
int error;
uint sort_length,maxcount;
ha_rows count;
- my_off_t to_start_filepos;
+ my_off_t UNINIT_VAR(to_start_filepos);
uchar *strpos;
BUFFPEK *buffpek,**refpek;
QUEUE queue;
=== modified file 'storage/myisammrg/ha_myisammrg.cc'
--- a/storage/myisammrg/ha_myisammrg.cc 2009-09-17 23:01:09 +0000
+++ b/storage/myisammrg/ha_myisammrg.cc 2009-10-15 21:38:29 +0000
@@ -147,7 +147,7 @@ static void split_file_name(const char *
extern "C" void myrg_print_wrong_table(const char *table_name)
{
- LEX_STRING db, name;
+ LEX_STRING db= {NULL, 0}, name;
char buf[FN_REFLEN];
split_file_name(table_name, &db, &name);
memcpy(buf, db.str, db.length);
=== modified file 'storage/myisammrg/myrg_open.c'
--- a/storage/myisammrg/myrg_open.c 2009-07-10 11:34:03 +0000
+++ b/storage/myisammrg/myrg_open.c 2009-08-28 16:21:54 +0000
@@ -37,7 +37,7 @@
MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
{
int save_errno,errpos=0;
- uint files= 0, i, dir_length, length, key_parts, min_keys= 0;
+ uint files= 0, i, dir_length, length, UNINIT_VAR(key_parts), min_keys= 0;
ulonglong file_offset=0;
char name_buff[FN_REFLEN*2],buff[FN_REFLEN],*end;
MYRG_INFO *m_info=0;
@@ -49,8 +49,6 @@ MYRG_INFO *myrg_open(const char *name, i
my_bool bad_children= FALSE;
DBUG_ENTER("myrg_open");
- LINT_INIT(key_parts);
-
bzero((char*) &file,sizeof(file));
if ((fd=my_open(fn_format(name_buff,name,"",MYRG_NAME_EXT,
MY_UNPACK_FILENAME|MY_APPEND_EXT),
=== modified file 'storage/myisammrg/myrg_rkey.c'
--- a/storage/myisammrg/myrg_rkey.c 2008-03-29 08:02:54 +0000
+++ b/storage/myisammrg/myrg_rkey.c 2009-08-28 16:21:54 +0000
@@ -38,16 +38,13 @@
int myrg_rkey(MYRG_INFO *info,uchar *buf,int inx, const uchar *key,
key_part_map keypart_map, enum ha_rkey_function search_flag)
{
- uchar *key_buff;
- uint pack_key_length;
- uint16 last_used_keyseg;
+ uchar *UNINIT_VAR(key_buff);
+ uint UNINIT_VAR(pack_key_length);
+ uint16 UNINIT_VAR(last_used_keyseg);
MYRG_TABLE *table;
MI_INFO *mi;
int err;
DBUG_ENTER("myrg_rkey");
- LINT_INIT(key_buff);
- LINT_INIT(pack_key_length);
- LINT_INIT(last_used_keyseg);
if (_myrg_init_queue(info,inx,search_flag))
DBUG_RETURN(my_errno);
=== modified file 'storage/ndb/include/mgmapi/ndb_logevent.h'
--- a/storage/ndb/include/mgmapi/ndb_logevent.h 2007-11-14 02:52:29 +0000
+++ b/storage/ndb/include/mgmapi/ndb_logevent.h 2009-08-28 15:06:59 +0000
@@ -272,6 +272,300 @@ extern "C" {
#endif
};
+ struct ndb_logevent_Connected {
+ unsigned node;
+ };
+
+ struct ndb_logevent_Disconnected {
+ unsigned node;
+ };
+
+ struct ndb_logevent_CommunicationClosed {
+ unsigned node;
+ };
+
+ struct ndb_logevent_CommunicationOpened {
+ unsigned node;
+ };
+
+ struct ndb_logevent_ConnectedApiVersion {
+ unsigned node;
+ unsigned version;
+ };
+
+ /* CHECKPOINT */
+ struct ndb_logevent_GlobalCheckpointStarted {
+ unsigned gci;
+ };
+ struct ndb_logevent_GlobalCheckpointCompleted {
+ unsigned gci;
+ };
+ struct ndb_logevent_LocalCheckpointStarted {
+ unsigned lci;
+ unsigned keep_gci;
+ unsigned restore_gci;
+ };
+ struct ndb_logevent_LocalCheckpointCompleted {
+ unsigned lci;
+ };
+ struct ndb_logevent_LCPStoppedInCalcKeepGci {
+ unsigned data;
+ };
+ struct ndb_logevent_LCPFragmentCompleted {
+ unsigned node;
+ unsigned table_id;
+ unsigned fragment_id;
+ };
+ struct ndb_logevent_UndoLogBlocked {
+ unsigned acc_count;
+ unsigned tup_count;
+ };
+
+ /* STARTUP */
+ struct ndb_logevent_NDBStartStarted {
+ unsigned version;
+ };
+ struct ndb_logevent_NDBStartCompleted {
+ unsigned version;
+ };
+ struct ndb_logevent_STTORRYRecieved {
+ };
+ struct ndb_logevent_StartPhaseCompleted {
+ unsigned phase;
+ unsigned starttype;
+ };
+ struct ndb_logevent_CM_REGCONF {
+ unsigned own_id;
+ unsigned president_id;
+ unsigned dynamic_id;
+ };
+ struct ndb_logevent_CM_REGREF {
+ unsigned own_id;
+ unsigned other_id;
+ unsigned cause;
+ };
+ struct ndb_logevent_FIND_NEIGHBOURS {
+ unsigned own_id;
+ unsigned left_id;
+ unsigned right_id;
+ unsigned dynamic_id;
+ };
+ struct ndb_logevent_NDBStopStarted {
+ unsigned stoptype;
+ };
+ struct ndb_logevent_NDBStopCompleted {
+ unsigned action;
+ unsigned signum;
+ };
+ struct ndb_logevent_NDBStopForced {
+ unsigned action;
+ unsigned signum;
+ unsigned error;
+ unsigned sphase;
+ unsigned extra;
+ };
+ struct ndb_logevent_NDBStopAborted {
+ };
+ struct ndb_logevent_StartREDOLog {
+ unsigned node;
+ unsigned keep_gci;
+ unsigned completed_gci;
+ unsigned restorable_gci;
+ };
+ struct ndb_logevent_StartLog {
+ unsigned log_part;
+ unsigned start_mb;
+ unsigned stop_mb;
+ unsigned gci;
+ };
+ struct ndb_logevent_UNDORecordsExecuted {
+ unsigned block;
+ unsigned data1;
+ unsigned data2;
+ unsigned data3;
+ unsigned data4;
+ unsigned data5;
+ unsigned data6;
+ unsigned data7;
+ unsigned data8;
+ unsigned data9;
+ unsigned data10;
+ };
+
+ /* NODERESTART */
+ struct ndb_logevent_NR_CopyDict {
+ };
+ struct ndb_logevent_NR_CopyDistr {
+ };
+ struct ndb_logevent_NR_CopyFragsStarted {
+ unsigned dest_node;
+ };
+ struct ndb_logevent_NR_CopyFragDone {
+ unsigned dest_node;
+ unsigned table_id;
+ unsigned fragment_id;
+ };
+ struct ndb_logevent_NR_CopyFragsCompleted {
+ unsigned dest_node;
+ };
+
+ struct ndb_logevent_NodeFailCompleted {
+ unsigned block; /* 0 = all */
+ unsigned failed_node;
+ unsigned completing_node; /* 0 = all */
+ };
+ struct ndb_logevent_NODE_FAILREP {
+ unsigned failed_node;
+ unsigned failure_state;
+ };
+ struct ndb_logevent_ArbitState {
+ unsigned code; /* code & state << 16 */
+ unsigned arbit_node;
+ unsigned ticket_0;
+ unsigned ticket_1;
+ /* TODO */
+ };
+ struct ndb_logevent_ArbitResult {
+ unsigned code; /* code & state << 16 */
+ unsigned arbit_node;
+ unsigned ticket_0;
+ unsigned ticket_1;
+ /* TODO */
+ };
+ struct ndb_logevent_GCP_TakeoverStarted {
+ };
+ struct ndb_logevent_GCP_TakeoverCompleted {
+ };
+ struct ndb_logevent_LCP_TakeoverStarted {
+ };
+ struct ndb_logevent_LCP_TakeoverCompleted {
+ unsigned state;
+ };
+
+ /* STATISTIC */
+ struct ndb_logevent_TransReportCounters {
+ unsigned trans_count;
+ unsigned commit_count;
+ unsigned read_count;
+ unsigned simple_read_count;
+ unsigned write_count;
+ unsigned attrinfo_count;
+ unsigned conc_op_count;
+ unsigned abort_count;
+ unsigned scan_count;
+ unsigned range_scan_count;
+ };
+ struct ndb_logevent_OperationReportCounters {
+ unsigned ops;
+ };
+ struct ndb_logevent_TableCreated {
+ unsigned table_id;
+ };
+ struct ndb_logevent_JobStatistic {
+ unsigned mean_loop_count;
+ };
+ struct ndb_logevent_SendBytesStatistic {
+ unsigned to_node;
+ unsigned mean_sent_bytes;
+ };
+ struct ndb_logevent_ReceiveBytesStatistic {
+ unsigned from_node;
+ unsigned mean_received_bytes;
+ };
+ struct ndb_logevent_MemoryUsage {
+ int gth;
+ /* union is for compatibility backward.
+ * page_size_kb member variable should be removed in the future
+ */
+ union {
+ unsigned page_size_kb;
+ unsigned page_size_bytes;
+ };
+ unsigned pages_used;
+ unsigned pages_total;
+ unsigned block;
+ };
+
+ /* ERROR */
+ struct ndb_logevent_TransporterError {
+ unsigned to_node;
+ unsigned code;
+ };
+ struct ndb_logevent_TransporterWarning {
+ unsigned to_node;
+ unsigned code;
+ };
+ struct ndb_logevent_MissedHeartbeat {
+ unsigned node;
+ unsigned count;
+ };
+ struct ndb_logevent_DeadDueToHeartbeat {
+ unsigned node;
+ };
+ struct ndb_logevent_WarningEvent {
+ /* TODO */
+ };
+
+ /* INFO */
+ struct ndb_logevent_SentHeartbeat {
+ unsigned node;
+ };
+ struct ndb_logevent_CreateLogBytes {
+ unsigned node;
+ };
+ struct ndb_logevent_InfoEvent {
+ /* TODO */
+ };
+ struct ndb_logevent_EventBufferStatus {
+ unsigned usage;
+ unsigned alloc;
+ unsigned max;
+ unsigned apply_gci_l;
+ unsigned apply_gci_h;
+ unsigned latest_gci_l;
+ unsigned latest_gci_h;
+ };
+
+ /** Log event data for @ref NDB_LE_BackupStarted */
+ struct ndb_logevent_BackupStarted {
+ unsigned starting_node;
+ unsigned backup_id;
+ };
+ /** Log event data @ref NDB_LE_BackupFailedToStart */
+ struct ndb_logevent_BackupFailedToStart {
+ unsigned starting_node;
+ unsigned error;
+ };
+ /** Log event data @ref NDB_LE_BackupCompleted */
+ struct ndb_logevent_BackupCompleted {
+ unsigned starting_node;
+ unsigned backup_id;
+ unsigned start_gci;
+ unsigned stop_gci;
+ unsigned n_records;
+ unsigned n_log_records;
+ unsigned n_bytes;
+ unsigned n_log_bytes;
+ };
+ /** Log event data @ref NDB_LE_BackupAborted */
+ struct ndb_logevent_BackupAborted {
+ unsigned starting_node;
+ unsigned backup_id;
+ unsigned error;
+ };
+ /** Log event data @ref NDB_LE_SingleUser */
+ struct ndb_logevent_SingleUser {
+ unsigned type;
+ unsigned node_id;
+ };
+ /** Log even data @ref NDB_LE_StartReport */
+ struct ndb_logevent_StartReport {
+ unsigned report_type;
+ unsigned remaining_time;
+ unsigned bitmask_size;
+ unsigned bitmask_data[1];
+ };
+
/**
* Structure to store and retrieve log event information.
* @see @ref secSLogEvents
@@ -305,354 +599,87 @@ extern "C" {
*/
union {
/* CONNECT */
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned node;
- } Connected;
-
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned node;
- } Disconnected;
-
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned node;
- } CommunicationClosed;
-
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned node;
- } CommunicationOpened;
-
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned node;
- unsigned version;
- } ConnectedApiVersion;
+ struct ndb_logevent_Connected Connected;
+ struct ndb_logevent_Disconnected Disconnected;
+ struct ndb_logevent_CommunicationClosed CommunicationClosed;
+ struct ndb_logevent_CommunicationOpened CommunicationOpened;
+ struct ndb_logevent_ConnectedApiVersion ConnectedApiVersion;
/* CHECKPOINT */
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned gci;
- } GlobalCheckpointStarted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned gci;
- } GlobalCheckpointCompleted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned lci;
- unsigned keep_gci;
- unsigned restore_gci;
- } LocalCheckpointStarted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned lci;
- } LocalCheckpointCompleted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned data;
- } LCPStoppedInCalcKeepGci;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned node;
- unsigned table_id;
- unsigned fragment_id;
- } LCPFragmentCompleted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned acc_count;
- unsigned tup_count;
- } UndoLogBlocked;
+ struct ndb_logevent_GlobalCheckpointStarted GlobalCheckpointStarted;
+ struct ndb_logevent_GlobalCheckpointCompleted GlobalCheckpointCompleted;
+ struct ndb_logevent_LocalCheckpointStarted LocalCheckpointStarted;
+ struct ndb_logevent_LocalCheckpointCompleted LocalCheckpointCompleted;
+ struct ndb_logevent_LCPStoppedInCalcKeepGci LCPStoppedInCalcKeepGci;
+ struct ndb_logevent_LCPFragmentCompleted LCPFragmentCompleted;
+ struct ndb_logevent_UndoLogBlocked UndoLogBlocked;
/* STARTUP */
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned version;
- } NDBStartStarted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned version;
- } NDBStartCompleted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- } STTORRYRecieved;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned phase;
- unsigned starttype;
- } StartPhaseCompleted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned own_id;
- unsigned president_id;
- unsigned dynamic_id;
- } CM_REGCONF;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned own_id;
- unsigned other_id;
- unsigned cause;
- } CM_REGREF;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned own_id;
- unsigned left_id;
- unsigned right_id;
- unsigned dynamic_id;
- } FIND_NEIGHBOURS;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned stoptype;
- } NDBStopStarted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned action;
- unsigned signum;
- } NDBStopCompleted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned action;
- unsigned signum;
- unsigned error;
- unsigned sphase;
- unsigned extra;
- } NDBStopForced;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- } NDBStopAborted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned node;
- unsigned keep_gci;
- unsigned completed_gci;
- unsigned restorable_gci;
- } StartREDOLog;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned log_part;
- unsigned start_mb;
- unsigned stop_mb;
- unsigned gci;
- } StartLog;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned block;
- unsigned data1;
- unsigned data2;
- unsigned data3;
- unsigned data4;
- unsigned data5;
- unsigned data6;
- unsigned data7;
- unsigned data8;
- unsigned data9;
- unsigned data10;
- } UNDORecordsExecuted;
+ struct ndb_logevent_NDBStartStarted NDBStartStarted;
+ struct ndb_logevent_NDBStartCompleted NDBStartCompleted;
+ struct ndb_logevent_STTORRYRecieved STTORRYRecieved;
+ struct ndb_logevent_StartPhaseCompleted StartPhaseCompleted;
+ struct ndb_logevent_CM_REGCONF CM_REGCONF;
+ struct ndb_logevent_CM_REGREF CM_REGREF;
+ struct ndb_logevent_FIND_NEIGHBOURS FIND_NEIGHBOURS;
+ struct ndb_logevent_NDBStopStarted NDBStopStarted;
+ struct ndb_logevent_NDBStopCompleted NDBStopCompleted;
+ struct ndb_logevent_NDBStopForced NDBStopForced;
+ struct ndb_logevent_NDBStopAborted NDBStopAborted;
+ struct ndb_logevent_StartREDOLog StartREDOLog;
+ struct ndb_logevent_StartLog StartLog;
+ struct ndb_logevent_UNDORecordsExecuted UNDORecordsExecuted;
/* NODERESTART */
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- } NR_CopyDict;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- } NR_CopyDistr;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned dest_node;
- } NR_CopyFragsStarted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned dest_node;
- unsigned table_id;
- unsigned fragment_id;
- } NR_CopyFragDone;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned dest_node;
- } NR_CopyFragsCompleted;
-
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned block; /* 0 = all */
- unsigned failed_node;
- unsigned completing_node; /* 0 = all */
- } NodeFailCompleted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned failed_node;
- unsigned failure_state;
- } NODE_FAILREP;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned code; /* code & state << 16 */
- unsigned arbit_node;
- unsigned ticket_0;
- unsigned ticket_1;
- /* TODO */
- } ArbitState;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned code; /* code & state << 16 */
- unsigned arbit_node;
- unsigned ticket_0;
- unsigned ticket_1;
- /* TODO */
- } ArbitResult;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- } GCP_TakeoverStarted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- } GCP_TakeoverCompleted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- } LCP_TakeoverStarted;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned state;
- } LCP_TakeoverCompleted;
+ struct ndb_logevent_NR_CopyDict NR_CopyDict;
+ struct ndb_logevent_NR_CopyDistr NR_CopyDistr;
+ struct ndb_logevent_NR_CopyFragsStarted NR_CopyFragsStarted;
+ struct ndb_logevent_NR_CopyFragDone NR_CopyFragDone;
+ struct ndb_logevent_NR_CopyFragsCompleted NR_CopyFragsCompleted;
+
+ struct ndb_logevent_NodeFailCompleted NodeFailCompleted;
+ struct ndb_logevent_NODE_FAILREP NODE_FAILREP;
+ struct ndb_logevent_ArbitState ArbitState;
+ struct ndb_logevent_ArbitResult ArbitResult;
+ struct ndb_logevent_GCP_TakeoverStarted GCP_TakeoverStarted;
+ struct ndb_logevent_GCP_TakeoverCompleted GCP_TakeoverCompleted;
+ struct ndb_logevent_LCP_TakeoverStarted LCP_TakeoverStarted;
+ struct ndb_logevent_LCP_TakeoverCompleted LCP_TakeoverCompleted;
/* STATISTIC */
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned trans_count;
- unsigned commit_count;
- unsigned read_count;
- unsigned simple_read_count;
- unsigned write_count;
- unsigned attrinfo_count;
- unsigned conc_op_count;
- unsigned abort_count;
- unsigned scan_count;
- unsigned range_scan_count;
- } TransReportCounters;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned ops;
- } OperationReportCounters;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned table_id;
- } TableCreated;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned mean_loop_count;
- } JobStatistic;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned to_node;
- unsigned mean_sent_bytes;
- } SendBytesStatistic;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned from_node;
- unsigned mean_received_bytes;
- } ReceiveBytesStatistic;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- int gth;
- /* union is for compatibility backward.
- * page_size_kb member variable should be removed in the future
- */
- union {
- unsigned page_size_kb;
- unsigned page_size_bytes;
- };
- unsigned pages_used;
- unsigned pages_total;
- unsigned block;
- } MemoryUsage;
+ struct ndb_logevent_TransReportCounters TransReportCounters;
+ struct ndb_logevent_OperationReportCounters OperationReportCounters;
+ struct ndb_logevent_TableCreated TableCreated;
+ struct ndb_logevent_JobStatistic JobStatistic;
+ struct ndb_logevent_SendBytesStatistic SendBytesStatistic;
+ struct ndb_logevent_ReceiveBytesStatistic ReceiveBytesStatistic;
+ struct ndb_logevent_MemoryUsage MemoryUsage;
/* ERROR */
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned to_node;
- unsigned code;
- } TransporterError;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned to_node;
- unsigned code;
- } TransporterWarning;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned node;
- unsigned count;
- } MissedHeartbeat;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned node;
- } DeadDueToHeartbeat;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- /* TODO */
- } WarningEvent;
+ struct ndb_logevent_TransporterError TransporterError;
+ struct ndb_logevent_TransporterWarning TransporterWarning;
+ struct ndb_logevent_MissedHeartbeat MissedHeartbeat;
+ struct ndb_logevent_DeadDueToHeartbeat DeadDueToHeartbeat;
+ struct ndb_logevent_WarningEvent WarningEvent;
/* INFO */
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned node;
- } SentHeartbeat;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned node;
- } CreateLogBytes;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- /* TODO */
- } InfoEvent;
- /** Log event specific data for for corresponding NDB_LE_ log event */
- struct {
- unsigned usage;
- unsigned alloc;
- unsigned max;
- unsigned apply_gci_l;
- unsigned apply_gci_h;
- unsigned latest_gci_l;
- unsigned latest_gci_h;
- } EventBufferStatus;
+ struct ndb_logevent_SentHeartbeat SentHeartbeat;
+ struct ndb_logevent_CreateLogBytes CreateLogBytes;
+ struct ndb_logevent_InfoEvent InfoEvent;
+ struct ndb_logevent_EventBufferStatus EventBufferStatus;
/** Log event data for @ref NDB_LE_BackupStarted */
- struct {
- unsigned starting_node;
- unsigned backup_id;
- } BackupStarted;
+ struct ndb_logevent_BackupStarted BackupStarted;
/** Log event data @ref NDB_LE_BackupFailedToStart */
- struct {
- unsigned starting_node;
- unsigned error;
- } BackupFailedToStart;
+ struct ndb_logevent_BackupFailedToStart BackupFailedToStart;
/** Log event data @ref NDB_LE_BackupCompleted */
- struct {
- unsigned starting_node;
- unsigned backup_id;
- unsigned start_gci;
- unsigned stop_gci;
- unsigned n_records;
- unsigned n_log_records;
- unsigned n_bytes;
- unsigned n_log_bytes;
- } BackupCompleted;
+ struct ndb_logevent_BackupCompleted BackupCompleted;
/** Log event data @ref NDB_LE_BackupAborted */
- struct {
- unsigned starting_node;
- unsigned backup_id;
- unsigned error;
- } BackupAborted;
+ struct ndb_logevent_BackupAborted BackupAborted;
/** Log event data @ref NDB_LE_SingleUser */
- struct {
- unsigned type;
- unsigned node_id;
- } SingleUser;
+ struct ndb_logevent_SingleUser SingleUser;
/** Log even data @ref NDB_LE_StartReport */
- struct {
- unsigned report_type;
- unsigned remaining_time;
- unsigned bitmask_size;
- unsigned bitmask_data[1];
- } StartReport;
+ struct ndb_logevent_StartReport StartReport;
#ifndef DOXYGEN_FIX
};
#else
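
Note: the ndb_logevent.h change hoists each per-event payload out of the anonymous structs inside the union and gives it a named type (struct ndb_logevent_<EventName>); the union then simply embeds those types, so layout and member names stay the same while a single event class's payload can now be declared and passed around on its own. A hedged sketch of what that enables (the header path, the 'type' member and the field names are assumed from the hunks above and the usual MGM API):

#include <stdio.h>
#include <mgmapi/ndb_logevent.h>

/* A handler can now be typed against one event class instead of the union. */
static void report_backup(const struct ndb_logevent_BackupCompleted *ev)
{
  printf("backup %u on node %u: %u records, %u bytes\n",
         ev->backup_id, ev->starting_node, ev->n_records, ev->n_bytes);
}

static void handle_event(const struct ndb_logevent *event)
{
  if (event->type == NDB_LE_BackupCompleted)
    report_backup(&event->BackupCompleted);
}
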
=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp 2008-01-04 10:34:23 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp 2009-09-01 12:32:26 +0000
@@ -7521,8 +7521,8 @@ void Dbdict::execGET_TABINFOREQ(Signal*
return;
}
releaseSections(signal);
-
- DictObject * old_ptr_p = old_ptr_p = get_object(tableName, len);
+
+ DictObject * old_ptr_p = get_object(tableName, len);
if(old_ptr_p)
obj_id = old_ptr_p->m_id;
} else {
=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp 2007-08-27 18:58:49 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp 2009-09-02 11:45:48 +0000
@@ -106,8 +106,13 @@ void
AsyncFile::doStart()
{
// Stacksize for filesystem threads
- // An 8k stack should be enough
+#if !defined(DBUG_OFF) && defined (__hpux)
+ // Empirical evidence indicates at least 32k
+ const NDB_THREAD_STACKSIZE stackSize = 32768;
+#else
+ // Otherwise an 8k stack should be enough
const NDB_THREAD_STACKSIZE stackSize = 8192;
+#endif
char buf[16];
numAsyncFiles++;
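
Note: the stack-size tweak only affects debug builds on HP-UX, where the filesystem threads were observed to need more than the default 8k. The same per-platform sizing, expressed with plain pthreads purely for illustration (NDB actually goes through its own NdbThread wrapper, and some platforms reject sizes below PTHREAD_STACK_MIN, hence the error check):

#include <pthread.h>
#include <stdio.h>

static void *io_thread_main(void *arg) { (void) arg; return NULL; }

static int start_io_thread(pthread_t *tid)
{
#if !defined(DBUG_OFF) && defined(__hpux)
  const size_t stack_size = 32768;   /* debug on HP-UX: empirically needs more */
#else
  const size_t stack_size = 8192;    /* otherwise assumed to be enough */
#endif
  pthread_attr_t attr;
  int rc;

  pthread_attr_init(&attr);
  if (pthread_attr_setstacksize(&attr, stack_size) != 0)
    fprintf(stderr, "requested stack below platform minimum, keeping default\n");
  rc= pthread_create(tid, &attr, io_thread_main, NULL);
  pthread_attr_destroy(&attr);
  return rc;
}
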
=== modified file 'storage/ndb/src/mgmsrv/InitConfigFileParser.cpp'
--- a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp 2007-05-24 10:24:36 +0000
+++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp 2009-09-01 12:32:26 +0000
@@ -208,11 +208,10 @@ InitConfigFileParser::run_config_rules(C
ctx.m_config->put("NoOfNodes", nNodes);
char tmpLine[MAX_LINE_LENGTH];
- BaseString::snprintf(tmpLine, MAX_LINE_LENGTH, "EXTERNAL SYSTEM_");
- strncat(tmpLine, system, MAX_LINE_LENGTH);
- strncat(tmpLine, ":NoOfConnections", MAX_LINE_LENGTH);
+ BaseString::snprintf(tmpLine, MAX_LINE_LENGTH,
+ "EXTERNAL SYSTEM_%s:NoOfConnections", system);
ctx.m_config->put(tmpLine, nExtConnections);
-
+
Config * ret = new Config();
ret->m_configValues = (struct ndb_mgm_configuration*)ctx.m_configValues.getConfigValues();
ret->m_oldConfig = ctx.m_config; ctx.m_config = 0;
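
Note: the old code here was subtly unsafe, because strncat()'s size argument bounds the number of characters appended, not the total size of the destination, so chaining strncat(tmpLine, ..., MAX_LINE_LENGTH) calls can still overflow tmpLine. Building the string in one bounded call fixes that and reads better. The shape of the fix, with plain C snprintf standing in for the BaseString wrapper and a stand-in constant:

#include <stdio.h>

#define MAX_LINE_LENGTH 1024   /* stand-in for the real constant in this file */

static void build_connections_key(char *tmpLine, const char *system)
{
  /* One bounded call: the size caps the whole result, including the NUL. */
  snprintf(tmpLine, MAX_LINE_LENGTH,
           "EXTERNAL SYSTEM_%s:NoOfConnections", system);
}
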
=== modified file 'storage/pbxt/src/cache_xt.cc'
--- a/storage/pbxt/src/cache_xt.cc 2009-08-17 11:12:36 +0000
+++ b/storage/pbxt/src/cache_xt.cc 2009-10-30 18:50:56 +0000
@@ -374,7 +374,7 @@ xtPublic void xt_ind_release_handle(XTIn
{
DcHandleSlotPtr hs;
XTIndBlockPtr block = NULL;
- u_int hash_idx = NULL;
+ u_int hash_idx = 0;
DcSegmentPtr seg = NULL;
XTIndBlockPtr xblock;
@@ -1379,7 +1379,7 @@ xtPublic xtBool xt_ind_fetch(XTOpenTable
ASSERT_NS(iref->ir_xlock == 2);
#endif
if (!(block = ind_cac_fetch(ot, ind, address, &seg, TRUE)))
- return NULL;
+ return 0;
branch_size = XT_GET_DISK_2(((XTIdxBranchDPtr) block->cb_data)->tb_size_2);
if (XT_GET_INDEX_BLOCK_LEN(branch_size) < 2 || XT_GET_INDEX_BLOCK_LEN(branch_size) > XT_INDEX_PAGE_SIZE) {
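
Note: both PBXT hunks are type corrections rather than behaviour changes. NULL is a pointer constant, while hash_idx is a u_int and xt_ind_fetch() returns xtBool, so the integral literal 0 is the right value and avoids conversion warnings where NULL expands to ((void *) 0). A minimal illustration (the typedefs stand in for PBXT's own):

typedef unsigned int u_int;
typedef int xtBool;

static xtBool fetch_failed_example(void)
{
  u_int hash_idx = 0;   /* integral zero, not the pointer constant NULL */
  (void) hash_idx;
  return 0;             /* xtBool "false", not NULL */
}
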
=== modified file 'storage/xtradb/include/buf0buf.ic'
--- a/storage/xtradb/include/buf0buf.ic 2009-06-25 01:43:25 +0000
+++ b/storage/xtradb/include/buf0buf.ic 2009-10-30 18:50:56 +0000
@@ -1056,7 +1056,7 @@ buf_page_release(
buf_block_t* block, /* in: buffer block */
ulint rw_latch, /* in: RW_S_LATCH, RW_X_LATCH,
RW_NO_LATCH */
- mtr_t* mtr) /* in: mtr */
+ mtr_t* mtr __attribute__((unused))) /* in: mtr */
{
ut_ad(block);
=== modified file 'storage/xtradb/include/srv0srv.h'
--- a/storage/xtradb/include/srv0srv.h 2009-08-10 22:36:10 +0000
+++ b/storage/xtradb/include/srv0srv.h 2009-10-31 19:22:50 +0000
@@ -116,8 +116,8 @@ extern ulint srv_log_file_size;
extern ulint srv_log_buffer_size;
extern ulong srv_flush_log_at_trx_commit;
-extern ulint srv_show_locks_held;
-extern ulint srv_show_verbose_locks;
+extern ulong srv_show_locks_held;
+extern ulong srv_show_verbose_locks;
/* The sort order table of the MySQL latin1_swedish_ci character set
collation */
@@ -166,11 +166,11 @@ extern ulint srv_fast_shutdown; /* If t
extern ibool srv_innodb_status;
extern unsigned long long srv_stats_sample_pages;
-extern ulint srv_stats_method;
+extern ulong srv_stats_method;
#define SRV_STATS_METHOD_NULLS_EQUAL 0
#define SRV_STATS_METHOD_NULLS_NOT_EQUAL 1
#define SRV_STATS_METHOD_IGNORE_NULLS 2
-extern ulint srv_stats_auto_update;
+extern ulong srv_stats_auto_update;
extern ibool srv_use_doublewrite_buf;
extern ibool srv_use_checksums;
@@ -183,19 +183,19 @@ extern ulong srv_max_purge_lag;
extern ulong srv_replication_delay;
-extern ulint srv_io_capacity;
+extern ulong srv_io_capacity;
extern long long srv_ibuf_max_size;
-extern ulint srv_ibuf_active_contract;
-extern ulint srv_ibuf_accel_rate;
-extern ulint srv_flush_neighbor_pages;
-extern ulint srv_enable_unsafe_group_commit;
-extern ulint srv_read_ahead;
-extern ulint srv_adaptive_checkpoint;
+extern ulong srv_ibuf_active_contract;
+extern ulong srv_ibuf_accel_rate;
+extern ulong srv_flush_neighbor_pages;
+extern ulong srv_enable_unsafe_group_commit;
+extern ulong srv_read_ahead;
+extern ulong srv_adaptive_checkpoint;
-extern ulint srv_expand_import;
+extern ulong srv_expand_import;
-extern ulint srv_extra_rsegments;
-extern ulint srv_dict_size_limit;
+extern ulong srv_extra_rsegments;
+extern ulong srv_dict_size_limit;
/*-------------------------------------------*/
extern ulint srv_n_rows_inserted;
=== modified file 'storage/xtradb/srv/srv0srv.c'
--- a/storage/xtradb/srv/srv0srv.c 2009-09-15 10:46:35 +0000
+++ b/storage/xtradb/srv/srv0srv.c 2009-10-31 19:22:50 +0000
@@ -160,8 +160,8 @@ UNIV_INTERN ulint srv_log_file_size = UL
UNIV_INTERN ulint srv_log_buffer_size = ULINT_MAX;
UNIV_INTERN ulong srv_flush_log_at_trx_commit = 1;
-UNIV_INTERN ulint srv_show_locks_held = 10;
-UNIV_INTERN ulint srv_show_verbose_locks = 0;
+UNIV_INTERN ulong srv_show_locks_held = 10;
+UNIV_INTERN ulong srv_show_verbose_locks = 0;
/* The sort order table of the MySQL latin1_swedish_ci character set
@@ -338,8 +338,8 @@ UNIV_INTERN ibool srv_innodb_status = FA
/* When estimating number of different key values in an index, sample
this many index pages */
UNIV_INTERN unsigned long long srv_stats_sample_pages = 8;
-UNIV_INTERN ulint srv_stats_method = 0;
-UNIV_INTERN ulint srv_stats_auto_update = 1;
+UNIV_INTERN ulong srv_stats_method = 0;
+UNIV_INTERN ulong srv_stats_auto_update = 1;
UNIV_INTERN ibool srv_use_doublewrite_buf = TRUE;
UNIV_INTERN ibool srv_use_checksums = TRUE;
@@ -349,7 +349,7 @@ UNIV_INTERN int srv_query_thread_priorit
UNIV_INTERN ulong srv_replication_delay = 0;
-UNIV_INTERN ulint srv_io_capacity = 100;
+UNIV_INTERN ulong srv_io_capacity = 100;
/* Returns the number of IO operations that is X percent of the capacity.
PCT_IO(5) -> returns the number of IO operations that is 5% of the max
@@ -357,20 +357,20 @@ where max is srv_io_capacity. */
#define PCT_IO(pct) ((ulint) (srv_io_capacity * ((double) pct / 100.0)))
UNIV_INTERN long long srv_ibuf_max_size = 0;
-UNIV_INTERN ulint srv_ibuf_active_contract = 0; /* 0:disable 1:enable */
-UNIV_INTERN ulint srv_ibuf_accel_rate = 100;
+UNIV_INTERN ulong srv_ibuf_active_contract = 0; /* 0:disable 1:enable */
+UNIV_INTERN ulong srv_ibuf_accel_rate = 100;
#define PCT_IBUF_IO(pct) ((ulint) (srv_io_capacity * srv_ibuf_accel_rate * ((double) pct / 10000.0)))
-UNIV_INTERN ulint srv_flush_neighbor_pages = 1; /* 0:disable 1:enable */
+UNIV_INTERN ulong srv_flush_neighbor_pages = 1; /* 0:disable 1:enable */
-UNIV_INTERN ulint srv_enable_unsafe_group_commit = 0; /* 0:disable 1:enable */
-UNIV_INTERN ulint srv_read_ahead = 3; /* 1: random 2: linear 3: Both */
-UNIV_INTERN ulint srv_adaptive_checkpoint = 0; /* 0: none 1: reflex 2: estimate */
+UNIV_INTERN ulong srv_enable_unsafe_group_commit = 0; /* 0:disable 1:enable */
+UNIV_INTERN ulong srv_read_ahead = 3; /* 1: random 2: linear 3: Both */
+UNIV_INTERN ulong srv_adaptive_checkpoint = 0; /* 0: none 1: reflex 2: estimate */
-UNIV_INTERN ulint srv_expand_import = 0; /* 0:disable 1:enable */
+UNIV_INTERN ulong srv_expand_import = 0; /* 0:disable 1:enable */
-UNIV_INTERN ulint srv_extra_rsegments = 0; /* extra rseg for users */
-UNIV_INTERN ulint srv_dict_size_limit = 0;
+UNIV_INTERN ulong srv_extra_rsegments = 0; /* extra rseg for users */
+UNIV_INTERN ulong srv_dict_size_limit = 0;
/*-------------------------------------------*/
UNIV_INTERN ulong srv_n_spin_wait_rounds = 20;
UNIV_INTERN ulong srv_n_free_tickets_to_enter = 500;
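
Note: the ulint-to-ulong switch in srv0srv.h and srv0srv.c lines these variables up with the type the server-side settings code stores into; presumably they are exposed as system variables, and the plugin macros for unsigned long settings write through a ulong, which is not the same width as InnoDB's ulint on every platform (notably 64-bit Windows). For reference, a ulong setting is typically wired up like this (the name, range and help text here are made up):

#include <mysql/plugin.h>
/* ulong is the server's typedef from my_global.h in a real build. */

static ulong srv_io_capacity = 100;

static MYSQL_SYSVAR_ULONG(io_capacity, srv_io_capacity,
  PLUGIN_VAR_RQCMDARG,
  "Upper bound on background IO operations per second (illustrative text)",
  NULL, NULL,
  100,      /* default */
  100,      /* minimum */
  ~0UL,     /* maximum */
  0);       /* block size */
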
=== modified file 'strings/ctype-ucs2.c'
--- a/strings/ctype-ucs2.c 2009-07-02 10:15:33 +0000
+++ b/strings/ctype-ucs2.c 2009-10-15 21:38:29 +0000
@@ -203,11 +203,10 @@ static int my_strnncoll_ucs2(CHARSET_INF
my_bool t_is_prefix)
{
int s_res,t_res;
- my_wc_t s_wc,t_wc;
+ my_wc_t UNINIT_VAR(s_wc),t_wc;
const uchar *se=s+slen;
const uchar *te=t+tlen;
MY_UNICASE_INFO **uni_plane= cs->caseinfo;
- LINT_INIT(s_wc);
while ( s < se && t < te )
{
@@ -318,12 +317,10 @@ static int my_strncasecmp_ucs2(CHARSET_I
const char *s, const char *t, size_t len)
{
int s_res,t_res;
- my_wc_t s_wc,t_wc;
+ my_wc_t UNINIT_VAR(s_wc),t_wc;
const char *se=s+len;
const char *te=t+len;
MY_UNICASE_INFO **uni_plane= cs->caseinfo;
- LINT_INIT(s_wc);
- LINT_INIT(t_wc);
while ( s < se && t < te )
{
@@ -1387,11 +1384,9 @@ int my_strnncoll_ucs2_bin(CHARSET_INFO *
my_bool t_is_prefix)
{
int s_res,t_res;
- my_wc_t s_wc,t_wc;
+ my_wc_t UNINIT_VAR(s_wc),t_wc;
const uchar *se=s+slen;
const uchar *te=t+tlen;
- LINT_INIT(s_wc);
- LINT_INIT(t_wc);
while ( s < se && t < te )
{
=== modified file 'strings/ctype-utf8.c'
--- a/strings/ctype-utf8.c 2009-07-02 10:15:33 +0000
+++ b/strings/ctype-utf8.c 2009-10-15 21:38:29 +0000
@@ -2310,12 +2310,10 @@ static int my_strnncoll_utf8(CHARSET_INF
my_bool t_is_prefix)
{
int s_res,t_res;
- my_wc_t s_wc,t_wc;
+ my_wc_t UNINIT_VAR(s_wc), t_wc;
const uchar *se=s+slen;
const uchar *te=t+tlen;
MY_UNICASE_INFO **uni_plane= cs->caseinfo;
- LINT_INIT(s_wc);
- LINT_INIT(t_wc);
while ( s < se && t < te )
{
@@ -2382,11 +2380,9 @@ static int my_strnncollsp_utf8(CHARSET_I
my_bool diff_if_only_endspace_difference)
{
int s_res, t_res, res;
- my_wc_t s_wc,t_wc;
+ my_wc_t UNINIT_VAR(s_wc),t_wc;
const uchar *se= s+slen, *te= t+tlen;
MY_UNICASE_INFO **uni_plane= cs->caseinfo;
- LINT_INIT(s_wc);
- LINT_INIT(t_wc);
#ifndef VARCHAR_WITH_DIFF_ENDSPACE_ARE_DIFFERENT_FOR_UNIQUE
diff_if_only_endspace_difference= 0;
=== modified file 'strings/decimal.c'
--- a/strings/decimal.c 2009-09-15 10:46:35 +0000
+++ b/strings/decimal.c 2009-10-15 21:38:29 +0000
@@ -1365,8 +1365,7 @@ int bin2decimal(const uchar *from, decim
if (intg0x)
{
int i=dig2bytes[intg0x];
- dec1 x;
- LINT_INIT(x);
+ dec1 UNINIT_VAR(x);
switch (i)
{
case 1: x=mi_sint1korr(from); break;
@@ -1407,8 +1406,7 @@ int bin2decimal(const uchar *from, decim
if (frac0x)
{
int i=dig2bytes[frac0x];
- dec1 x;
- LINT_INIT(x);
+ dec1 UNINIT_VAR(x);
switch (i)
{
case 1: x=mi_sint1korr(from); break;
@@ -1486,7 +1484,7 @@ decimal_round(decimal_t *from, decimal_t
decimal_round_mode mode)
{
int frac0=scale>0 ? ROUND_UP(scale) : scale/DIG_PER_DEC1,
- frac1=ROUND_UP(from->frac), round_digit,
+ frac1=ROUND_UP(from->frac), UNINIT_VAR(round_digit),
intg0=ROUND_UP(from->intg), error=E_DEC_OK, len=to->len,
intg1=ROUND_UP(from->intg +
(((intg0 + frac0)>0) && (from->buf[0] == DIG_MAX)));
@@ -1495,7 +1493,6 @@ decimal_round(decimal_t *from, decimal_t
sanity(to);
- LINT_INIT(round_digit);
switch (mode) {
case HALF_UP:
case HALF_EVEN: round_digit=5; break;
@@ -2123,13 +2120,11 @@ static int do_div_mod(decimal_t *from1,
{
int frac1=ROUND_UP(from1->frac)*DIG_PER_DEC1, prec1=from1->intg+frac1,
frac2=ROUND_UP(from2->frac)*DIG_PER_DEC1, prec2=from2->intg+frac2,
- error, i, intg0, frac0, len1, len2, dintg, div_mod=(!mod);
+ UNINIT_VAR(error), i, intg0, frac0, len1, len2, dintg, div_mod=(!mod);
dec1 *buf0, *buf1=from1->buf, *buf2=from2->buf, *tmp1,
*start2, *stop2, *stop1, *stop0, norm2, carry, *start1, dcarry;
dec2 norm_factor, x, guess, y;
- LINT_INIT(error);
-
if (mod)
to=mod;
=== modified file 'support-files/mysql.spec.sh'
--- a/support-files/mysql.spec.sh 2009-09-15 12:12:51 +0000
+++ b/support-files/mysql.spec.sh 2009-10-15 21:38:29 +0000
@@ -926,7 +926,7 @@ fi
- Install plugin libraries in appropriate packages.
- Disable libdaemon_example and ftexample plugins.
-* Thu Aug 20 2009 Jonathan Perkin <jperkin@stripped>
+* Thu Aug 20 2009 Jonathan Perkin <jperkin@xxxxxxx>
- Update variable used for mysql-test suite location to match source.
=== modified file 'tests/mysql_client_test.c'
--- a/tests/mysql_client_test.c 2009-10-26 11:35:42 +0000
+++ b/tests/mysql_client_test.c 2009-11-06 17:22:32 +0000
@@ -4267,7 +4267,7 @@ static void test_fetch_date()
myheader("test_fetch_date");
- /* Will not work if sql_mode is NO_ZERO_DATE (implicit if TRADITIONAL) /*/
+ /* Will not work if sql_mode is NO_ZERO_DATE (implicit if TRADITIONAL) */
rc= mysql_query(mysql, "SET SQL_MODE=''");
myquery(rc);
@@ -17004,7 +17004,7 @@ static void test_bug20023()
{
MYSQL con;
- int sql_big_selects_orig;
+ int sql_big_selects_orig= 0;
/*
Type of max_join_size is ha_rows, which might be ulong or off_t
depending on the platform or configure options. Preserve the string
@@ -17012,10 +17012,10 @@ static void test_bug20023()
*/
char max_join_size_orig[32];
- int sql_big_selects_2;
- int sql_big_selects_3;
- int sql_big_selects_4;
- int sql_big_selects_5;
+ int sql_big_selects_2= 0;
+ int sql_big_selects_3= 0;
+ int sql_big_selects_4= 0;
+ int sql_big_selects_5= 0;
char query_buffer[MAX_TEST_QUERY_LENGTH];
@@ -17154,7 +17154,7 @@ static void bug31418_impl()
MYSQL con;
my_bool is_null;
- int rc;
+ int rc= 0;
/* Create a new connection. */
=== modified file 'win/make_mariadb_win_dist'
--- a/win/make_mariadb_win_dist 2009-10-12 16:50:20 +0000
+++ b/win/make_mariadb_win_dist 2009-10-30 10:50:48 +0000
@@ -66,7 +66,7 @@ ls -lah $ZIPFILE
echo "$ZIPFILE is the Windows noinstall binary zip"
if [ $RES ] ; then
- echo "Archive contents differ from the standard file list, check the diff output above"
+ echo "Archive contents differ from the standard file list, check the diff output above"
else
echo "Archive contents match the standard list, OK"
fi