← Back to team overview

maria-developers team mailing list archive

bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (igor:2749)

 

#At lp:maria based on revid:igor@xxxxxxxxxxxx-20091030003635-ve43s6ub4tctpnrg

 2749 Igor Babaev	2009-11-03 [merge]
      Merge
added:
  mysql-test/r/table_elim_debug.result
  mysql-test/t/table_elim_debug.test
  storage/federated/README
  storage/federatedx/
  storage/federatedx/AUTHORS
  storage/federatedx/CMakeLists.txt
  storage/federatedx/ChangeLog
  storage/federatedx/FAQ
  storage/federatedx/Makefile.am
  storage/federatedx/README
  storage/federatedx/README.windows
  storage/federatedx/TODO
  storage/federatedx/federatedx_io.cc
  storage/federatedx/federatedx_io_mysql.cc
  storage/federatedx/federatedx_io_null.cc
  storage/federatedx/federatedx_probes.h
  storage/federatedx/federatedx_txn.cc
  storage/federatedx/ha_federatedx.cc
  storage/federatedx/ha_federatedx.h
  storage/federatedx/plug.in
renamed:
  storage/federated/plug.in => storage/federated/plug.in.disabled
modified:
  .bzrignore
  BUILD/SETUP.sh
  client/mysqladmin.cc
  extra/yassl/taocrypt/src/twofish.cpp
  libmysqld/Makefile.am
  mysql-test/mysql-test-run.pl
  mysql-test/r/alter_table.result
  mysql-test/r/func_misc.result
  mysql-test/r/gis-rtree.result
  mysql-test/r/innodb_xtradb_bug317074.result
  mysql-test/r/merge.result
  mysql-test/r/myisam_debug.result
  mysql-test/suite/federated/disabled.def
  mysql-test/suite/federated/federated.result
  mysql-test/suite/federated/federated.test
  mysql-test/suite/federated/federated_archive.result
  mysql-test/suite/federated/federated_bug_13118.result
  mysql-test/suite/federated/federated_bug_25714.result
  mysql-test/suite/federated/federated_cleanup.inc
  mysql-test/suite/federated/federated_innodb.result
  mysql-test/suite/federated/federated_server.result
  mysql-test/suite/federated/federated_server.test
  mysql-test/suite/federated/federated_transactions.result
  mysql-test/suite/parts/inc/partition.pre
  mysql-test/suite/parts/inc/partition_bigint.inc
  mysql-test/suite/parts/inc/partition_binary.inc
  mysql-test/suite/parts/inc/partition_bit.inc
  mysql-test/suite/parts/inc/partition_char.inc
  mysql-test/suite/parts/inc/partition_date.inc
  mysql-test/suite/parts/inc/partition_datetime.inc
  mysql-test/suite/parts/inc/partition_decimal.inc
  mysql-test/suite/parts/inc/partition_double.inc
  mysql-test/suite/parts/inc/partition_enum.inc
  mysql-test/suite/parts/inc/partition_float.inc
  mysql-test/suite/parts/inc/partition_int.inc
  mysql-test/suite/parts/inc/partition_mediumint.inc
  mysql-test/suite/parts/inc/partition_smallint.inc
  mysql-test/suite/parts/inc/partition_time.inc
  mysql-test/suite/parts/inc/partition_timestamp.inc
  mysql-test/suite/parts/inc/partition_tinyint.inc
  mysql-test/suite/parts/inc/partition_varbinary.inc
  mysql-test/suite/parts/inc/partition_varchar.inc
  mysql-test/suite/parts/inc/partition_year.inc
  mysql-test/suite/parts/r/partition_char_innodb.result
  mysql-test/suite/parts/r/partition_char_myisam.result
  mysql-test/suite/parts/r/partition_datetime_innodb.result
  mysql-test/suite/parts/r/partition_datetime_myisam.result
  mysql-test/suite/parts/t/part_supported_sql_func_innodb.test
  mysql-test/suite/parts/t/partition_alter1_1_2_innodb.test
  mysql-test/suite/parts/t/partition_alter4_myisam.test
  mysql-test/t/almost_full.test
  mysql-test/t/alter_table.test
  mysql-test/t/archive.test
  mysql-test/t/bench_count_distinct.test
  mysql-test/t/change_user.test
  mysql-test/t/check.test
  mysql-test/t/count_distinct2.test
  mysql-test/t/count_distinct3.test
  mysql-test/t/ctype_euckr.test
  mysql-test/t/derived.test
  mysql-test/t/events_time_zone.test
  mysql-test/t/fulltext2.test
  mysql-test/t/func_misc.test
  mysql-test/t/gis-rtree.test
  mysql-test/t/heap.test
  mysql-test/t/innodb_xtradb_bug317074.test
  mysql-test/t/insert.test
  mysql-test/t/kill.test
  mysql-test/t/merge.test
  mysql-test/t/multi_update.test
  mysql-test/t/multi_update2.test
  mysql-test/t/myisam.test
  mysql-test/t/myisam_debug.test
  mysql-test/t/myisampack.test
  mysql-test/t/order_by.test
  mysql-test/t/order_fill_sortbuf.test
  mysql-test/t/partition.test
  mysql-test/t/partition_archive.test
  mysql-test/t/select_found.test
  mysql-test/t/sp-big.test
  mysql-test/t/subselect.test
  mysql-test/t/warnings.test
  mysql-test/valgrind.supp
  scripts/make_binary_distribution.sh
  sql/mysqld.cc
  sql/sql_plugin.cc
  storage/pbxt/src/cache_xt.cc
  storage/xtradb/include/buf0buf.ic
  storage/xtradb/include/srv0srv.h
  storage/xtradb/srv/srv0srv.c
  win/make_mariadb_win_dist

=== modified file '.bzrignore'
--- a/.bzrignore	2009-09-15 12:12:51 +0000
+++ b/.bzrignore	2009-10-30 18:51:46 +0000
@@ -1921,3 +1921,4 @@ sql/share/ukrainian
 libmysqld/examples/mysqltest.cc
 extra/libevent/event-config.h
 libmysqld/opt_table_elimination.cc
+libmysqld/ha_federatedx.cc

=== modified file 'BUILD/SETUP.sh'
--- a/BUILD/SETUP.sh	2009-10-06 14:53:46 +0000
+++ b/BUILD/SETUP.sh	2009-10-29 00:04:56 +0000
@@ -146,6 +146,13 @@ then
   debug_cflags="$debug_cflags $debug_extra_cflags"
 fi
 
+static_link="--with-mysqld-ldflags=-all-static "
+static_link="$static_link --with-client-ldflags=-all-static"
+# we need local-infile in all binaries for rpl000001
+# if you need to disable local-infile in the client, write a build script
+# and unset local_infile_configs
+local_infile_configs="--enable-local-infile"
+
 #
 # Configuration options.
 #
@@ -154,6 +161,8 @@ base_configs="$base_configs --with-extra
 base_configs="$base_configs --enable-thread-safe-client "
 base_configs="$base_configs --with-big-tables"
 base_configs="$base_configs --with-plugin-maria --with-maria-tmp-tables --without-plugin-innodb_plugin"
+# Compile our client programs with static libraries to allow them to be moved
+base_configs="$base_configs --with-mysqld-ldflags=-static --with-client-ldflags=-static"
 
 if test -d "$path/../cmd-line-utils/readline"
 then
@@ -163,14 +172,6 @@ then
     base_configs="$base_configs --with-libedit"
 fi
 
-static_link="--with-mysqld-ldflags=-all-static "
-static_link="$static_link --with-client-ldflags=-all-static"
-# we need local-infile in all binaries for rpl000001
-# if you need to disable local-infile in the client, write a build script
-# and unset local_infile_configs
-local_infile_configs="--enable-local-infile"
-
-
 max_no_embedded_configs="$SSL_LIBRARY --with-plugins=max"
 max_no_qc_configs="$SSL_LIBRARY --with-plugins=max --without-query-cache"
 max_no_ndb_configs="$SSL_LIBRARY --with-plugins=max-no-ndb --with-embedded-server --with-libevent"

=== modified file 'client/mysqladmin.cc'
--- a/client/mysqladmin.cc	2009-10-26 11:35:42 +0000
+++ b/client/mysqladmin.cc	2009-10-30 18:50:56 +0000
@@ -1043,7 +1043,7 @@ static int drop_db(MYSQL *mysql, const c
     printf("Do you really want to drop the '%s' database [y/N] ",db);
     fflush(stdout);
     if (fgets(buf,sizeof(buf)-1,stdin) == 0 ||
-        (*buf != 'y') && (*buf != 'Y'))
+        ((*buf != 'y') && (*buf != 'Y')))
     {
       puts("\nOK, aborting database drop!");
       return -1;

=== modified file 'extra/yassl/taocrypt/src/twofish.cpp'
--- a/extra/yassl/taocrypt/src/twofish.cpp	2007-01-29 15:54:40 +0000
+++ b/extra/yassl/taocrypt/src/twofish.cpp	2009-10-30 18:50:56 +0000
@@ -55,6 +55,7 @@ void Twofish::Process(byte* out, const b
             in  += BLOCK_SIZE;
         }
     else if (mode_ == CBC)
+    {
         if (dir_ == ENCRYPTION)
             while (blocks--) {
                 r_[0] ^= *(word32*)in;
@@ -82,6 +83,7 @@ void Twofish::Process(byte* out, const b
                 out += BLOCK_SIZE;
                 in  += BLOCK_SIZE;
             }
+    }
 }
 
 #endif // DO_TWOFISH_ASM

=== modified file 'libmysqld/Makefile.am'
--- a/libmysqld/Makefile.am	2009-09-15 10:46:35 +0000
+++ b/libmysqld/Makefile.am	2009-10-30 18:50:56 +0000
@@ -124,7 +124,7 @@ handler.o:	handler.cc
 # found to append fileslists that collected by configure
 # to the sources list
 
-ha_federated.o:ha_federated.cc
+ha_federatedx.o:ha_federatedx.cc
 		$(CXXCOMPILE) $(LM_CFLAGS) -c $<
 
 ha_heap.o:ha_heap.cc

=== modified file 'mysql-test/mysql-test-run.pl'
--- a/mysql-test/mysql-test-run.pl	2009-10-26 11:35:42 +0000
+++ b/mysql-test/mysql-test-run.pl	2009-10-30 18:50:56 +0000
@@ -127,7 +127,6 @@ my $path_config_file;           # The ge
 our $opt_vs_config = $ENV{'MTR_VS_CONFIG'};
 
 my $DEFAULT_SUITES= "binlog,federated,main,maria,rpl,innodb,parts";
-my $opt_suites;
 
 our $opt_usage;
 our $opt_list_options;

=== modified file 'mysql-test/r/alter_table.result'
--- a/mysql-test/r/alter_table.result	2009-06-07 10:05:19 +0000
+++ b/mysql-test/r/alter_table.result	2009-10-28 07:52:34 +0000
@@ -143,16 +143,6 @@ t1	1	n4	1	n4	A	NULL	NULL	NULL	YES	BTREE	
 t1	1	n4	2	n1	A	NULL	NULL	NULL		BTREE	disabled
 t1	1	n4	3	n2	A	NULL	NULL	NULL	YES	BTREE	disabled
 t1	1	n4	4	n3	A	NULL	NULL	NULL	YES	BTREE	disabled
-insert into t1 values(10,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(9,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(8,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(7,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(6,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(5,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(4,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(3,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(2,RAND()*1000,RAND()*1000,RAND());
-insert into t1 values(1,RAND()*1000,RAND()*1000,RAND());
 alter table t1 enable keys;
 show keys from t1;
 Table	Non_unique	Key_name	Seq_in_index	Column_name	Collation	Cardinality	Sub_part	Packed	Null	Index_type	Comment
@@ -183,106 +173,6 @@ i	int(10) unsigned	NO	PRI	NULL	auto_incr
 c	char(10)	YES		NULL	
 drop table t1;
 create table t1 (a int, b int);
-insert into t1 values(1,100), (2,100), (3, 100);
-insert into t1 values(1,99), (2,99), (3, 99);
-insert into t1 values(1,98), (2,98), (3, 98);
-insert into t1 values(1,97), (2,97), (3, 97);
-insert into t1 values(1,96), (2,96), (3, 96);
-insert into t1 values(1,95), (2,95), (3, 95);
-insert into t1 values(1,94), (2,94), (3, 94);
-insert into t1 values(1,93), (2,93), (3, 93);
-insert into t1 values(1,92), (2,92), (3, 92);
-insert into t1 values(1,91), (2,91), (3, 91);
-insert into t1 values(1,90), (2,90), (3, 90);
-insert into t1 values(1,89), (2,89), (3, 89);
-insert into t1 values(1,88), (2,88), (3, 88);
-insert into t1 values(1,87), (2,87), (3, 87);
-insert into t1 values(1,86), (2,86), (3, 86);
-insert into t1 values(1,85), (2,85), (3, 85);
-insert into t1 values(1,84), (2,84), (3, 84);
-insert into t1 values(1,83), (2,83), (3, 83);
-insert into t1 values(1,82), (2,82), (3, 82);
-insert into t1 values(1,81), (2,81), (3, 81);
-insert into t1 values(1,80), (2,80), (3, 80);
-insert into t1 values(1,79), (2,79), (3, 79);
-insert into t1 values(1,78), (2,78), (3, 78);
-insert into t1 values(1,77), (2,77), (3, 77);
-insert into t1 values(1,76), (2,76), (3, 76);
-insert into t1 values(1,75), (2,75), (3, 75);
-insert into t1 values(1,74), (2,74), (3, 74);
-insert into t1 values(1,73), (2,73), (3, 73);
-insert into t1 values(1,72), (2,72), (3, 72);
-insert into t1 values(1,71), (2,71), (3, 71);
-insert into t1 values(1,70), (2,70), (3, 70);
-insert into t1 values(1,69), (2,69), (3, 69);
-insert into t1 values(1,68), (2,68), (3, 68);
-insert into t1 values(1,67), (2,67), (3, 67);
-insert into t1 values(1,66), (2,66), (3, 66);
-insert into t1 values(1,65), (2,65), (3, 65);
-insert into t1 values(1,64), (2,64), (3, 64);
-insert into t1 values(1,63), (2,63), (3, 63);
-insert into t1 values(1,62), (2,62), (3, 62);
-insert into t1 values(1,61), (2,61), (3, 61);
-insert into t1 values(1,60), (2,60), (3, 60);
-insert into t1 values(1,59), (2,59), (3, 59);
-insert into t1 values(1,58), (2,58), (3, 58);
-insert into t1 values(1,57), (2,57), (3, 57);
-insert into t1 values(1,56), (2,56), (3, 56);
-insert into t1 values(1,55), (2,55), (3, 55);
-insert into t1 values(1,54), (2,54), (3, 54);
-insert into t1 values(1,53), (2,53), (3, 53);
-insert into t1 values(1,52), (2,52), (3, 52);
-insert into t1 values(1,51), (2,51), (3, 51);
-insert into t1 values(1,50), (2,50), (3, 50);
-insert into t1 values(1,49), (2,49), (3, 49);
-insert into t1 values(1,48), (2,48), (3, 48);
-insert into t1 values(1,47), (2,47), (3, 47);
-insert into t1 values(1,46), (2,46), (3, 46);
-insert into t1 values(1,45), (2,45), (3, 45);
-insert into t1 values(1,44), (2,44), (3, 44);
-insert into t1 values(1,43), (2,43), (3, 43);
-insert into t1 values(1,42), (2,42), (3, 42);
-insert into t1 values(1,41), (2,41), (3, 41);
-insert into t1 values(1,40), (2,40), (3, 40);
-insert into t1 values(1,39), (2,39), (3, 39);
-insert into t1 values(1,38), (2,38), (3, 38);
-insert into t1 values(1,37), (2,37), (3, 37);
-insert into t1 values(1,36), (2,36), (3, 36);
-insert into t1 values(1,35), (2,35), (3, 35);
-insert into t1 values(1,34), (2,34), (3, 34);
-insert into t1 values(1,33), (2,33), (3, 33);
-insert into t1 values(1,32), (2,32), (3, 32);
-insert into t1 values(1,31), (2,31), (3, 31);
-insert into t1 values(1,30), (2,30), (3, 30);
-insert into t1 values(1,29), (2,29), (3, 29);
-insert into t1 values(1,28), (2,28), (3, 28);
-insert into t1 values(1,27), (2,27), (3, 27);
-insert into t1 values(1,26), (2,26), (3, 26);
-insert into t1 values(1,25), (2,25), (3, 25);
-insert into t1 values(1,24), (2,24), (3, 24);
-insert into t1 values(1,23), (2,23), (3, 23);
-insert into t1 values(1,22), (2,22), (3, 22);
-insert into t1 values(1,21), (2,21), (3, 21);
-insert into t1 values(1,20), (2,20), (3, 20);
-insert into t1 values(1,19), (2,19), (3, 19);
-insert into t1 values(1,18), (2,18), (3, 18);
-insert into t1 values(1,17), (2,17), (3, 17);
-insert into t1 values(1,16), (2,16), (3, 16);
-insert into t1 values(1,15), (2,15), (3, 15);
-insert into t1 values(1,14), (2,14), (3, 14);
-insert into t1 values(1,13), (2,13), (3, 13);
-insert into t1 values(1,12), (2,12), (3, 12);
-insert into t1 values(1,11), (2,11), (3, 11);
-insert into t1 values(1,10), (2,10), (3, 10);
-insert into t1 values(1,9), (2,9), (3, 9);
-insert into t1 values(1,8), (2,8), (3, 8);
-insert into t1 values(1,7), (2,7), (3, 7);
-insert into t1 values(1,6), (2,6), (3, 6);
-insert into t1 values(1,5), (2,5), (3, 5);
-insert into t1 values(1,4), (2,4), (3, 4);
-insert into t1 values(1,3), (2,3), (3, 3);
-insert into t1 values(1,2), (2,2), (3, 2);
-insert into t1 values(1,1), (2,1), (3, 1);
 alter table t1 add unique (a,b), add key (b);
 show keys from t1;
 Table	Non_unique	Key_name	Seq_in_index	Column_name	Collation	Cardinality	Sub_part	Packed	Null	Index_type	Comment

=== modified file 'mysql-test/r/func_misc.result'
--- a/mysql-test/r/func_misc.result	2009-06-11 16:21:32 +0000
+++ b/mysql-test/r/func_misc.result	2009-10-28 07:52:34 +0000
@@ -116,10 +116,6 @@ CREATE TEMPORARY TABLE t_history (attemp
 start_ts DATETIME, end_ts DATETIME,
 start_cached INTEGER, end_cached INTEGER);
 CREATE TABLE t1 (f1 BIGINT);
-INSERT INTO t1 VALUES (1);
-INSERT INTO t1 VALUES (1);
-INSERT INTO t1 VALUES (1);
-INSERT INTO t1 VALUES (1);
 INSERT INTO t_history
 SET attempt = 4 - 4 + 1, start_ts = NOW(),
 start_cached = 0;

=== modified file 'mysql-test/r/gis-rtree.result'
--- a/mysql-test/r/gis-rtree.result	2009-07-10 23:12:13 +0000
+++ b/mysql-test/r/gis-rtree.result	2009-10-28 07:52:34 +0000
@@ -12,156 +12,6 @@ t1	CREATE TABLE `t1` (
   PRIMARY KEY (`fid`),
   SPATIAL KEY `g` (`g`)
 ) ENGINE=MyISAM DEFAULT CHARSET=latin1
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(150 150, 150 150)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(149 149, 151 151)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(148 148, 152 152)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(147 147, 153 153)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(146 146, 154 154)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(145 145, 155 155)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(144 144, 156 156)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(143 143, 157 157)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(142 142, 158 158)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(141 141, 159 159)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(140 140, 160 160)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(139 139, 161 161)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(138 138, 162 162)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(137 137, 163 163)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(136 136, 164 164)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(135 135, 165 165)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(134 134, 166 166)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(133 133, 167 167)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(132 132, 168 168)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(131 131, 169 169)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(130 130, 170 170)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(129 129, 171 171)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(128 128, 172 172)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(127 127, 173 173)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(126 126, 174 174)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(125 125, 175 175)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(124 124, 176 176)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(123 123, 177 177)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(122 122, 178 178)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(121 121, 179 179)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(120 120, 180 180)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(119 119, 181 181)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(118 118, 182 182)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(117 117, 183 183)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(116 116, 184 184)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(115 115, 185 185)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(114 114, 186 186)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(113 113, 187 187)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(112 112, 188 188)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(111 111, 189 189)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(110 110, 190 190)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(109 109, 191 191)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(108 108, 192 192)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(107 107, 193 193)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(106 106, 194 194)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(105 105, 195 195)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(104 104, 196 196)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(103 103, 197 197)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(102 102, 198 198)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(101 101, 199 199)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(100 100, 200 200)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(99 99, 201 201)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(98 98, 202 202)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(97 97, 203 203)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(96 96, 204 204)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(95 95, 205 205)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(94 94, 206 206)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(93 93, 207 207)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(92 92, 208 208)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(91 91, 209 209)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(90 90, 210 210)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(89 89, 211 211)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(88 88, 212 212)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(87 87, 213 213)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(86 86, 214 214)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(85 85, 215 215)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(84 84, 216 216)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(83 83, 217 217)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(82 82, 218 218)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(81 81, 219 219)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(80 80, 220 220)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(79 79, 221 221)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(78 78, 222 222)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(77 77, 223 223)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(76 76, 224 224)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(75 75, 225 225)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(74 74, 226 226)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(73 73, 227 227)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(72 72, 228 228)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(71 71, 229 229)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(70 70, 230 230)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(69 69, 231 231)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(68 68, 232 232)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(67 67, 233 233)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(66 66, 234 234)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(65 65, 235 235)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(64 64, 236 236)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(63 63, 237 237)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(62 62, 238 238)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(61 61, 239 239)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(60 60, 240 240)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(59 59, 241 241)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(58 58, 242 242)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(57 57, 243 243)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(56 56, 244 244)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(55 55, 245 245)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(54 54, 246 246)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(53 53, 247 247)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(52 52, 248 248)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(51 51, 249 249)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(50 50, 250 250)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(49 49, 251 251)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(48 48, 252 252)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(47 47, 253 253)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(46 46, 254 254)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(45 45, 255 255)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(44 44, 256 256)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(43 43, 257 257)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(42 42, 258 258)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(41 41, 259 259)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(40 40, 260 260)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(39 39, 261 261)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(38 38, 262 262)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(37 37, 263 263)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(36 36, 264 264)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(35 35, 265 265)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(34 34, 266 266)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(33 33, 267 267)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(32 32, 268 268)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(31 31, 269 269)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(30 30, 270 270)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(29 29, 271 271)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(28 28, 272 272)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(27 27, 273 273)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(26 26, 274 274)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(25 25, 275 275)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(24 24, 276 276)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(23 23, 277 277)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(22 22, 278 278)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(21 21, 279 279)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(20 20, 280 280)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(19 19, 281 281)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(18 18, 282 282)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(17 17, 283 283)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(16 16, 284 284)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(15 15, 285 285)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(14 14, 286 286)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(13 13, 287 287)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(12 12, 288 288)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(11 11, 289 289)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(10 10, 290 290)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(9 9, 291 291)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(8 8, 292 292)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(7 7, 293 293)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(6 6, 294 294)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(5 5, 295 295)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(4 4, 296 296)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(3 3, 297 297)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(2 2, 298 298)'));
-INSERT INTO t1 (g) VALUES (GeomFromText('LineString(1 1, 299 299)'));
 SELECT count(*) FROM t1;
 count(*)
 150
@@ -186,106 +36,6 @@ CREATE TABLE t2 (
 fid INT NOT NULL AUTO_INCREMENT PRIMARY KEY, 
 g GEOMETRY NOT NULL
 ) ENGINE=MyISAM;
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 10 * 10 - 9), Point(10 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 9 * 10 - 9), Point(10 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 8 * 10 - 9), Point(10 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 7 * 10 - 9), Point(10 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 6 * 10 - 9), Point(10 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 5 * 10 - 9), Point(10 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 4 * 10 - 9), Point(10 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 3 * 10 - 9), Point(10 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 2 * 10 - 9), Point(10 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(10 * 10 - 9, 1 * 10 - 9), Point(10 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 10 * 10 - 9), Point(9 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 9 * 10 - 9), Point(9 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 8 * 10 - 9), Point(9 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 7 * 10 - 9), Point(9 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 6 * 10 - 9), Point(9 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 5 * 10 - 9), Point(9 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 4 * 10 - 9), Point(9 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 3 * 10 - 9), Point(9 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 2 * 10 - 9), Point(9 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(9 * 10 - 9, 1 * 10 - 9), Point(9 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 10 * 10 - 9), Point(8 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 9 * 10 - 9), Point(8 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 8 * 10 - 9), Point(8 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 7 * 10 - 9), Point(8 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 6 * 10 - 9), Point(8 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 5 * 10 - 9), Point(8 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 4 * 10 - 9), Point(8 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 3 * 10 - 9), Point(8 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 2 * 10 - 9), Point(8 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(8 * 10 - 9, 1 * 10 - 9), Point(8 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 10 * 10 - 9), Point(7 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 9 * 10 - 9), Point(7 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 8 * 10 - 9), Point(7 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 7 * 10 - 9), Point(7 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 6 * 10 - 9), Point(7 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 5 * 10 - 9), Point(7 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 4 * 10 - 9), Point(7 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 3 * 10 - 9), Point(7 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 2 * 10 - 9), Point(7 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(7 * 10 - 9, 1 * 10 - 9), Point(7 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 10 * 10 - 9), Point(6 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 9 * 10 - 9), Point(6 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 8 * 10 - 9), Point(6 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 7 * 10 - 9), Point(6 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 6 * 10 - 9), Point(6 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 5 * 10 - 9), Point(6 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 4 * 10 - 9), Point(6 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 3 * 10 - 9), Point(6 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 2 * 10 - 9), Point(6 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(6 * 10 - 9, 1 * 10 - 9), Point(6 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 10 * 10 - 9), Point(5 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 9 * 10 - 9), Point(5 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 8 * 10 - 9), Point(5 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 7 * 10 - 9), Point(5 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 6 * 10 - 9), Point(5 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 5 * 10 - 9), Point(5 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 4 * 10 - 9), Point(5 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 3 * 10 - 9), Point(5 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 2 * 10 - 9), Point(5 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(5 * 10 - 9, 1 * 10 - 9), Point(5 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 10 * 10 - 9), Point(4 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 9 * 10 - 9), Point(4 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 8 * 10 - 9), Point(4 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 7 * 10 - 9), Point(4 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 6 * 10 - 9), Point(4 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 5 * 10 - 9), Point(4 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 4 * 10 - 9), Point(4 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 3 * 10 - 9), Point(4 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 2 * 10 - 9), Point(4 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(4 * 10 - 9, 1 * 10 - 9), Point(4 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 10 * 10 - 9), Point(3 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 9 * 10 - 9), Point(3 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 8 * 10 - 9), Point(3 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 7 * 10 - 9), Point(3 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 6 * 10 - 9), Point(3 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 5 * 10 - 9), Point(3 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 4 * 10 - 9), Point(3 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 3 * 10 - 9), Point(3 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 2 * 10 - 9), Point(3 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(3 * 10 - 9, 1 * 10 - 9), Point(3 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 10 * 10 - 9), Point(2 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 9 * 10 - 9), Point(2 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 8 * 10 - 9), Point(2 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 7 * 10 - 9), Point(2 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 6 * 10 - 9), Point(2 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 5 * 10 - 9), Point(2 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 4 * 10 - 9), Point(2 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 3 * 10 - 9), Point(2 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 2 * 10 - 9), Point(2 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(2 * 10 - 9, 1 * 10 - 9), Point(2 * 10, 1 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 10 * 10 - 9), Point(1 * 10, 10 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 9 * 10 - 9), Point(1 * 10, 9 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 8 * 10 - 9), Point(1 * 10, 8 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 7 * 10 - 9), Point(1 * 10, 7 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 6 * 10 - 9), Point(1 * 10, 6 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 5 * 10 - 9), Point(1 * 10, 5 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 4 * 10 - 9), Point(1 * 10, 4 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 3 * 10 - 9), Point(1 * 10, 3 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 2 * 10 - 9), Point(1 * 10, 2 * 10)));
-INSERT INTO t2 (g) VALUES (LineString(Point(1 * 10 - 9, 1 * 10 - 9), Point(1 * 10, 1 * 10)));
 ALTER TABLE t2 ADD SPATIAL KEY(g);
 SHOW CREATE TABLE t2;
 Table	Create Table
@@ -309,404 +59,204 @@ fid	AsText(g)
 56	LINESTRING(41 41,50 50)
 45	LINESTRING(51 51,60 60)
 55	LINESTRING(41 51,50 60)
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 10 * 10 - 9), Point(10 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 9 * 10 - 9), Point(10 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 8 * 10 - 9), Point(10 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 7 * 10 - 9), Point(10 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 6 * 10 - 9), Point(10 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 5 * 10 - 9), Point(10 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 4 * 10 - 9), Point(10 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 3 * 10 - 9), Point(10 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 2 * 10 - 9), Point(10 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(10 * 10 - 9, 1 * 10 - 9), Point(10 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 10 * 10 - 9), Point(9 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 9 * 10 - 9), Point(9 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 8 * 10 - 9), Point(9 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 7 * 10 - 9), Point(9 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 6 * 10 - 9), Point(9 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 5 * 10 - 9), Point(9 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 4 * 10 - 9), Point(9 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 3 * 10 - 9), Point(9 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 2 * 10 - 9), Point(9 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(9 * 10 - 9, 1 * 10 - 9), Point(9 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 10 * 10 - 9), Point(8 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 9 * 10 - 9), Point(8 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 8 * 10 - 9), Point(8 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 7 * 10 - 9), Point(8 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 6 * 10 - 9), Point(8 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 5 * 10 - 9), Point(8 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 4 * 10 - 9), Point(8 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 3 * 10 - 9), Point(8 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 2 * 10 - 9), Point(8 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(8 * 10 - 9, 1 * 10 - 9), Point(8 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 10 * 10 - 9), Point(7 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 9 * 10 - 9), Point(7 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 8 * 10 - 9), Point(7 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 7 * 10 - 9), Point(7 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 6 * 10 - 9), Point(7 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 5 * 10 - 9), Point(7 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 4 * 10 - 9), Point(7 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 3 * 10 - 9), Point(7 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 2 * 10 - 9), Point(7 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(7 * 10 - 9, 1 * 10 - 9), Point(7 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 10 * 10 - 9), Point(6 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 9 * 10 - 9), Point(6 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 8 * 10 - 9), Point(6 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 7 * 10 - 9), Point(6 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 6 * 10 - 9), Point(6 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 5 * 10 - 9), Point(6 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 4 * 10 - 9), Point(6 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 3 * 10 - 9), Point(6 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 2 * 10 - 9), Point(6 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(6 * 10 - 9, 1 * 10 - 9), Point(6 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 10 * 10 - 9), Point(5 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 9 * 10 - 9), Point(5 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 8 * 10 - 9), Point(5 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 7 * 10 - 9), Point(5 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 6 * 10 - 9), Point(5 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 5 * 10 - 9), Point(5 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 4 * 10 - 9), Point(5 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 3 * 10 - 9), Point(5 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 2 * 10 - 9), Point(5 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(5 * 10 - 9, 1 * 10 - 9), Point(5 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 10 * 10 - 9), Point(4 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 9 * 10 - 9), Point(4 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 8 * 10 - 9), Point(4 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 7 * 10 - 9), Point(4 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 6 * 10 - 9), Point(4 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 5 * 10 - 9), Point(4 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 4 * 10 - 9), Point(4 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 3 * 10 - 9), Point(4 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 2 * 10 - 9), Point(4 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(4 * 10 - 9, 1 * 10 - 9), Point(4 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 10 * 10 - 9), Point(3 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 9 * 10 - 9), Point(3 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 8 * 10 - 9), Point(3 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 7 * 10 - 9), Point(3 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 6 * 10 - 9), Point(3 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 5 * 10 - 9), Point(3 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 4 * 10 - 9), Point(3 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 3 * 10 - 9), Point(3 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 2 * 10 - 9), Point(3 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(3 * 10 - 9, 1 * 10 - 9), Point(3 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 10 * 10 - 9), Point(2 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 9 * 10 - 9), Point(2 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 8 * 10 - 9), Point(2 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 7 * 10 - 9), Point(2 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 6 * 10 - 9), Point(2 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 5 * 10 - 9), Point(2 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 4 * 10 - 9), Point(2 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 3 * 10 - 9), Point(2 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 2 * 10 - 9), Point(2 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(2 * 10 - 9, 1 * 10 - 9), Point(2 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 10 * 10 - 9), Point(1 * 10, 10 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 9 * 10 - 9), Point(1 * 10, 9 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 8 * 10 - 9), Point(1 * 10, 8 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 7 * 10 - 9), Point(1 * 10, 7 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 6 * 10 - 9), Point(1 * 10, 6 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 5 * 10 - 9), Point(1 * 10, 5 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 4 * 10 - 9), Point(1 * 10, 4 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 3 * 10 - 9), Point(1 * 10, 3 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 2 * 10 - 9), Point(1 * 10, 2 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
-DELETE FROM t2 WHERE Within(g, Envelope(GeometryFromWKB(Point(1 * 10 - 9, 1 * 10 - 9), Point(1 * 10, 1 * 10))));
-SELECT count(*) FROM t2;
 count(*)
 100
 DROP TABLE t2;

=== modified file 'mysql-test/r/innodb_xtradb_bug317074.result'
--- a/mysql-test/r/innodb_xtradb_bug317074.result	2009-09-08 16:04:58 +0000
+++ b/mysql-test/r/innodb_xtradb_bug317074.result	2009-10-28 07:52:34 +0000
@@ -3,3 +3,30 @@ SET @old_innodb_file_per_table=@@innodb_
 SET @old_innodb_file_format_check=@@innodb_file_format_check;
 SET GLOBAL innodb_file_format='Barracuda';
 SET GLOBAL innodb_file_per_table=ON;
+DROP TABLE IF EXISTS `test1`;
+CREATE TABLE IF NOT EXISTS `test1` (
+`a` int primary key auto_increment,
+`b` int default 0,
+`c` char(100) default 'testtest'
+) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
+set autocommit=0;
+CREATE PROCEDURE insert_many(p1 int)
+BEGIN
+SET @x = 0;
+SET @y = 0;
+REPEAT
+insert into test1 set b=1;
+SET @x = @x + 1;
+SET @y = @y + 1;
+IF @y >= 1000 THEN
+commit;
+SET @y = 0;
+END IF;
+UNTIL @x >= p1 END REPEAT;
+END|
+DROP PROCEDURE insert_many;
+ALTER TABLE test1 ENGINE=MyISAM;
+DROP TABLE test1;
+SET GLOBAL innodb_file_format=@old_innodb_file_format;
+SET GLOBAL innodb_file_per_table=@old_innodb_file_per_table;
+SET GLOBAL innodb_file_format_check=@old_innodb_file_format_check;

=== modified file 'mysql-test/r/merge.result'
--- a/mysql-test/r/merge.result	2009-09-07 20:50:10 +0000
+++ b/mysql-test/r/merge.result	2009-10-28 07:52:34 +0000
@@ -1846,56 +1846,6 @@ c1
 DROP TABLE t1, t2, t3;
 CREATE TABLE t1 (id INTEGER, grp TINYINT, id_rev INTEGER);
 SET @rnd_max= 2147483647;
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
-SET @rnd= RAND();
-SET @id = CAST(@rnd * @rnd_max AS UNSIGNED);
-SET @id_rev= @rnd_max - @id;
-SET @grp= CAST(127.0 * @rnd AS UNSIGNED);
-INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev);
 set @@read_buffer_size=2*1024*1024;
 CREATE TABLE t2 SELECT * FROM t1;
 INSERT INTO t1 (id, grp, id_rev) SELECT id, grp, id_rev FROM t2;

=== modified file 'mysql-test/r/myisam_debug.result'
--- a/mysql-test/r/myisam_debug.result	2009-04-30 11:03:44 +0000
+++ b/mysql-test/r/myisam_debug.result	2009-10-28 07:52:34 +0000
@@ -12,16 +12,6 @@ CREATE TABLE `t2` (
 KEY (id1), KEY(id)
 ) ENGINE=MyISAM;
 INSERT INTO t2 (id) VALUES (123);
-INSERT INTO t2 (id) SELECT id  FROM t2;
-INSERT INTO t2 (id) SELECT id  FROM t2;
-INSERT INTO t2 (id) SELECT id  FROM t2;
-INSERT INTO t2 (id) SELECT id  FROM t2;
-INSERT INTO t2 (id) SELECT id  FROM t2;
-INSERT INTO t2 (id) SELECT id  FROM t2;
-INSERT INTO t2 (id) SELECT id  FROM t2;
-INSERT INTO t2 (id) SELECT id  FROM t2;
-INSERT INTO t2 (id) SELECT id  FROM t2;
-INSERT INTO t2 (id) SELECT id  FROM t2;
 # Switch to insert Connection
 SET SESSION debug='+d,wait_in_enable_indexes';
 # Send insert data

=== added file 'mysql-test/r/table_elim_debug.result'
--- a/mysql-test/r/table_elim_debug.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/r/table_elim_debug.result	2009-10-29 17:50:33 +0000
@@ -0,0 +1,22 @@
+drop table if exists t1, t2;
+create table t1 (a int);
+insert into t1 values (0),(1),(2),(3);
+create table t2 (a int primary key, b int) 
+as select a, a as b from t1 where a in (1,2);
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	4	
+set optimizer_switch='table_elimination=off';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	4	
+1	SIMPLE	t2	eq_ref	PRIMARY	PRIMARY	4	test.t1.a	1	Using index
+set optimizer_switch='table_elimination=on';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	4	
+set optimizer_switch='table_elimination=default';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	4	
+drop table t1, t2;

=== modified file 'mysql-test/suite/federated/disabled.def'
--- a/mysql-test/suite/federated/disabled.def	2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/federated/disabled.def	2009-10-30 18:50:56 +0000
@@ -9,4 +9,5 @@
 #  Do not use any TAB characters for whitespace.
 #
 ##############################################################################
-federated_transactions : Bug#29523 Transactions do not work
+federated_server : needs fixup
+

=== modified file 'mysql-test/suite/federated/federated.result'
--- a/mysql-test/suite/federated/federated.result	2009-03-19 08:49:51 +0000
+++ b/mysql-test/suite/federated/federated.result	2009-10-30 18:50:56 +0000
@@ -47,9 +47,10 @@ CREATE TABLE federated.t1 (
     )
 ENGINE="FEDERATED" DEFAULT CHARSET=latin1
 CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t3';
-SELECT * FROM federated.t1;
-ERROR HY000: The foreign data source you are trying to reference does not exist. Data source error:  error: 1146  'Table 'federated.t3' doesn't exist'
-DROP TABLE federated.t1;
+ERROR HY000: Can't create federated table. Foreign data src error:  database: 'federated'  username: 'root'  hostname: '127.0.0.1'
+DROP TABLE IF EXISTS federated.t1;
+Warnings:
+Note	1051	Unknown table 't1'
 CREATE TABLE federated.t1 (
 `id` int(20) NOT NULL,
 `group` int NOT NULL default 0,
@@ -59,9 +60,10 @@ CREATE TABLE federated.t1 (
     )
 ENGINE="FEDERATED" DEFAULT CHARSET=latin1
 CONNECTION='mysql://user:pass@127.0.0.1:SLAVE_PORT/federated/t1';
-SELECT * FROM federated.t1;
-ERROR HY000: Unable to connect to foreign data source: Access denied for user 'user'@'localhost' (using password: YES)
-DROP TABLE federated.t1;
+ERROR HY000: Can't create federated table. Foreign data src error:  database: 'federated'  username: 'user'  hostname: '127.0.0.1'
+DROP TABLE IF EXISTS federated.t1;
+Warnings:
+Note	1051	Unknown table 't1'
 CREATE TABLE federated.t1 (
 `id` int(20) NOT NULL,
 `group` int NOT NULL default 0,
@@ -1944,15 +1946,7 @@ Bug#18287 create federated table always 
 
 Test that self-references work
 
-create table federated.t1 (a int primary key);
-create table federated.t2 (a int primary key)
-ENGINE=FEDERATED
-connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
-insert into federated.t1 (a) values (1);
-select * from federated.t2;
-a
-1
-drop table federated.t1, federated.t2;
+fix LOCK_open before reenabling test for Bug#18287
 CREATE TABLE federated.t1 (a INT PRIMARY KEY) DEFAULT CHARSET=utf8;
 CREATE TABLE federated.t1 (a INT PRIMARY KEY)
 ENGINE=FEDERATED
@@ -1960,13 +1954,11 @@ CONNECTION='mysql://root@127.0.0.1:SLAVE
   DEFAULT CHARSET=utf8;
 SELECT transactions FROM information_schema.engines WHERE engine="FEDERATED";
 transactions
-NO
+YES
 INSERT INTO federated.t1 VALUES (1);
 SET autocommit=0;
 INSERT INTO federated.t1 VALUES (2);
 ROLLBACK;
-Warnings:
-Warning	1196	Some non-transactional changed tables couldn't be rolled back
 SET autocommit=1;
 SELECT * FROM federated.t1;
 a
@@ -2157,6 +2149,6 @@ End of 5.1 tests
 SET @@GLOBAL.CONCURRENT_INSERT= @OLD_MASTER_CONCURRENT_INSERT;
 SET @@GLOBAL.CONCURRENT_INSERT= @OLD_SLAVE_CONCURRENT_INSERT;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;

=== modified file 'mysql-test/suite/federated/federated.test'
--- a/mysql-test/suite/federated/federated.test	2009-03-19 08:49:51 +0000
+++ b/mysql-test/suite/federated/federated.test	2009-10-30 18:50:56 +0000
@@ -57,6 +57,7 @@ CREATE TABLE federated.t1 (
 
 # test non-existant table
 --replace_result $SLAVE_MYPORT SLAVE_PORT
+--error ER_CANT_CREATE_FEDERATED_TABLE
 eval CREATE TABLE federated.t1 (
     `id` int(20) NOT NULL,
     `group` int NOT NULL default 0,
@@ -66,12 +67,11 @@ eval CREATE TABLE federated.t1 (
     )
   ENGINE="FEDERATED" DEFAULT CHARSET=latin1
   CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t3';
---error ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST
-SELECT * FROM federated.t1;
-DROP TABLE federated.t1;
+DROP TABLE IF EXISTS federated.t1;
 
 # test bad user/password 
 --replace_result $SLAVE_MYPORT SLAVE_PORT
+--error ER_CANT_CREATE_FEDERATED_TABLE
 eval CREATE TABLE federated.t1 (
     `id` int(20) NOT NULL,
     `group` int NOT NULL default 0,
@@ -81,9 +81,7 @@ eval CREATE TABLE federated.t1 (
     )
   ENGINE="FEDERATED" DEFAULT CHARSET=latin1
   CONNECTION='mysql://user:pass@127.0.0.1:$SLAVE_MYPORT/federated/t1';
---error ER_CONNECT_TO_FOREIGN_DATA_SOURCE
-SELECT * FROM federated.t1;
-DROP TABLE federated.t1;
+DROP TABLE IF EXISTS federated.t1;
 
 # # correct connection, same named tables
 --replace_result $SLAVE_MYPORT SLAVE_PORT
@@ -1806,6 +1804,8 @@ drop table federated.t1;
 --echo
 --echo Test that self-references work
 --echo
+--echo fix LOCK_open before reenabling test for Bug#18287
+--disable_parsing
 connection slave;
 create table federated.t1 (a int primary key);
 --replace_result $SLAVE_MYPORT SLAVE_PORT
@@ -1815,7 +1815,7 @@ eval create table federated.t2 (a int pr
 insert into federated.t1 (a) values (1);
 select * from federated.t2;
 drop table federated.t1, federated.t2;
-
+--enable_parsing
 #
 # BUG#29875 Disable support for transactions
 #

=== modified file 'mysql-test/suite/federated/federated_archive.result'
--- a/mysql-test/suite/federated/federated_archive.result	2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_archive.result	2009-10-30 18:50:56 +0000
@@ -34,6 +34,6 @@ id	name
 DROP TABLE federated.t1;
 DROP TABLE federated.archive_table;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;

=== modified file 'mysql-test/suite/federated/federated_bug_13118.result'
--- a/mysql-test/suite/federated/federated_bug_13118.result	2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_bug_13118.result	2009-10-30 18:50:56 +0000
@@ -25,6 +25,6 @@ foo	bar
 DROP TABLE federated.t1;
 DROP TABLE federated.bug_13118_table;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;

=== modified file 'mysql-test/suite/federated/federated_bug_25714.result'
--- a/mysql-test/suite/federated/federated_bug_25714.result	2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_bug_25714.result	2009-10-30 18:50:56 +0000
@@ -48,6 +48,6 @@ SET @@GLOBAL.CONCURRENT_INSERT= @OLD_MAS
 DROP TABLE federated.t1;
 SET @@GLOBAL.CONCURRENT_INSERT= @OLD_SLAVE_CONCURRENT_INSERT;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;

=== modified file 'mysql-test/suite/federated/federated_cleanup.inc'
--- a/mysql-test/suite/federated/federated_cleanup.inc	2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_cleanup.inc	2009-10-30 18:50:56 +0000
@@ -1,9 +1,9 @@
 connection master;
 --disable_warnings
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
 
 connection slave;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
 --enable_warnings

=== modified file 'mysql-test/suite/federated/federated_innodb.result'
--- a/mysql-test/suite/federated/federated_innodb.result	2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_innodb.result	2009-10-30 18:50:56 +0000
@@ -20,6 +20,6 @@ a	b
 drop table federated.t1;
 drop table federated.t1;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;
 DROP TABLE IF EXISTS federated.t1;
-DROP DATABASE federated;
+DROP DATABASE IF EXISTS federated;

=== modified file 'mysql-test/suite/federated/federated_server.result'
--- a/mysql-test/suite/federated/federated_server.result	2009-02-02 11:36:03 +0000
+++ b/mysql-test/suite/federated/federated_server.result	2009-10-30 18:50:56 +0000
@@ -178,8 +178,8 @@ INSERT INTO db_bogus.t1 VALUES ('2','thi
 create server 's1' foreign data wrapper 'mysql' options
 (HOST '127.0.0.1',
 DATABASE 'db_legitimate',
-USER 'root',
-PASSWORD '',
+USER 'test_fed',
+PASSWORD 'foo',
 PORT SLAVE_PORT,
 SOCKET '',
 OWNER 'root');

=== modified file 'mysql-test/suite/federated/federated_server.test'
--- a/mysql-test/suite/federated/federated_server.test	2009-06-05 15:35:22 +0000
+++ b/mysql-test/suite/federated/federated_server.test	2009-10-30 18:50:56 +0000
@@ -3,6 +3,7 @@
 
 # Slow test, don't run during staging part
 -- source include/not_staging.inc
+-- source include/big_test.inc
 -- source federated.inc
 
 connection slave;
@@ -182,13 +183,17 @@ CREATE TABLE db_bogus.t1 (
   ;
 INSERT INTO db_bogus.t1 VALUES ('2','this is bogus');
 
+connection slave;
+create user test_fed@localhost identified by 'foo';
+grant all on db_legitimate.* to test_fed@localhost;
+
 connection master;
 --replace_result $SLAVE_MYPORT SLAVE_PORT
 eval create server 's1' foreign data wrapper 'mysql' options
   (HOST '127.0.0.1',
   DATABASE 'db_legitimate',
-  USER 'root',
-  PASSWORD '',
+  USER 'test_fed',
+  PASSWORD 'foo',
   PORT $SLAVE_MYPORT,
   SOCKET '',
   OWNER 'root');

=== modified file 'mysql-test/suite/federated/federated_transactions.result'
--- a/mysql-test/suite/federated/federated_transactions.result	2007-12-12 17:19:24 +0000
+++ b/mysql-test/suite/federated/federated_transactions.result	2009-10-30 18:50:56 +0000
@@ -1,13 +1,4 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-stop slave;
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
-DROP DATABASE IF EXISTS federated;
 CREATE DATABASE federated;
 DROP TABLE IF EXISTS federated.t1;
 Warnings:

=== modified file 'mysql-test/suite/parts/inc/partition.pre'
--- a/mysql-test/suite/parts/inc/partition.pre	2008-01-10 15:50:37 +0000
+++ b/mysql-test/suite/parts/inc/partition.pre	2009-10-28 07:52:34 +0000
@@ -152,6 +152,7 @@ ENGINE = MEMORY;
 --echo #     Logging of <max_row> INSERTs into t0_template suppressed
 --disable_query_log
 let $num= `SELECT @max_row`;
+begin;
 while ($num)
 {
   eval INSERT INTO t0_template
@@ -160,6 +161,7 @@ f_charbig = '===$num===';
 
   dec $num;
 }
+commit;
 --enable_query_log
 
 # Auxiliary table used for comparisons of table definitions and file lists

=== modified file 'mysql-test/suite/parts/inc/partition_bigint.inc'
--- a/mysql-test/suite/parts/inc/partition_bigint.inc	2008-07-01 18:38:15 +0000
+++ b/mysql-test/suite/parts/inc/partition_bigint.inc	2009-10-28 07:52:34 +0000
@@ -32,11 +32,13 @@ delete from t2;
 let $count=$maxrows;
 --echo $maxrows inserts;
 --disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values ($count);
 dec $count;
 }
+commit;
 --enable_query_log
 select count(*) from t2;
 drop table t2;

=== modified file 'mysql-test/suite/parts/inc/partition_binary.inc'
--- a/mysql-test/suite/parts/inc/partition_binary.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_binary.inc	2009-10-28 07:52:34 +0000
@@ -22,13 +22,16 @@ show create table t2;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values (repeat(char(ascii('a')+$letter),$count+54));
 dec $count;
 inc $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t2;
 select hex(a) from t2;
 drop table t2;
@@ -48,13 +51,16 @@ show create table t3;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values (repeat(char(ascii('a')+$letter),$count+54));
 dec $count;
 inc $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t3;
 select hex(a) from t3;
 drop table t3;
@@ -73,14 +79,16 @@ show create table t4;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values (repeat(char(ascii('a')+$letter),$count+54));
 dec $count;
 inc $letter;
 }
-
+commit;
+--enable_query_log
 select count(*) from t4;
 select hex(a) from t4;
 drop table t4;

=== modified file 'mysql-test/suite/parts/inc/partition_bit.inc'
--- a/mysql-test/suite/parts/inc/partition_bit.inc	2008-02-07 15:26:22 +0000
+++ b/mysql-test/suite/parts/inc/partition_bit.inc	2009-10-28 07:52:34 +0000
@@ -74,11 +74,13 @@ show create table t3;
 let $count=255;
 --echo $count inserts;
 --disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values ($count);
 dec $count;
 }
+commit;
 --enable_query_log
 select hex(a) from t3 where a=b'01010101';
 delete from t3 where a=b'01010101';
@@ -96,11 +98,13 @@ show create table t4;
 let $count=32;
 --echo $count inserts;
 --disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values ($count);
 dec $count;
 }
+commit;
 --enable_query_log
 select hex(a) from t4 where a=b'00000001';
 delete from t4 where a=b'00000001';

=== modified file 'mysql-test/suite/parts/inc/partition_char.inc'
--- a/mysql-test/suite/parts/inc/partition_char.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_char.inc	2009-10-28 07:52:34 +0000
@@ -21,13 +21,16 @@ show create table t2;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values (repeat(char(ascii('a')+$letter),$count+54));
 dec $count;
 inc $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t2;
 select * from t2;
 drop table t2;
@@ -47,13 +50,16 @@ show create table t3;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values (repeat(char(ascii('a')+$letter),$count+54));
 dec $count;
 inc $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t3;
 select a from t3;
 drop table t3;
@@ -71,13 +77,16 @@ show create table t4;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values (repeat(char(ascii('a')+$letter),$count+54));
 dec $count;
 inc $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t4;
 select a from t4;
 drop table t4;

=== modified file 'mysql-test/suite/parts/inc/partition_date.inc'
--- a/mysql-test/suite/parts/inc/partition_date.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_date.inc	2009-10-28 07:52:34 +0000
@@ -23,7 +23,8 @@ select * from t2;
 delete from t2;
 let $count=28;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values (19700101+$count-1);
@@ -31,7 +32,8 @@ eval insert into t2 values (19700201+$co
 eval insert into t2 values (19700301+$count-1);
 dec $count;
 }
-#--enable_query_log
+commit;
+--enable_query_log
 select count(*) from t2;
 select * from t2;
 drop table t2;
@@ -47,11 +49,15 @@ partition quarter4 values less than (13)
 show create table t3;
 let $count=12;
 --echo $count inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values (adddate(19700101,interval $count-1 month));
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t3;
 select * from t3;
 drop table t3;
@@ -67,11 +73,15 @@ partition quarter4 values in (10,11,12)
 show create table t4;
 let $count=12;
 --echo $count inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values (adddate(19700101,interval $count-1 month));
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t4;
 select * from t4;
 drop table t4;

=== modified file 'mysql-test/suite/parts/inc/partition_datetime.inc'
--- a/mysql-test/suite/parts/inc/partition_datetime.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_datetime.inc	2009-10-28 07:52:34 +0000
@@ -23,12 +23,15 @@ select * from t2;
 delete from t2;
 let $count=59;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values (19700101000000+$count);
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t2;
 select * from t2;
 drop table t2;
@@ -44,11 +47,15 @@ partition quarter4 values less than (13)
 show create table t3;
 let $count=12;
 --echo $count inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values (adddate(19700101000000,interval $count-1 month));
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t3;
 select * from t3;
 drop table t3;
@@ -64,11 +71,15 @@ partition quarter4 values in (10,11,12)
 show create table t4;
 let $count=12;
 --echo $count inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values (adddate(19700101000000,interval $count-1 month));
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t4;
 select * from t4;
 drop table t4;

=== modified file 'mysql-test/suite/parts/inc/partition_decimal.inc'
--- a/mysql-test/suite/parts/inc/partition_decimal.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_decimal.inc	2009-10-28 07:52:34 +0000
@@ -24,6 +24,7 @@ delete from t2;
 let $count=$maxrows;
 --echo $count*3 inserts;
 --disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values ($count);
@@ -31,6 +32,7 @@ eval insert into t2 values ($count+0.333
 eval insert into t2 values ($count+0.755555555);
 dec $count;
 }
+commit;
 --enable_query_log
 select count(*) from t2;
 drop table t2;
@@ -53,6 +55,8 @@ partition pa10 values less than (10)
 show create table t3;
 let $count=9;
 --echo $count*3 inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values ($count);
@@ -60,6 +64,7 @@ eval insert into t3 values ($count+0.333
 eval insert into t3 values ($count+0.755555555);
 dec $count;
 }
+commit;
 --enable_query_log
 select count(*) from t3;
 drop table t3;
@@ -75,6 +80,8 @@ partition pa10 values in (9,10)
 show create table t4;
 let $count=9;
 --echo $count*3 inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values ($count);
@@ -82,6 +89,7 @@ eval insert into t4 values ($count+0.333
 eval insert into t4 values ($count+0.755555555);
 dec $count;
 }
+commit;
 --enable_query_log
 select count(*) from t4;
 drop table t4;

=== modified file 'mysql-test/suite/parts/inc/partition_double.inc'
--- a/mysql-test/suite/parts/inc/partition_double.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_double.inc	2009-10-28 07:52:34 +0000
@@ -24,6 +24,7 @@ delete from t2;
 let $count=$maxrows;
 --echo $maxrows*3 inserts;
 --disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values ($count);
@@ -31,6 +32,7 @@ eval insert into t2 values ($count+0.33)
 eval insert into t2 values ($count+0.75);
 dec $count;
 }
+commit;
 --enable_query_log
 select count(*) from t2;
 drop table t2;
@@ -52,6 +54,8 @@ partition pa10 values less than (10)
 show create table t3;
 let $count=9;
 --echo $count*3 inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values ($count);
@@ -59,6 +63,8 @@ eval insert into t3 values ($count+0.33)
 eval insert into t3 values ($count+0.75);
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t3;
 select * from t3;
 drop table t3;
@@ -72,6 +78,8 @@ partition pa10 values in (7,8,9,10)
 show create table t4;
 let $count=9;
 --echo $count*3 inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values ($count);
@@ -79,6 +87,8 @@ eval insert into t4 values ($count+0.33)
 eval insert into t4 values ($count+0.75);
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t4;
 select * from t4;
 drop table t4;

=== modified file 'mysql-test/suite/parts/inc/partition_enum.inc'
--- a/mysql-test/suite/parts/inc/partition_enum.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_enum.inc	2009-10-28 07:52:34 +0000
@@ -26,12 +26,15 @@ partition by key (a) partitions 27;
 show create table t2;
 let $letter=26;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($letter)
 {
 eval insert into t2 values (char(ascii('A')+$letter));
 dec $letter;
 }
+commit;
+--enable_query_log
 insert into t2 values ('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'),('0');
 select count(*) from t2;
 select * from t2;
@@ -55,12 +58,15 @@ partition pa36 values less than (37)
 show create table t3;
 let $letter=36;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+  begin;
 while ($letter)
 {
 #eval insert into t3 values ($letter);
 dec $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t3;
 select * from t3;
 drop table t3;

=== modified file 'mysql-test/suite/parts/inc/partition_float.inc'
--- a/mysql-test/suite/parts/inc/partition_float.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_float.inc	2009-10-28 07:52:34 +0000
@@ -28,6 +28,7 @@ delete from t2;
 let $count=$maxrows;
 --echo $maxrows*3 inserts;
 --disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values ($count);
@@ -35,6 +36,7 @@ eval insert into t2 values ($count+0.33)
 eval insert into t2 values ($count+0.75);
 dec $count;
 }
+commit;
 --enable_query_log
 select count(*) from t2;
 drop table t2;
@@ -55,6 +57,8 @@ partition pa10 values less than (10)
 show create table t3;
 let $count=9;
 --echo $count*3 inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values ($count);
@@ -62,6 +66,8 @@ eval insert into t3 values ($count+0.33)
 eval insert into t3 values ($count+0.75);
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t3;
 select * from t3;
 drop table t3;
@@ -75,6 +81,8 @@ partition pa10 values in (7,8,9,10)
 show create table t4;
 let $count=9;
 --echo $count*3 inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values ($count);
@@ -82,6 +90,8 @@ eval insert into t4 values ($count+0.33)
 eval insert into t4 values ($count+0.75);
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t4;
 select * from t4;
 drop table t4;

=== modified file 'mysql-test/suite/parts/inc/partition_int.inc'
--- a/mysql-test/suite/parts/inc/partition_int.inc	2008-07-01 18:38:15 +0000
+++ b/mysql-test/suite/parts/inc/partition_int.inc	2009-10-28 07:52:34 +0000
@@ -28,11 +28,13 @@ delete from t2;
 let $count=$maxrows;
 --echo $count inserts;
 --disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values ($count);
 dec $count;
 }
+commit;
 --enable_query_log
 select count(*) from t2;
 drop table t2;

=== modified file 'mysql-test/suite/parts/inc/partition_mediumint.inc'
--- a/mysql-test/suite/parts/inc/partition_mediumint.inc	2008-07-01 18:38:15 +0000
+++ b/mysql-test/suite/parts/inc/partition_mediumint.inc	2009-10-28 07:52:34 +0000
@@ -28,11 +28,13 @@ delete from t2;
 let $count=$maxrows;
 --echo $maxrows inserts;
 --disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values ($count);
 dec $count;
 }
+commit;
 --enable_query_log
 select count(*) from t2;
 drop table t2;

=== modified file 'mysql-test/suite/parts/inc/partition_smallint.inc'
--- a/mysql-test/suite/parts/inc/partition_smallint.inc	2008-07-01 18:38:15 +0000
+++ b/mysql-test/suite/parts/inc/partition_smallint.inc	2009-10-28 07:52:34 +0000
@@ -28,11 +28,13 @@ delete from t2;
 let $count=$maxrows;
 --echo $count inserts;
 --disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values ($count);
 dec $count;
 }
+commit;
 --enable_query_log
 select count(*) from t2;
 drop table t2;

=== modified file 'mysql-test/suite/parts/inc/partition_time.inc'
--- a/mysql-test/suite/parts/inc/partition_time.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_time.inc	2009-10-28 07:52:34 +0000
@@ -23,12 +23,15 @@ select * from t2;
 delete from t2;
 let $count=59;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values (000100+$count);
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t2;
 select * from t2;
 drop table t2;
@@ -44,11 +47,15 @@ partition quarter4 values less than (61)
 show create table t3;
 let $count=59;
 --echo $count inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values (100000+$count);
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t3;
 select * from t3;
 drop table t3;
@@ -64,11 +71,15 @@ partition quarter4 values in (46,47,48,4
 show create table t4;
 let $count=59;
 --echo $count inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values (100000+$count);
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t4;
 select * from t4;
 drop table t4;

=== modified file 'mysql-test/suite/parts/inc/partition_timestamp.inc'
--- a/mysql-test/suite/parts/inc/partition_timestamp.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_timestamp.inc	2009-10-28 07:52:34 +0000
@@ -23,12 +23,15 @@ select * from t2;
 delete from t2;
 let $count=59;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values (19710101000000+$count);
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t2;
 select * from t2;
 drop table t2;
@@ -44,11 +47,15 @@ partition quarter4 values less than (13)
 show create table t3;
 let $count=12;
 --echo $count inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values (date_add('1970-01-01 00:00:00',interval $count-1 month));
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t3;
 select * from t3;
 drop table t3;
@@ -64,11 +71,15 @@ partition quarter4 values in (10,11,12)
 show create table t4;
 let $count=12;
 --echo $count inserts;
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values (date_add('1970-01-01 00:00:00',interval $count-1 month));
 dec $count;
 }
+commit;
+--enable_query_log
 select count(*) from t4;
 select * from t4;
 drop table t4;

=== modified file 'mysql-test/suite/parts/inc/partition_tinyint.inc'
--- a/mysql-test/suite/parts/inc/partition_tinyint.inc	2008-07-01 18:38:15 +0000
+++ b/mysql-test/suite/parts/inc/partition_tinyint.inc	2009-10-28 07:52:34 +0000
@@ -28,11 +28,13 @@ delete from t2;
 let $count=255;
 --echo 255 inserts;
 --disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values ($count);
 dec $count;
 }
+commit;
 --enable_query_log
 select count(*) from t2;
 drop table t2;

=== modified file 'mysql-test/suite/parts/inc/partition_varbinary.inc'
--- a/mysql-test/suite/parts/inc/partition_varbinary.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_varbinary.inc	2009-10-28 07:52:34 +0000
@@ -21,13 +21,16 @@ show create table t2;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values (repeat(char(ascii('a')+$letter),$count*$count));
 dec $count;
 inc $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t2;
 select * from t2;
 drop table t2;
@@ -47,13 +50,16 @@ show create table t3;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values (repeat(char(ascii('a')+$letter),$count+54));
 dec $count;
 inc $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t3;
 select hex(a) from t3;
 drop table t3;
@@ -71,13 +77,16 @@ show create table t4;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values (repeat(char(ascii('a')+$letter),$count+54));
 dec $count;
 inc $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t4;
 select hex(a) from t4;
 drop table t4;

=== modified file 'mysql-test/suite/parts/inc/partition_varchar.inc'
--- a/mysql-test/suite/parts/inc/partition_varchar.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_varchar.inc	2009-10-28 07:52:34 +0000
@@ -21,13 +21,16 @@ show create table t2;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values (repeat(char(ascii('a')+$letter),$count*$count));
 dec $count;
 inc $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t2;
 select * from t2;
 drop table t2;
@@ -46,13 +49,16 @@ show create table t3;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t3 values (repeat(char(ascii('a')+$letter),$count+54));
 dec $count;
 inc $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t3;
 select * from t3;
 drop table t3;
@@ -70,13 +76,16 @@ show create table t4;
 let $count=26;
 let $letter=0;
 --echo $count inserts;
-#--disable_query_log
+--disable_query_log
+begin;
 while ($count)
 {
 eval insert into t4 values (repeat(char(ascii('a')+$letter),$count+54));
 dec $count;
 inc $letter;
 }
+commit;
+--enable_query_log
 select count(*) from t4;
 select * from t4;
 drop table t4;

=== modified file 'mysql-test/suite/parts/inc/partition_year.inc'
--- a/mysql-test/suite/parts/inc/partition_year.inc	2008-02-06 14:13:56 +0000
+++ b/mysql-test/suite/parts/inc/partition_year.inc	2009-10-28 07:52:34 +0000
@@ -24,11 +24,13 @@ delete from t2;
 let $count=255;
 --echo $count inserts;
 --disable_query_log
+begin;
 while ($count)
 {
 eval insert into t2 values (1901+$count);
 dec $count;
 }
+commit;
 --enable_query_log
 select count(*) from t2;
 select * from t2;

=== modified file 'mysql-test/suite/parts/r/partition_char_innodb.result'
--- a/mysql-test/suite/parts/r/partition_char_innodb.result	2008-11-04 07:43:21 +0000
+++ b/mysql-test/suite/parts/r/partition_char_innodb.result	2009-10-28 07:52:34 +0000
@@ -45,32 +45,6 @@ t2	CREATE TABLE `t2` (
 /*!50100 PARTITION BY KEY (a)
 PARTITIONS 27 */
 26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26+54));
-insert into t2 values (repeat(char(ascii('a')+1),25+54));
-insert into t2 values (repeat(char(ascii('a')+2),24+54));
-insert into t2 values (repeat(char(ascii('a')+3),23+54));
-insert into t2 values (repeat(char(ascii('a')+4),22+54));
-insert into t2 values (repeat(char(ascii('a')+5),21+54));
-insert into t2 values (repeat(char(ascii('a')+6),20+54));
-insert into t2 values (repeat(char(ascii('a')+7),19+54));
-insert into t2 values (repeat(char(ascii('a')+8),18+54));
-insert into t2 values (repeat(char(ascii('a')+9),17+54));
-insert into t2 values (repeat(char(ascii('a')+10),16+54));
-insert into t2 values (repeat(char(ascii('a')+11),15+54));
-insert into t2 values (repeat(char(ascii('a')+12),14+54));
-insert into t2 values (repeat(char(ascii('a')+13),13+54));
-insert into t2 values (repeat(char(ascii('a')+14),12+54));
-insert into t2 values (repeat(char(ascii('a')+15),11+54));
-insert into t2 values (repeat(char(ascii('a')+16),10+54));
-insert into t2 values (repeat(char(ascii('a')+17),9+54));
-insert into t2 values (repeat(char(ascii('a')+18),8+54));
-insert into t2 values (repeat(char(ascii('a')+19),7+54));
-insert into t2 values (repeat(char(ascii('a')+20),6+54));
-insert into t2 values (repeat(char(ascii('a')+21),5+54));
-insert into t2 values (repeat(char(ascii('a')+22),4+54));
-insert into t2 values (repeat(char(ascii('a')+23),3+54));
-insert into t2 values (repeat(char(ascii('a')+24),2+54));
-insert into t2 values (repeat(char(ascii('a')+25),1+54));
 select count(*) from t2;
 count(*)
 26
@@ -153,32 +127,6 @@ t2	CREATE TABLE `t2` (
 /*!50100 PARTITION BY KEY (a)
 PARTITIONS 27 */
 26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26+54));
-insert into t2 values (repeat(char(ascii('a')+1),25+54));
-insert into t2 values (repeat(char(ascii('a')+2),24+54));
-insert into t2 values (repeat(char(ascii('a')+3),23+54));
-insert into t2 values (repeat(char(ascii('a')+4),22+54));
-insert into t2 values (repeat(char(ascii('a')+5),21+54));
-insert into t2 values (repeat(char(ascii('a')+6),20+54));
-insert into t2 values (repeat(char(ascii('a')+7),19+54));
-insert into t2 values (repeat(char(ascii('a')+8),18+54));
-insert into t2 values (repeat(char(ascii('a')+9),17+54));
-insert into t2 values (repeat(char(ascii('a')+10),16+54));
-insert into t2 values (repeat(char(ascii('a')+11),15+54));
-insert into t2 values (repeat(char(ascii('a')+12),14+54));
-insert into t2 values (repeat(char(ascii('a')+13),13+54));
-insert into t2 values (repeat(char(ascii('a')+14),12+54));
-insert into t2 values (repeat(char(ascii('a')+15),11+54));
-insert into t2 values (repeat(char(ascii('a')+16),10+54));
-insert into t2 values (repeat(char(ascii('a')+17),9+54));
-insert into t2 values (repeat(char(ascii('a')+18),8+54));
-insert into t2 values (repeat(char(ascii('a')+19),7+54));
-insert into t2 values (repeat(char(ascii('a')+20),6+54));
-insert into t2 values (repeat(char(ascii('a')+21),5+54));
-insert into t2 values (repeat(char(ascii('a')+22),4+54));
-insert into t2 values (repeat(char(ascii('a')+23),3+54));
-insert into t2 values (repeat(char(ascii('a')+24),2+54));
-insert into t2 values (repeat(char(ascii('a')+25),1+54));
 select count(*) from t2;
 count(*)
 26
@@ -258,32 +206,6 @@ t2	CREATE TABLE `t2` (
 /*!50100 PARTITION BY KEY (a)
 PARTITIONS 27 */
 26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26*26));
-insert into t2 values (repeat(char(ascii('a')+1),25*25));
-insert into t2 values (repeat(char(ascii('a')+2),24*24));
-insert into t2 values (repeat(char(ascii('a')+3),23*23));
-insert into t2 values (repeat(char(ascii('a')+4),22*22));
-insert into t2 values (repeat(char(ascii('a')+5),21*21));
-insert into t2 values (repeat(char(ascii('a')+6),20*20));
-insert into t2 values (repeat(char(ascii('a')+7),19*19));
-insert into t2 values (repeat(char(ascii('a')+8),18*18));
-insert into t2 values (repeat(char(ascii('a')+9),17*17));
-insert into t2 values (repeat(char(ascii('a')+10),16*16));
-insert into t2 values (repeat(char(ascii('a')+11),15*15));
-insert into t2 values (repeat(char(ascii('a')+12),14*14));
-insert into t2 values (repeat(char(ascii('a')+13),13*13));
-insert into t2 values (repeat(char(ascii('a')+14),12*12));
-insert into t2 values (repeat(char(ascii('a')+15),11*11));
-insert into t2 values (repeat(char(ascii('a')+16),10*10));
-insert into t2 values (repeat(char(ascii('a')+17),9*9));
-insert into t2 values (repeat(char(ascii('a')+18),8*8));
-insert into t2 values (repeat(char(ascii('a')+19),7*7));
-insert into t2 values (repeat(char(ascii('a')+20),6*6));
-insert into t2 values (repeat(char(ascii('a')+21),5*5));
-insert into t2 values (repeat(char(ascii('a')+22),4*4));
-insert into t2 values (repeat(char(ascii('a')+23),3*3));
-insert into t2 values (repeat(char(ascii('a')+24),2*2));
-insert into t2 values (repeat(char(ascii('a')+25),1*1));
 select count(*) from t2;
 count(*)
 26
@@ -363,32 +285,6 @@ t2	CREATE TABLE `t2` (
 /*!50100 PARTITION BY KEY (a)
 PARTITIONS 30 */
 26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26*26));
-insert into t2 values (repeat(char(ascii('a')+1),25*25));
-insert into t2 values (repeat(char(ascii('a')+2),24*24));
-insert into t2 values (repeat(char(ascii('a')+3),23*23));
-insert into t2 values (repeat(char(ascii('a')+4),22*22));
-insert into t2 values (repeat(char(ascii('a')+5),21*21));
-insert into t2 values (repeat(char(ascii('a')+6),20*20));
-insert into t2 values (repeat(char(ascii('a')+7),19*19));
-insert into t2 values (repeat(char(ascii('a')+8),18*18));
-insert into t2 values (repeat(char(ascii('a')+9),17*17));
-insert into t2 values (repeat(char(ascii('a')+10),16*16));
-insert into t2 values (repeat(char(ascii('a')+11),15*15));
-insert into t2 values (repeat(char(ascii('a')+12),14*14));
-insert into t2 values (repeat(char(ascii('a')+13),13*13));
-insert into t2 values (repeat(char(ascii('a')+14),12*12));
-insert into t2 values (repeat(char(ascii('a')+15),11*11));
-insert into t2 values (repeat(char(ascii('a')+16),10*10));
-insert into t2 values (repeat(char(ascii('a')+17),9*9));
-insert into t2 values (repeat(char(ascii('a')+18),8*8));
-insert into t2 values (repeat(char(ascii('a')+19),7*7));
-insert into t2 values (repeat(char(ascii('a')+20),6*6));
-insert into t2 values (repeat(char(ascii('a')+21),5*5));
-insert into t2 values (repeat(char(ascii('a')+22),4*4));
-insert into t2 values (repeat(char(ascii('a')+23),3*3));
-insert into t2 values (repeat(char(ascii('a')+24),2*2));
-insert into t2 values (repeat(char(ascii('a')+25),1*1));
 select count(*) from t2;
 count(*)
 26
@@ -479,34 +375,8 @@ t2	CREATE TABLE `t2` (
 /*!50100 PARTITION BY KEY (a)
 PARTITIONS 27 */
 0 inserts;
-insert into t2 values (char(ascii('A')+26));
 Warnings:
 Warning	1265	Data truncated for column 'a' at row 1
-insert into t2 values (char(ascii('A')+25));
-insert into t2 values (char(ascii('A')+24));
-insert into t2 values (char(ascii('A')+23));
-insert into t2 values (char(ascii('A')+22));
-insert into t2 values (char(ascii('A')+21));
-insert into t2 values (char(ascii('A')+20));
-insert into t2 values (char(ascii('A')+19));
-insert into t2 values (char(ascii('A')+18));
-insert into t2 values (char(ascii('A')+17));
-insert into t2 values (char(ascii('A')+16));
-insert into t2 values (char(ascii('A')+15));
-insert into t2 values (char(ascii('A')+14));
-insert into t2 values (char(ascii('A')+13));
-insert into t2 values (char(ascii('A')+12));
-insert into t2 values (char(ascii('A')+11));
-insert into t2 values (char(ascii('A')+10));
-insert into t2 values (char(ascii('A')+9));
-insert into t2 values (char(ascii('A')+8));
-insert into t2 values (char(ascii('A')+7));
-insert into t2 values (char(ascii('A')+6));
-insert into t2 values (char(ascii('A')+5));
-insert into t2 values (char(ascii('A')+4));
-insert into t2 values (char(ascii('A')+3));
-insert into t2 values (char(ascii('A')+2));
-insert into t2 values (char(ascii('A')+1));
 insert into t2 values ('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'),('0');
 select count(*) from t2;
 count(*)

=== modified file 'mysql-test/suite/parts/r/partition_char_myisam.result'
--- a/mysql-test/suite/parts/r/partition_char_myisam.result	2008-11-04 07:43:21 +0000
+++ b/mysql-test/suite/parts/r/partition_char_myisam.result	2009-10-28 07:52:34 +0000
@@ -45,32 +45,6 @@ t2	CREATE TABLE `t2` (
 /*!50100 PARTITION BY KEY (a)
 PARTITIONS 27 */
 26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26+54));
-insert into t2 values (repeat(char(ascii('a')+1),25+54));
-insert into t2 values (repeat(char(ascii('a')+2),24+54));
-insert into t2 values (repeat(char(ascii('a')+3),23+54));
-insert into t2 values (repeat(char(ascii('a')+4),22+54));
-insert into t2 values (repeat(char(ascii('a')+5),21+54));
-insert into t2 values (repeat(char(ascii('a')+6),20+54));
-insert into t2 values (repeat(char(ascii('a')+7),19+54));
-insert into t2 values (repeat(char(ascii('a')+8),18+54));
-insert into t2 values (repeat(char(ascii('a')+9),17+54));
-insert into t2 values (repeat(char(ascii('a')+10),16+54));
-insert into t2 values (repeat(char(ascii('a')+11),15+54));
-insert into t2 values (repeat(char(ascii('a')+12),14+54));
-insert into t2 values (repeat(char(ascii('a')+13),13+54));
-insert into t2 values (repeat(char(ascii('a')+14),12+54));
-insert into t2 values (repeat(char(ascii('a')+15),11+54));
-insert into t2 values (repeat(char(ascii('a')+16),10+54));
-insert into t2 values (repeat(char(ascii('a')+17),9+54));
-insert into t2 values (repeat(char(ascii('a')+18),8+54));
-insert into t2 values (repeat(char(ascii('a')+19),7+54));
-insert into t2 values (repeat(char(ascii('a')+20),6+54));
-insert into t2 values (repeat(char(ascii('a')+21),5+54));
-insert into t2 values (repeat(char(ascii('a')+22),4+54));
-insert into t2 values (repeat(char(ascii('a')+23),3+54));
-insert into t2 values (repeat(char(ascii('a')+24),2+54));
-insert into t2 values (repeat(char(ascii('a')+25),1+54));
 select count(*) from t2;
 count(*)
 26
@@ -153,32 +127,6 @@ t2	CREATE TABLE `t2` (
 /*!50100 PARTITION BY KEY (a)
 PARTITIONS 27 */
 26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26+54));
-insert into t2 values (repeat(char(ascii('a')+1),25+54));
-insert into t2 values (repeat(char(ascii('a')+2),24+54));
-insert into t2 values (repeat(char(ascii('a')+3),23+54));
-insert into t2 values (repeat(char(ascii('a')+4),22+54));
-insert into t2 values (repeat(char(ascii('a')+5),21+54));
-insert into t2 values (repeat(char(ascii('a')+6),20+54));
-insert into t2 values (repeat(char(ascii('a')+7),19+54));
-insert into t2 values (repeat(char(ascii('a')+8),18+54));
-insert into t2 values (repeat(char(ascii('a')+9),17+54));
-insert into t2 values (repeat(char(ascii('a')+10),16+54));
-insert into t2 values (repeat(char(ascii('a')+11),15+54));
-insert into t2 values (repeat(char(ascii('a')+12),14+54));
-insert into t2 values (repeat(char(ascii('a')+13),13+54));
-insert into t2 values (repeat(char(ascii('a')+14),12+54));
-insert into t2 values (repeat(char(ascii('a')+15),11+54));
-insert into t2 values (repeat(char(ascii('a')+16),10+54));
-insert into t2 values (repeat(char(ascii('a')+17),9+54));
-insert into t2 values (repeat(char(ascii('a')+18),8+54));
-insert into t2 values (repeat(char(ascii('a')+19),7+54));
-insert into t2 values (repeat(char(ascii('a')+20),6+54));
-insert into t2 values (repeat(char(ascii('a')+21),5+54));
-insert into t2 values (repeat(char(ascii('a')+22),4+54));
-insert into t2 values (repeat(char(ascii('a')+23),3+54));
-insert into t2 values (repeat(char(ascii('a')+24),2+54));
-insert into t2 values (repeat(char(ascii('a')+25),1+54));
 select count(*) from t2;
 count(*)
 26
@@ -258,32 +206,6 @@ t2	CREATE TABLE `t2` (
 /*!50100 PARTITION BY KEY (a)
 PARTITIONS 27 */
 26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26*26));
-insert into t2 values (repeat(char(ascii('a')+1),25*25));
-insert into t2 values (repeat(char(ascii('a')+2),24*24));
-insert into t2 values (repeat(char(ascii('a')+3),23*23));
-insert into t2 values (repeat(char(ascii('a')+4),22*22));
-insert into t2 values (repeat(char(ascii('a')+5),21*21));
-insert into t2 values (repeat(char(ascii('a')+6),20*20));
-insert into t2 values (repeat(char(ascii('a')+7),19*19));
-insert into t2 values (repeat(char(ascii('a')+8),18*18));
-insert into t2 values (repeat(char(ascii('a')+9),17*17));
-insert into t2 values (repeat(char(ascii('a')+10),16*16));
-insert into t2 values (repeat(char(ascii('a')+11),15*15));
-insert into t2 values (repeat(char(ascii('a')+12),14*14));
-insert into t2 values (repeat(char(ascii('a')+13),13*13));
-insert into t2 values (repeat(char(ascii('a')+14),12*12));
-insert into t2 values (repeat(char(ascii('a')+15),11*11));
-insert into t2 values (repeat(char(ascii('a')+16),10*10));
-insert into t2 values (repeat(char(ascii('a')+17),9*9));
-insert into t2 values (repeat(char(ascii('a')+18),8*8));
-insert into t2 values (repeat(char(ascii('a')+19),7*7));
-insert into t2 values (repeat(char(ascii('a')+20),6*6));
-insert into t2 values (repeat(char(ascii('a')+21),5*5));
-insert into t2 values (repeat(char(ascii('a')+22),4*4));
-insert into t2 values (repeat(char(ascii('a')+23),3*3));
-insert into t2 values (repeat(char(ascii('a')+24),2*2));
-insert into t2 values (repeat(char(ascii('a')+25),1*1));
 select count(*) from t2;
 count(*)
 26
@@ -363,32 +285,6 @@ t2	CREATE TABLE `t2` (
 /*!50100 PARTITION BY KEY (a)
 PARTITIONS 30 */
 26 inserts;
-insert into t2 values (repeat(char(ascii('a')+0),26*26));
-insert into t2 values (repeat(char(ascii('a')+1),25*25));
-insert into t2 values (repeat(char(ascii('a')+2),24*24));
-insert into t2 values (repeat(char(ascii('a')+3),23*23));
-insert into t2 values (repeat(char(ascii('a')+4),22*22));
-insert into t2 values (repeat(char(ascii('a')+5),21*21));
-insert into t2 values (repeat(char(ascii('a')+6),20*20));
-insert into t2 values (repeat(char(ascii('a')+7),19*19));
-insert into t2 values (repeat(char(ascii('a')+8),18*18));
-insert into t2 values (repeat(char(ascii('a')+9),17*17));
-insert into t2 values (repeat(char(ascii('a')+10),16*16));
-insert into t2 values (repeat(char(ascii('a')+11),15*15));
-insert into t2 values (repeat(char(ascii('a')+12),14*14));
-insert into t2 values (repeat(char(ascii('a')+13),13*13));
-insert into t2 values (repeat(char(ascii('a')+14),12*12));
-insert into t2 values (repeat(char(ascii('a')+15),11*11));
-insert into t2 values (repeat(char(ascii('a')+16),10*10));
-insert into t2 values (repeat(char(ascii('a')+17),9*9));
-insert into t2 values (repeat(char(ascii('a')+18),8*8));
-insert into t2 values (repeat(char(ascii('a')+19),7*7));
-insert into t2 values (repeat(char(ascii('a')+20),6*6));
-insert into t2 values (repeat(char(ascii('a')+21),5*5));
-insert into t2 values (repeat(char(ascii('a')+22),4*4));
-insert into t2 values (repeat(char(ascii('a')+23),3*3));
-insert into t2 values (repeat(char(ascii('a')+24),2*2));
-insert into t2 values (repeat(char(ascii('a')+25),1*1));
 select count(*) from t2;
 count(*)
 26
@@ -479,34 +375,8 @@ t2	CREATE TABLE `t2` (
 /*!50100 PARTITION BY KEY (a)
 PARTITIONS 27 */
 0 inserts;
-insert into t2 values (char(ascii('A')+26));
 Warnings:
 Warning	1265	Data truncated for column 'a' at row 1
-insert into t2 values (char(ascii('A')+25));
-insert into t2 values (char(ascii('A')+24));
-insert into t2 values (char(ascii('A')+23));
-insert into t2 values (char(ascii('A')+22));
-insert into t2 values (char(ascii('A')+21));
-insert into t2 values (char(ascii('A')+20));
-insert into t2 values (char(ascii('A')+19));
-insert into t2 values (char(ascii('A')+18));
-insert into t2 values (char(ascii('A')+17));
-insert into t2 values (char(ascii('A')+16));
-insert into t2 values (char(ascii('A')+15));
-insert into t2 values (char(ascii('A')+14));
-insert into t2 values (char(ascii('A')+13));
-insert into t2 values (char(ascii('A')+12));
-insert into t2 values (char(ascii('A')+11));
-insert into t2 values (char(ascii('A')+10));
-insert into t2 values (char(ascii('A')+9));
-insert into t2 values (char(ascii('A')+8));
-insert into t2 values (char(ascii('A')+7));
-insert into t2 values (char(ascii('A')+6));
-insert into t2 values (char(ascii('A')+5));
-insert into t2 values (char(ascii('A')+4));
-insert into t2 values (char(ascii('A')+3));
-insert into t2 values (char(ascii('A')+2));
-insert into t2 values (char(ascii('A')+1));
 insert into t2 values ('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'),('0');
 select count(*) from t2;
 count(*)

=== modified file 'mysql-test/suite/parts/r/partition_datetime_innodb.result'
--- a/mysql-test/suite/parts/r/partition_datetime_innodb.result	2008-11-04 07:43:21 +0000
+++ b/mysql-test/suite/parts/r/partition_datetime_innodb.result	2009-10-28 07:52:34 +0000
@@ -60,65 +60,6 @@ a
 2020-12-31 10:11:12
 delete from t2;
 59 inserts;
-insert into t2 values (19710101000000+59);
-insert into t2 values (19710101000000+58);
-insert into t2 values (19710101000000+57);
-insert into t2 values (19710101000000+56);
-insert into t2 values (19710101000000+55);
-insert into t2 values (19710101000000+54);
-insert into t2 values (19710101000000+53);
-insert into t2 values (19710101000000+52);
-insert into t2 values (19710101000000+51);
-insert into t2 values (19710101000000+50);
-insert into t2 values (19710101000000+49);
-insert into t2 values (19710101000000+48);
-insert into t2 values (19710101000000+47);
-insert into t2 values (19710101000000+46);
-insert into t2 values (19710101000000+45);
-insert into t2 values (19710101000000+44);
-insert into t2 values (19710101000000+43);
-insert into t2 values (19710101000000+42);
-insert into t2 values (19710101000000+41);
-insert into t2 values (19710101000000+40);
-insert into t2 values (19710101000000+39);
-insert into t2 values (19710101000000+38);
-insert into t2 values (19710101000000+37);
-insert into t2 values (19710101000000+36);
-insert into t2 values (19710101000000+35);
-insert into t2 values (19710101000000+34);
-insert into t2 values (19710101000000+33);
-insert into t2 values (19710101000000+32);
-insert into t2 values (19710101000000+31);
-insert into t2 values (19710101000000+30);
-insert into t2 values (19710101000000+29);
-insert into t2 values (19710101000000+28);
-insert into t2 values (19710101000000+27);
-insert into t2 values (19710101000000+26);
-insert into t2 values (19710101000000+25);
-insert into t2 values (19710101000000+24);
-insert into t2 values (19710101000000+23);
-insert into t2 values (19710101000000+22);
-insert into t2 values (19710101000000+21);
-insert into t2 values (19710101000000+20);
-insert into t2 values (19710101000000+19);
-insert into t2 values (19710101000000+18);
-insert into t2 values (19710101000000+17);
-insert into t2 values (19710101000000+16);
-insert into t2 values (19710101000000+15);
-insert into t2 values (19710101000000+14);
-insert into t2 values (19710101000000+13);
-insert into t2 values (19710101000000+12);
-insert into t2 values (19710101000000+11);
-insert into t2 values (19710101000000+10);
-insert into t2 values (19710101000000+9);
-insert into t2 values (19710101000000+8);
-insert into t2 values (19710101000000+7);
-insert into t2 values (19710101000000+6);
-insert into t2 values (19710101000000+5);
-insert into t2 values (19710101000000+4);
-insert into t2 values (19710101000000+3);
-insert into t2 values (19710101000000+2);
-insert into t2 values (19710101000000+1);
 select count(*) from t2;
 count(*)
 59
@@ -206,18 +147,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES LESS THAN (10) ENGINE = InnoDB,
  PARTITION quarter4 VALUES LESS THAN (13) ENGINE = InnoDB) */
 12 inserts;
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 12-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 11-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 10-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 9-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 8-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 7-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 6-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 5-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 4-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 3-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 2-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 1-1 month));
 Warnings:
 Warning	1264	Out of range value for column 'a' at row 1
 select count(*) from t3;
@@ -260,18 +189,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES IN (7,8,9) ENGINE = InnoDB,
  PARTITION quarter4 VALUES IN (10,11,12) ENGINE = InnoDB) */
 12 inserts;
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 12-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 11-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 10-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 9-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 8-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 7-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 6-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 5-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 4-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 3-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 2-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 1-1 month));
 Warnings:
 Warning	1264	Out of range value for column 'a' at row 1
 select count(*) from t4;
@@ -354,90 +271,6 @@ a
 2020-12-31
 delete from t2;
 28 inserts;
-insert into t2 values (19700101+28-1);
-insert into t2 values (19700201+28-1);
-insert into t2 values (19700301+28-1);
-insert into t2 values (19700101+27-1);
-insert into t2 values (19700201+27-1);
-insert into t2 values (19700301+27-1);
-insert into t2 values (19700101+26-1);
-insert into t2 values (19700201+26-1);
-insert into t2 values (19700301+26-1);
-insert into t2 values (19700101+25-1);
-insert into t2 values (19700201+25-1);
-insert into t2 values (19700301+25-1);
-insert into t2 values (19700101+24-1);
-insert into t2 values (19700201+24-1);
-insert into t2 values (19700301+24-1);
-insert into t2 values (19700101+23-1);
-insert into t2 values (19700201+23-1);
-insert into t2 values (19700301+23-1);
-insert into t2 values (19700101+22-1);
-insert into t2 values (19700201+22-1);
-insert into t2 values (19700301+22-1);
-insert into t2 values (19700101+21-1);
-insert into t2 values (19700201+21-1);
-insert into t2 values (19700301+21-1);
-insert into t2 values (19700101+20-1);
-insert into t2 values (19700201+20-1);
-insert into t2 values (19700301+20-1);
-insert into t2 values (19700101+19-1);
-insert into t2 values (19700201+19-1);
-insert into t2 values (19700301+19-1);
-insert into t2 values (19700101+18-1);
-insert into t2 values (19700201+18-1);
-insert into t2 values (19700301+18-1);
-insert into t2 values (19700101+17-1);
-insert into t2 values (19700201+17-1);
-insert into t2 values (19700301+17-1);
-insert into t2 values (19700101+16-1);
-insert into t2 values (19700201+16-1);
-insert into t2 values (19700301+16-1);
-insert into t2 values (19700101+15-1);
-insert into t2 values (19700201+15-1);
-insert into t2 values (19700301+15-1);
-insert into t2 values (19700101+14-1);
-insert into t2 values (19700201+14-1);
-insert into t2 values (19700301+14-1);
-insert into t2 values (19700101+13-1);
-insert into t2 values (19700201+13-1);
-insert into t2 values (19700301+13-1);
-insert into t2 values (19700101+12-1);
-insert into t2 values (19700201+12-1);
-insert into t2 values (19700301+12-1);
-insert into t2 values (19700101+11-1);
-insert into t2 values (19700201+11-1);
-insert into t2 values (19700301+11-1);
-insert into t2 values (19700101+10-1);
-insert into t2 values (19700201+10-1);
-insert into t2 values (19700301+10-1);
-insert into t2 values (19700101+9-1);
-insert into t2 values (19700201+9-1);
-insert into t2 values (19700301+9-1);
-insert into t2 values (19700101+8-1);
-insert into t2 values (19700201+8-1);
-insert into t2 values (19700301+8-1);
-insert into t2 values (19700101+7-1);
-insert into t2 values (19700201+7-1);
-insert into t2 values (19700301+7-1);
-insert into t2 values (19700101+6-1);
-insert into t2 values (19700201+6-1);
-insert into t2 values (19700301+6-1);
-insert into t2 values (19700101+5-1);
-insert into t2 values (19700201+5-1);
-insert into t2 values (19700301+5-1);
-insert into t2 values (19700101+4-1);
-insert into t2 values (19700201+4-1);
-insert into t2 values (19700301+4-1);
-insert into t2 values (19700101+3-1);
-insert into t2 values (19700201+3-1);
-insert into t2 values (19700301+3-1);
-insert into t2 values (19700101+2-1);
-insert into t2 values (19700201+2-1);
-insert into t2 values (19700301+2-1);
-insert into t2 values (19700101+1-1);
-insert into t2 values (19700201+1-1);
-insert into t2 values (19700301+1-1);
 select count(*) from t2;
 count(*)
 84
@@ -550,18 +383,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES LESS THAN (10) ENGINE = InnoDB,
  PARTITION quarter4 VALUES LESS THAN (13) ENGINE = InnoDB) */
 12 inserts;
-insert into t3 values (adddate(19700101,interval 12-1 month));
-insert into t3 values (adddate(19700101,interval 11-1 month));
-insert into t3 values (adddate(19700101,interval 10-1 month));
-insert into t3 values (adddate(19700101,interval 9-1 month));
-insert into t3 values (adddate(19700101,interval 8-1 month));
-insert into t3 values (adddate(19700101,interval 7-1 month));
-insert into t3 values (adddate(19700101,interval 6-1 month));
-insert into t3 values (adddate(19700101,interval 5-1 month));
-insert into t3 values (adddate(19700101,interval 4-1 month));
-insert into t3 values (adddate(19700101,interval 3-1 month));
-insert into t3 values (adddate(19700101,interval 2-1 month));
-insert into t3 values (adddate(19700101,interval 1-1 month));
 select count(*) from t3;
 count(*)
 12
@@ -602,18 +423,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES IN (7,8,9) ENGINE = InnoDB,
  PARTITION quarter4 VALUES IN (10,11,12) ENGINE = InnoDB) */
 12 inserts;
-insert into t4 values (adddate(19700101,interval 12-1 month));
-insert into t4 values (adddate(19700101,interval 11-1 month));
-insert into t4 values (adddate(19700101,interval 10-1 month));
-insert into t4 values (adddate(19700101,interval 9-1 month));
-insert into t4 values (adddate(19700101,interval 8-1 month));
-insert into t4 values (adddate(19700101,interval 7-1 month));
-insert into t4 values (adddate(19700101,interval 6-1 month));
-insert into t4 values (adddate(19700101,interval 5-1 month));
-insert into t4 values (adddate(19700101,interval 4-1 month));
-insert into t4 values (adddate(19700101,interval 3-1 month));
-insert into t4 values (adddate(19700101,interval 2-1 month));
-insert into t4 values (adddate(19700101,interval 1-1 month));
 select count(*) from t4;
 count(*)
 12
@@ -694,65 +503,6 @@ a
 14:15:16
 delete from t2;
 59 inserts;
-insert into t2 values (000100+59);
-insert into t2 values (000100+58);
-insert into t2 values (000100+57);
-insert into t2 values (000100+56);
-insert into t2 values (000100+55);
-insert into t2 values (000100+54);
-insert into t2 values (000100+53);
-insert into t2 values (000100+52);
-insert into t2 values (000100+51);
-insert into t2 values (000100+50);
-insert into t2 values (000100+49);
-insert into t2 values (000100+48);
-insert into t2 values (000100+47);
-insert into t2 values (000100+46);
-insert into t2 values (000100+45);
-insert into t2 values (000100+44);
-insert into t2 values (000100+43);
-insert into t2 values (000100+42);
-insert into t2 values (000100+41);
-insert into t2 values (000100+40);
-insert into t2 values (000100+39);
-insert into t2 values (000100+38);
-insert into t2 values (000100+37);
-insert into t2 values (000100+36);
-insert into t2 values (000100+35);
-insert into t2 values (000100+34);
-insert into t2 values (000100+33);
-insert into t2 values (000100+32);
-insert into t2 values (000100+31);
-insert into t2 values (000100+30);
-insert into t2 values (000100+29);
-insert into t2 values (000100+28);
-insert into t2 values (000100+27);
-insert into t2 values (000100+26);
-insert into t2 values (000100+25);
-insert into t2 values (000100+24);
-insert into t2 values (000100+23);
-insert into t2 values (000100+22);
-insert into t2 values (000100+21);
-insert into t2 values (000100+20);
-insert into t2 values (000100+19);
-insert into t2 values (000100+18);
-insert into t2 values (000100+17);
-insert into t2 values (000100+16);
-insert into t2 values (000100+15);
-insert into t2 values (000100+14);
-insert into t2 values (000100+13);
-insert into t2 values (000100+12);
-insert into t2 values (000100+11);
-insert into t2 values (000100+10);
-insert into t2 values (000100+9);
-insert into t2 values (000100+8);
-insert into t2 values (000100+7);
-insert into t2 values (000100+6);
-insert into t2 values (000100+5);
-insert into t2 values (000100+4);
-insert into t2 values (000100+3);
-insert into t2 values (000100+2);
-insert into t2 values (000100+1);
 select count(*) from t2;
 count(*)
 59
@@ -840,65 +590,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES LESS THAN (46) ENGINE = InnoDB,
  PARTITION quarter4 VALUES LESS THAN (61) ENGINE = InnoDB) */
 59 inserts;
-insert into t3 values (100000+59);
-insert into t3 values (100000+58);
-insert into t3 values (100000+57);
-insert into t3 values (100000+56);
-insert into t3 values (100000+55);
-insert into t3 values (100000+54);
-insert into t3 values (100000+53);
-insert into t3 values (100000+52);
-insert into t3 values (100000+51);
-insert into t3 values (100000+50);
-insert into t3 values (100000+49);
-insert into t3 values (100000+48);
-insert into t3 values (100000+47);
-insert into t3 values (100000+46);
-insert into t3 values (100000+45);
-insert into t3 values (100000+44);
-insert into t3 values (100000+43);
-insert into t3 values (100000+42);
-insert into t3 values (100000+41);
-insert into t3 values (100000+40);
-insert into t3 values (100000+39);
-insert into t3 values (100000+38);
-insert into t3 values (100000+37);
-insert into t3 values (100000+36);
-insert into t3 values (100000+35);
-insert into t3 values (100000+34);
-insert into t3 values (100000+33);
-insert into t3 values (100000+32);
-insert into t3 values (100000+31);
-insert into t3 values (100000+30);
-insert into t3 values (100000+29);
-insert into t3 values (100000+28);
-insert into t3 values (100000+27);
-insert into t3 values (100000+26);
-insert into t3 values (100000+25);
-insert into t3 values (100000+24);
-insert into t3 values (100000+23);
-insert into t3 values (100000+22);
-insert into t3 values (100000+21);
-insert into t3 values (100000+20);
-insert into t3 values (100000+19);
-insert into t3 values (100000+18);
-insert into t3 values (100000+17);
-insert into t3 values (100000+16);
-insert into t3 values (100000+15);
-insert into t3 values (100000+14);
-insert into t3 values (100000+13);
-insert into t3 values (100000+12);
-insert into t3 values (100000+11);
-insert into t3 values (100000+10);
-insert into t3 values (100000+9);
-insert into t3 values (100000+8);
-insert into t3 values (100000+7);
-insert into t3 values (100000+6);
-insert into t3 values (100000+5);
-insert into t3 values (100000+4);
-insert into t3 values (100000+3);
-insert into t3 values (100000+2);
-insert into t3 values (100000+1);
 select count(*) from t3;
 count(*)
 59
@@ -986,65 +677,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES IN (31,32,33,34,35,36,37,38,39,40,41,42,43,44,45) ENGINE = InnoDB,
  PARTITION quarter4 VALUES IN (46,47,48,49,50,51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */
 59 inserts;
-insert into t4 values (100000+59);
-insert into t4 values (100000+58);
-insert into t4 values (100000+57);
-insert into t4 values (100000+56);
-insert into t4 values (100000+55);
-insert into t4 values (100000+54);
-insert into t4 values (100000+53);
-insert into t4 values (100000+52);
-insert into t4 values (100000+51);
-insert into t4 values (100000+50);
-insert into t4 values (100000+49);
-insert into t4 values (100000+48);
-insert into t4 values (100000+47);
-insert into t4 values (100000+46);
-insert into t4 values (100000+45);
-insert into t4 values (100000+44);
-insert into t4 values (100000+43);
-insert into t4 values (100000+42);
-insert into t4 values (100000+41);
-insert into t4 values (100000+40);
-insert into t4 values (100000+39);
-insert into t4 values (100000+38);
-insert into t4 values (100000+37);
-insert into t4 values (100000+36);
-insert into t4 values (100000+35);
-insert into t4 values (100000+34);
-insert into t4 values (100000+33);
-insert into t4 values (100000+32);
-insert into t4 values (100000+31);
-insert into t4 values (100000+30);
-insert into t4 values (100000+29);
-insert into t4 values (100000+28);
-insert into t4 values (100000+27);
-insert into t4 values (100000+26);
-insert into t4 values (100000+25);
-insert into t4 values (100000+24);
-insert into t4 values (100000+23);
-insert into t4 values (100000+22);
-insert into t4 values (100000+21);
-insert into t4 values (100000+20);
-insert into t4 values (100000+19);
-insert into t4 values (100000+18);
-insert into t4 values (100000+17);
-insert into t4 values (100000+16);
-insert into t4 values (100000+15);
-insert into t4 values (100000+14);
-insert into t4 values (100000+13);
-insert into t4 values (100000+12);
-insert into t4 values (100000+11);
-insert into t4 values (100000+10);
-insert into t4 values (100000+9);
-insert into t4 values (100000+8);
-insert into t4 values (100000+7);
-insert into t4 values (100000+6);
-insert into t4 values (100000+5);
-insert into t4 values (100000+4);
-insert into t4 values (100000+3);
-insert into t4 values (100000+2);
-insert into t4 values (100000+1);
 select count(*) from t4;
 count(*)
 59
@@ -1172,65 +804,6 @@ a
 2020-12-31 10:11:12
 delete from t2;
 59 inserts;
-insert into t2 values (19700101000000+59);
-insert into t2 values (19700101000000+58);
-insert into t2 values (19700101000000+57);
-insert into t2 values (19700101000000+56);
-insert into t2 values (19700101000000+55);
-insert into t2 values (19700101000000+54);
-insert into t2 values (19700101000000+53);
-insert into t2 values (19700101000000+52);
-insert into t2 values (19700101000000+51);
-insert into t2 values (19700101000000+50);
-insert into t2 values (19700101000000+49);
-insert into t2 values (19700101000000+48);
-insert into t2 values (19700101000000+47);
-insert into t2 values (19700101000000+46);
-insert into t2 values (19700101000000+45);
-insert into t2 values (19700101000000+44);
-insert into t2 values (19700101000000+43);
-insert into t2 values (19700101000000+42);
-insert into t2 values (19700101000000+41);
-insert into t2 values (19700101000000+40);
-insert into t2 values (19700101000000+39);
-insert into t2 values (19700101000000+38);
-insert into t2 values (19700101000000+37);
-insert into t2 values (19700101000000+36);
-insert into t2 values (19700101000000+35);
-insert into t2 values (19700101000000+34);
-insert into t2 values (19700101000000+33);
-insert into t2 values (19700101000000+32);
-insert into t2 values (19700101000000+31);
-insert into t2 values (19700101000000+30);
-insert into t2 values (19700101000000+29);
-insert into t2 values (19700101000000+28);
-insert into t2 values (19700101000000+27);
-insert into t2 values (19700101000000+26);
-insert into t2 values (19700101000000+25);
-insert into t2 values (19700101000000+24);
-insert into t2 values (19700101000000+23);
-insert into t2 values (19700101000000+22);
-insert into t2 values (19700101000000+21);
-insert into t2 values (19700101000000+20);
-insert into t2 values (19700101000000+19);
-insert into t2 values (19700101000000+18);
-insert into t2 values (19700101000000+17);
-insert into t2 values (19700101000000+16);
-insert into t2 values (19700101000000+15);
-insert into t2 values (19700101000000+14);
-insert into t2 values (19700101000000+13);
-insert into t2 values (19700101000000+12);
-insert into t2 values (19700101000000+11);
-insert into t2 values (19700101000000+10);
-insert into t2 values (19700101000000+9);
-insert into t2 values (19700101000000+8);
-insert into t2 values (19700101000000+7);
-insert into t2 values (19700101000000+6);
-insert into t2 values (19700101000000+5);
-insert into t2 values (19700101000000+4);
-insert into t2 values (19700101000000+3);
-insert into t2 values (19700101000000+2);
-insert into t2 values (19700101000000+1);
 select count(*) from t2;
 count(*)
 59
@@ -1318,18 +891,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES LESS THAN (10) ENGINE = InnoDB,
  PARTITION quarter4 VALUES LESS THAN (13) ENGINE = InnoDB) */
 12 inserts;
-insert into t3 values (adddate(19700101000000,interval 12-1 month));
-insert into t3 values (adddate(19700101000000,interval 11-1 month));
-insert into t3 values (adddate(19700101000000,interval 10-1 month));
-insert into t3 values (adddate(19700101000000,interval 9-1 month));
-insert into t3 values (adddate(19700101000000,interval 8-1 month));
-insert into t3 values (adddate(19700101000000,interval 7-1 month));
-insert into t3 values (adddate(19700101000000,interval 6-1 month));
-insert into t3 values (adddate(19700101000000,interval 5-1 month));
-insert into t3 values (adddate(19700101000000,interval 4-1 month));
-insert into t3 values (adddate(19700101000000,interval 3-1 month));
-insert into t3 values (adddate(19700101000000,interval 2-1 month));
-insert into t3 values (adddate(19700101000000,interval 1-1 month));
 select count(*) from t3;
 count(*)
 12
@@ -1370,18 +931,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES IN (7,8,9) ENGINE = InnoDB,
  PARTITION quarter4 VALUES IN (10,11,12) ENGINE = InnoDB) */
 12 inserts;
-insert into t4 values (adddate(19700101000000,interval 12-1 month));
-insert into t4 values (adddate(19700101000000,interval 11-1 month));
-insert into t4 values (adddate(19700101000000,interval 10-1 month));
-insert into t4 values (adddate(19700101000000,interval 9-1 month));
-insert into t4 values (adddate(19700101000000,interval 8-1 month));
-insert into t4 values (adddate(19700101000000,interval 7-1 month));
-insert into t4 values (adddate(19700101000000,interval 6-1 month));
-insert into t4 values (adddate(19700101000000,interval 5-1 month));
-insert into t4 values (adddate(19700101000000,interval 4-1 month));
-insert into t4 values (adddate(19700101000000,interval 3-1 month));
-insert into t4 values (adddate(19700101000000,interval 2-1 month));
-insert into t4 values (adddate(19700101000000,interval 1-1 month));
 select count(*) from t4;
 count(*)
 12

=== modified file 'mysql-test/suite/parts/r/partition_datetime_myisam.result'
--- a/mysql-test/suite/parts/r/partition_datetime_myisam.result	2008-11-04 07:43:21 +0000
+++ b/mysql-test/suite/parts/r/partition_datetime_myisam.result	2009-10-28 07:52:34 +0000
@@ -60,65 +60,6 @@ a
 2020-12-31 10:11:12
 delete from t2;
 59 inserts;
-insert into t2 values (19710101000000+59);
-insert into t2 values (19710101000000+58);
-insert into t2 values (19710101000000+57);
-insert into t2 values (19710101000000+56);
-insert into t2 values (19710101000000+55);
-insert into t2 values (19710101000000+54);
-insert into t2 values (19710101000000+53);
-insert into t2 values (19710101000000+52);
-insert into t2 values (19710101000000+51);
-insert into t2 values (19710101000000+50);
-insert into t2 values (19710101000000+49);
-insert into t2 values (19710101000000+48);
-insert into t2 values (19710101000000+47);
-insert into t2 values (19710101000000+46);
-insert into t2 values (19710101000000+45);
-insert into t2 values (19710101000000+44);
-insert into t2 values (19710101000000+43);
-insert into t2 values (19710101000000+42);
-insert into t2 values (19710101000000+41);
-insert into t2 values (19710101000000+40);
-insert into t2 values (19710101000000+39);
-insert into t2 values (19710101000000+38);
-insert into t2 values (19710101000000+37);
-insert into t2 values (19710101000000+36);
-insert into t2 values (19710101000000+35);
-insert into t2 values (19710101000000+34);
-insert into t2 values (19710101000000+33);
-insert into t2 values (19710101000000+32);
-insert into t2 values (19710101000000+31);
-insert into t2 values (19710101000000+30);
-insert into t2 values (19710101000000+29);
-insert into t2 values (19710101000000+28);
-insert into t2 values (19710101000000+27);
-insert into t2 values (19710101000000+26);
-insert into t2 values (19710101000000+25);
-insert into t2 values (19710101000000+24);
-insert into t2 values (19710101000000+23);
-insert into t2 values (19710101000000+22);
-insert into t2 values (19710101000000+21);
-insert into t2 values (19710101000000+20);
-insert into t2 values (19710101000000+19);
-insert into t2 values (19710101000000+18);
-insert into t2 values (19710101000000+17);
-insert into t2 values (19710101000000+16);
-insert into t2 values (19710101000000+15);
-insert into t2 values (19710101000000+14);
-insert into t2 values (19710101000000+13);
-insert into t2 values (19710101000000+12);
-insert into t2 values (19710101000000+11);
-insert into t2 values (19710101000000+10);
-insert into t2 values (19710101000000+9);
-insert into t2 values (19710101000000+8);
-insert into t2 values (19710101000000+7);
-insert into t2 values (19710101000000+6);
-insert into t2 values (19710101000000+5);
-insert into t2 values (19710101000000+4);
-insert into t2 values (19710101000000+3);
-insert into t2 values (19710101000000+2);
-insert into t2 values (19710101000000+1);
 select count(*) from t2;
 count(*)
 59
@@ -206,18 +147,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES LESS THAN (10) ENGINE = MyISAM,
  PARTITION quarter4 VALUES LESS THAN (13) ENGINE = MyISAM) */
 12 inserts;
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 12-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 11-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 10-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 9-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 8-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 7-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 6-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 5-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 4-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 3-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 2-1 month));
-insert into t3 values (date_add('1970-01-01 00:00:00',interval 1-1 month));
 Warnings:
 Warning	1264	Out of range value for column 'a' at row 1
 select count(*) from t3;
@@ -260,18 +189,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES IN (7,8,9) ENGINE = MyISAM,
  PARTITION quarter4 VALUES IN (10,11,12) ENGINE = MyISAM) */
 12 inserts;
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 12-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 11-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 10-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 9-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 8-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 7-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 6-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 5-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 4-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 3-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 2-1 month));
-insert into t4 values (date_add('1970-01-01 00:00:00',interval 1-1 month));
 Warnings:
 Warning	1264	Out of range value for column 'a' at row 1
 select count(*) from t4;
@@ -354,90 +271,6 @@ a
 2020-12-31
 delete from t2;
 28 inserts;
-insert into t2 values (19700101+28-1);
-insert into t2 values (19700201+28-1);
-insert into t2 values (19700301+28-1);
-insert into t2 values (19700101+27-1);
-insert into t2 values (19700201+27-1);
-insert into t2 values (19700301+27-1);
-insert into t2 values (19700101+26-1);
-insert into t2 values (19700201+26-1);
-insert into t2 values (19700301+26-1);
-insert into t2 values (19700101+25-1);
-insert into t2 values (19700201+25-1);
-insert into t2 values (19700301+25-1);
-insert into t2 values (19700101+24-1);
-insert into t2 values (19700201+24-1);
-insert into t2 values (19700301+24-1);
-insert into t2 values (19700101+23-1);
-insert into t2 values (19700201+23-1);
-insert into t2 values (19700301+23-1);
-insert into t2 values (19700101+22-1);
-insert into t2 values (19700201+22-1);
-insert into t2 values (19700301+22-1);
-insert into t2 values (19700101+21-1);
-insert into t2 values (19700201+21-1);
-insert into t2 values (19700301+21-1);
-insert into t2 values (19700101+20-1);
-insert into t2 values (19700201+20-1);
-insert into t2 values (19700301+20-1);
-insert into t2 values (19700101+19-1);
-insert into t2 values (19700201+19-1);
-insert into t2 values (19700301+19-1);
-insert into t2 values (19700101+18-1);
-insert into t2 values (19700201+18-1);
-insert into t2 values (19700301+18-1);
-insert into t2 values (19700101+17-1);
-insert into t2 values (19700201+17-1);
-insert into t2 values (19700301+17-1);
-insert into t2 values (19700101+16-1);
-insert into t2 values (19700201+16-1);
-insert into t2 values (19700301+16-1);
-insert into t2 values (19700101+15-1);
-insert into t2 values (19700201+15-1);
-insert into t2 values (19700301+15-1);
-insert into t2 values (19700101+14-1);
-insert into t2 values (19700201+14-1);
-insert into t2 values (19700301+14-1);
-insert into t2 values (19700101+13-1);
-insert into t2 values (19700201+13-1);
-insert into t2 values (19700301+13-1);
-insert into t2 values (19700101+12-1);
-insert into t2 values (19700201+12-1);
-insert into t2 values (19700301+12-1);
-insert into t2 values (19700101+11-1);
-insert into t2 values (19700201+11-1);
-insert into t2 values (19700301+11-1);
-insert into t2 values (19700101+10-1);
-insert into t2 values (19700201+10-1);
-insert into t2 values (19700301+10-1);
-insert into t2 values (19700101+9-1);
-insert into t2 values (19700201+9-1);
-insert into t2 values (19700301+9-1);
-insert into t2 values (19700101+8-1);
-insert into t2 values (19700201+8-1);
-insert into t2 values (19700301+8-1);
-insert into t2 values (19700101+7-1);
-insert into t2 values (19700201+7-1);
-insert into t2 values (19700301+7-1);
-insert into t2 values (19700101+6-1);
-insert into t2 values (19700201+6-1);
-insert into t2 values (19700301+6-1);
-insert into t2 values (19700101+5-1);
-insert into t2 values (19700201+5-1);
-insert into t2 values (19700301+5-1);
-insert into t2 values (19700101+4-1);
-insert into t2 values (19700201+4-1);
-insert into t2 values (19700301+4-1);
-insert into t2 values (19700101+3-1);
-insert into t2 values (19700201+3-1);
-insert into t2 values (19700301+3-1);
-insert into t2 values (19700101+2-1);
-insert into t2 values (19700201+2-1);
-insert into t2 values (19700301+2-1);
-insert into t2 values (19700101+1-1);
-insert into t2 values (19700201+1-1);
-insert into t2 values (19700301+1-1);
 select count(*) from t2;
 count(*)
 84
@@ -550,18 +383,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES LESS THAN (10) ENGINE = MyISAM,
  PARTITION quarter4 VALUES LESS THAN (13) ENGINE = MyISAM) */
 12 inserts;
-insert into t3 values (adddate(19700101,interval 12-1 month));
-insert into t3 values (adddate(19700101,interval 11-1 month));
-insert into t3 values (adddate(19700101,interval 10-1 month));
-insert into t3 values (adddate(19700101,interval 9-1 month));
-insert into t3 values (adddate(19700101,interval 8-1 month));
-insert into t3 values (adddate(19700101,interval 7-1 month));
-insert into t3 values (adddate(19700101,interval 6-1 month));
-insert into t3 values (adddate(19700101,interval 5-1 month));
-insert into t3 values (adddate(19700101,interval 4-1 month));
-insert into t3 values (adddate(19700101,interval 3-1 month));
-insert into t3 values (adddate(19700101,interval 2-1 month));
-insert into t3 values (adddate(19700101,interval 1-1 month));
 select count(*) from t3;
 count(*)
 12
@@ -602,18 +423,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES IN (7,8,9) ENGINE = MyISAM,
  PARTITION quarter4 VALUES IN (10,11,12) ENGINE = MyISAM) */
 12 inserts;
-insert into t4 values (adddate(19700101,interval 12-1 month));
-insert into t4 values (adddate(19700101,interval 11-1 month));
-insert into t4 values (adddate(19700101,interval 10-1 month));
-insert into t4 values (adddate(19700101,interval 9-1 month));
-insert into t4 values (adddate(19700101,interval 8-1 month));
-insert into t4 values (adddate(19700101,interval 7-1 month));
-insert into t4 values (adddate(19700101,interval 6-1 month));
-insert into t4 values (adddate(19700101,interval 5-1 month));
-insert into t4 values (adddate(19700101,interval 4-1 month));
-insert into t4 values (adddate(19700101,interval 3-1 month));
-insert into t4 values (adddate(19700101,interval 2-1 month));
-insert into t4 values (adddate(19700101,interval 1-1 month));
 select count(*) from t4;
 count(*)
 12
@@ -694,65 +503,6 @@ a
 14:15:16
 delete from t2;
 59 inserts;
-insert into t2 values (000100+59);
-insert into t2 values (000100+58);
-insert into t2 values (000100+57);
-insert into t2 values (000100+56);
-insert into t2 values (000100+55);
-insert into t2 values (000100+54);
-insert into t2 values (000100+53);
-insert into t2 values (000100+52);
-insert into t2 values (000100+51);
-insert into t2 values (000100+50);
-insert into t2 values (000100+49);
-insert into t2 values (000100+48);
-insert into t2 values (000100+47);
-insert into t2 values (000100+46);
-insert into t2 values (000100+45);
-insert into t2 values (000100+44);
-insert into t2 values (000100+43);
-insert into t2 values (000100+42);
-insert into t2 values (000100+41);
-insert into t2 values (000100+40);
-insert into t2 values (000100+39);
-insert into t2 values (000100+38);
-insert into t2 values (000100+37);
-insert into t2 values (000100+36);
-insert into t2 values (000100+35);
-insert into t2 values (000100+34);
-insert into t2 values (000100+33);
-insert into t2 values (000100+32);
-insert into t2 values (000100+31);
-insert into t2 values (000100+30);
-insert into t2 values (000100+29);
-insert into t2 values (000100+28);
-insert into t2 values (000100+27);
-insert into t2 values (000100+26);
-insert into t2 values (000100+25);
-insert into t2 values (000100+24);
-insert into t2 values (000100+23);
-insert into t2 values (000100+22);
-insert into t2 values (000100+21);
-insert into t2 values (000100+20);
-insert into t2 values (000100+19);
-insert into t2 values (000100+18);
-insert into t2 values (000100+17);
-insert into t2 values (000100+16);
-insert into t2 values (000100+15);
-insert into t2 values (000100+14);
-insert into t2 values (000100+13);
-insert into t2 values (000100+12);
-insert into t2 values (000100+11);
-insert into t2 values (000100+10);
-insert into t2 values (000100+9);
-insert into t2 values (000100+8);
-insert into t2 values (000100+7);
-insert into t2 values (000100+6);
-insert into t2 values (000100+5);
-insert into t2 values (000100+4);
-insert into t2 values (000100+3);
-insert into t2 values (000100+2);
-insert into t2 values (000100+1);
 select count(*) from t2;
 count(*)
 59
@@ -840,65 +590,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES LESS THAN (46) ENGINE = MyISAM,
  PARTITION quarter4 VALUES LESS THAN (61) ENGINE = MyISAM) */
 59 inserts;
-insert into t3 values (100000+59);
-insert into t3 values (100000+58);
-insert into t3 values (100000+57);
-insert into t3 values (100000+56);
-insert into t3 values (100000+55);
-insert into t3 values (100000+54);
-insert into t3 values (100000+53);
-insert into t3 values (100000+52);
-insert into t3 values (100000+51);
-insert into t3 values (100000+50);
-insert into t3 values (100000+49);
-insert into t3 values (100000+48);
-insert into t3 values (100000+47);
-insert into t3 values (100000+46);
-insert into t3 values (100000+45);
-insert into t3 values (100000+44);
-insert into t3 values (100000+43);
-insert into t3 values (100000+42);
-insert into t3 values (100000+41);
-insert into t3 values (100000+40);
-insert into t3 values (100000+39);
-insert into t3 values (100000+38);
-insert into t3 values (100000+37);
-insert into t3 values (100000+36);
-insert into t3 values (100000+35);
-insert into t3 values (100000+34);
-insert into t3 values (100000+33);
-insert into t3 values (100000+32);
-insert into t3 values (100000+31);
-insert into t3 values (100000+30);
-insert into t3 values (100000+29);
-insert into t3 values (100000+28);
-insert into t3 values (100000+27);
-insert into t3 values (100000+26);
-insert into t3 values (100000+25);
-insert into t3 values (100000+24);
-insert into t3 values (100000+23);
-insert into t3 values (100000+22);
-insert into t3 values (100000+21);
-insert into t3 values (100000+20);
-insert into t3 values (100000+19);
-insert into t3 values (100000+18);
-insert into t3 values (100000+17);
-insert into t3 values (100000+16);
-insert into t3 values (100000+15);
-insert into t3 values (100000+14);
-insert into t3 values (100000+13);
-insert into t3 values (100000+12);
-insert into t3 values (100000+11);
-insert into t3 values (100000+10);
-insert into t3 values (100000+9);
-insert into t3 values (100000+8);
-insert into t3 values (100000+7);
-insert into t3 values (100000+6);
-insert into t3 values (100000+5);
-insert into t3 values (100000+4);
-insert into t3 values (100000+3);
-insert into t3 values (100000+2);
-insert into t3 values (100000+1);
 select count(*) from t3;
 count(*)
 59
@@ -986,65 +677,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES IN (31,32,33,34,35,36,37,38,39,40,41,42,43,44,45) ENGINE = MyISAM,
  PARTITION quarter4 VALUES IN (46,47,48,49,50,51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */
 59 inserts;
-insert into t4 values (100000+59);
-insert into t4 values (100000+58);
-insert into t4 values (100000+57);
-insert into t4 values (100000+56);
-insert into t4 values (100000+55);
-insert into t4 values (100000+54);
-insert into t4 values (100000+53);
-insert into t4 values (100000+52);
-insert into t4 values (100000+51);
-insert into t4 values (100000+50);
-insert into t4 values (100000+49);
-insert into t4 values (100000+48);
-insert into t4 values (100000+47);
-insert into t4 values (100000+46);
-insert into t4 values (100000+45);
-insert into t4 values (100000+44);
-insert into t4 values (100000+43);
-insert into t4 values (100000+42);
-insert into t4 values (100000+41);
-insert into t4 values (100000+40);
-insert into t4 values (100000+39);
-insert into t4 values (100000+38);
-insert into t4 values (100000+37);
-insert into t4 values (100000+36);
-insert into t4 values (100000+35);
-insert into t4 values (100000+34);
-insert into t4 values (100000+33);
-insert into t4 values (100000+32);
-insert into t4 values (100000+31);
-insert into t4 values (100000+30);
-insert into t4 values (100000+29);
-insert into t4 values (100000+28);
-insert into t4 values (100000+27);
-insert into t4 values (100000+26);
-insert into t4 values (100000+25);
-insert into t4 values (100000+24);
-insert into t4 values (100000+23);
-insert into t4 values (100000+22);
-insert into t4 values (100000+21);
-insert into t4 values (100000+20);
-insert into t4 values (100000+19);
-insert into t4 values (100000+18);
-insert into t4 values (100000+17);
-insert into t4 values (100000+16);
-insert into t4 values (100000+15);
-insert into t4 values (100000+14);
-insert into t4 values (100000+13);
-insert into t4 values (100000+12);
-insert into t4 values (100000+11);
-insert into t4 values (100000+10);
-insert into t4 values (100000+9);
-insert into t4 values (100000+8);
-insert into t4 values (100000+7);
-insert into t4 values (100000+6);
-insert into t4 values (100000+5);
-insert into t4 values (100000+4);
-insert into t4 values (100000+3);
-insert into t4 values (100000+2);
-insert into t4 values (100000+1);
 select count(*) from t4;
 count(*)
 59
@@ -1172,65 +804,6 @@ a
 2020-12-31 10:11:12
 delete from t2;
 59 inserts;
-insert into t2 values (19700101000000+59);
-insert into t2 values (19700101000000+58);
-insert into t2 values (19700101000000+57);
-insert into t2 values (19700101000000+56);
-insert into t2 values (19700101000000+55);
-insert into t2 values (19700101000000+54);
-insert into t2 values (19700101000000+53);
-insert into t2 values (19700101000000+52);
-insert into t2 values (19700101000000+51);
-insert into t2 values (19700101000000+50);
-insert into t2 values (19700101000000+49);
-insert into t2 values (19700101000000+48);
-insert into t2 values (19700101000000+47);
-insert into t2 values (19700101000000+46);
-insert into t2 values (19700101000000+45);
-insert into t2 values (19700101000000+44);
-insert into t2 values (19700101000000+43);
-insert into t2 values (19700101000000+42);
-insert into t2 values (19700101000000+41);
-insert into t2 values (19700101000000+40);
-insert into t2 values (19700101000000+39);
-insert into t2 values (19700101000000+38);
-insert into t2 values (19700101000000+37);
-insert into t2 values (19700101000000+36);
-insert into t2 values (19700101000000+35);
-insert into t2 values (19700101000000+34);
-insert into t2 values (19700101000000+33);
-insert into t2 values (19700101000000+32);
-insert into t2 values (19700101000000+31);
-insert into t2 values (19700101000000+30);
-insert into t2 values (19700101000000+29);
-insert into t2 values (19700101000000+28);
-insert into t2 values (19700101000000+27);
-insert into t2 values (19700101000000+26);
-insert into t2 values (19700101000000+25);
-insert into t2 values (19700101000000+24);
-insert into t2 values (19700101000000+23);
-insert into t2 values (19700101000000+22);
-insert into t2 values (19700101000000+21);
-insert into t2 values (19700101000000+20);
-insert into t2 values (19700101000000+19);
-insert into t2 values (19700101000000+18);
-insert into t2 values (19700101000000+17);
-insert into t2 values (19700101000000+16);
-insert into t2 values (19700101000000+15);
-insert into t2 values (19700101000000+14);
-insert into t2 values (19700101000000+13);
-insert into t2 values (19700101000000+12);
-insert into t2 values (19700101000000+11);
-insert into t2 values (19700101000000+10);
-insert into t2 values (19700101000000+9);
-insert into t2 values (19700101000000+8);
-insert into t2 values (19700101000000+7);
-insert into t2 values (19700101000000+6);
-insert into t2 values (19700101000000+5);
-insert into t2 values (19700101000000+4);
-insert into t2 values (19700101000000+3);
-insert into t2 values (19700101000000+2);
-insert into t2 values (19700101000000+1);
 select count(*) from t2;
 count(*)
 59
@@ -1318,18 +891,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES LESS THAN (10) ENGINE = MyISAM,
  PARTITION quarter4 VALUES LESS THAN (13) ENGINE = MyISAM) */
 12 inserts;
-insert into t3 values (adddate(19700101000000,interval 12-1 month));
-insert into t3 values (adddate(19700101000000,interval 11-1 month));
-insert into t3 values (adddate(19700101000000,interval 10-1 month));
-insert into t3 values (adddate(19700101000000,interval 9-1 month));
-insert into t3 values (adddate(19700101000000,interval 8-1 month));
-insert into t3 values (adddate(19700101000000,interval 7-1 month));
-insert into t3 values (adddate(19700101000000,interval 6-1 month));
-insert into t3 values (adddate(19700101000000,interval 5-1 month));
-insert into t3 values (adddate(19700101000000,interval 4-1 month));
-insert into t3 values (adddate(19700101000000,interval 3-1 month));
-insert into t3 values (adddate(19700101000000,interval 2-1 month));
-insert into t3 values (adddate(19700101000000,interval 1-1 month));
 select count(*) from t3;
 count(*)
 12
@@ -1370,18 +931,6 @@ SUBPARTITIONS 3
  PARTITION quarter3 VALUES IN (7,8,9) ENGINE = MyISAM,
  PARTITION quarter4 VALUES IN (10,11,12) ENGINE = MyISAM) */
 12 inserts;
-insert into t4 values (adddate(19700101000000,interval 12-1 month));
-insert into t4 values (adddate(19700101000000,interval 11-1 month));
-insert into t4 values (adddate(19700101000000,interval 10-1 month));
-insert into t4 values (adddate(19700101000000,interval 9-1 month));
-insert into t4 values (adddate(19700101000000,interval 8-1 month));
-insert into t4 values (adddate(19700101000000,interval 7-1 month));
-insert into t4 values (adddate(19700101000000,interval 6-1 month));
-insert into t4 values (adddate(19700101000000,interval 5-1 month));
-insert into t4 values (adddate(19700101000000,interval 4-1 month));
-insert into t4 values (adddate(19700101000000,interval 3-1 month));
-insert into t4 values (adddate(19700101000000,interval 2-1 month));
-insert into t4 values (adddate(19700101000000,interval 1-1 month));
 select count(*) from t4;
 count(*)
 12

=== modified file 'mysql-test/suite/parts/t/part_supported_sql_func_innodb.test'
--- a/mysql-test/suite/parts/t/part_supported_sql_func_innodb.test	2007-11-20 15:04:07 +0000
+++ b/mysql-test/suite/parts/t/part_supported_sql_func_innodb.test	2009-10-28 08:08:54 +0000
@@ -25,6 +25,8 @@
 let $debug= 0;
 let $do_long_tests= 1;
 
+#
+--source include/big_test.inc
 # The server must support partitioning.
 --source include/have_partition.inc
 

=== modified file 'mysql-test/suite/parts/t/partition_alter1_1_2_innodb.test'
--- a/mysql-test/suite/parts/t/partition_alter1_1_2_innodb.test	2009-10-09 13:08:09 +0000
+++ b/mysql-test/suite/parts/t/partition_alter1_1_2_innodb.test	2009-10-28 08:08:54 +0000
@@ -43,6 +43,8 @@ SET @max_row = 20;
 let $more_trigger_tests= 0;
 let $more_pk_ui_tests= 0;
 
+# Slow running test
+--source include/big_test.inc
 # This test relies on connecting externally from mysqltest, doesn't
 # work with embedded.
 --source include/not_embedded.inc

=== modified file 'mysql-test/suite/parts/t/partition_alter4_myisam.test'
--- a/mysql-test/suite/parts/t/partition_alter4_myisam.test	2009-10-09 13:08:09 +0000
+++ b/mysql-test/suite/parts/t/partition_alter4_myisam.test	2009-10-28 08:08:54 +0000
@@ -40,6 +40,8 @@ SET @max_row = 20;
 let $more_trigger_tests= 0;
 let $more_pk_ui_tests= 0;
 
+# Slow running test
+--source include/big_test.inc
 # This test relies on connecting externally from mysqltest, doesn't
 # work with embedded.
 --source include/not_embedded.inc

=== modified file 'mysql-test/t/almost_full.test'
--- a/mysql-test/t/almost_full.test	2007-11-12 09:00:22 +0000
+++ b/mysql-test/t/almost_full.test	2009-10-28 07:52:34 +0000
@@ -11,11 +11,13 @@ CREATE TABLE t1 (a int auto_increment pr
 
 --disable_query_log
 let $1= 303;
+begin;
 while ($1)
 {
   INSERT INTO t1 SET b=repeat('a',200);
   dec $1;
 }
+commit;
 --enable_query_log
 
 DELETE FROM t1 WHERE a=1 or a=5;

=== modified file 'mysql-test/t/alter_table.test'
--- a/mysql-test/t/alter_table.test	2009-06-07 10:05:19 +0000
+++ b/mysql-test/t/alter_table.test	2009-10-28 07:52:34 +0000
@@ -121,11 +121,15 @@ alter table t1 disable keys;
 show keys from t1;
 #let $1=10000;
 let $1=10;
+--disable_query_log
+begin;
 while ($1)
 {
  eval insert into t1 values($1,RAND()*1000,RAND()*1000,RAND());
  dec $1;
 }
+commit;
+--enable_query_log
 alter table t1 enable keys;
 show keys from t1;
 drop table t1;
@@ -144,11 +148,15 @@ drop table t1;
 
 create table t1 (a int, b int);
 let $1=100;
+--disable_query_log
+begin;
 while ($1)
 {
  eval insert into t1 values(1,$1), (2,$1), (3, $1);
  dec $1;
 }
+commit;
+--enable_query_log
 alter table t1 add unique (a,b), add key (b);
 show keys from t1;
 analyze table t1;
@@ -966,12 +974,14 @@ DROP TABLE t1;
 create table t1(f1 int not null, f2 int not null, key  (f1), key (f2));
 let $count= 50;
 --disable_query_log
+begin;
 while ($count)
 {
   EVAL insert into t1 values (1,1),(1,1),(1,1),(1,1),(1,1);
   EVAL insert into t1 values (2,2),(2,2),(2,2),(2,2),(2,2);
   dec $count ;
 }
+commit;
 --enable_query_log
 
 select index_length into @unpaked_keys_size from

=== modified file 'mysql-test/t/archive.test'
--- a/mysql-test/t/archive.test	2009-03-26 14:27:34 +0000
+++ b/mysql-test/t/archive.test	2009-10-28 07:52:34 +0000
@@ -1576,11 +1576,13 @@ CREATE TABLE t1(a VARCHAR(510)) ENGINE =
 
 let $bug31036=41;
 --disable_query_log
+begin;
 while($bug31036)
 {
   INSERT INTO t1(a) VALUES (REPEAT('a', 510));
   dec $bug31036;
 }
+commit;
 --enable_query_log
 INSERT INTO t1(a) VALUES ('');
 

=== modified file 'mysql-test/t/bench_count_distinct.test'
--- a/mysql-test/t/bench_count_distinct.test	2005-07-28 00:22:47 +0000
+++ b/mysql-test/t/bench_count_distinct.test	2009-10-28 07:52:34 +0000
@@ -7,14 +7,16 @@ drop table if exists t1;
 --enable_warnings
 create table t1(n int not null, key(n)) delay_key_write = 1;
 let $1=100;
-disable_query_log;
+--disable_query_log
+begin;
 while ($1)
 {
  eval insert into t1 values($1);
  eval insert into t1 values($1);
  dec $1;
 }
-enable_query_log;
+commit;
+--enable_query_log
 select count(distinct n) from t1;
 explain extended select count(distinct n) from t1;
 drop table t1;

=== modified file 'mysql-test/t/change_user.test'
--- a/mysql-test/t/change_user.test	2009-02-12 14:08:56 +0000
+++ b/mysql-test/t/change_user.test	2009-10-28 07:52:34 +0000
@@ -57,13 +57,13 @@ FLUSH STATUS;
 --disable_query_log
 
 let $i = 100;
-
+begin;
 while ($i)
 {
   dec $i;
-
   SELECT 1;
 }
+commit;
 
 --enable_query_log
 --enable_result_log

=== modified file 'mysql-test/t/check.test'
--- a/mysql-test/t/check.test	2009-02-09 21:00:15 +0000
+++ b/mysql-test/t/check.test	2009-10-28 07:52:34 +0000
@@ -12,13 +12,15 @@ drop view if exists v1;
 # Add a lot of keys to slow down check
 create table t1(n int not null, key(n), key(n), key(n), key(n));
 let $1=10000;
-disable_query_log;
+--disable_query_log
+begin;
 while ($1)
 {
  eval insert into t1 values ($1);
  dec $1;
 }
-enable_query_log;
+commit;
+--enable_query_log
 send check table t1 extended;
 connection con2;
 insert into t1 values (200000);

=== modified file 'mysql-test/t/count_distinct2.test'
--- a/mysql-test/t/count_distinct2.test	2005-07-28 14:09:54 +0000
+++ b/mysql-test/t/count_distinct2.test	2009-10-28 07:52:34 +0000
@@ -51,13 +51,15 @@ drop table t1;
 # test the conversion from tree to MyISAM
 create table t1 (n int default NULL);
 let $1=5000;
-disable_query_log;
+--disable_query_log
+begin;
 while ($1)
 {
  eval insert into t1 values($1);
  dec $1;
 }
-enable_query_log;
+commit;
+--enable_query_log
 
 flush status;
 select count(distinct n) from t1;
@@ -67,13 +69,15 @@ drop table t1;
 # Test use of MyISAM tmp tables
 create table t1 (s text);
 let $1=5000;
-disable_query_log;
+--disable_query_log
+begin;
 while ($1)
 {
  eval insert into t1 values('$1');
  dec $1;
 }
-enable_query_log;
+commit;
+--enable_query_log
 flush status;
 select count(distinct s) from t1;
 show status like 'Created_tmp_disk_tables';

=== modified file 'mysql-test/t/count_distinct3.test'
--- a/mysql-test/t/count_distinct3.test	2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/count_distinct3.test	2009-10-28 07:52:34 +0000
@@ -14,6 +14,7 @@ CREATE TABLE t1 (id INTEGER, grp TINYINT
 --disable_query_log
 SET @rnd_max= 2147483647;
 let $1 = 1000;
+begin;
 while ($1)
 {
   SET @rnd= RAND();
@@ -23,7 +24,7 @@ while ($1)
   INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev); 
   dec $1;
 }
-
+commit;
 # We increase the size of t1 here.
 SET @orig_myisam_sort_buffer_size = @@session.myisam_sort_buffer_size;
 SET session myisam_sort_buffer_size=20000000;

=== modified file 'mysql-test/t/ctype_euckr.test'
--- a/mysql-test/t/ctype_euckr.test	2009-07-24 06:27:23 +0000
+++ b/mysql-test/t/ctype_euckr.test	2009-10-28 07:52:34 +0000
@@ -77,11 +77,13 @@ DROP TABLE t1;
 CREATE TABLE t1 (a binary(1), key(a));
 --disable_query_log
 let $1=255;
+begin;
 while($1)
 {
   eval INSERT INTO t1 VALUES (unhex(hex($1)));
   dec $1;
 }
+commit;
 --enable_query_log
 
 CREATE TABLE t2 (s VARCHAR(4), a VARCHAR(1) CHARACTER SET euckr);

=== modified file 'mysql-test/t/derived.test'
--- a/mysql-test/t/derived.test	2009-07-11 18:44:29 +0000
+++ b/mysql-test/t/derived.test	2009-10-28 07:52:34 +0000
@@ -45,14 +45,16 @@ select * from (select * from t1 where t1
 explain select * from (select t1.*, t2.a as t2a from t1,t2 where t1.a=t2.a) t1;
 drop table t1, t2;
 create table t1(a int not null, t char(8), index(a));
-disable_query_log;
+--disable_query_log
+begin;
 let $1 = 10000;
 while ($1)
  {
   eval insert into t1 values ($1,'$1'); 
   dec $1;
  }
-enable_query_log;
+commit;
+--enable_query_log
 SELECT * FROM (SELECT * FROM t1) as b ORDER BY a  ASC LIMIT 0,20;
 explain select count(*) from t1 as tt1, (select * from t1) as tt2;
 drop table t1;

=== modified file 'mysql-test/t/events_time_zone.test'
--- a/mysql-test/t/events_time_zone.test	2009-10-06 18:15:09 +0000
+++ b/mysql-test/t/events_time_zone.test	2009-10-28 07:52:34 +0000
@@ -118,6 +118,7 @@ INSERT INTO mysql.time_zone_transition_t
 let $transition_unix_time= `SELECT @unix_time`;
 let $count= 30;
 --disable_query_log
+begin;
 while ($count)
 {
   eval INSERT INTO mysql.time_zone_transition
@@ -126,6 +127,7 @@ while ($count)
   let $transition_unix_time= `SELECT $transition_unix_time + @step3`;
   dec $count;
 }
+commit;
 --enable_query_log
 let $tz_name = `SELECT CONCAT('b16420_a',UNIX_TIMESTAMP())`;
 --replace_result $tz_name <TZ_NAME_1>

=== modified file 'mysql-test/t/fulltext2.test'
--- a/mysql-test/t/fulltext2.test	2007-07-06 18:39:55 +0000
+++ b/mysql-test/t/fulltext2.test	2009-10-28 07:52:34 +0000
@@ -18,6 +18,7 @@ CREATE TABLE t1 (
 
 # two-level entry, second-level tree with depth 2
 --disable_query_log
+begin;
 let $1=260;
 while ($1)
 {
@@ -40,6 +41,7 @@ while ($1)
   eval insert t1 (a) values ('aaayyy');
   dec $1;
 }
+commit;
 --enable_query_log
 
 # converting to two-level
@@ -113,6 +115,7 @@ CREATE TABLE t1 (
 # two-level entry, second-level tree with depth 2
 --disable_query_log
 let $1=260;
+begin;
 while ($1)
 {
   eval insert t1 (a) values ('aaaxxx');
@@ -130,6 +133,7 @@ while ($1)
   eval insert t1 (a) values ('aaayyy');
   dec $1;
 }
+commit;
 --enable_query_log
 
 select count(*) from t1 where match a against ('aaaxxx');

=== modified file 'mysql-test/t/func_misc.test'
--- a/mysql-test/t/func_misc.test	2009-06-11 16:21:32 +0000
+++ b/mysql-test/t/func_misc.test	2009-10-28 07:52:34 +0000
@@ -213,11 +213,15 @@ start_ts DATETIME, end_ts DATETIME,
 start_cached INTEGER, end_cached INTEGER);
 CREATE TABLE t1 (f1 BIGINT);
 let $num = `SELECT @row_count`;
+--disable_query_log
+begin;
 while ($num)
 {
    INSERT INTO t1 VALUES (1);
    dec $num;
 }
+commit;
+--enable_query_log
 
 let $loops = 4;
 let $num = $loops;

=== modified file 'mysql-test/t/gis-rtree.test'
--- a/mysql-test/t/gis-rtree.test	2009-07-10 23:12:13 +0000
+++ b/mysql-test/t/gis-rtree.test	2009-10-28 07:52:34 +0000
@@ -17,12 +17,16 @@ SHOW CREATE TABLE t1;
 
 let $1=150;
 let $2=150;
+--disable_query_log
+begin;
 while ($1)
 {
   eval INSERT INTO t1 (g) VALUES (GeomFromText('LineString($1 $1, $2 $2)'));
   dec $1;
   inc $2;
 }
+commit;
+--enable_query_log
 
 SELECT count(*) FROM t1;
 EXPLAIN SELECT fid, AsText(g) FROM t1 WHERE Within(g, GeomFromText('Polygon((140 140,160 140,160 160,140 160,140 140))'));
@@ -35,6 +39,8 @@ CREATE TABLE t2 (
   g GEOMETRY NOT NULL
 ) ENGINE=MyISAM;
 
+--disable_query_log
+begin;
 let $1=10;
 while ($1)
 {
@@ -46,6 +52,8 @@ while ($1)
   }
   dec $1;
 }
+commit;
+--enable_query_log
 
 ALTER TABLE t2 ADD SPATIAL KEY(g);
 SHOW CREATE TABLE t2;
@@ -55,6 +63,8 @@ EXPLAIN SELECT fid, AsText(g) FROM t2 WH
 SELECT fid, AsText(g) FROM t2 WHERE Within(g, 
   GeomFromText('Polygon((40 40,60 40,60 60,40 60,40 40))'));
 
+--disable_query_log
+begin;
 let $1=10;
 while ($1)
 {
@@ -67,6 +77,8 @@ while ($1)
   }
   dec $1;
 }
+commit;
+--enable_query_log
 
 DROP TABLE t2;
 

=== modified file 'mysql-test/t/heap.test'
--- a/mysql-test/t/heap.test	2007-06-06 17:57:07 +0000
+++ b/mysql-test/t/heap.test	2009-10-28 07:52:34 +0000
@@ -234,7 +234,8 @@ drop table t1,t2,t3;
 #
 create table t1 (v varchar(10), c char(10), t varchar(50), key(v), key(c), key(t(10)));
 show create table t1;
-disable_query_log;
+--disable_query_log
+begin;
 let $1=10;
 while ($1)
 {
@@ -248,7 +249,9 @@ while ($1)
   }
   dec $1;
 }
-enable_query_log;
+commit;
+--enable_query_log
+
 select count(*) from t1;
 insert into t1 values(concat('a',char(1)),concat('a',char(1)),concat('a',char(1)));
 select count(*) from t1 where v='a';
@@ -318,7 +321,8 @@ drop table t1;
 
 create table t1 (v varchar(10), c char(10), t varchar(50), key using btree (v), key using btree (c), key using btree (t(10)));
 show create table t1;
-disable_query_log;
+--disable_query_log
+begin;
 let $1=10;
 while ($1)
 {
@@ -332,7 +336,8 @@ while ($1)
   }
   dec $1;
 }
-enable_query_log;
+commit;
+--enable_query_log
 select count(*) from t1;
 insert into t1 values(concat('a',char(1)),concat('a',char(1)),concat('a',char(1)));
 select count(*) from t1 where v='a';

=== modified file 'mysql-test/t/innodb_xtradb_bug317074.test'
--- a/mysql-test/t/innodb_xtradb_bug317074.test	2009-09-18 19:27:04 +0000
+++ b/mysql-test/t/innodb_xtradb_bug317074.test	2009-10-28 07:52:34 +0000
@@ -1,4 +1,3 @@
---source include/big_test.inc
 --source include/have_innodb.inc
 
 SET @old_innodb_file_format=@@innodb_file_format;
@@ -7,16 +6,16 @@ SET @old_innodb_file_format_check=@@inno
 SET GLOBAL innodb_file_format='Barracuda';
 SET GLOBAL innodb_file_per_table=ON;
 
--- disable_query_log
--- disable_result_log
-
+--disable_warnings
 DROP TABLE IF EXISTS `test1`;
+--enable_warnings
 CREATE TABLE IF NOT EXISTS `test1` (
  `a` int primary key auto_increment,
  `b` int default 0,
  `c` char(100) default 'testtest'
 ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
 
+set autocommit=0;
 delimiter |;
 CREATE PROCEDURE insert_many(p1 int)
 BEGIN
@@ -26,14 +25,18 @@ REPEAT
   insert into test1 set b=1;
   SET @x = @x + 1;
   SET @y = @y + 1;
-  IF @y >= 100 THEN
+  IF @y >= 1000 THEN
     commit;
     SET @y = 0;
   END IF;
 UNTIL @x >= p1 END REPEAT;
 END|
 delimiter ;|
+--disable_query_log
+--disable_result_log
 call insert_many(100000);
+--enable_query_log
+--enable_result_log
 DROP PROCEDURE insert_many;
 
 # The bug is hangup at the following statement

=== modified file 'mysql-test/t/insert.test'
--- a/mysql-test/t/insert.test	2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/insert.test	2009-10-28 07:52:34 +0000
@@ -151,7 +151,8 @@ drop table t1;
 create table t1(id1 int not null auto_increment primary key, t char(12));
 create table t2(id2 int not null, t char(12));
 create table t3(id3 int not null, t char(12), index(id3));
-disable_query_log;
+--disable_query_log
+begin;
 let $1 = 100;
 while ($1)
  {
@@ -170,7 +171,9 @@ while ($1)
    }
   dec $1;
  }
-enable_query_log;
+commit;
+--enable_query_log
+
 select count(*) from t2;
 insert into  t2 select t1.* from t1, t2 t, t3 where  t1.id1 = t.id2 and t.id2 = t3.id3;
 select count(*) from t2;

=== modified file 'mysql-test/t/kill.test'
--- a/mysql-test/t/kill.test	2008-03-13 17:54:29 +0000
+++ b/mysql-test/t/kill.test	2009-10-28 07:52:34 +0000
@@ -67,12 +67,14 @@ connection conn1;
 
 -- disable_result_log
 -- disable_query_log
+begin;
 let $1 = 4096;
 while ($1)
 {
   eval insert into t1 values ($1);
   dec $1;
 }
+commit;
 -- enable_query_log
 -- enable_result_log
 
@@ -265,6 +267,8 @@ connection con1;
 let $ID= `select connection_id()`;
 let $tab_count= 40;
 
+--disable_query_log
+begin;
 let $i= $tab_count;
 while ($i)
 {
@@ -272,6 +276,8 @@ while ($i)
   eval INSERT INTO t$i VALUES (1),(2),(3),(4),(5),(6),(7);
   dec $i ;
 }
+commit;
+--enable_query_log
 set session optimizer_search_depth=0;
 
 let $i=$tab_count;

=== modified file 'mysql-test/t/merge.test'
--- a/mysql-test/t/merge.test	2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/merge.test	2009-10-28 07:52:34 +0000
@@ -1274,6 +1274,8 @@ DROP TABLE t1, t2, t3;
 CREATE TABLE t1 (id INTEGER, grp TINYINT, id_rev INTEGER);
 SET @rnd_max= 2147483647;
 let $1 = 10;
+--disable_query_log
+begin;
 while ($1)
 {
   SET @rnd= RAND();
@@ -1283,6 +1285,8 @@ while ($1)
   INSERT INTO t1 (id, grp, id_rev) VALUES (@id, @grp, @id_rev); 
   dec $1;
 }
+commit;
+--enable_query_log
 set @@read_buffer_size=2*1024*1024;
 CREATE TABLE t2 SELECT * FROM t1;
 INSERT INTO t1 (id, grp, id_rev) SELECT id, grp, id_rev FROM t2;

=== modified file 'mysql-test/t/multi_update.test'
--- a/mysql-test/t/multi_update.test	2009-02-09 21:00:15 +0000
+++ b/mysql-test/t/multi_update.test	2009-10-28 07:52:34 +0000
@@ -20,7 +20,8 @@ delete from mysql.user where user=_binar
 create table t1(id1 int not null auto_increment primary key, t char(12));
 create table t2(id2 int not null, t char(12));
 create table t3(id3 int not null, t char(12), index(id3));
-disable_query_log;
+--disable_query_log
+begin;
 let $1 = 100;
 while ($1)
  {
@@ -39,7 +40,8 @@ while ($1)
    }
   dec $1;
  }
-enable_query_log;
+commit;
+--enable_query_log
 
 select count(*) from t1 where id1 > 95;
 select count(*) from t2 where id2 > 95;
@@ -75,7 +77,8 @@ drop table t1,t2,t3;
 
 create table t1(id1 int not null  primary key, t varchar(100)) pack_keys = 1;
 create table t2(id2 int not null, t varchar(100), index(id2)) pack_keys = 1;
-disable_query_log;
+--disable_query_log
+begin;
 let $1 = 1000;
 while ($1)
  {
@@ -88,7 +91,8 @@ while ($1)
    }
   dec $1;
  }
-enable_query_log;
+commit;
+--enable_query_log
 delete t1  from t1,t2 where t1.id1 = t2.id2 and t1.id1 > 500;
 drop table t1,t2;
 

=== modified file 'mysql-test/t/multi_update2.test'
--- a/mysql-test/t/multi_update2.test	2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/multi_update2.test	2009-10-28 07:52:34 +0000
@@ -48,13 +48,14 @@ CREATE TABLE t1 ( a INT NOT NULL, b INT 
 INSERT INTO t1 VALUES (1,1),(2,2),(3,3),(4,4);
 let $1=19;
 set @d=4;
+begin;
 while ($1)
 {
   eval INSERT INTO t1 SELECT a+@d,b+@d FROM t1;
   eval SET @d=@d*2;
   dec $1;
 }
-
+commit;
 --enable_query_log
 ALTER TABLE t1 ADD INDEX i1(a);
 DELETE FROM t1 WHERE a > 2000000;

=== modified file 'mysql-test/t/myisam.test'
--- a/mysql-test/t/myisam.test	2009-09-18 01:04:43 +0000
+++ b/mysql-test/t/myisam.test	2009-10-28 07:52:34 +0000
@@ -33,7 +33,8 @@ drop table t1;
 create table t1 (a tinyint not null auto_increment, b blob not null, primary key (a));
 
 let $1=100;
-disable_query_log;
+--disable_query_log
+begin;
 --disable_warnings
 SET SQL_WARNINGS=0;
 while ($1)
@@ -41,9 +42,10 @@ while ($1)
   eval insert into t1 (b) values(repeat(char(65+$1),65550-$1));
   dec $1;
 }
+commit;
 SET SQL_WARNINGS=1;
 --enable_warnings
-enable_query_log;
+--enable_query_log
 check table t1;
 repair table t1;
 delete from t1 where (a & 1);
@@ -380,14 +382,16 @@ check table t1;
 # check updating with keys
 #
 
-disable_query_log;
+--disable_query_log
+begin;
 let $1 = 100;
 while ($1)
 {
   eval insert into t1 (b) values (repeat(char(($1 & 32)+65), $1));
   dec $1;
 }
-enable_query_log;
+commit;
+--enable_query_log
 update t1 set b=repeat(left(b,1),255) where a between 1 and 5;
 update t1 set b=repeat(left(b,1),10) where a between 32 and 43;
 update t1 set b=repeat(left(b,1),2) where a between 64 and 66;
@@ -551,11 +555,13 @@ create table t2 (a int);
 let $i=1000;
 set @@rand_seed1=31415926,@@rand_seed2=2718281828;
 --disable_query_log
+begin;
 while ($i)
 {
   dec $i;
   insert t2 values (rand()*100000);
 }
+commit;
 --enable_query_log
 insert t1 select * from t2;
 show keys from t1;
@@ -1360,11 +1366,13 @@ CREATE TABLE t1 (
 --disable_query_log
 let $count= 100;
 --echo # Insert $count rows. Query log disabled.
+begin;
 while ($count)
 {
   INSERT INTO t1 VALUES ('a', 'b');
   dec $count;
 }
+commit;
 --enable_query_log
 #
 # Change most of the rows into long character values with > 127 characters.
@@ -1444,11 +1452,13 @@ CREATE TABLE t1 (
 --disable_query_log
 let $count= 100;
 --echo # Insert $count rows. Query log disabled.
+begin;
 while ($count)
 {
   INSERT INTO t1 VALUES ('a', 'b');
   dec $count;
 }
+commit;
 --enable_query_log
 #
 # Change most of the rows into long character values with > 42 characters.

=== modified file 'mysql-test/t/myisam_debug.test'
--- a/mysql-test/t/myisam_debug.test	2009-05-04 09:05:16 +0000
+++ b/mysql-test/t/myisam_debug.test	2009-10-28 07:52:34 +0000
@@ -24,11 +24,15 @@ CREATE TABLE `t2` (
 INSERT INTO t2 (id) VALUES (123);
 
 let $i = 10; 
+--disable_query_log
+begin;
 while ($i)
 {
   INSERT INTO t2 (id) SELECT id  FROM t2; 
   dec $i; 
 }
+commit;
+--enable_query_log
 
 --echo # Switch to insert Connection
 CONNECTION insertConn;

=== modified file 'mysql-test/t/myisampack.test'
--- a/mysql-test/t/myisampack.test	2009-04-07 11:36:15 +0000
+++ b/mysql-test/t/myisampack.test	2009-10-28 07:52:34 +0000
@@ -69,11 +69,13 @@ CREATE TABLE  t1(f1 VARCHAR(200), f2 TEX
 INSERT INTO  t1 VALUES ('foo', 'foo1'), ('bar', 'bar1');
 let $i=9;
 --disable_query_log
+begin;
 while ($i)
 {
  INSERT INTO t1 SELECT * FROM t1; 
  dec $i; 
 }
+commit;
 --enable_query_log
 FLUSH TABLE t1; 
 --echo # Compress the table using MYISAMPACK tool

=== modified file 'mysql-test/t/order_by.test'
--- a/mysql-test/t/order_by.test	2009-08-07 11:51:40 +0000
+++ b/mysql-test/t/order_by.test	2009-10-28 07:52:34 +0000
@@ -374,14 +374,16 @@ DROP TABLE t1;
 #
 
 create table t1(id int not null auto_increment primary key, t char(12));
-disable_query_log;
+--disable_query_log
+begin;
 let $1 = 1000;
 while ($1)
  {
   eval insert into t1(t) values ('$1'); 
   dec $1;
  }
-enable_query_log;
+commit;
+--enable_query_log
 explain select id,t from t1 order by id;
 explain select id,t from t1 force index (primary) order by id;
 drop table t1;

=== modified file 'mysql-test/t/order_fill_sortbuf.test'
--- a/mysql-test/t/order_fill_sortbuf.test	2005-07-28 00:22:47 +0000
+++ b/mysql-test/t/order_fill_sortbuf.test	2009-10-28 07:52:34 +0000
@@ -12,13 +12,15 @@ CREATE TABLE `t1` (
   `id2` int(11) NOT NULL default '0',
   `id3` int(11) NOT NULL default '0');
 let $1=4000;
-disable_query_log;
+--disable_query_log
+begin;
 while ($1)
  {
    eval insert into t1 (id,id2,id3) values ($1,$1,$1);
    dec $1;
  }
-enable_query_log;
+commit;
+--enable_query_log
 create table t2 select id2 from t1 order by id3;
 select count(*) from t2;
 drop table t1,t2;

=== modified file 'mysql-test/t/partition.test'
--- a/mysql-test/t/partition.test	2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/partition.test	2009-10-28 07:52:34 +0000
@@ -1680,6 +1680,7 @@ create table t1
 
 insert into t1 values (null,null);
 --disable_query_log
+begin;
 let $cnt= 1000;
 while ($cnt)
 {
@@ -1687,6 +1688,7 @@ while ($cnt)
   update t1 set s2 = 2;
   dec $cnt;
 }
+commit;
 --enable_query_log
 
 drop table t1;
@@ -1804,11 +1806,13 @@ CREATE TABLE t1(id MEDIUMINT NOT NULL AU
                     PARTITION pa11 values less than MAXVALUE);
 --disable_query_log
 let $n= 15;
+begin;
 while ($n)
 {
   insert into t1 (user) values ('mysql');
   dec $n;
 }
+commit;
 --enable_query_log
 show create table t1;
 drop table t1;

=== modified file 'mysql-test/t/partition_archive.test'
--- a/mysql-test/t/partition_archive.test	2007-12-06 18:17:42 +0000
+++ b/mysql-test/t/partition_archive.test	2009-10-28 07:52:34 +0000
@@ -94,11 +94,13 @@ CREATE TABLE t1(id MEDIUMINT NOT NULL AU
 
 --disable_query_log
 let $n= 100;
+begin;
 while ($n)
 {
   insert into t1 (f1) values (repeat('a',25));
   dec $n;
 }
+commit;
 --enable_query_log
 
 show create table t1;

=== modified file 'mysql-test/t/select_found.test'
--- a/mysql-test/t/select_found.test	2005-07-28 00:22:47 +0000
+++ b/mysql-test/t/select_found.test	2009-10-28 07:52:34 +0000
@@ -54,7 +54,8 @@ CREATE TABLE t2 (
   UNIQUE KEY e_n (email,name)
 );
 
-disable_query_log;
+--disable_query_log
+begin;
 let $1=200;
 let $2=0;
 while ($1) 
@@ -63,7 +64,8 @@ while ($1) 
   eval INSERT INTO t2 VALUES ($2,'name$2','email$2');
   dec $1;
 }
-enable_query_log;
+commit;
+--enable_query_log
 
 EXPLAIN SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1  ON kid = t2.id WHERE t1.id IS NULL LIMIT 10;
 SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1  ON kid = t2.id WHERE t1.id IS NULL LIMIT 10;

=== modified file 'mysql-test/t/sp-big.test'
--- a/mysql-test/t/sp-big.test	2005-12-07 14:01:17 +0000
+++ b/mysql-test/t/sp-big.test	2009-10-28 07:52:34 +0000
@@ -43,11 +43,13 @@ create table t2 like t1;
 let $1=8;
 --disable_query_log
 --disable_result_log
+begin;
 while ($1)
 {
   eval insert into t1 select * from t1;
   dec $1;
 }
+commit;
 --enable_result_log
 --enable_query_log
 select count(*) from t1;

=== modified file 'mysql-test/t/subselect.test'
--- a/mysql-test/t/subselect.test	2009-09-07 20:50:10 +0000
+++ b/mysql-test/t/subselect.test	2009-10-28 07:52:34 +0000
@@ -811,7 +811,8 @@ create table t1 (a int, b int, index a (
 create table t2 (a int, index a (a));
 create table t3 (a int, b int, index a (a));
 insert into t1 values (1,10), (2,20), (3,30), (4,40);
-disable_query_log;
+--disable_query_log
+begin;
 # making table large enough
 let $1 = 10000;
 while ($1)
@@ -819,7 +820,8 @@ while ($1)
   eval insert into t1 values (rand()*100000+200,rand()*100000);
   dec $1;
  }
-enable_query_log;
+commit;
+--enable_query_log
 insert into t2 values (2), (3), (4), (5);
 insert into t3 values (10,3), (20,4), (30,5);
 select * from t2 where t2.a in (select a from t1);
@@ -2607,7 +2609,8 @@ CREATE TABLE t1 (a int, b int auto_incre
 CREATE TABLE t2 (x int auto_increment, y int, z int,
                  PRIMARY KEY (x), FOREIGN KEY (y) REFERENCES t1 (b));
 
-disable_query_log;
+--disable_query_log
+begin;
 let $1=3000;
 while ($1)
 {
@@ -2621,7 +2624,8 @@ while ($1)
   }
   dec $1;
 }
-enable_query_log;
+commit;
+--enable_query_log
 
 SET SESSION sort_buffer_size = 32 * 1024;
 SELECT SQL_NO_CACHE COUNT(*)
@@ -3222,11 +3226,13 @@ insert into t1 values(1,1),(2,2), (3, 3)
 let $i=10000;
 --disable_query_log
 --disable_warnings
+begin;
 while ($i)
 {
   eval insert into t2 values (-1 , $i/5000 + 1, '$i');
   dec $i;
 }
+commit;
 --enable_warnings
 --enable_query_log
 set session sort_buffer_size= 33*1024;

=== added file 'mysql-test/t/table_elim_debug.test'
--- a/mysql-test/t/table_elim_debug.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/t/table_elim_debug.test	2009-10-29 17:50:33 +0000
@@ -0,0 +1,27 @@
+#
+# Table elimination (MWL#17) tests that need debug build
+#
+--source include/have_debug.inc
+
+--disable_warnings
+drop table if exists t1, t2;
+--enable_warnings
+
+# Check if optimizer_switch works
+
+create table t1 (a int);
+insert into t1 values (0),(1),(2),(3);
+
+create table t2 (a int primary key, b int) 
+  as select a, a as b from t1 where a in (1,2);
+
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+
+set optimizer_switch='table_elimination=off';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+set optimizer_switch='table_elimination=on';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+set optimizer_switch='table_elimination=default';
+explain select t1.a from t1 left join t2 on t2.a=t1.a;
+
+drop table t1, t2;

=== modified file 'mysql-test/t/warnings.test'
--- a/mysql-test/t/warnings.test	2009-07-06 06:55:53 +0000
+++ b/mysql-test/t/warnings.test	2009-10-28 07:52:34 +0000
@@ -82,13 +82,15 @@ drop table t1, t2;
 
 create table t1(a char(10));
 let $1=50;
-disable_query_log;
+--disable_query_log
+begin;
 while ($1)
 {
   eval insert into t1 values('mysql ab');
   dec $1;
 }
-enable_query_log;
+commit;
+--enable_query_log
 alter table t1 add b char;
 set max_error_count=10;
 update t1 set b=a;

=== modified file 'mysql-test/valgrind.supp'
--- a/mysql-test/valgrind.supp	2009-09-15 10:46:35 +0000
+++ b/mysql-test/valgrind.supp	2009-10-30 18:50:56 +0000
@@ -880,3 +880,17 @@
    fun:nptl_pthread_exit_hack_handler
    fun:start_thread
 }
+
+#
+# Problem with glibc and gethostbyaddr_r
+#
+
+{
+   libc_res_nsend: Conditional jump or move depends on uninitialised value
+   Memcheck:Cond
+   fun:__libc_res_nsend
+   fun:__libc_res_nquery
+   obj:/lib64/libnss_dns-*so
+   obj:/lib64/libnss_dns-*so
+   fun:gethostbyaddr_r
+}

=== modified file 'scripts/make_binary_distribution.sh'
--- a/scripts/make_binary_distribution.sh	2009-10-23 16:48:54 +0000
+++ b/scripts/make_binary_distribution.sh	2009-10-30 20:28:11 +0000
@@ -231,6 +231,18 @@ if [ x"$BASE_SYSTEM" != x"netware" ] ; t
   # ----------------------------------------------------------------------
   set -e
 
+  #
+  # Check that the client is compiled with libmysqlclient.a
+  #
+  if test -f ./client/.libs/mysql
+  then
+    echo ""
+    echo "The MySQL clients are compiled dynamically, which is not allowed for"
+    echo "a MySQL binary tar file.  Please configure with"
+    echo "--with-client-ldflags=-all-static and try again"
+    exit 1;
+  fi
+
   # ----------------------------------------------------------------------
   # Really ugly, one script, "mysql_install_db", needs prefix set to ".",
   # i.e. makes access relative the current directory. This matches
@@ -293,11 +305,6 @@ if [ x"$BASE_SYSTEM" != x"netware" ] ; t
     fi
   fi
 
-  # FIXME let this script be in "bin/", where it is in the RPMs?
-  # http://dev.mysql.com/doc/refman/5.1/en/mysql-install-db-problems.html
-  mkdir $DEST/scripts
-  mv $DEST/bin/mysql_install_db $DEST/scripts/
-
   # Note, no legacy "safe_mysqld" link to "mysqld_safe" in 5.1
 
   # Copy readme and license files
@@ -330,18 +337,25 @@ if [ x"$BASE_SYSTEM" != x"netware" ] ; t
   #
   # Move things to make them easier to find in tar installation
   #
-  mv $DEST/libexec/* $DEST/bin
+
+  # The following test is needed if the original configure was done with
+  # something like --libexecdir=/usr/local/mysql/bin
+  if test -f $DEST/libexec/mysqld
+  then
+    mv $DEST/libexec/* $DEST/bin
+    rmdir $DEST/libexec
+  fi
   mv $DEST/share/man $DEST
   mv $DEST/share/mysql/binary-configure $DEST/configure
   mv $DEST/share/mysql/*.sql $DEST/share
   mv $DEST/share/mysql/*.cnf $DEST/share/mysql/*.server $DEST/share/mysql/mysql-log-rotate $DEST/support-files
-  rmdir $DEST/libexec
 
   #
   # Move some scripts that are only run once to 'scripts' directory
   # but add symbolic links instead to old place for compatibility
   #
-  for i in mysql_secure_installation mysql_fix_extensions mysql_fix_privilege_tables
+  mkdir $DEST/scripts
+  for i in mysql_secure_installation mysql_fix_extensions mysql_fix_privilege_tables mysql_install_db
   do
     mv $DEST/bin/$i $DEST/scripts
     ln -s "../scripts/$i" $DEST/bin/$i

=== modified file 'sql/mysqld.cc'
--- a/sql/mysqld.cc	2009-10-26 11:38:17 +0000
+++ b/sql/mysqld.cc	2009-10-31 19:22:50 +0000
@@ -1889,7 +1889,9 @@ void close_connection(THD *thd, uint err
 extern "C" sig_handler end_mysqld_signal(int sig __attribute__((unused)))
 {
   DBUG_ENTER("end_mysqld_signal");
-  kill_mysql();                                 // Take down mysqld nicely 
+  /* Don't call kill_mysql() if signal thread is not running */
+  if (signal_thread_in_use)
+    kill_mysql();                          // Take down mysqld nicely 
   DBUG_VOID_RETURN;				/* purecov: deadcode */
 }
 
@@ -8082,7 +8084,7 @@ mysqld_get_one_option(int optid,
   switch(optid) {
 #ifndef DBUG_OFF
   case OPT_DEBUG_FLUSH:
-    argument= IF_WIN(default_dbug_option, (char*) "d:t:i:O,/tmp/mysqld.trace");
+    argument= IF_WIN((char*) default_dbug_option, (char*) "d:t:i:O,/tmp/mysqld.trace");
   /* fall through */
   case '#':
     if (!argument)

=== modified file 'sql/sql_plugin.cc'
--- a/sql/sql_plugin.cc	2009-10-26 11:35:42 +0000
+++ b/sql/sql_plugin.cc	2009-10-29 00:04:56 +0000
@@ -3226,7 +3226,6 @@ static int test_plugin_options(MEM_ROOT 
   my_bool can_disable;
   bool disable_plugin;
   enum_plugin_load_policy plugin_load_policy= PLUGIN_ON;
-
   MEM_ROOT *mem_root= alloc_root_inited(&tmp->mem_root) ?
                       &tmp->mem_root : &plugin_mem_root;
   st_mysql_sys_var **opt;
@@ -3240,13 +3239,13 @@ static int test_plugin_options(MEM_ROOT 
   DBUG_ENTER("test_plugin_options");
   DBUG_ASSERT(tmp->plugin && tmp->name.str);
 
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
   /*
-    The 'federated' and 'ndbcluster' storage engines are always disabled by
-    default.
+    The 'ndbcluster' storage engine is always disabled by default.
   */
-  if (!(my_strcasecmp(&my_charset_latin1, tmp->name.str, "federated") &&
-      my_strcasecmp(&my_charset_latin1, tmp->name.str, "ndbcluster")))
+  if (!my_strcasecmp(&my_charset_latin1, tmp->name.str, "ndbcluster"))
     plugin_load_policy= PLUGIN_OFF;
+#endif
 
   for (opt= tmp->plugin->system_vars; opt && *opt; opt++)
     count+= 2; /* --{plugin}-{optname} and --plugin-{plugin}-{optname} */
@@ -3295,6 +3294,11 @@ static int test_plugin_options(MEM_ROOT 
   can_disable=
     my_strcasecmp(&my_charset_latin1, tmp->name.str, "MyISAM") &&
     my_strcasecmp(&my_charset_latin1, tmp->name.str, "MEMORY");
+#ifdef USE_MARIA_FOR_TMP_TABLES
+  if (!can_disable)
+    can_disable= (my_strcasecmp(&my_charset_latin1, tmp->name.str, "Maria")
+                  != 0);
+#endif
 
   tmp->is_mandatory= (plugin_load_policy == PLUGIN_FORCE) || !can_disable;
 

=== added file 'storage/federated/README'
--- a/storage/federated/README	1970-01-01 00:00:00 +0000
+++ b/storage/federated/README	2009-10-30 18:50:56 +0000
@@ -0,0 +1,7 @@
+The files in this directory are not used by MariaDB
+
+MariaDB uses the new federated storage engine that can be found in the
+federatedx directory.
+
+This directory is only kept around to make it easy to merge code from the
+MySQL source repositories that uses the old and disabled federated code.

=== renamed file 'storage/federated/plug.in' => 'storage/federated/plug.in.disabled'
=== added directory 'storage/federatedx'
=== added file 'storage/federatedx/AUTHORS'
--- a/storage/federatedx/AUTHORS	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/AUTHORS	2009-10-30 18:50:56 +0000
@@ -0,0 +1,11 @@
+FederatedX
+
+Patrick Galbraith <patg@xxxxxxxx> - Federated
+
+Pluggable Storage Engine Skeleton setup
+
+Brian Aker  <brian@xxxxxxxxx> | <brian@xxxxxxxxxxx> - Original Design
+Calvin Sun - Windows Support
+Brian Miezejewski - Bug fixes
+Antony T Curtis   - Help in initial development, transactions and various help
+Michael Widenius  - Bug fixes and some simple early optimizations

=== added file 'storage/federatedx/CMakeLists.txt'
--- a/storage/federatedx/CMakeLists.txt	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/CMakeLists.txt	2009-11-03 14:39:54 +0000
@@ -0,0 +1,3 @@
+INCLUDE("${PROJECT_SOURCE_DIR}/storage/mysql_storage_engine.cmake")

+SET(FEDERATEDX_SOURCES  ha_federatedx.cc federatedx_txn.cc federatedx_io.cc federatedx_io_null.cc federatedx_io_mysql.cc)

+MYSQL_STORAGE_ENGINE(FEDERATEDX)


=== added file 'storage/federatedx/ChangeLog'
--- a/storage/federatedx/ChangeLog	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/ChangeLog	2009-10-30 18:50:56 +0000
@@ -0,0 +1,18 @@
+0.2 -  Thu March 8 00:00:00 EST 2008
+
+  - Fixed bug #30051 "CREATE TABLE does not connect and check existence of remote table"
+    Modified "real_connect" to take a share and create flag in order to not rely
+    on any settings that are later instantiated and/or set by get_share
+    Also, put logic in the code to not attempt this if a localhost. There's an annoying
+    functionality that if federated tries to connect to itself during create table, you 
+    get 1159 error (timeout) - only when local. This prevents having this functionality
+    and is probably part of the reason it was removed.
+
+0.1 -  Thu Feb 1 00:00:00 EST 2008
+
+  - This is the FederatedX Storage Engine, 
+    first release.
+  - Added documentation
+  - Added simple test and README file to explain
+    how to run the test
+  - Added FAQ

=== added file 'storage/federatedx/FAQ'
--- a/storage/federatedx/FAQ	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/FAQ	2009-10-30 18:50:56 +0000
@@ -0,0 +1,40 @@
+Q. What is the FederatedX pluggable storage engine?
+
+A. It is a fork of the Federated Storage Engine that Brian Aker and I
+(Patrick Galbraith) developed originally. It is a storage engine that
+uses a client connection to a remote MySQL data source as its data
+source instead of a local file on disk.
+
+Q. Why did you fork from Federated?
+
+A. To enhance the storage engine independently of the
+MySQL Server release schedule. Many people have been 
+mentioning their dissatisfaction with the limitations
+of Federated. I think the engine is a great concept and 
+have a sense of obligation to continue to improve it.
+There are some patches already that are in dire need
+of being applied and tested.
+
+Q. What do you plan to do with FederatedX?
+
+A. Many things need addressing:
+
+- Outstanding bugs
+- How to deal with huge result sets
+- Pushdown conditions (being able to pass things like LIMIT
+  to the remote connection to keep from returning huge
+  result sets).
+- Better transactional support
+- Other connection mechanisms (ODBC, JDBC, native drivers
+  of other RDBMSs)
+
+Q. What FederatedX is and is not?
+
+A. FederatedX is not yet a complete "federated" solution in 
+   the sense that other vendors have developed (IBM, etc). It
+   is essentially a networked storage engine. It is my hope
+   to make it a real federated solution.
+
+Q. In which MySQL distributions/forks/branches can I find FederatedX?
+
+A. MariaDB (http://www.mariadb.com)

=== added file 'storage/federatedx/Makefile.am'
--- a/storage/federatedx/Makefile.am	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/Makefile.am	2009-11-01 15:09:55 +0000
@@ -0,0 +1,64 @@
+# Used to build Makefile.in
+
+MYSQLDATAdir =          $(localstatedir)
+MYSQLSHAREdir =         $(pkgdatadir)
+MYSQLBASEdir=           $(prefix)
+MYSQLLIBdir=            $(pkglibdir)
+pkgplugindir =		$(pkglibdir)/plugin
+INCLUDES =              -I$(top_srcdir)/include -I$(top_builddir)/include \
+			-I$(top_srcdir)/regex \
+			-I$(top_srcdir)/sql \
+                        -I$(srcdir)
+WRAPLIBS=
+
+LDADD =
+
+DEFS =                  @DEFS@
+
+noinst_HEADERS =	ha_federatedx.h federatedx_probes.h
+
+EXTRA_LTLIBRARIES =	ha_federatedx.la
+pkgplugin_LTLIBRARIES =	@plugin_federated_shared_target@
+ha_federatedx_la_LDFLAGS =	-module -rpath $(pkgplugindir)
+ha_federatedx_la_CXXFLAGS=	$(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
+ha_federatedx_la_CFLAGS =	$(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN
+ha_federatedx_la_SOURCES =	ha_federatedx.cc
+
+
+EXTRA_LIBRARIES =	libfederatedx.a
+noinst_LIBRARIES =	@plugin_federated_static_target@
+libfederatedx_a_CXXFLAGS =	$(AM_CFLAGS)
+libfederatedx_a_CFLAGS =	$(AM_CFLAGS)
+libfederatedx_a_SOURCES=	ha_federatedx.cc federatedx_txn.cc \
+			        federatedx_io.cc federatedx_io_null.cc \
+			        federatedx_io_mysql.cc
+
+EXTRA_DIST =		CMakeLists.txt plug.in ha_federatedx.h \
+			federatedx_probes.h
+
+ha_federatedx_la_SOURCES = ha_federatedx.cc federatedx_txn.cc \
+			   federatedx_io.cc federatedx_io_null.cc \
+			   federatedx_io_mysql.cc $(top_srcdir)/mysys/string.c
+ha_federatedx_la_LIBADD =
+
+#DTRACE =                @DTRACE@
+#DTRACEFLAGS =           @DTRACEFLAGS@
+#DTRACEFILES =           .libs/libfederatedx_engine_la-ha_federatedx.o
+
+# #if HAVE_DTRACE
+# #  libfederatedx_engine_la_LIBADD += federatedx_probes.o
+# #endif
+
+# federatedx_probes.h: federatedx_probes.d
+#	$(DTRACE) $(DTRACEFLAGS) -h -s federatedx_probes.d
+#	mv federatedx_probes.h federatedx_probes.h.bak
+#	sed "s/#include <unistd.h>//g" federatedx_probes.h.bak > federatedx_probes.h
+#	rm federatedx_probes.h.bak
+
+#federatedx_probes.o:
+#	$(DTRACE) $(DTRACEFLAGS) -G -s federatedx_probes.d $(DTRACEFILES)
+
+# End
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%

=== added file 'storage/federatedx/README'
--- a/storage/federatedx/README	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/README	2009-10-30 18:50:56 +0000
@@ -0,0 +1,33 @@
+This is the FederatedX Storage Engine, developed as an external storage engine.
+
+NOTE:
+
+The following is only relevant if you use it for MySQL.  MariaDB already comes
+with the latest version of FederatedX.
+
+To install, grab a copy of the mysql source code and run this:
+
+./configure --with-mysql=/path/to/src/mysql-5.x --libdir=/usr/local/lib/mysql/
+
+make install
+
+And then inside of MySQL:
+
+mysql> INSTALL PLUGIN federatedx SONAME 'libfederatedx_engine.so';
+
+mysql> CREATE TABLE `d` (`a` varchar(125), b text, primary key(a)) ENGINE=FEDERATEDX CONNECTION="mysql://root@host/schema/table"
+
+or 
+
+mysql> CREATE TABLE `d` (`a` varchar(125), b text, primary key(a)) ENGINE=FEDERATEDX CONNECTION="server" CHARSET=latin1;
+
+You will probably need to edit the Makefile.am in the src/ tree if you want
+to build on anything other than Linux (and the Makefile assumes that the
+server was not compiled for debug). The reason for the two possible
+configure lines is that libdir is dependent on where MySQL was installed. If
+you run the "INSTALL PLUGIN ..." and you get a file not found, check that
+your configured this directory correctly.
+
+For Solaris you can enable DTrace probes by adding to configure
+--enable-dtrace
+

=== added file 'storage/federatedx/README.windows'
--- a/storage/federatedx/README.windows	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/README.windows	2009-10-30 18:50:56 +0000
@@ -0,0 +1,23 @@
+The following files are changed in order to build a new engine on Windows:

+

+- Update win\configure.js with

+case "WITH_FEDERATEDX_STORAGE_ENGINE":

+to make sure it will pass WITH_FEDERATEDX_STORAGE_ENGINE in.

+

+- Update CMakeFiles.txt under mysql root:

+  IF(WITH_FEDERATEDX_STORAGE_ENGINE)

+      ADD_DEFINITIONS(-D WITH_FEDERATEDX_STORAGE_ENGINE)

+  SET (mysql_plugin_defs

+      "${mysql_plugin_defs},builtin_skeleton_plugin")

+  ENDIF(WITH_FEDERATEDX_STORAGE_ENGINE)

+

+  and,

+

+  IF(WITH_FEDERATEDX_STORAGE_ENGINE)

+    ADD_SUBDIRECTORY(storage/skeleton/src)

+  ENDIF(WITH_FEDERATEDX_STORAGE_ENGINE)

+

+  - Update CMakeFiles.txt under sql:

+  IF(WITH_FEDERATEDX_STORAGE_ENGINE)

+    TARGET_LINK_LIBRARIES(mysqld skeleton)

+  ENDIF(WITH_FEDERATEDX_STORAGE_ENGINE)


=== added file 'storage/federatedx/TODO'
--- a/storage/federatedx/TODO	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/TODO	2009-10-30 18:50:56 +0000
@@ -0,0 +1,30 @@
+Features
+
+* Add Pushdown conditions
+* Add other network driver interfaces
+* Handle large result sets
+* Auto-discovery of tables on foreign data sources
+
+Bugs (http://bugs.mysql.com)
+
+20026 2006-05-23 FEDERATED lacks support for auto_increment_increment and auto_increment_offset   
+20724 2006-06-27 FEDERATED does not honour SET INSERT_ID    
+28269 2007-05-06 Any FEDERATED engine fails to quote reserved words for field names
+25509 2007-01-10 Federated: Failure with non-ASCII characters   
+26697 2007-02-27 Every query to a federated table results in a full scan of MyISAM table.
+21360 2006-07-31 Microsoft Windows (Windows/Linux) mysqldump error on federated tables    
+34189 2008-01-31 Any ALTER TABLE t1 ENGINE=FEDERATED CONNECTION='connectionString' on MyISAM fails    
+31757 2007-10-22 Any Federated tables break replication  Antony Curtis
+33953 2008-01-21 Any mysqld dies on search federated table using nullable index with < or <= operator
+34015 2008-01-23 Linux Problems with float fields using federated tables
+21583 2006-08-11 Linux (Linux) Federated table returns broken strings.    
+33702 2008-01-05 Accessing a federated table with a non existing server returns random error code   
+25512 2007-01-10 Federated: CREATE failures   
+32426 2007-11-16 Any FEDERATED query returns corrupt results for ORDER BY on a TEXT field 
+25510 2007-01-10 Federated: double trigger activation   
+33250 2007-12-14 SELECT * FROM really_big_federated_table eats lots of virtual memory (OOM)   
+14874 2005-11-11 Error 2013: Lost connection to MySQL server with Federated table   
+25508 2007-01-10 Federated: Failure to Remove Partitioning    
+27180 2007-03-15 #1030 - Got error 1 from storage engine with big tables
+33947 2008-01-20 Any Join on Federated tables with Unique index and IS NOT NULL crashes server
+30051 (fixed) CREATE TABLE does not connect and check existence of remote table

=== added file 'storage/federatedx/federatedx_io.cc'
--- a/storage/federatedx/federatedx_io.cc	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/federatedx_io.cc	2009-10-30 18:50:56 +0000
@@ -0,0 +1,103 @@
+/* 
+Copyright (c) 2007, Antony T Curtis
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+    * Neither the name of FederatedX nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+/*#define MYSQL_SERVER 1*/
+#include "mysql_priv.h"
+#include <mysql/plugin.h>
+
+#include "ha_federatedx.h"
+
+#include "m_string.h"
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation                          // gcc: Class implementation
+#endif
+
+typedef federatedx_io *(*instantiate_io_type)(MEM_ROOT *server_root,
+                                              FEDERATEDX_SERVER *server);
+struct io_schemes_st
+{
+  const char *scheme;
+  instantiate_io_type instantiate;
+};
+
+
+static const io_schemes_st federated_io_schemes[] =
+{
+  { "mysql", &instantiate_io_mysql },
+  { "null", instantiate_io_null } /* must be last element */
+};
+
+const uint federated_io_schemes_count= array_elements(federated_io_schemes);
+
+federatedx_io::federatedx_io(FEDERATEDX_SERVER *aserver)
+  : server(aserver), owner_ptr(0), txn_next(0), idle_next(0),
+    active(FALSE), busy(FALSE), readonly(TRUE)
+{
+  DBUG_ENTER("federatedx_io::federatedx_io");
+  DBUG_ASSERT(server);
+
+  safe_mutex_assert_owner(&server->mutex);
+  server->io_count++;
+
+  DBUG_VOID_RETURN;
+}
+
+
+federatedx_io::~federatedx_io()
+{
+  DBUG_ENTER("federatedx_io::~federatedx_io");
+
+  server->io_count--;
+
+  DBUG_VOID_RETURN;
+}
+
+
+bool federatedx_io::handles_scheme(const char *scheme)
+{
+  const io_schemes_st *ptr = federated_io_schemes;
+  const io_schemes_st *end = ptr + array_elements(federated_io_schemes);
+  while (ptr != end && strcasecmp(scheme, ptr->scheme))
+    ++ptr;
+  return ptr != end;
+}
+
+
+federatedx_io *federatedx_io::construct(MEM_ROOT *server_root,
+                                        FEDERATEDX_SERVER *server)
+{
+  const io_schemes_st *ptr = federated_io_schemes;
+  const io_schemes_st *end = ptr + (array_elements(federated_io_schemes) - 1);
+  while (ptr != end && strcasecmp(server->scheme, ptr->scheme))
+    ++ptr;
+  return ptr->instantiate(server_root, server);
+}
+
+

=== added file 'storage/federatedx/federatedx_io_mysql.cc'
--- a/storage/federatedx/federatedx_io_mysql.cc	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/federatedx_io_mysql.cc	2009-10-30 18:50:56 +0000
@@ -0,0 +1,592 @@
+/* 
+Copyright (c) 2007, Antony T Curtis
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+    * Neither the name of FederatedX nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+/*#define MYSQL_SERVER 1*/
+#include "mysql_priv.h"
+#include <mysql/plugin.h>
+
+#include "ha_federatedx.h"
+
+#include "m_string.h"
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation                          // gcc: Class implementation
+#endif
+
+
+#define SAVEPOINT_REALIZED  1
+#define SAVEPOINT_RESTRICT  2
+#define SAVEPOINT_EMITTED 4
+
+
+typedef struct federatedx_savepoint
+{
+  ulong level;
+  uint  flags;
+} SAVEPT;
+
+
+class federatedx_io_mysql :public federatedx_io
+{
+  MYSQL mysql; /* MySQL connection */
+  DYNAMIC_ARRAY savepoints;
+  bool requested_autocommit;
+  bool actual_autocommit;
+
+  int actual_query(const char *buffer, uint length);
+  bool test_all_restrict() const;
+public:
+  federatedx_io_mysql(FEDERATEDX_SERVER *);
+  ~federatedx_io_mysql();
+
+  int simple_query(const char *fmt, ...);
+  int query(const char *buffer, uint length);
+  virtual FEDERATEDX_IO_RESULT *store_result();
+
+  virtual size_t max_query_size() const;
+
+  virtual my_ulonglong affected_rows() const;
+  virtual my_ulonglong last_insert_id() const;
+
+  virtual int error_code();
+  virtual const char *error_str();
+  
+  void reset();
+  int commit();
+  int rollback();
+  
+  int savepoint_set(ulong sp);
+  ulong savepoint_release(ulong sp);
+  ulong savepoint_rollback(ulong sp);
+  void savepoint_restrict(ulong sp);
+  
+  ulong last_savepoint() const;
+  ulong actual_savepoint() const;
+  bool is_autocommit() const;
+
+  bool table_metadata(ha_statistics *stats, const char *table_name,
+                      uint table_name_length, uint flag);
+
+  /* resultset operations */
+  
+  virtual void free_result(FEDERATEDX_IO_RESULT *io_result);
+  virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result);
+  virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result);
+  virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result);
+  virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result);
+  virtual const char *get_column_data(FEDERATEDX_IO_ROW *row,
+                                      unsigned int column);
+  virtual bool is_column_null(const FEDERATEDX_IO_ROW *row,
+                              unsigned int column) const;
+};
+
+
+federatedx_io *instantiate_io_mysql(MEM_ROOT *server_root,
+                                    FEDERATEDX_SERVER *server)
+{
+  return new (server_root) federatedx_io_mysql(server);
+}
+
+
+federatedx_io_mysql::federatedx_io_mysql(FEDERATEDX_SERVER *aserver)
+  : federatedx_io(aserver),
+    requested_autocommit(TRUE), actual_autocommit(TRUE)
+{
+  DBUG_ENTER("federatedx_io_mysql::federatedx_io_mysql");
+
+  bzero(&mysql, sizeof(MYSQL));
+  bzero(&savepoints, sizeof(DYNAMIC_ARRAY));
+
+  my_init_dynamic_array(&savepoints, sizeof(SAVEPT), 16, 16);  
+  
+  DBUG_VOID_RETURN;
+}
+
+
+federatedx_io_mysql::~federatedx_io_mysql()
+{
+  DBUG_ENTER("federatedx_io_mysql::~federatedx_io_mysql");
+
+  mysql_close(&mysql);
+  delete_dynamic(&savepoints);
+
+  DBUG_VOID_RETURN;
+}
+
+
+void federatedx_io_mysql::reset()
+{
+  reset_dynamic(&savepoints);
+  set_active(FALSE);
+  
+  requested_autocommit= TRUE;
+  mysql.reconnect= 1;
+}
+
+
+int federatedx_io_mysql::commit()
+{
+  int error= 0;
+  DBUG_ENTER("federatedx_io_mysql::commit");
+  
+  if (!actual_autocommit && (error= actual_query("COMMIT", 6)))
+    rollback();
+  
+  reset();
+  
+  DBUG_RETURN(error);
+}
+
+int federatedx_io_mysql::rollback()
+{
+  int error= 0;
+  DBUG_ENTER("federatedx_io_mysql::rollback");
+  
+  if (!actual_autocommit)
+    error= actual_query("ROLLBACK", 8);
+  else
+    error= ER_WARNING_NOT_COMPLETE_ROLLBACK;
+
+  reset();
+  
+  DBUG_RETURN(error);
+}
+
+
+ulong federatedx_io_mysql::last_savepoint() const
+{
+  SAVEPT *savept= NULL;
+  DBUG_ENTER("federatedx_io_mysql::last_savepoint");
+
+  if (savepoints.elements)
+    savept= dynamic_element(&savepoints, savepoints.elements - 1, SAVEPT *);
+
+  DBUG_RETURN(savept ? savept->level : 0);
+}
+
+
+ulong federatedx_io_mysql::actual_savepoint() const
+{
+  SAVEPT *savept= NULL;
+  uint index= savepoints.elements;
+  DBUG_ENTER("federatedx_io_mysql::last_savepoint");
+
+  while (index)
+  {
+    savept= dynamic_element(&savepoints, --index, SAVEPT *);
+    if (savept->flags & SAVEPOINT_REALIZED)
+    break;
+  savept= NULL;
+  }
+
+  DBUG_RETURN(savept ? savept->level : 0);
+}
+
+bool federatedx_io_mysql::is_autocommit() const
+{
+  return actual_autocommit;
+}
+
+
+int federatedx_io_mysql::savepoint_set(ulong sp)
+{
+  int error;
+  SAVEPT savept;
+  DBUG_ENTER("federatedx_io_mysql::savepoint_set");
+  DBUG_PRINT("info",("savepoint=%lu", sp));
+  DBUG_ASSERT(sp > last_savepoint());
+
+  savept.level= sp;
+  savept.flags= 0;
+
+  if ((error= insert_dynamic(&savepoints, (uchar*) &savept) ? -1 : 0))
+    goto err;
+
+  set_active(TRUE);
+  mysql.reconnect= 0;
+  requested_autocommit= FALSE;
+
+err:
+  DBUG_RETURN(error);
+}
+
+
+ulong federatedx_io_mysql::savepoint_release(ulong sp)
+{
+  SAVEPT *savept, *last= NULL;
+  DBUG_ENTER("federatedx_io_mysql::savepoint_release");
+  DBUG_PRINT("info",("savepoint=%lu", sp));
+  
+  while (savepoints.elements)
+  {
+    savept= dynamic_element(&savepoints, savepoints.elements - 1, SAVEPT *);
+    if (savept->level < sp)
+      break;
+  if ((savept->flags & (SAVEPOINT_REALIZED | 
+                        SAVEPOINT_RESTRICT)) == SAVEPOINT_REALIZED)
+    last= savept;
+    savepoints.elements--;
+  }
+
+  if (last)
+  {
+    char buffer[STRING_BUFFER_USUAL_SIZE];
+  int length= my_snprintf(buffer, sizeof(buffer),
+              "RELEASE SAVEPOINT save%lu", last->level);
+    actual_query(buffer, length);
+  }
+
+  DBUG_RETURN(last_savepoint()); 
+}
+
+
+ulong federatedx_io_mysql::savepoint_rollback(ulong sp)
+{
+  SAVEPT *savept;
+  uint index;
+  DBUG_ENTER("federatedx_io_mysql::savepoint_release");
+  DBUG_PRINT("info",("savepoint=%lu", sp));
+  
+  while (savepoints.elements)
+  {
+    savept= dynamic_element(&savepoints, savepoints.elements - 1, SAVEPT *);
+  if (savept->level <= sp)
+    break;
+    savepoints.elements--;
+  }
+
+  for (index= savepoints.elements, savept= NULL; index;)
+  {
+    savept= dynamic_element(&savepoints, --index, SAVEPT *);
+    if (savept->flags & SAVEPOINT_REALIZED)
+    break;
+  savept= NULL;
+  }
+  
+  if (savept && !(savept->flags & SAVEPOINT_RESTRICT))
+  {
+    char buffer[STRING_BUFFER_USUAL_SIZE];
+  int length= my_snprintf(buffer, sizeof(buffer),
+              "ROLLBACK TO SAVEPOINT save%lu", savept->level);
+    actual_query(buffer, length);
+  }
+
+  DBUG_RETURN(last_savepoint());
+}
+
+
+void federatedx_io_mysql::savepoint_restrict(ulong sp)
+{
+  SAVEPT *savept;
+  uint index= savepoints.elements;
+  DBUG_ENTER("federatedx_io_mysql::savepoint_restrict");
+  
+  while (index)
+  {
+    savept= dynamic_element(&savepoints, --index, SAVEPT *);
+  if (savept->level > sp)
+    continue;
+  if (savept->level < sp)
+    break;
+  savept->flags|= SAVEPOINT_RESTRICT;
+  break;
+  }
+  
+  DBUG_VOID_RETURN;
+}
+
+
+int federatedx_io_mysql::simple_query(const char *fmt, ...)
+{
+  char buffer[STRING_BUFFER_USUAL_SIZE];
+  int length, error;
+  va_list arg;
+  DBUG_ENTER("federatedx_io_mysql::simple_query");
+
+  va_start(arg, fmt);  
+  length= my_vsnprintf(buffer, sizeof(buffer), fmt, arg);
+  va_end(arg);
+  
+  error= query(buffer, length);
+  
+  DBUG_RETURN(error);
+}
+
+
+bool federatedx_io_mysql::test_all_restrict() const
+{
+  bool result= FALSE;
+  SAVEPT *savept;
+  uint index= savepoints.elements;
+  DBUG_ENTER("federatedx_io_mysql::test_all_restrict");
+  
+  while (index)
+  {
+    savept= dynamic_element(&savepoints, --index, SAVEPT *);
+  if ((savept->flags & (SAVEPOINT_REALIZED | 
+                        SAVEPOINT_RESTRICT)) == SAVEPOINT_REALIZED ||
+    (savept->flags & SAVEPOINT_EMITTED))
+      DBUG_RETURN(FALSE);
+    if (savept->flags & SAVEPOINT_RESTRICT)
+    result= TRUE;
+  }
+  
+  DBUG_RETURN(result); 
+}
+
+
+int federatedx_io_mysql::query(const char *buffer, uint length)
+{
+  int error;
+  bool wants_autocommit= requested_autocommit | is_readonly();
+  DBUG_ENTER("federatedx_io_mysql::query");
+
+  if (!wants_autocommit && test_all_restrict())
+    wants_autocommit= TRUE;
+
+  if (wants_autocommit != actual_autocommit)
+  {
+    if ((error= actual_query(wants_autocommit ? "SET AUTOCOMMIT=1"
+                                            : "SET AUTOCOMMIT=0", 16)))
+    DBUG_RETURN(error);                         
+    mysql.reconnect= wants_autocommit ? 1 : 0;
+    actual_autocommit= wants_autocommit;
+  }
+  
+  if (!actual_autocommit && last_savepoint() != actual_savepoint())
+  {
+    SAVEPT *savept= dynamic_element(&savepoints, savepoints.elements - 1, 
+                                SAVEPT *);
+    if (!(savept->flags & SAVEPOINT_RESTRICT))
+  {
+      char buf[STRING_BUFFER_USUAL_SIZE];
+    int len= my_snprintf(buf, sizeof(buf),
+                  "SAVEPOINT save%lu", savept->level);
+      if ((error= actual_query(buf, len)))
+    DBUG_RETURN(error);                         
+    set_active(TRUE);
+    savept->flags|= SAVEPOINT_EMITTED;
+    }
+    savept->flags|= SAVEPOINT_REALIZED;
+  }
+
+  if (!(error= actual_query(buffer, length)))
+    set_active(is_active() || !actual_autocommit);
+
+  DBUG_RETURN(error);
+}
+
+
+int federatedx_io_mysql::actual_query(const char *buffer, uint length)
+{
+  int error;
+  DBUG_ENTER("federatedx_io_mysql::actual_query");
+
+  if (!mysql.master)
+  {
+    if (!(mysql_init(&mysql)))
+    DBUG_RETURN(-1);
+  
+    /*
+	BUG# 17044 Federated Storage Engine is not UTF8 clean
+	Add set names to whatever charset the table is at open
+	of table
+    */
+    /* this sets the csname like 'set names utf8' */
+    mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, get_charsetname());
+
+    if (!mysql_real_connect(&mysql,
+                            get_hostname(),
+                            get_username(),
+                            get_password(),
+                            get_database(),
+                            get_port(),
+                            get_socket(), 0))
+      DBUG_RETURN(ER_CONNECT_TO_FOREIGN_DATA_SOURCE);
+    mysql.reconnect= 1;
+  }
+
+  error= mysql_real_query(&mysql, buffer, length);
+  
+  DBUG_RETURN(error);
+}
+
+size_t federatedx_io_mysql::max_query_size() const
+{
+  return mysql.net.max_packet_size;
+}
+
+
+my_ulonglong federatedx_io_mysql::affected_rows() const
+{
+  return mysql.affected_rows;
+}
+
+
+my_ulonglong federatedx_io_mysql::last_insert_id() const
+{
+  return mysql.last_used_con->insert_id;
+}
+
+
+int federatedx_io_mysql::error_code()
+{
+  return mysql_errno(&mysql);
+}
+
+
+const char *federatedx_io_mysql::error_str()
+{
+  return mysql_error(&mysql);
+}
+
+
+FEDERATEDX_IO_RESULT *federatedx_io_mysql::store_result()
+{
+  FEDERATEDX_IO_RESULT *result;
+  DBUG_ENTER("federatedx_io_mysql::store_result");
+  
+  result= (FEDERATEDX_IO_RESULT *) mysql_store_result(&mysql);
+  
+  DBUG_RETURN(result);
+}
+
+
+void federatedx_io_mysql::free_result(FEDERATEDX_IO_RESULT *io_result)
+{
+  mysql_free_result((MYSQL_RES *) io_result);
+}
+
+
+unsigned int federatedx_io_mysql::get_num_fields(FEDERATEDX_IO_RESULT *io_result)
+{
+  return mysql_num_fields((MYSQL_RES *) io_result);
+}
+
+
+my_ulonglong federatedx_io_mysql::get_num_rows(FEDERATEDX_IO_RESULT *io_result)
+{
+  return mysql_num_rows((MYSQL_RES *) io_result);
+}
+
+
+FEDERATEDX_IO_ROW *federatedx_io_mysql::fetch_row(FEDERATEDX_IO_RESULT *io_result)
+{
+  return (FEDERATEDX_IO_ROW *) mysql_fetch_row((MYSQL_RES *) io_result);
+}
+
+
+ulong *federatedx_io_mysql::fetch_lengths(FEDERATEDX_IO_RESULT *io_result)
+{
+  return mysql_fetch_lengths((MYSQL_RES *) io_result);
+}
+
+
+const char *federatedx_io_mysql::get_column_data(FEDERATEDX_IO_ROW *row,
+                                                 unsigned int column)
+{
+  return ((MYSQL_ROW)row)[column];
+}
+
+
+bool federatedx_io_mysql::is_column_null(const FEDERATEDX_IO_ROW *row,
+                                         unsigned int column) const
+{
+  return !((MYSQL_ROW)row)[column];
+}
+
+bool federatedx_io_mysql::table_metadata(ha_statistics *stats,
+                                         const char *table_name,
+                                         uint table_name_length, uint flag)
+{
+  char status_buf[FEDERATEDX_QUERY_BUFFER_SIZE];
+  FEDERATEDX_IO_RESULT *result= 0;
+  FEDERATEDX_IO_ROW *row;
+  String status_query_string(status_buf, sizeof(status_buf), &my_charset_bin);
+  int error;
+
+  status_query_string.length(0);
+  status_query_string.append(STRING_WITH_LEN("SHOW TABLE STATUS LIKE "));
+  append_ident(&status_query_string, table_name,
+               table_name_length, value_quote_char);
+
+  if (query(status_query_string.ptr(), status_query_string.length()))
+    goto error;
+
+  status_query_string.length(0);
+
+  result= store_result();
+
+  /*
+    We're going to use fields num. 4, 12 and 13 of the resultset,
+    so make sure we have these fields.
+  */
+  if (!result || (get_num_fields(result) < 14))
+    goto error;
+
+  if (!get_num_rows(result))
+    goto error;
+
+  if (!(row= fetch_row(result)))
+    goto error;
+
+  /*
+    deleted is set in ha_federatedx::info
+  */
+  /*
+    need to figure out what this means as far as federatedx is concerned,
+    since we don't have a "file"
+
+    data_file_length = ?
+    index_file_length = ?
+    delete_length = ?
+  */
+  if (!is_column_null(row, 4))
+    stats->records= (ha_rows) my_strtoll10(get_column_data(row, 4),
+	                                   (char**) 0, &error);
+  if (!is_column_null(row, 5))
+    stats->mean_rec_length= (ulong) my_strtoll10(get_column_data(row, 5),
+	                                         (char**) 0, &error);
+
+  stats->data_file_length= stats->records * stats->mean_rec_length;
+
+  if (!is_column_null(row, 12))
+    stats->update_time= (time_t) my_strtoll10(get_column_data(row, 12),
+	                                      (char**) 0, &error);
+  if (!is_column_null(row, 13))
+    stats->check_time= (time_t) my_strtoll10(get_column_data(row, 13),
+	                                     (char**) 0, &error);
+
+  free_result(result);
+  return 0;
+
+error:
+  free_result(result);
+  return 1;
+}

=== added file 'storage/federatedx/federatedx_io_null.cc'
--- a/storage/federatedx/federatedx_io_null.cc	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/federatedx_io_null.cc	2009-10-30 18:50:56 +0000
@@ -0,0 +1,277 @@
+/* 
+Copyright (c) 2007, Antony T Curtis
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+    * Neither the name of FederatedX nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+/*#define MYSQL_SERVER 1*/
+#include "mysql_priv.h"
+#include <mysql/plugin.h>
+
+#include "ha_federatedx.h"
+
+#include "m_string.h"
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation                          // gcc: Class implementation
+#endif
+
+
+#define SAVEPOINT_REALIZED  1
+#define SAVEPOINT_RESTRICT  2
+#define SAVEPOINT_EMITTED 4
+
+
+typedef struct federatedx_savepoint
+{
+  ulong level;
+  uint  flags;
+} SAVEPT;
+
+
+class federatedx_io_null :public federatedx_io
+{
+public:
+  federatedx_io_null(FEDERATEDX_SERVER *);
+  ~federatedx_io_null();
+
+  int query(const char *buffer, uint length);
+  virtual FEDERATEDX_IO_RESULT *store_result();
+
+  virtual size_t max_query_size() const;
+
+  virtual my_ulonglong affected_rows() const;
+  virtual my_ulonglong last_insert_id() const;
+
+  virtual int error_code();
+  virtual const char *error_str();
+  
+  void reset();
+  int commit();
+  int rollback();
+  
+  int savepoint_set(ulong sp);
+  ulong savepoint_release(ulong sp);
+  ulong savepoint_rollback(ulong sp);
+  void savepoint_restrict(ulong sp);
+  
+  ulong last_savepoint() const;
+  ulong actual_savepoint() const;
+  bool is_autocommit() const;
+
+  bool table_metadata(ha_statistics *stats, const char *table_name,
+                      uint table_name_length, uint flag);
+  
+  /* resultset operations */
+  
+  virtual void free_result(FEDERATEDX_IO_RESULT *io_result);
+  virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result);
+  virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result);
+  virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result);
+  virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result);
+  virtual const char *get_column_data(FEDERATEDX_IO_ROW *row,
+                                      unsigned int column);
+  virtual bool is_column_null(const FEDERATEDX_IO_ROW *row,
+                              unsigned int column) const;
+};
+
+
+federatedx_io *instantiate_io_null(MEM_ROOT *server_root,
+                                   FEDERATEDX_SERVER *server)
+{
+  return new (server_root) federatedx_io_null(server);
+}
+
+
+federatedx_io_null::federatedx_io_null(FEDERATEDX_SERVER *aserver)
+  : federatedx_io(aserver)
+{
+}
+
+
+federatedx_io_null::~federatedx_io_null()
+{
+}
+
+
+void federatedx_io_null::reset()
+{
+}
+
+
+int federatedx_io_null::commit()
+{
+  return 0;
+}
+
+int federatedx_io_null::rollback()
+{
+  return 0;
+}
+
+
+ulong federatedx_io_null::last_savepoint() const
+{
+  return 0;
+}
+
+
+ulong federatedx_io_null::actual_savepoint() const
+{
+  return 0;
+}
+
+bool federatedx_io_null::is_autocommit() const
+{
+  return 0;
+}
+
+
+int federatedx_io_null::savepoint_set(ulong sp)
+{
+  return 0;
+}
+
+
+ulong federatedx_io_null::savepoint_release(ulong sp)
+{
+  return 0;
+}
+
+
+ulong federatedx_io_null::savepoint_rollback(ulong sp)
+{
+  return 0;
+}
+
+
+void federatedx_io_null::savepoint_restrict(ulong sp)
+{
+}
+
+
+int federatedx_io_null::query(const char *buffer, uint length)
+{
+  return 0;
+}
+
+
+size_t federatedx_io_null::max_query_size() const
+{
+  return INT_MAX;
+}
+
+
+my_ulonglong federatedx_io_null::affected_rows() const
+{
+  return 0;
+}
+
+
+my_ulonglong federatedx_io_null::last_insert_id() const
+{
+  return 0;
+}
+
+
+int federatedx_io_null::error_code()
+{
+  return 0;
+}
+
+
+const char *federatedx_io_null::error_str()
+{
+  return "";
+}
+
+
+FEDERATEDX_IO_RESULT *federatedx_io_null::store_result()
+{
+  FEDERATEDX_IO_RESULT *result;
+  DBUG_ENTER("federatedx_io_null::store_result");
+  
+  result= NULL;
+  
+  DBUG_RETURN(result);
+}
+
+
+void federatedx_io_null::free_result(FEDERATEDX_IO_RESULT *)
+{
+}
+
+
+unsigned int federatedx_io_null::get_num_fields(FEDERATEDX_IO_RESULT *)
+{
+  return 0;
+}
+
+
+my_ulonglong federatedx_io_null::get_num_rows(FEDERATEDX_IO_RESULT *)
+{
+  return 0;
+}
+
+
+FEDERATEDX_IO_ROW *federatedx_io_null::fetch_row(FEDERATEDX_IO_RESULT *)
+{
+  return NULL;
+}
+
+
+ulong *federatedx_io_null::fetch_lengths(FEDERATEDX_IO_RESULT *)
+{
+  return NULL;
+}
+
+
+const char *federatedx_io_null::get_column_data(FEDERATEDX_IO_ROW *,
+                                                 unsigned int)
+{
+  return "";
+}
+
+
+bool federatedx_io_null::is_column_null(const FEDERATEDX_IO_ROW *,
+                                         unsigned int) const
+{
+  return true;
+}
+
+bool federatedx_io_null::table_metadata(ha_statistics *stats,
+                                        const char *table_name,
+                                        uint table_name_length, uint flag)
+{
+  stats->records= (ha_rows) 0;
+  stats->mean_rec_length= (ulong) 0;
+  stats->data_file_length= 0;
+
+  stats->update_time= (time_t) 0;
+  stats->check_time= (time_t) 0;
+
+  return 0;
+}

=== added file 'storage/federatedx/federatedx_probes.h'
--- a/storage/federatedx/federatedx_probes.h	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/federatedx_probes.h	2009-10-30 18:50:56 +0000
@@ -0,0 +1,45 @@
+/*
+ * Generated by dtrace(1M).
+ */
+
+#ifndef	_FEDERATED_PROBES_H
+#define	_FEDERATED_PROBES_H
+
+
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#if _DTRACE_VERSION
+
+#define	FEDERATED_CLOSE() \
+	__dtrace_federated___close()
+#define	FEDERATED_CLOSE_ENABLED() \
+	__dtraceenabled_federated___close()
+#define	FEDERATED_OPEN() \
+	__dtrace_federated___open()
+#define	FEDERATED_OPEN_ENABLED() \
+	__dtraceenabled_federated___open()
+
+
+extern void __dtrace_federated___close(void);
+extern int __dtraceenabled_federated___close(void);
+extern void __dtrace_federated___open(void);
+extern int __dtraceenabled_federated___open(void);
+
+#else
+
+#define	FEDERATED_CLOSE()
+#define	FEDERATED_CLOSE_ENABLED() (0)
+#define	FEDERATED_OPEN()
+#define	FEDERATED_OPEN_ENABLED() (0)
+
+#endif
+
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _FEDERATED_PROBES_H */

=== added file 'storage/federatedx/federatedx_txn.cc'
--- a/storage/federatedx/federatedx_txn.cc	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/federatedx_txn.cc	2009-10-30 18:50:56 +0000
@@ -0,0 +1,424 @@
+/* 
+Copyright (c) 2007, Antony T Curtis
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+    * Neither the name of FederatedX nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+/*#define MYSQL_SERVER 1*/
+#include "mysql_priv.h"
+#include <mysql/plugin.h>
+
+#include "ha_federatedx.h"
+
+#include "m_string.h"
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation                          // gcc: Class implementation
+#endif
+
+
+federatedx_txn::federatedx_txn()
+  : txn_list(0), savepoint_level(0), savepoint_stmt(0), savepoint_next(0)
+{
+  DBUG_ENTER("federatedx_txn::federatedx_txn");
+  DBUG_VOID_RETURN;
+}
+
+federatedx_txn::~federatedx_txn()
+{
+  DBUG_ENTER("federatedx_txn::~federatedx_txn");
+  DBUG_ASSERT(!txn_list);
+  DBUG_VOID_RETURN;
+}
+
+
+void federatedx_txn::close(FEDERATEDX_SERVER *server)
+{
+  uint count= 0;
+  federatedx_io *io, **iop;
+  DBUG_ENTER("federatedx_txn::close");
+  
+  DBUG_ASSERT(!server->use_count);
+  DBUG_PRINT("info",("use count: %u  connections: %u", 
+                     server->use_count, server->io_count));
+
+  for (iop= &txn_list; (io= *iop);)
+  {
+    if (io->server != server)
+      iop= &io->txn_next;
+    else
+    {
+      *iop= io->txn_next;
+      io->txn_next= NULL;
+      io->busy= FALSE;
+
+      io->idle_next= server->idle_list;
+      server->idle_list= io;
+    }
+  }
+
+  while ((io= server->idle_list))
+  {
+    server->idle_list= io->idle_next;
+    delete io;
+    count++;
+  }
+  
+  DBUG_PRINT("info",("closed %u connections,  txn_list: %s", count,
+                     txn_list ? "active":  "empty"));
+  DBUG_VOID_RETURN;
+}
+
+
+int federatedx_txn::acquire(FEDERATEDX_SHARE *share, bool readonly,
+                            federatedx_io **ioptr)
+{
+  federatedx_io *io;
+  FEDERATEDX_SERVER *server= share->s;
+  DBUG_ENTER("federatedx_txn::acquire");
+  DBUG_ASSERT(ioptr && server);
+
+  if (!(io= *ioptr))
+  {
+    /* check to see if we have an available IO connection */
+    for (io= txn_list; io; io= io->txn_next)
+      if (io->server == server)
+	break;
+
+    if (!io)
+    {
+      /* check to see if there are any unowned IO connections */
+      pthread_mutex_lock(&server->mutex);
+      if ((io= server->idle_list))
+      {
+	server->idle_list= io->idle_next;
+	io->idle_next= NULL;
+      }
+      else
+	io= federatedx_io::construct(&server->mem_root, server);
+
+      io->txn_next= txn_list;
+      txn_list= io;
+
+      pthread_mutex_unlock(&server->mutex);
+    }
+
+    if (io->busy)
+      *io->owner_ptr= NULL;
+    
+    io->busy= TRUE;
+    io->owner_ptr= ioptr;
+  }
+  
+  DBUG_ASSERT(io->busy && io->server == server);
+  
+  io->readonly&= readonly;
+
+  DBUG_RETURN((*ioptr= io) ? 0 : -1);
+}
+
+
+void federatedx_txn::release(federatedx_io **ioptr)
+{
+  federatedx_io *io;
+  DBUG_ENTER("federatedx_txn::release");
+  DBUG_ASSERT(ioptr);
+
+  if ((io= *ioptr))
+  {
+    /* mark as available for reuse in this transaction */
+    io->busy= FALSE;
+    *ioptr= NULL;
+  
+    DBUG_PRINT("info", ("active: %d autocommit: %d", 
+                	io->active, io->is_autocommit()));
+
+    if (io->is_autocommit())
+      io->active= FALSE;
+  }
+
+  release_scan();
+
+  DBUG_VOID_RETURN;
+}
+
+
+void federatedx_txn::release_scan()
+{
+  uint count= 0, returned= 0;
+  federatedx_io *io, **pio;
+  DBUG_ENTER("federatedx_txn::release_scan");
+
+  /* return any inactive and idle connections to the server */  
+  for (pio= &txn_list; (io= *pio); count++)
+  {
+    if (io->active || io->busy)
+      pio= &io->txn_next;
+    else
+    {
+      FEDERATEDX_SERVER *server= io->server;
+
+      /* unlink from list of connections bound to the transaction */
+      *pio= io->txn_next; 
+      io->txn_next= NULL;
+
+      /* reset some values */
+      io->readonly= TRUE;
+
+      pthread_mutex_lock(&server->mutex);
+      io->idle_next= server->idle_list;
+      server->idle_list= io;
+      pthread_mutex_unlock(&server->mutex);
+      returned++;
+    }
+  }
+  DBUG_PRINT("info",("returned %u of %u connections(s)", returned, count));
+
+  DBUG_VOID_RETURN;
+}
+
+
+bool federatedx_txn::txn_begin()
+{
+  ulong level= 0;
+  DBUG_ENTER("federatedx_txn::txn_begin");
+
+  if (savepoint_next == 0)
+  {
+    savepoint_next++;
+    savepoint_level= savepoint_stmt= 0;
+    sp_acquire(&level);
+  }
+
+  DBUG_RETURN(level == 1);
+}
+
+
+int federatedx_txn::txn_commit()
+{
+  int error= 0;
+  federatedx_io *io;
+  DBUG_ENTER("federatedx_txn::txn_commit");
+
+  if (savepoint_next)
+  {
+    DBUG_ASSERT(savepoint_stmt != 1);
+
+    for (io= txn_list; io; io= io->txn_next)
+    {
+      int rc= 0;
+
+      if (io->active)
+	rc= io->commit();
+      else
+	io->rollback();
+
+      if (io->active && rc)
+	error= -1;
+
+      io->reset();
+    }
+
+    release_scan();
+
+    savepoint_next= savepoint_stmt= savepoint_level= 0;
+  }
+    
+  DBUG_RETURN(error);
+}
+
+
+int federatedx_txn::txn_rollback()
+{
+  int error= 0;
+  federatedx_io *io;
+  DBUG_ENTER("federatedx_txn::txn_rollback");
+
+  if (savepoint_next)
+  {
+    DBUG_ASSERT(savepoint_stmt != 1);
+
+    for (io= txn_list; io; io= io->txn_next)
+    {
+      int rc= io->rollback();
+
+      if (io->active && rc)
+	error= -1;
+
+      io->reset();
+    }
+
+    release_scan();
+
+    savepoint_next= savepoint_stmt= savepoint_level= 0;
+  }
+    
+  DBUG_RETURN(error);
+}
+
+
+bool federatedx_txn::sp_acquire(ulong *sp)
+{
+  bool rc= FALSE;
+  federatedx_io *io;
+  DBUG_ENTER("federatedx_txn::sp_acquire");
+  DBUG_ASSERT(sp && savepoint_next);
+  
+  *sp= savepoint_level= savepoint_next++;
+    
+  for (io= txn_list; io; io= io->txn_next)
+  {
+    if (io->readonly)
+      continue;
+
+    io->savepoint_set(savepoint_level);
+    rc= TRUE;
+  }
+
+  DBUG_RETURN(rc);
+}
+
+
+int federatedx_txn::sp_rollback(ulong *sp)
+{
+  ulong level, new_level= savepoint_level;
+  federatedx_io *io;
+  DBUG_ENTER("federatedx_txn::sp_rollback");
+  DBUG_ASSERT(sp && savepoint_next && *sp && *sp <= savepoint_level);
+  
+  for (io= txn_list; io; io= io->txn_next)
+  {
+    if (io->readonly)
+      continue;
+
+    if ((level= io->savepoint_rollback(*sp)) < new_level)
+      new_level= level;
+  } 
+  
+  savepoint_level= new_level;
+  
+  DBUG_RETURN(0);
+}
+
+
+int federatedx_txn::sp_release(ulong *sp)
+{
+  ulong level, new_level= savepoint_level;
+  federatedx_io *io;
+  DBUG_ENTER("federatedx_txn::sp_release");
+  DBUG_ASSERT(sp && savepoint_next && *sp && *sp <= savepoint_level);
+  
+  for (io= txn_list; io; io= io->txn_next)
+  {
+    if (io->readonly)
+      continue;
+
+    if ((level= io->savepoint_release(*sp)) < new_level)
+      new_level= level;
+  }
+
+  savepoint_level= new_level;
+  *sp= 0;
+
+  DBUG_RETURN(0);
+}
+
+
+bool federatedx_txn::stmt_begin()
+{
+  bool result= FALSE;
+  DBUG_ENTER("federatedx_txn::stmt_begin");
+
+  if (!savepoint_stmt)
+  {
+    if (!savepoint_next)
+    {
+      savepoint_next++;
+      savepoint_level= savepoint_stmt= 0;
+    }
+    result= sp_acquire(&savepoint_stmt);
+  }
+
+  DBUG_RETURN(result);
+}
+
+
+int federatedx_txn::stmt_commit()
+{ 
+  int result= 0;
+  DBUG_ENTER("federatedx_txn::stmt_commit");
+  
+  if (savepoint_stmt == 1)
+  {
+    savepoint_stmt= 0;
+    result= txn_commit();
+  }
+  else  
+  if (savepoint_stmt)
+    result= sp_release(&savepoint_stmt);
+
+  DBUG_RETURN(result);
+}
+
+
+int federatedx_txn::stmt_rollback()
+{
+  int result= 0;
+  DBUG_ENTER("federatedx_txn::stmt_rollback");
+
+  if (savepoint_stmt == 1)
+  {
+    savepoint_stmt= 0;
+    result= txn_rollback();
+  }
+  else
+  if (savepoint_stmt)
+  {
+    result= sp_rollback(&savepoint_stmt);
+    sp_release(&savepoint_stmt);
+  }
+  
+  DBUG_RETURN(result);
+}
+
+
+void federatedx_txn::stmt_autocommit()
+{
+  federatedx_io *io;
+  DBUG_ENTER("federatedx_txn::stmt_autocommit");
+
+  for (io= txn_list; savepoint_stmt && io; io= io->txn_next)
+  {
+    if (io->readonly)
+      continue;
+
+    io->savepoint_restrict(savepoint_stmt);
+  }
+
+  DBUG_VOID_RETURN;  
+}
+
+

=== added file 'storage/federatedx/ha_federatedx.cc'
--- a/storage/federatedx/ha_federatedx.cc	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/ha_federatedx.cc	2009-11-03 11:08:09 +0000
@@ -0,0 +1,3493 @@
+/*
+Copyright (c) 2008, Patrick Galbraith 
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+    * Neither the name of Patrick Galbraith nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+/*
+
+  FederatedX Pluggable Storage Engine
+
+  ha_federatedx.cc - FederatedX Pluggable Storage Engine
+  Patrick Galbraith, 2008
+
+  This is a handler which uses a foreign database as the data file, as
+  opposed to a handler like MyISAM, which uses .MYD files locally.
+
+  How this handler works
+  ----------------------------------
+  Normal database files are local and as such: You create a table called
+  'users', a file such as 'users.MYD' is created. A handler reads, inserts,
+  deletes, updates data in this file. The data is stored in particular format,
+  so to read, that data has to be parsed into fields, to write, fields have to
+  be stored in this format to write to this data file.
+
+  With FederatedX storage engine, there will be no local files
+  for each table's data (such as .MYD). A foreign database will store
+  the data that would normally be in this file. This will necessitate
+  the use of MySQL client API to read, delete, update, insert this
+  data. The data will have to be retrieve via an SQL call "SELECT *
+  FROM users". Then, to read this data, it will have to be retrieved
+  via mysql_fetch_row one row at a time, then converted from the
+  column in this select into the format that the handler expects.
+
+  The create table will simply create the .frm file, and within the
+  "CREATE TABLE" SQL, there SHALL be any of the following :
+
+  connection=scheme://username:password@hostname:port/database/tablename
+  connection=scheme://username@hostname/database/tablename
+  connection=scheme://username:password@hostname/database/tablename
+  connection=scheme://username:password@hostname/database/tablename
+
+  - OR -
+
+  As of 5.1 federatedx now allows you to use a non-url
+  format, taking advantage of mysql.servers:
+
+  connection="connection_one"
+  connection="connection_one/table_foo"
+
+  An example would be:
+
+  connection=mysql://username:password@hostname:port/database/tablename
+
+  or, if we had:
+
+  create server 'server_one' foreign data wrapper 'mysql' options
+  (HOST '127.0.0.1',
+  DATABASE 'db1',
+  USER 'root',
+  PASSWORD '',
+  PORT 3306,
+  SOCKET '',
+  OWNER 'root');
+
+  CREATE TABLE federatedx.t1 (
+    `id` int(20) NOT NULL,
+    `name` varchar(64) NOT NULL default ''
+    )
+  ENGINE="FEDERATEDX" DEFAULT CHARSET=latin1
+  CONNECTION='server_one';
+
+  So, this will have been the equivalent of
+
+  CONNECTION="mysql://root@127.0.0.1:3306/db1/t1"
+
+  Then, we can also change the server to point to a new schema:
+
+  ALTER SERVER 'server_one' options(DATABASE 'db2');
+
+  All subsequent calls will now be against db2.t1! Guess what? You don't
+  have to perform an alter table!
+
+  This connection="connection string" is necessary for the handler to be
+  able to connect to the foreign server, either by URL, or by server
+  name. 
+
+
+  The basic flow is this:
+
+  SQL calls issues locally ->
+  mysql handler API (data in handler format) ->
+  mysql client API (data converted to SQL calls) ->
+  foreign database -> mysql client API ->
+  convert result sets (if any) to handler format ->
+  handler API -> results or rows affected to local
+
+  What this handler does and doesn't support
+  ------------------------------------------
+  * Tables MUST be created on the foreign server prior to any action on those
+    tables via the handler, first version. IMPORTANT: IF you MUST use the
+    federatedx storage engine type on the REMOTE end, MAKE SURE [ :) ] That
+    the table you connect to IS NOT a table pointing BACK to your ORIGINAL
+    table! You know  and have heard the screeching of audio feedback? You
+    know putting two mirror in front of each other how the reflection
+    continues for eternity? Well, need I say more?!
+  * There will not be support for transactions.
+  * There is no way for the handler to know if the foreign database or table
+    has changed. The reason for this is that this database has to work like a
+    data file that would never be written to by anything other than the
+    database. The integrity of the data in the local table could be breached
+    if there was any change to the foreign database.
+  * Support for SELECT, INSERT, UPDATE , DELETE, indexes.
+  * No ALTER TABLE, DROP TABLE or any other Data Definition Language calls.
+  * Prepared statements will not be used in the first implementation, it
+    remains to to be seen whether the limited subset of the client API for the
+    server supports this.
+  * This uses SELECT, INSERT, UPDATE, DELETE and not HANDLER for its
+    implementation.
+  * This will not work with the query cache.
+
+   Method calls
+
+   A two column table, with one record:
+
+   (SELECT)
+
+   "SELECT * FROM foo"
+    ha_federatedx::info
+    ha_federatedx::scan_time:
+    ha_federatedx::rnd_init: share->select_query SELECT * FROM foo
+    ha_federatedx::extra
+
+    <for every row of data retrieved>
+    ha_federatedx::rnd_next
+    ha_federatedx::convert_row_to_internal_format
+    ha_federatedx::rnd_next
+    </for every row of data retrieved>
+
+    ha_federatedx::rnd_end
+    ha_federatedx::extra
+    ha_federatedx::reset
+
+    (INSERT)
+
+    "INSERT INTO foo (id, ts) VALUES (2, now());"
+
+    ha_federatedx::write_row
+
+    ha_federatedx::reset
+
+    (UPDATE)
+
+    "UPDATE foo SET ts = now() WHERE id = 1;"
+
+    ha_federatedx::index_init
+    ha_federatedx::index_read
+    ha_federatedx::index_read_idx
+    ha_federatedx::rnd_next
+    ha_federatedx::convert_row_to_internal_format
+    ha_federatedx::update_row
+
+    ha_federatedx::extra
+    ha_federatedx::extra
+    ha_federatedx::extra
+    ha_federatedx::external_lock
+    ha_federatedx::reset
+
+
+    How do I use this handler?
+    --------------------------
+
+    <insert text about plugin storage engine>
+
+    Next, to use this handler, it's very simple. You must
+    have two databases running, either both on the same host, or
+    on different hosts.
+
+    One the server that will be connecting to the foreign
+    host (client), you create your table as such:
+
+    CREATE TABLE test_table (
+      id     int(20) NOT NULL auto_increment,
+      name   varchar(32) NOT NULL default '',
+      other  int(20) NOT NULL default '0',
+      PRIMARY KEY  (id),
+      KEY name (name),
+      KEY other_key (other))
+       ENGINE="FEDERATEDX"
+       DEFAULT CHARSET=latin1
+       CONNECTION='mysql://root@127.0.0.1:9306/federatedx/test_federatedx';
+
+   Notice the "COMMENT" and "ENGINE" field? This is where you
+   respectively set the engine type, "FEDERATEDX" and foreign
+   host information, this being the database your 'client' database
+   will connect to and use as the "data file". Obviously, the foreign
+   database is running on port 9306, so you want to start up your other
+   database so that it is indeed on port 9306, and your federatedx
+   database on a port other than that. In my setup, I use port 5554
+   for federatedx, and port 5555 for the foreign database.
+
+   Then, on the foreign database:
+
+   CREATE TABLE test_table (
+     id     int(20) NOT NULL auto_increment,
+     name   varchar(32) NOT NULL default '',
+     other  int(20) NOT NULL default '0',
+     PRIMARY KEY  (id),
+     KEY name (name),
+     KEY other_key (other))
+     ENGINE="<NAME>" <-- whatever you want, or not specify
+     DEFAULT CHARSET=latin1 ;
+
+    This table is exactly the same (and must be exactly the same),
+    except that it is not using the federatedx handler and does
+    not need the URL.
+
+
+    How to see the handler in action
+    --------------------------------
+
+    When developing this handler, I compiled the federatedx database with
+    debugging:
+
+    ./configure --with-federatedx-storage-engine
+    --prefix=/home/mysql/mysql-build/federatedx/ --with-debug
+
+    Once compiled, I did a 'make install' (not for the purpose of installing
+    the binary, but to install all the files the binary expects to see in the
+    directory I specified in the build with --prefix,
+    "/home/mysql/mysql-build/federatedx".
+
+    Then, I started the foreign server:
+
+    /usr/local/mysql/bin/mysqld_safe
+    --user=mysql --log=/tmp/mysqld.5555.log -P 5555
+
+    Then, I went back to the directory containing the newly compiled mysqld,
+    <builddir>/sql/, started up gdb:
+
+    gdb ./mysqld
+
+    Then, within the (gdb) prompt:
+    (gdb) run --gdb --port=5554 --socket=/tmp/mysqld.5554 --skip-innodb --debug
+
+    Next, I open several windows for each:
+
+    1. Tail the debug trace: tail -f /tmp/mysqld.trace|grep ha_fed
+    2. Tail the SQL calls to the foreign database: tail -f /tmp/mysqld.5555.log
+    3. A window with a client open to the federatedx server on port 5554
+    4. A window with a client open to the federatedx server on port 5555
+
+    I would create a table on the client to the foreign server on port
+    5555, and then to the federatedx server on port 5554. At this point,
+    I would run whatever queries I wanted to on the federatedx server,
+    just always remembering that whatever changes I wanted to make on
+    the table, or if I created new tables, that I would have to do that
+    on the foreign server.
+
+    Another thing to look for is 'show variables' to show you that you have
+    support for federatedx handler support:
+
+    show variables like '%federat%'
+
+    and:
+
+    show storage engines;
+
+    Both should display the federatedx storage handler.
+
+
+    Testing
+    -------
+
+    Testing for FederatedX as a pluggable storage engine for
+    now is a manual process that I intend to build a test
+    suite that works for all pluggable storage engines.
+
+    How to test
+
+    1. cp fed.dat /tmp
+    (make sure you have access to "test". Use a user that has
+    super privileges for now)
+    2. mysql -f -u root test < federated.test > federated.myresult 2>&1
+    3. diff federated.result federated.myresult (there _should_ be no differences)
+
+
+*/
+
+
+#define MYSQL_SERVER 1
+#include "mysql_priv.h"
+#include <mysql/plugin.h>
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation                          // gcc: Class implementation
+#endif
+
+#include "ha_federatedx.h"
+
+#include "m_string.h"
+
+#include <mysql/plugin.h>
+
+/* Variables for federatedx share methods */
+static HASH federatedx_open_tables;              // To track open tables
+static HASH federatedx_open_servers;             // To track open servers
+pthread_mutex_t federatedx_mutex;                // To init the hash
+const char ident_quote_char= '`';               // Character for quoting
+                                                // identifiers
+const char value_quote_char= '\'';              // Character for quoting
+                                                // literals
+static const int bulk_padding= 64;              // bytes "overhead" in packet
+
+/* Variables used when chopping off trailing characters */
+static const uint sizeof_trailing_comma= sizeof(", ") - 1;
+static const uint sizeof_trailing_closeparen= sizeof(") ") - 1;
+static const uint sizeof_trailing_and= sizeof(" AND ") - 1;
+static const uint sizeof_trailing_where= sizeof(" WHERE ") - 1;
+
+/* Static declaration for handlerton */
+static handler *federatedx_create_handler(handlerton *hton,
+                                         TABLE_SHARE *table,
+                                         MEM_ROOT *mem_root);
+
+/* FederatedX storage engine handlerton */
+
+static handler *federatedx_create_handler(handlerton *hton, 
+                                         TABLE_SHARE *table,
+                                         MEM_ROOT *mem_root)
+{
+  return new (mem_root) ha_federatedx(hton, table);
+}
+
+
+/* Function we use in the creation of our hash to get key */
+
+static uchar *
+federatedx_share_get_key(FEDERATEDX_SHARE *share, size_t *length,
+                         my_bool not_used __attribute__ ((unused)))
+{
+  *length= share->share_key_length;
+  return (uchar*) share->share_key;
+}
+
+
+static uchar *
+federatedx_server_get_key(FEDERATEDX_SERVER *server, size_t *length,
+                          my_bool not_used __attribute__ ((unused)))
+{
+  *length= server->key_length;
+  return server->key;
+}
+
+
+/*
+  Initialize the federatedx handler.
+
+  SYNOPSIS
+    federatedx_db_init()
+    p		Handlerton
+
+  RETURN
+    FALSE       OK
+    TRUE        Error
+*/
+
+int federatedx_db_init(void *p)
+{
+  DBUG_ENTER("federatedx_db_init");
+  handlerton *federatedx_hton= (handlerton *)p;
+  federatedx_hton->state= SHOW_OPTION_YES;
+  /* This is no longer needed for plugin storage engines */
+  federatedx_hton->db_type= DB_TYPE_DEFAULT;
+  federatedx_hton->savepoint_offset= sizeof(ulong);
+  federatedx_hton->close_connection= ha_federatedx::disconnect;
+  federatedx_hton->savepoint_set= ha_federatedx::savepoint_set;
+  federatedx_hton->savepoint_rollback= ha_federatedx::savepoint_rollback;
+  federatedx_hton->savepoint_release= ha_federatedx::savepoint_release;
+  federatedx_hton->commit= ha_federatedx::commit;
+  federatedx_hton->rollback= ha_federatedx::rollback;
+  federatedx_hton->create= federatedx_create_handler;
+  federatedx_hton->flags= HTON_ALTER_NOT_SUPPORTED | HTON_NO_PARTITION;
+
+  if (pthread_mutex_init(&federatedx_mutex, MY_MUTEX_INIT_FAST))
+    goto error;
+  if (!hash_init(&federatedx_open_tables, &my_charset_bin, 32, 0, 0,
+                 (hash_get_key) federatedx_share_get_key, 0, 0) &&
+      !hash_init(&federatedx_open_servers, &my_charset_bin, 32, 0, 0,
+                 (hash_get_key) federatedx_server_get_key, 0, 0))
+  {
+    DBUG_RETURN(FALSE);
+  }
+
+  VOID(pthread_mutex_destroy(&federatedx_mutex));
+error:
+  DBUG_RETURN(TRUE);
+}
+
+
+/*
+  Release the federatedx handler.
+
+  SYNOPSIS
+    federatedx_db_end()
+
+  RETURN
+    FALSE       OK
+*/
+
+int federatedx_done(void *p)
+{
+  hash_free(&federatedx_open_tables);
+  hash_free(&federatedx_open_servers);
+  VOID(pthread_mutex_destroy(&federatedx_mutex));
+
+  return 0;
+}
+
+/**
+  @brief Append identifiers to the string.
+
+  @param[in,out] string	The target string.
+  @param[in] name 		Identifier name
+  @param[in] length 	Length of identifier name in bytes
+  @param[in] quote_char Quote char to use for quoting identifier.
+
+  @return Operation Status
+  @retval FALSE OK
+  @retval TRUE  There was an error appending to the string.
+
+  @note This function is based upon the append_identifier() function
+        in sql_show.cc except that quoting always occurs.
+*/
+
+bool append_ident(String *string, const char *name, uint length,
+                  const char quote_char)
+{
+  bool result;
+  uint clen;
+  const char *name_end;
+  DBUG_ENTER("append_ident");
+
+  if (quote_char)
+  {
+    string->reserve(length * 2 + 2);
+    if ((result= string->append(&quote_char, 1, system_charset_info)))
+      goto err;
+
+    for (name_end= name+length; name < name_end; name+= clen)
+    {
+      uchar c= *(uchar *) name;
+      if (!(clen= my_mbcharlen(system_charset_info, c)))
+        clen= 1;
+      if (clen == 1 && c == (uchar) quote_char &&
+          (result= string->append(&quote_char, 1, system_charset_info)))
+        goto err;
+      if ((result= string->append(name, clen, string->charset())))
+        goto err;
+    }
+    result= string->append(&quote_char, 1, system_charset_info);
+  }
+  else
+    result= string->append(name, length, system_charset_info);
+
+err:
+  DBUG_RETURN(result);
+}
+
+
+static int parse_url_error(FEDERATEDX_SHARE *share, TABLE *table, int error_num)
+{
+  char buf[FEDERATEDX_QUERY_BUFFER_SIZE];
+  int buf_len;
+  DBUG_ENTER("ha_federatedx parse_url_error");
+
+  buf_len= min(table->s->connect_string.length,
+               FEDERATEDX_QUERY_BUFFER_SIZE-1);
+  strmake(buf, table->s->connect_string.str, buf_len);
+  my_error(error_num, MYF(0), buf);
+  DBUG_RETURN(error_num);
+}
+
+/*
+  retrieve server object which contains server meta-data 
+  from the system table given a server's name, set share
+  connection parameter members
+*/
+int get_connection(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share)
+{
+  int error_num= ER_FOREIGN_SERVER_DOESNT_EXIST;
+  char error_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  FOREIGN_SERVER *server, server_buffer;
+  DBUG_ENTER("ha_federatedx::get_connection");
+
+  /*
+    get_server_by_name() clones the server if exists and allocates
+	copies of strings in the supplied mem_root
+  */
+  if (!(server=
+       get_server_by_name(mem_root, share->connection_string, &server_buffer)))
+  {
+    DBUG_PRINT("info", ("get_server_by_name returned > 0 error condition!"));
+    /* need to come up with error handling */
+    error_num=1;
+    goto error;
+  }
+  DBUG_PRINT("info", ("get_server_by_name returned server at %lx",
+                      (long unsigned int) server));
+
+  /*
+    Most of these should never be empty strings, error handling will
+    need to be implemented. Also, is this the best way to set the share
+    members? Is there some allocation needed? In running this code, it works
+    except there are errors in the trace file of the share being overrun 
+    at the address of the share.
+  */
+  share->server_name_length= server->server_name_length;
+  share->server_name= server->server_name;
+  share->username= server->username;
+  share->password= server->password;
+  share->database= server->db;
+#ifndef I_AM_PARANOID
+  share->port= server->port > 0 && server->port < 65536 ? 
+#else
+  share->port= server->port > 1023 && server->port < 65536 ? 
+#endif
+               (ushort) server->port : MYSQL_PORT;
+  share->hostname= server->host;
+  if (!(share->socket= server->socket) &&
+      !strcmp(share->hostname, my_localhost))
+    share->socket= (char *) MYSQL_UNIX_ADDR;
+  share->scheme= server->scheme;
+
+  DBUG_PRINT("info", ("share->username: %s", share->username));
+  DBUG_PRINT("info", ("share->password: %s", share->password));
+  DBUG_PRINT("info", ("share->hostname: %s", share->hostname));
+  DBUG_PRINT("info", ("share->database: %s", share->database));
+  DBUG_PRINT("info", ("share->port:     %d", share->port));
+  DBUG_PRINT("info", ("share->socket:   %s", share->socket));
+  DBUG_RETURN(0);
+
+error:
+  my_sprintf(error_buffer,
+             (error_buffer, "server name: '%s' doesn't exist!",
+              share->connection_string));
+  my_error(error_num, MYF(0), error_buffer);
+  DBUG_RETURN(error_num);
+}
+
+/*
+  Parse connection info from table->s->connect_string
+
+  SYNOPSIS
+    parse_url()
+    mem_root            MEM_ROOT pointer for memory allocation
+    share               pointer to FEDERATEDX share
+    table               pointer to current TABLE class
+    table_create_flag   determines what error to throw
+
+  DESCRIPTION
+    Populates the share with information about the connection
+    to the foreign database that will serve as the data source.
+    This string must be specified (currently) in the "CONNECTION" field,
+    listed in the CREATE TABLE statement.
+
+    This string MUST be in the format of any of these:
+
+    CONNECTION="scheme://username:password@hostname:port/database/table"
+    CONNECTION="scheme://username@hostname/database/table"
+    CONNECTION="scheme://username@hostname:port/database/table"
+    CONNECTION="scheme://username:password@hostname/database/table"
+
+    _OR_
+
+    CONNECTION="connection name"
+
+    
+
+  An Example:
+
+  CREATE TABLE t1 (id int(32))
+    ENGINE="FEDERATEDX"
+    CONNECTION="mysql://joe:joespass@192.168.1.111:9308/federatedx/testtable";
+
+  CREATE TABLE t2 (
+    id int(4) NOT NULL auto_increment,
+    name varchar(32) NOT NULL,
+    PRIMARY KEY(id)
+    ) ENGINE="FEDERATEDX" CONNECTION="my_conn";
+
+  ***IMPORTANT***
+  Currently, the FederatedX Storage Engine only supports connecting to another
+  Database ("scheme" of "mysql"). Connections using JDBC as well as 
+  other connectors are in the planning stage.
+  
+
+  'password' and 'port' are both optional.
+
+  RETURN VALUE
+    0           success
+    error_num   particular error code 
+
+*/
+
+static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share, TABLE *table,
+                     uint table_create_flag)
+{
+  /* Report the CREATE-specific error variant when called from table creation */
+  uint error_num= (table_create_flag ?
+                   ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE :
+                   ER_FOREIGN_DATA_STRING_INVALID);
+  DBUG_ENTER("ha_federatedx::parse_url");
+
+  share->port= 0;
+  share->socket= 0;
+  DBUG_PRINT("info", ("share at %lx", (long unsigned int) share));
+  DBUG_PRINT("info", ("Length: %u", (uint) table->s->connect_string.length));
+  DBUG_PRINT("info", ("String: '%.*s'", (int) table->s->connect_string.length,
+                      table->s->connect_string.str));
+  /*
+    Work on a private nul-terminated copy of the CONNECTION string allocated
+    on mem_root; the parsing below is destructive (it writes '\0' separators
+    into the copy and keeps pointers into it in 'share').
+  */
+  share->connection_string= strmake_root(mem_root, table->s->connect_string.str,
+                                       table->s->connect_string.length);
+
+  DBUG_PRINT("info",("parse_url alloced share->connection_string %lx",
+                     (long unsigned int) share->connection_string));
+
+  DBUG_PRINT("info",("share->connection_string: %s",share->connection_string));
+  /*
+    No :// or @ in connection string. Must be a straight connection name of
+    either "servername" or "servername/tablename"
+  */
+  if ((!strstr(share->connection_string, "://") &&
+       (!strchr(share->connection_string, '@'))))
+  {
+
+    DBUG_PRINT("info",
+               ("share->connection_string: %s  internal format "
+                "share->connection_string: %lx",
+                share->connection_string,
+                (ulong) share->connection_string));
+
+    /* ok, so we do a little parsing, but not completely! */
+    share->parsed= FALSE;
+    /*
+      If there is a single '/' in the connection string, this means the user is
+      specifying a table name
+    */
+
+    if ((share->table_name= strchr(share->connection_string, '/')))
+    {
+      /* Split "servername/tablename" at the '/' */
+      *share->table_name++= '\0';
+      share->table_name_length= strlen(share->table_name);
+
+      DBUG_PRINT("info", 
+                 ("internal format, parsed table_name "
+                  "share->connection_string: %s  share->table_name: %s",
+                  share->connection_string, share->table_name));
+
+      /*
+        there better not be any more '/'s !
+      */
+      if (strchr(share->table_name, '/'))
+        goto error;
+    }
+    /*
+      Otherwise, straight server name, use tablename of federatedx table
+      as remote table name
+    */
+    else
+    {
+      /*
+        Connection specifies everything but, resort to
+        expecting remote and foreign table names to match
+      */
+      share->table_name= strmake_root(mem_root, table->s->table_name.str,
+                                      (share->table_name_length=
+                                       table->s->table_name.length));
+      DBUG_PRINT("info", 
+                 ("internal format, default table_name "
+                  "share->connection_string: %s  share->table_name: %s",
+                  share->connection_string, share->table_name));
+    }
+
+    /* Look up the named server created with CREATE SERVER */
+    if ((error_num= get_connection(mem_root, share)))
+      goto error;
+  }
+  else
+  {
+    /* Full URL form: scheme://user[:pass]@host[:port]/db/table */
+    share->parsed= TRUE;
+    // Add a null for later termination of table name
+    share->connection_string[table->s->connect_string.length]= 0;
+    share->scheme= share->connection_string;
+    DBUG_PRINT("info",("parse_url alloced share->scheme: %lx",
+                       (ulong) share->scheme));
+
+    /*
+      Remove addition of null terminator and store length
+      for each string  in share
+    */
+    if (!(share->username= strstr(share->scheme, "://")))
+      goto error;
+    /* Terminate the scheme at the start of "://" */
+    share->scheme[share->username - share->scheme]= '\0';
+
+    if (!federatedx_io::handles_scheme(share->scheme))
+      goto error;
+
+    /* Skip past "://" to the start of the user name */
+    share->username+= 3;
+
+    if (!(share->hostname= strchr(share->username, '@')))
+      goto error;
+    *share->hostname++= '\0';                   // End username
+
+    if ((share->password= strchr(share->username, ':')))
+    {
+      *share->password++= '\0';                 // End username
+
+      /* make sure there isn't an extra / or @ */
+      if ((strchr(share->password, '/') || strchr(share->hostname, '@')))
+        goto error;
+      /*
+        Found that if the string is:
+        user:@hostname:port/db/table
+        Then password is a null string, so set to NULL
+      */
+      if ((share->password[0] == '\0'))
+        share->password= NULL;
+    }
+
+    /* make sure there isn't an extra / or @ */
+    if ((strchr(share->username, '/')) || (strchr(share->hostname, '@')))
+      goto error;
+
+    if (!(share->database= strchr(share->hostname, '/')))
+      goto error;
+    *share->database++= '\0';
+
+    /* Optional ":port" after the hostname */
+    if ((share->sport= strchr(share->hostname, ':')))
+    {
+      *share->sport++= '\0';
+      if (share->sport[0] == '\0')
+        share->sport= NULL;
+      else
+        share->port= atoi(share->sport);
+    }
+
+    if (!(share->table_name= strchr(share->database, '/')))
+      goto error;
+    *share->table_name++= '\0';
+
+    share->table_name_length= strlen(share->table_name);
+
+    /* make sure there's not an extra / */
+    if ((strchr(share->table_name, '/')))
+      goto error;
+
+    /* "scheme://user@/db/table" means no hostname was given */
+    if (share->hostname[0] == '\0')
+      share->hostname= NULL;
+
+  }
+  /* No explicit port: localhost connects via unix socket, else default port */
+  if (!share->port)
+  {
+    if (!share->hostname || strcmp(share->hostname, my_localhost) == 0)
+      share->socket= (char *) MYSQL_UNIX_ADDR;
+    else
+      share->port= MYSQL_PORT;
+  }
+
+  DBUG_PRINT("info",
+             ("scheme: %s  username: %s  password: %s  hostname: %s  "
+              "port: %d  db: %s  tablename: %s",
+              share->scheme, share->username, share->password,
+              share->hostname, share->port, share->database,
+              share->table_name));
+
+  DBUG_RETURN(0);
+
+error:
+  DBUG_RETURN(parse_url_error(share, table, error_num));
+}
+
+/*****************************************************************************
+** FEDERATEDX tables
+*****************************************************************************/
+
+/*
+  Construct a handler instance for one open FEDERATEDX table.
+  No remote connection is made here: txn, io and stored_result stay
+  unset until the table is actually opened and used.
+*/
+ha_federatedx::ha_federatedx(handlerton *hton, TABLE_SHARE *table_arg)
+  :handler(hton, table_arg), txn(0), io(0), stored_result(0)
+{
+  bzero(&bulk_insert, sizeof(bulk_insert));
+}
+
+
+/*
+  Convert MySQL result set row to handler internal format
+
+  SYNOPSIS
+    convert_row_to_internal_format()
+      record    Byte pointer to record
+      row       MySQL result set row from fetchrow()
+      result	Result set to use
+
+  DESCRIPTION
+    This method simply iterates through a row returned via fetchrow with
+    values from a successful SELECT , and then stores each column's value
+    in the field object via the field object pointer (pointing to the table's
+    array of field object pointers). This is how the handler needs the data
+    to be stored to then return results back to the user
+
+  RETURN VALUE
+    0   After fields have had field values stored from record
+*/
+
+uint ha_federatedx::convert_row_to_internal_format(uchar *record,
+                                                  FEDERATEDX_IO_ROW *row,
+                                                  FEDERATEDX_IO_RESULT *result)
+{
+  ulong *lengths;
+  Field **field;
+  int column= 0;
+  /* Temporarily mark all columns writable so Field::store() is allowed */
+  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+  DBUG_ENTER("ha_federatedx::convert_row_to_internal_format");
+
+  lengths= io->fetch_lengths(result);
+
+  for (field= table->field; *field; field++, column++)
+  {
+    /*
+      index variable to move us through the row at the
+      same iterative step as the field
+    */
+    my_ptrdiff_t old_ptr;
+    /* Shift field pointers so stores land in 'record' instead of record[0] */
+    old_ptr= (my_ptrdiff_t) (record - table->record[0]);
+    (*field)->move_field_offset(old_ptr);
+    if (io->is_column_null(row, column))
+      (*field)->set_null();
+    else
+    {
+      /* Only columns the current statement actually reads need a value */
+      if (bitmap_is_set(table->read_set, (*field)->field_index))
+      {
+        (*field)->set_notnull();
+        (*field)->store(io->get_column_data(row, column), lengths[column], &my_charset_bin);
+      }
+    }
+    /* Undo the pointer shift before moving to the next field */
+    (*field)->move_field_offset(-old_ptr);
+  }
+  dbug_tmp_restore_column_map(table->write_set, old_map);
+  DBUG_RETURN(0);
+}
+
+/*
+  Append the quoted identifier of the field behind one key part to 'to'.
+  Returns 1 on out-of-memory, 0 on success.
+*/
+static bool emit_key_part_name(String *to, KEY_PART_INFO *part)
+{
+  DBUG_ENTER("emit_key_part_name");
+  const char *name= part->field->field_name;
+  if (append_ident(to, name, strlen(name), ident_quote_char))
+    DBUG_RETURN(1);                           // Out of memory
+  DBUG_RETURN(0);
+}
+
+/*
+  Append one key part value to 'to', rendered for use in a remote SQL
+  statement: BIT columns become 0x... literals, blob/varchar parts are
+  escaped, everything else is printed via Field::val_str().  A '%' is
+  appended when building a LIKE pattern, and the whole value is wrapped
+  in single quotes when the column type requires quoting.
+  Returns 1 on out-of-memory, 0 on success.
+*/
+static bool emit_key_part_element(String *to, KEY_PART_INFO *part,
+                                  bool needs_quotes, bool is_like,
+                                  const uchar *ptr, uint len)
+{
+  Field *field= part->field;
+  DBUG_ENTER("emit_key_part_element");
+
+  if (needs_quotes && to->append(STRING_WITH_LEN("'")))
+    DBUG_RETURN(1);
+
+  if (part->type == HA_KEYTYPE_BIT)
+  {
+    /* Render the raw bytes as a hexadecimal literal: 0xABCD... */
+    char hexbuf[STRING_BUFFER_USUAL_SIZE];
+    char *end= hexbuf;
+
+    *end++= '0';
+    *end++= 'x';
+    end= octet2hex(end, (char*) ptr, len);
+    if (to->append((char*) hexbuf, (uint) (end - hexbuf)))
+      DBUG_RETURN(1);
+  }
+  else if (part->key_part_flag & HA_BLOB_PART)
+  {
+    /* Blob key part: a 2-byte length prefix precedes the data */
+    String blob;
+    uint data_len= uint2korr(ptr);
+
+    blob.set_quick((char*) ptr + HA_KEY_BLOB_LENGTH,
+                   data_len, &my_charset_bin);
+    if (append_escaped(to, &blob))
+      DBUG_RETURN(1);
+  }
+  else if (part->key_part_flag & HA_VAR_LENGTH_PART)
+  {
+    /* VARCHAR key part: same 2-byte length prefix as blobs */
+    String varchar;
+    uint data_len= uint2korr(ptr);
+
+    varchar.set_quick((char*) ptr + HA_KEY_BLOB_LENGTH,
+                      data_len, &my_charset_bin);
+    if (append_escaped(to, &varchar))
+      DBUG_RETURN(1);
+  }
+  else
+  {
+    /* Fixed-length part: let the field format its own value */
+    char strbuff[MAX_FIELD_WIDTH];
+    String str(strbuff, sizeof(strbuff), part->field->charset());
+    String *res= field->val_str(&str, ptr);
+
+    if (field->result_type() == STRING_RESULT)
+    {
+      if (append_escaped(to, res))
+        DBUG_RETURN(1);
+    }
+    else if (to->append(res->ptr(), res->length()))
+      DBUG_RETURN(1);
+  }
+
+  if (is_like && to->append(STRING_WITH_LEN("%")))
+    DBUG_RETURN(1);
+
+  if (needs_quotes && to->append(STRING_WITH_LEN("'")))
+    DBUG_RETURN(1);
+
+  DBUG_RETURN(0);
+}
+
+/*
+  Create a WHERE clause based off of values in keys
+  Note: This code was inspired by key_copy from key.cc
+
+  SYNOPSIS
+    create_where_from_key ()
+      to          String object to store WHERE clause
+      key_info    KEY struct pointer
+      key         byte pointer containing key
+      key_length  length of key
+      range_type  0 - no range, 1 - min range, 2 - max range
+                  (see enum range_operation)
+
+  DESCRIPTION
+    Using iteration through all the keys via a KEY_PART_INFO pointer,
+    This method 'extracts' the value of each key in the byte pointer
+    *key, and for each key found, constructs an appropriate WHERE clause
+
+  RETURN VALUE
+    0   After all keys have been accounted for to create the WHERE clause
+    1   No keys found
+
+    Range flags Table per Timour:
+
+   -----------------
+   - start_key:
+     * ">"  -> HA_READ_AFTER_KEY
+     * ">=" -> HA_READ_KEY_OR_NEXT
+     * "="  -> HA_READ_KEY_EXACT
+
+   - end_key:
+     * "<"  -> HA_READ_BEFORE_KEY
+     * "<=" -> HA_READ_AFTER_KEY
+
+   records_in_range:
+   -----------------
+   - start_key:
+     * ">"  -> HA_READ_AFTER_KEY
+     * ">=" -> HA_READ_KEY_EXACT
+     * "="  -> HA_READ_KEY_EXACT
+
+   - end_key:
+     * "<"  -> HA_READ_BEFORE_KEY
+     * "<=" -> HA_READ_AFTER_KEY
+     * "="  -> HA_READ_AFTER_KEY
+
+0 HA_READ_KEY_EXACT,              Find first record else error
+1 HA_READ_KEY_OR_NEXT,            Record or next record
+2 HA_READ_KEY_OR_PREV,            Record or previous
+3 HA_READ_AFTER_KEY,              Find next rec. after key-record
+4 HA_READ_BEFORE_KEY,             Find next rec. before key-record
+5 HA_READ_PREFIX,                 Key which has same prefix
+6 HA_READ_PREFIX_LAST,            Last key with the same prefix
+7 HA_READ_PREFIX_LAST_OR_PREV,    Last or prev key with the same prefix
+
+Flags that I've found:
+
+id, primary key, varchar
+
+id = 'ccccc'
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 0 end_key NULL
+
+id > 'ccccc'
+records_in_range: start_key 3 end_key NULL
+read_range_first: start_key 3 end_key NULL
+
+id < 'ccccc'
+records_in_range: start_key NULL end_key 4
+read_range_first: start_key NULL end_key 4
+
+id <= 'ccccc'
+records_in_range: start_key NULL end_key 3
+read_range_first: start_key NULL end_key 3
+
+id >= 'ccccc'
+records_in_range: start_key 0 end_key NULL
+read_range_first: start_key 1 end_key NULL
+
+id like 'cc%cc'
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 'aaaaa' and id < 'ccccc'
+records_in_range: start_key 3 end_key 4
+read_range_first: start_key 3 end_key 4
+
+id >= 'aaaaa' and id < 'ccccc';
+records_in_range: start_key 0 end_key 4
+read_range_first: start_key 1 end_key 4
+
+id >= 'aaaaa' and id <= 'ccccc';
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 'aaaaa' and id <= 'ccccc';
+records_in_range: start_key 3 end_key 3
+read_range_first: start_key 3 end_key 3
+
+numeric keys:
+
+id = 4
+index_read_idx: start_key 0 end_key NULL 
+
+id > 4
+records_in_range: start_key 3 end_key NULL
+read_range_first: start_key 3 end_key NULL
+
+id >= 4
+records_in_range: start_key 0 end_key NULL
+read_range_first: start_key 1 end_key NULL
+
+id < 4
+records_in_range: start_key NULL end_key 4
+read_range_first: start_key NULL end_key 4
+
+id <= 4
+records_in_range: start_key NULL end_key 3
+read_range_first: start_key NULL end_key 3
+
+id like 4
+full table scan, select * from
+
+id > 2 and id < 8
+records_in_range: start_key 3 end_key 4
+read_range_first: start_key 3 end_key 4
+
+id >= 2 and id < 8
+records_in_range: start_key 0 end_key 4
+read_range_first: start_key 1 end_key 4
+
+id >= 2 and id <= 8
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 2 and id <= 8
+records_in_range: start_key 3 end_key 3
+read_range_first: start_key 3 end_key 3
+
+multi keys (id int, name varchar, other varchar)
+
+id = 1;
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 0 end_key NULL
+
+id > 4;
+id > 2 and name = '333'; remote: id > 2
+id > 2 and name > '333'; remote: id > 2
+id > 2 and name > '333' and other < 'ddd'; remote: id > 2 no results
+id > 2 and name >= '333' and other < 'ddd'; remote: id > 2 1 result
+id >= 4 and name = 'eric was here' and other > 'eeee';
+records_in_range: start_key 3 end_key NULL
+read_range_first: start_key 3 end_key NULL
+
+id >= 4;
+id >= 2 and name = '333' and other < 'ddd';
+remote: `id`  >= 2 AND `name`  >= '333';
+records_in_range: start_key 0 end_key NULL
+read_range_first: start_key 1 end_key NULL
+
+id < 4;
+id < 3 and name = '222' and other <= 'ccc'; remote: id < 3
+records_in_range: start_key NULL end_key 4
+read_range_first: start_key NULL end_key 4
+
+id <= 4;
+records_in_range: start_key NULL end_key 3
+read_range_first: start_key NULL end_key 3
+
+id like 4;
+full table scan
+
+id  > 2 and id < 4;
+records_in_range: start_key 3 end_key 4
+read_range_first: start_key 3 end_key 4
+
+id >= 2 and id < 4;
+records_in_range: start_key 0 end_key 4
+read_range_first: start_key 1 end_key 4
+
+id >= 2 and id <= 4;
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 2 and id <= 4;
+id = 6 and name = 'eric was here' and other > 'eeee';
+remote: (`id`  > 6 AND `name`  > 'eric was here' AND `other`  > 'eeee')
+AND (`id`  <= 6) AND ( AND `name`  <= 'eric was here')
+no results
+records_in_range: start_key 3 end_key 3
+read_range_first: start_key 3 end_key 3
+
+Summary:
+
+* If the start key flag is 0 the max key flag shouldn't even be set, 
+  and if it is, the query produced would be invalid.
+* Multipart keys, even if containing some or all numeric columns,
+  are treated the same as non-numeric keys
+
+  If the query is " = " (quotes or not):
+  - records in range start key flag HA_READ_KEY_EXACT,
+    end key flag HA_READ_AFTER_KEY (incorrect)
+  - any other: start key flag HA_READ_KEY_OR_NEXT,
+    end key flag HA_READ_AFTER_KEY (correct)
+
+* 'like' queries (of key)
+  - Numeric, full table scan
+  - Non-numeric
+      records_in_range: start_key 0 end_key 3
+      other : start_key 1 end_key 3
+
+* If the key flag is HA_READ_AFTER_KEY:
+   if start_key, append >
+   if end_key, append <=
+
+* If create_where_key was called by records_in_range:
+
+ - if the key is numeric:
+    start key flag is 0 when end key is NULL, end key flag is 3 or 4
+ - if create_where_key was called by any other function:
+    start key flag is 1 when end key is NULL, end key flag is 3 or 4
+ - if the key is non-numeric, or multipart
+    When the query is an exact match, the start key flag is 0,
+    end key flag is 3 for what should be a no-range condition where
+    you should have 0 and max key NULL, which it is if called by
+    read_range_first
+
+Conclusion:
+
+1. Need logic to determine if a key is min or max when the flag is
+HA_READ_AFTER_KEY, and handle appending correct operator accordingly
+
+2. Need a boolean flag to pass to create_where_from_key, used in the
+switch statement. Add 1 to the flag if:
+  - start key flag is HA_READ_KEY_EXACT and the end key is NULL
+
+*/
+
+bool ha_federatedx::create_where_from_key(String *to,
+                                         KEY *key_info,
+                                         const key_range *start_key,
+                                         const key_range *end_key,
+                                         bool from_records_in_range,
+                                         bool eq_range)
+{
+  /* TRUE when this is a two-sided range: emit "(start) AND (end)" */
+  bool both_not_null=
+    (start_key != NULL && end_key != NULL) ? TRUE : FALSE;
+  const uchar *ptr;
+  uint remainder, length;
+  char tmpbuff[FEDERATEDX_QUERY_BUFFER_SIZE];
+  String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info);
+  const key_range *ranges[2]= { start_key, end_key };
+  my_bitmap_map *old_map;
+  DBUG_ENTER("ha_federatedx::create_where_from_key");
+
+  tmp.length(0); 
+  if (start_key == NULL && end_key == NULL)
+    DBUG_RETURN(1);
+
+  /* Allow reading all columns while val_str() formats key values */
+  old_map= dbug_tmp_use_all_columns(table, table->write_set);
+  /* i == 0 processes the start key, i == 1 the end key */
+  for (uint i= 0; i <= 1; i++)
+  {
+    bool needs_quotes;
+    KEY_PART_INFO *key_part;
+    if (ranges[i] == NULL)
+      continue;
+
+    if (both_not_null)
+    {
+      if (i > 0)
+        tmp.append(STRING_WITH_LEN(") AND ("));
+      else
+        tmp.append(STRING_WITH_LEN(" ("));
+    }
+
+    /* Walk the key parts covered by this key_range, consuming 'length' bytes */
+    for (key_part= key_info->key_part,
+         remainder= key_info->key_parts,
+         length= ranges[i]->length,
+         ptr= ranges[i]->key; ;
+         remainder--,
+         key_part++)
+    {
+      Field *field= key_part->field;
+      uint store_length= key_part->store_length;
+      uint part_length= min(store_length, length);
+      needs_quotes= field->str_needs_quotes();
+      DBUG_DUMP("key, start of loop", ptr, length);
+
+      if (key_part->null_bit)
+      {
+        /* First byte of a nullable key part is its null indicator */
+        if (*ptr++)
+        {
+          /*
+            We got "IS [NOT] NULL" condition against nullable column. We
+            distinguish between "IS NOT NULL" and "IS NULL" by flag. For
+            "IS NULL", flag is set to HA_READ_KEY_EXACT.
+          */
+          if (emit_key_part_name(&tmp, key_part) ||
+              tmp.append(ranges[i]->flag == HA_READ_KEY_EXACT ?
+                         " IS NULL " : " IS NOT NULL "))
+            goto err;
+          /*
+            We need to adjust pointer and length to be prepared for next
+            key part. As well as check if this was last key part.
+          */
+          goto prepare_for_next_key_part;
+        }
+      }
+
+      if (tmp.append(STRING_WITH_LEN(" (")))
+        goto err;
+
+      switch (ranges[i]->flag) {
+      case HA_READ_KEY_EXACT:
+        DBUG_PRINT("info", ("federatedx HA_READ_KEY_EXACT %d", i));
+        if (store_length >= length ||
+            !needs_quotes ||
+            key_part->type == HA_KEYTYPE_BIT ||
+            field->result_type() != STRING_RESULT)
+        {
+          if (emit_key_part_name(&tmp, key_part))
+            goto err;
+
+          /* records_in_range treats an exact start key as ">=" (see table above) */
+          if (from_records_in_range)
+          {
+            if (tmp.append(STRING_WITH_LEN(" >= ")))
+              goto err;
+          }
+          else
+          {
+            if (tmp.append(STRING_WITH_LEN(" = ")))
+              goto err;
+          }
+
+          if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+                                    part_length))
+            goto err;
+        }
+        else
+        {
+          /* LIKE */
+          if (emit_key_part_name(&tmp, key_part) ||
+              tmp.append(STRING_WITH_LEN(" LIKE ")) ||
+              emit_key_part_element(&tmp, key_part, needs_quotes, 1, ptr,
+                                    part_length))
+            goto err;
+        }
+        break;
+      case HA_READ_AFTER_KEY:
+        if (eq_range)
+        {
+          if (tmp.append("1=1"))                // Dummy
+            goto err;
+          break;
+        }
+        DBUG_PRINT("info", ("federatedx HA_READ_AFTER_KEY %d", i));
+        if (store_length >= length) /* end key */
+        {
+          if (emit_key_part_name(&tmp, key_part))
+            goto err;
+
+          if (i > 0) /* end key */
+          {
+            if (tmp.append(STRING_WITH_LEN(" <= ")))
+              goto err;
+          }
+          else /* start key */
+          {
+            if (tmp.append(STRING_WITH_LEN(" > ")))
+              goto err;
+          }
+
+          if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+                                    part_length))
+          {
+            goto err;
+          }
+          break;
+        }
+        /* fall through: partial key prefix, use inclusive comparison */
+      case HA_READ_KEY_OR_NEXT:
+        DBUG_PRINT("info", ("federatedx HA_READ_KEY_OR_NEXT %d", i));
+        if (emit_key_part_name(&tmp, key_part) ||
+            tmp.append(STRING_WITH_LEN(" >= ")) ||
+            emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+              part_length))
+          goto err;
+        break;
+      case HA_READ_BEFORE_KEY:
+        DBUG_PRINT("info", ("federatedx HA_READ_BEFORE_KEY %d", i));
+        if (store_length >= length)
+        {
+          if (emit_key_part_name(&tmp, key_part) ||
+              tmp.append(STRING_WITH_LEN(" < ")) ||
+              emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+                                    part_length))
+            goto err;
+          break;
+        }
+        /* fall through: partial key prefix, use inclusive comparison */
+      case HA_READ_KEY_OR_PREV:
+        DBUG_PRINT("info", ("federatedx HA_READ_KEY_OR_PREV %d", i));
+        if (emit_key_part_name(&tmp, key_part) ||
+            tmp.append(STRING_WITH_LEN(" <= ")) ||
+            emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+                                  part_length))
+          goto err;
+        break;
+      default:
+        DBUG_PRINT("info",("cannot handle flag %d", ranges[i]->flag));
+        goto err;
+      }
+      if (tmp.append(STRING_WITH_LEN(") ")))
+        goto err;
+
+prepare_for_next_key_part:
+      /* Last key part consumed the remaining bytes: done with this range */
+      if (store_length >= length)
+        break;
+      DBUG_PRINT("info", ("remainder %d", remainder));
+      DBUG_ASSERT(remainder > 1);
+      length-= store_length;
+      /*
+        For nullable columns, null-byte is already skipped before, that is
+        ptr was incremented by 1. Since store_length still counts null-byte,
+        we need to subtract 1 from store_length.
+      */
+      ptr+= store_length - test(key_part->null_bit);
+      if (tmp.append(STRING_WITH_LEN(" AND ")))
+        goto err;
+
+      DBUG_PRINT("info",
+                 ("create_where_from_key WHERE clause: %s",
+                  tmp.c_ptr_quick()));
+    }
+  }
+  dbug_tmp_restore_column_map(table->write_set, old_map);
+
+  if (both_not_null)
+    if (tmp.append(STRING_WITH_LEN(") ")))
+      DBUG_RETURN(1);
+
+  if (to->append(STRING_WITH_LEN(" WHERE ")))
+    DBUG_RETURN(1);
+
+  if (to->append(tmp))
+    DBUG_RETURN(1);
+
+  DBUG_RETURN(0);
+
+err:
+  dbug_tmp_restore_column_map(table->write_set, old_map);
+  DBUG_RETURN(1);
+}
+
+/*
+  Populate 'server' from the parsed connection data in 'share' and build
+  the packed hash key used to share FEDERATEDX_SERVER objects between
+  tables.  The key is the nul-separated sequence
+    scheme\0hostname\0database\0<port>socket\0username\0password
+  (scheme/hostname lowercased; database and socket lowercased depending
+  on lower_case_table_names / lower_case_file_system).  The string
+  members of 'server' are first stored as byte OFFSETS into that key and
+  converted to real pointers at the end ("pointer magic").
+*/
+static void fill_server(MEM_ROOT *mem_root, FEDERATEDX_SERVER *server,
+                        FEDERATEDX_SHARE *share, CHARSET_INFO *table_charset)
+{
+  char buffer[STRING_BUFFER_USUAL_SIZE];
+  String key(buffer, sizeof(buffer), &my_charset_bin);  
+  String scheme(share->scheme, &my_charset_latin1);
+  String hostname(share->hostname, &my_charset_latin1);
+  String database(share->database, system_charset_info);
+  String username(share->username, system_charset_info);
+  String socket(share->socket ? share->socket : "", files_charset_info);
+  String password(share->password ? share->password : "", &my_charset_bin);
+  DBUG_ENTER("fill_server");
+
+  /* Do some case conversions */
+  scheme.reserve(scheme.length());
+  scheme.length(my_casedn_str(&my_charset_latin1, scheme.c_ptr_safe()));
+  
+  hostname.reserve(hostname.length());
+  hostname.length(my_casedn_str(&my_charset_latin1, hostname.c_ptr_safe()));
+  
+  if (lower_case_table_names)
+  {
+    database.reserve(database.length());
+    database.length(my_casedn_str(system_charset_info, database.c_ptr_safe()));
+  }
+
+#ifndef __WIN__
+  /*
+    TODO: there is no unix sockets under windows so the engine should be
+    revised about using sockets in such environment.
+  */
+  if (lower_case_file_system && socket.length())
+  {
+    socket.reserve(socket.length());
+    socket.length(my_casedn_str(files_charset_info, socket.c_ptr_safe()));
+  }
+#endif
+
+  /* start with all bytes zeroed */  
+  bzero(server, sizeof(*server));
+
+  key.length(0);
+  key.reserve(scheme.length() + hostname.length() + database.length() +
+              socket.length() + username.length() + password.length() +
+       sizeof(int) + 8);
+  /* scheme ends up at offset 0 of the key (server was zeroed above) */
+  key.append(scheme);
+  key.q_append('\0');
+  server->hostname= (const char *) (intptr) key.length();
+  key.append(hostname);
+  key.q_append('\0');
+  server->database= (const char *) (intptr) key.length();
+  key.append(database);
+  key.q_append('\0');
+  /* Port is part of the key so different ports give different servers */
+  key.q_append((uint32) share->port);
+  server->socket= (const char *) (intptr) key.length();
+  key.append(socket);
+  key.q_append('\0');
+  server->username= (const char *) (intptr) key.length();
+  key.append(username);
+  key.q_append('\0');
+  server->password= (const char *) (intptr) key.length();
+  key.append(password);
+  
+  server->key_length= key.length();
+  server->key= (uchar *)  memdup_root(mem_root, key.ptr(), key.length()+1);
+
+  /* pointer magic */
+  /* Turn the offsets stored above into pointers into server->key */
+  server->scheme+= (intptr) server->key;
+  server->hostname+= (intptr) server->key;
+  server->database+= (intptr) server->key;
+  server->username+= (intptr) server->key;
+  server->password+= (intptr) server->key;
+  server->socket+= (intptr) server->key;
+  server->port= share->port;
+
+  /* Empty placeholders go back to NULL so callers can test for absence */
+  if (!share->socket)
+    server->socket= NULL;
+  if (!share->password)
+    server->password= NULL;
+
+  if (table_charset)
+    server->csname= strdup_root(mem_root, table_charset->csname);
+
+  DBUG_VOID_RETURN;
+}
+
+
+/*
+  Look up (or create) the FEDERATEDX_SERVER entry matching the connection
+  parameters in 'share'.  Server objects are shared between all tables
+  that point at the same remote server, keyed by the packed string built
+  in fill_server().  federatedx_mutex must be held by the caller.
+
+  Fix: dropped eight unused local String/buffer declarations that were
+  copy-pasted from fill_server() and never referenced here.
+
+  RETURN
+    pointer to the (refcounted) server on success, NULL on failure
+*/
+static FEDERATEDX_SERVER *get_server(FEDERATEDX_SHARE *share, TABLE *table)
+{
+  FEDERATEDX_SERVER *server= NULL, tmp_server;
+  MEM_ROOT mem_root;
+  DBUG_ENTER("ha_federated.cc::get_server");
+
+  safe_mutex_assert_owner(&federatedx_mutex);
+
+  init_alloc_root(&mem_root, 4096, 4096);
+
+  fill_server(&mem_root, &tmp_server, share, table ? table->s->table_charset : 0);
+
+  if (!(server= (FEDERATEDX_SERVER *) hash_search(&federatedx_open_servers,
+                                                 tmp_server.key,
+                                                 tmp_server.key_length)))
+  {
+    /* Creating a new entry requires a table and its charset name */
+    if (!table || !tmp_server.csname)
+      goto error;
+ 
+    if (!(server= (FEDERATEDX_SERVER *) memdup_root(&mem_root, 
+                          (char *) &tmp_server,
+                          sizeof(*server))))
+      goto error;
+
+    /* The server object owns the mem_root it was allocated on */
+    server->mem_root= mem_root;
+
+    if (my_hash_insert(&federatedx_open_servers, (uchar*) server))
+      goto error;
+
+    pthread_mutex_init(&server->mutex, MY_MUTEX_INIT_FAST);
+  }
+  else
+    free_root(&mem_root, MYF(0)); /* prevents memory leak */
+
+  server->use_count++;
+  
+  DBUG_RETURN(server);
+error:
+  free_root(&mem_root, MYF(0));
+  DBUG_RETURN(NULL);
+}
+
+
+/*
+  Example of simple lock controls. The "share" it creates is structure we will
+  pass to each federatedx handler. Do you have to have one of these? Well, you
+  have pieces that are used for locking, and they are needed to function.
+*/
+
+static FEDERATEDX_SHARE *get_share(const char *table_name, TABLE *table)
+{
+  char query_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  Field **field;
+  String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+  FEDERATEDX_SHARE *share= NULL, tmp_share;
+  MEM_ROOT mem_root;
+  DBUG_ENTER("ha_federatedx.cc::get_share");
+
+  /*
+    In order to use this string, we must first zero it's length,
+    or it will contain garbage
+  */
+  query.length(0);
+
+  bzero(&tmp_share, sizeof(tmp_share));
+  init_alloc_root(&mem_root, 256, 0);
+
+  pthread_mutex_lock(&federatedx_mutex);
+
+  /* Shares are keyed by the local table path passed to open() */
+  tmp_share.share_key= table_name;
+  tmp_share.share_key_length= strlen(table_name);
+  if (parse_url(&mem_root, &tmp_share, table, 0))
+    goto error;
+
+  /* TODO: change tmp_share.scheme to LEX_STRING object */
+  if (!(share= (FEDERATEDX_SHARE *) hash_search(&federatedx_open_tables,
+                                               (uchar*) tmp_share.share_key,
+                                               tmp_share.
+                                               share_key_length)))
+  {
+    /* Build the base "SELECT col1, col2, ... FROM remote_table" query */
+    query.set_charset(system_charset_info);
+    query.append(STRING_WITH_LEN("SELECT "));
+    for (field= table->field; *field; field++)
+    {
+      append_ident(&query, (*field)->field_name, 
+                   strlen((*field)->field_name), ident_quote_char);
+      query.append(STRING_WITH_LEN(", "));
+    }
+    /* chops off trailing comma */
+    query.length(query.length() - sizeof_trailing_comma);
+
+    query.append(STRING_WITH_LEN(" FROM "));
+
+    append_ident(&query, tmp_share.table_name, 
+                 tmp_share.table_name_length, ident_quote_char);
+
+    /*
+      NOTE(review): strmake_root is given query.length() + 1 as the copy
+      length, which reads one byte past the logical end of 'query' —
+      presumably harmless because strmake_root nul-terminates anyway;
+      verify against strmake_root's contract.
+    */
+    if (!(share= (FEDERATEDX_SHARE *) memdup_root(&mem_root, (char*)&tmp_share, sizeof(*share))) ||
+        !(share->select_query= (char*) strmake_root(&mem_root, query.ptr(), query.length() + 1)))
+      goto error;
+
+    /* The share owns the mem_root it was allocated on */
+    share->mem_root= mem_root;
+
+    DBUG_PRINT("info",
+               ("share->select_query %s", share->select_query));
+
+    /* Attach (or create) the shared server object for this connection */
+    if (!(share->s= get_server(share, table)))
+      goto error;
+   
+    if (my_hash_insert(&federatedx_open_tables, (uchar*) share))
+      goto error;
+    thr_lock_init(&share->lock);
+  }
+  else
+    free_root(&mem_root, MYF(0)); /* prevents memory leak */
+
+  share->use_count++;
+  pthread_mutex_unlock(&federatedx_mutex);
+
+  DBUG_RETURN(share);
+
+error:
+  pthread_mutex_unlock(&federatedx_mutex);
+  free_root(&mem_root, MYF(0));
+  DBUG_RETURN(NULL);
+}
+
+
+/*
+  Drop one reference to a shared FEDERATEDX_SERVER.  When the last
+  reference goes away the server is removed from the global hash, its
+  remote connections are closed via the transaction object, and all of
+  its memory is released.
+*/
+static int free_server(federatedx_txn *txn, FEDERATEDX_SERVER *server)
+{
+  bool last_reference;
+  DBUG_ENTER("free_server");
+
+  pthread_mutex_lock(&federatedx_mutex);
+  last_reference= !--server->use_count;
+  if (last_reference)
+    hash_delete(&federatedx_open_servers, (uchar*) server);
+  pthread_mutex_unlock(&federatedx_mutex);
+
+  if (last_reference)
+  {
+    MEM_ROOT mem_root;
+
+    txn->close(server);
+
+    DBUG_ASSERT(server->io_count == 0);
+
+    pthread_mutex_destroy(&server->mutex);
+    /*
+      The server struct itself lives inside its own mem_root, so copy
+      the root out before freeing it.
+    */
+    mem_root= server->mem_root;
+    free_root(&mem_root, MYF(0));
+  }
+
+  DBUG_RETURN(0);
+}
+
+
+/*
+  Free lock controls. We call this whenever we close a table.
+  If the table had the last reference to the share then we
+  free memory associated with it.
+*/
+
+static int free_share(federatedx_txn *txn, FEDERATEDX_SHARE *share)
+{
+  bool last_reference;
+  DBUG_ENTER("free_share");
+
+  pthread_mutex_lock(&federatedx_mutex);
+  last_reference= !--share->use_count;
+  if (last_reference)
+    hash_delete(&federatedx_open_tables, (uchar*) share);
+  pthread_mutex_unlock(&federatedx_mutex);
+
+  if (last_reference)
+  {
+    MEM_ROOT mem_root;
+    FEDERATEDX_SERVER *server= share->s;
+
+    thr_lock_delete(&share->lock);
+
+    /*
+      The share struct itself lives inside its own mem_root, so copy
+      the root out before freeing it, then drop our server reference.
+    */
+    mem_root= share->mem_root;
+    free_root(&mem_root, MYF(0));
+
+    free_server(txn, server);
+  }
+
+  DBUG_RETURN(0);
+}
+
+
+ha_rows ha_federatedx::records_in_range(uint inx, key_range *start_key,
+                                       key_range *end_key)
+{
+  /*
+    We really want indexes to be used as often as possible, therefore
+    we just need to hard-code the return value to a very low number to
+    force the issue
+  */
+  DBUG_ENTER("ha_federatedx::records_in_range");
+  DBUG_RETURN(FEDERATEDX_RECORDS_IN_RANGE);
+}
+/*
+  If frm_error() is called then we will use this to find out
+  what file extensions exist for the storage engine. This is
+  also used by the default rename_table and delete_table method
+  in handler.cc.
+*/
+
+const char **ha_federatedx::bas_ext() const
+{
+  /* FederatedX keeps no local data files, so the extension list is empty. */
+  static const char *ext[]=
+  {
+    NullS
+  };
+  return ext;
+}
+
+
+/*
+  Return the federatedx_txn object stored in this THD's ha_data slot,
+  lazily creating one unless no_create is true (in which case NULL may
+  be returned).
+*/
+federatedx_txn *ha_federatedx::get_txn(THD *thd, bool no_create)
+{
+  federatedx_txn **txnp= (federatedx_txn **) ha_data(thd);
+  if (!*txnp && !no_create)
+    *txnp= new federatedx_txn();
+  return *txnp;
+}
+
+  
+/*
+  handlerton close-connection hook: destroy the per-THD transaction
+  object when the client connection goes away.
+*/
+int ha_federatedx::disconnect(handlerton *hton, MYSQL_THD thd)
+{
+  federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+  delete txn;
+  return 0;
+}
+ 
+
+/*
+  Used for opening tables. The name will be the name of the file.
+  A table is opened when it needs to be opened. For instance
+  when a request comes in for a select on the table (tables are not
+  open and closed for each request, they are cached).
+
+  Called from handler.cc by handler::ha_open(). The server opens
+  all tables by calling ha_open() which then calls the handler
+  specific open().
+*/
+
+int ha_federatedx::open(const char *name, int mode, uint test_if_locked)
+{
+  int error;
+  THD *thd= current_thd;
+  DBUG_ENTER("ha_federatedx::open");
+
+  if (!(share= get_share(name, table)))
+    DBUG_RETURN(1);
+  thr_lock_data_init(&share->lock, &lock, NULL);
+
+  DBUG_ASSERT(io == NULL);
+
+  txn= get_txn(thd);
+
+  /* Verify we can actually reach the remote server before claiming open */
+  if ((error= txn->acquire(share, TRUE, &io)))
+  {
+    /* Connection failed: drop the share reference taken above. */
+    free_share(txn, share);
+    DBUG_RETURN(error);
+  }
+ 
+  /* Connection is usable; return it to the txn pool until it is needed. */
+  txn->release(&io);
+ 
+  /* Position handle (ref) is the PK value if there is one, else the row. */
+  ref_length= (table->s->primary_key != MAX_KEY ?
+               table->key_info[table->s->primary_key].key_length :
+               table->s->reclength);
+  DBUG_PRINT("info", ("ref_length: %u", ref_length));
+
+  reset();
+
+  DBUG_RETURN(0);
+}
+
+
+/*
+  Closes a table. We call the free_share() function to free any resources
+  that we have allocated in the "shared" structure.
+
+  Called from sql_base.cc, sql_select.cc, and table.cc.
+  In sql_select.cc it is only used to close up temporary tables or during
+  the process where a temporary table is converted over to being a
+  myisam table.
+  For sql_base.cc look at close_data_tables().
+*/
+
+int ha_federatedx::close(void)
+{
+  /*
+    Bug fix: retval must be initialized -- when stored_result is NULL and
+    free_share() succeeds, the previous code returned an uninitialized
+    value.
+  */
+  int retval= 0, error;
+  THD *thd= current_thd;
+  DBUG_ENTER("ha_federatedx::close");
+
+  /* free the result set */
+  if (stored_result)
+    retval= free_result();
+
+  /* Disconnect from mysql */
+  if ((txn= get_txn(thd, true)))
+    txn->release(&io);
+
+  DBUG_ASSERT(io == NULL);
+
+  /*
+    NOTE(review): txn may be NULL here (get_txn() with no_create == true);
+    free_share() forwards it to free_server(), which dereferences it when
+    the last server reference drops -- TODO confirm a non-NULL txn is
+    guaranteed on this path.
+  */
+  if ((error= free_share(txn, share)))
+    retval= error;
+  DBUG_RETURN(retval);
+}
+
+/*
+
+  Checks if a field in a record is SQL NULL.
+
+  SYNOPSIS
+    field_in_record_is_null()
+      table     TABLE pointer, MySQL table object
+      field     Field pointer, MySQL field object
+      record    char pointer, contains record
+
+    DESCRIPTION
+      This method uses the record format information in table to track
+      the null bit in record.
+
+    RETURN VALUE
+      1    if NULL
+      0    otherwise
+*/
+
+static inline uint field_in_record_is_null(TABLE *table,
+                                    Field *field,
+                                    char *record)
+{
+  int null_offset;
+  /* NOTE(review): label claims ha_federatedx:: but this is a static helper */
+  DBUG_ENTER("ha_federatedx::field_in_record_is_null");
+
+  /* Field is NOT NULL, so it can never contain SQL NULL */
+  if (!field->null_ptr)
+    DBUG_RETURN(0);
+
+  /* Offset of this field's null byte relative to the default record buffer */
+  null_offset= (uint) ((char*)field->null_ptr - (char*)table->record[0]);
+
+  if (record[null_offset] & field->null_bit)
+    DBUG_RETURN(1);
+
+  DBUG_RETURN(0);
+}
+
+
+/**
+  @brief Construct the INSERT statement.
+  
+  @details This method will construct the INSERT statement and appends it to
+  the supplied query string buffer.
+  
+  @return
+    @retval FALSE       No error
+    @retval TRUE        Failure
+*/
+
+bool ha_federatedx::append_stmt_insert(String *query)
+{
+  char insert_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  Field **field;
+  uint tmp_length;
+  bool added_field= FALSE;
+
+  /* The main insert query string */
+  String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin);
+  DBUG_ENTER("ha_federatedx::append_stmt_insert");
+
+  insert_string.length(0);
+
+  /* Pick the statement verb according to the duplicate-handling mode */
+  if (replace_duplicates)
+    insert_string.append(STRING_WITH_LEN("REPLACE INTO "));
+  else if (ignore_duplicates && !insert_dup_update)
+    insert_string.append(STRING_WITH_LEN("INSERT IGNORE INTO "));
+  else
+    insert_string.append(STRING_WITH_LEN("INSERT INTO "));
+  append_ident(&insert_string, share->table_name, share->table_name_length, 
+               ident_quote_char);
+  /* Remember length so the column list can be dropped if no field is set */
+  tmp_length= insert_string.length();
+  insert_string.append(STRING_WITH_LEN(" ("));
+
+  /*
+    loop through the field pointer array, add any fields to both the values
+    list and the fields list that match the current query id
+  */
+  for (field= table->field; *field; field++)
+  {
+    if (bitmap_is_set(table->write_set, (*field)->field_index))
+    {
+      /* append the field name */
+      append_ident(&insert_string, (*field)->field_name, 
+                   strlen((*field)->field_name), ident_quote_char);
+
+      /* append commas between both fields and fieldnames */
+      /*
+        unfortunately, we can't use the logic if *(fields + 1) to
+        make the following appends conditional as we don't know if the
+        next field is in the write set
+      */
+      insert_string.append(STRING_WITH_LEN(", "));
+      added_field= TRUE;
+    }
+  }
+
+  if (added_field)
+  {
+    /* Remove trailing comma. */
+    insert_string.length(insert_string.length() - sizeof_trailing_comma);
+    insert_string.append(STRING_WITH_LEN(") "));
+  }
+  else
+  {
+    /* If there were no fields, we don't want to add a closing paren. */
+    insert_string.length(tmp_length);
+  }
+
+  insert_string.append(STRING_WITH_LEN(" VALUES "));
+
+  DBUG_RETURN(query->append(insert_string));
+}
+
+
+/*
+  write_row() inserts a row. No extra() hint is given currently if a bulk load
+  is happening. buf() is a byte array of data. You can use the field
+  information to extract the data from the native byte array type.
+  Example of this would be:
+  for (Field **field=table->field ; *field ; field++)
+  {
+    ...
+  }
+
+  Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
+  sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+*/
+
+int ha_federatedx::write_row(uchar *buf)
+{
+  char values_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  char insert_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
+  Field **field;
+  uint tmp_length;
+  int error= 0;
+  bool use_bulk_insert;
+  /* Whether the remote auto_increment value must be fetched after the query */
+  bool auto_increment_update_required= (table->next_number_field != NULL);
+
+  /* The string containing the values to be added to the insert */
+  String values_string(values_buffer, sizeof(values_buffer), &my_charset_bin);
+  /* The actual value of the field, to be added to the values_string */
+  String insert_field_value_string(insert_field_value_buffer,
+                                   sizeof(insert_field_value_buffer),
+                                   &my_charset_bin);
+  my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+  DBUG_ENTER("ha_federatedx::write_row");
+
+  values_string.length(0);
+  insert_field_value_string.length(0);
+  ha_statistic_increment(&SSV::ha_write_count);
+  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+    table->timestamp_field->set_time();
+
+  /*
+    start both our field and field values strings
+    We must disable multi-row insert for "INSERT...ON DUPLICATE KEY UPDATE"
+    Ignore duplicates is always true when insert_dup_update is true.
+    When replace_duplicates == TRUE, we can safely enable multi-row insert.
+    When performing multi-row insert, we only collect the columns values for
+    the row. The start of the statement is only created when the first
+    row is copied in to the bulk_insert string.
+  */
+  if (!(use_bulk_insert= bulk_insert.str && 
+        (!insert_dup_update || replace_duplicates)))
+    append_stmt_insert(&values_string);
+
+  values_string.append(STRING_WITH_LEN(" ("));
+  tmp_length= values_string.length();
+
+  /*
+    loop through the field pointer array, add any fields to both the values
+    list and the fields list that is part of the write set
+  */
+  for (field= table->field; *field; field++)
+  {
+    if (bitmap_is_set(table->write_set, (*field)->field_index))
+    {
+      if ((*field)->is_null())
+        values_string.append(STRING_WITH_LEN(" NULL "));
+      else
+      {
+        bool needs_quote= (*field)->str_needs_quotes();
+        (*field)->val_str(&insert_field_value_string);
+        if (needs_quote)
+          values_string.append(value_quote_char);
+        /* print() escapes the value so it is safe inside the statement */
+        insert_field_value_string.print(&values_string);
+        if (needs_quote)
+          values_string.append(value_quote_char);
+
+        insert_field_value_string.length(0);
+      }
+
+      /* append commas between both fields and fieldnames */
+      /*
+        unfortunately, we can't use the logic if *(fields + 1) to
+        make the following appends conditional as we don't know if the
+        next field is in the write set
+      */
+      values_string.append(STRING_WITH_LEN(", "));
+    }
+  }
+  dbug_tmp_restore_column_map(table->read_set, old_map);
+
+  /*
+    if there were no fields, we don't want to add a closing paren
+    AND, we don't want to chop off the last char '('
+    insert will be "INSERT INTO t1 VALUES ();"
+  */
+  if (values_string.length() > tmp_length)
+  {
+    /* chops off trailing comma */
+    values_string.length(values_string.length() - sizeof_trailing_comma);
+  }
+  /* we always want to append this, even if there aren't any fields */
+  values_string.append(STRING_WITH_LEN(") "));
+
+  if ((error= txn->acquire(share, FALSE, &io)))
+    DBUG_RETURN(error);
+
+  if (use_bulk_insert)
+  {
+    /*
+      Send the current bulk insert out if appending the current row would
+      cause the statement to overflow the packet size, otherwise set
+      auto_increment_update_required to FALSE as no query was executed.
+    */
+    if (bulk_insert.length + values_string.length() + bulk_padding >
+        io->max_query_size() && bulk_insert.length)
+    {
+      error= io->query(bulk_insert.str, bulk_insert.length);
+      bulk_insert.length= 0;
+    }
+    else
+      auto_increment_update_required= FALSE;
+      
+    if (bulk_insert.length == 0)
+    {
+      /* First row of a (new) batch: emit the INSERT statement head */
+      char insert_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+      String insert_string(insert_buffer, sizeof(insert_buffer), 
+                           &my_charset_bin);
+      insert_string.length(0);
+      append_stmt_insert(&insert_string);
+      dynstr_append_mem(&bulk_insert, insert_string.ptr(), 
+                        insert_string.length());
+    }
+    else
+      dynstr_append_mem(&bulk_insert, ",", 1);
+
+    dynstr_append_mem(&bulk_insert, values_string.ptr(), 
+                      values_string.length());
+  }  
+  else
+  {
+    error= io->query(values_string.ptr(), values_string.length());
+  }
+  
+  if (error)
+  {
+    DBUG_RETURN(stash_remote_error());
+  }
+  /*
+    If the table we've just written a record to contains an auto_increment
+    field, then store the last_insert_id() value from the foreign server
+  */
+  if (auto_increment_update_required)
+  {
+    update_auto_increment();
+
+    /* mysql_insert() uses this for protocol return value */
+    table->next_number_field->store(stats.auto_increment_value, 1);
+  }
+
+  DBUG_RETURN(0);
+}
+
+
+/**
+  @brief Prepares the storage engine for bulk inserts.
+  
+  @param[in] rows       estimated number of rows in bulk insert 
+                        or 0 if unknown.
+  
+  @details Initializes memory structures required for bulk insert.
+*/
+
+void ha_federatedx::start_bulk_insert(ha_rows rows)
+{
+  uint page_size;
+  DBUG_ENTER("ha_federatedx::start_bulk_insert");
+
+  dynstr_free(&bulk_insert);
+  
+  /**
+    We don't bother with bulk-insert semantics when the estimated rows == 1
+    The rows value will be 0 if the server does not know how many rows
+    would be inserted. This can occur when performing INSERT...SELECT
+  */
+  
+  if (rows == 1)
+    DBUG_VOID_RETURN;
+
+  /*
+    Make sure we have an open connection so that we know the 
+    maximum packet size.
+  */
+  if (txn->acquire(share, FALSE, &io))
+    DBUG_VOID_RETURN;
+
+  /* Size the buffer in page-sized steps */
+  page_size= (uint) my_getpagesize();
+
+  /* On allocation failure simply continue without bulk-insert buffering */
+  if (init_dynamic_string(&bulk_insert, NULL, page_size, page_size))
+    DBUG_VOID_RETURN;
+  
+  bulk_insert.length= 0;
+  DBUG_VOID_RETURN;
+}
+
+
+/**
+  @brief End bulk insert.
+  
+  @details This method will send any remaining rows to the remote server.
+  Finally, it will deinitialize the bulk insert data structure.
+  
+  @return Operation status
+  @retval       0       No error
+  @retval       != 0    Error occurred at remote server. Also sets my_errno.
+*/
+
+int ha_federatedx::end_bulk_insert(bool abort)
+{
+  int error= 0;
+  DBUG_ENTER("ha_federatedx::end_bulk_insert");
+  
+  /* Flush any buffered rows, unless the statement is being aborted */
+  if (bulk_insert.str && bulk_insert.length && !abort)
+  {
+    if ((error= txn->acquire(share, FALSE, &io)))
+      DBUG_RETURN(error);
+    if (io->query(bulk_insert.str, bulk_insert.length))
+      error= stash_remote_error();
+    else
+    if (table->next_number_field)
+      update_auto_increment();
+  }
+
+  dynstr_free(&bulk_insert);
+  
+  /* Assign and return in one step so my_errno matches the return value */
+  DBUG_RETURN(my_errno= error);
+}
+
+
+/*
+  ha_federatedx::update_auto_increment
+
+  This method ensures that last_insert_id() works properly. What it simply does
+  is calls last_insert_id() on the foreign database immediately after insert
+  (if the table has an auto_increment field) and sets the insert id via
+  thd->insert_id(ID)).
+*/
+void ha_federatedx::update_auto_increment(void)
+{
+  THD *thd= current_thd;
+  DBUG_ENTER("ha_federatedx::update_auto_increment");
+
+  /* info(HA_STATUS_AUTO) refreshes stats.auto_increment_value from remote */
+  ha_federatedx::info(HA_STATUS_AUTO);
+  thd->first_successful_insert_id_in_cur_stmt= 
+    stats.auto_increment_value;
+  DBUG_PRINT("info",("last_insert_id: %ld", (long) stats.auto_increment_value));
+
+  DBUG_VOID_RETURN;
+}
+
+/*
+  Forward OPTIMIZE TABLE to the remote server for the mapped table.
+  Returns 0 on success, or a stashed remote error.
+*/
+int ha_federatedx::optimize(THD* thd, HA_CHECK_OPT* check_opt)
+{
+  int error= 0;
+  char query_buffer[STRING_BUFFER_USUAL_SIZE];
+  String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+  DBUG_ENTER("ha_federatedx::optimize");
+  
+  query.length(0);
+
+  query.set_charset(system_charset_info);
+  query.append(STRING_WITH_LEN("OPTIMIZE TABLE "));
+  append_ident(&query, share->table_name, share->table_name_length, 
+               ident_quote_char);
+
+  DBUG_ASSERT(txn == get_txn(thd));
+
+  if ((error= txn->acquire(share, FALSE, &io)))
+    DBUG_RETURN(error);
+
+  if (io->query(query.ptr(), query.length()))
+    error= stash_remote_error();
+
+  DBUG_RETURN(error);
+}
+
+
+/*
+  Forward REPAIR TABLE to the remote server, translating the local
+  QUICK / EXTENDED / USE_FRM options into the remote statement.
+  Returns 0 on success, or a stashed remote error.
+*/
+int ha_federatedx::repair(THD* thd, HA_CHECK_OPT* check_opt)
+{
+  int error= 0;
+  char query_buffer[STRING_BUFFER_USUAL_SIZE];
+  String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+  DBUG_ENTER("ha_federatedx::repair");
+
+  query.length(0);
+
+  query.set_charset(system_charset_info);
+  query.append(STRING_WITH_LEN("REPAIR TABLE "));
+  append_ident(&query, share->table_name, share->table_name_length, 
+               ident_quote_char);
+  if (check_opt->flags & T_QUICK)
+    query.append(STRING_WITH_LEN(" QUICK"));
+  if (check_opt->flags & T_EXTEND)
+    query.append(STRING_WITH_LEN(" EXTENDED"));
+  if (check_opt->sql_flags & TT_USEFRM)
+    query.append(STRING_WITH_LEN(" USE_FRM"));
+
+  DBUG_ASSERT(txn == get_txn(thd));
+
+  if ((error= txn->acquire(share, FALSE, &io)))
+    DBUG_RETURN(error);
+
+  if (io->query(query.ptr(), query.length()))
+    error= stash_remote_error();
+
+  DBUG_RETURN(error);
+}
+
+
+/*
+  Yes, update_row() does what you expect, it updates a row. old_data will have
+  the previous row record in it, while new_data will have the newest data in
+  it.
+
+  Keep in mind that the server can do updates based on ordering if an ORDER BY
+  clause was used. Consecutive ordering is not guaranteed.
+  Currently new_data will not have an updated auto_increment record, or
+  an updated timestamp field. You can do these for federatedx by doing these:
+  if (table->timestamp_on_update_now)
+    update_timestamp(new_row+table->timestamp_on_update_now-1);
+  if (table->next_number_field && record == table->record[0])
+    update_auto_increment();
+
+  Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
+*/
+
+int ha_federatedx::update_row(const uchar *old_data, uchar *new_data)
+{
+  /*
+    This used to control how the query was built. If there was a
+    primary key, the query would be built such that there was a where
+    clause with only that column as the condition. This is flawed,
+    because if we have a multi-part primary key, it would only use the
+    first part! We don't need to do this anyway, because
+    read_range_first will retrieve the correct record, which is what
+    is used to build the WHERE clause. We can however use this to
+    append a LIMIT to the end if there is NOT a primary key. Why do
+    this? Because we only are updating one record, and LIMIT enforces
+    this.
+  */
+  bool has_a_primary_key= test(table->s->primary_key != MAX_KEY);
+  
+  /*
+    buffers for following strings
+  */
+  char field_value_buffer[STRING_BUFFER_USUAL_SIZE];
+  char update_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  char where_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+
+  /* Work area for field values */
+  String field_value(field_value_buffer, sizeof(field_value_buffer),
+                     &my_charset_bin);
+  /* stores the update query */
+  String update_string(update_buffer,
+                       sizeof(update_buffer),
+                       &my_charset_bin);
+  /* stores the WHERE clause */
+  String where_string(where_buffer,
+                      sizeof(where_buffer),
+                      &my_charset_bin);
+  uchar *record= table->record[0];
+  int error;
+  DBUG_ENTER("ha_federatedx::update_row");
+  /*
+    set string lengths to 0 to avoid misc chars in string
+  */
+  field_value.length(0);
+  update_string.length(0);
+  where_string.length(0);
+
+  if (ignore_duplicates)
+    update_string.append(STRING_WITH_LEN("UPDATE IGNORE "));
+  else
+    update_string.append(STRING_WITH_LEN("UPDATE "));
+  append_ident(&update_string, share->table_name,
+               share->table_name_length, ident_quote_char);
+  update_string.append(STRING_WITH_LEN(" SET "));
+
+  /*
+    In this loop, we want to match column names to values being inserted
+    (while building INSERT statement).
+
+    Iterate through table->field (new data) and share->old_field (old_data)
+    using the same index to create an SQL UPDATE statement. New data is
+    used to create SET field=value and old data is used to create WHERE
+    field=oldvalue
+  */
+
+  for (Field **field= table->field; *field; field++)
+  {
+    /* write_set columns become SET assignments built from the new row */
+    if (bitmap_is_set(table->write_set, (*field)->field_index))
+    {
+      uint field_name_length= strlen((*field)->field_name);
+      append_ident(&update_string, (*field)->field_name, field_name_length,
+                   ident_quote_char);
+      update_string.append(STRING_WITH_LEN(" = "));
+
+      if ((*field)->is_null())
+        update_string.append(STRING_WITH_LEN(" NULL "));
+      else
+      {
+        /* otherwise = */
+        my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
+        bool needs_quote= (*field)->str_needs_quotes();
+	(*field)->val_str(&field_value);
+        if (needs_quote)
+          update_string.append(value_quote_char);
+        field_value.print(&update_string);
+        if (needs_quote)
+          update_string.append(value_quote_char);
+        field_value.length(0);
+        tmp_restore_column_map(table->read_set, old_map);
+      }
+      update_string.append(STRING_WITH_LEN(", "));
+    }
+
+    /* read_set columns become WHERE conditions built from the old row */
+    if (bitmap_is_set(table->read_set, (*field)->field_index))
+    {
+      uint field_name_length= strlen((*field)->field_name);
+      append_ident(&where_string, (*field)->field_name, field_name_length,
+                   ident_quote_char);
+      if (field_in_record_is_null(table, *field, (char*) old_data))
+        where_string.append(STRING_WITH_LEN(" IS NULL "));
+      else
+      {
+        bool needs_quote= (*field)->str_needs_quotes();
+        where_string.append(STRING_WITH_LEN(" = "));
+        (*field)->val_str(&field_value,
+                          (old_data + (*field)->offset(record)));
+        if (needs_quote)
+          where_string.append(value_quote_char);
+        field_value.print(&where_string);
+        if (needs_quote)
+          where_string.append(value_quote_char);
+        field_value.length(0);
+      }
+      where_string.append(STRING_WITH_LEN(" AND "));
+    }
+  }
+
+  /* Remove last ', '. This works as there must be at least one updated field */
+  update_string.length(update_string.length() - sizeof_trailing_comma);
+
+  if (where_string.length())
+  {
+    /* chop off trailing AND */
+    where_string.length(where_string.length() - sizeof_trailing_and);
+    update_string.append(STRING_WITH_LEN(" WHERE "));
+    update_string.append(where_string);
+  }
+
+  /*
+    If this table has not a primary key, then we could possibly
+    update multiple rows. We want to make sure to only update one!
+  */
+  if (!has_a_primary_key)
+    update_string.append(STRING_WITH_LEN(" LIMIT 1"));
+
+  if ((error= txn->acquire(share, FALSE, &io)))
+    DBUG_RETURN(error);
+
+  if (io->query(update_string.ptr(), update_string.length()))
+  {
+    DBUG_RETURN(stash_remote_error());
+  }
+  DBUG_RETURN(0);
+}
+
+/*
+  This will delete a row. 'buf' will contain a copy of the row to be deleted.
+  The server will call this right after the current row has been called (from
+  either a previous rnd_next() or index call).
+  If you keep a pointer to the last row or can access a primary key it will
+  make doing the deletion quite a bit easier.
+  Keep in mind that the server does not guarantee consecutive deletions.
+  ORDER BY clauses can be used.
+
+  Called in sql_acl.cc and sql_udf.cc to manage internal table information.
+  Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
+  it is used for removing duplicates while in insert it is used for REPLACE
+  calls.
+*/
+
+int ha_federatedx::delete_row(const uchar *buf)
+{
+  char delete_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  char data_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  String delete_string(delete_buffer, sizeof(delete_buffer), &my_charset_bin);
+  String data_string(data_buffer, sizeof(data_buffer), &my_charset_bin);
+  uint found= 0;
+  int error;
+  DBUG_ENTER("ha_federatedx::delete_row");
+
+  delete_string.length(0);
+  delete_string.append(STRING_WITH_LEN("DELETE FROM "));
+  append_ident(&delete_string, share->table_name,
+               share->table_name_length, ident_quote_char);
+  delete_string.append(STRING_WITH_LEN(" WHERE "));
+
+  for (Field **field= table->field; *field; field++)
+  {
+    Field *cur_field= *field;
+    if (bitmap_is_set(table->read_set, cur_field->field_index))
+    {
+      /*
+        Bug fix: only count fields that actually contribute a condition.
+        Previously found++ ran for every field, so the empty-WHERE cleanup
+        below could never trigger and the unconditional chop of a trailing
+        " AND " corrupted the statement when the read set was empty.
+      */
+      found++;
+      append_ident(&delete_string, (*field)->field_name,
+                   strlen((*field)->field_name), ident_quote_char);
+      data_string.length(0);
+      if (cur_field->is_null())
+      {
+        delete_string.append(STRING_WITH_LEN(" IS NULL "));
+      }
+      else
+      {
+        bool needs_quote= cur_field->str_needs_quotes();
+        delete_string.append(STRING_WITH_LEN(" = "));
+        cur_field->val_str(&data_string);
+        if (needs_quote)
+          delete_string.append(value_quote_char);
+        /* print() escapes the value so it is safe inside the statement */
+        data_string.print(&delete_string);
+        if (needs_quote)
+          delete_string.append(value_quote_char);
+      }
+      delete_string.append(STRING_WITH_LEN(" AND "));
+    }
+  }
+
+  if (found)
+    delete_string.length(delete_string.length() - sizeof_trailing_and);
+  else
+    delete_string.length(delete_string.length() - sizeof_trailing_where);
+
+  delete_string.append(STRING_WITH_LEN(" LIMIT 1"));
+  DBUG_PRINT("info",
+             ("Delete sql: %s", delete_string.c_ptr_quick()));
+
+  if ((error= txn->acquire(share, FALSE, &io)))
+    DBUG_RETURN(error);
+
+  if (io->query(delete_string.ptr(), delete_string.length()))
+  {
+    DBUG_RETURN(stash_remote_error());
+  }
+  stats.deleted+= (ha_rows) io->affected_rows();
+  stats.records-= (ha_rows) io->affected_rows();
+  DBUG_PRINT("info",
+             ("rows deleted %ld  rows deleted for all time %ld",
+              (long) io->affected_rows(), (long) stats.deleted));
+
+  DBUG_RETURN(0);
+}
+
+
+/*
+  Positions an index cursor to the index specified in the handle. Fetches the
+  row if available. If the key value is null, begin at the first key of the
+  index. This method, which is called in the case of an SQL statement having
+  a WHERE clause on a non-primary key index, simply calls index_read_idx.
+*/
+
+int ha_federatedx::index_read(uchar *buf, const uchar *key,
+                             uint key_len, ha_rkey_function find_flag)
+{
+  DBUG_ENTER("ha_federatedx::index_read");
+
+  /* Discard any previous result set before starting a new index lookup */
+  if (stored_result)
+    (void) free_result();
+  DBUG_RETURN(index_read_idx_with_result_set(buf, active_index, key,
+                                             key_len, find_flag,
+                                             &stored_result));
+}
+
+
+/*
+  Positions an index cursor to the index specified in key. Fetches the
+  row if any.  This is only used to read whole keys.
+
+  This method is called via index_read in the case of a WHERE clause using
+  a primary key index OR is called DIRECTLY when the WHERE clause
+  uses a PRIMARY KEY index.
+
+  NOTES
+    This uses an internal result set that is deleted before function
+    returns.  We need to be able to be callable from ha_rnd_pos()
+*/
+
+int ha_federatedx::index_read_idx(uchar *buf, uint index, const uchar *key,
+                                 uint key_len, enum ha_rkey_function find_flag)
+{
+  int retval;
+  FEDERATEDX_IO_RESULT *io_result;
+  DBUG_ENTER("ha_federatedx::index_read_idx");
+
+  if ((retval= index_read_idx_with_result_set(buf, index, key,
+                                              key_len, find_flag,
+                                              &io_result)))
+    DBUG_RETURN(retval);
+  /* io is correct, as index_read_idx_with_result_set was ok */
+  io->free_result(io_result);
+  DBUG_RETURN(retval);
+}
+
+
+/*
+  Create result set for rows matching query and return first row
+
+  RESULT
+    0	ok     In this case *result will contain the result set
+	       table->status == 0 
+    #   error  In this case *result will contain 0
+               table->status == STATUS_NOT_FOUND
+*/
+
+int ha_federatedx::index_read_idx_with_result_set(uchar *buf, uint index,
+                                                 const uchar *key,
+                                                 uint key_len,
+                                                 ha_rkey_function find_flag,
+                                                 FEDERATEDX_IO_RESULT **result)
+{
+  int retval;
+  char error_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  char index_value[STRING_BUFFER_USUAL_SIZE];
+  char sql_query_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  String index_string(index_value,
+                      sizeof(index_value),
+                      &my_charset_bin);
+  String sql_query(sql_query_buffer,
+                   sizeof(sql_query_buffer),
+                   &my_charset_bin);
+  key_range range;
+  DBUG_ENTER("ha_federatedx::index_read_idx_with_result_set");
+
+  *result= 0;                                   // In case of errors
+  index_string.length(0);
+  sql_query.length(0);
+  ha_statistic_increment(&SSV::ha_read_key_count);
+
+  /* Base SELECT plus a WHERE clause derived from the key range */
+  sql_query.append(share->select_query);
+
+  range.key= key;
+  range.length= key_len;
+  range.flag= find_flag;
+  create_where_from_key(&index_string,
+                        &table->key_info[index],
+                        &range,
+                        NULL, 0, 0);
+  sql_query.append(index_string);
+
+  if ((retval= txn->acquire(share, TRUE, &io)))
+    DBUG_RETURN(retval);
+
+  if (io->query(sql_query.ptr(), sql_query.length()))
+  {
+    my_sprintf(error_buffer, (error_buffer, "error: %d '%s'",
+                              io->error_code(), io->error_str()));
+    retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+    goto error;
+  }
+  if (!(*result= io->store_result()))
+  {
+    /*
+      NOTE(review): this path reaches 'error:' without filling
+      error_buffer, so my_error() receives uninitialized data -- TODO
+      confirm the HA_ERR_END_OF_FILE message format ignores the argument.
+    */
+    retval= HA_ERR_END_OF_FILE;
+    goto error;
+  }
+  if (!(retval= read_next(buf, *result)))
+    DBUG_RETURN(retval);
+
+  /* First fetch failed: dispose of the result set before returning */
+  io->free_result(*result);
+  *result= 0;
+  table->status= STATUS_NOT_FOUND;
+  DBUG_RETURN(retval);
+
+error:
+  table->status= STATUS_NOT_FOUND;
+  my_error(retval, MYF(0), error_buffer);
+  DBUG_RETURN(retval);
+}
+
+
+/*
+  This method is used exclusively by filesort() to check if we
+  can create sorting buffers of necessary size.
+  If the handler returns more records than it declares
+  here server can just crash on filesort().
+  We cannot guarantee that's not going to happen with
+  the FEDERATEDX engine, as we have records==0 always if the
+  client is a VIEW, and for the table the number of
+  records can unpredictably change during execution.
+  So we return maximum possible value here.
+*/
+
+ha_rows ha_federatedx::estimate_rows_upper_bound()
+{
+  return HA_POS_ERROR;
+}
+
+
+/* Initialized at each key walk (called multiple times unlike rnd_init()) */
+
+int ha_federatedx::index_init(uint keynr, bool sorted)
+{
+  DBUG_ENTER("ha_federatedx::index_init");
+  DBUG_PRINT("info", ("table: '%s'  key: %u", table->s->table_name.str, keynr));
+  /* Only records which index subsequent index_* calls should use */
+  active_index= keynr;
+  DBUG_RETURN(0);
+}
+
+
+/*
+  Read first range
+
+  Builds a SELECT with a WHERE clause derived from the key range, runs it
+  on the remote server, stores the result set in stored_result and
+  returns the first row.
+*/
+
+int ha_federatedx::read_range_first(const key_range *start_key,
+                                   const key_range *end_key,
+                                   bool eq_range_arg, bool sorted)
+{
+  char sql_query_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  int retval;
+  String sql_query(sql_query_buffer,
+                   sizeof(sql_query_buffer),
+                   &my_charset_bin);
+  DBUG_ENTER("ha_federatedx::read_range_first");
+
+  DBUG_ASSERT(!(start_key == NULL && end_key == NULL));
+
+  sql_query.length(0);
+  sql_query.append(share->select_query);
+  create_where_from_key(&sql_query,
+                        &table->key_info[active_index],
+                        start_key, end_key, 0, eq_range_arg);
+
+  if ((retval= txn->acquire(share, TRUE, &io)))
+    DBUG_RETURN(retval);
+
+  /* Any result set from an earlier scan is superseded by this range */
+  if (stored_result)
+  {
+    io->free_result(stored_result);
+    stored_result= 0;
+  }
+
+  if (io->query(sql_query.ptr(), sql_query.length()))
+  {
+    retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+    goto error;
+  }
+  sql_query.length(0);
+
+  if (!(stored_result= io->store_result()))
+  {
+    retval= HA_ERR_END_OF_FILE;
+    goto error;
+  }
+
+  retval= read_next(table->record[0], stored_result);
+  DBUG_RETURN(retval);
+
+error:
+  table->status= STATUS_NOT_FOUND;
+  DBUG_RETURN(retval);
+}
+
+
+/* Fetch the next row of the current range by delegating to rnd_next() */
+int ha_federatedx::read_range_next()
+{
+  int retval;
+  DBUG_ENTER("ha_federatedx::read_range_next");
+  retval= rnd_next(table->record[0]);
+  DBUG_RETURN(retval);
+}
+
+
+/* Used to read forward through the index.  */
+int ha_federatedx::index_next(uchar *buf)
+{
+  DBUG_ENTER("ha_federatedx::index_next");
+  ha_statistic_increment(&SSV::ha_read_next_count);
+  /* Advance through the result set produced by the last index lookup */
+  DBUG_RETURN(read_next(buf, stored_result));
+}
+
+
+/*
+  rnd_init() is called when the system wants the storage engine to do a table
+  scan.
+
+  This is the method that gets data for the SELECT calls.
+
+  See the introduction at the top of this file to see when
+  rnd_init() is called.
+
+  Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+  sql_table.cc, and sql_update.cc.
+*/
+
+int ha_federatedx::rnd_init(bool scan)
+{
+  DBUG_ENTER("ha_federatedx::rnd_init");
+  /*
+    The use of the 'scan' flag is incredibly important for this handler
+    to work properly, especially with updates containing WHERE clauses
+    using indexed columns.
+
+    When the initial query contains a WHERE clause of the query using an
+    indexed column, it's index_read_idx that selects the exact record from
+    the foreign database.
+
+    When there is NO index in the query, either due to not having a WHERE
+    clause, or the WHERE clause is using columns that are not indexed, a
+    'full table scan' done by rnd_init, which in this situation simply means
+    a 'select * from ...' on the foreign table.
+
+    In other words, this 'scan' flag gives us the means to ensure that if
+    there is an index involved in the query, we want index_read_idx to
+    retrieve the exact record (scan flag is 0), and do not want rnd_init
+    to do a 'full table scan' and wipe out that result set.
+
+    Prior to using this flag, the problem was most apparent with updates.
+
+    An initial query like 'UPDATE tablename SET anything = whatever WHERE
+    indexedcol = someval', index_read_idx would get called, using a query
+    constructed with a WHERE clause built from the values of index ('indexcol'
+    in this case, having a value of 'someval').  mysql_store_result would
+    then get called (this would be the result set we want to use).
+
+    After this rnd_init (from sql_update.cc) would be called, it would then
+    unnecessarily call "select * from table" on the foreign table, then call
+    mysql_store_result, which would wipe out the correct previous result set
+    from the previous call of index_read_idx's that had the result set
+    containing the correct record, hence update the wrong row!
+
+  */
+
+  if (scan)
+  {
+    int error;
+
+    /* make sure this handler owns a connection before issuing the query */
+    if ((error= txn->acquire(share, TRUE, &io)))
+      DBUG_RETURN(error);
+
+    /* discard any result set left over from a previous operation */
+    if (stored_result)
+    {
+      io->free_result(stored_result);
+      stored_result= 0;
+    }
+
+    if (io->query(share->select_query,
+                  strlen(share->select_query)))
+      goto error;
+
+    stored_result= io->store_result();
+    if (!stored_result)
+      goto error;
+  }
+  /* scan == FALSE: keep any existing result set (see comment above) */
+  DBUG_RETURN(0);
+
+error:
+  DBUG_RETURN(stash_remote_error());
+}
+
+
+/* End a table scan; cleanup is identical to ending an index scan. */
+int ha_federatedx::rnd_end()
+{
+  DBUG_ENTER("ha_federatedx::rnd_end");
+  DBUG_RETURN(index_end());
+}
+
+
+/*
+  Dispose of the pre-fetched remote result set.  The connection is
+  re-acquired (read-only) first, since the io object is needed to free
+  the result.  Returns 0 on success, the acquire() error otherwise.
+*/
+int ha_federatedx::free_result()
+{
+  int error;
+  DBUG_ASSERT(stored_result);
+  if ((error= txn->acquire(share, FALSE, &io)))
+  {
+    DBUG_ASSERT(0);                             // Fail when testing
+    return error;
+  }
+  io->free_result(stored_result);
+  stored_result= 0;
+  return 0;
+}
+
+/* Close the current index scan, freeing any pending result set. */
+int ha_federatedx::index_end(void)
+{
+  int error= 0;
+  DBUG_ENTER("ha_federatedx::index_end");
+  if (stored_result)
+    error= free_result();
+  active_index= MAX_KEY;                        /* no index active any more */
+  DBUG_RETURN(error);
+}
+
+
+/*
+  This is called for each row of the table scan. When you run out of records
+  you should return HA_ERR_END_OF_FILE. Fill buff up with the row information.
+  The Field structure for the table is the key to getting data into buf
+  in a manner that will allow the server to understand it.
+
+  Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+  sql_table.cc, and sql_update.cc.
+*/
+
+int ha_federatedx::rnd_next(uchar *buf)
+{
+  DBUG_ENTER("ha_federatedx::rnd_next");
+
+  if (stored_result == 0)
+  {
+    /*
+      Return value of rnd_init is not always checked (see records.cc),
+      so we can get here _even_ if there is _no_ pre-fetched result-set!
+      TODO: fix it. We can delete this in 5.1 when rnd_init() is checked.
+    */
+    DBUG_RETURN(1);
+  }
+  DBUG_RETURN(read_next(buf, stored_result));
+}
+
+
+/*
+  ha_federatedx::read_next
+
+  reads from a result set and converts to mysql internal
+  format
+
+  SYNOPSIS
+    read_next()
+      buf       byte pointer to record
+      result    mysql result set
+
+    DESCRIPTION
+     This method is a wrapper method that reads one record from a result
+     set and converts it to the internal table format
+
+    RETURN VALUE
+      0         no error
+      other     error code (HA_ERR_END_OF_FILE when the set is exhausted)
+*/
+
+int ha_federatedx::read_next(uchar *buf, FEDERATEDX_IO_RESULT *result)
+{
+  int retval;
+  FEDERATEDX_IO_ROW *row;
+  DBUG_ENTER("ha_federatedx::read_next");
+
+  table->status= STATUS_NOT_FOUND;              // For easier return
+
+  if ((retval= txn->acquire(share, TRUE, &io)))
+    DBUG_RETURN(retval);
+
+  /* Fetch a row, insert it back in a row format. */
+  if (!(row= io->fetch_row(result)))
+    DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+  if (!(retval= convert_row_to_internal_format(buf, row, result)))
+    table->status= 0;
+
+  DBUG_RETURN(retval);
+}
+
+
+/*
+  store reference to current row so that we can later find it for
+  a re-read, update or delete.
+
+  In case of federatedx, a reference is either a primary key or
+  the whole record.
+
+  Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
+*/
+
+void ha_federatedx::position(const uchar *record)
+{
+  DBUG_ENTER("ha_federatedx::position");
+  if (table->s->primary_key != MAX_KEY)
+    /* ref holds the primary key value; ref_length was sized accordingly */
+    key_copy(ref, (uchar *)record, table->key_info + table->s->primary_key,
+             ref_length);
+  else
+    /* no PK: the whole row image is the reference */
+    memcpy(ref, record, ref_length);
+  DBUG_VOID_RETURN;
+}
+
+
+/*
+  This is like rnd_next, but you are given a position to use to determine the
+  row. The position will be of the type that you stored in ref.
+
+  This method is required for an ORDER BY
+
+  Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc.
+*/
+
+int ha_federatedx::rnd_pos(uchar *buf, uchar *pos)
+{
+  int result;
+  DBUG_ENTER("ha_federatedx::rnd_pos");
+  ha_statistic_increment(&SSV::ha_read_rnd_count);
+  if (table->s->primary_key != MAX_KEY)
+  {
+    /* We have a primary key, so use index_read_idx to find row */
+    result= index_read_idx(buf, table->s->primary_key, pos,
+                           ref_length, HA_READ_KEY_EXACT);
+  }
+  else
+  {
+    /* otherwise, get the old record ref as obtained in ::position */
+    memcpy(buf, pos, ref_length);
+    result= 0;
+  }
+  table->status= result ? STATUS_NOT_FOUND : 0;
+  DBUG_RETURN(result);
+}
+
+
+/*
+  ::info() is used to return information to the optimizer.
+  Currently this table handler doesn't implement most of the fields
+  really needed. SHOW also makes use of this data
+  Another note, you will probably want to have the following in your
+  code:
+  if (records < 2)
+    records = 2;
+  The reason is that the server will optimize for cases of only a single
+  record. If in a table scan you don't know the number of records
+  it will probably be better to set records to two so you can return
+  as many records as you need.
+  Along with records a few more variables you may wish to set are:
+    records
+    deleted
+    data_file_length
+    index_file_length
+    delete_length
+    check_time
+  Take a look at the public variables in handler.h for more information.
+
+  Called in:
+    filesort.cc
+    ha_heap.cc
+    item_sum.cc
+    opt_sum.cc
+    sql_delete.cc
+    sql_delete.cc
+    sql_derived.cc
+    sql_select.cc
+    sql_select.cc
+    sql_select.cc
+    sql_select.cc
+    sql_select.cc
+    sql_show.cc
+    sql_show.cc
+    sql_show.cc
+    sql_show.cc
+    sql_table.cc
+    sql_union.cc
+    sql_update.cc
+
+*/
+
+int ha_federatedx::info(uint flag)
+{
+  char error_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  uint error_code;
+  federatedx_io *tmp_io= 0;
+  DBUG_ENTER("ha_federatedx::info");
+
+  error_code= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+  
+  /* we want not to show table status if not needed to do so */
+  if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST | HA_STATUS_AUTO))
+  {
+    if ((error_code= txn->acquire(share, TRUE, &tmp_io)))
+      goto fail;
+  }
+
+  if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST))
+  {
+    /*
+      size of IO operations (This is based on a good guess, no high science
+      involved)
+    */
+    if (flag & HA_STATUS_CONST)
+      stats.block_size= 4096;
+
+    if (tmp_io->table_metadata(&stats, share->table_name,
+                               share->table_name_length, flag))
+      goto error;
+  }
+
+  if (flag & HA_STATUS_AUTO)
+    stats.auto_increment_value= tmp_io->last_insert_id();
+
+  /*
+    If ::info created its own transaction, close it. This happens in case
+    of show table status;
+  */
+  txn->release(&tmp_io);
+
+  DBUG_RETURN(0);
+
+error:
+  /* prefer the remote error details when a connection is available */
+  if (tmp_io)
+  {
+    my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+                              tmp_io->error_code(), tmp_io->error_str()));
+    my_error(error_code, MYF(0), error_buffer);
+  }
+  else
+  if (remote_error_number != -1 /* error already reported */)
+  {
+    error_code= remote_error_number;
+    my_error(error_code, MYF(0), ER(error_code));
+  }
+fail:
+  txn->release(&tmp_io);
+  DBUG_RETURN(error_code);
+}
+
+
+/**
+  @brief Handles extra signals from MySQL server
+
+  @param[in] operation  Hint for storage engine
+
+  @return Operation Status
+  @retval 0     OK
+ */
+int ha_federatedx::extra(ha_extra_function operation)
+{
+  DBUG_ENTER("ha_federatedx::extra");
+  switch (operation) {
+  case HA_EXTRA_IGNORE_DUP_KEY:
+    /* duplicate-key errors on subsequent inserts should be ignored */
+    ignore_duplicates= TRUE;
+    break;
+  case HA_EXTRA_NO_IGNORE_DUP_KEY:
+    insert_dup_update= FALSE;
+    ignore_duplicates= FALSE;
+    break;
+  case HA_EXTRA_WRITE_CAN_REPLACE:
+    replace_duplicates= TRUE;
+    break;
+  case HA_EXTRA_WRITE_CANNOT_REPLACE:
+    /*
+      We use this flag to ensure that we do not create an "INSERT IGNORE"
+      statement when inserting new rows into the remote table.
+    */
+    replace_duplicates= FALSE;
+    break;
+  case HA_EXTRA_INSERT_WITH_UPDATE:
+    insert_dup_update= TRUE;
+    break;
+  default:
+    /* do nothing */
+    DBUG_PRINT("info",("unhandled operation: %d", (uint) operation));
+  }
+  DBUG_RETURN(0);
+}
+
+
+/**
+  @brief Reset state of file to after 'open'.
+
+  @detail Invoked after every statement for each table it used; clears
+    the duplicate-key handling hints that extra() may have set.
+
+  @return Operation status
+    @retval     0       OK
+*/
+
+int ha_federatedx::reset(void)
+{
+  /* forget any duplicate-handling hints from the previous statement */
+  ignore_duplicates= FALSE;
+  replace_duplicates= FALSE;
+  insert_dup_update= FALSE;
+  return 0;
+}
+
+
+/*
+  Used to delete all rows in a table. Both for cases of truncate and
+  for cases where the optimizer realizes that all rows will be
+  removed as a result of a SQL statement.
+
+  Called from item_sum.cc by Item_func_group_concat::clear(),
+  Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
+  Called from sql_delete.cc by mysql_delete().
+  Called from sql_select.cc by JOIN::reinit().
+  Called from sql_union.cc by st_select_lex_unit::exec().
+*/
+
+int ha_federatedx::delete_all_rows()
+{
+  char query_buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+  int error;
+  DBUG_ENTER("ha_federatedx::delete_all_rows");
+
+  query.length(0);
+
+  query.set_charset(system_charset_info);
+  query.append(STRING_WITH_LEN("TRUNCATE "));
+  append_ident(&query, share->table_name, share->table_name_length,
+               ident_quote_char);
+
+  /* no need for savepoint in autocommit mode */
+  if (!(ha_thd()->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
+    txn->stmt_autocommit();
+
+  /*
+    TRUNCATE won't return anything in mysql_affected_rows
+  */
+
+  if ((error= txn->acquire(share, FALSE, &io)))
+    DBUG_RETURN(error);
+
+  if (io->query(query.ptr(), query.length()))
+  {
+    DBUG_RETURN(stash_remote_error());
+  }
+  /* approximate the local statistics, since TRUNCATE reports no count */
+  stats.deleted+= stats.records;
+  stats.records= 0;
+  DBUG_RETURN(0);
+}
+
+
+/*
+  The idea with handler::store_lock() is the following:
+
+  The statement decided which locks we should need for the table
+  for updates/deletes/inserts we get WRITE locks, for SELECT... we get
+  read locks.
+
+  Before adding the lock into the table lock handler (see thr_lock.c)
+  mysqld calls store lock with the requested locks.  Store lock can now
+  modify a write lock to a read lock (or some other lock), ignore the
+  lock (if we don't want to use MySQL table locks at all) or add locks
+  for many tables (like we do when we are using a MERGE handler).
+
+  Berkeley DB for federatedx  changes all WRITE locks to TL_WRITE_ALLOW_WRITE
+  (which signals that we are doing WRITES, but we are still allowing other
+  readers and writers).
+
+  When releasing locks, store_lock() are also called. In this case one
+  usually doesn't have to do anything.
+
+  In some exceptional cases MySQL may send a request for a TL_IGNORE;
+  This means that we are requesting the same lock as last time and this
+  should also be ignored. (This may happen when someone does a flush
+  table when we have opened a part of the tables, in which case mysqld
+  closes and reopens the tables and tries to get the same locks at last
+  time).  In the future we will probably try to remove this.
+
+  Called from lock.cc by get_lock_data().
+*/
+
+THR_LOCK_DATA **ha_federatedx::store_lock(THD *thd,
+                                         THR_LOCK_DATA **to,
+                                         enum thr_lock_type lock_type)
+{
+  DBUG_ENTER("ha_federatedx::store_lock");
+  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+  {
+    /*
+      Here is where we get into the guts of a row level lock.
+      If TL_UNLOCK is set
+      If we are not doing a LOCK TABLE or DISCARD/IMPORT
+      TABLESPACE, then allow multiple writers
+    */
+
+    if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
+         lock_type <= TL_WRITE) && !thd->in_lock_tables)
+      lock_type= TL_WRITE_ALLOW_WRITE;
+
+    /*
+      In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
+      MySQL would use the lock TL_READ_NO_INSERT on t2, and that
+      would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
+      to t2. Convert the lock to a normal read lock to allow
+      concurrent inserts to t2.
+    */
+
+    if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
+      lock_type= TL_READ;
+
+    lock.type= lock_type;
+  }
+
+  *to++= &lock;
+
+  DBUG_RETURN(to);
+}
+
+
+/*
+  Probe the remote server by running "SELECT * FROM tbl WHERE 1=0".
+  On failure, reports ER_CANT_CREATE_FEDERATED_TABLE with the connection
+  parameters.  Returns the query's error status (0 on success).
+*/
+static int test_connection(MYSQL_THD thd, federatedx_io *io,
+                           FEDERATEDX_SHARE *share)
+{
+  char buffer[FEDERATEDX_QUERY_BUFFER_SIZE];
+  String str(buffer, sizeof(buffer), &my_charset_bin);
+  FEDERATEDX_IO_RESULT *resultset= NULL;
+  int retval;
+
+  str.length(0);
+  str.append(STRING_WITH_LEN("SELECT * FROM "));
+  append_identifier(thd, &str, share->table_name, 
+                    share->table_name_length);
+  str.append(STRING_WITH_LEN(" WHERE 1=0"));
+
+  if ((retval= io->query(str.ptr(), str.length())))
+  {
+    my_sprintf(buffer, (buffer,
+               "database: '%s'  username: '%s'  hostname: '%s'",
+               share->database, share->username, share->hostname));
+    DBUG_PRINT("info", ("error-code: %d", io->error_code()));
+    my_error(ER_CANT_CREATE_FEDERATED_TABLE, MYF(0), buffer);
+  }
+  else
+    resultset= io->store_result();
+
+  /* resultset may be NULL here; free_result() must tolerate that */
+  io->free_result(resultset);
+
+  return retval;
+}
+
+/*
+  create() does nothing, since we have no local setup of our own.
+  FUTURE: We should potentially connect to the foreign database and
+  validate the remote table definition there.
+  (NOTE(review): the original sentence above was left unfinished.)
+*/
+
+int ha_federatedx::create(const char *name, TABLE *table_arg,
+                         HA_CREATE_INFO *create_info)
+{
+  int retval;
+  THD *thd= current_thd;
+  FEDERATEDX_SHARE tmp_share; // Only a temporary share, to test the url
+  federatedx_txn *tmp_txn;
+  federatedx_io *tmp_io= NULL;
+  DBUG_ENTER("ha_federatedx::create");
+
+  if ((retval= parse_url(thd->mem_root, &tmp_share, table_arg, 1)))
+    goto error;
+
+  /*
+    loopback socket connections hang due to LOCK_open mutex
+    (NOTE(review): retval is 0 here, so the connection test is skipped
+    and CREATE succeeds without probing the remote side)
+  */
+  if ((!tmp_share.hostname || !strcmp(tmp_share.hostname,my_localhost)) &&
+      !tmp_share.port)
+    goto error;
+
+  /*
+    If possible, we try to use an existing network connection to
+    the remote server. To ensure that no new FEDERATEDX_SERVER
+    instance is created, we pass NULL in get_server() TABLE arg.
+  */
+  pthread_mutex_lock(&federatedx_mutex);
+  tmp_share.s= get_server(&tmp_share, NULL);
+  pthread_mutex_unlock(&federatedx_mutex);
+    
+  if (tmp_share.s)
+  {
+    tmp_txn= get_txn(thd);
+    if (!(retval= tmp_txn->acquire(&tmp_share, TRUE, &tmp_io)))
+    {
+      retval= test_connection(thd, tmp_io, &tmp_share);
+      tmp_txn->release(&tmp_io);    
+    }
+    free_server(tmp_txn, tmp_share.s);
+  }
+  else
+  {
+    FEDERATEDX_SERVER server;
+
+#ifdef NOT_YET
+    /* 
+      Bug#25679
+      Ensure that we do not hold the LOCK_open mutex while attempting
+      to establish FederatedX connection to guard against a trivial
+      Denial of Service scenerio.
+    */
+    safe_mutex_assert_not_owner(&LOCK_open);
+#endif
+
+    fill_server(thd->mem_root, &server, &tmp_share, create_info->table_charset);
+
+#ifndef DBUG_OFF
+    pthread_mutex_init(&server.mutex, MY_MUTEX_INIT_FAST);
+    pthread_mutex_lock(&server.mutex);
+#endif
+
+    /* NOTE(review): construct() result is not checked for NULL here */
+    tmp_io= federatedx_io::construct(thd->mem_root, &server);
+
+    retval= test_connection(thd, tmp_io, &tmp_share);
+
+#ifndef DBUG_OFF
+    pthread_mutex_unlock(&server.mutex);
+    pthread_mutex_destroy(&server.mutex);
+#endif
+
+    delete tmp_io;
+  }
+
+error:
+  DBUG_RETURN(retval);
+
+}
+
+
+/*
+  Remember the error reported by the current remote connection so that
+  get_error_message() can display it later.  Remote duplicate-key errors
+  are mapped onto the local HA_ERR_FOUND_DUPP_KEY; everything else is
+  reported as HA_FEDERATEDX_ERROR_WITH_REMOTE_SYSTEM.
+*/
+int ha_federatedx::stash_remote_error()
+{
+  DBUG_ENTER("ha_federatedx::stash_remote_error()");
+  if (!io)
+    DBUG_RETURN(remote_error_number);
+  remote_error_number= io->error_code();
+  strmake(remote_error_buf, io->error_str(), sizeof(remote_error_buf)-1);
+  if (remote_error_number == ER_DUP_ENTRY ||
+      remote_error_number == ER_DUP_KEY)
+    DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
+  DBUG_RETURN(HA_FEDERATEDX_ERROR_WITH_REMOTE_SYSTEM);
+}
+
+
+/*
+  Build a user-visible message for an error code.  Only
+  HA_FEDERATEDX_ERROR_WITH_REMOTE_SYSTEM carries stashed remote details;
+  the stashed state is cleared once it has been reported.
+*/
+bool ha_federatedx::get_error_message(int error, String* buf)
+{
+  DBUG_ENTER("ha_federatedx::get_error_message");
+  DBUG_PRINT("enter", ("error: %d", error));
+  if (error == HA_FEDERATEDX_ERROR_WITH_REMOTE_SYSTEM)
+  {
+    buf->append(STRING_WITH_LEN("Error on remote system: "));
+    buf->qs_append(remote_error_number);
+    buf->append(STRING_WITH_LEN(": "));
+    buf->append(remote_error_buf);
+
+    /* consume the stashed error so it is not reported twice */
+    remote_error_number= 0;
+    remote_error_buf[0]= '\0';
+  }
+  DBUG_PRINT("exit", ("message: %s", buf->ptr()));
+  DBUG_RETURN(FALSE);
+}
+
+
+/*
+  Called at the start of a new statement inside an already-open
+  transaction; registers this engine for statement-level handling
+  when no remote transaction is active yet.
+*/
+int ha_federatedx::start_stmt(MYSQL_THD thd, thr_lock_type lock_type)
+{
+  DBUG_ENTER("ha_federatedx::start_stmt");
+  DBUG_ASSERT(txn == get_txn(thd));
+  
+  if (!txn->in_transaction())
+  {
+    txn->stmt_begin();
+    trans_register_ha(thd, FALSE, ht);
+  }
+  DBUG_RETURN(0);
+}
+
+
+/*
+  Acquire/release the remote connection around a statement.
+  F_UNLCK returns the connection to the transaction pool; otherwise a
+  connection is acquired (read-only for F_RDLCK) and, for writes or
+  non-autocommit connections, the engine registers with the server's
+  transaction coordinator.
+*/
+int ha_federatedx::external_lock(MYSQL_THD thd, int lock_type)
+{
+  int error= 0;
+  DBUG_ENTER("ha_federatedx::external_lock");
+
+  if (lock_type == F_UNLCK)
+    txn->release(&io);
+  else
+  {
+    txn= get_txn(thd);  
+    if (!(error= txn->acquire(share, lock_type == F_RDLCK, &io)) &&
+        (lock_type == F_WRLCK || !io->is_autocommit()))
+    {
+      /* statement-level registration in autocommit mode, else txn-level */
+      if (!thd_test_options(thd, (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))
+      {
+        txn->stmt_begin();
+        trans_register_ha(thd, FALSE, ht);
+      }
+      else
+      {
+        txn->txn_begin();
+        trans_register_ha(thd, TRUE, ht);
+      }
+    }
+  }
+
+  DBUG_RETURN(error);
+}
+
+
+/*
+  handlerton savepoint-set hook: allocate a savepoint number for the
+  connections of this thread's transaction, writing it into *sv.
+*/
+int ha_federatedx::savepoint_set(handlerton *hton, MYSQL_THD thd, void *sv)
+{
+  int error= 0;
+  federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+  DBUG_ENTER("ha_federatedx::savepoint_set");
+
+  if (txn && txn->has_connections())
+  {
+    /* presumably TRUE means a transaction was just started -- confirm
+       against federatedx_txn::txn_begin() */
+    if (txn->txn_begin())
+      trans_register_ha(thd, TRUE, hton);
+    
+    txn->sp_acquire((ulong *) sv);
+
+    /* savepoint identifiers are expected to start above 1 */
+    DBUG_ASSERT(1 < *(ulong *) sv);
+  }
+
+  DBUG_RETURN(error);
+}
+
+
+/*
+  handlerton savepoint-rollback hook: roll the remote connections back
+  to the savepoint identified by *sv, if a transaction context exists.
+*/
+int ha_federatedx::savepoint_rollback(handlerton *hton, MYSQL_THD thd, void *sv)
+{
+  federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+  int error= 0;
+  DBUG_ENTER("ha_federatedx::savepoint_rollback");
+
+  if (txn)
+    error= txn->sp_rollback((ulong *) sv);
+
+  DBUG_RETURN(error);
+}
+
+
+/*
+  handlerton savepoint-release hook: discard the savepoint identified
+  by *sv on the remote connections, if a transaction context exists.
+*/
+int ha_federatedx::savepoint_release(handlerton *hton, MYSQL_THD thd, void *sv)
+{
+  federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+  int error= 0;
+  DBUG_ENTER("ha_federatedx::savepoint_release");
+
+  if (txn)
+    error= txn->sp_release((ulong *) sv);
+
+  DBUG_RETURN(error);
+}
+
+
+/*
+  handlerton commit hook: commit the whole transaction (all == TRUE)
+  or only the current statement.  txn is assumed non-NULL since the
+  engine registered itself in external_lock/start_stmt -- TODO confirm.
+*/
+int ha_federatedx::commit(handlerton *hton, MYSQL_THD thd, bool all)
+{
+  int return_val;
+  federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+  DBUG_ENTER("ha_federatedx::commit");
+
+  if (all)
+    return_val= txn->txn_commit();
+  else
+    return_val= txn->stmt_commit();    
+  
+  DBUG_PRINT("info", ("error val: %d", return_val));
+  DBUG_RETURN(return_val);
+}
+
+
+/*
+  handlerton rollback hook: roll back the whole transaction
+  (all == TRUE) or only the current statement.  txn is assumed
+  non-NULL, mirroring commit() -- TODO confirm.
+*/
+int ha_federatedx::rollback(handlerton *hton, MYSQL_THD thd, bool all)
+{
+  int return_val;
+  federatedx_txn *txn= (federatedx_txn *) thd_get_ha_data(thd, hton);
+  DBUG_ENTER("ha_federatedx::rollback");
+
+  if (all)
+    return_val= txn->txn_rollback();
+  else
+    return_val= txn->stmt_rollback();
+
+  DBUG_PRINT("info", ("error val: %d", return_val));
+  DBUG_RETURN(return_val);
+}
+
+struct st_mysql_storage_engine federatedx_storage_engine=
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+
+/* Plugin registration; the engine is exposed under the name "FEDERATED". */
+mysql_declare_plugin(federated)
+{
+  MYSQL_STORAGE_ENGINE_PLUGIN,
+  &federatedx_storage_engine,
+  "FEDERATED",
+  "Patrick Galbraith",
+  "FederatedX pluggable storage engine",
+  PLUGIN_LICENSE_GPL,
+  federatedx_db_init, /* Plugin Init */
+  federatedx_done, /* Plugin Deinit */
+  0x0100 /* 1.0 */,
+  NULL,                       /* status variables                */
+  NULL,                       /* system variables                */
+  NULL                        /* config options                  */
+}
+mysql_declare_plugin_end;

=== added file 'storage/federatedx/ha_federatedx.h'
--- a/storage/federatedx/ha_federatedx.h	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/ha_federatedx.h	2009-10-30 18:50:56 +0000
@@ -0,0 +1,446 @@
+/* 
+Copyright (c) 2008, Patrick Galbraith 
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+    * Neither the name of Patrick Galbraith nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+class federatedx_io;
+
+/*
+  FEDERATEDX_SERVER will eventually be a structure that will be shared among
+  all FEDERATEDX_SHARE instances so that the federated server can minimise
+  the number of open connections. This will eventually lead to the support
+  of reliable XA federated tables.
+*/
+/* NOTE(review): tag spelling "fedrated" is kept as-is for compatibility */
+typedef struct st_fedrated_server {
+  MEM_ROOT mem_root;
+  uint use_count, io_count;
+  
+  uchar *key;                 /* identity of this server -- presumably a
+                                 hash key; confirm against get_server() */
+  uint key_length;
+
+  const char *scheme;
+  const char *hostname;
+  const char *username;
+  const char *password;
+  const char *database;
+  const char *socket;
+  ushort port;
+
+  const char *csname;         /* connection character set name */
+
+  pthread_mutex_t mutex;      /* presumably guards idle_list and the
+                                 counters above -- TODO confirm */
+  federatedx_io *idle_list;   /* pool of idle connections to this server */
+} FEDERATEDX_SERVER;
+
+/*
+  Please read ha_example.cc before reading this file.
+  Please keep in mind that the federatedx storage engine implements all methods
+  that are required to be implemented. handler.h has a full list of methods
+  that you can implement.
+*/
+
+#ifdef USE_PRAGMA_INTERFACE
+#pragma interface			/* gcc class implementation */
+#endif
+
+#include <mysql.h>
+
+/*
+  handler::print_error has a case statement for error numbers.
+  This value (10000) is far out of range and will invoke the
+  default: case.
+  (Current error range is 120-159 from include/my_base.h)
+*/
+#define HA_FEDERATEDX_ERROR_WITH_REMOTE_SYSTEM 10000
+
+#define FEDERATEDX_QUERY_BUFFER_SIZE STRING_BUFFER_USUAL_SIZE * 5
+#define FEDERATEDX_RECORDS_IN_RANGE 2
+#define FEDERATEDX_MAX_KEY_LENGTH 3500 // Same as innodb
+
+/*
+  FEDERATEDX_SHARE is a structure that will be shared among all open handlers.
+  The example implements the minimum of what you will probably need.
+*/
+typedef struct st_federatedx_share {
+  MEM_ROOT mem_root;
+
+  bool parsed;                /* set once the connection URL was parsed */
+  /* this key is unique db/tablename */
+  const char *share_key;
+  /*
+    the primary select query to be used in rnd_init
+  */
+  char *select_query;
+  /*
+    remote host info, parse_url supplies
+  */
+  char *server_name;
+  char *connection_string;
+  char *scheme;
+  char *hostname;
+  char *username;
+  char *password;
+  char *database;
+  char *table_name;
+  char *table;
+  char *socket;
+  char *sport;                /* port number as a string */
+  int share_key_length;
+  ushort port;
+
+  uint table_name_length, server_name_length, connect_string_length;
+  uint use_count;
+  THR_LOCK lock;
+  FEDERATEDX_SERVER *s;       /* associated shared server -- see get_server() */
+} FEDERATEDX_SHARE;
+
+
+typedef struct st_federatedx_result FEDERATEDX_IO_RESULT;
+typedef struct st_federatedx_row FEDERATEDX_IO_ROW;
+typedef ptrdiff_t FEDERATEDX_IO_OFFSET;
+
+/*
+  federatedx_io: abstract interface to one connection to a remote server.
+  Concrete backends implement query execution, result-set access and
+  transaction/savepoint control.  Instances are linked into a
+  federatedx_txn (txn_next) and pooled per server (idle_next).
+*/
+class federatedx_io
+{
+  friend class federatedx_txn;
+  FEDERATEDX_SERVER * const server;
+  federatedx_io **owner_ptr;    /* back-pointer to the owning slot --
+                                   presumably cleared on release; confirm */
+  federatedx_io *txn_next;      /* link in federatedx_txn::txn_list */
+  federatedx_io *idle_next;     /* link in FEDERATEDX_SERVER::idle_list */
+  bool active;  /* currently participating in a transaction */
+  bool busy;    /* in use by a ha_federated instance */
+  bool readonly;/* indicates that no updates have occurred */
+
+protected:
+  void set_active(bool new_active)
+  { active= new_active; }
+public:
+  federatedx_io(FEDERATEDX_SERVER *);
+  virtual ~federatedx_io();
+
+  bool is_readonly() const { return readonly; }
+  bool is_active() const { return active; }
+
+  const char * get_charsetname() const
+  { return server->csname ? server->csname : "latin1"; }
+
+  const char * get_hostname() const { return server->hostname; }
+  const char * get_username() const { return server->username; }
+  const char * get_password() const { return server->password; }
+  const char * get_database() const { return server->database; }
+  ushort       get_port() const     { return server->port; }
+  const char * get_socket() const   { return server->socket; }
+  
+  static bool handles_scheme(const char *scheme);
+  /* factory: build the backend implementation appropriate for server */
+  static federatedx_io *construct(MEM_ROOT *server_root,
+                                  FEDERATEDX_SERVER *server);
+
+  /* instances live on a MEM_ROOT; delete only scribbles over the memory */
+  static void *operator new(size_t size, MEM_ROOT *mem_root) throw ()
+  { return alloc_root(mem_root, size); }
+  static void operator delete(void *ptr, size_t size)
+  { TRASH(ptr, size); }
+    
+  virtual int query(const char *buffer, uint length)=0;
+  virtual FEDERATEDX_IO_RESULT *store_result()=0;
+
+  virtual size_t max_query_size() const=0;
+
+  virtual my_ulonglong affected_rows() const=0;
+  virtual my_ulonglong last_insert_id() const=0;
+
+  virtual int error_code()=0;
+  virtual const char *error_str()=0;
+  
+  virtual void reset()=0;
+  virtual int commit()=0;
+  virtual int rollback()=0;
+  
+  virtual int savepoint_set(ulong sp)=0;
+  virtual ulong savepoint_release(ulong sp)=0;
+  virtual ulong savepoint_rollback(ulong sp)=0;
+  virtual void savepoint_restrict(ulong sp)=0;
+  
+  virtual ulong last_savepoint() const=0;
+  virtual ulong actual_savepoint() const=0;
+  virtual bool is_autocommit() const=0;
+
+  virtual bool table_metadata(ha_statistics *stats, const char *table_name,
+                              uint table_name_length, uint flag) = 0;
+  
+  /* resultset operations */
+  
+  virtual void free_result(FEDERATEDX_IO_RESULT *io_result)=0;
+  virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result)=0;
+  virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result)=0;
+  virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result)=0;
+  virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result)=0;
+  virtual const char *get_column_data(FEDERATEDX_IO_ROW *row,
+                                      unsigned int column)=0;
+  virtual bool is_column_null(const FEDERATEDX_IO_ROW *row,
+                              unsigned int column) const=0;
+};
+
+
+/*
+  federatedx_txn: tracks the remote connections participating in the
+  current local transaction and maps local transaction events
+  (begin/commit/rollback, statements, savepoints) onto them.
+*/
+class federatedx_txn
+{
+  federatedx_io *txn_list;      /* connections enlisted in this transaction */
+  ulong savepoint_level;
+  ulong savepoint_stmt;
+  ulong savepoint_next;         /* next savepoint id; 0 == no transaction */
+  
+  void release_scan();
+public:
+  federatedx_txn();
+  ~federatedx_txn();
+  
+  bool has_connections() const { return txn_list != NULL; }
+  bool in_transaction() const { return savepoint_next != 0; }
+  /* obtain a (possibly pooled) connection for the given share */
+  int acquire(FEDERATEDX_SHARE *share, bool readonly, federatedx_io **io);
+  void release(federatedx_io **io);
+  void close(FEDERATEDX_SERVER *);
+
+  bool txn_begin();
+  int txn_commit();
+  int txn_rollback();
+
+  bool sp_acquire(ulong *save);
+  int sp_rollback(ulong *save);
+  int sp_release(ulong *save);
+
+  bool stmt_begin();
+  int stmt_commit();
+  int stmt_rollback();
+  void stmt_autocommit();
+};
+
+
+/*
+  Class definition for the storage engine
+*/
+class ha_federatedx: public handler
+{
+  friend int federatedx_db_init(void *p);
+
+  THR_LOCK_DATA lock;      /* MySQL lock */
+  FEDERATEDX_SHARE *share;    /* Shared lock info */
+  federatedx_txn *txn;
+  federatedx_io *io;
+  FEDERATEDX_IO_RESULT *stored_result;
+  uint fetch_num; // stores the fetch num
+  FEDERATEDX_IO_OFFSET current_position;  // Current position used by ::position()
+  int remote_error_number;
+  char remote_error_buf[FEDERATEDX_QUERY_BUFFER_SIZE];
+  bool ignore_duplicates, replace_duplicates;
+  bool insert_dup_update;
+  DYNAMIC_STRING bulk_insert;
+
+private:
+  /*
+      return 0 on success
+      return errorcode otherwise
+  */
+  uint convert_row_to_internal_format(uchar *buf, FEDERATEDX_IO_ROW *row,
+                                      FEDERATEDX_IO_RESULT *result);
+  bool create_where_from_key(String *to, KEY *key_info, 
+                             const key_range *start_key,
+                             const key_range *end_key,
+                             bool records_in_range, bool eq_range);
+  int stash_remote_error();
+
+  federatedx_txn *get_txn(THD *thd, bool no_create= FALSE);
+
+  static int disconnect(handlerton *hton, MYSQL_THD thd);
+  static int savepoint_set(handlerton *hton, MYSQL_THD thd, void *sv);
+  static int savepoint_rollback(handlerton *hton, MYSQL_THD thd, void *sv);
+  static int savepoint_release(handlerton *hton, MYSQL_THD thd, void *sv);
+  static int commit(handlerton *hton, MYSQL_THD thd, bool all);
+  static int rollback(handlerton *hton, MYSQL_THD thd, bool all);
+
+  bool append_stmt_insert(String *query);
+
+  int read_next(uchar *buf, FEDERATEDX_IO_RESULT *result);
+  int index_read_idx_with_result_set(uchar *buf, uint index,
+                                     const uchar *key,
+                                     uint key_len,
+                                     ha_rkey_function find_flag,
+                                     FEDERATEDX_IO_RESULT **result);
+  int real_query(const char *query, uint length);
+  int real_connect(FEDERATEDX_SHARE *my_share, uint create_flag);
+public:
+  ha_federatedx(handlerton *hton, TABLE_SHARE *table_arg);
+  ~ha_federatedx() {}
+  /* The name that will be used for display purposes */
+  const char *table_type() const { return "FEDERATED"; }
+  /*
+    The name of the index type that will be used for display
+    don't implement this method unless you really have indexes
+   */
+  // perhaps get index type
+  const char *index_type(uint inx) { return "REMOTE"; }
+  const char **bas_ext() const;
+  /*
+    This is a list of flags that says what the storage engine
+    implements. The current table flags are documented in
+    handler.h
+  */
+  ulonglong table_flags() const
+  {
+    /* fix server to be able to get remote server table flags */
+    return (HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED
+            | HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS |
+            HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
+            HA_NO_PREFIX_CHAR_KEYS | HA_PRIMARY_KEY_REQUIRED_FOR_DELETE |
+            HA_PARTIAL_COLUMN_READ | HA_NULL_IN_KEY);
+  }
+  /*
+    This is a bitmap of flags that says how the storage engine
+    implements indexes. The current index flags are documented in
+    handler.h. If you do not implement indexes, just return zero
+    here.
+
+    part is the key part to check. First key part is 0
+    If all_parts is set, MySQL wants to know the flags for the combined
+    index up to and including 'part'.
+  */
+    /* fix server to be able to get remote server index flags */
+  ulong index_flags(uint inx, uint part, bool all_parts) const
+  {
+    return (HA_READ_NEXT | HA_READ_RANGE | HA_READ_AFTER_KEY);
+  }
+  uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
+  uint max_supported_keys()          const { return MAX_KEY; }
+  uint max_supported_key_parts()     const { return MAX_REF_PARTS; }
+  uint max_supported_key_length()    const { return FEDERATEDX_MAX_KEY_LENGTH; }
+  uint max_supported_key_part_length() const { return FEDERATEDX_MAX_KEY_LENGTH; }
+  /*
+    Called in test_quick_select to determine if indexes should be used.
+    Normally, we need to know number of blocks . For federatedx we need to
+    know number of blocks on remote side, and number of packets and blocks
+    on the network side (?)
+    Talk to Kostja about this - how to get the
+    number of rows * ...
+    disk scan time on other side (block size, size of the row) + network time ...
+    The reason for "records * 1000" is that such a large number forces 
+    this to use indexes.
+  */
+  double scan_time()
+  {
+    DBUG_PRINT("info", ("records %lu", (ulong) stats.records));
+    return (double)(stats.records*1000); 
+  }
+  /*
+    The next method will never be called if you do not implement indexes.
+  */
+  double read_time(uint index, uint ranges, ha_rows rows) 
+  {
+    /*
+      Per Brian, this number is bogus, but this method must be implemented,
+      and at a later date, he intends to document this issue for handler code
+    */
+    return (double) rows /  20.0+1;
+  }
+
+  const key_map *keys_to_use_for_scanning() { return &key_map_full; }
+  /*
+    Everything below are methods that we implement in ha_federatedx.cc.
+
+    Most of these methods are not obligatory, skip them and
+    MySQL will treat them as not implemented
+  */
+  int open(const char *name, int mode, uint test_if_locked);    // required
+  int close(void);                                              // required
+
+  void start_bulk_insert(ha_rows rows);
+  int end_bulk_insert(bool abort);
+  int write_row(uchar *buf);
+  int update_row(const uchar *old_data, uchar *new_data);
+  int delete_row(const uchar *buf);
+  int index_init(uint keynr, bool sorted);
+  ha_rows estimate_rows_upper_bound();
+  int index_read(uchar *buf, const uchar *key,
+                 uint key_len, enum ha_rkey_function find_flag);
+  int index_read_idx(uchar *buf, uint idx, const uchar *key,
+                     uint key_len, enum ha_rkey_function find_flag);
+  int index_next(uchar *buf);
+  int index_end();
+  int read_range_first(const key_range *start_key,
+                               const key_range *end_key,
+                               bool eq_range, bool sorted);
+  int read_range_next();
+  /*
+    unlike index_init(), rnd_init() can be called two times
+    without rnd_end() in between (it only makes sense if scan=1).
+    then the second call should prepare for the new table scan
+    (e.g if rnd_init allocates the cursor, second call should
+    position it to the start of the table, no need to deallocate
+    and allocate it again
+  */
+  int rnd_init(bool scan);                                      //required
+  int rnd_end();
+  int rnd_next(uchar *buf);                                      //required
+  int rnd_pos(uchar *buf, uchar *pos);                            //required
+  void position(const uchar *record);                            //required
+  int info(uint);                                              //required
+  int extra(ha_extra_function operation);
+
+  void update_auto_increment(void);
+  int repair(THD* thd, HA_CHECK_OPT* check_opt);
+  int optimize(THD* thd, HA_CHECK_OPT* check_opt);
+
+  int delete_all_rows(void);
+  int create(const char *name, TABLE *form,
+             HA_CREATE_INFO *create_info);                      //required
+  ha_rows records_in_range(uint inx, key_range *start_key,
+                                   key_range *end_key);
+  uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; }
+
+  THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
+                             enum thr_lock_type lock_type);     //required
+  bool get_error_message(int error, String *buf);
+  int start_stmt(THD *thd, thr_lock_type lock_type);
+  int external_lock(THD *thd, int lock_type);
+  int reset(void);
+  int free_result(void);
+};
+
+extern const char ident_quote_char;              // Character for quoting
+                                                 // identifiers
+extern const char value_quote_char;              // Character for quoting
+                                                 // literals
+
+extern bool append_ident(String *string, const char *name, uint length,
+                         const char quote_char);
+
+
+extern federatedx_io *instantiate_io_mysql(MEM_ROOT *server_root,
+                                           FEDERATEDX_SERVER *server);
+extern federatedx_io *instantiate_io_null(MEM_ROOT *server_root,
+                                          FEDERATEDX_SERVER *server);

=== added file 'storage/federatedx/plug.in'
--- a/storage/federatedx/plug.in	1970-01-01 00:00:00 +0000
+++ b/storage/federatedx/plug.in	2009-10-30 18:50:56 +0000
@@ -0,0 +1,5 @@
+MYSQL_STORAGE_ENGINE(federated,,[FederatedX Storage Engine],
+        [FederatedX Storage Engine], [max,max-no-ndb])
+MYSQL_PLUGIN_DYNAMIC(federated,   [ha_federatedx.la])
+MYSQL_PLUGIN_STATIC(federated,    [libfederatedx.a])
+MYSQL_PLUGIN_DEPENDS_ON_MYSQL_INTERNALS(federated, [ha_federatedx.cc])

=== modified file 'storage/pbxt/src/cache_xt.cc'
--- a/storage/pbxt/src/cache_xt.cc	2009-08-17 11:12:36 +0000
+++ b/storage/pbxt/src/cache_xt.cc	2009-10-30 18:50:56 +0000
@@ -374,7 +374,7 @@ xtPublic void xt_ind_release_handle(XTIn
 {
 	DcHandleSlotPtr	hs;
 	XTIndBlockPtr	block = NULL;
-	u_int		hash_idx = NULL;
+	u_int		hash_idx = 0;
 	DcSegmentPtr	seg = NULL;
 	XTIndBlockPtr	xblock;
 
@@ -1379,7 +1379,7 @@ xtPublic xtBool xt_ind_fetch(XTOpenTable
 	ASSERT_NS(iref->ir_xlock == 2);
 #endif
 	if (!(block = ind_cac_fetch(ot, ind, address, &seg, TRUE)))
-		return NULL;
+		return 0;
 
 	branch_size = XT_GET_DISK_2(((XTIdxBranchDPtr) block->cb_data)->tb_size_2);
 	if (XT_GET_INDEX_BLOCK_LEN(branch_size) < 2 || XT_GET_INDEX_BLOCK_LEN(branch_size) > XT_INDEX_PAGE_SIZE) {

=== modified file 'storage/xtradb/include/buf0buf.ic'
--- a/storage/xtradb/include/buf0buf.ic	2009-06-25 01:43:25 +0000
+++ b/storage/xtradb/include/buf0buf.ic	2009-10-30 18:50:56 +0000
@@ -1056,7 +1056,7 @@ buf_page_release(
 	buf_block_t*	block,		/* in: buffer block */
 	ulint		rw_latch,	/* in: RW_S_LATCH, RW_X_LATCH,
 					RW_NO_LATCH */
-	mtr_t*		mtr)		/* in: mtr */
+	mtr_t*		mtr __attribute__((unused)))		/* in: mtr */
 {
 	ut_ad(block);
 

=== modified file 'storage/xtradb/include/srv0srv.h'
--- a/storage/xtradb/include/srv0srv.h	2009-08-10 22:36:10 +0000
+++ b/storage/xtradb/include/srv0srv.h	2009-10-31 19:22:50 +0000
@@ -116,8 +116,8 @@ extern ulint	srv_log_file_size;
 extern ulint	srv_log_buffer_size;
 extern ulong	srv_flush_log_at_trx_commit;
 
-extern ulint    srv_show_locks_held;
-extern ulint    srv_show_verbose_locks;
+extern ulong    srv_show_locks_held;
+extern ulong    srv_show_verbose_locks;
 
 /* The sort order table of the MySQL latin1_swedish_ci character set
 collation */
@@ -166,11 +166,11 @@ extern ulint	srv_fast_shutdown;	 /* If t
 extern ibool	srv_innodb_status;
 
 extern unsigned long long	srv_stats_sample_pages;
-extern ulint	srv_stats_method;
+extern ulong	srv_stats_method;
 #define SRV_STATS_METHOD_NULLS_EQUAL     0
 #define SRV_STATS_METHOD_NULLS_NOT_EQUAL 1
 #define SRV_STATS_METHOD_IGNORE_NULLS    2
-extern ulint	srv_stats_auto_update;
+extern ulong	srv_stats_auto_update;
 
 extern ibool	srv_use_doublewrite_buf;
 extern ibool	srv_use_checksums;
@@ -183,19 +183,19 @@ extern ulong	srv_max_purge_lag;
 
 extern ulong	srv_replication_delay;
 
-extern ulint	srv_io_capacity;
+extern ulong	srv_io_capacity;
 extern long long	srv_ibuf_max_size;
-extern ulint	srv_ibuf_active_contract;
-extern ulint	srv_ibuf_accel_rate;
-extern ulint	srv_flush_neighbor_pages;
-extern ulint	srv_enable_unsafe_group_commit;
-extern ulint	srv_read_ahead;
-extern ulint	srv_adaptive_checkpoint;
+extern ulong	srv_ibuf_active_contract;
+extern ulong	srv_ibuf_accel_rate;
+extern ulong	srv_flush_neighbor_pages;
+extern ulong	srv_enable_unsafe_group_commit;
+extern ulong	srv_read_ahead;
+extern ulong	srv_adaptive_checkpoint;
 
-extern ulint	srv_expand_import;
+extern ulong	srv_expand_import;
 
-extern ulint	srv_extra_rsegments;
-extern ulint	srv_dict_size_limit;
+extern ulong	srv_extra_rsegments;
+extern ulong	srv_dict_size_limit;
 /*-------------------------------------------*/
 
 extern ulint	srv_n_rows_inserted;

=== modified file 'storage/xtradb/srv/srv0srv.c'
--- a/storage/xtradb/srv/srv0srv.c	2009-09-15 10:46:35 +0000
+++ b/storage/xtradb/srv/srv0srv.c	2009-10-31 19:22:50 +0000
@@ -160,8 +160,8 @@ UNIV_INTERN ulint	srv_log_file_size	= UL
 UNIV_INTERN ulint	srv_log_buffer_size	= ULINT_MAX;
 UNIV_INTERN ulong	srv_flush_log_at_trx_commit = 1;
 
-UNIV_INTERN ulint  srv_show_locks_held     = 10;
-UNIV_INTERN ulint  srv_show_verbose_locks  = 0;
+UNIV_INTERN ulong  srv_show_locks_held     = 10;
+UNIV_INTERN ulong  srv_show_verbose_locks  = 0;
 
 
 /* The sort order table of the MySQL latin1_swedish_ci character set
@@ -338,8 +338,8 @@ UNIV_INTERN ibool	srv_innodb_status	= FA
 /* When estimating number of different key values in an index, sample
 this many index pages */
 UNIV_INTERN unsigned long long	srv_stats_sample_pages = 8;
-UNIV_INTERN ulint	srv_stats_method = 0;
-UNIV_INTERN ulint	srv_stats_auto_update = 1;
+UNIV_INTERN ulong	srv_stats_method = 0;
+UNIV_INTERN ulong	srv_stats_auto_update = 1;
 
 UNIV_INTERN ibool	srv_use_doublewrite_buf	= TRUE;
 UNIV_INTERN ibool	srv_use_checksums = TRUE;
@@ -349,7 +349,7 @@ UNIV_INTERN int	srv_query_thread_priorit
 
 UNIV_INTERN ulong	srv_replication_delay		= 0;
 
-UNIV_INTERN ulint	srv_io_capacity = 100;
+UNIV_INTERN ulong	srv_io_capacity = 100;
 
 /* Returns the number of IO operations that is X percent of the capacity.
 PCT_IO(5) -> returns the number of IO operations that is 5% of the max
@@ -357,20 +357,20 @@ where max is srv_io_capacity. */
 #define PCT_IO(pct) ((ulint) (srv_io_capacity * ((double) pct / 100.0)))
 
 UNIV_INTERN long long	srv_ibuf_max_size = 0;
-UNIV_INTERN ulint	srv_ibuf_active_contract = 0; /* 0:disable 1:enable */
-UNIV_INTERN ulint	srv_ibuf_accel_rate = 100;
+UNIV_INTERN ulong	srv_ibuf_active_contract = 0; /* 0:disable 1:enable */
+UNIV_INTERN ulong	srv_ibuf_accel_rate = 100;
 #define PCT_IBUF_IO(pct) ((ulint) (srv_io_capacity * srv_ibuf_accel_rate * ((double) pct / 10000.0)))
 
-UNIV_INTERN ulint	srv_flush_neighbor_pages = 1; /* 0:disable 1:enable */
+UNIV_INTERN ulong	srv_flush_neighbor_pages = 1; /* 0:disable 1:enable */
 
-UNIV_INTERN ulint	srv_enable_unsafe_group_commit = 0; /* 0:disable 1:enable */
-UNIV_INTERN ulint	srv_read_ahead = 3; /* 1: random  2: linear  3: Both */
-UNIV_INTERN ulint	srv_adaptive_checkpoint = 0; /* 0: none  1: reflex  2: estimate */
+UNIV_INTERN ulong	srv_enable_unsafe_group_commit = 0; /* 0:disable 1:enable */
+UNIV_INTERN ulong	srv_read_ahead = 3; /* 1: random  2: linear  3: Both */
+UNIV_INTERN ulong	srv_adaptive_checkpoint = 0; /* 0: none  1: reflex  2: estimate */
 
-UNIV_INTERN ulint	srv_expand_import = 0; /* 0:disable 1:enable */
+UNIV_INTERN ulong	srv_expand_import = 0; /* 0:disable 1:enable */
 
-UNIV_INTERN ulint	srv_extra_rsegments = 0; /* extra rseg for users */
-UNIV_INTERN ulint	srv_dict_size_limit = 0;
+UNIV_INTERN ulong	srv_extra_rsegments = 0; /* extra rseg for users */
+UNIV_INTERN ulong	srv_dict_size_limit = 0;
 /*-------------------------------------------*/
 UNIV_INTERN ulong	srv_n_spin_wait_rounds	= 20;
 UNIV_INTERN ulong	srv_n_free_tickets_to_enter = 500;

=== modified file 'win/make_mariadb_win_dist'
--- a/win/make_mariadb_win_dist	2009-10-12 16:50:20 +0000
+++ b/win/make_mariadb_win_dist	2009-10-30 10:50:48 +0000
@@ -66,7 +66,7 @@ ls -lah $ZIPFILE
 echo "$ZIPFILE is the Windows noinstall binary zip"
 
 if [ $RES ] ; then
- echo "Archive contents differ from the standard file list, check the diff output above"
+  echo "Archive contents differ from the standard file list, check the diff output above"
 else
   echo "Archive contents match the standard list, OK"
 fi