maria-developers team mailing list archive

bzr commit into MariaDB 5.1, with Maria 1.5:maria branch (knielsen:2703)

#At lp:maria

 2703 knielsen@xxxxxxxxxxxxxxx	2009-06-10 [merge]
      Test merge of the latest XtraDB. Some conflicts, as expected; the result seems to be OK.
      modified:
        storage/xtradb/btr/btr0cur.c
        storage/xtradb/btr/btr0sea.c
        storage/xtradb/buf/buf0buddy.c
        storage/xtradb/buf/buf0buf.c
        storage/xtradb/buf/buf0flu.c
        storage/xtradb/buf/buf0lru.c
        storage/xtradb/buf/buf0rea.c
        storage/xtradb/handler/ha_innodb.cc
        storage/xtradb/handler/i_s.cc
        storage/xtradb/handler/innodb_patch_info.h
        storage/xtradb/include/buf0buddy.h
        storage/xtradb/include/buf0buddy.ic
        storage/xtradb/include/buf0buf.h
        storage/xtradb/include/buf0buf.ic
        storage/xtradb/include/buf0flu.ic
        storage/xtradb/include/buf0lru.h
        storage/xtradb/include/sync0rw.h
        storage/xtradb/include/sync0rw.ic
        storage/xtradb/include/trx0sys.h
        storage/xtradb/include/trx0trx.h
        storage/xtradb/include/univ.i
        storage/xtradb/include/ut0auxconf.h
        storage/xtradb/sync/sync0arr.c
        storage/xtradb/sync/sync0rw.c
        storage/xtradb/trx/trx0sys.c
        storage/xtradb/trx/trx0trx.c

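Most of the hunks below show the same pattern: the split buffer-pool latches used in the previous tree (LRU_list_mutex, flush_list_mutex, page_hash_latch, free_list_mutex, zip_free_mutex, zip_hash_mutex) are replaced at these call sites by the single buf_pool_mutex plus the per-block mutex. For readers who want the locking shape without the InnoDB plumbing, here is a minimal self-contained sketch of that two-level pattern in plain pthreads; the struct, its fields and free_if_still_ours() are illustrative stand-ins, not the real API.

    #include <pthread.h>
    #include <stdbool.h>

    /* Toy model of the locking shape used throughout these hunks: one
       pool-wide mutex guarding the shared lists and page hash, plus a
       short-lived per-block mutex.  All names are hypothetical. */

    struct block {
        pthread_mutex_t mutex;    /* per-block mutex (block->mutex in the diff) */
        unsigned        space;    /* tablespace id the block currently holds */
        unsigned        page_no;  /* page number within that tablespace */
        bool            in_use;
    };

    static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER; /* buf_pool_mutex */

    /* Free a block only if it still holds the page we expect; the identity
       is rechecked under both latches because it may have changed while no
       latch was held (compare the guard kept in btr_blob_free() below). */
    static void free_if_still_ours(struct block *b, unsigned space, unsigned page_no)
    {
        pthread_mutex_lock(&pool_mutex);    /* buf_pool_mutex_enter() */
        pthread_mutex_lock(&b->mutex);      /* mutex_enter(&block->mutex) */

        if (b->in_use && b->space == space && b->page_no == page_no) {
            b->in_use = false;              /* stands in for buf_LRU_free_block() */
        }

        pthread_mutex_unlock(&b->mutex);
        pthread_mutex_unlock(&pool_mutex);  /* buf_pool_mutex_exit() */
    }
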
=== modified file 'storage/xtradb/btr/btr0cur.c'
--- a/storage/xtradb/btr/btr0cur.c	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/btr/btr0cur.c	2009-05-04 02:45:47 +0000
@@ -3733,8 +3733,7 @@ btr_blob_free(
 
 	mtr_commit(mtr);
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
+	buf_pool_mutex_enter();
 	mutex_enter(&block->mutex);
 
 	/* Only free the block if it is still allocated to
@@ -3745,22 +3744,17 @@ btr_blob_free(
 	    && buf_block_get_space(block) == space
 	    && buf_block_get_page_no(block) == page_no) {
 
-		if (buf_LRU_free_block(&block->page, all, NULL, TRUE)
+		if (buf_LRU_free_block(&block->page, all, NULL)
 		    != BUF_LRU_FREED
-		    && all && block->page.zip.data
-		    /* Now, buf_LRU_free_block() may release mutex temporarily */
-		    && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE
-		    && buf_block_get_space(block) == space
-		    && buf_block_get_page_no(block) == page_no) {
+		    && all && block->page.zip.data) {
 			/* Attempt to deallocate the uncompressed page
 			if the whole block cannot be deallocted. */
 
-			buf_LRU_free_block(&block->page, FALSE, NULL, TRUE);
+			buf_LRU_free_block(&block->page, FALSE, NULL);
 		}
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
+	buf_pool_mutex_exit();
 	mutex_exit(&block->mutex);
 }
 

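The merged btr_blob_free() above keeps the two-stage eviction: first try to free the whole block, and only if that fails for a compressed page fall back to freeing just the uncompressed copy, leaving the compressed page in the pool. A small sketch of that fallback shape; the two try_free_* helpers are hypothetical stand-ins for buf_LRU_free_block() called with and without the "free the compressed copy too" flag.

    #include <stdbool.h>

    struct blob_block {
        void *frame;      /* uncompressed page frame */
        void *zip_data;   /* compressed copy, may be NULL */
    };

    /* Assumed helpers, standing in for buf_LRU_free_block(). */
    extern bool try_free_whole_block(struct blob_block *b);
    extern bool try_free_uncompressed(struct blob_block *b);

    static void evict_after_blob_free(struct blob_block *b, bool free_all)
    {
        bool freed = free_all ? try_free_whole_block(b)
                              : try_free_uncompressed(b);

        if (!freed && free_all && b->zip_data != NULL) {
            /* The whole block could not be freed: drop only the
               uncompressed frame and keep the compressed copy cached. */
            (void) try_free_uncompressed(b);
        }
    }
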
=== modified file 'storage/xtradb/btr/btr0sea.c'
--- a/storage/xtradb/btr/btr0sea.c	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/btr/btr0sea.c	2009-05-04 02:45:47 +0000
@@ -1731,8 +1731,7 @@ btr_search_validate(void)
 	rec_offs_init(offsets_);
 
 	rw_lock_x_lock(&btr_search_latch);
-	//buf_pool_mutex_enter();
-	rw_lock_x_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 
 	cell_count = hash_get_n_cells(btr_search_sys->hash_index);
 
@@ -1740,13 +1739,11 @@ btr_search_validate(void)
 		/* We release btr_search_latch every once in a while to
 		give other queries a chance to run. */
 		if ((i != 0) && ((i % chunk_size) == 0)) {
-			//buf_pool_mutex_exit();
-			rw_lock_x_unlock(&page_hash_latch);
+			buf_pool_mutex_exit();
 			rw_lock_x_unlock(&btr_search_latch);
 			os_thread_yield();
 			rw_lock_x_lock(&btr_search_latch);
-			//buf_pool_mutex_enter();
-			rw_lock_x_lock(&page_hash_latch);
+			buf_pool_mutex_enter();
 		}
 
 		node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;
@@ -1853,13 +1850,11 @@ btr_search_validate(void)
 		/* We release btr_search_latch every once in a while to
 		give other queries a chance to run. */
 		if (i != 0) {
-			//buf_pool_mutex_exit();
-			rw_lock_x_unlock(&page_hash_latch);
+			buf_pool_mutex_exit();
 			rw_lock_x_unlock(&btr_search_latch);
 			os_thread_yield();
 			rw_lock_x_lock(&btr_search_latch);
-			//buf_pool_mutex_enter();
-			rw_lock_x_lock(&page_hash_latch);
+			buf_pool_mutex_enter();
 		}
 
 		if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
@@ -1867,8 +1862,7 @@ btr_search_validate(void)
 		}
 	}
 
-	//buf_pool_mutex_exit();
-	rw_lock_x_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 	rw_lock_x_unlock(&btr_search_latch);
 	if (UNIV_LIKELY_NULL(heap)) {
 		mem_heap_free(heap);

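The btr_search_validate() hunks above preserve the pattern in which a long validation scan periodically drops its latches, yields, and re-acquires them so concurrent queries are not starved ("We release btr_search_latch every once in a while ..."). A self-contained model of that periodic-yield loop follows; chunk_size and the cell count are made-up values, not the real ones.

    #include <pthread.h>
    #include <sched.h>

    static pthread_mutex_t search_latch = PTHREAD_MUTEX_INITIALIZER;

    /* Scan cell_count entries while holding the latch, but drop it every
       chunk_size iterations and yield, as in the hunks above. */
    static void validate_cells(unsigned long cell_count)
    {
        const unsigned long chunk_size = 1024;   /* hypothetical batch size */

        pthread_mutex_lock(&search_latch);

        for (unsigned long i = 0; i < cell_count; i++) {
            if (i != 0 && (i % chunk_size) == 0) {
                pthread_mutex_unlock(&search_latch);
                sched_yield();                    /* os_thread_yield() stand-in */
                pthread_mutex_lock(&search_latch);
            }
            /* ... validate cell i here ... */
        }

        pthread_mutex_unlock(&search_latch);
    }
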
=== modified file 'storage/xtradb/buf/buf0buddy.c'
--- a/storage/xtradb/buf/buf0buddy.c	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/buf/buf0buddy.c	2009-05-04 04:32:30 +0000
@@ -131,8 +131,7 @@ buf_buddy_alloc_zip(
 {
 	buf_page_t*	bpage;
 
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&zip_free_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_a(i < BUF_BUDDY_SIZES);
 
 #if defined UNIV_DEBUG && !defined UNIV_DEBUG_VALGRIND
@@ -178,19 +177,16 @@ static
 void
 buf_buddy_block_free(
 /*=================*/
-	void*	buf,	/* in: buffer frame to deallocate */
-	ibool	have_page_hash_mutex)
+	void*	buf)	/* in: buffer frame to deallocate */
 {
 	const ulint	fold	= BUF_POOL_ZIP_FOLD_PTR(buf);
 	buf_page_t*	bpage;
 	buf_block_t*	block;
 
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(!mutex_own(&buf_pool_zip_mutex));
 	ut_a(!ut_align_offset(buf, UNIV_PAGE_SIZE));
 
-	mutex_enter(&zip_hash_mutex);
-
 	HASH_SEARCH(hash, buf_pool->zip_hash, fold, buf_page_t*, bpage,
 		    ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_MEMORY
 			  && bpage->in_zip_hash && !bpage->in_page_hash),
@@ -202,14 +198,12 @@ buf_buddy_block_free(
 	ut_d(bpage->in_zip_hash = FALSE);
 	HASH_DELETE(buf_page_t, hash, buf_pool->zip_hash, fold, bpage);
 
-	mutex_exit(&zip_hash_mutex);
-
 	ut_d(memset(buf, 0, UNIV_PAGE_SIZE));
 	UNIV_MEM_INVALID(buf, UNIV_PAGE_SIZE);
 
 	block = (buf_block_t*) bpage;
 	mutex_enter(&block->mutex);
-	buf_LRU_block_free_non_file_page(block, have_page_hash_mutex);
+	buf_LRU_block_free_non_file_page(block);
 	mutex_exit(&block->mutex);
 
 	ut_ad(buf_buddy_n_frames > 0);
@@ -225,7 +219,7 @@ buf_buddy_block_register(
 	buf_block_t*	block)	/* in: buffer frame to allocate */
 {
 	const ulint	fold = BUF_POOL_ZIP_FOLD(block);
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(!mutex_own(&buf_pool_zip_mutex));
 
 	buf_block_set_state(block, BUF_BLOCK_MEMORY);
@@ -236,10 +230,7 @@ buf_buddy_block_register(
 	ut_ad(!block->page.in_page_hash);
 	ut_ad(!block->page.in_zip_hash);
 	ut_d(block->page.in_zip_hash = TRUE);
-
-	mutex_enter(&zip_hash_mutex);
 	HASH_INSERT(buf_page_t, hash, buf_pool->zip_hash, fold, &block->page);
-	mutex_exit(&zip_hash_mutex);
 
 	ut_d(buf_buddy_n_frames++);
 }
@@ -293,28 +284,24 @@ buf_buddy_alloc_low(
 			possibly NULL if lru==NULL */
 	ulint	i,	/* in: index of buf_pool->zip_free[],
 			or BUF_BUDDY_SIZES */
-	ibool*	lru,	/* in: pointer to a variable that will be assigned
+	ibool*	lru)	/* in: pointer to a variable that will be assigned
 			TRUE if storage was allocated from the LRU list
 			and buf_pool_mutex was temporarily released,
 			or NULL if the LRU list should not be used */
-	ibool	have_page_hash_mutex)
 {
 	buf_block_t*	block;
 
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(!mutex_own(&buf_pool_zip_mutex));
 
 	if (i < BUF_BUDDY_SIZES) {
 		/* Try to allocate from the buddy system. */
-		mutex_enter(&zip_free_mutex);
 		block = buf_buddy_alloc_zip(i);
 
 		if (block) {
 
 			goto func_exit;
 		}
-
-		mutex_exit(&zip_free_mutex);
 	}
 
 	/* Try allocating from the buf_pool->free list. */
@@ -331,31 +318,18 @@ buf_buddy_alloc_low(
 	}
 
 	/* Try replacing an uncompressed page in the buffer pool. */
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
-	if (have_page_hash_mutex) {
-		mutex_exit(&flush_list_mutex);
-		rw_lock_x_unlock(&page_hash_latch);
-	}
+	buf_pool_mutex_exit();
 	block = buf_LRU_get_free_block(0);
 	*lru = TRUE;
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
-	if (have_page_hash_mutex) {
-		mutex_enter(&flush_list_mutex);
-		rw_lock_x_lock(&page_hash_latch);
-	}
+	buf_pool_mutex_enter();
 
 alloc_big:
 	buf_buddy_block_register(block);
 
-	mutex_enter(&zip_free_mutex);
 	block = buf_buddy_alloc_from(block->frame, i, BUF_BUDDY_SIZES);
 
 func_exit:
 	buf_buddy_stat[i].used++;
-	mutex_exit(&zip_free_mutex);
-
 	return(block);
 }
 
@@ -371,8 +345,7 @@ buf_buddy_relocate_block(
 {
 	buf_page_t*	b;
 
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&flush_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 
 	switch (buf_page_get_state(bpage)) {
 	case BUF_BLOCK_ZIP_FREE:
@@ -381,7 +354,7 @@ buf_buddy_relocate_block(
 	case BUF_BLOCK_FILE_PAGE:
 	case BUF_BLOCK_MEMORY:
 	case BUF_BLOCK_REMOVE_HASH:
-		/* ut_error; */ /* optimistic */
+		ut_error;
 	case BUF_BLOCK_ZIP_DIRTY:
 		/* Cannot relocate dirty pages. */
 		return(FALSE);
@@ -391,17 +364,9 @@ buf_buddy_relocate_block(
 	}
 
 	mutex_enter(&buf_pool_zip_mutex);
-	mutex_enter(&zip_free_mutex);
 
 	if (!buf_page_can_relocate(bpage)) {
 		mutex_exit(&buf_pool_zip_mutex);
-		mutex_exit(&zip_free_mutex);
-		return(FALSE);
-	}
-
-	if (bpage != buf_page_hash_get(bpage->space, bpage->offset)) {
-		mutex_exit(&buf_pool_zip_mutex);
-		mutex_exit(&zip_free_mutex);
 		return(FALSE);
 	}
 
@@ -419,7 +384,6 @@ buf_buddy_relocate_block(
 	}
 
 	mutex_exit(&buf_pool_zip_mutex);
-	mutex_exit(&zip_free_mutex);
 	return(TRUE);
 }
 
@@ -432,15 +396,13 @@ buf_buddy_relocate(
 			/* out: TRUE if relocated */
 	void*	src,	/* in: block to relocate */
 	void*	dst,	/* in: free block to relocate to */
-	ulint	i,	/* in: index of buf_pool->zip_free[] */
-	ibool	have_page_hash_mutex)
+	ulint	i)	/* in: index of buf_pool->zip_free[] */
 {
 	buf_page_t*	bpage;
 	const ulint	size	= BUF_BUDDY_LOW << i;
 	ullint		usec	= ut_time_us(NULL);
 
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&zip_free_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(!mutex_own(&buf_pool_zip_mutex));
 	ut_ad(!ut_align_offset(src, size));
 	ut_ad(!ut_align_offset(dst, size));
@@ -459,17 +421,9 @@ buf_buddy_relocate(
 	actually is a properly initialized buf_page_t object. */
 
 	if (size >= PAGE_ZIP_MIN_SIZE) {
-		if (!have_page_hash_mutex)
-			mutex_exit(&zip_free_mutex);
-
 		/* This is a compressed page. */
 		mutex_t*	mutex;
 
-		if (!have_page_hash_mutex) {
-			mutex_enter(&LRU_list_mutex);
-			mutex_enter(&flush_list_mutex);
-			rw_lock_x_lock(&page_hash_latch);
-		}
 		/* The src block may be split into smaller blocks,
 		some of which may be free.  Thus, the
 		mach_read_from_4() calls below may attempt to read
@@ -490,12 +444,6 @@ buf_buddy_relocate(
 			added to buf_pool->page_hash yet.  Obviously,
 			it cannot be relocated. */
 
-			if (!have_page_hash_mutex) {
-				mutex_enter(&zip_free_mutex);
-				mutex_exit(&LRU_list_mutex);
-				mutex_exit(&flush_list_mutex);
-				rw_lock_x_unlock(&page_hash_latch);
-			}
 			return(FALSE);
 		}
 
@@ -505,19 +453,9 @@ buf_buddy_relocate(
 			For the sake of simplicity, give up. */
 			ut_ad(page_zip_get_size(&bpage->zip) < size);
 
-			if (!have_page_hash_mutex) {
-				mutex_enter(&zip_free_mutex);
-				mutex_exit(&LRU_list_mutex);
-				mutex_exit(&flush_list_mutex);
-				rw_lock_x_unlock(&page_hash_latch);
-			}
 			return(FALSE);
 		}
 
-		/* To keep latch order */
-		if (have_page_hash_mutex)
-			mutex_exit(&zip_free_mutex);
-
 		/* The block must have been allocated, but it may
 		contain uninitialized data. */
 		UNIV_MEM_ASSERT_W(src, size);
@@ -525,7 +463,6 @@ buf_buddy_relocate(
 		mutex = buf_page_get_mutex(bpage);
 
 		mutex_enter(mutex);
-		mutex_enter(&zip_free_mutex);
 
 		if (buf_page_can_relocate(bpage)) {
 			/* Relocate the compressed page. */
@@ -542,53 +479,17 @@ success:
 				buddy_stat->relocated_usec
 					+= ut_time_us(NULL) - usec;
 			}
-
-			if (!have_page_hash_mutex) {
-				mutex_exit(&LRU_list_mutex);
-				mutex_exit(&flush_list_mutex);
-				rw_lock_x_unlock(&page_hash_latch);
-			}
 			return(TRUE);
 		}
 
-		if (!have_page_hash_mutex) {
-			mutex_exit(&LRU_list_mutex);
-			mutex_exit(&flush_list_mutex);
-			rw_lock_x_unlock(&page_hash_latch);
-		}
-
 		mutex_exit(mutex);
 	} else if (i == buf_buddy_get_slot(sizeof(buf_page_t))) {
 		/* This must be a buf_page_t object. */
 		UNIV_MEM_ASSERT_RW(src, size);
-
-		mutex_exit(&zip_free_mutex);
-
-		if (!have_page_hash_mutex) {
-			mutex_enter(&LRU_list_mutex);
-			mutex_enter(&flush_list_mutex);
-			rw_lock_x_lock(&page_hash_latch);
-		}
-
 		if (buf_buddy_relocate_block(src, dst)) {
-			mutex_enter(&zip_free_mutex);
-
-			if (!have_page_hash_mutex) {
-				mutex_exit(&LRU_list_mutex);
-				mutex_exit(&flush_list_mutex);
-				rw_lock_x_unlock(&page_hash_latch);
-			}
 
 			goto success;
 		}
-
-		mutex_enter(&zip_free_mutex);
-
-		if (!have_page_hash_mutex) {
-			mutex_exit(&LRU_list_mutex);
-			mutex_exit(&flush_list_mutex);
-			rw_lock_x_unlock(&page_hash_latch);
-		}
 	}
 
 	return(FALSE);
@@ -602,14 +503,12 @@ buf_buddy_free_low(
 /*===============*/
 	void*	buf,	/* in: block to be freed, must not be
 			pointed to by the buffer pool */
-	ulint	i,	/* in: index of buf_pool->zip_free[] */
-	ibool	have_page_hash_mutex)
+	ulint	i)	/* in: index of buf_pool->zip_free[] */
 {
 	buf_page_t*	bpage;
 	buf_page_t*	buddy;
 
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&zip_free_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(!mutex_own(&buf_pool_zip_mutex));
 	ut_ad(i <= BUF_BUDDY_SIZES);
 	ut_ad(buf_buddy_stat[i].used > 0);
@@ -620,7 +519,7 @@ recombine:
 	ut_d(((buf_page_t*) buf)->state = BUF_BLOCK_ZIP_FREE);
 
 	if (i == BUF_BUDDY_SIZES) {
-		buf_buddy_block_free(buf, have_page_hash_mutex);
+		buf_buddy_block_free(buf);
 		return;
 	}
 
@@ -678,7 +577,7 @@ buddy_nonfree:
 #endif /* UNIV_DEBUG_VALGRIND */
 
 	/* The buddy is not free. Is there a free block of this size? */
-	bpage = UT_LIST_GET_LAST(buf_pool->zip_free[i]);
+	bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
 
 	if (bpage) {
 		/* Remove the block from the free list, because a successful
@@ -688,7 +587,7 @@ buddy_nonfree:
 		buf_buddy_remove_from_free(bpage, i);
 
 		/* Try to relocate the buddy of buf to the free block. */
-		if (buf_buddy_relocate(buddy, bpage, i, have_page_hash_mutex)) {
+		if (buf_buddy_relocate(buddy, bpage, i)) {
 
 			ut_d(buddy->state = BUF_BLOCK_ZIP_FREE);
 			goto buddy_free2;
@@ -716,7 +615,7 @@ buddy_nonfree:
 		}
 #endif /* UNIV_DEBUG && !UNIV_DEBUG_VALGRIND */
 
-		if (buf_buddy_relocate(buddy, buf, i, have_page_hash_mutex)) {
+		if (buf_buddy_relocate(buddy, buf, i)) {
 
 			buf = bpage;
 			UNIV_MEM_VALID(bpage, BUF_BUDDY_LOW << i);

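The buf0buddy.c hunks operate on the binary buddy allocator that backs compressed pages: buf_buddy_free_low() tries to recombine a freed block with its equally sized buddy ("recombine:" / "buddy_nonfree:" above), while buf_buddy_alloc_low() carves the requested size class out of a larger block. The sketch below shows only the standard buddy-address arithmetic such an allocator relies on, with a made-up BUDDY_LOW; it is not the InnoDB implementation.

    #include <stdint.h>
    #include <stdio.h>

    #define BUDDY_LOW 128U   /* smallest size class, illustrative value only */

    /* A block of size (BUDDY_LOW << i), aligned to that size, has its buddy
       at the offset obtained by flipping the bit for that size class. */
    static uintptr_t buddy_of(uintptr_t off, unsigned i)
    {
        return off ^ ((uintptr_t) BUDDY_LOW << i);
    }

    int main(void)
    {
        /* Free the block at offset 128 in class i = 0: its buddy is at 0,
           so if that buddy is also free the pair merges into one block of
           class i = 1 at offset 0, and the check repeats one level up. */
        uintptr_t off = 128;
        for (unsigned i = 0; i < 3; i++) {
            printf("class %u: block at %#lx, buddy at %#lx\n",
                   i, (unsigned long) off, (unsigned long) buddy_of(off, i));
            off &= ~(((uintptr_t) BUDDY_LOW << (i + 1)) - 1); /* merged start */
        }
        return 0;
    }
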
=== modified file 'storage/xtradb/buf/buf0buf.c'
--- a/storage/xtradb/buf/buf0buf.c	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/buf/buf0buf.c	2009-05-04 04:32:30 +0000
@@ -244,12 +244,6 @@ UNIV_INTERN buf_pool_t*	buf_pool = NULL;
 /* mutex protecting the buffer pool struct and control blocks, except the
 read-write lock in them */
 UNIV_INTERN mutex_t		buf_pool_mutex;
-UNIV_INTERN mutex_t		LRU_list_mutex;
-UNIV_INTERN mutex_t		flush_list_mutex;
-UNIV_INTERN rw_lock_t		page_hash_latch;
-UNIV_INTERN mutex_t		free_list_mutex;
-UNIV_INTERN mutex_t		zip_free_mutex;
-UNIV_INTERN mutex_t		zip_hash_mutex;
 /* mutex protecting the control blocks of compressed-only pages
 (of type buf_page_t, not buf_block_t) */
 UNIV_INTERN mutex_t		buf_pool_zip_mutex;
@@ -670,9 +664,9 @@ buf_block_init(
 	block->page.in_zip_hash = FALSE;
 	block->page.in_flush_list = FALSE;
 	block->page.in_free_list = FALSE;
+	block->in_unzip_LRU_list = FALSE;
 #endif /* UNIV_DEBUG */
 	block->page.in_LRU_list = FALSE;
-	block->in_unzip_LRU_list = FALSE;
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 	block->n_pointers = 0;
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
@@ -757,10 +751,8 @@ buf_chunk_init(
 		memset(block->frame, '\0', UNIV_PAGE_SIZE);
 #endif
 		/* Add the block to the free list */
-		mutex_enter(&free_list_mutex);
 		UT_LIST_ADD_LAST(list, buf_pool->free, (&block->page));
 		ut_d(block->page.in_free_list = TRUE);
-		mutex_exit(&free_list_mutex);
 
 		block++;
 		frame += UNIV_PAGE_SIZE;
@@ -786,7 +778,7 @@ buf_chunk_contains_zip(
 	ulint		i;
 
 	ut_ad(buf_pool);
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 
 	block = chunk->blocks;
 
@@ -840,7 +832,7 @@ buf_chunk_not_freed(
 	ulint		i;
 
 	ut_ad(buf_pool);
-	//ut_ad(buf_pool_mutex_own()); /*optimistic...*/
+	ut_ad(buf_pool_mutex_own());
 
 	block = chunk->blocks;
 
@@ -873,7 +865,7 @@ buf_chunk_all_free(
 	ulint			i;
 
 	ut_ad(buf_pool);
-	ut_ad(buf_pool_mutex_own()); /* but we need all mutex here */
+	ut_ad(buf_pool_mutex_own());
 
 	block = chunk->blocks;
 
@@ -899,7 +891,7 @@ buf_chunk_free(
 	buf_block_t*		block;
 	const buf_block_t*	block_end;
 
-	ut_ad(buf_pool_mutex_own()); /* but we need all mutex here */
+	ut_ad(buf_pool_mutex_own());
 
 	block_end = chunk->blocks + chunk->size;
 
@@ -911,10 +903,8 @@ buf_chunk_free(
 		ut_ad(!block->in_unzip_LRU_list);
 		ut_ad(!block->page.in_flush_list);
 		/* Remove the block from the free list. */
-		mutex_enter(&free_list_mutex);
 		ut_ad(block->page.in_free_list);
 		UT_LIST_REMOVE(list, buf_pool->free, (&block->page));
-		mutex_exit(&free_list_mutex);
 
 		/* Free the latches. */
 		mutex_free(&block->mutex);
@@ -945,18 +935,8 @@ buf_pool_init(void)
 	/* 1. Initialize general fields
 	------------------------------- */
 	mutex_create(&buf_pool_mutex, SYNC_BUF_POOL);
-	mutex_create(&LRU_list_mutex, SYNC_NO_ORDER_CHECK);
-	mutex_create(&flush_list_mutex, SYNC_NO_ORDER_CHECK);
-	rw_lock_create(&page_hash_latch, SYNC_NO_ORDER_CHECK);
-	mutex_create(&free_list_mutex, SYNC_NO_ORDER_CHECK);
-	mutex_create(&zip_free_mutex, SYNC_NO_ORDER_CHECK);
-	mutex_create(&zip_hash_mutex, SYNC_NO_ORDER_CHECK);
-
 	mutex_create(&buf_pool_zip_mutex, SYNC_BUF_BLOCK);
 
-	mutex_enter(&LRU_list_mutex);
-	mutex_enter(&flush_list_mutex);
-	rw_lock_x_lock(&page_hash_latch);
 	buf_pool_mutex_enter();
 
 	buf_pool->n_chunks = 1;
@@ -993,9 +973,6 @@ buf_pool_init(void)
 	--------------------------- */
 	/* All fields are initialized by mem_zalloc(). */
 
-	mutex_exit(&LRU_list_mutex);
-	mutex_exit(&flush_list_mutex);
-	rw_lock_x_unlock(&page_hash_latch);
 	buf_pool_mutex_exit();
 
 	btr_search_sys_create(buf_pool->curr_size
@@ -1128,11 +1105,7 @@ buf_relocate(
 	buf_page_t*	b;
 	ulint		fold;
 
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
-#ifdef UNIV_SYNC_DEBUG
-	ut_ad(rw_lock_own(&page_hash_latch, RW_LOCK_EX));
-#endif
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 	ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
 	ut_a(bpage->buf_fix_count == 0);
@@ -1213,8 +1186,7 @@ buf_pool_shrink(
 
 try_again:
 	btr_search_disable(); /* Empty the adaptive hash index again */
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
+	buf_pool_mutex_enter();
 
 shrink_again:
 	if (buf_pool->n_chunks <= 1) {
@@ -1285,7 +1257,7 @@ shrink_again:
 
 				buf_LRU_make_block_old(&block->page);
 				dirty++;
-			} else if (buf_LRU_free_block(&block->page, TRUE, NULL, FALSE)
+			} else if (buf_LRU_free_block(&block->page, TRUE, NULL)
 				   != BUF_LRU_FREED) {
 				nonfree++;
 			}
@@ -1293,8 +1265,7 @@ shrink_again:
 			mutex_exit(&block->mutex);
 		}
 
-		//buf_pool_mutex_exit();
-		mutex_exit(&LRU_list_mutex);
+		buf_pool_mutex_exit();
 
 		/* Request for a flush of the chunk if it helps.
 		Do not flush if there are non-free blocks, since
@@ -1343,8 +1314,7 @@ shrink_again:
 func_done:
 	srv_buf_pool_old_size = srv_buf_pool_size;
 func_exit:
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
+	buf_pool_mutex_exit();
 	btr_search_enable();
 }
 
@@ -1362,11 +1332,7 @@ buf_pool_page_hash_rebuild(void)
 	hash_table_t*	zip_hash;
 	buf_page_t*	b;
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
-	mutex_enter(&flush_list_mutex);
-	rw_lock_x_lock(&page_hash_latch);
-	
+	buf_pool_mutex_enter();
 
 	/* Free, create, and populate the hash table. */
 	hash_table_free(buf_pool->page_hash);
@@ -1446,10 +1412,7 @@ buf_pool_page_hash_rebuild(void)
 		}
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
-	mutex_exit(&flush_list_mutex);
-	rw_lock_x_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 }
 
 /************************************************************************
@@ -1459,20 +1422,17 @@ void
 buf_pool_resize(void)
 /*=================*/
 {
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
+	buf_pool_mutex_enter();
 
 	if (srv_buf_pool_old_size == srv_buf_pool_size) {
 
-		//buf_pool_mutex_exit();
-		mutex_exit(&LRU_list_mutex);
+		buf_pool_mutex_exit();
 		return;
 	}
 
 	if (srv_buf_pool_curr_size + 1048576 > srv_buf_pool_size) {
 
-		//buf_pool_mutex_exit();
-		mutex_exit(&LRU_list_mutex);
+		buf_pool_mutex_exit();
 
 		/* Disable adaptive hash indexes and empty the index
 		in order to free up memory in the buffer pool chunks. */
@@ -1506,8 +1466,7 @@ buf_pool_resize(void)
 		}
 
 		srv_buf_pool_old_size = srv_buf_pool_size;
-		//buf_pool_mutex_exit();
-		mutex_exit(&LRU_list_mutex);
+		buf_pool_mutex_exit();
 	}
 
 	buf_pool_page_hash_rebuild();
@@ -1529,14 +1488,12 @@ buf_block_make_young(
 
 	if (buf_page_peek_if_too_old(bpage)) {
 
-		//buf_pool_mutex_enter();
-		mutex_enter(&LRU_list_mutex);
+		buf_pool_mutex_enter();
 		/* There has been freeing activity in the LRU list:
 		best to move to the head of the LRU list */
 
 		buf_LRU_make_block_young(bpage);
-		//buf_pool_mutex_exit();
-		mutex_exit(&LRU_list_mutex);
+		buf_pool_mutex_exit();
 	}
 }
 
@@ -1550,15 +1507,13 @@ buf_page_make_young(
 /*================*/
 	buf_page_t*	bpage)	/* in: buffer block of a file page */
 {
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
+	buf_pool_mutex_enter();
 
 	ut_a(buf_page_in_file(bpage));
 
 	buf_LRU_make_block_young(bpage);
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
+	buf_pool_mutex_exit();
 }
 
 /************************************************************************
@@ -1573,8 +1528,7 @@ buf_reset_check_index_page_at_flush(
 {
 	buf_block_t*	block;
 
-	//buf_pool_mutex_enter();
-	rw_lock_s_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 
 	block = (buf_block_t*) buf_page_hash_get(space, offset);
 
@@ -1582,8 +1536,7 @@ buf_reset_check_index_page_at_flush(
 		block->check_index_page_at_flush = FALSE;
 	}
 
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 }
 
 /************************************************************************
@@ -1602,8 +1555,7 @@ buf_page_peek_if_search_hashed(
 	buf_block_t*	block;
 	ibool		is_hashed;
 
-	//buf_pool_mutex_enter();
-	rw_lock_s_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 
 	block = (buf_block_t*) buf_page_hash_get(space, offset);
 
@@ -1613,8 +1565,7 @@ buf_page_peek_if_search_hashed(
 		is_hashed = block->is_hashed;
 	}
 
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 
 	return(is_hashed);
 }
@@ -1636,8 +1587,7 @@ buf_page_set_file_page_was_freed(
 {
 	buf_page_t*	bpage;
 
-	//buf_pool_mutex_enter();
-	rw_lock_s_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 
 	bpage = buf_page_hash_get(space, offset);
 
@@ -1645,8 +1595,7 @@ buf_page_set_file_page_was_freed(
 		bpage->file_page_was_freed = TRUE;
 	}
 
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 
 	return(bpage);
 }
@@ -1667,8 +1616,7 @@ buf_page_reset_file_page_was_freed(
 {
 	buf_page_t*	bpage;
 
-	//buf_pool_mutex_enter();
-	rw_lock_s_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 
 	bpage = buf_page_hash_get(space, offset);
 
@@ -1676,8 +1624,7 @@ buf_page_reset_file_page_was_freed(
 		bpage->file_page_was_freed = FALSE;
 	}
 
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 
 	return(bpage);
 }
@@ -1710,9 +1657,8 @@ buf_page_get_zip(
 	buf_pool->n_page_gets++;
 
 	for (;;) {
-		//buf_pool_mutex_enter();
+		buf_pool_mutex_enter();
 lookup:
-		rw_lock_s_lock(&page_hash_latch);
 		bpage = buf_page_hash_get(space, offset);
 		if (bpage) {
 			break;
@@ -1720,8 +1666,7 @@ lookup:
 
 		/* Page not in buf_pool: needs to be read from file */
 
-		//buf_pool_mutex_exit();
-		rw_lock_s_unlock(&page_hash_latch);
+		buf_pool_mutex_exit();
 
 		buf_read_page(space, zip_size, offset);
 
@@ -1732,16 +1677,13 @@ lookup:
 
 	if (UNIV_UNLIKELY(!bpage->zip.data)) {
 		/* There is no compressed page. */
-		//buf_pool_mutex_exit();
-		rw_lock_s_unlock(&page_hash_latch);
+		buf_pool_mutex_exit();
 		return(NULL);
 	}
 
 	block_mutex = buf_page_get_mutex(bpage);
 	mutex_enter(block_mutex);
 
-	rw_lock_s_unlock(&page_hash_latch);
-
 	switch (buf_page_get_state(bpage)) {
 	case BUF_BLOCK_NOT_USED:
 	case BUF_BLOCK_READY_FOR_USE:
@@ -1756,7 +1698,7 @@ lookup:
 		break;
 	case BUF_BLOCK_FILE_PAGE:
 		/* Discard the uncompressed page frame if possible. */
-		if (buf_LRU_free_block(bpage, FALSE, NULL, FALSE)
+		if (buf_LRU_free_block(bpage, FALSE, NULL)
 		    == BUF_LRU_FREED) {
 
 			mutex_exit(block_mutex);
@@ -1770,7 +1712,7 @@ lookup:
 
 	must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ;
 
-	//buf_pool_mutex_exit();
+	buf_pool_mutex_exit();
 
 	buf_page_set_accessed(bpage, TRUE);
 
@@ -2001,7 +1943,7 @@ buf_block_is_uncompressed(
 	const buf_chunk_t*		chunk	= buf_pool->chunks;
 	const buf_chunk_t* const	echunk	= chunk + buf_pool->n_chunks;
 
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 
 	if (UNIV_UNLIKELY((((ulint) block) % sizeof *block) != 0)) {
 		/* The pointer should be aligned. */
@@ -2044,7 +1986,6 @@ buf_page_get_gen(
 	ibool		accessed;
 	ulint		fix_type;
 	ibool		must_read;
-	mutex_t*	block_mutex;
 
 	ut_ad(mtr);
 	ut_ad((rw_latch == RW_S_LATCH)
@@ -2060,12 +2001,9 @@ buf_page_get_gen(
 	buf_pool->n_page_gets++;
 loop:
 	block = guess;
-	//buf_pool_mutex_enter();
+	buf_pool_mutex_enter();
 
 	if (block) {
-		block_mutex = buf_page_get_mutex((buf_page_t*)block);
-		mutex_enter(block_mutex);
-
 		/* If the guess is a compressed page descriptor that
 		has been allocated by buf_buddy_alloc(), it may have
 		been invalidated by buf_buddy_relocate().  In that
@@ -2079,8 +2017,6 @@ loop:
 		    || space != block->page.space
 		    || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
 
-			mutex_exit(block_mutex);
-
 			block = guess = NULL;
 		} else {
 			ut_ad(!block->page.in_zip_hash);
@@ -2089,20 +2025,14 @@ loop:
 	}
 
 	if (block == NULL) {
-		rw_lock_s_lock(&page_hash_latch);
 		block = (buf_block_t*) buf_page_hash_get(space, offset);
-		if (block) {
-			block_mutex = buf_page_get_mutex((buf_page_t*)block);
-			mutex_enter(block_mutex);
-		}
-		rw_lock_s_unlock(&page_hash_latch);
 	}
 
 loop2:
 	if (block == NULL) {
 		/* Page not in buf_pool: needs to be read from file */
 
-		//buf_pool_mutex_exit();
+		buf_pool_mutex_exit();
 
 		if (mode == BUF_GET_IF_IN_POOL) {
 
@@ -2123,8 +2053,7 @@ loop2:
 
 	if (must_read && mode == BUF_GET_IF_IN_POOL) {
 		/* The page is only being read to buffer */
-		//buf_pool_mutex_exit();
-		mutex_exit(block_mutex);
+		buf_pool_mutex_exit();
 
 		return(NULL);
 	}
@@ -2134,16 +2063,10 @@ loop2:
 		ibool		success;
 
 	case BUF_BLOCK_FILE_PAGE:
-		if (block_mutex == &buf_pool_zip_mutex) {
-			/* it is wrong mutex... */
-			mutex_exit(block_mutex);
-			goto loop;
-		}
 		break;
 
 	case BUF_BLOCK_ZIP_PAGE:
 	case BUF_BLOCK_ZIP_DIRTY:
-		ut_ad(block_mutex == &buf_pool_zip_mutex);
 		bpage = &block->page;
 
 		if (bpage->buf_fix_count
@@ -2154,26 +2077,20 @@ loop2:
 wait_until_unfixed:
 			/* The block is buffer-fixed or I/O-fixed.
 			Try again later. */
-			//buf_pool_mutex_exit();
-			mutex_exit(block_mutex);
+			buf_pool_mutex_exit();
 			os_thread_sleep(WAIT_FOR_READ);
 
 			goto loop;
 		}
 
 		/* Allocate an uncompressed page. */
-		//buf_pool_mutex_exit();
-		mutex_exit(block_mutex);
+		buf_pool_mutex_exit();
 
 		block = buf_LRU_get_free_block(0);
 		ut_a(block);
-		block_mutex = &block->mutex;
 
-		//buf_pool_mutex_enter();
-		mutex_enter(&LRU_list_mutex);
-		mutex_enter(&flush_list_mutex);
-		rw_lock_x_lock(&page_hash_latch);
-		mutex_enter(block_mutex);
+		buf_pool_mutex_enter();
+		mutex_enter(&block->mutex);
 
 		{
 			buf_page_t*	hash_bpage
@@ -2184,17 +2101,10 @@ wait_until_unfixed:
 				while buf_pool_mutex was released.
 				Free the block that was allocated. */
 
-				buf_LRU_block_free_non_file_page(block, TRUE);
-				mutex_exit(block_mutex);
+				buf_LRU_block_free_non_file_page(block);
+				mutex_exit(&block->mutex);
 
 				block = (buf_block_t*) hash_bpage;
-				if (block) {
-					block_mutex = buf_page_get_mutex((buf_page_t*)block);
-					mutex_enter(block_mutex);
-				}
-				rw_lock_x_unlock(&page_hash_latch);
-				mutex_exit(&LRU_list_mutex);
-				mutex_exit(&flush_list_mutex);
 				goto loop2;
 			}
 		}
@@ -2208,12 +2118,9 @@ wait_until_unfixed:
 			Free the block that was allocated and try again.
 			This should be extremely unlikely. */
 
-			buf_LRU_block_free_non_file_page(block, TRUE);
-			//mutex_exit(&block->mutex);
+			buf_LRU_block_free_non_file_page(block);
+			mutex_exit(&block->mutex);
 
-			rw_lock_x_unlock(&page_hash_latch);
-			mutex_exit(&LRU_list_mutex);
-			mutex_exit(&flush_list_mutex);
 			goto wait_until_unfixed;
 		}
 
@@ -2223,9 +2130,6 @@ wait_until_unfixed:
 		mutex_enter(&buf_pool_zip_mutex);
 
 		buf_relocate(bpage, &block->page);
-
-		rw_lock_x_unlock(&page_hash_latch);
-
 		buf_block_init_low(block);
 		block->lock_hash_val = lock_rec_hash(space, offset);
 
@@ -2257,8 +2161,6 @@ wait_until_unfixed:
 			}
 		}
 
-		mutex_exit(&flush_list_mutex);
-
 		/* Buffer-fix, I/O-fix, and X-latch the block
 		for the duration of the decompression.
 		Also add the block to the unzip_LRU list. */
@@ -2267,22 +2169,16 @@ wait_until_unfixed:
 		/* Insert at the front of unzip_LRU list */
 		buf_unzip_LRU_add_block(block, FALSE);
 
-		mutex_exit(&LRU_list_mutex);
-
 		block->page.buf_fix_count = 1;
 		buf_block_set_io_fix(block, BUF_IO_READ);
-
-		mutex_enter(&buf_pool_mutex);
 		buf_pool->n_pend_unzip++;
-		mutex_exit(&buf_pool_mutex);
-
 		rw_lock_x_lock(&block->lock);
-		mutex_exit(block_mutex);
+		mutex_exit(&block->mutex);
 		mutex_exit(&buf_pool_zip_mutex);
 
-		buf_buddy_free(bpage, sizeof *bpage, FALSE);
+		buf_buddy_free(bpage, sizeof *bpage);
 
-		//buf_pool_mutex_exit();
+		buf_pool_mutex_exit();
 
 		/* Decompress the page and apply buffered operations
 		while not holding buf_pool_mutex or block->mutex. */
@@ -2294,21 +2190,17 @@ wait_until_unfixed:
 		}
 
 		/* Unfix and unlatch the block. */
-		//buf_pool_mutex_enter();
-		block_mutex = &block->mutex;
-		mutex_enter(block_mutex);
-		mutex_enter(&buf_pool_mutex);
+		buf_pool_mutex_enter();
+		mutex_enter(&block->mutex);
 		buf_pool->n_pend_unzip--;
-		mutex_exit(&buf_pool_mutex);
 		block->page.buf_fix_count--;
 		buf_block_set_io_fix(block, BUF_IO_NONE);
-		//mutex_exit(&block->mutex);
+		mutex_exit(&block->mutex);
 		rw_lock_x_unlock(&block->lock);
 
 		if (UNIV_UNLIKELY(!success)) {
 
-			//buf_pool_mutex_exit();
-			mutex_exit(block_mutex);
+			buf_pool_mutex_exit();
 			return(NULL);
 		}
 
@@ -2325,11 +2217,11 @@ wait_until_unfixed:
 
 	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
 
-	//mutex_enter(&block->mutex);
+	mutex_enter(&block->mutex);
 	UNIV_MEM_ASSERT_RW(&block->page, sizeof block->page);
 
 	buf_block_buf_fix_inc(block, file, line);
-	//buf_pool_mutex_exit();
+	buf_pool_mutex_exit();
 
 	/* Check if this is the first access to the page */
 
@@ -2337,7 +2229,7 @@ wait_until_unfixed:
 
 	buf_page_set_accessed(&block->page, TRUE);
 
-	mutex_exit(block_mutex);
+	mutex_exit(&block->mutex);
 
 	buf_block_make_young(&block->page);
 
@@ -2623,19 +2515,16 @@ buf_page_try_get_func(
 	ibool		success;
 	ulint		fix_type;
 
-	//buf_pool_mutex_enter();
-	rw_lock_s_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 	block = buf_block_hash_get(space_id, page_no);
 
 	if (!block) {
-		//buf_pool_mutex_exit();
-		rw_lock_s_unlock(&page_hash_latch);
+		buf_pool_mutex_exit();
 		return(NULL);
 	}
 
 	mutex_enter(&block->mutex);
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
@@ -2755,10 +2644,7 @@ buf_page_init(
 {
 	buf_page_t*	hash_page;
 
-	//ut_ad(buf_pool_mutex_own());
-#ifdef UNIV_SYNC_DEBUG
-	ut_ad(rw_lock_own(&page_hash_latch, RW_LOCK_EX));
-#endif
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(&(block->mutex)));
 	ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
 
@@ -2791,8 +2677,7 @@ buf_page_init(
 			(const void*) hash_page, (const void*) block);
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 		mutex_exit(&block->mutex);
-		//buf_pool_mutex_exit();
-		rw_lock_x_unlock(&page_hash_latch);
+		buf_pool_mutex_exit();
 		buf_print();
 		buf_LRU_print();
 		buf_validate();
@@ -2871,28 +2756,16 @@ buf_page_init_for_read(
 		ut_ad(block);
 	}
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
-	if(!block) {
-		mutex_enter(&flush_list_mutex);
-	}
-	rw_lock_x_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 
 	if (buf_page_hash_get(space, offset)) {
 		/* The page is already in the buffer pool. */
 err_exit:
 		if (block) {
 			mutex_enter(&block->mutex);
-			mutex_exit(&LRU_list_mutex);
-			rw_lock_x_unlock(&page_hash_latch);
-			buf_LRU_block_free_non_file_page(block, FALSE);
+			buf_LRU_block_free_non_file_page(block);
 			mutex_exit(&block->mutex);
 		}
-		else {
-			mutex_exit(&LRU_list_mutex);
-			mutex_exit(&flush_list_mutex);
-			rw_lock_x_unlock(&page_hash_latch);
-		}
 
 		bpage = NULL;
 		goto func_exit;
@@ -2912,8 +2785,6 @@ err_exit:
 		mutex_enter(&block->mutex);
 		buf_page_init(space, offset, block);
 
-		rw_lock_x_unlock(&page_hash_latch);
-
 		/* The block must be put to the LRU list, to the old blocks */
 		buf_LRU_add_block(bpage, TRUE/* to old blocks */);
 
@@ -2941,7 +2812,7 @@ err_exit:
 			been added to buf_pool->LRU and
 			buf_pool->page_hash. */
 			mutex_exit(&block->mutex);
-			data = buf_buddy_alloc(zip_size, &lru, FALSE);
+			data = buf_buddy_alloc(zip_size, &lru);
 			mutex_enter(&block->mutex);
 			block->page.zip.data = data;
 
@@ -2954,7 +2825,6 @@ err_exit:
 			buf_unzip_LRU_add_block(block, TRUE);
 		}
 
-		mutex_exit(&LRU_list_mutex);
 		mutex_exit(&block->mutex);
 	} else {
 		/* Defer buf_buddy_alloc() until after the block has
@@ -2966,8 +2836,8 @@ err_exit:
 		control block (bpage), in order to avoid the
 		invocation of buf_buddy_relocate_block() on
 		uninitialized data. */
-		data = buf_buddy_alloc(zip_size, &lru, TRUE);
-		bpage = buf_buddy_alloc(sizeof *bpage, &lru, TRUE);
+		data = buf_buddy_alloc(zip_size, &lru);
+		bpage = buf_buddy_alloc(sizeof *bpage, &lru);
 
 		/* If buf_buddy_alloc() allocated storage from the LRU list,
 		it released and reacquired buf_pool_mutex.  Thus, we must
@@ -2976,12 +2846,8 @@ err_exit:
 		    && UNIV_LIKELY_NULL(buf_page_hash_get(space, offset))) {
 
 			/* The block was added by some other thread. */
-			buf_buddy_free(bpage, sizeof *bpage, TRUE);
-			buf_buddy_free(data, zip_size, TRUE);
-
-			mutex_exit(&LRU_list_mutex);
-			mutex_exit(&flush_list_mutex);
-			rw_lock_x_unlock(&page_hash_latch);
+			buf_buddy_free(bpage, sizeof *bpage);
+			buf_buddy_free(data, zip_size);
 
 			bpage = NULL;
 			goto func_exit;
@@ -3011,25 +2877,18 @@ err_exit:
 		HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
 			    buf_page_address_fold(space, offset), bpage);
 
-		rw_lock_x_unlock(&page_hash_latch);
-
 		/* The block must be put to the LRU list, to the old blocks */
 		buf_LRU_add_block(bpage, TRUE/* to old blocks */);
 		buf_LRU_insert_zip_clean(bpage);
 
-		mutex_exit(&LRU_list_mutex);
-		mutex_exit(&flush_list_mutex);
-
 		buf_page_set_io_fix(bpage, BUF_IO_READ);
 
 		mutex_exit(&buf_pool_zip_mutex);
 	}
 
-	mutex_enter(&buf_pool_mutex);
 	buf_pool->n_pend_reads++;
-	mutex_exit(&buf_pool_mutex);
 func_exit:
-	//buf_pool_mutex_exit();
+	buf_pool_mutex_exit();
 
 	if (mode == BUF_READ_IBUF_PAGES_ONLY) {
 
@@ -3065,9 +2924,7 @@ buf_page_create(
 
 	free_block = buf_LRU_get_free_block(0);
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
-	rw_lock_x_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 
 	block = (buf_block_t*) buf_page_hash_get(space, offset);
 
@@ -3080,9 +2937,7 @@ buf_page_create(
 #endif /* UNIV_DEBUG_FILE_ACCESSES */
 
 		/* Page can be found in buf_pool */
-		//buf_pool_mutex_exit();
-		mutex_exit(&LRU_list_mutex);
-		rw_lock_x_unlock(&page_hash_latch);
+		buf_pool_mutex_exit();
 
 		buf_block_free(free_block);
 
@@ -3104,7 +2959,6 @@ buf_page_create(
 	mutex_enter(&block->mutex);
 
 	buf_page_init(space, offset, block);
-	rw_lock_x_unlock(&page_hash_latch);
 
 	/* The block must be put to the LRU list */
 	buf_LRU_add_block(&block->page, FALSE);
@@ -3131,7 +2985,7 @@ buf_page_create(
 		the reacquisition of buf_pool_mutex.  We also must
 		defer this operation until after the block descriptor
 		has been added to buf_pool->LRU and buf_pool->page_hash. */
-		data = buf_buddy_alloc(zip_size, &lru, FALSE);
+		data = buf_buddy_alloc(zip_size, &lru);
 		mutex_enter(&block->mutex);
 		block->page.zip.data = data;
 
@@ -3147,8 +3001,7 @@ buf_page_create(
 		rw_lock_x_unlock(&block->lock);
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
+	buf_pool_mutex_exit();
 
 	mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
 
@@ -3200,7 +3053,6 @@ buf_page_io_complete(
 	enum buf_io_fix	io_type;
 	const ibool	uncompressed = (buf_page_get_state(bpage)
 					== BUF_BLOCK_FILE_PAGE);
-	enum buf_flush	flush_type;
 
 	ut_a(buf_page_in_file(bpage));
 
@@ -3335,17 +3187,8 @@ corrupt:
 		}
 	}
 
-	//buf_pool_mutex_enter();
-	if (io_type == BUF_IO_WRITE) {
-		flush_type = buf_page_get_flush_type(bpage);
-		/* to keep consistency at buf_LRU_insert_zip_clean() */
-		//if (flush_type == BUF_FLUSH_LRU) { /* optimistic! */
-			mutex_enter(&LRU_list_mutex);
-		//}
-		mutex_enter(&flush_list_mutex);
-	}
+	buf_pool_mutex_enter();
 	mutex_enter(buf_page_get_mutex(bpage));
-	mutex_enter(&buf_pool_mutex);
 
 #ifdef UNIV_IBUF_COUNT_DEBUG
 	if (io_type == BUF_IO_WRITE || uncompressed) {
@@ -3385,12 +3228,6 @@ corrupt:
 
 		buf_flush_write_complete(bpage);
 
-		mutex_exit(&flush_list_mutex);
-		/* to keep consistency at buf_LRU_insert_zip_clean() */
-		//if (flush_type == BUF_FLUSH_LRU) { /* optimistic! */
-			mutex_exit(&LRU_list_mutex);
-		//}
-
 		if (uncompressed) {
 			rw_lock_s_unlock_gen(&((buf_block_t*) bpage)->lock,
 					     BUF_IO_WRITE);
@@ -3413,9 +3250,8 @@ corrupt:
 	}
 #endif /* UNIV_DEBUG */
 
-	mutex_exit(&buf_pool_mutex);
 	mutex_exit(buf_page_get_mutex(bpage));
-	//buf_pool_mutex_exit();
+	buf_pool_mutex_exit();
 }
 
 /*************************************************************************
@@ -3437,14 +3273,12 @@ buf_pool_invalidate(void)
 		freed = buf_LRU_search_and_free_block(100);
 	}
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
+	buf_pool_mutex_enter();
 
 	ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
 	ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
+	buf_pool_mutex_exit();
 }
 
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -3468,11 +3302,7 @@ buf_validate(void)
 
 	ut_ad(buf_pool);
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
-	mutex_enter(&flush_list_mutex);
-	rw_lock_x_lock(&page_hash_latch);
-	/* for keep the new latch order, it cannot validate correctly... */
+	buf_pool_mutex_enter();
 
 	chunk = buf_pool->chunks;
 
@@ -3653,25 +3483,19 @@ buf_validate(void)
 	}
 
 	ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == n_lru);
-	/* because of latching order, we cannot get free_list_mutex here. */
-/*
 	if (UT_LIST_GET_LEN(buf_pool->free) != n_free) {
 		fprintf(stderr, "Free list len %lu, free blocks %lu\n",
 			(ulong) UT_LIST_GET_LEN(buf_pool->free),
 			(ulong) n_free);
 		ut_error;
 	}
-*/
 	ut_a(UT_LIST_GET_LEN(buf_pool->flush_list) == n_flush);
 
 	ut_a(buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE] == n_single_flush);
 	ut_a(buf_pool->n_flush[BUF_FLUSH_LIST] == n_list_flush);
 	ut_a(buf_pool->n_flush[BUF_FLUSH_LRU] == n_lru_flush);
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
-	mutex_exit(&flush_list_mutex);
-	rw_lock_x_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 
 	ut_a(buf_LRU_validate());
 	ut_a(buf_flush_validate());
@@ -3705,10 +3529,7 @@ buf_print(void)
 	index_ids = mem_alloc(sizeof(dulint) * size);
 	counts = mem_alloc(sizeof(ulint) * size);
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
-	mutex_enter(&flush_list_mutex);
-	mutex_enter(&free_list_mutex);
+	buf_pool_mutex_enter();
 
 	fprintf(stderr,
 		"buf_pool size %lu\n"
@@ -3771,10 +3592,7 @@ buf_print(void)
 		}
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
-	mutex_exit(&flush_list_mutex);
-	mutex_exit(&free_list_mutex);
+	buf_pool_mutex_exit();
 
 	for (i = 0; i < n_found; i++) {
 		index = dict_index_get_if_in_cache(index_ids[i]);
@@ -3812,7 +3630,7 @@ buf_get_latched_pages_number(void)
 	ulint		i;
 	ulint		fixed_pages_number = 0;
 
-	//buf_pool_mutex_enter();
+	buf_pool_mutex_enter();
 
 	chunk = buf_pool->chunks;
 
@@ -3882,7 +3700,7 @@ buf_get_latched_pages_number(void)
 	}
 
 	mutex_exit(&buf_pool_zip_mutex);
-	//buf_pool_mutex_exit();
+	buf_pool_mutex_exit();
 
 	return(fixed_pages_number);
 }
@@ -3939,11 +3757,7 @@ buf_print_io(
 	ut_ad(buf_pool);
 	size = buf_pool->curr_size;
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
-	mutex_enter(&flush_list_mutex);
-	mutex_enter(&free_list_mutex);
-	mutex_enter(&buf_pool_mutex);
+	buf_pool_mutex_enter();
 
 	fprintf(file,
 		"Buffer pool size        %lu\n"
@@ -4010,11 +3824,7 @@ buf_print_io(
 		buf_LRU_stat_sum.io, buf_LRU_stat_cur.io,
 		buf_LRU_stat_sum.unzip, buf_LRU_stat_cur.unzip);
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
-	mutex_exit(&flush_list_mutex);
-	mutex_exit(&free_list_mutex);
-	mutex_exit(&buf_pool_mutex);
+	buf_pool_mutex_exit();
 }
 
 /**************************************************************************
@@ -4043,7 +3853,7 @@ buf_all_freed(void)
 
 	ut_ad(buf_pool);
 
-	//buf_pool_mutex_enter(); /* optimistic */
+	buf_pool_mutex_enter();
 
 	chunk = buf_pool->chunks;
 
@@ -4060,7 +3870,7 @@ buf_all_freed(void)
 		}
 	}
 
-	//buf_pool_mutex_exit(); /* optimistic */
+	buf_pool_mutex_exit();
 
 	return(TRUE);
 }
@@ -4076,8 +3886,7 @@ buf_pool_check_no_pending_io(void)
 {
 	ibool	ret;
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&buf_pool_mutex);
+	buf_pool_mutex_enter();
 
 	if (buf_pool->n_pend_reads + buf_pool->n_flush[BUF_FLUSH_LRU]
 	    + buf_pool->n_flush[BUF_FLUSH_LIST]
@@ -4087,8 +3896,7 @@ buf_pool_check_no_pending_io(void)
 		ret = TRUE;
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&buf_pool_mutex);
+	buf_pool_mutex_exit();
 
 	return(ret);
 }
@@ -4102,13 +3910,11 @@ buf_get_free_list_len(void)
 {
 	ulint	len;
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&free_list_mutex);
+	buf_pool_mutex_enter();
 
 	len = UT_LIST_GET_LEN(buf_pool->free);
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&free_list_mutex);
+	buf_pool_mutex_exit();
 
 	return(len);
 }

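A pattern worth noting in the buf0buf.c hunks above (buf_page_get_gen() and buf_page_init_for_read()): whenever buf_pool_mutex is released around a potentially blocking allocation, the page hash has to be probed again after reacquiring it, and the speculatively allocated block is freed if another thread installed the page in the meantime. Below is a self-contained toy model of that release-and-recheck shape; page_table, the slot arithmetic and the sizes are all invented for illustration.

    #include <pthread.h>
    #include <stdlib.h>

    #define TABLE_SIZE 64   /* tiny stand-in for buf_pool->page_hash */

    static void *page_table[TABLE_SIZE];
    static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void *get_or_create_page(unsigned page_no)
    {
        unsigned slot = page_no % TABLE_SIZE;

        pthread_mutex_lock(&pool_mutex);
        void *page = page_table[slot];
        if (page != NULL) {
            pthread_mutex_unlock(&pool_mutex);
            return page;
        }

        /* The allocation may block, so it happens without the pool mutex,
           like buf_LRU_get_free_block()/buf_buddy_alloc() in the hunks. */
        pthread_mutex_unlock(&pool_mutex);
        void *block = malloc(64);            /* NULL check omitted for brevity */
        pthread_mutex_lock(&pool_mutex);

        /* Recheck: another thread may have added the page meanwhile. */
        page = page_table[slot];
        if (page != NULL) {
            free(block);                     /* lose the race gracefully */
        } else {
            page_table[slot] = block;
            page = block;
        }
        pthread_mutex_unlock(&pool_mutex);
        return page;
    }
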
=== modified file 'storage/xtradb/buf/buf0flu.c'
--- a/storage/xtradb/buf/buf0flu.c	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/buf/buf0flu.c	2009-05-04 04:32:30 +0000
@@ -61,8 +61,7 @@ buf_flush_insert_into_flush_list(
 /*=============================*/
 	buf_block_t*	block)	/* in/out: block which is modified */
 {
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&flush_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
 	      || (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
 		  <= block->page.oldest_modification));
@@ -93,8 +92,7 @@ buf_flush_insert_sorted_into_flush_list(
 	buf_page_t*	prev_b;
 	buf_page_t*	b;
 
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&flush_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
 
 	ut_ad(block->page.in_LRU_list);
@@ -137,7 +135,7 @@ buf_flush_ready_for_replace(
 {
 	//ut_ad(buf_pool_mutex_own());
 	//ut_ad(mutex_own(buf_page_get_mutex(bpage)));
-	//ut_ad(bpage->in_LRU_list);
+	//ut_ad(bpage->in_LRU_list); /* optimistic use */
 
 	if (UNIV_LIKELY(bpage->in_LRU_list && buf_page_in_file(bpage))) {
 
@@ -172,7 +170,7 @@ buf_flush_ready_for_flush(
 	enum buf_flush	flush_type)/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
 {
 	ut_a(buf_page_in_file(bpage));
-	//ut_ad(buf_pool_mutex_own()); /*optimistic...*/
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 	ut_ad(flush_type == BUF_FLUSH_LRU || BUF_FLUSH_LIST);
 
@@ -205,8 +203,7 @@ buf_flush_remove(
 /*=============*/
 	buf_page_t*	bpage)	/* in: pointer to the block in question */
 {
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&flush_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 	ut_ad(bpage->in_flush_list);
 	ut_d(bpage->in_flush_list = FALSE);
@@ -765,19 +762,12 @@ buf_flush_page(
 	ibool		is_uncompressed;
 
 	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
-	//ut_ad(buf_pool_mutex_own());
-#ifdef UNIV_SYNC_DEBUG
-	ut_ad(rw_lock_own(&page_hash_latch, RW_LOCK_EX)
-	      || rw_lock_own(&page_hash_latch, RW_LOCK_SHARED));
-#endif
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(buf_page_in_file(bpage));
 
 	block_mutex = buf_page_get_mutex(bpage);
 	ut_ad(mutex_own(block_mutex));
 
-	mutex_enter(&buf_pool_mutex);
-	rw_lock_s_unlock(&page_hash_latch);
-
 	ut_ad(buf_flush_ready_for_flush(bpage, flush_type));
 
 	buf_page_set_io_fix(bpage, BUF_IO_WRITE);
@@ -808,8 +798,7 @@ buf_flush_page(
 		}
 
 		mutex_exit(block_mutex);
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 
 		/* Even though bpage is not protected by any mutex at
 		this point, it is safe to access bpage, because it is
@@ -846,8 +835,7 @@ buf_flush_page(
 		immediately. */
 
 		mutex_exit(block_mutex);
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 		break;
 
 	default:
@@ -911,8 +899,7 @@ buf_flush_try_neighbors(
 		high = fil_space_get_size(space);
 	}
 
-	//buf_pool_mutex_enter();
-	rw_lock_s_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 
 	for (i = low; i < high; i++) {
 
@@ -949,16 +936,14 @@ buf_flush_try_neighbors(
 				ut_ad(!mutex_own(block_mutex));
 				count++;
 
-				//buf_pool_mutex_enter();
-				rw_lock_s_lock(&page_hash_latch);
+				buf_pool_mutex_enter();
 			} else {
 				mutex_exit(block_mutex);
 			}
 		}
 	}
 
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 
 	return(count);
 }
@@ -1002,29 +987,20 @@ buf_flush_batch(
 	ut_ad((flush_type != BUF_FLUSH_LIST)
 	      || sync_thread_levels_empty_gen(TRUE));
 #endif /* UNIV_SYNC_DEBUG */
-	//buf_pool_mutex_enter();
-	mutex_enter(&buf_pool_mutex);
+	buf_pool_mutex_enter();
 
 	if ((buf_pool->n_flush[flush_type] > 0)
 	    || (buf_pool->init_flush[flush_type] == TRUE)) {
 
 		/* There is already a flush batch of the same type running */
 
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 
 		return(ULINT_UNDEFINED);
 	}
 
 	buf_pool->init_flush[flush_type] = TRUE;
 
-	mutex_exit(&buf_pool_mutex);
-
-	if (flush_type == BUF_FLUSH_LRU) {
-		mutex_enter(&LRU_list_mutex);
-	}
-	mutex_enter(&flush_list_mutex);
-
 	for (;;) {
 flush_next:
 		/* If we have flushed enough, leave the loop */
@@ -1071,11 +1047,7 @@ flush_next:
 				space = buf_page_get_space(bpage);
 				offset = buf_page_get_page_no(bpage);
 
-				//buf_pool_mutex_exit();
-				if (flush_type == BUF_FLUSH_LRU) {
-					mutex_exit(&LRU_list_mutex);
-				}
-				mutex_exit(&flush_list_mutex);
+				buf_pool_mutex_exit();
 
 				old_page_count = page_count;
 
@@ -1085,8 +1057,7 @@ flush_next:
 					space, offset, flush_type);
 				} else {
 					/* Try to flush the page only */
-					//buf_pool_mutex_enter();
-					rw_lock_s_lock(&page_hash_latch);
+					buf_pool_mutex_enter();
 
 					mutex_t* block_mutex = buf_page_get_mutex(bpage);
 					mutex_enter(block_mutex);
@@ -1102,11 +1073,7 @@ flush_next:
 				flush_type, offset,
 				page_count - old_page_count); */
 
-				//buf_pool_mutex_enter();
-				if (flush_type == BUF_FLUSH_LRU) {
-					mutex_enter(&LRU_list_mutex);
-				}
-				mutex_enter(&flush_list_mutex);
+				buf_pool_mutex_enter();
 				goto flush_next;
 
 			} else if (flush_type == BUF_FLUSH_LRU) {
@@ -1124,13 +1091,6 @@ flush_next:
 		break;
 	}
 
-	if (flush_type == BUF_FLUSH_LRU) {
-		mutex_exit(&LRU_list_mutex);
-	}
-	mutex_exit(&flush_list_mutex);
-
-	mutex_enter(&buf_pool_mutex);
-
 	buf_pool->init_flush[flush_type] = FALSE;
 
 	if (buf_pool->n_flush[flush_type] == 0) {
@@ -1140,8 +1100,7 @@ flush_next:
 		os_event_set(buf_pool->no_flush[flush_type]);
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&buf_pool_mutex);
+	buf_pool_mutex_exit();
 
 	buf_flush_buffered_writes();
 
@@ -1195,7 +1154,7 @@ buf_flush_LRU_recommendation(void)
 
 	//buf_pool_mutex_enter();
 	if (have_LRU_mutex)
-		mutex_enter(&LRU_list_mutex);
+		buf_pool_mutex_enter();
 
 	n_replaceable = UT_LIST_GET_LEN(buf_pool->free);
 
@@ -1229,7 +1188,7 @@ buf_flush_LRU_recommendation(void)
 
 	//buf_pool_mutex_exit();
 	if (have_LRU_mutex)
-		mutex_exit(&LRU_list_mutex);
+		buf_pool_mutex_exit();
 
 	if (n_replaceable >= BUF_FLUSH_FREE_BLOCK_MARGIN) {
 
@@ -1307,13 +1266,11 @@ buf_flush_validate(void)
 {
 	ibool	ret;
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&flush_list_mutex);
+	buf_pool_mutex_enter();
 
 	ret = buf_flush_validate_low();
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&flush_list_mutex);
+	buf_pool_mutex_exit();
 
 	return(ret);
 }

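In the buf0flu.c hunks above, buf_flush_batch() keeps the guard that at most one flush batch of a given type runs at a time: a second caller bails out immediately, and waiters are woken via os_event_set(buf_pool->no_flush[...]) once the batch finishes. A simplified, self-contained model of that guard with a single batch type; the flag and condition variable are stand-ins, not the real buf_pool fields.

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t pool_mutex    = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  no_flush      = PTHREAD_COND_INITIALIZER;
    static bool            flush_running = false;

    /* Returns false when a batch is already in progress (the analogue of
       buf_flush_batch() returning ULINT_UNDEFINED). */
    static bool flush_batch(void)
    {
        pthread_mutex_lock(&pool_mutex);
        if (flush_running) {
            pthread_mutex_unlock(&pool_mutex);
            return false;
        }
        flush_running = true;
        pthread_mutex_unlock(&pool_mutex);

        /* ... write out dirty pages here, without holding pool_mutex
           across the actual I/O ... */

        pthread_mutex_lock(&pool_mutex);
        flush_running = false;
        pthread_cond_broadcast(&no_flush);   /* wake waiters, like os_event_set() */
        pthread_mutex_unlock(&pool_mutex);
        return true;
    }
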
=== modified file 'storage/xtradb/buf/buf0lru.c'
--- a/storage/xtradb/buf/buf0lru.c	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/buf/buf0lru.c	2009-05-04 04:32:30 +0000
@@ -129,31 +129,25 @@ static
 void
 buf_LRU_block_free_hashed_page(
 /*===========================*/
-	buf_block_t*	block,	/* in: block, must contain a file page and
+	buf_block_t*	block);	/* in: block, must contain a file page and
 				be in a state where it can be freed */
-	ibool		have_page_hash_mutex);
 
 /**********************************************************************
 Determines if the unzip_LRU list should be used for evicting a victim
 instead of the general LRU list. */
 UNIV_INLINE
 ibool
-buf_LRU_evict_from_unzip_LRU(
-	ibool		have_LRU_mutex)
+buf_LRU_evict_from_unzip_LRU(void)
 /*==============================*/
 				/* out: TRUE if should use unzip_LRU */
 {
 	ulint	io_avg;
 	ulint	unzip_avg;
 
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 
-	if (!have_LRU_mutex)
-		mutex_enter(&LRU_list_mutex);
 	/* If the unzip_LRU list is empty, we can only use the LRU. */
 	if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {
-		if (!have_LRU_mutex)
-			mutex_exit(&LRU_list_mutex);
 		return(FALSE);
 	}
 
@@ -162,20 +156,14 @@ buf_LRU_evict_from_unzip_LRU(
 	decompressed pages in the buffer pool. */
 	if (UT_LIST_GET_LEN(buf_pool->unzip_LRU)
 	    <= UT_LIST_GET_LEN(buf_pool->LRU) / 10) {
-		if (!have_LRU_mutex)
-			mutex_exit(&LRU_list_mutex);
 		return(FALSE);
 	}
 
 	/* If eviction hasn't started yet, we assume by default
 	that a workload is disk bound. */
 	if (buf_pool->freed_page_clock == 0) {
-		if (!have_LRU_mutex)
-			mutex_exit(&LRU_list_mutex);
 		return(TRUE);
 	}
-	if (!have_LRU_mutex)
-		mutex_exit(&LRU_list_mutex);
 
 	/* Calculate the average over past intervals, and add the values
 	of the current interval. */
@@ -241,8 +229,7 @@ buf_LRU_drop_page_hash_for_tablespace(
 
 	page_arr = ut_malloc(sizeof(ulint)
 			     * BUF_LRU_DROP_SEARCH_HASH_SIZE);
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
+	buf_pool_mutex_enter();
 
 scan_again:
 	num_entries = 0;
@@ -282,14 +269,12 @@ scan_again:
 			}
 			/* Array full. We release the buf_pool_mutex to
 			obey the latching order. */
-			//buf_pool_mutex_exit();
-			mutex_exit(&LRU_list_mutex);
+			buf_pool_mutex_exit();
 
 			buf_LRU_drop_page_hash_batch(id, zip_size, page_arr,
 						     num_entries);
 			num_entries = 0;
-			//buf_pool_mutex_enter();
-			mutex_enter(&LRU_list_mutex);
+			buf_pool_mutex_enter();
 		} else {
 			mutex_exit(block_mutex);
 		}
@@ -314,8 +299,7 @@ next_page:
 		}
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
+	buf_pool_mutex_exit();
 
 	/* Drop any remaining batch of search hashed pages. */
 	buf_LRU_drop_page_hash_batch(id, zip_size, page_arr, num_entries);
@@ -343,10 +327,7 @@ buf_LRU_invalidate_tablespace(
 	buf_LRU_drop_page_hash_for_tablespace(id);
 
 scan_again:
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
-	mutex_enter(&flush_list_mutex);
-	rw_lock_x_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 
 	all_freed = TRUE;
 
@@ -388,10 +369,7 @@ scan_again:
 				ulint	page_no;
 				ulint	zip_size;
 
-				//buf_pool_mutex_exit();
-				mutex_exit(&LRU_list_mutex);
-				mutex_exit(&flush_list_mutex);
-				rw_lock_x_unlock(&page_hash_latch);
+				buf_pool_mutex_exit();
 
 				zip_size = buf_page_get_zip_size(bpage);
 				page_no = buf_page_get_page_no(bpage);
@@ -415,7 +393,7 @@ scan_again:
 			if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
 			    != BUF_BLOCK_ZIP_FREE) {
 				buf_LRU_block_free_hashed_page((buf_block_t*)
-							       bpage, TRUE);
+							       bpage);
 			} else {
 				/* The block_mutex should have been
 				released by buf_LRU_block_remove_hashed_page()
@@ -438,10 +416,7 @@ next_page:
 		bpage = prev_bpage;
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
-	mutex_exit(&flush_list_mutex);
-	rw_lock_x_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 
 	if (!all_freed) {
 		os_thread_sleep(20000);
@@ -464,16 +439,14 @@ buf_LRU_get_recent_limit(void)
 	ulint			len;
 	ulint			limit;
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
+	buf_pool_mutex_enter();
 
 	len = UT_LIST_GET_LEN(buf_pool->LRU);
 
 	if (len < BUF_LRU_OLD_MIN_LEN) {
 		/* The LRU list is too short to do read-ahead */
 
-		//buf_pool_mutex_exit();
-		mutex_exit(&LRU_list_mutex);
+		buf_pool_mutex_exit();
 
 		return(0);
 	}
@@ -482,8 +455,7 @@ buf_LRU_get_recent_limit(void)
 
 	limit = buf_page_get_LRU_position(bpage) - len / BUF_LRU_INITIAL_RATIO;
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
+	buf_pool_mutex_exit();
 
 	return(limit);
 }
@@ -498,8 +470,7 @@ buf_LRU_insert_zip_clean(
 {
 	buf_page_t*	b;
 
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&flush_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
 
 	/* Find the first successor of bpage in the LRU list
@@ -507,7 +478,7 @@ buf_LRU_insert_zip_clean(
 	b = bpage;
 	do {
 		b = UT_LIST_GET_NEXT(LRU, b);
-	} while (b && (buf_page_get_state(b) != BUF_BLOCK_ZIP_PAGE || !b->in_LRU_list));
+	} while (b && buf_page_get_state(b) != BUF_BLOCK_ZIP_PAGE);
 
 	/* Insert bpage before b, i.e., after the predecessor of b. */
 	if (b) {
@@ -529,17 +500,16 @@ ibool
 buf_LRU_free_from_unzip_LRU_list(
 /*=============================*/
 				/* out: TRUE if freed */
-	ulint	n_iterations,	/* in: how many times this has been called
+	ulint	n_iterations)	/* in: how many times this has been called
 				repeatedly without result: a high value means
 				that we should search farther; we will search
 				n_iterations / 5 of the unzip_LRU list,
 				or nothing if n_iterations >= 5 */
-	ibool	have_LRU_mutex)
 {
 	buf_block_t*	block;
 	ulint		distance;
 
-	//ut_ad(buf_pool_mutex_own()); /* optimistic */
+	ut_ad(buf_pool_mutex_own());
 
 	/* Theoratically it should be much easier to find a victim
 	from unzip_LRU as we can choose even a dirty block (as we'll
@@ -549,7 +519,7 @@ buf_LRU_free_from_unzip_LRU_list(
 	if we have done five iterations so far. */
 
 	if (UNIV_UNLIKELY(n_iterations >= 5)
-	    || !buf_LRU_evict_from_unzip_LRU(have_LRU_mutex)) {
+	    || !buf_LRU_evict_from_unzip_LRU()) {
 
 		return(FALSE);
 	}
@@ -557,23 +527,18 @@ buf_LRU_free_from_unzip_LRU_list(
 	distance = 100 + (n_iterations
 			  * UT_LIST_GET_LEN(buf_pool->unzip_LRU)) / 5;
 
-restart:
 	for (block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
 	     UNIV_LIKELY(block != NULL) && UNIV_LIKELY(distance > 0);
 	     block = UT_LIST_GET_PREV(unzip_LRU, block), distance--) {
-		if (!block->in_unzip_LRU_list || !block->page.in_LRU_list
-		    || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)
-			goto restart;
 
 		enum buf_lru_free_block_status	freed;
 
-		/* optimistic */
-		//ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
-		//ut_ad(block->in_unzip_LRU_list);
-		//ut_ad(block->page.in_LRU_list);
+		ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
+		ut_ad(block->in_unzip_LRU_list);
+		ut_ad(block->page.in_LRU_list);
 
 		mutex_enter(&block->mutex);
-		freed = buf_LRU_free_block(&block->page, FALSE, NULL, have_LRU_mutex);
+		freed = buf_LRU_free_block(&block->page, FALSE, NULL);
 		mutex_exit(&block->mutex);
 
 		switch (freed) {
@@ -606,39 +571,33 @@ ibool
 buf_LRU_free_from_common_LRU_list(
 /*==============================*/
 				/* out: TRUE if freed */
-	ulint	n_iterations,	/* in: how many times this has been called
+	ulint	n_iterations)	/* in: how many times this has been called
 				repeatedly without result: a high value means
 				that we should search farther; if
 				n_iterations < 10, then we search
 				n_iterations / 10 * buf_pool->curr_size
 				pages from the end of the LRU list */
-	ibool	have_LRU_mutex)
 {
 	buf_page_t*	bpage;
 	ulint		distance;
 
-	//ut_ad(buf_pool_mutex_own()); /* optimistic */
+	ut_ad(buf_pool_mutex_own());
 
 	distance = 100 + (n_iterations * buf_pool->curr_size) / 10;
 
-restart:
 	for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
 	     UNIV_LIKELY(bpage != NULL) && UNIV_LIKELY(distance > 0);
 	     bpage = UT_LIST_GET_PREV(LRU, bpage), distance--) {
-		if (!bpage->in_LRU_list
-		    || buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE)
-			goto restart;
 
 		enum buf_lru_free_block_status	freed;
 		mutex_t*			block_mutex
 			= buf_page_get_mutex(bpage);
 
-		/* optimistic */
-		//ut_ad(buf_page_in_file(bpage));
-		//ut_ad(bpage->in_LRU_list);
+		ut_ad(buf_page_in_file(bpage));
+		ut_ad(bpage->in_LRU_list);
 
 		mutex_enter(block_mutex);
-		freed = buf_LRU_free_block(bpage, TRUE, NULL, have_LRU_mutex);
+		freed = buf_LRU_free_block(bpage, TRUE, NULL);
 		mutex_exit(block_mutex);
 
 		switch (freed) {
@@ -681,33 +640,22 @@ buf_LRU_search_and_free_block(
 				n_iterations / 5 of the unzip_LRU list. */
 {
 	ibool	freed = FALSE;
-	ibool	have_LRU_mutex = FALSE;
-
-	if (UT_LIST_GET_LEN(buf_pool->unzip_LRU))
-		have_LRU_mutex = TRUE;
 
-	/* optimistic search... */
-	//buf_pool_mutex_enter();
-	if (have_LRU_mutex)
-		mutex_enter(&LRU_list_mutex);
+	buf_pool_mutex_enter();
 
-	freed = buf_LRU_free_from_unzip_LRU_list(n_iterations, have_LRU_mutex);
+	freed = buf_LRU_free_from_unzip_LRU_list(n_iterations);
 
 	if (!freed) {
-		freed = buf_LRU_free_from_common_LRU_list(n_iterations, have_LRU_mutex);
+		freed = buf_LRU_free_from_common_LRU_list(n_iterations);
 	}
 
-	mutex_enter(&buf_pool_mutex);
 	if (!freed) {
 		buf_pool->LRU_flush_ended = 0;
 	} else if (buf_pool->LRU_flush_ended > 0) {
 		buf_pool->LRU_flush_ended--;
 	}
-	mutex_exit(&buf_pool_mutex);
 
-	//buf_pool_mutex_exit();
-	if (have_LRU_mutex)
-		mutex_exit(&LRU_list_mutex);
+	buf_pool_mutex_exit();
 
 	return(freed);
 }
@@ -725,22 +673,18 @@ void
 buf_LRU_try_free_flushed_blocks(void)
 /*=================================*/
 {
-	//buf_pool_mutex_enter();
-	mutex_enter(&buf_pool_mutex);
+	buf_pool_mutex_enter();
 
 	while (buf_pool->LRU_flush_ended > 0) {
 
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 
 		buf_LRU_search_and_free_block(1);
 
-		//buf_pool_mutex_enter();
-		mutex_enter(&buf_pool_mutex);
+		buf_pool_mutex_enter();
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&buf_pool_mutex);
+	buf_pool_mutex_exit();
 }
 
 /**********************************************************************
@@ -756,9 +700,7 @@ buf_LRU_buf_pool_running_out(void)
 {
 	ibool	ret	= FALSE;
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
-	mutex_enter(&free_list_mutex);
+	buf_pool_mutex_enter();
 
 	if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
 	    + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 4) {
@@ -766,9 +708,7 @@ buf_LRU_buf_pool_running_out(void)
 		ret = TRUE;
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
-	mutex_exit(&free_list_mutex);
+	buf_pool_mutex_exit();
 
 	return(ret);
 }
@@ -785,10 +725,9 @@ buf_LRU_get_free_only(void)
 {
 	buf_block_t*	block;
 
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 
-	mutex_enter(&free_list_mutex);
-	block = (buf_block_t*) UT_LIST_GET_LAST(buf_pool->free);
+	block = (buf_block_t*) UT_LIST_GET_FIRST(buf_pool->free);
 
 	if (block) {
 		ut_ad(block->page.in_free_list);
@@ -798,16 +737,12 @@ buf_LRU_get_free_only(void)
 		ut_a(!buf_page_in_file(&block->page));
 		UT_LIST_REMOVE(list, buf_pool->free, (&block->page));
 
-		mutex_exit(&free_list_mutex);
-
 		mutex_enter(&block->mutex);
 
 		buf_block_set_state(block, BUF_BLOCK_READY_FOR_USE);
 		UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE);
 
 		mutex_exit(&block->mutex);
-	} else {
-		mutex_exit(&free_list_mutex);
 	}
 
 	return(block);
@@ -832,7 +767,7 @@ buf_LRU_get_free_block(
 	ibool		mon_value_was	= FALSE;
 	ibool		started_monitor	= FALSE;
 loop:
-	//buf_pool_mutex_enter();
+	buf_pool_mutex_enter();
 
 	if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
 	    + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) {
@@ -912,16 +847,14 @@ loop:
 		if (UNIV_UNLIKELY(zip_size)) {
 			ibool	lru;
 			page_zip_set_size(&block->page.zip, zip_size);
-			mutex_enter(&LRU_list_mutex);
-			block->page.zip.data = buf_buddy_alloc(zip_size, &lru, FALSE);
-			mutex_exit(&LRU_list_mutex);
+			block->page.zip.data = buf_buddy_alloc(zip_size, &lru);
 			UNIV_MEM_DESC(block->page.zip.data, zip_size, block);
 		} else {
 			page_zip_set_size(&block->page.zip, 0);
 			block->page.zip.data = NULL;
 		}
 
-		//buf_pool_mutex_exit();
+		buf_pool_mutex_exit();
 
 		if (started_monitor) {
 			srv_print_innodb_monitor = mon_value_was;
@@ -933,7 +866,7 @@ loop:
 	/* If no block was in the free list, search from the end of the LRU
 	list and try to free a block there */
 
-	//buf_pool_mutex_exit();
+	buf_pool_mutex_exit();
 
 	freed = buf_LRU_search_and_free_block(n_iterations);
 
@@ -982,21 +915,18 @@ loop:
 
 	os_aio_simulated_wake_handler_threads();
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&buf_pool_mutex);
+	buf_pool_mutex_enter();
 
 	if (buf_pool->LRU_flush_ended > 0) {
 		/* We have written pages in an LRU flush. To make the insert
 		buffer more efficient, we try to move these pages to the free
 		list. */
 
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 
 		buf_LRU_try_free_flushed_blocks();
 	} else {
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 	}
 
 	if (n_iterations > 10) {
@@ -1021,8 +951,7 @@ buf_LRU_old_adjust_len(void)
 	ulint	new_len;
 
 	ut_a(buf_pool->LRU_old);
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 #if 3 * (BUF_LRU_OLD_MIN_LEN / 8) <= BUF_LRU_OLD_TOLERANCE + 5
 # error "3 * (BUF_LRU_OLD_MIN_LEN / 8) <= BUF_LRU_OLD_TOLERANCE + 5"
 #endif
@@ -1080,8 +1009,7 @@ buf_LRU_old_init(void)
 {
 	buf_page_t*	bpage;
 
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);
 
 	/* We first initialize all blocks in the LRU list as old and then use
@@ -1113,14 +1041,13 @@ buf_unzip_LRU_remove_block_if_needed(
 	ut_ad(buf_pool);
 	ut_ad(bpage);
 	ut_ad(buf_page_in_file(bpage));
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 
 	if (buf_page_belongs_to_unzip_LRU(bpage)) {
 		buf_block_t*	block = (buf_block_t*) bpage;
 
 		ut_ad(block->in_unzip_LRU_list);
-		block->in_unzip_LRU_list = FALSE;
+		ut_d(block->in_unzip_LRU_list = FALSE);
 
 		UT_LIST_REMOVE(unzip_LRU, buf_pool->unzip_LRU, block);
 	}
@@ -1136,8 +1063,7 @@ buf_LRU_remove_block(
 {
 	ut_ad(buf_pool);
 	ut_ad(bpage);
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 
 	ut_a(buf_page_in_file(bpage));
 
@@ -1200,13 +1126,12 @@ buf_unzip_LRU_add_block(
 {
 	ut_ad(buf_pool);
 	ut_ad(block);
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 
 	ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
 
 	ut_ad(!block->in_unzip_LRU_list);
-	block->in_unzip_LRU_list = TRUE;
+	ut_d(block->in_unzip_LRU_list = TRUE);
 
 	if (old) {
 		UT_LIST_ADD_LAST(unzip_LRU, buf_pool->unzip_LRU, block);
@@ -1227,8 +1152,7 @@ buf_LRU_add_block_to_end_low(
 
 	ut_ad(buf_pool);
 	ut_ad(bpage);
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 
 	ut_a(buf_page_in_file(bpage));
 
@@ -1288,8 +1212,7 @@ buf_LRU_add_block_low(
 {
 	ut_ad(buf_pool);
 	ut_ad(bpage);
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 
 	ut_a(buf_page_in_file(bpage));
 	ut_ad(!bpage->in_LRU_list);
@@ -1408,24 +1331,22 @@ buf_LRU_free_block(
 	buf_page_t*	bpage,	/* in: block to be freed */
 	ibool		zip,	/* in: TRUE if should remove also the
 				compressed page of an uncompressed page */
-	ibool*		buf_pool_mutex_released,
+	ibool*		buf_pool_mutex_released)
 				/* in: pointer to a variable that will
 				be assigned TRUE if buf_pool_mutex
 				was temporarily released, or NULL */
-	ibool		have_LRU_mutex)
 {
 	buf_page_t*	b = NULL;
 	mutex_t*	block_mutex = buf_page_get_mutex(bpage);
 
-	//ut_ad(buf_pool_mutex_own());
-	/* optimistic */
-	//ut_ad(mutex_own(block_mutex));
-	//ut_ad(buf_page_in_file(bpage));
-	//ut_ad(bpage->in_LRU_list);
-	//ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
+	ut_ad(buf_pool_mutex_own());
+	ut_ad(mutex_own(block_mutex));
+	ut_ad(buf_page_in_file(bpage));
+	ut_ad(bpage->in_LRU_list);
+	ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
 	UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
 
-	if (!bpage->in_LRU_list || !block_mutex || !buf_page_can_relocate(bpage)) {
+	if (!buf_page_can_relocate(bpage)) {
 
 		/* Do not free buffer-fixed or I/O-fixed blocks. */
 		return(BUF_LRU_NOT_FREED);
@@ -1457,15 +1378,15 @@ buf_LRU_free_block(
 		If it cannot be allocated (without freeing a block
 		from the LRU list), refuse to free bpage. */
 alloc:
-		//buf_pool_mutex_exit_forbid();
-		b = buf_buddy_alloc(sizeof *b, NULL, FALSE);
-		//buf_pool_mutex_exit_allow();
+		buf_pool_mutex_exit_forbid();
+		b = buf_buddy_alloc(sizeof *b, NULL);
+		buf_pool_mutex_exit_allow();
 
 		if (UNIV_UNLIKELY(!b)) {
 			return(BUF_LRU_CANNOT_RELOCATE);
 		}
 
-		//memcpy(b, bpage, sizeof *b);
+		memcpy(b, bpage, sizeof *b);
 	}
 
 #ifdef UNIV_DEBUG
@@ -1476,41 +1397,6 @@ alloc:
 	}
 #endif /* UNIV_DEBUG */
 
-	/* not to break latch order, must re-enter block_mutex */
-	mutex_exit(block_mutex);
-
-	if (!have_LRU_mutex)
-		mutex_enter(&LRU_list_mutex); /* optimistic */
-	mutex_enter(&flush_list_mutex);
-	rw_lock_x_lock(&page_hash_latch);
-	mutex_enter(block_mutex);
-
-	/* recheck states of block */
-	if (!bpage->in_LRU_list || block_mutex != buf_page_get_mutex(bpage)
-	    || !buf_page_can_relocate(bpage)) {
-not_freed:
-		if (b) {
-			buf_buddy_free(b, sizeof *b, TRUE);
-		}
-		if (!have_LRU_mutex)
-			mutex_exit(&LRU_list_mutex);
-		mutex_exit(&flush_list_mutex);
-		rw_lock_x_unlock(&page_hash_latch);
-		return(BUF_LRU_NOT_FREED);
-	} else if (zip || !bpage->zip.data) {
-		if (bpage->oldest_modification)
-			goto not_freed;
-	} else if (bpage->oldest_modification) {
-		if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
-			ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_DIRTY);
-			goto not_freed;
-		}
-	}
-
-	if (b) {
-		memcpy(b, bpage, sizeof *b);
-	}
-
 	if (buf_LRU_block_remove_hashed_page(bpage, zip)
 	    != BUF_BLOCK_ZIP_FREE) {
 		ut_a(bpage->buf_fix_count == 0);
@@ -1522,10 +1408,6 @@ not_freed:
 
 			ut_a(!buf_page_hash_get(bpage->space, bpage->offset));
 
-			while (prev_b && !prev_b->in_LRU_list) {
-				prev_b = UT_LIST_GET_PREV(LRU, prev_b);
-			}
-
 			b->state = b->oldest_modification
 				? BUF_BLOCK_ZIP_DIRTY
 				: BUF_BLOCK_ZIP_PAGE;
@@ -1639,10 +1521,7 @@ not_freed:
 			*buf_pool_mutex_released = TRUE;
 		}
 
-		//buf_pool_mutex_exit();
-		mutex_exit(&LRU_list_mutex);
-		mutex_exit(&flush_list_mutex);
-		rw_lock_x_unlock(&page_hash_latch);
+		buf_pool_mutex_exit();
 		mutex_exit(block_mutex);
 
 		/* Remove possible adaptive hash index on the page.
@@ -1674,9 +1553,7 @@ not_freed:
 				: BUF_NO_CHECKSUM_MAGIC);
 		}
 
-		//buf_pool_mutex_enter();
-		if (have_LRU_mutex)
-			mutex_enter(&LRU_list_mutex);
+		buf_pool_mutex_enter();
 		mutex_enter(block_mutex);
 
 		if (b) {
@@ -1686,18 +1563,13 @@ not_freed:
 			mutex_exit(&buf_pool_zip_mutex);
 		}
 
-		buf_LRU_block_free_hashed_page((buf_block_t*) bpage, FALSE);
+		buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
 	} else {
 		/* The block_mutex should have been released by
 		buf_LRU_block_remove_hashed_page() when it returns
 		BUF_BLOCK_ZIP_FREE. */
 		ut_ad(block_mutex == &buf_pool_zip_mutex);
 		mutex_enter(block_mutex);
-
-		if (!have_LRU_mutex)
-			mutex_exit(&LRU_list_mutex);
-		mutex_exit(&flush_list_mutex);
-		rw_lock_x_unlock(&page_hash_latch);
 	}
 
 	return(BUF_LRU_FREED);
@@ -1709,13 +1581,12 @@ UNIV_INTERN
 void
 buf_LRU_block_free_non_file_page(
 /*=============================*/
-	buf_block_t*	block,	/* in: block, must not contain a file page */
-	ibool		have_page_hash_mutex)
+	buf_block_t*	block)	/* in: block, must not contain a file page */
 {
 	void*	data;
 
 	ut_ad(block);
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(&block->mutex));
 
 	switch (buf_block_get_state(block)) {
@@ -1749,17 +1620,15 @@ buf_LRU_block_free_non_file_page(
 	if (data) {
 		block->page.zip.data = NULL;
 		mutex_exit(&block->mutex);
-		//buf_pool_mutex_exit_forbid();
-		buf_buddy_free(data, page_zip_get_size(&block->page.zip), have_page_hash_mutex);
-		//buf_pool_mutex_exit_allow();
+		buf_pool_mutex_exit_forbid();
+		buf_buddy_free(data, page_zip_get_size(&block->page.zip));
+		buf_pool_mutex_exit_allow();
 		mutex_enter(&block->mutex);
 		page_zip_set_size(&block->page.zip, 0);
 	}
 
-	mutex_enter(&free_list_mutex);
 	UT_LIST_ADD_FIRST(list, buf_pool->free, (&block->page));
 	ut_d(block->page.in_free_list = TRUE);
-	mutex_exit(&free_list_mutex);
 
 	UNIV_MEM_ASSERT_AND_FREE(block->frame, UNIV_PAGE_SIZE);
 }
@@ -1788,11 +1657,7 @@ buf_LRU_block_remove_hashed_page(
 {
 	const buf_page_t*	hashed_bpage;
 	ut_ad(bpage);
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
-#ifdef UNIV_SYNC_DEBUG
-	ut_ad(rw_lock_own(&page_hash_latch, RW_LOCK_EX));
-#endif
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 
 	ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
@@ -1893,9 +1758,7 @@ buf_LRU_block_remove_hashed_page(
 
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 		mutex_exit(buf_page_get_mutex(bpage));
-		//buf_pool_mutex_exit();
-		mutex_exit(&LRU_list_mutex);
-		rw_lock_x_unlock(&page_hash_latch);
+		buf_pool_mutex_exit();
 		buf_print();
 		buf_LRU_print();
 		buf_validate();
@@ -1921,11 +1784,11 @@ buf_LRU_block_remove_hashed_page(
 		UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
 
 		mutex_exit(&buf_pool_zip_mutex);
-		//buf_pool_mutex_exit_forbid();
+		buf_pool_mutex_exit_forbid();
 		buf_buddy_free(bpage->zip.data,
-			       page_zip_get_size(&bpage->zip), TRUE);
-		buf_buddy_free(bpage, sizeof(*bpage), TRUE);
-		//buf_pool_mutex_exit_allow();
+			       page_zip_get_size(&bpage->zip));
+		buf_buddy_free(bpage, sizeof(*bpage));
+		buf_pool_mutex_exit_allow();
 		UNIV_MEM_UNDESC(bpage);
 		return(BUF_BLOCK_ZIP_FREE);
 
@@ -1944,9 +1807,9 @@ buf_LRU_block_remove_hashed_page(
 			bpage->zip.data = NULL;
 
 			mutex_exit(&((buf_block_t*) bpage)->mutex);
-			//buf_pool_mutex_exit_forbid();
-			buf_buddy_free(data, page_zip_get_size(&bpage->zip), TRUE);
-			//buf_pool_mutex_exit_allow();
+			buf_pool_mutex_exit_forbid();
+			buf_buddy_free(data, page_zip_get_size(&bpage->zip));
+			buf_pool_mutex_exit_allow();
 			mutex_enter(&((buf_block_t*) bpage)->mutex);
 			page_zip_set_size(&bpage->zip, 0);
 		}
@@ -1972,16 +1835,15 @@ static
 void
 buf_LRU_block_free_hashed_page(
 /*===========================*/
-	buf_block_t*	block,	/* in: block, must contain a file page and
+	buf_block_t*	block)	/* in: block, must contain a file page and
 				be in a state where it can be freed */
-	ibool		have_page_hash_mutex)
 {
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(&block->mutex));
 
 	buf_block_set_state(block, BUF_BLOCK_MEMORY);
 
-	buf_LRU_block_free_non_file_page(block, have_page_hash_mutex);
+	buf_LRU_block_free_non_file_page(block);
 }
 
 /************************************************************************
@@ -1999,8 +1861,7 @@ buf_LRU_stat_update(void)
 		goto func_exit;
 	}
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&buf_pool_mutex);
+	buf_pool_mutex_enter();
 
 	/* Update the index. */
 	item = &buf_LRU_stat_arr[buf_LRU_stat_arr_ind];
@@ -2014,8 +1875,7 @@ buf_LRU_stat_update(void)
 	/* Put current entry in the array. */
 	memcpy(item, &buf_LRU_stat_cur, sizeof *item);
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&buf_pool_mutex);
+	buf_pool_mutex_exit();
 
 func_exit:
 	/* Clear the current entry. */
@@ -2037,8 +1897,7 @@ buf_LRU_validate(void)
 	ulint		LRU_pos;
 
 	ut_ad(buf_pool);
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
+	buf_pool_mutex_enter();
 
 	if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
 
@@ -2097,9 +1956,6 @@ buf_LRU_validate(void)
 		ut_a(buf_pool->LRU_old_len == old_len);
 	}
 
-	mutex_exit(&LRU_list_mutex);
-	mutex_enter(&free_list_mutex);
-
 	UT_LIST_VALIDATE(list, buf_page_t, buf_pool->free);
 
 	for (bpage = UT_LIST_GET_FIRST(buf_pool->free);
@@ -2109,9 +1965,6 @@ buf_LRU_validate(void)
 		ut_a(buf_page_get_state(bpage) == BUF_BLOCK_NOT_USED);
 	}
 
-	mutex_exit(&free_list_mutex);
-	mutex_enter(&LRU_list_mutex);
-
 	UT_LIST_VALIDATE(unzip_LRU, buf_block_t, buf_pool->unzip_LRU);
 
 	for (block = UT_LIST_GET_FIRST(buf_pool->unzip_LRU);
@@ -2123,8 +1976,7 @@ buf_LRU_validate(void)
 		ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
+	buf_pool_mutex_exit();
 	return(TRUE);
 }
 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
@@ -2140,8 +1992,7 @@ buf_LRU_print(void)
 	const buf_page_t*	bpage;
 
 	ut_ad(buf_pool);
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
+	buf_pool_mutex_enter();
 
 	fprintf(stderr, "Pool ulint clock %lu\n",
 		(ulong) buf_pool->ulint_clock);
@@ -2204,7 +2055,6 @@ buf_LRU_print(void)
 		bpage = UT_LIST_GET_NEXT(LRU, bpage);
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
+	buf_pool_mutex_exit();
 }
 #endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
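
The buf0lru.c hunks above all make the same change: the split LRU_list_mutex / flush_list_mutex / free_list_mutex / page_hash_latch scheme is dropped, and the single buf_pool_mutex once again protects the LRU, free and flush lists. The restored locking order, roughly, is the one sketched below (illustrative only, not an excerpt from the patch; example_try_free_block() is a made-up name, the mutex and buffer-pool calls are the real ones):

	/* Sketch: take the global buffer-pool mutex first, the per-block
	   mutex second, and release both in reverse order once the block
	   has been inspected or freed. */
	static void
	example_try_free_block(buf_block_t* block)
	{
		buf_pool_mutex_enter();		/* global buffer-pool mutex */
		mutex_enter(&block->mutex);	/* per-block mutex */

		if (buf_page_can_relocate(&block->page)) {
			/* neither buffer-fixed nor I/O-fixed: may be freed */
			buf_LRU_free_block(&block->page, TRUE, NULL);
		}

		mutex_exit(&block->mutex);
		buf_pool_mutex_exit();
	}
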

=== modified file 'storage/xtradb/buf/buf0rea.c'
--- a/storage/xtradb/buf/buf0rea.c	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/buf/buf0rea.c	2009-05-04 02:45:47 +0000
@@ -246,22 +246,18 @@ buf_read_ahead_random(
 
 	LRU_recent_limit = buf_LRU_get_recent_limit();
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&buf_pool_mutex);
+	buf_pool_mutex_enter();
 
 	if (buf_pool->n_pend_reads
 	    > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 
 		return(0);
 	}
-	mutex_exit(&buf_pool_mutex);
 
 	/* Count how many blocks in the area have been recently accessed,
 	that is, reside near the start of the LRU list. */
 
-	rw_lock_s_lock(&page_hash_latch);
 	for (i = low; i < high; i++) {
 		const buf_page_t*	bpage = buf_page_hash_get(space, i);
 
@@ -273,15 +269,13 @@ buf_read_ahead_random(
 
 			if (recent_blocks >= BUF_READ_AHEAD_RANDOM_THRESHOLD) {
 
-				//buf_pool_mutex_exit();
-				rw_lock_s_unlock(&page_hash_latch);
+				buf_pool_mutex_exit();
 				goto read_ahead;
 			}
 		}
 	}
 
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 	/* Do nothing */
 	return(0);
 
@@ -475,12 +469,10 @@ buf_read_ahead_linear(
 
 	tablespace_version = fil_space_get_version(space);
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&buf_pool_mutex);
+	buf_pool_mutex_enter();
 
 	if (high > fil_space_get_size(space)) {
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 		/* The area is not whole, return */
 
 		return(0);
@@ -488,12 +480,10 @@ buf_read_ahead_linear(
 
 	if (buf_pool->n_pend_reads
 	    > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 
 		return(0);
 	}
-	mutex_exit(&buf_pool_mutex);
 
 	/* Check that almost all pages in the area have been accessed; if
 	offset == low, the accesses must be in a descending order, otherwise,
@@ -507,7 +497,6 @@ buf_read_ahead_linear(
 
 	fail_count = 0;
 
-	rw_lock_s_lock(&page_hash_latch);
 	for (i = low; i < high; i++) {
 		bpage = buf_page_hash_get(space, i);
 
@@ -531,8 +520,7 @@ buf_read_ahead_linear(
 	    * LINEAR_AREA_THRESHOLD_COEF) {
 		/* Too many failures: return */
 
-		//buf_pool_mutex_exit();
-		rw_lock_s_unlock(&page_hash_latch);
+		buf_pool_mutex_exit();
 
 		return(0);
 	}
@@ -543,8 +531,7 @@ buf_read_ahead_linear(
 	bpage = buf_page_hash_get(space, offset);
 
 	if (bpage == NULL) {
-		//buf_pool_mutex_exit();
-		rw_lock_s_unlock(&page_hash_latch);
+		buf_pool_mutex_exit();
 
 		return(0);
 	}
@@ -570,8 +557,7 @@ buf_read_ahead_linear(
 	pred_offset = fil_page_get_prev(frame);
 	succ_offset = fil_page_get_next(frame);
 
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 
 	if ((offset == low) && (succ_offset == offset + 1)) {
 

=== modified file 'storage/xtradb/handler/ha_innodb.cc'
--- a/storage/xtradb/handler/ha_innodb.cc	2009-06-09 12:21:26 +0000
+++ b/storage/xtradb/handler/ha_innodb.cc	2009-06-10 06:51:03 +0000
@@ -35,7 +35,17 @@ Place, Suite 330, Boston, MA 02111-1307 
 #pragma implementation				// gcc: Class implementation
 #endif
 
+#define MYSQL_SERVER
+
 #include <mysql_priv.h>
+#ifdef MYSQL_SERVER
+#include <rpl_mi.h>
+#include <slave.h>
+// Defined in slave.cc
+int init_intvar_from_file(int* var, IO_CACHE* f, int default_val);
+int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
+			  const char *default_val);
+#endif /* MYSQL_SERVER */
 
 #include <m_ctype.h>
 #include <mysys_err.h>
@@ -78,6 +88,14 @@ extern "C" {
 #include "i_s.h"
 #include "handler0vars.h"
 
+#ifdef MYSQL_SERVER
+// Defined in trx0sys.c
+extern char		trx_sys_mysql_master_log_name[];
+extern ib_int64_t	trx_sys_mysql_master_log_pos;
+extern char		trx_sys_mysql_relay_log_name[];
+extern ib_int64_t	trx_sys_mysql_relay_log_pos;
+#endif /* MYSQL_SERVER */
+
 #ifndef MYSQL_SERVER
 /* This is needed because of Bug #3596.  Let us hope that pthread_mutex_t
 is defined the same in both builds: the MySQL server and the InnoDB plugin. */
@@ -176,6 +194,7 @@ static my_bool	innobase_use_doublewrite	
 static my_bool	innobase_use_checksums			= TRUE;
 static my_bool	innobase_extra_undoslots		= FALSE;
 static my_bool	innobase_locks_unsafe_for_binlog	= FALSE;
+static my_bool	innobase_overwrite_relay_log_info	= FALSE;
 static my_bool	innobase_rollback_on_timeout		= FALSE;
 static my_bool	innobase_create_status_file		= FALSE;
 static my_bool	innobase_stats_on_metadata		= TRUE;
@@ -1920,6 +1939,89 @@ innobase_init(
 	}
 #endif /* UNIV_DEBUG */
 
+#ifndef MYSQL_SERVER
+	innodb_overwrite_relay_log_info = FALSE;
+#endif
+
+#ifdef HAVE_REPLICATION
+#ifdef MYSQL_SERVER
+	/* read master log position from relay-log.info if exists */
+	char fname[FN_REFLEN+128];
+	int pos;
+	int info_fd;
+	IO_CACHE info_file;
+
+	fname[0] = '\0';
+
+	if(innobase_overwrite_relay_log_info) {
+
+	fprintf(stderr,
+		"InnoDB: Warning: innodb_overwrite_relay_log_info is enabled."
+		" Updates in other storage engines may have problems with consistency.\n");
+
+	bzero((char*) &info_file, sizeof(info_file));
+	fn_format(fname, relay_log_info_file, mysql_data_home, "", 4+32);
+
+	int error=0;
+
+	if (!access(fname,F_OK)) {
+		/* exist */
+		if ((info_fd = my_open(fname, O_RDWR|O_BINARY, MYF(MY_WME))) < 0) {
+			error=1;
+		} else if (init_io_cache(&info_file, info_fd, IO_SIZE*2,
+					READ_CACHE, 0L, 0, MYF(MY_WME))) {
+			error=1;
+		}
+
+		if (error) {
+relay_info_error:
+			if (info_fd >= 0)
+				my_close(info_fd, MYF(0));
+			fname[0] = '\0';
+			goto skip_relay;
+		}
+	} else {
+		fname[0] = '\0';
+		goto skip_relay;
+	}
+
+	if (init_strvar_from_file(fname, sizeof(fname), &info_file, "") || /* dummy (it is relay-log) */
+	    init_intvar_from_file(&pos, &info_file, BIN_LOG_HEADER_SIZE)) { 
+		end_io_cache(&info_file);
+		error=1;
+		goto relay_info_error;
+	}
+
+	fprintf(stderr,
+		"InnoDB: relay-log.info is detected.\n"
+		"InnoDB: relay log: position %u, file name %s\n",
+		pos, fname);
+
+	strncpy(trx_sys_mysql_relay_log_name, fname, TRX_SYS_MYSQL_MASTER_LOG_NAME_LEN);
+	trx_sys_mysql_relay_log_pos = (ib_int64_t) pos;
+
+	if (init_strvar_from_file(fname, sizeof(fname), &info_file, "") ||
+	    init_intvar_from_file(&pos, &info_file, 0)) {
+		end_io_cache(&info_file);
+		error=1;
+		goto relay_info_error;
+	}
+
+	fprintf(stderr,
+		"InnoDB: master log: position %u, file name %s\n",
+		pos, fname);
+
+	strncpy(trx_sys_mysql_master_log_name, fname, TRX_SYS_MYSQL_MASTER_LOG_NAME_LEN);
+	trx_sys_mysql_master_log_pos = (ib_int64_t) pos;
+
+	end_io_cache(&info_file);
+	if (info_fd >= 0)
+		my_close(info_fd, MYF(0));
+	}
+skip_relay:
+#endif /* MYSQL_SERVER */
+#endif /* HAVE_REPLICATION */
+
 	/* Check that values don't overflow on 32-bit systems. */
 	if (sizeof(ulint) == 4) {
 		if (innobase_buffer_pool_size > UINT_MAX32) {
@@ -2003,6 +2105,77 @@ mem_free_and_error:
 		goto error;
 	}
 
+#ifdef HAVE_REPLICATION
+#ifdef MYSQL_SERVER
+	if(innobase_overwrite_relay_log_info) {
+	/* If InnoDB progressed from relay-log.info, overwrite it */
+	if (fname[0] == '\0') {
+		fprintf(stderr,
+			"InnoDB: something is wrong with relay-log.info. InnoDB will not overwrite it.\n");
+	} else if (0 != strcmp(fname, trx_sys_mysql_master_log_name)
+		   || pos != trx_sys_mysql_master_log_pos) {
+		/* Overwrite relay-log.info */
+		bzero((char*) &info_file, sizeof(info_file));
+		fn_format(fname, relay_log_info_file, mysql_data_home, "", 4+32);
+
+		int error = 0;
+
+		if (!access(fname,F_OK)) {
+			/* exist */
+			if ((info_fd = my_open(fname, O_RDWR|O_BINARY, MYF(MY_WME))) < 0) {
+				error = 1;
+			} else if (init_io_cache(&info_file, info_fd, IO_SIZE*2,
+						WRITE_CACHE, 0L, 0, MYF(MY_WME))) {
+				error = 1;
+			}
+
+			if (error) {
+				if (info_fd >= 0)
+					my_close(info_fd, MYF(0));
+				goto skip_overwrite;
+			}
+		} else {
+			error = 1;
+			goto skip_overwrite;
+		}
+
+		char buff[FN_REFLEN*2+22*2+4], *pos;
+
+		my_b_seek(&info_file, 0L);
+		pos=strmov(buff, trx_sys_mysql_relay_log_name);
+		*pos++='\n';
+		pos=longlong2str(trx_sys_mysql_relay_log_pos, pos, 10);
+		*pos++='\n';
+		pos=strmov(pos, trx_sys_mysql_master_log_name);
+		*pos++='\n';
+		pos=longlong2str(trx_sys_mysql_master_log_pos, pos, 10);
+		*pos='\n';
+
+		if (my_b_write(&info_file, (uchar*) buff, (size_t) (pos-buff)+1))
+			error = 1;
+		if (flush_io_cache(&info_file))
+			error = 1;
+
+		end_io_cache(&info_file);
+		if (info_fd >= 0)
+			my_close(info_fd, MYF(0));
+skip_overwrite:
+		if (error) {
+			fprintf(stderr,
+				"InnoDB: ERROR: an error occurred while overwriting relay-log.info.\n");
+		} else {
+			fprintf(stderr,
+				"InnoDB: relay-log.info was overwritten.\n");
+		}
+	} else {
+		fprintf(stderr,
+			"InnoDB: InnoDB and relay-log.info are synchronized. InnoDB will not overwrite it.\n");
+	}
+	}
+#endif /* MYSQL_SERVER */
+#endif /* HAVE_REPLICATION */
+
+
 	srv_extra_undoslots = (ibool) innobase_extra_undoslots;
 
 	/* -------------- Log files ---------------------------*/
@@ -2270,6 +2443,26 @@ innobase_commit_low(
 		return;
 	}
 
+#ifdef HAVE_REPLICATION
+#ifdef MYSQL_SERVER
+	THD *thd=current_thd;
+
+	if (thd && thd->slave_thread) {
+		/* Update the replication position info inside InnoDB */
+		trx->mysql_master_log_file_name
+			= active_mi->rli.group_master_log_name;
+		trx->mysql_master_log_pos
+			= ((ib_int64_t)active_mi->rli.group_master_log_pos +
+			   ((ib_int64_t)active_mi->rli.future_event_relay_log_pos -
+			    (ib_int64_t)active_mi->rli.group_relay_log_pos));
+		trx->mysql_relay_log_file_name
+			= active_mi->rli.group_relay_log_name;
+		trx->mysql_relay_log_pos
+			= (ib_int64_t)active_mi->rli.future_event_relay_log_pos;
+	}
+#endif /* MYSQL_SERVER */
+#endif /* HAVE_REPLICATION */
+
 	trx_commit_for_mysql(trx);
 }
 
@@ -9509,6 +9702,12 @@ static MYSQL_SYSVAR_BOOL(extra_undoslots
   "don't use the datafile for normal mysqld or ibbackup! ####",
   NULL, NULL, FALSE);
 
+static MYSQL_SYSVAR_BOOL(overwrite_relay_log_info, innobase_overwrite_relay_log_info,
+  PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+  "During InnoDB crash recovery on slave overwrite relay-log.info "
+  "to align master log file position if information in InnoDB and relay-log.info is different.",
+  NULL, NULL, FALSE);
+
 static MYSQL_SYSVAR_BOOL(doublewrite, innobase_use_doublewrite,
   PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
   "Enable InnoDB doublewrite buffer (enabled by default). "
@@ -9851,6 +10050,7 @@ static struct st_mysql_sys_var* innobase
   MYSQL_SYSVAR(max_purge_lag),
   MYSQL_SYSVAR(mirrored_log_groups),
   MYSQL_SYSVAR(open_files),
+  MYSQL_SYSVAR(overwrite_relay_log_info),
   MYSQL_SYSVAR(rollback_on_timeout),
   MYSQL_SYSVAR(stats_on_metadata),
   MYSQL_SYSVAR(stats_sample_pages),
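
The replication hunks above read relay-log.info at startup when innodb_overwrite_relay_log_info is set, record the master and relay log coordinates inside the transaction at every slave commit, and rewrite relay-log.info after crash recovery if the positions disagree. The file itself is four lines: relay log file name, relay log position, master log file name, master log position. A minimal standalone sketch of reading that layout with plain stdio (the patch uses the server's IO_CACHE helpers init_strvar_from_file()/init_intvar_from_file() instead; the struct and function below are illustrative, not part of the patch):

	#include <stdio.h>

	/* Mirrors the four-line relay-log.info layout that the new
	   innobase_init() code reads and rewrites. */
	struct relay_info {
		char      relay_log_name[512];
		long long relay_log_pos;
		char      master_log_name[512];
		long long master_log_pos;
	};

	static int
	read_relay_info(const char* path, struct relay_info* out)
	{
		FILE*	f = fopen(path, "r");
		int	ok;

		if (f == NULL) {
			return(-1);
		}
		ok = fscanf(f, "%511s %lld %511s %lld",
			    out->relay_log_name, &out->relay_log_pos,
			    out->master_log_name, &out->master_log_pos) == 4;
		fclose(f);
		return(ok ? 0 : -1);
	}
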

=== modified file 'storage/xtradb/handler/i_s.cc'
--- a/storage/xtradb/handler/i_s.cc	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/handler/i_s.cc	2009-05-04 02:45:47 +0000
@@ -2282,8 +2282,7 @@ i_s_cmpmem_fill_low(
 
 	RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&zip_free_mutex);
+	buf_pool_mutex_enter();
 
 	for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
 		buf_buddy_stat_t*	buddy_stat = &buf_buddy_stat[x];
@@ -2309,8 +2308,7 @@ i_s_cmpmem_fill_low(
 		}
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&zip_free_mutex);
+	buf_pool_mutex_exit();
 	DBUG_RETURN(status);
 }
 

=== modified file 'storage/xtradb/handler/innodb_patch_info.h'
--- a/storage/xtradb/handler/innodb_patch_info.h	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/handler/innodb_patch_info.h	2009-05-04 02:45:47 +0000
@@ -26,11 +26,10 @@ struct innodb_enhancement {
 {"xtradb_show_enhancements","I_S.XTRADB_ENHANCEMENTS","","http://www.percona.com/docs/wiki/percona-xtradb"},
 {"innodb_show_status","Improvements to SHOW INNODB STATUS","Memory information and lock info fixes","http://www.percona.com/docs/wiki/percona-xtradb"},
 {"innodb_io","Improvements to InnoDB IO","","http://www.percona.com/docs/wiki/percona-xtradb"},
-{"innodb_rw_lock","InnoDB RW-lock fixes","Useful for 8+ cores SMP systems","http://www.percona.com/docs/wiki/percona-xtradb"},
 {"innodb_opt_lru_count","Fix of buffer_pool mutex","Decreases contention on buffer_pool mutex on LRU operations","http://www.percona.com/docs/wiki/percona-xtradb"},
 {"innodb_buffer_pool_pages","Information of buffer pool content","","http://www.percona.com/docs/wiki/percona-xtradb"},
-{"innodb_split_buf_pool_mutex","More fix of buffer_pool mutex","Spliting buf_pool_mutex and optimizing based on innodb_opt_lru_count","http://www.percona.com/docs/wiki/percona-xtradb"},
 {"innodb_expand_undo_slots","expandable maximum number of undo slots","from 1024 (default) to about 4000","http://www.percona.com/docs/wiki/percona-xtradb"},
 {"innodb_extra_rseg","allow to create extra rollback segments","When create new db, the new parameter allows to create more rollback segments","http://www.percona.com/docs/wiki/percona-xtradb"},
+{"innodb_overwrite_relay_log_info","overwrite relay-log.info when slave recovery","Building as plugin, it is not used.","http://www.percona.com/docs/wiki/percona-xtradb:innodb_overwrite_relay_log_info"},
 {NULL, NULL, NULL, NULL}
 };

=== modified file 'storage/xtradb/include/buf0buddy.h'
--- a/storage/xtradb/include/buf0buddy.h	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/include/buf0buddy.h	2009-05-04 02:45:47 +0000
@@ -49,11 +49,10 @@ buf_buddy_alloc(
 			/* out: allocated block,
 			possibly NULL if lru == NULL */
 	ulint	size,	/* in: block size, up to UNIV_PAGE_SIZE */
-	ibool*	lru,	/* in: pointer to a variable that will be assigned
+	ibool*	lru)	/* in: pointer to a variable that will be assigned
 			TRUE if storage was allocated from the LRU list
 			and buf_pool_mutex was temporarily released,
 			or NULL if the LRU list should not be used */
-	ibool	have_page_hash_mutex)
 	__attribute__((malloc));
 
 /**************************************************************************
@@ -64,8 +63,7 @@ buf_buddy_free(
 /*===========*/
 	void*	buf,	/* in: block to be freed, must not be
 			pointed to by the buffer pool */
-	ulint	size,	/* in: block size, up to UNIV_PAGE_SIZE */
-	ibool	have_page_hash_mutex)
+	ulint	size)	/* in: block size, up to UNIV_PAGE_SIZE */
 	__attribute__((nonnull));
 
 /** Statistics of buddy blocks of a given size. */

=== modified file 'storage/xtradb/include/buf0buddy.ic'
--- a/storage/xtradb/include/buf0buddy.ic	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/include/buf0buddy.ic	2009-05-04 02:45:47 +0000
@@ -44,11 +44,10 @@ buf_buddy_alloc_low(
 			possibly NULL if lru==NULL */
 	ulint	i,	/* in: index of buf_pool->zip_free[],
 			or BUF_BUDDY_SIZES */
-	ibool*	lru,	/* in: pointer to a variable that will be assigned
+	ibool*	lru)	/* in: pointer to a variable that will be assigned
 			TRUE if storage was allocated from the LRU list
 			and buf_pool_mutex was temporarily released,
 			or NULL if the LRU list should not be used */
-	ibool	have_page_hash_mutex)
 	__attribute__((malloc));
 
 /**************************************************************************
@@ -59,9 +58,8 @@ buf_buddy_free_low(
 /*===============*/
 	void*	buf,	/* in: block to be freed, must not be
 			pointed to by the buffer pool */
-	ulint	i,	/* in: index of buf_pool->zip_free[],
+	ulint	i)	/* in: index of buf_pool->zip_free[],
 			or BUF_BUDDY_SIZES */
-	ibool	have_page_hash_mutex)
 	__attribute__((nonnull));
 
 /**************************************************************************
@@ -100,15 +98,14 @@ buf_buddy_alloc(
 			/* out: allocated block,
 			possibly NULL if lru == NULL */
 	ulint	size,	/* in: block size, up to UNIV_PAGE_SIZE */
-	ibool*	lru,	/* in: pointer to a variable that will be assigned
+	ibool*	lru)	/* in: pointer to a variable that will be assigned
 			TRUE if storage was allocated from the LRU list
 			and buf_pool_mutex was temporarily released,
 			or NULL if the LRU list should not be used */
-	ibool	have_page_hash_mutex)
 {
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 
-	return(buf_buddy_alloc_low(buf_buddy_get_slot(size), lru, have_page_hash_mutex));
+	return(buf_buddy_alloc_low(buf_buddy_get_slot(size), lru));
 }
 
 /**************************************************************************
@@ -119,26 +116,11 @@ buf_buddy_free(
 /*===========*/
 	void*	buf,	/* in: block to be freed, must not be
 			pointed to by the buffer pool */
-	ulint	size,	/* in: block size, up to UNIV_PAGE_SIZE */
-	ibool	have_page_hash_mutex)
+	ulint	size)	/* in: block size, up to UNIV_PAGE_SIZE */
 {
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 
-	if (!have_page_hash_mutex) {
-		mutex_enter(&LRU_list_mutex);
-		mutex_enter(&flush_list_mutex);
-		rw_lock_x_lock(&page_hash_latch);
-	}
-
-	mutex_enter(&zip_free_mutex);
-	buf_buddy_free_low(buf, buf_buddy_get_slot(size), TRUE);
-	mutex_exit(&zip_free_mutex);
-
-	if (!have_page_hash_mutex) {
-		mutex_exit(&LRU_list_mutex);
-		mutex_exit(&flush_list_mutex);
-		rw_lock_x_unlock(&page_hash_latch);
-	}
+	buf_buddy_free_low(buf, buf_buddy_get_slot(size));
 }
 
 #ifdef UNIV_MATERIALIZE
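
After this hunk buf_buddy_alloc() and buf_buddy_free() are again thin wrappers that assert buf_pool_mutex ownership and hand the request to the low-level routines for the size class returned by buf_buddy_get_slot(). That helper is not part of the diff; conceptually it rounds the request up to the smallest power-of-two class that fits, along the lines of the sketch below (illustrative; the parameter smallest_class stands in for the real minimum buddy block size, which is not shown here):

	/* Sketch: return the index i of the smallest size class
	   (smallest_class << i bytes) that can hold `size' bytes. */
	static ulint
	example_buddy_slot(ulint size, ulint smallest_class)
	{
		ulint	i = 0;
		ulint	c = smallest_class;

		while (c < size) {
			c <<= 1;
			i++;
		}

		return(i);
	}
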

=== modified file 'storage/xtradb/include/buf0buf.h'
--- a/storage/xtradb/include/buf0buf.h	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/include/buf0buf.h	2009-05-04 04:32:30 +0000
@@ -1104,11 +1104,11 @@ struct buf_block_struct{
 					a block is in the unzip_LRU list
 					if page.state == BUF_BLOCK_FILE_PAGE
 					and page.zip.data != NULL */
-//#ifdef UNIV_DEBUG
+#ifdef UNIV_DEBUG
 	ibool		in_unzip_LRU_list;/* TRUE if the page is in the
 					decompressed LRU list;
 					used in debugging */
-//#endif /* UNIV_DEBUG */
+#endif /* UNIV_DEBUG */
 	byte*		frame;		/* pointer to buffer frame which
 					is of size UNIV_PAGE_SIZE, and
 					aligned to an address divisible by
@@ -1316,12 +1316,6 @@ struct buf_pool_struct{
 /* mutex protecting the buffer pool struct and control blocks, except the
 read-write lock in them */
 extern mutex_t	buf_pool_mutex;
-extern mutex_t	LRU_list_mutex;
-extern mutex_t	flush_list_mutex;
-extern rw_lock_t	page_hash_latch;
-extern mutex_t	free_list_mutex;
-extern mutex_t	zip_free_mutex;
-extern mutex_t	zip_hash_mutex;
 /* mutex protecting the control blocks of compressed-only pages
 (of type buf_page_t, not buf_block_t) */
 extern mutex_t	buf_pool_zip_mutex;

=== modified file 'storage/xtradb/include/buf0buf.ic'
--- a/storage/xtradb/include/buf0buf.ic	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/include/buf0buf.ic	2009-05-04 02:45:47 +0000
@@ -100,8 +100,7 @@ buf_pool_get_oldest_modification(void)
 	buf_page_t*	bpage;
 	ib_uint64_t	lsn;
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&flush_list_mutex);
+	buf_pool_mutex_enter();
 
 	bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
 
@@ -112,8 +111,7 @@ buf_pool_get_oldest_modification(void)
 		lsn = bpage->oldest_modification;
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&flush_list_mutex);
+	buf_pool_mutex_exit();
 
 	/* The returned answer may be out of date: the flush_list can
 	change after the mutex has been released. */
@@ -130,8 +128,7 @@ buf_pool_clock_tic(void)
 /*====================*/
 			/* out: new clock value */
 {
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 
 	buf_pool->ulint_clock++;
 
@@ -249,7 +246,7 @@ buf_page_in_file(
 	case BUF_BLOCK_ZIP_FREE:
 		/* This is a free page in buf_pool->zip_free[].
 		Such pages should only be accessed by the buddy allocator. */
-		/* ut_error; */ /* optimistic */
+		ut_error;
 		break;
 	case BUF_BLOCK_ZIP_PAGE:
 	case BUF_BLOCK_ZIP_DIRTY:
@@ -308,7 +305,7 @@ buf_page_get_mutex(
 {
 	switch (buf_page_get_state(bpage)) {
 	case BUF_BLOCK_ZIP_FREE:
-		/* ut_error; */ /* optimistic */
+		ut_error;
 		return(NULL);
 	case BUF_BLOCK_ZIP_PAGE:
 	case BUF_BLOCK_ZIP_DIRTY:
@@ -413,7 +410,7 @@ buf_page_set_io_fix(
 	buf_page_t*	bpage,	/* in/out: control block */
 	enum buf_io_fix	io_fix)	/* in: io_fix state */
 {
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 
 	bpage->io_fix = io_fix;
@@ -441,13 +438,12 @@ buf_page_can_relocate(
 /*==================*/
 	const buf_page_t*	bpage)	/* control block being relocated */
 {
-	//ut_ad(buf_pool_mutex_own());
-	/* optimistic */
-	//ut_ad(mutex_own(buf_page_get_mutex(bpage)));
-	//ut_ad(buf_page_in_file(bpage));
-	//ut_ad(bpage->in_LRU_list);
+	ut_ad(buf_pool_mutex_own());
+	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
+	ut_ad(buf_page_in_file(bpage));
+	ut_ad(bpage->in_LRU_list);
 
-	return(bpage->in_LRU_list && bpage->io_fix == BUF_IO_NONE
+	return(buf_page_get_io_fix(bpage) == BUF_IO_NONE
 	       && bpage->buf_fix_count == 0);
 }
 
@@ -476,8 +472,7 @@ buf_page_set_old(
 	ibool		old)	/* in: old */
 {
 	ut_a(buf_page_in_file(bpage));
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(bpage->in_LRU_list);
 
 #ifdef UNIV_LRU_DEBUG
@@ -733,17 +728,17 @@ buf_block_free(
 /*===========*/
 	buf_block_t*	block)	/* in, own: block to be freed */
 {
-	//buf_pool_mutex_enter();
+	buf_pool_mutex_enter();
 
 	mutex_enter(&block->mutex);
 
 	ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
 
-	buf_LRU_block_free_non_file_page(block, FALSE);
+	buf_LRU_block_free_non_file_page(block);
 
 	mutex_exit(&block->mutex);
 
-	//buf_pool_mutex_exit();
+	buf_pool_mutex_exit();
 }
 
 /*************************************************************************
@@ -788,17 +783,14 @@ buf_page_io_query(
 	buf_page_t*	bpage)	/* in: buf_pool block, must be bufferfixed */
 {
 	ibool	io_fixed;
-	mutex_t* block_mutex = buf_page_get_mutex(bpage);
 
-	//buf_pool_mutex_enter();
-	mutex_enter(block_mutex);
+	buf_pool_mutex_enter();
 
 	ut_ad(buf_page_in_file(bpage));
 	ut_ad(bpage->buf_fix_count > 0);
 
 	io_fixed = buf_page_get_io_fix(bpage) != BUF_IO_NONE;
-	//buf_pool_mutex_exit();
-	mutex_exit(block_mutex);
+	buf_pool_mutex_exit();
 
 	return(io_fixed);
 }
@@ -925,11 +917,7 @@ buf_page_hash_get(
 	ulint		fold;
 
 	ut_ad(buf_pool);
-	//ut_ad(buf_pool_mutex_own());
-#ifdef UNIV_SYNC_DEBUG
-	ut_ad(rw_lock_own(&page_hash_latch, RW_LOCK_EX)
-	      || rw_lock_own(&page_hash_latch, RW_LOCK_SHARED));
-#endif
+	ut_ad(buf_pool_mutex_own());
 
 	/* Look for the page in the hash table */
 
@@ -978,13 +966,11 @@ buf_page_peek(
 {
 	const buf_page_t*	bpage;
 
-	//buf_pool_mutex_enter();
-	rw_lock_s_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 
 	bpage = buf_page_hash_get(space, offset);
 
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 
 	return(bpage != NULL);
 }
@@ -1047,17 +1033,12 @@ buf_page_release(
 	ut_a(block->page.buf_fix_count > 0);
 
 	if (rw_latch == RW_X_LATCH && mtr->modifications) {
-		//buf_pool_mutex_enter();
-		mutex_enter(&flush_list_mutex);
-		mutex_enter(&block->mutex);
-		ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
+		buf_pool_mutex_enter();
 		buf_flush_note_modification(block, mtr);
-		//buf_pool_mutex_exit();
-		mutex_exit(&flush_list_mutex);
+		buf_pool_mutex_exit();
 	}
-	else {
+
 	mutex_enter(&block->mutex);
-	}
 
 #ifdef UNIV_SYNC_DEBUG
 	rw_lock_s_unlock(&(block->debug_latch));

=== modified file 'storage/xtradb/include/buf0flu.ic'
--- a/storage/xtradb/include/buf0flu.ic	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/include/buf0flu.ic	2009-05-04 02:45:47 +0000
@@ -59,8 +59,7 @@ buf_flush_note_modification(
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
 #endif /* UNIV_SYNC_DEBUG */
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&flush_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 
 	ut_ad(mtr->start_lsn != 0);
 	ut_ad(mtr->modifications);
@@ -100,8 +99,7 @@ buf_flush_recv_note_modification(
 	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
 #endif /* UNIV_SYNC_DEBUG */
 
-	//buf_pool_mutex_enter();
-	mutex_enter(&flush_list_mutex);
+	buf_pool_mutex_enter();
 
 	ut_ad(block->page.newest_modification <= end_lsn);
 
@@ -118,6 +116,5 @@ buf_flush_recv_note_modification(
 		ut_ad(block->page.oldest_modification <= start_lsn);
 	}
 
-	//buf_pool_mutex_exit();
-	mutex_exit(&flush_list_mutex);
+	buf_pool_mutex_exit();
 }

=== modified file 'storage/xtradb/include/buf0lru.h'
--- a/storage/xtradb/include/buf0lru.h	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/include/buf0lru.h	2009-05-04 02:45:47 +0000
@@ -122,11 +122,10 @@ buf_LRU_free_block(
 	buf_page_t*	bpage,	/* in: block to be freed */
 	ibool		zip,	/* in: TRUE if should remove also the
 				compressed page of an uncompressed page */
-	ibool*		buf_pool_mutex_released,
+	ibool*		buf_pool_mutex_released);
 				/* in: pointer to a variable that will
 				be assigned TRUE if buf_pool_mutex
 				was temporarily released, or NULL */
-	ibool		have_LRU_mutex);
 /**********************************************************************
 Try to free a replaceable block. */
 UNIV_INTERN
@@ -170,8 +169,7 @@ UNIV_INTERN
 void
 buf_LRU_block_free_non_file_page(
 /*=============================*/
-	buf_block_t*	block,	/* in: block, must not contain a file page */
-	ibool		have_page_hash_mutex);
+	buf_block_t*	block);	/* in: block, must not contain a file page */
 /**********************************************************************
 Adds a block to the LRU list. */
 UNIV_INTERN

=== modified file 'storage/xtradb/include/sync0rw.h'
--- a/storage/xtradb/include/sync0rw.h	2009-06-09 15:08:46 +0000
+++ b/storage/xtradb/include/sync0rw.h	2009-06-10 06:51:03 +0000
@@ -361,17 +361,7 @@ Accessor functions for rw lock. */
 #ifdef INNODB_RW_LOCKS_USE_ATOMICS
 UNIV_INLINE
 ulint
-rw_lock_get_s_waiters(
-/*==================*/
-	rw_lock_t*	lock);
-UNIV_INLINE
-ulint
-rw_lock_get_x_waiters(
-/*==================*/
-	rw_lock_t*	lock);
-UNIV_INLINE
-ulint
-rw_lock_get_wx_waiters(
+rw_lock_get_waiters(
 /*================*/
 	rw_lock_t*	lock);
 #else /* !INNODB_RW_LOCKS_USE_ATOMICS */
@@ -498,16 +488,6 @@ rw_lock_debug_print(
 	rw_lock_debug_t*	info);	/* in: debug struct */
 #endif /* UNIV_SYNC_DEBUG */
 
-/*
-#ifndef INNODB_RW_LOCKS_USE_ATOMICS
-#error INNODB_RW_LOCKS_USE_ATOMICS is not defined. Do you use enough new GCC or compatibles?
-#error Or do you use exact options for CFLAGS?
-#error e.g. (for x86_32): "-m32 -march=i586 -mtune=i686"
-#error e.g. (for Sparc_64): "-m64 -mcpu=v9"
-#error Otherwise, this build may be slower than normal version.
-#endif
-*/
-
 /* NOTE! The structure appears here only for the compiler to know its size.
 Do not use its fields directly! The structure used in the spin lock
 implementation of a read-write lock. Several threads may have a shared lock
@@ -519,16 +499,7 @@ no new readers will be let in while the 
 struct rw_lock_struct {
 	volatile lint	lock_word;
 				/* Holds the state of the lock. */
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	volatile ulint	s_waiters;	/* 1: there are waiters (s_lock) */
-	volatile ulint	x_waiters;	/* 1: there are waiters (x_lock) */
-	volatile ulint	wait_ex_waiters;	/* 1: there are waiters (wait_ex) */
-	volatile ulint	reader_count;	/* Number of readers who have locked this
- 				lock in the shared mode */
-	volatile ulint	writer;
-#else
 	volatile ulint	waiters;/* 1: there are waiters */
-#endif
 	volatile ibool	recursive;/* Default value FALSE which means the lock
 				is non-recursive. The value is typically set
 				to TRUE making normal rw_locks recursive. In
@@ -545,16 +516,7 @@ struct rw_lock_struct {
 				/* Thread id of writer thread. Is only
 				guaranteed to have sane and non-stale
 				value iff recursive flag is set. */
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	volatile ulint	writer_count;	/* Number of times the same thread has
- 				recursively locked the lock in the exclusive
- 				mode */
-			/* Used by sync0arr.c for thread queueing */
-	os_event_t	s_event;	/* Used for s_lock */
-	os_event_t	x_event;	/* Used for x_lock */
-#else
 	os_event_t	event;	/* Used by sync0arr.c for thread queueing */
-#endif
 	os_event_t	wait_ex_event;
 				/* Event for next-writer to wait on. A thread
 				must decrement lock_word before waiting. */
@@ -576,7 +538,7 @@ struct rw_lock_struct {
         /* last s-lock file/line is not guaranteed to be correct */
 	const char*	last_s_file_name;/* File name where last s-locked */
 	const char*	last_x_file_name;/* File name where last x-locked */
-	volatile ibool		writer_is_wait_ex;
+	ibool		writer_is_wait_ex;
 				/* This is TRUE if the writer field is
 				RW_LOCK_WAIT_EX; this field is located far
 				from the memory update hotspot fields which

=== modified file 'storage/xtradb/include/sync0rw.ic'
--- a/storage/xtradb/include/sync0rw.ic	2009-06-09 15:08:46 +0000
+++ b/storage/xtradb/include/sync0rw.ic	2009-06-10 06:51:03 +0000
@@ -68,34 +68,6 @@ rw_lock_remove_debug_info(
 
 /************************************************************************
 Accessor functions for rw lock. */
-
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-UNIV_INLINE
-ulint
-rw_lock_get_s_waiters(
-/*================*/
-				/* out: 1 if waiters, 0 otherwise */
-	rw_lock_t*	lock)	/* in: rw-lock */
-{
-	return(lock->s_waiters);
-}
-UNIV_INLINE
-ulint
-rw_lock_get_x_waiters(
-/*================*/
-	rw_lock_t*	lock)
-{
-	return(lock->x_waiters);
-}
-UNIV_INLINE
-ulint
-rw_lock_get_wx_waiters(
-/*================*/
-	rw_lock_t*      lock)
-{
-	return(lock->wait_ex_waiters);
-}
-#else /* !INNODB_RW_LOCKS_USE_ATOMICS */
 UNIV_INLINE
 ulint
 rw_lock_get_waiters(
@@ -105,95 +77,40 @@ rw_lock_get_waiters(
 {
 	return(lock->waiters);
 }
-#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
-
 
 /************************************************************************
 Sets lock->waiters to 1. It is not an error if lock->waiters is already
 1. On platforms where ATOMIC builtins are used this function enforces a
 memory barrier. */
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-UNIV_INLINE
-void
-rw_lock_set_s_waiter_flag(
-/*====================*/
-	rw_lock_t*	lock)	/* in: rw-lock */
-{
-	// os_compare_and_swap(&lock->s_waiters, 0, 1);
-	__sync_lock_test_and_set(&lock->s_waiters, 1);
-}
-UNIV_INLINE
-void
-rw_lock_set_x_waiter_flag(
-/*====================*/
-	rw_lock_t*	lock)	/* in: rw-lock */
-{
-	// os_compare_and_swap(&lock->x_waiters, 0, 1);
-	__sync_lock_test_and_set(&lock->x_waiters, 1);
-}
-UNIV_INLINE
-void
-rw_lock_set_wx_waiter_flag(
-/*====================*/
-	rw_lock_t*	lock)	/* in: rw-lock */
-{
-	// os_compare_and_swap(&lock->wait_ex_waiters, 0, 1);
-	__sync_lock_test_and_set(&lock->wait_ex_waiters, 1);
-}
-#else /* !INNODB_RW_LOCKS_USE_ATOMICS */
 UNIV_INLINE
 void
 rw_lock_set_waiter_flag(
 /*====================*/
 	rw_lock_t*	lock)	/* in: rw-lock */
 {
+#ifdef INNODB_RW_LOCKS_USE_ATOMICS
+	os_compare_and_swap(&lock->waiters, 0, 1);
+#else /* INNODB_RW_LOCKS_USE_ATOMICS */
 	lock->waiters = 1;
-}
 #endif /* INNODB_RW_LOCKS_USE_ATOMICS */
+}
 
 /************************************************************************
 Resets lock->waiters to 0. It is not an error if lock->waiters is already
 0. On platforms where ATOMIC builtins are used this function enforces a
 memory barrier. */
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-
-UNIV_INLINE
-void
-rw_lock_reset_s_waiter_flag(
-/*======================*/
-	rw_lock_t*	lock)	/* in: rw-lock */
-{
-	// os_compare_and_swap(&lock->s_waiters, 1, 0);
-	__sync_lock_test_and_set(&lock->s_waiters, 0);
-}
-UNIV_INLINE
-void
-rw_lock_reset_x_waiter_flag(
-/*======================*/
-	rw_lock_t*	lock)	/* in: rw-lock */
-{
-	// os_compare_and_swap(&lock->x_waiters, 1, 0);
-	__sync_lock_test_and_set(&lock->x_waiters, 0);
-}
-UNIV_INLINE
-void
-rw_lock_reset_wx_waiter_flag(
-/*======================*/
-	rw_lock_t*	lock)	/* in: rw-lock */
-{
-	// os_compare_and_swap(&lock->wait_ex_waiters, 1, 0);
-	__sync_lock_test_and_set(&lock->wait_ex_waiters, 0);
-}
-#else /* !INNODB_RW_LOCKS_USE_ATOMICS */
 UNIV_INLINE
 void
 rw_lock_reset_waiter_flag(
 /*======================*/
 	rw_lock_t*	lock)	/* in: rw-lock */
 {
+#ifdef INNODB_RW_LOCKS_USE_ATOMICS
+	os_compare_and_swap(&lock->waiters, 1, 0);
+#else /* INNODB_RW_LOCKS_USE_ATOMICS */
 	lock->waiters = 0;
-}
 #endif /* INNODB_RW_LOCKS_USE_ATOMICS */
+}
 
 /**********************************************************************
 Returns the write-status of the lock - this function made more sense
@@ -204,17 +121,6 @@ rw_lock_get_writer(
 /*===============*/
 	rw_lock_t*	lock)
 {
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	if (lock->writer == RW_LOCK_NOT_LOCKED) {
-		return(RW_LOCK_NOT_LOCKED);
-	}
-
-	if (lock->writer_is_wait_ex) {
-		return(RW_LOCK_WAIT_EX);
-	} else {
-		return(RW_LOCK_EX);
-	}
-#else
 	lint lock_word = lock->lock_word;
 	if(lock_word > 0) {
 		/* return NOT_LOCKED in s-lock state, like the writer
@@ -226,7 +132,6 @@ rw_lock_get_writer(
                 ut_ad(lock_word > -X_LOCK_DECR);
 		return(RW_LOCK_WAIT_EX);
 	}
-#endif
 }
 
 /**********************************************************************
@@ -237,9 +142,6 @@ rw_lock_get_reader_count(
 /*=====================*/
 	rw_lock_t*	lock)
 {
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	return(lock->reader_count);
-#else
 	lint lock_word = lock->lock_word;
 	if(lock_word > 0) {
 		/* s-locked, no x-waiters */
@@ -249,7 +151,6 @@ rw_lock_get_reader_count(
 		return((ulint)(-lock_word));
 	}
 	return(0);
-#endif
 }
 
 #ifndef INNODB_RW_LOCKS_USE_ATOMICS
@@ -273,16 +174,12 @@ rw_lock_get_x_lock_count(
 				/* out: value of writer_count */
 	rw_lock_t*	lock)	/* in: rw-lock */
 {
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	return(lock->writer_count);
-#else
 	lint lock_copy = lock->lock_word;
 	/* If there is a reader, lock_word is not divisible by X_LOCK_DECR */
 	if(lock_copy > 0 || (-lock_copy) % X_LOCK_DECR != 0) {
 		return(0);
 	}
 	return(((-lock_copy) / X_LOCK_DECR) + 1);
-#endif
 }
 
 /**********************************************************************
@@ -420,26 +317,11 @@ rw_lock_s_lock_low(
 	const char*	file_name, /* in: file name where lock requested */
 	ulint		line)	/* in: line where requested */
 {
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	if (UNIV_LIKELY(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED)) {
-		/* try s-lock */
-		if(__sync_sub_and_fetch(&(lock->lock_word),1) <= 0) {
-			/* fail */
-			__sync_fetch_and_add(&(lock->lock_word),1);
-			return(FALSE);  /* locking did not succeed */
-		}
-		/* success */
-		__sync_fetch_and_add(&(lock->reader_count),1);
-	} else {
-		return(FALSE);  /* locking did not succeed */
-	}
-#else
 	/* TODO: study performance of UNIV_LIKELY branch prediction hints. */
 	if (!rw_lock_lock_word_decr(lock, 1)) {
 		/* Locking did not succeed */
 		return(FALSE);
 	}
-#endif
 
 #ifdef UNIV_SYNC_DEBUG
 	rw_lock_add_debug_info(lock, pass, RW_LOCK_SHARED, file_name, line);
@@ -464,17 +346,10 @@ rw_lock_s_lock_direct(
 	const char*	file_name,	/* in: file name where requested */
 	ulint		line)		/* in: line where lock requested */
 {
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	ut_ad(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED);
-	ut_ad(rw_lock_get_reader_count(lock) == 0);
-
-	__sync_fetch_and_add(&(lock->reader_count),1);
-#else
 	ut_ad(lock->lock_word == X_LOCK_DECR);
 
 	/* Indicate there is a new reader by decrementing lock_word */
 	lock->lock_word--;
-#endif
 
 	lock->last_s_file_name = file_name;
 	lock->last_s_line = line;
@@ -497,17 +372,9 @@ rw_lock_x_lock_direct(
 	ulint		line)		/* in: line where lock requested */
 {
 	ut_ad(rw_lock_validate(lock));
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	ut_ad(rw_lock_get_reader_count(lock) == 0);
-	ut_ad(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED);
-
-	lock->writer = RW_LOCK_EX;
-	__sync_fetch_and_add(&(lock->writer_count),1);
-#else
 	ut_ad(lock->lock_word == X_LOCK_DECR);
 
 	lock->lock_word -= X_LOCK_DECR;
-#endif
 	lock->writer_thread = os_thread_get_curr_id();
 	lock->recursive = TRUE;
 
@@ -581,56 +448,7 @@ rw_lock_x_lock_func_nowait(
 	ibool success;
 
 #ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	success = FALSE;
-	if ((lock->reader_count == 0)
-			&& rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) {
-retry_x_lock:
-		/* try x-lock */
-		if(__sync_sub_and_fetch(&(lock->lock_word),
-				X_LOCK_DECR) == 0) {
-			/* success */
-			/* try to lock writer */
-			if(__sync_lock_test_and_set(&(lock->writer),RW_LOCK_EX)
-					== RW_LOCK_NOT_LOCKED) {
-				/* success */
-				lock->writer_thread = curr_thread;
-				lock->recursive = TRUE;
-				lock->writer_is_wait_ex = FALSE;
-				/* next function may work as memory barrier */
-			relock:
-				__sync_fetch_and_add(&(lock->writer_count),1);
-
-#ifdef UNIV_SYNC_DEBUG
-				rw_lock_add_debug_info(lock, 0, RW_LOCK_EX, file_name, line);
-#endif
-
-				lock->last_x_file_name = file_name;
-				lock->last_x_line = line;
-
-				ut_ad(rw_lock_validate(lock));
-
-				return(TRUE);
-			} else {
-				/* x-unlock */
-				__sync_fetch_and_add(&(lock->lock_word),
-					X_LOCK_DECR);
-			}
-		} else {
-			/* fail (x-lock) */
-			if (__sync_fetch_and_add(&(lock->lock_word),X_LOCK_DECR)
-					== 0)
-				goto retry_x_lock;
-		}
-	}
-
-	if (lock->recursive
-			&& os_thread_eq(lock->writer_thread, curr_thread)) {
-		goto relock;
-	}
-
-	//ut_ad(rw_lock_validate(lock));
-
-	return(FALSE);
+	success = os_compare_and_swap(&(lock->lock_word), X_LOCK_DECR, 0);
 #else
 
 	success = FALSE;
@@ -641,6 +459,7 @@ retry_x_lock:
 	}
 	mutex_exit(&(lock->mutex));
 
+#endif
 	if (success) {
 		rw_lock_set_writer_id_and_recursion_flag(lock, TRUE);
 
@@ -667,7 +486,6 @@ retry_x_lock:
 	ut_ad(rw_lock_validate(lock));
 
 	return(TRUE);
-#endif
 }
 
 /**********************************************************************
@@ -683,31 +501,6 @@ rw_lock_s_unlock_func(
 #endif
 	)
 {
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	ibool	last	= FALSE;
-
-	ut_a(lock->reader_count > 0);
-
-	/* unlock lock_word */
-	__sync_fetch_and_add(&(lock->lock_word),1);
-
-	if(__sync_sub_and_fetch(&(lock->reader_count),1) == 0) {
-		last = TRUE;
-	}
-
-#ifdef UNIV_SYNC_DEBUG
-	rw_lock_remove_debug_info(lock, pass, RW_LOCK_SHARED);
-#endif
-
-	if (UNIV_UNLIKELY(last && __sync_lock_test_and_set(&lock->wait_ex_waiters, 0))) {
-		os_event_set(lock->wait_ex_event);
-		sync_array_object_signalled(sync_primary_wait_array);
-	}
-	else if (UNIV_UNLIKELY(last && __sync_lock_test_and_set(&lock->x_waiters, 0))) {
-		os_event_set(lock->x_event);
-		sync_array_object_signalled(sync_primary_wait_array);
-	}
-#else
 	ut_ad((lock->lock_word % X_LOCK_DECR) != 0);
 
 #ifdef UNIV_SYNC_DEBUG
@@ -724,7 +517,6 @@ rw_lock_s_unlock_func(
 		sync_array_object_signalled(sync_primary_wait_array);
 
 	}
-#endif
 
 	ut_ad(rw_lock_validate(lock));
 
@@ -742,19 +534,6 @@ rw_lock_s_unlock_direct(
 /*====================*/
 	rw_lock_t*	lock)	/* in: rw-lock */
 {
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	ut_ad(lock->reader_count > 0);
-
-	__sync_sub_and_fetch(&(lock->reader_count),1);
-
-#ifdef UNIV_SYNC_DEBUG
-	rw_lock_remove_debug_info(lock, 0, RW_LOCK_SHARED);
-#endif
-
-	ut_ad(!lock->s_waiters);
-	ut_ad(!lock->x_waiters);
-	ut_ad(!lock->wait_ex_waiters);
-#else
 	ut_ad(lock->lock_word < X_LOCK_DECR);
 
 #ifdef UNIV_SYNC_DEBUG
@@ -765,7 +544,6 @@ rw_lock_s_unlock_direct(
 	lock->lock_word++;
 
 	ut_ad(!lock->waiters);
-#endif
 	ut_ad(rw_lock_validate(lock));
 #ifdef UNIV_SYNC_PERF_STAT
 	rw_s_exit_count++;
@@ -785,49 +563,6 @@ rw_lock_x_unlock_func(
 #endif
 	)
 {
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	ibool	last	= FALSE;
-	ibool	s_sg	= FALSE;
-	ibool	x_sg	= FALSE;
-
-	ut_ad(lock->writer_count > 0);
-
-	if(__sync_sub_and_fetch(&(lock->writer_count),1) == 0) {
-		last = TRUE;
-	}
-
-	if (last) {
-		/* unlock lock_word */
-		__sync_fetch_and_add(&(lock->lock_word),X_LOCK_DECR);
-
-		lock->recursive = FALSE;
-		/* FIXME: It is a value of bad manners for pthread.
-		          But we shouldn't keep an ID of not-owner. */
-		lock->writer_thread = -1;
-		__sync_lock_test_and_set(&(lock->writer),RW_LOCK_NOT_LOCKED);
-	}
-
-#ifdef UNIV_SYNC_DEBUG
-	rw_lock_remove_debug_info(lock, pass, RW_LOCK_EX);
-#endif
-	if (last) {
-		if(__sync_lock_test_and_set(&lock->s_waiters, 0)){
-			s_sg = TRUE;
-		}
-		if(__sync_lock_test_and_set(&lock->x_waiters, 0)){
-			x_sg = TRUE;
-		}
-	}
-
-	if (UNIV_UNLIKELY(s_sg)) {
-		os_event_set(lock->s_event);
-		sync_array_object_signalled(sync_primary_wait_array);
-	}
-	if (UNIV_UNLIKELY(x_sg)) {
-		os_event_set(lock->x_event);
-		sync_array_object_signalled(sync_primary_wait_array);
-	}
-#else
 	ut_ad((lock->lock_word % X_LOCK_DECR) == 0);
 
 	/* lock->recursive flag also indicates if lock->writer_thread is
@@ -858,7 +593,6 @@ rw_lock_x_unlock_func(
 		}
 	}
 
-#endif
 	ut_ad(rw_lock_validate(lock));
 
 #ifdef UNIV_SYNC_PERF_STAT
@@ -878,19 +612,6 @@ rw_lock_x_unlock_direct(
 	/* Reset the exclusive lock if this thread no longer has an x-mode
 	lock */
 
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	if(__sync_sub_and_fetch(&(lock->writer_count),1) == 0) {
-		lock->writer = RW_LOCK_NOT_LOCKED;
-	}
-
-#ifdef UNIV_SYNC_DEBUG
-	rw_lock_remove_debug_info(lock, 0, RW_LOCK_EX);
-#endif
-
-	ut_ad(!lock->s_waiters);
-	ut_ad(!lock->x_waiters);
-	ut_ad(!lock->wait_ex_waiters);
-#else
 	ut_ad((lock->lock_word % X_LOCK_DECR) == 0);
 
 #ifdef UNIV_SYNC_DEBUG
@@ -906,7 +627,6 @@ rw_lock_x_unlock_direct(
 	lock->lock_word += X_LOCK_DECR;
 
 	ut_ad(!lock->waiters);
-#endif
 	ut_ad(rw_lock_validate(lock));
 
 #ifdef UNIV_SYNC_PERF_STAT

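With the XtraDB-specific reader_count/writer_count bookkeeping gone, these inline functions are back on the single lock_word encoding: an s-lock takes one unit, an x-lock takes X_LOCK_DECR units in one step, and rw_lock_x_lock_func_nowait() shrinks to a single compare-and-swap. The sketch below is a minimal stand-alone model of that encoding, not the real sync0rw code; my_rw_lock, MY_X_LOCK_DECR and the raw __sync builtins are stand-ins for the actual types and for os_compare_and_swap().

#include <stdbool.h>

/* Illustrative model of the single lock_word scheme:
     lock_word == MY_X_LOCK_DECR      -> unlocked
     0 < lock_word < MY_X_LOCK_DECR   -> (MY_X_LOCK_DECR - lock_word) readers
     lock_word == 0                   -> one writer, no readers */
#define MY_X_LOCK_DECR	0x00100000

typedef struct {
	volatile long	lock_word;
} my_rw_lock;

/* s-lock without waiting: claim one reader unit unless a writer is present */
static bool
my_rw_s_trylock(my_rw_lock* l)
{
	if (__sync_sub_and_fetch(&l->lock_word, 1) > 0) {
		return(true);		/* no writer: we hold a shared lock */
	}
	__sync_add_and_fetch(&l->lock_word, 1);	/* writer present: back off */
	return(false);
}

/* x-lock without waiting: one atomic swap from "fully unlocked" to
   "one writer", the shape rw_lock_x_lock_func_nowait() takes above */
static bool
my_rw_x_trylock(my_rw_lock* l)
{
	return(__sync_bool_compare_and_swap(&l->lock_word,
					    MY_X_LOCK_DECR, 0));
}

Unlocking is just the inverse add, which is what the restored unlock bodies above do (lock_word++ for a reader, lock_word += X_LOCK_DECR for the writer) before testing the single waiters flag.
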
=== modified file 'storage/xtradb/include/trx0sys.h'
--- a/storage/xtradb/include/trx0sys.h	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/include/trx0sys.h	2009-04-07 20:51:15 +0000
@@ -49,6 +49,9 @@ or there was no master log position info
 extern char		trx_sys_mysql_master_log_name[];
 extern ib_int64_t	trx_sys_mysql_master_log_pos;
 
+extern char		trx_sys_mysql_relay_log_name[];
+extern ib_int64_t	trx_sys_mysql_relay_log_pos;
+
 /* If this MySQL server uses binary logging, after InnoDB has been inited
 and if it has done a crash recovery, we store the binlog file name and position
 here. If .._pos is -1, it means there was no binlog position info inside
@@ -290,7 +293,7 @@ UNIV_INTERN
 void
 trx_sys_update_mysql_binlog_offset(
 /*===============================*/
-	const char*	file_name,/* in: MySQL log file name */
+	const char*	file_name_in,/* in: MySQL log file name */
 	ib_int64_t	offset,	/* in: position in that log file */
 	ulint		field,	/* in: offset of the MySQL log info field in
 				the trx sys header */
@@ -421,6 +424,7 @@ therefore 256; each slot is currently 8 
 #define	TRX_SYS_N_RSEGS		256
 
 #define TRX_SYS_MYSQL_LOG_NAME_LEN	512
+#define TRX_SYS_MYSQL_MASTER_LOG_NAME_LEN	480	/* the name must fit in (500 - 12) bytes */
 #define TRX_SYS_MYSQL_LOG_MAGIC_N	873422344
 
 #if UNIV_PAGE_SIZE < 4096
@@ -429,6 +433,7 @@ therefore 256; each slot is currently 8 
 /* The offset of the MySQL replication info in the trx system header;
 this contains the same fields as TRX_SYS_MYSQL_LOG_INFO below */
 #define TRX_SYS_MYSQL_MASTER_LOG_INFO	(UNIV_PAGE_SIZE - 2000)
+#define TRX_SYS_MYSQL_RELAY_LOG_INFO	(UNIV_PAGE_SIZE - 1500)
 
 /* The offset of the MySQL binlog offset info in the trx system header */
 #define TRX_SYS_MYSQL_LOG_INFO		(UNIV_PAGE_SIZE - 1000)

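The new TRX_SYS_MYSQL_RELAY_LOG_INFO slot sits 500 bytes after the master slot and 500 bytes before the binlog slot, and the name field starts 12 bytes into a slot (magic number plus the two 4-byte offset words), which is where the (500 - 12) note above comes from and why the cap is 480 rather than the 512 of TRX_SYS_MYSQL_LOG_NAME_LEN. A small illustrative program, assuming the usual 16 KB UNIV_PAGE_SIZE; the constants are copied from the hunk above, the program itself is only a sketch.

#include <stdio.h>

#define UNIV_PAGE_SIZE			16384	/* assumed default page size */

#define TRX_SYS_MYSQL_MASTER_LOG_INFO	(UNIV_PAGE_SIZE - 2000)	/* = 14384 */
#define TRX_SYS_MYSQL_RELAY_LOG_INFO	(UNIV_PAGE_SIZE - 1500)	/* = 14884 */
#define TRX_SYS_MYSQL_LOG_INFO		(UNIV_PAGE_SIZE - 1000)	/* = 15384 */

int
main(void)
{
	/* Each slot has only 500 bytes before the next one begins, and the
	   first 12 of those hold the magic number and the two 4-byte offset
	   words, so the stored file name must stay under (500 - 12) bytes;
	   hence the 480-byte TRX_SYS_MYSQL_MASTER_LOG_NAME_LEN cap. */
	printf("master %d  relay %d  binlog %d  gap %d\n",
	       TRX_SYS_MYSQL_MASTER_LOG_INFO,
	       TRX_SYS_MYSQL_RELAY_LOG_INFO,
	       TRX_SYS_MYSQL_LOG_INFO,
	       TRX_SYS_MYSQL_RELAY_LOG_INFO - TRX_SYS_MYSQL_MASTER_LOG_INFO);
	return(0);
}
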
=== modified file 'storage/xtradb/include/trx0trx.h'
--- a/storage/xtradb/include/trx0trx.h	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/include/trx0trx.h	2009-04-07 20:51:15 +0000
@@ -579,6 +579,21 @@ struct trx_struct{
 	ib_int64_t	mysql_log_offset;/* if MySQL binlog is used, this field
 					contains the end offset of the binlog
 					entry */
+	const char*	mysql_master_log_file_name;
+					/* if the database server is a MySQL
+					replication slave, this is the name
+					of the master binlog file up to which
+					replication has been processed;
+					otherwise it points to a null
+					character */
+	ib_int64_t	mysql_master_log_pos;
+					/* if the database server is a MySQL
+					replication slave, this is the
+					position in that log file up to which
+					replication has been processed */
+	const char*	mysql_relay_log_file_name;
+	ib_int64_t	mysql_relay_log_pos;
+
 	os_thread_id_t	mysql_thread_id;/* id of the MySQL thread associated
 					with this transaction object */
 	ulint		mysql_process_no;/* since in Linux, 'top' reports

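These fields are only read at commit time, in the trx0trx.c hunk at the end of this diff; handler-level code (not shown here) is expected to point them at the slave SQL thread's current master and relay coordinates first. A rough sketch of that hand-off, assuming the trx0trx.h declarations above are in scope; set_trx_replication_coords() is a hypothetical helper, not an API added by this patch.

/* Illustrative only: filling in the new replication fields on a trx
   before commit. The real assignments happen in handler-level code that
   is not part of this portion of the diff. */
static void
set_trx_replication_coords(
/*=======================*/
	trx_t*		trx,		/* in/out: transaction */
	const char*	master_file,	/* in: master binlog name, or "" */
	ib_int64_t	master_pos,	/* in: position in that binlog */
	const char*	relay_file,	/* in: relay log name, or "" */
	ib_int64_t	relay_pos)	/* in: position in the relay log */
{
	trx->mysql_master_log_file_name = master_file;
	trx->mysql_master_log_pos = master_pos;
	trx->mysql_relay_log_file_name = relay_file;
	trx->mysql_relay_log_pos = relay_pos;
}
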
=== modified file 'storage/xtradb/include/univ.i'
--- a/storage/xtradb/include/univ.i	2009-06-09 15:08:46 +0000
+++ b/storage/xtradb/include/univ.i	2009-06-10 06:51:03 +0000
@@ -35,7 +35,7 @@ Created 1/20/1994 Heikki Tuuri
 #define INNODB_VERSION_MAJOR	1
 #define INNODB_VERSION_MINOR	0
 #define INNODB_VERSION_BUGFIX	3
-#define PERCONA_INNODB_VERSION	3
+#define PERCONA_INNODB_VERSION	5a
 
 /* The following is the InnoDB version as shown in
 SELECT plugin_version FROM information_schema.plugins;

=== modified file 'storage/xtradb/include/ut0auxconf.h'
--- a/storage/xtradb/include/ut0auxconf.h	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/include/ut0auxconf.h	2009-04-27 04:54:14 +0000
@@ -12,8 +12,3 @@ If by any chance Makefile.in and ./confi
 the hack from Makefile.in wiped away then the "real" check from plug.in
 will take over.
 */
-/* This is temprary fix for http://bugs.mysql.com/43740 */
-/* force to enable */
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-#define HAVE_ATOMIC_PTHREAD_T
-#endif

=== modified file 'storage/xtradb/sync/sync0arr.c'
--- a/storage/xtradb/sync/sync0arr.c	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/sync/sync0arr.c	2009-04-27 04:54:14 +0000
@@ -331,15 +331,8 @@ sync_cell_get_event(
 		return(((mutex_t *) cell->wait_object)->event);
 	} else if (type == RW_LOCK_WAIT_EX) {
 		return(((rw_lock_t *) cell->wait_object)->wait_ex_event);
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	} else if (type == RW_LOCK_SHARED) {
-		return(((rw_lock_t *) cell->wait_object)->s_event);
-	} else { /* RW_LOCK_EX */
-		return(((rw_lock_t *) cell->wait_object)->x_event);
-#else
 	} else { /* RW_LOCK_SHARED and RW_LOCK_EX wait on the same event */
 		return(((rw_lock_t *) cell->wait_object)->event);
-#endif
 	}
 }
 
@@ -483,7 +476,7 @@ sync_array_cell_print(
 
 	fprintf(file,
 		"--Thread %lu has waited at %s line %lu"
-		" for %.2f seconds the semaphore:\n",
+		" for %#.5g seconds the semaphore:\n",
 		(ulong) os_thread_pf(cell->thread), cell->file,
 		(ulong) cell->line,
 		difftime(time(NULL), cell->reservation_time));
@@ -510,7 +503,7 @@ sync_array_cell_print(
 		   || type == RW_LOCK_WAIT_EX
 		   || type == RW_LOCK_SHARED) {
 
-		fputs(type == RW_LOCK_SHARED ? "S-lock on" : "X-lock on", file);
+		fputs(type == RW_LOCK_EX ? "X-lock on" : "S-lock on", file);
 
 		rwlock = cell->old_wait_rw_lock;
 
@@ -530,21 +523,12 @@ sync_array_cell_print(
 		}
 
 		fprintf(file,
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-			"number of readers %lu, s_waiters flag %lu, x_waiters flag %lu, "
-#else
 			"number of readers %lu, waiters flag %lu, "
-#endif
                         "lock_word: %lx\n"
 			"Last time read locked in file %s line %lu\n"
 			"Last time write locked in file %s line %lu\n",
 			(ulong) rw_lock_get_reader_count(rwlock),
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-			(ulong) rwlock->s_waiters,
-			(ulong) (rwlock->x_waiters || rwlock->wait_ex_waiters),
-#else
 			(ulong) rwlock->waiters,
-#endif
 			rwlock->lock_word,
 			rwlock->last_s_file_name,
 			(ulong) rwlock->last_s_line,

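One small behavioural note on sync_array_cell_print(): the wait-time format changes from "%.2f" to "%#.5g", i.e. five significant digits (with trailing zeros kept by the #) and exponent form for very large values instead of a long fixed-point number. A tiny stand-alone illustration:

#include <stdio.h>

int
main(void)
{
	double	waits[3] = { 3.0, 1242.0, 1.2e9 };
	int	i;

	for (i = 0; i < 3; i++) {
		/* prints 3.00 / 3.0000, 1242.00 / 1242.0,
		   1200000000.00 / 1.2000e+09 */
		printf("%%.2f -> %.2f    %%#.5g -> %#.5g\n",
		       waits[i], waits[i]);
	}
	return(0);
}
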
=== modified file 'storage/xtradb/sync/sync0rw.c'
--- a/storage/xtradb/sync/sync0rw.c	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/sync/sync0rw.c	2009-04-27 04:54:14 +0000
@@ -250,17 +250,7 @@ rw_lock_create_func(
 #endif /* INNODB_RW_LOCKS_USE_ATOMICS */
 
 	lock->lock_word = X_LOCK_DECR;
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	lock->s_waiters = 0;
-	lock->x_waiters = 0;
-	lock->wait_ex_waiters = 0;
-	lock->writer = RW_LOCK_NOT_LOCKED;
-	lock->writer_count = 0;
-	lock->reader_count = 0;
-	lock->writer_is_wait_ex = FALSE;
-#else
 	lock->waiters = 0;
-#endif
 
 	/* We set this value to signify that lock->writer_thread
 	contains garbage at initialization and cannot be used for
@@ -283,12 +273,7 @@ rw_lock_create_func(
 	lock->last_x_file_name = "not yet reserved";
 	lock->last_s_line = 0;
 	lock->last_x_line = 0;
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	lock->s_event = os_event_create(NULL);
-	lock->x_event = os_event_create(NULL);
-#else
 	lock->event = os_event_create(NULL);
-#endif
 	lock->wait_ex_event = os_event_create(NULL);
 
 	mutex_enter(&rw_lock_list_mutex);
@@ -314,15 +299,7 @@ rw_lock_free(
 	rw_lock_t*	lock)	/* in: rw-lock */
 {
 	ut_ad(rw_lock_validate(lock));
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	ut_a(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED);
-	ut_a(rw_lock_get_s_waiters(lock) == 0);
-	ut_a(rw_lock_get_x_waiters(lock) == 0);
-	ut_a(rw_lock_get_wx_waiters(lock) == 0);
-	ut_a(rw_lock_get_reader_count(lock) == 0);
-#else
 	ut_a(lock->lock_word == X_LOCK_DECR);
-#endif
 
 	lock->magic_n = 0;
 
@@ -331,12 +308,7 @@ rw_lock_free(
 #endif /* INNODB_RW_LOCKS_USE_ATOMICS */
 
 	mutex_enter(&rw_lock_list_mutex);
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	os_event_free(lock->s_event);
-	os_event_free(lock->x_event);
-#else
 	os_event_free(lock->event);
-#endif
 
 	os_event_free(lock->wait_ex_event);
 
@@ -364,23 +336,12 @@ rw_lock_validate(
 {
 	ut_a(lock);
 
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	ut_a(lock->magic_n == RW_LOCK_MAGIC_N);
-
-	ulint waiters = rw_lock_get_s_waiters(lock);
-	ut_a(waiters == 0 || waiters == 1);
-	waiters = rw_lock_get_x_waiters(lock);
-	ut_a(waiters == 0 || waiters == 1);
-	waiters = rw_lock_get_wx_waiters(lock);
-	ut_a(waiters == 0 || waiters == 1);
-#else
 	ulint waiters = rw_lock_get_waiters(lock);
 	lint lock_word = lock->lock_word;
 
 	ut_a(lock->magic_n == RW_LOCK_MAGIC_N);
 	ut_a(waiters == 0 || waiters == 1);
 	ut_a(lock_word > -X_LOCK_DECR ||(-lock_word) % X_LOCK_DECR == 0);
-#endif
 
 	return(TRUE);
 }
@@ -410,12 +371,7 @@ rw_lock_s_lock_spin(
 lock_loop:
 
 	/* Spin waiting for the writer field to become free */
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	while (i < SYNC_SPIN_ROUNDS
-	       && rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED) {
-#else
 	while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) {
-#endif
 		if (srv_spin_wait_delay) {
 			ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
 		}
@@ -456,29 +412,12 @@ lock_loop:
 
 		/* Set waiters before checking lock_word to ensure wake-up
                 signal is sent. This may lead to some unnecessary signals. */
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-		rw_lock_set_s_waiter_flag(lock);
-#else
 		rw_lock_set_waiter_flag(lock);
-#endif
 
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-		for (i = 0; i < 4; i++) {
-#endif
 		if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
 			sync_array_free_cell(sync_primary_wait_array, index);
 			return; /* Success */
 		}
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-		}
-
-		/* If wait_ex_waiter stalls, wakes it. */
-		if (lock->reader_count == 0
-		    && __sync_lock_test_and_set(&lock->wait_ex_waiters, 0)) {
-			os_event_set(lock->wait_ex_event);
-			sync_array_object_signalled(sync_primary_wait_array);
-		}
-#endif
 
 		if (srv_print_latch_waits) {
 			fprintf(stderr,
@@ -517,12 +456,7 @@ rw_lock_x_lock_move_ownership(
 {
 	ut_ad(rw_lock_is_locked(lock, RW_LOCK_EX));
 
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	lock->writer_thread = os_thread_get_curr_id();
-	lock->recursive = TRUE;
-#else
 	rw_lock_set_writer_id_and_recursion_flag(lock, TRUE);
-#endif
 }
 
 /**********************************************************************
@@ -596,11 +530,7 @@ rw_lock_x_lock_wait(
 /**********************************************************************
 Low-level function for acquiring an exclusive lock. */
 UNIV_INLINE
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-ulint
-#else
 ibool
-#endif
 rw_lock_x_lock_low(
 /*===============*/
 				/* out: RW_LOCK_NOT_LOCKED if did
@@ -613,90 +543,6 @@ rw_lock_x_lock_low(
 {
 	os_thread_id_t	curr_thread	= os_thread_get_curr_id();
 
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-retry_writer:
-	/* try to lock writer */
-	if(__sync_lock_test_and_set(&(lock->writer),RW_LOCK_EX)
-			== RW_LOCK_NOT_LOCKED) {
-		/* success */
-		/* obtain RW_LOCK_WAIT_EX right */
-		lock->writer_thread = curr_thread;
-		lock->recursive = pass ? FALSE : TRUE;
-		lock->writer_is_wait_ex = TRUE;
-		/* atomic operation may be safer about memory order. */
-		__sync_synchronize();
-#ifdef UNIV_SYNC_DEBUG
-		rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX,
-					file_name, line);
-#endif
-	}
-
-	if (!os_thread_eq(lock->writer_thread, curr_thread)) {
-		return(RW_LOCK_NOT_LOCKED);
-	}
-
-	switch(rw_lock_get_writer(lock)) {
-	    case RW_LOCK_WAIT_EX:
-		/* have right to try x-lock */
-retry_x_lock:
-		/* try x-lock */
-		if(__sync_sub_and_fetch(&(lock->lock_word),
-				X_LOCK_DECR) == 0) {
-			/* success */
-			lock->recursive = pass ? FALSE : TRUE;
-			lock->writer_is_wait_ex = FALSE;
-			__sync_fetch_and_add(&(lock->writer_count),1);
-
-#ifdef UNIV_SYNC_DEBUG
-			rw_lock_remove_debug_info(lock, pass, RW_LOCK_WAIT_EX);
-			rw_lock_add_debug_info(lock, pass, RW_LOCK_EX,
-						file_name, line);
-#endif
-
-			lock->last_x_file_name = file_name;
-			lock->last_x_line = line;
-
-			/* Locking succeeded, we may return */
-			return(RW_LOCK_EX);
-		} else if(__sync_fetch_and_add(&(lock->lock_word),
-				X_LOCK_DECR) == 0) {
-			/* retry x-lock */
-			goto retry_x_lock;
-		}
-
-		/* There are readers, we have to wait */
-		return(RW_LOCK_WAIT_EX);
-
-		break;
-
-	    case RW_LOCK_EX:
-		/* already have x-lock */
-		if (lock->recursive && (pass == 0)) {
-			__sync_fetch_and_add(&(lock->writer_count),1);
-
-#ifdef UNIV_SYNC_DEBUG
-			rw_lock_add_debug_info(lock, pass, RW_LOCK_EX, file_name,
-						line);
-#endif
-
-			lock->last_x_file_name = file_name;
-			lock->last_x_line = line;
-
-			/* Locking succeeded, we may return */
-			return(RW_LOCK_EX);
-		}
-
-		return(RW_LOCK_NOT_LOCKED);
-
-		break;
-
-	    default: /* RW_LOCK_NOT_LOCKED? maybe impossible */
-		goto retry_writer;
-	}
-
-	/* Locking did not succeed */
-	return(RW_LOCK_NOT_LOCKED);
-#else
 	if (rw_lock_lock_word_decr(lock, X_LOCK_DECR)) {
 
 		/* lock->recursive also tells us if the writer_thread
@@ -734,7 +580,6 @@ retry_x_lock:
 	lock->last_x_line = (unsigned int) line;
 
 	return(TRUE);
-#endif
 }
 
 /**********************************************************************
@@ -759,55 +604,18 @@ rw_lock_x_lock_func(
 	ulint	index;	/* index of the reserved wait cell */
 	ulint	i;	/* spin round count */
 	ibool   spinning = FALSE;
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	ulint	state = RW_LOCK_NOT_LOCKED;	/* lock state acquired */
-	ulint	prev_state = RW_LOCK_NOT_LOCKED;
-#endif
 
 	ut_ad(rw_lock_validate(lock));
 
 	i = 0;
 
 lock_loop:
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	prev_state = state;
-	state = rw_lock_x_lock_low(lock, pass, file_name, line);
-
-lock_loop_2:
-	if (state != prev_state) i=0; /* if progress, reset counter. */
 
-	if (state == RW_LOCK_EX) {
-#else
 	if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
-#endif
 		rw_x_spin_round_count += i;
 
 		return;	/* Locking succeeded */
 
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	} else if (state == RW_LOCK_WAIT_EX) {
-
-		if (!spinning) {
-			spinning = TRUE;
-			rw_x_spin_wait_count++;
-		}
-
-		/* Spin waiting for the reader count field to become zero */
-		while (i < SYNC_SPIN_ROUNDS
-		       && lock->lock_word != X_LOCK_DECR) {
-			if (srv_spin_wait_delay) {
-				ut_delay(ut_rnd_interval(0,
-							 srv_spin_wait_delay));
-			}
-
-			i++;
-		}
-		if (i == SYNC_SPIN_ROUNDS) {
-			os_thread_yield();
-		} else {
-			goto lock_loop;
-		}
-#endif
 	} else {
 
                 if (!spinning) {
@@ -817,11 +625,7 @@ lock_loop_2:
 
 		/* Spin waiting for the lock_word to become free */
 		while (i < SYNC_SPIN_ROUNDS
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-		       && rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED) {
-#else
 		       && lock->lock_word <= 0) {
-#endif
 			if (srv_spin_wait_delay) {
 				ut_delay(ut_rnd_interval(0,
 							 srv_spin_wait_delay));
@@ -848,46 +652,18 @@ lock_loop_2:
 
 	sync_array_reserve_cell(sync_primary_wait_array,
 				lock,
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-				(state == RW_LOCK_WAIT_EX)
-				 ? RW_LOCK_WAIT_EX : RW_LOCK_EX,
-#else
 				RW_LOCK_EX,
-#endif
 				file_name, line,
 				&index);
 
 	/* Waiters must be set before checking lock_word, to ensure signal
 	is sent. This could lead to a few unnecessary wake-up signals. */
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	if (state == RW_LOCK_WAIT_EX) {
-		rw_lock_set_wx_waiter_flag(lock);
-	} else {
-		rw_lock_set_x_waiter_flag(lock);
-	}
-#else
 	rw_lock_set_waiter_flag(lock);
-#endif
 
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-	for (i = 0; i < 4; i++) {
-		prev_state = state;
-		state = rw_lock_x_lock_low(lock, pass, file_name, line);
-		if (state == RW_LOCK_EX) {
-			sync_array_free_cell(sync_primary_wait_array, index);
-			return; /* Locking succeeded */
-		} else if (state != prev_state) {
-			/* retry! */
-			sync_array_free_cell(sync_primary_wait_array, index);
-			goto lock_loop_2;
-		}
-	}
-#else
 	if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
 		sync_array_free_cell(sync_primary_wait_array, index);
 		return; /* Locking succeeded */
 	}
-#endif
 
 	if (srv_print_latch_waits) {
 		fprintf(stderr,
@@ -1138,24 +914,11 @@ rw_lock_list_print_info(
 
 			fprintf(file, "RW-LOCK: %p ", (void*) lock);
 
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-			if (rw_lock_get_s_waiters(lock)) {
-				fputs(" s_waiters for the lock exist", file);
-			}
-			if (rw_lock_get_x_waiters(lock)) {
-				fputs(" x_waiters for the lock exist", file);
-			}
-			if (rw_lock_get_wx_waiters(lock)) {
-				fputs(" wait_ex_waiters for the lock exist", file);
-			}
-			putc('\n', file);
-#else
 			if (rw_lock_get_waiters(lock)) {
 				fputs(" Waiters for the lock exist\n", file);
 			} else {
 				putc('\n', file);
 			}
-#endif
 
 			info = UT_LIST_GET_FIRST(lock->debug_list);
 			while (info != NULL) {
@@ -1194,24 +957,11 @@ rw_lock_print(
 #endif
 	if (lock->lock_word != X_LOCK_DECR) {
 
-#ifdef INNODB_RW_LOCKS_USE_ATOMICS
-		if (rw_lock_get_s_waiters(lock)) {
-			fputs(" s_waiters for the lock exist", stderr);
-		}
-		if (rw_lock_get_x_waiters(lock)) {
-			fputs(" x_waiters for the lock exist", stderr);
-		}
-		if (rw_lock_get_wx_waiters(lock)) {
-			fputs(" wait_ex_waiters for the lock exist", stderr);
-		}
-		putc('\n', stderr);
-#else
 		if (rw_lock_get_waiters(lock)) {
 			fputs(" Waiters for the lock exist\n", stderr);
 		} else {
 			putc('\n', stderr);
 		}
-#endif
 
 		info = UT_LIST_GET_FIRST(lock->debug_list);
 		while (info != NULL) {

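The restored code paths keep the spin-then-wait protocol that the in-line comments describe: spin up to SYNC_SPIN_ROUNDS, reserve a wait-array cell, set the waiters flag before the final re-check so that a wake-up can only be missed in the harmless direction, then sleep on the event. The model below is a compact stand-alone version of that shape, with the lock reduced to one free/taken word and the wait array to one condition variable; every name is a stand-in, none of it is the real sync0 API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MY_SPIN_ROUNDS	30		/* stands in for SYNC_SPIN_ROUNDS */

typedef struct {
	volatile long	word;		/* 1 = free, 0 = taken */
	volatile long	waiters;	/* like lock->waiters: 0 or 1 */
	pthread_mutex_t	mu;		/* protects the event below */
	pthread_cond_t	event;		/* stands in for the os_event */
} my_lock_t;

static my_lock_t	my_lock = {
	1, 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER
};

static bool
my_trylock(my_lock_t* l)
{
	return(__sync_bool_compare_and_swap(&l->word, 1, 0));
}

static void
my_lock_spin(my_lock_t* l)
{
	unsigned	i;

	for (;;) {
		/* 1. Spin for a bounded number of rounds. */
		for (i = 0; i < MY_SPIN_ROUNDS; i++) {
			if (my_trylock(l)) {
				return;
			}
		}

		/* 2. Register as a waiter BEFORE the final re-check, so an
		   unlocker that clears the flag must also signal the event;
		   at worst this costs an unnecessary wakeup, never a lost
		   one. */
		__sync_lock_test_and_set(&l->waiters, 1);

		if (my_trylock(l)) {
			return;
		}

		/* 3. Sleep until an unlocker signals, then start over. */
		pthread_mutex_lock(&l->mu);
		while (l->waiters) {
			pthread_cond_wait(&l->event, &l->mu);
		}
		pthread_mutex_unlock(&l->mu);
	}
}

static void
my_unlock(my_lock_t* l)
{
	__sync_lock_test_and_set(&l->word, 1);	/* release the lock */

	if (__sync_lock_test_and_set(&l->waiters, 0)) {
		/* A waiter registered: wake everyone up. */
		pthread_mutex_lock(&l->mu);
		pthread_cond_broadcast(&l->event);
		pthread_mutex_unlock(&l->mu);
	}
}

int
main(void)
{
	my_lock_spin(&my_lock);
	printf("locked\n");
	my_unlock(&my_lock);
	printf("unlocked\n");
	return(0);
}
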
=== modified file 'storage/xtradb/trx/trx0sys.c'
--- a/storage/xtradb/trx/trx0sys.c	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/trx/trx0sys.c	2009-04-25 20:23:06 +0000
@@ -69,9 +69,12 @@ file name and position here. We have suc
 up to this position. If .._pos is -1, it means no crash recovery was needed,
 or there was no master log position info inside InnoDB. */
 
-UNIV_INTERN char	trx_sys_mysql_master_log_name[TRX_SYS_MYSQL_LOG_NAME_LEN];
+UNIV_INTERN char	trx_sys_mysql_master_log_name[TRX_SYS_MYSQL_MASTER_LOG_NAME_LEN];
 UNIV_INTERN ib_int64_t	trx_sys_mysql_master_log_pos	= -1;
 
+UNIV_INTERN char	trx_sys_mysql_relay_log_name[TRX_SYS_MYSQL_MASTER_LOG_NAME_LEN];
+UNIV_INTERN ib_int64_t	trx_sys_mysql_relay_log_pos	= -1;
+
 /* If this MySQL server uses binary logging, after InnoDB has been inited
 and if it has done a crash recovery, we store the binlog file name and position
 here. If .._pos is -1, it means there was no binlog position info inside
@@ -661,19 +664,24 @@ UNIV_INTERN
 void
 trx_sys_update_mysql_binlog_offset(
 /*===============================*/
-	const char*	file_name,/* in: MySQL log file name */
+	const char*	file_name_in,/* in: MySQL log file name */
 	ib_int64_t	offset,	/* in: position in that log file */
 	ulint		field,	/* in: offset of the MySQL log info field in
 				the trx sys header */
 	mtr_t*		mtr)	/* in: mtr */
 {
 	trx_sysf_t*	sys_header;
+	const char*	file_name;
 
-	if (ut_strlen(file_name) >= TRX_SYS_MYSQL_LOG_NAME_LEN) {
+	if (ut_strlen(file_name_in) >= TRX_SYS_MYSQL_MASTER_LOG_NAME_LEN) {
 
 		/* We cannot fit the name to the 512 bytes we have reserved */
+		/* To leave room for the relay log information, file_name must fit within 480 bytes */
 
-		return;
+		file_name = "";
+	}
+	else {
+		file_name = file_name_in;
 	}
 
 	sys_header = trx_sysf_get(mtr);
@@ -834,13 +842,26 @@ trx_sys_print_mysql_master_log_pos(void)
 					 + TRX_SYS_MYSQL_LOG_OFFSET_LOW),
 		sys_header + TRX_SYS_MYSQL_MASTER_LOG_INFO
 		+ TRX_SYS_MYSQL_LOG_NAME);
+
+	fprintf(stderr,
+		"InnoDB: and relay log file\n"
+		"InnoDB: position %lu %lu, file name %s\n",
+		(ulong) mach_read_from_4(sys_header
+					 + TRX_SYS_MYSQL_RELAY_LOG_INFO
+					 + TRX_SYS_MYSQL_LOG_OFFSET_HIGH),
+		(ulong) mach_read_from_4(sys_header
+					 + TRX_SYS_MYSQL_RELAY_LOG_INFO
+					 + TRX_SYS_MYSQL_LOG_OFFSET_LOW),
+		sys_header + TRX_SYS_MYSQL_RELAY_LOG_INFO
+		+ TRX_SYS_MYSQL_LOG_NAME);
+
 	/* Copy the master log position info to global variables we can
 	use in ha_innobase.cc to initialize glob_mi to right values */
 
 	ut_memcpy(trx_sys_mysql_master_log_name,
 		  sys_header + TRX_SYS_MYSQL_MASTER_LOG_INFO
 		  + TRX_SYS_MYSQL_LOG_NAME,
-		  TRX_SYS_MYSQL_LOG_NAME_LEN);
+		  TRX_SYS_MYSQL_MASTER_LOG_NAME_LEN);
 
 	trx_sys_mysql_master_log_pos
 		= (((ib_int64_t) mach_read_from_4(
@@ -849,6 +870,19 @@ trx_sys_print_mysql_master_log_pos(void)
 		+ ((ib_int64_t) mach_read_from_4(
 			   sys_header + TRX_SYS_MYSQL_MASTER_LOG_INFO
 			   + TRX_SYS_MYSQL_LOG_OFFSET_LOW));
+
+	ut_memcpy(trx_sys_mysql_relay_log_name,
+		  sys_header + TRX_SYS_MYSQL_RELAY_LOG_INFO
+		  + TRX_SYS_MYSQL_LOG_NAME,
+		  TRX_SYS_MYSQL_MASTER_LOG_NAME_LEN);
+
+	trx_sys_mysql_relay_log_pos
+		= (((ib_int64_t) mach_read_from_4(
+			    sys_header + TRX_SYS_MYSQL_RELAY_LOG_INFO
+			    + TRX_SYS_MYSQL_LOG_OFFSET_HIGH)) << 32)
+		+ ((ib_int64_t) mach_read_from_4(
+			   sys_header + TRX_SYS_MYSQL_RELAY_LOG_INFO
+			   + TRX_SYS_MYSQL_LOG_OFFSET_LOW));
 	mtr_commit(&mtr);
 }
 

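As with the master-log code these hunks extend, a 64-bit position is kept in the page as two 4-byte words and rebuilt with a shift-and-add, exactly as in the TRX_SYS_MYSQL_LOG_OFFSET_HIGH/_LOW reads above; the split on the write side is simply the mirror image. A stand-alone sketch of just that arithmetic (the typedefs are stand-ins for ib_int64_t and for the 4-byte header fields):

#include <stdio.h>

typedef long long		my_i64;	/* stands in for ib_int64_t */
typedef unsigned long		my_u32;	/* one 4-byte header field */

int
main(void)
{
	my_i64	pos = 4294968000LL;	/* a position just past 4 GB */

	/* What the write side stores ... */
	my_u32	high = (my_u32) (pos >> 32);		/* ..._OFFSET_HIGH */
	my_u32	low = (my_u32) (pos & 0xFFFFFFFFUL);	/* ..._OFFSET_LOW  */

	/* ... and what trx_sys_print_mysql_master_log_pos() reads back. */
	my_i64	back = (((my_i64) high) << 32) + (my_i64) low;

	printf("pos %lld -> high %lu low %lu -> %lld\n",
	       pos, (unsigned long) high, (unsigned long) low, back);
	return(0);
}
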
=== modified file 'storage/xtradb/trx/trx0trx.c'
--- a/storage/xtradb/trx/trx0trx.c	2009-03-26 06:11:11 +0000
+++ b/storage/xtradb/trx/trx0trx.c	2009-04-07 20:51:15 +0000
@@ -127,6 +127,10 @@ trx_create(
 
 	trx->mysql_log_file_name = NULL;
 	trx->mysql_log_offset = 0;
+	trx->mysql_master_log_file_name = "";
+	trx->mysql_master_log_pos = 0;
+	trx->mysql_relay_log_file_name = "";
+	trx->mysql_relay_log_pos = 0;
 
 	mutex_create(&trx->undo_mutex, SYNC_TRX_UNDO);
 
@@ -792,6 +796,19 @@ trx_commit_off_kernel(
 			trx->mysql_log_file_name = NULL;
 		}
 
+		if (trx->mysql_master_log_file_name[0] != '\0') {
+			/* This database server is a MySQL replication slave */
+			trx_sys_update_mysql_binlog_offset(
+				trx->mysql_relay_log_file_name,
+				trx->mysql_relay_log_pos,
+				TRX_SYS_MYSQL_RELAY_LOG_INFO, &mtr);
+			trx_sys_update_mysql_binlog_offset(
+				trx->mysql_master_log_file_name,
+				trx->mysql_master_log_pos,
+				TRX_SYS_MYSQL_MASTER_LOG_INFO, &mtr);
+			trx->mysql_master_log_file_name = "";
+		}
+
 		/* The following call commits the mini-transaction, making the
 		whole transaction committed in the file-based world, at this
 		log sequence number. The transaction becomes 'durable' when