# Reiser4 (don't check if fail (some patches are already in xen patchset)
-cd $(DIR_APP) && bzcat $(DIR_DL)/reiser4-for-2.6.32.patch.bz2 | patch -Np1
+ # Fix lags at cache access (e.g. quit of mc)
+ cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-2.6-vmscan_remove_wait_on_page_writeback.patch
+
# ipp2p 0.8.2-pomng
cd $(DIR_APP) && patch -Np1 < $(DIR_SRC)/src/patches/linux-2.6.32.8-ipp2p-0.8.2-pomng.patch
--- /dev/null
+--- linux-next.orig/mm/vmscan.c 2010-07-28 16:22:21.000000000 +0800
++++ linux-next/mm/vmscan.c 2010-07-28 16:23:35.000000000 +0800
+@@ -324,8 +324,7 @@ typedef enum {
+ * pageout is called by shrink_page_list() for each dirty page.
+ * Calls ->writepage().
+ */
+-static pageout_t pageout(struct page *page, struct address_space *mapping,
+- enum pageout_io sync_writeback)
++static pageout_t pageout(struct page *page, struct address_space *mapping)
+ {
+ /*
+ * If the page is dirty, only perform writeback if that write
+@@ -384,14 +383,6 @@ static pageout_t pageout(struct page *pa
+ return PAGE_ACTIVATE;
+ }
+
+- /*
+- * Wait on writeback if requested to. This happens when
+- * direct reclaiming a large contiguous area and the
+- * first attempt to free a range of pages fails.
+- */
+- if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
+- wait_on_page_writeback(page);
+-
+ if (!PageWriteback(page)) {
+ /* synchronous write or broken a_ops? */
+ ClearPageReclaim(page);
+@@ -727,7 +718,7 @@ static unsigned long shrink_page_list(st
+ goto keep_locked;
+
+ /* Page is dirty, try to write it out here */
+- switch (pageout(page, mapping, sync_writeback)) {
++ switch (pageout(page, mapping)) {
+ case PAGE_KEEP:
+ goto keep_locked;
+ case PAGE_ACTIVATE:
+