--- /dev/null
+From jejb@kernel.org Tue Nov 11 09:54:35 2008
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Fri, 7 Nov 2008 00:07:15 GMT
+Subject: ARM: xsc3: fix xsc3_l2_inv_range
+To: stable@kernel.org
+Message-ID: <200811070007.mA707EdC005526@hera.kernel.org>
+
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit c7cf72dcadbe39c2077b32460f86c9f8167be3be upstream
+
+When 'start' and 'end' are less than a cacheline apart and 'start' is
+unaligned we are done after cleaning and invalidating the first
+cacheline. So check for (start < end) which will not walk off into
+invalid address ranges when (start > end).
+
+This issue was caught by drivers/dma/dmatest.
+
+2.6.27 is susceptible.
+
+Cc: <stable@kernel.org>
+Cc: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
+Cc: Lothar Waßmann <LW@KARO-electronics.de>
+Cc: Lennert Buytenhek <buytenh@marvell.com>
+Cc: Eric Miao <eric.miao@marvell.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/mm/cache-xsc3l2.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/mm/cache-xsc3l2.c
++++ b/arch/arm/mm/cache-xsc3l2.c
+@@ -97,7 +97,7 @@ static void xsc3_l2_inv_range(unsigned l
+ /*
+ * Clean and invalidate partial last cache line.
+ */
+- if (end & (CACHE_LINE_SIZE - 1)) {
++ if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
+ xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
+ xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
+ end &= ~(CACHE_LINE_SIZE - 1);
+@@ -106,7 +106,7 @@ static void xsc3_l2_inv_range(unsigned l
+ /*
+ * Invalidate all full cache lines between 'start' and 'end'.
+ */
+- while (start != end) {
++ while (start < end) {
+ xsc3_l2_inv_pa(start);
+ start += CACHE_LINE_SIZE;
+ }
--- /dev/null
+From jejb@kernel.org Tue Nov 11 09:53:44 2008
+From: David Woodhouse <David.Woodhouse@intel.com>
+Date: Fri, 7 Nov 2008 00:08:59 GMT
+Subject: JFFS2: Fix lack of locking in thread_should_wake()
+To: stable@kernel.org
+Message-ID: <200811070008.mA708xQE008191@hera.kernel.org>
+
+From: David Woodhouse <David.Woodhouse@intel.com>
+
+commit b27cf88e9592953ae292d05324887f2f44979433 upstream
+
+The thread_should_wake() function trawls through the list of 'very
+dirty' eraseblocks, determining whether the background GC thread should
+wake. Doing this without holding the appropriate locks is a bad idea.
+
+OLPC Trac #8615
+
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/jffs2/background.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/fs/jffs2/background.c
++++ b/fs/jffs2/background.c
+@@ -85,15 +85,15 @@ static int jffs2_garbage_collect_thread(
+ for (;;) {
+ allow_signal(SIGHUP);
+ again:
++ spin_lock(&c->erase_completion_lock);
+ if (!jffs2_thread_should_wake(c)) {
+ set_current_state (TASK_INTERRUPTIBLE);
++ spin_unlock(&c->erase_completion_lock);
+ D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
+- /* Yes, there's a race here; we checked jffs2_thread_should_wake()
+- before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
+- matter - We don't care if we miss a wakeup, because the GC thread
+- is only an optimisation anyway. */
+ schedule();
+- }
++ } else
++ spin_unlock(&c->erase_completion_lock);
++
+
+ /* This thread is purely an optimisation. But if it runs when
+ other things could be running, it actually makes things a
--- /dev/null
+From jejb@kernel.org Tue Nov 11 09:53:08 2008
+From: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
+Date: Fri, 7 Nov 2008 00:08:19 GMT
+Subject: JFFS2: fix race condition in jffs2_lzo_compress()
+To: stable@kernel.org
+Message-ID: <200811070008.mA708Jdo007031@hera.kernel.org>
+
+From: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
+
+commit dc8a0843a435b2c0891e7eaea64faaf1ebec9b11 upstream
+
+deflate_mutex protects the globals lzo_mem and lzo_compress_buf. However,
+jffs2_lzo_compress() unlocks deflate_mutex _before_ it has copied out the
+compressed data from lzo_compress_buf. Correct this by moving the mutex
+unlock after the copy.
+
+In addition, document what deflate_mutex actually protects.
+
+Signed-off-by: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
+Acked-by: Richard Purdie <rpurdie@openedhand.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/jffs2/compr_lzo.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/fs/jffs2/compr_lzo.c
++++ b/fs/jffs2/compr_lzo.c
+@@ -19,7 +19,7 @@
+
+ static void *lzo_mem;
+ static void *lzo_compress_buf;
+-static DEFINE_MUTEX(deflate_mutex);
++static DEFINE_MUTEX(deflate_mutex); /* for lzo_mem and lzo_compress_buf */
+
+ static void free_workspace(void)
+ {
+@@ -49,18 +49,21 @@ static int jffs2_lzo_compress(unsigned c
+
+ mutex_lock(&deflate_mutex);
+ ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem);
+- mutex_unlock(&deflate_mutex);
+-
+ if (ret != LZO_E_OK)
+- return -1;
++ goto fail;
+
+ if (compress_size > *dstlen)
+- return -1;
++ goto fail;
+
+ memcpy(cpage_out, lzo_compress_buf, compress_size);
+- *dstlen = compress_size;
++ mutex_unlock(&deflate_mutex);
+
++ *dstlen = compress_size;
+ return 0;
++
++ fail:
++ mutex_unlock(&deflate_mutex);
++ return -1;
+ }
+
+ static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out,
--- /dev/null
+From jejb@kernel.org Tue Nov 11 09:50:27 2008
+From: Neil Brown <neilb@suse.de>
+Date: Fri, 7 Nov 2008 00:08:12 GMT
+Subject: md: fix bug in raid10 recovery.
+To: stable@kernel.org
+Message-ID: <200811070008.mA708CGN006808@hera.kernel.org>
+
+From: Neil Brown <neilb@suse.de>
+
+commit a53a6c85756339f82ff19e001e90cfba2d6299a8 upstream
+
+Adding a spare to a raid10 doesn't cause recovery to start.
+This is due to a silly typo in
+ commit 6c2fce2ef6b4821c21b5c42c7207cb9cf8c87eda
+and so is a bug in 2.6.27 and .28-rc.
+
+Thanks to Thomas Backlund for bisecting to find this.
+
+Cc: Thomas Backlund <tmb@mandriva.org>
+Cc: George Spelvin <linux@horizon.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/raid10.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1132,7 +1132,7 @@ static int raid10_add_disk(mddev_t *mdde
+ if (!enough(conf))
+ return -EINVAL;
+
+- if (rdev->raid_disk)
++ if (rdev->raid_disk >= 0)
+ first = last = rdev->raid_disk;
+
+ if (rdev->saved_raid_disk >= 0 &&
--- /dev/null
+From jejb@kernel.org Tue Nov 11 09:47:32 2008
+From: Andre Noll <maan@systemlinux.org>
+Date: Fri, 7 Nov 2008 00:07:46 GMT
+Subject: md: linear: Fix a division by zero bug for very small arrays.
+To: stable@kernel.org
+Message-ID: <200811070007.mA707k6d006270@hera.kernel.org>
+
+From: Andre Noll <maan@systemlinux.org>
+
+commit f1cd14ae52985634d0389e934eba25b5ecf24565 upstream
+
+Date: Thu, 6 Nov 2008 19:41:24 +1100
+Subject: md: linear: Fix a division by zero bug for very small arrays.
+
+We currently oops with a divide error on starting a linear software
+raid array consisting of at least two very small (< 500K) devices.
+
+The bug is caused by the calculation of the hash table size which
+tries to compute sector_div(sz, base) with "base" being zero due to
+the small size of the component devices of the array.
+
+Fix this by requiring the hash spacing to be at least one which
+implies that also "base" is non-zero.
+
+This bug has existed since about 2.6.14.
+
+Signed-off-by: Andre Noll <maan@systemlinux.org>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/linear.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/md/linear.c
++++ b/drivers/md/linear.c
+@@ -157,6 +157,8 @@ static linear_conf_t *linear_conf(mddev_
+
+ min_spacing = conf->array_sectors / 2;
+ sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *));
++	if (min_spacing == 0)
++		min_spacing = 1;
+
+ /* min_spacing is the minimum spacing that will fit the hash
+ * table in one PAGE. This may be much smaller than needed.
--- /dev/null
+From jejb@kernel.org Tue Nov 11 09:56:06 2008
+From: Eric W. Biederman <ebiederm@xmission.com>
+Date: Fri, 7 Nov 2008 00:08:33 GMT
+Subject: MTD: [NOR] Fix cfi_send_gen_cmd handling of x16 devices in x8 mode (v4)
+To: jejb@kernel.org, stable@kernel.org
+Message-ID: <200811070008.mA708Xrf007411@hera.kernel.org>
+
+
+From: Eric W. Biederman <ebiederm@xmission.com>
+
+commit 467622ef2acb01986eab37ef96c3632b3ea35999 upstream
+
+For "unlock" cycles to 16bit devices in 8bit compatibility mode we need
+to use the byte addresses 0xaaa and 0x555. These effectively match
+the word address 0x555 and 0x2aa, except the latter has its low bit set.
+
+Most chips don't care about the value of the 'A-1' pin in x8 mode,
+but some -- like the ST M29W320D -- do. So we need to be careful to
+set it where appropriate.
+
+cfi_send_gen_cmd is only ever passed addresses where the low byte
+is 0x00, 0x55 or 0xaa. Of those, only addresses ending 0xaa are
+affected by this patch, by masking in the extra low bit when the device
+is known to be in compatibility mode.
+
+[dwmw2: Do it only when (cmd_ofs & 0xff) == 0xaa]
+v4: Fix stupid typo in cfi_build_cmd_addr that failed to compile
+ I'm writing this patch way too late at night.
+v3: Bring all of the work back into cfi_build_cmd_addr
+ including calling of map_bankwidth(map) and cfi_interleave(cfi)
+ So every caller doesn't need to.
+v2: Only modified the address if we our device_type is larger than our
+ bus width.
+
+Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 13 -------------
+ drivers/mtd/chips/jedec_probe.c | 10 ++++------
+ include/linux/mtd/cfi.h | 22 +++++++++++++++++++---
+ 3 files changed, 23 insertions(+), 22 deletions(-)
+
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -362,19 +362,6 @@ struct mtd_info *cfi_cmdset_0002(struct
+ /* Set the default CFI lock/unlock addresses */
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2aa;
+- /* Modify the unlock address if we are in compatibility mode */
+- if ( /* x16 in x8 mode */
+- ((cfi->device_type == CFI_DEVICETYPE_X8) &&
+- (cfi->cfiq->InterfaceDesc ==
+- CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
+- /* x32 in x16 mode */
+- ((cfi->device_type == CFI_DEVICETYPE_X16) &&
+- (cfi->cfiq->InterfaceDesc ==
+- CFI_INTERFACE_X16_BY_X32_ASYNC)))
+- {
+- cfi->addr_unlock1 = 0xaaa;
+- cfi->addr_unlock2 = 0x555;
+- }
+
+ } /* CFI mode */
+ else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
+--- a/drivers/mtd/chips/jedec_probe.c
++++ b/drivers/mtd/chips/jedec_probe.c
+@@ -1808,9 +1808,7 @@ static inline u32 jedec_read_mfr(struct
+ * several first banks can contain 0x7f instead of actual ID
+ */
+ do {
+- uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8),
+- cfi_interleave(cfi),
+- cfi->device_type);
++ uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
+ mask = (1 << (cfi->device_type * 8)) - 1;
+ result = map_read(map, base + ofs);
+ bank++;
+@@ -1824,7 +1822,7 @@ static inline u32 jedec_read_id(struct m
+ {
+ map_word result;
+ unsigned long mask;
+- u32 ofs = cfi_build_cmd_addr(1, cfi_interleave(cfi), cfi->device_type);
++ u32 ofs = cfi_build_cmd_addr(1, map, cfi);
+ mask = (1 << (cfi->device_type * 8)) -1;
+ result = map_read(map, base + ofs);
+ return result.x[0] & mask;
+@@ -2067,8 +2065,8 @@ static int jedec_probe_chip(struct map_i
+
+ }
+ /* Ensure the unlock addresses we try stay inside the map */
+- probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type);
+- probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type);
++ probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi);
++ probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi);
+ if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
+ ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
+ goto retry;
+--- a/include/linux/mtd/cfi.h
++++ b/include/linux/mtd/cfi.h
+@@ -281,9 +281,25 @@ struct cfi_private {
+ /*
+ * Returns the command address according to the given geometry.
+ */
+-static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
++static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
++ struct map_info *map, struct cfi_private *cfi)
+ {
+- return (cmd_ofs * type) * interleave;
++ unsigned bankwidth = map_bankwidth(map);
++ unsigned interleave = cfi_interleave(cfi);
++ unsigned type = cfi->device_type;
++ uint32_t addr;
++
++ addr = (cmd_ofs * type) * interleave;
++
++ /* Modify the unlock address if we are in compatiblity mode.
++ * For 16bit devices on 8 bit busses
++ * and 32bit devices on 16 bit busses
++ * set the low bit of the alternating bit sequence of the address.
++ */
++ if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa))
++ addr |= (type >> 1)*interleave;
++
++ return addr;
+ }
+
+ /*
+@@ -429,7 +445,7 @@ static inline uint32_t cfi_send_gen_cmd(
+ int type, map_word *prev_val)
+ {
+ map_word val;
+- uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);
++ uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
+
+ val = cfi_build_cmd(cmd, map, cfi);
+
--- /dev/null
+From jejb@kernel.org Tue Nov 11 09:59:05 2008
+From: Miklos Szeredi <mszeredi@suse.cz>
+Date: Sun, 9 Nov 2008 19:50:02 GMT
+Subject: net: unix: fix inflight counting bug in garbage collector
+To: stable@kernel.org
+Message-ID: <200811091950.mA9Jo2iL003804@hera.kernel.org>
+
+From: Miklos Szeredi <mszeredi@suse.cz>
+
+commit 6209344f5a3795d34b7f2c0061f49802283b6bdd upstream
+
+Previously I assumed that the receive queues of candidates don't
+change during the GC. This is only half true, nothing can be received
+from the queues (see comment in unix_gc()), but buffers could be added
+through the other half of the socket pair, which may still have file
+descriptors referring to it.
+
+This can result in inc_inflight_move_tail() erroneously increasing the
+"inflight" counter for a unix socket for which dec_inflight() wasn't
+previously called. This in turn can trigger the "BUG_ON(total_refs <
+inflight_refs)" in a later garbage collection run.
+
+Fix this by only manipulating the "inflight" counter for sockets which
+are candidates themselves. Duplicating the file references in
+unix_attach_fds() is also needed to prevent a socket becoming a
+candidate for GC while the skb that contains it is not yet queued.
+
+Reported-by: Andrea Bittau <a.bittau@cs.ucl.ac.uk>
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/af_unix.h | 1 +
+ net/unix/af_unix.c | 31 ++++++++++++++++++++++++-------
+ net/unix/garbage.c | 49 +++++++++++++++++++++++++++++++++++++------------
+ 3 files changed, 62 insertions(+), 19 deletions(-)
+
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -54,6 +54,7 @@ struct unix_sock {
+ atomic_long_t inflight;
+ spinlock_t lock;
+ unsigned int gc_candidate : 1;
++ unsigned int gc_maybe_cycle : 1;
+ wait_queue_head_t peer_wait;
+ };
+ #define unix_sk(__sk) ((struct unix_sock *)__sk)
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1300,14 +1300,23 @@ static void unix_destruct_fds(struct sk_
+ sock_wfree(skb);
+ }
+
+-static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
++static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ {
+ int i;
++
++ /*
++ * Need to duplicate file references for the sake of garbage
++ * collection. Otherwise a socket in the fps might become a
++ * candidate for GC while the skb is not yet queued.
++ */
++ UNIXCB(skb).fp = scm_fp_dup(scm->fp);
++ if (!UNIXCB(skb).fp)
++ return -ENOMEM;
++
+ for (i=scm->fp->count-1; i>=0; i--)
+ unix_inflight(scm->fp->fp[i]);
+- UNIXCB(skb).fp = scm->fp;
+ skb->destructor = unix_destruct_fds;
+- scm->fp = NULL;
++ return 0;
+ }
+
+ /*
+@@ -1366,8 +1375,11 @@ static int unix_dgram_sendmsg(struct kio
+ goto out;
+
+ memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
+- if (siocb->scm->fp)
+- unix_attach_fds(siocb->scm, skb);
++ if (siocb->scm->fp) {
++ err = unix_attach_fds(siocb->scm, skb);
++ if (err)
++ goto out_free;
++ }
+ unix_get_secdata(siocb->scm, skb);
+
+ skb_reset_transport_header(skb);
+@@ -1536,8 +1548,13 @@ static int unix_stream_sendmsg(struct ki
+ size = min_t(int, size, skb_tailroom(skb));
+
+ memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
+- if (siocb->scm->fp)
+- unix_attach_fds(siocb->scm, skb);
++ if (siocb->scm->fp) {
++ err = unix_attach_fds(siocb->scm, skb);
++ if (err) {
++ kfree_skb(skb);
++ goto out_err;
++ }
++ }
+
+ if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
+ kfree_skb(skb);
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -186,8 +186,17 @@ static void scan_inflight(struct sock *x
+ */
+ struct sock *sk = unix_get_socket(*fp++);
+ if (sk) {
+- hit = true;
+- func(unix_sk(sk));
++ struct unix_sock *u = unix_sk(sk);
++
++ /*
++ * Ignore non-candidates, they could
++ * have been added to the queues after
++ * starting the garbage collection
++ */
++ if (u->gc_candidate) {
++ hit = true;
++ func(u);
++ }
+ }
+ }
+ if (hit && hitlist != NULL) {
+@@ -249,11 +258,11 @@ static void inc_inflight_move_tail(struc
+ {
+ atomic_long_inc(&u->inflight);
+ /*
+- * If this is still a candidate, move it to the end of the
+- * list, so that it's checked even if it was already passed
+- * over
++ * If this still might be part of a cycle, move it to the end
++ * of the list, so that it's checked even if it was already
++ * passed over
+ */
+- if (u->gc_candidate)
++ if (u->gc_maybe_cycle)
+ list_move_tail(&u->link, &gc_candidates);
+ }
+
+@@ -267,6 +276,7 @@ void unix_gc(void)
+ struct unix_sock *next;
+ struct sk_buff_head hitlist;
+ struct list_head cursor;
++ LIST_HEAD(not_cycle_list);
+
+ spin_lock(&unix_gc_lock);
+
+@@ -282,10 +292,14 @@ void unix_gc(void)
+ *
+ * Holding unix_gc_lock will protect these candidates from
+ * being detached, and hence from gaining an external
+- * reference. This also means, that since there are no
+- * possible receivers, the receive queues of these sockets are
+- * static during the GC, even though the dequeue is done
+- * before the detach without atomicity guarantees.
++ * reference. Since there are no possible receivers, all
++ * buffers currently on the candidates' queues stay there
++ * during the garbage collection.
++ *
++ * We also know that no new candidate can be added onto the
++ * receive queues. Other, non candidate sockets _can_ be
++ * added to queue, so we must make sure only to touch
++ * candidates.
+ */
+ list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
+ long total_refs;
+@@ -299,6 +313,7 @@ void unix_gc(void)
+ if (total_refs == inflight_refs) {
+ list_move_tail(&u->link, &gc_candidates);
+ u->gc_candidate = 1;
++ u->gc_maybe_cycle = 1;
+ }
+ }
+
+@@ -325,14 +340,24 @@ void unix_gc(void)
+ list_move(&cursor, &u->link);
+
+ if (atomic_long_read(&u->inflight) > 0) {
+- list_move_tail(&u->link, &gc_inflight_list);
+- u->gc_candidate = 0;
++ list_move_tail(&u->link, ¬_cycle_list);
++ u->gc_maybe_cycle = 0;
+ scan_children(&u->sk, inc_inflight_move_tail, NULL);
+ }
+ }
+ list_del(&cursor);
+
+ /*
++ * not_cycle_list contains those sockets which do not make up a
++ * cycle. Restore these to the inflight list.
++ */
++ while (!list_empty(¬_cycle_list)) {
++ u = list_entry(not_cycle_list.next, struct unix_sock, link);
++ u->gc_candidate = 0;
++ list_move_tail(&u->link, &gc_inflight_list);
++ }
++
++ /*
+ * Now gc_candidates contains only garbage. Restore original
+ * inflight counters for these as well, and remove the skbuffs
+ * which are creating the cycle(s).
--- /dev/null
+From romieu@fr.zoreil.com Tue Nov 11 10:02:02 2008
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Sat, 8 Nov 2008 12:04:42 +0100
+Subject: r8169: fix RxMissed register access
+To: stable@kernel.org
+Cc: Ivan Vecera <ivecera@redhat.com>, Andrew Morton <akpm@linux-foundation.org>, Edward Hsu <edward_hsu@realtek.com.tw>, Jeff Garzik <jgarzik@redhat.com>, Martin Capitanio <martin@capitanio.org>
+Message-ID: <20081108110442.GB2163@electric-eye.fr.zoreil.com>
+Content-Disposition: inline
+
+From: Francois Romieu <romieu@fr.zoreil.com>
+
+Upstream as 523a609496dbc3897e530db2a2f27650d125ea00
+
+- the register is defined for the 8169 chipset only and there is
+ no 8169 beyond RTL_GIGA_MAC_VER_06.
+- only the lower 3 bytes of the register are valid
+
+Fixes:
+1. http://bugzilla.kernel.org/show_bug.cgi?id=10180
+2. http://bugzilla.kernel.org/show_bug.cgi?id=11062 (bits of)
+
+Tested by Hermann Gausterer and Adam Huffman.
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Cc: Edward Hsu <edward_hsu@realtek.com.tw>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/r8169.c | 25 ++++++++++++++-----------
+ 1 file changed, 14 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -2092,8 +2092,6 @@ static void rtl_hw_start_8168(struct net
+
+ RTL_R8(IntrMask);
+
+- RTL_W32(RxMissed, 0);
+-
+ rtl_set_rx_mode(dev);
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+@@ -2136,8 +2134,6 @@ static void rtl_hw_start_8101(struct net
+
+ RTL_R8(IntrMask);
+
+- RTL_W32(RxMissed, 0);
+-
+ rtl_set_rx_mode(dev);
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+@@ -2915,6 +2911,17 @@ static int rtl8169_poll(struct napi_stru
+ return work_done;
+ }
+
++static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
++{
++ struct rtl8169_private *tp = netdev_priv(dev);
++
++ if (tp->mac_version > RTL_GIGA_MAC_VER_06)
++ return;
++
++ dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
++ RTL_W32(RxMissed, 0);
++}
++
+ static void rtl8169_down(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+@@ -2932,9 +2939,7 @@ core_down:
+
+ rtl8169_asic_down(ioaddr);
+
+- /* Update the error counts. */
+- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
+- RTL_W32(RxMissed, 0);
++ rtl8169_rx_missed(dev, ioaddr);
+
+ spin_unlock_irq(&tp->lock);
+
+@@ -3056,8 +3061,7 @@ static struct net_device_stats *rtl8169_
+
+ if (netif_running(dev)) {
+ spin_lock_irqsave(&tp->lock, flags);
+- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
+- RTL_W32(RxMissed, 0);
++ rtl8169_rx_missed(dev, ioaddr);
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+@@ -3082,8 +3086,7 @@ static int rtl8169_suspend(struct pci_de
+
+ rtl8169_asic_down(ioaddr);
+
+- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
+- RTL_W32(RxMissed, 0);
++ rtl8169_rx_missed(dev, ioaddr);
+
+ spin_unlock_irq(&tp->lock);
+
--- /dev/null
+From romieu@fr.zoreil.com Tue Nov 11 10:01:01 2008
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Sat, 8 Nov 2008 12:03:09 +0100
+Subject: r8169: get ethtool settings through the generic mii helper
+To: stable@kernel.org
+Cc: Ivan Vecera <ivecera@redhat.com>, Andrew Morton <akpm@linux-foundation.org>, Edward Hsu <edward_hsu@realtek.com.tw>, Jeff Garzik <jgarzik@redhat.com>, Martin Capitanio <martin@capitanio.org>
+Message-ID: <20081108110309.GA2163@electric-eye.fr.zoreil.com>
+Content-Disposition: inline
+
+From: Francois Romieu <romieu@fr.zoreil.com>
+
+Upstream as ccdffb9a88b2907b159538d7bfd6256621db4f84 (post 2.6.27).
+
+It avoids to report unsupported link capabilities with
+the fast-ethernet only 8101/8102.
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Tested-by: Martin Capitanio <martin@capitanio.org>
+Fixed-by: Ivan Vecera <ivecera@redhat.com>
+Cc: Edward Hsu <edward_hsu@realtek.com.tw>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/r8169.c | 97 ++++++++++++++++++++++++----------------------------
+ 1 file changed, 45 insertions(+), 52 deletions(-)
+
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -370,8 +370,9 @@ struct ring_info {
+ };
+
+ enum features {
+- RTL_FEATURE_WOL = (1 << 0),
+- RTL_FEATURE_MSI = (1 << 1),
++ RTL_FEATURE_WOL = (1 << 0),
++ RTL_FEATURE_MSI = (1 << 1),
++ RTL_FEATURE_GMII = (1 << 2),
+ };
+
+ struct rtl8169_private {
+@@ -406,13 +407,15 @@ struct rtl8169_private {
+ struct vlan_group *vlgrp;
+ #endif
+ int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
+- void (*get_settings)(struct net_device *, struct ethtool_cmd *);
++ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ void (*phy_reset_enable)(void __iomem *);
+ void (*hw_start)(struct net_device *);
+ unsigned int (*phy_reset_pending)(void __iomem *);
+ unsigned int (*link_ok)(void __iomem *);
+ struct delayed_work task;
+ unsigned features;
++
++ struct mii_if_info mii;
+ };
+
+ MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
+@@ -482,6 +485,23 @@ static int mdio_read(void __iomem *ioadd
+ return value;
+ }
+
++static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
++ int val)
++{
++ struct rtl8169_private *tp = netdev_priv(dev);
++ void __iomem *ioaddr = tp->mmio_addr;
++
++ mdio_write(ioaddr, location, val);
++}
++
++static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
++{
++ struct rtl8169_private *tp = netdev_priv(dev);
++ void __iomem *ioaddr = tp->mmio_addr;
++
++ return mdio_read(ioaddr, location);
++}
++
+ static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+ {
+ RTL_W16(IntrMask, 0x0000);
+@@ -850,7 +870,7 @@ static int rtl8169_rx_vlan_skb(struct rt
+
+ #endif
+
+-static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
++static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+@@ -867,65 +887,29 @@ static void rtl8169_gset_tbi(struct net_
+
+ cmd->speed = SPEED_1000;
+ cmd->duplex = DUPLEX_FULL; /* Always set */
++
++ return 0;
+ }
+
+-static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
++static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+- void __iomem *ioaddr = tp->mmio_addr;
+- u8 status;
+-
+- cmd->supported = SUPPORTED_10baseT_Half |
+- SUPPORTED_10baseT_Full |
+- SUPPORTED_100baseT_Half |
+- SUPPORTED_100baseT_Full |
+- SUPPORTED_1000baseT_Full |
+- SUPPORTED_Autoneg |
+- SUPPORTED_TP;
+-
+- cmd->autoneg = 1;
+- cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
+-
+- if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
+- cmd->advertising |= ADVERTISED_10baseT_Half;
+- if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
+- cmd->advertising |= ADVERTISED_10baseT_Full;
+- if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
+- cmd->advertising |= ADVERTISED_100baseT_Half;
+- if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
+- cmd->advertising |= ADVERTISED_100baseT_Full;
+- if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
+- cmd->advertising |= ADVERTISED_1000baseT_Full;
+-
+- status = RTL_R8(PHYstatus);
+-
+- if (status & _1000bpsF)
+- cmd->speed = SPEED_1000;
+- else if (status & _100bps)
+- cmd->speed = SPEED_100;
+- else if (status & _10bps)
+- cmd->speed = SPEED_10;
+-
+- if (status & TxFlowCtrl)
+- cmd->advertising |= ADVERTISED_Asym_Pause;
+- if (status & RxFlowCtrl)
+- cmd->advertising |= ADVERTISED_Pause;
+
+- cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
+- DUPLEX_FULL : DUPLEX_HALF;
++ return mii_ethtool_gset(&tp->mii, cmd);
+ }
+
+ static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
++ int rc;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+- tp->get_settings(dev, cmd);
++ rc = tp->get_settings(dev, cmd);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+- return 0;
++ return rc;
+ }
+
+ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+@@ -1513,7 +1497,7 @@ static const struct rtl_cfg_info {
+ unsigned int align;
+ u16 intr_event;
+ u16 napi_event;
+- unsigned msi;
++ unsigned features;
+ } rtl_cfg_infos [] = {
+ [RTL_CFG_0] = {
+ .hw_start = rtl_hw_start_8169,
+@@ -1522,7 +1506,7 @@ static const struct rtl_cfg_info {
+ .intr_event = SYSErr | LinkChg | RxOverflow |
+ RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
+ .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+- .msi = 0
++ .features = RTL_FEATURE_GMII
+ },
+ [RTL_CFG_1] = {
+ .hw_start = rtl_hw_start_8168,
+@@ -1531,7 +1515,7 @@ static const struct rtl_cfg_info {
+ .intr_event = SYSErr | LinkChg | RxOverflow |
+ TxErr | TxOK | RxOK | RxErr,
+ .napi_event = TxErr | TxOK | RxOK | RxOverflow,
+- .msi = RTL_FEATURE_MSI
++ .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI
+ },
+ [RTL_CFG_2] = {
+ .hw_start = rtl_hw_start_8101,
+@@ -1540,7 +1524,7 @@ static const struct rtl_cfg_info {
+ .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
+ RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
+ .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+- .msi = RTL_FEATURE_MSI
++ .features = RTL_FEATURE_MSI
+ }
+ };
+
+@@ -1552,7 +1536,7 @@ static unsigned rtl_try_msi(struct pci_d
+ u8 cfg2;
+
+ cfg2 = RTL_R8(Config2) & ~MSIEnable;
+- if (cfg->msi) {
++ if (cfg->features & RTL_FEATURE_MSI) {
+ if (pci_enable_msi(pdev)) {
+ dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+ } else {
+@@ -1578,6 +1562,7 @@ rtl8169_init_one(struct pci_dev *pdev, c
+ const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
+ const unsigned int region = cfg->region;
+ struct rtl8169_private *tp;
++ struct mii_if_info *mii;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ unsigned int i;
+@@ -1602,6 +1587,14 @@ rtl8169_init_one(struct pci_dev *pdev, c
+ tp->pci_dev = pdev;
+ tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
+
++ mii = &tp->mii;
++ mii->dev = dev;
++ mii->mdio_read = rtl_mdio_read;
++ mii->mdio_write = rtl_mdio_write;
++ mii->phy_id_mask = 0x1f;
++ mii->reg_num_mask = 0x1f;
++ mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
++
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
--- /dev/null
+From romieu@fr.zoreil.com Tue Nov 11 10:02:36 2008
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Sat, 8 Nov 2008 12:06:09 +0100
+Subject: r8169: wake up the PHY of the 8168
+To: stable@kernel.org
+Cc: Ivan Vecera <ivecera@redhat.com>, Edward Hsu <edward_hsu@realtek.com.tw>, Martin Capitanio <martin@capitanio.org>, Chiaki Ishikawa <chiaki.ishikawa@ubin.jp>, Andrew Morton <akpm@linux-foundation.org>, Jeff Garzik <jgarzik@redhat.com>, RyanKao <ryankao@realtek.com.tw>
+Message-ID: <20081108110609.GC2163@electric-eye.fr.zoreil.com>
+Content-Disposition: inline
+
+From: Francois Romieu <romieu@fr.zoreil.com>
+
+Upstream as a2de6b89b74b28052e293fdb39975a5a03c432e0
+
+This is typically needed when some other OS puts the PHY
+to sleep due to the disabling of WOL options in the BIOS
+of the system.
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Tested-by: Chiaki Ishikawa <chiaki.ishikawa@ubin.jp>
+Cc: Edward Hsu <edward_hsu@realtek.com.tw>
+Cc: RyanKao <ryankao@realtek.com.tw>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/r8169.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -740,9 +740,13 @@ static int rtl8169_set_speed_xmii(struct
+
+ auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
+- if ((tp->mac_version == RTL_GIGA_MAC_VER_12) ||
+- (tp->mac_version == RTL_GIGA_MAC_VER_17)) {
+- /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */
++ if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
++ (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
++ /*
++ * Wake up the PHY.
++ * Vendor specific (0x1f) and reserved (0x0e) MII registers.
++ */
+ mdio_write(ioaddr, 0x1f, 0x0000);
+ mdio_write(ioaddr, 0x0e, 0x0000);
+ }
cciss-fix-regression-firmware-not-displayed-in-procfs.patch
cciss-fix-sysfs-broken-symlink-regression.patch
cciss-new-hardware-support.patch
+md-linear-fix-a-division-by-zero-bug-for-very-small-arrays.patch
+md-fix-bug-in-raid10-recovery.patch
+jffs2-fix-race-condition-in-jffs2_lzo_compress.patch
+jffs2-fix-lack-of-locking-in-thread_should_wake.patch
+arm-xsc3-fix-xsc3_l2_inv_range.patch
+mtd-fix-cfi_send_gen_cmd-handling-of-x16-devices-in-x8-mode.patch
+x86-don-t-use-tsc_khz-to-calculate-lpj-if-notsc-is-passed.patch
+net-unix-fix-inflight-counting-bug-in-garbage-collector.patch
+r8169-get-ethtool-settings-through-the-generic-mii-helper.patch
+r8169-fix-rxmissed-register-access.patch
+r8169-wake-up-the-phy-of-the-8168.patch
--- /dev/null
+From jejb@kernel.org Tue Nov 11 09:58:10 2008
+From: Alok Kataria <akataria@vmware.com>
+Date: Fri, 7 Nov 2008 00:08:46 GMT
+Subject: x86: don't use tsc_khz to calculate lpj if notsc is passed
+To: jejb@kernel.org, stable@kernel.org
+Message-ID: <200811070008.mA708kj5007779@hera.kernel.org>
+
+From: Alok Kataria <akataria@vmware.com>
+
+commit 70de9a97049e0ba79dc040868564408d5ce697f9 upstream
+
+Impact: fix udelay when "notsc" boot parameter is passed
+
+With notsc passed on commandline, tsc may not be used for
+udelays, make sure that we do not use tsc_khz to calculate
+the lpj value in such cases.
+
+Reported-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+Signed-off-by: Alok N Kataria <akataria@vmware.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/tsc.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -639,10 +639,6 @@ void __init tsc_init(void)
+ cpu_khz = calibrate_cpu();
+ #endif
+
+- lpj = ((u64)tsc_khz * 1000);
+- do_div(lpj, HZ);
+- lpj_fine = lpj;
+-
+ printk("Detected %lu.%03lu MHz processor.\n",
+ (unsigned long)cpu_khz / 1000,
+ (unsigned long)cpu_khz % 1000);
+@@ -662,6 +658,10 @@ void __init tsc_init(void)
+ /* now allow native_sched_clock() to use rdtsc */
+ tsc_disabled = 0;
+
++ lpj = ((u64)tsc_khz * 1000);
++ do_div(lpj, HZ);
++ lpj_fine = lpj;
++
+ use_tsc_delay();
+ /* Check and install the TSC clocksource */
+ dmi_check_system(bad_tsc_dmi_table);