--- /dev/null
+From a49b7e82cab0f9b41f483359be83f44fbb6b4979 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sat, 13 Apr 2013 15:15:30 -0700
+Subject: kobject: fix kset_find_obj() race with concurrent last kobject_put()
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit a49b7e82cab0f9b41f483359be83f44fbb6b4979 upstream.
+
+Anatol Pomozov identified a race condition that hits module unloading
+and re-loading. To quote Anatol:
+
+ "This is a race codition that exists between kset_find_obj() and
+ kobject_put(). kset_find_obj() might return kobject that has refcount
+ equal to 0 if this kobject is freeing by kobject_put() in other
+ thread.
+
+ Here is the timeline for the crash in case kset_find_obj() searches
+ for an object that nobody holds and another thread is doing
+ kobject_put() on the same kobject:
+
+ THREAD A (calls kset_find_obj())    THREAD B (calls kobject_put())
+ spin_lock()
+                                     atomic_dec_return(kobj->kref), counter gets zero here
+                                     ... starts kobject cleanup ....
+                                     spin_lock() // waits for thread A, in kobj_kset_leave()
+ iterate over kset->list
+ atomic_inc(kobj->kref) (counter becomes 1)
+ spin_unlock()
+                                     spin_lock() // taken
+                                     // it does not know that thread A increased the counter,
+                                     // so it removes obj from the list
+                                     spin_unlock()
+                                     vfree(module) // frees the module object containing the kobj
+
+ // kobj points to freed memory area!!
+ kobject_put(kobj) // OOPS!!!!
+
+ The race above happens because module.c tries to use kset_find_obj()
+ when somebody unloads a module. The module.c code was introduced in
+ commit 6494a93d55fa"
+
+Anatol supplied a patch specific to module.c that worked around the
+problem by simply not using kset_find_obj() at all, but rather than
+apply a local band-aid, this just fixes kset_find_obj() to be
+thread-safe, using the proper model of refusing to get a new reference
+if the refcount has already dropped to zero.
+
+Examples of this proper refcount handling can be seen not only in the
+kref documentation, but in various other equivalent uses of the same
+pattern, easily found by grepping for atomic_inc_not_zero().
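+
+To illustrate the model, here is a minimal userspace sketch of the
+refuse-on-zero semantics, with C11 atomics standing in for the kernel's
+atomic_t; the toy_* names are hypothetical and only mirror what
+atomic_inc_not_zero() does:
+
+  #include <stdatomic.h>
+  #include <stdio.h>
+
+  struct toy_kref { atomic_int refcount; };
+
+  /* Increment the refcount only if it is currently non-zero; returns
+   * 1 on success, 0 if the object is already on its way to being
+   * freed. */
+  static int toy_ref_get_unless_zero(struct toy_kref *ref)
+  {
+          int old = atomic_load(&ref->refcount);
+
+          while (old != 0) {
+                  /* Bump the count only if nobody dropped it to zero
+                   * between the load above and this exchange; on
+                   * failure, old is refreshed and we retry. */
+                  if (atomic_compare_exchange_weak(&ref->refcount,
+                                                   &old, old + 1))
+                          return 1;
+          }
+          return 0;       /* too late: the destructor owns the object */
+  }
+
+  int main(void)
+  {
+          struct toy_kref live  = { .refcount = 1 };
+          struct toy_kref dying = { .refcount = 0 };
+
+          printf("live:  %d\n", toy_ref_get_unless_zero(&live));  /* 1 */
+          printf("dying: %d\n", toy_ref_get_unless_zero(&dying)); /* 0 */
+          return 0;
+  }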
+
+[ Side note: the module race does indicate that module loading and
+ unloading is not properly serialized wrt sysfs information using the
+ module mutex. That may require further thought, but this is the
+ correct fix at the kobject layer regardless. ]
+
+Reported-analyzed-and-tested-by: Anatol Pomozov <anatol.pomozov@gmail.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/kobject.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -531,6 +531,13 @@ struct kobject *kobject_get(struct kobje
+ 	return kobj;
+ }
+
++static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
++{
++	if (!kref_get_unless_zero(&kobj->kref))
++		kobj = NULL;
++	return kobj;
++}
++
+ /*
+  * kobject_cleanup - free kobject resources.
+  * @kobj: object to cleanup
+@@ -779,13 +786,13 @@ struct kobject *kset_find_obj_hinted(str
+ 	if (!kobject_name(k) || strcmp(kobject_name(k), name))
+ 		goto slow_search;
+
+-	ret = kobject_get(k);
++	ret = kobject_get_unless_zero(k);
+ 	goto unlock_exit;
+
+ slow_search:
+ 	list_for_each_entry(k, &kset->list, entry) {
+ 		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
+-			ret = kobject_get(k);
++			ret = kobject_get_unless_zero(k);
+ 			break;
+ 		}
+ 	}
--- /dev/null
+From 4b20db3de8dab005b07c74161cb041db8c5ff3a7 Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom@vmware.com>
+Date: Tue, 6 Nov 2012 11:31:49 +0000
+Subject: kref: Implement kref_get_unless_zero v3
+
+From: Thomas Hellstrom <thellstrom@vmware.com>
+
+commit 4b20db3de8dab005b07c74161cb041db8c5ff3a7 upstream.
+
+This function is intended to simplify locking around refcounting for
+objects that can be looked up from a lookup structure, and which are
+removed from that lookup structure in the object destructor.
+Operations on such objects require at least a read lock around
+lookup + kref_get, and a write lock around kref_put + remove from the
+lookup structure. Furthermore, RCU implementations become extremely
+tricky. With a lookup followed by a kref_get_unless_zero *with return
+value check*, locking in the kref_put path can be deferred to the
+actual removal from the lookup structure, and RCU lookups become
+trivial.
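+
+A hypothetical usage sketch (the obj_* names and the list-based table
+are illustrative, not part of this patch): lookup needs only the table
+lock, and removal from the table moves into the release function, so
+kref_put() callers need no lock at all:
+
+  #include <linux/kernel.h>
+  #include <linux/kref.h>
+  #include <linux/list.h>
+  #include <linux/slab.h>
+  #include <linux/spinlock.h>
+
+  static DEFINE_SPINLOCK(table_lock);
+  static LIST_HEAD(obj_table);
+
+  struct obj {
+          struct kref kref;
+          struct list_head entry;
+          int id;
+  };
+
+  /* Lookup: the get-unless-zero check rejects objects whose last
+   * reference is already gone, so the table lock alone is enough. */
+  static struct obj *obj_lookup(int id)
+  {
+          struct obj *o;
+
+          spin_lock(&table_lock);
+          list_for_each_entry(o, &obj_table, entry) {
+                  if (o->id == id && kref_get_unless_zero(&o->kref)) {
+                          spin_unlock(&table_lock);
+                          return o;       /* caller holds a reference */
+                  }
+          }
+          spin_unlock(&table_lock);
+          return NULL;
+  }
+
+  /* Release: removal from the lookup structure happens here, so
+   * callers just do kref_put(&o->kref, obj_release) with no lock. */
+  static void obj_release(struct kref *kref)
+  {
+          struct obj *o = container_of(kref, struct obj, kref);
+
+          spin_lock(&table_lock);
+          list_del(&o->entry);
+          spin_unlock(&table_lock);
+          kfree(o);
+  }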
+
+v2: Formatting fixes.
+v3: Invert the return value.
+
+Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+diff --git a/include/linux/kref.h b/include/linux/kref.h
+index 65af688..4972e6e 100644
+--- a/include/linux/kref.h
++++ b/include/linux/kref.h
+@@ -111,4 +111,25 @@ static inline int kref_put_mutex(struct kref *kref,
+ 	}
+ 	return 0;
+ }
++
++/**
++ * kref_get_unless_zero - Increment refcount for object unless it is zero.
++ * @kref: object.
++ *
++ * Return non-zero if the increment succeeded. Otherwise return 0.
++ *
++ * This function is intended to simplify locking around refcounting for
++ * objects that can be looked up from a lookup structure, and which are
++ * removed from that lookup structure in the object destructor.
++ * Operations on such objects require at least a read lock around
++ * lookup + kref_get, and a write lock around kref_put + remove from
++ * the lookup structure. Furthermore, RCU implementations become
++ * extremely tricky. With a lookup followed by a kref_get_unless_zero
++ * *with return value check*, locking in the kref_put path can be
++ * deferred to the actual removal from the lookup structure, and RCU
++ * lookups become trivial.
++ */
++static inline int __must_check kref_get_unless_zero(struct kref *kref)
++{
++	return atomic_add_unless(&kref->refcount, 1, 0);
++}
+ #endif /* _KREF_H_ */