--- /dev/null
+Subject: sched: leave RT_GROUP_SCHED structure components intact to preserve kABI
+
+From: Gregory Haskins <ghaskins@novell.com>
+
+We have already committed to a kABI which includes components from
+RT_GROUP_SCHED. However, that feature has been found to be broken,
+so we shouldn't ship with it enabled. The best short-term solution may
+be to hack the structure components back in (while leaving the actual
+code disabled) to preserve compatibility.
+
+Signed-off-by: Gregory Haskins <ghaskins@novell.com>
+---
+
+ include/linux/sched.h | 3 +--
+ kernel/sched.c | 7 +------
+ 2 files changed, 2 insertions(+), 8 deletions(-)
+
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1021,13 +1021,12 @@ struct sched_rt_entity {
+ int nr_cpus_allowed;
+
+ struct sched_rt_entity *back;
+-#ifdef CONFIG_RT_GROUP_SCHED
++
+ struct sched_rt_entity *parent;
+ /* rq on which this entity is (to be) queued: */
+ struct rt_rq *rt_rq;
+ /* rq "owned" by this entity/group: */
+ struct rt_rq *my_q;
+-#endif
+ };
+
+ struct task_struct {
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -263,12 +263,10 @@ struct task_group {
+ unsigned long shares;
+ #endif
+
+-#ifdef CONFIG_RT_GROUP_SCHED
+ struct sched_rt_entity **rt_se;
+ struct rt_rq **rt_rq;
+
+ struct rt_bandwidth rt_bandwidth;
+-#endif
+
+ struct rcu_head rcu;
+ struct list_head list;
+@@ -454,14 +452,12 @@ struct rt_rq {
+ /* Nests inside the rq lock: */
+ spinlock_t rt_runtime_lock;
+
+-#ifdef CONFIG_RT_GROUP_SCHED
+ unsigned long rt_nr_boosted;
+
+ struct rq *rq;
+ struct list_head leaf_rt_rq_list;
+ struct task_group *tg;
+ struct sched_rt_entity *rt_se;
+-#endif
+ };
+
+ #ifdef CONFIG_SMP
+@@ -533,9 +529,8 @@ struct rq {
+ /* list of leaf cfs_rq on this cpu: */
+ struct list_head leaf_cfs_rq_list;
+ #endif
+-#ifdef CONFIG_RT_GROUP_SCHED
++
+ struct list_head leaf_rt_rq_list;
+-#endif
+
+ /*
+ * This is part of a global counter where only the total sum