Subject: [PATCH] ehea: Fix memory hotplug support
Subject: [PATCH] ehea: Add hugepage detection
From: Hannes Hering <hannes.hering@linux.vnet.ibm.com>
References: 436447 - LTC 48713
References: 439599 - LTC 48958
References: 477972 - LTC 51731

This patch implements the memory notifier to update the busmap instantly
instead of rebuilding the whole map. This is necessary because
walk_memory_resource provides different information than is required
during memory hotplug.

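For reference, hotplug events reach the driver through the kernel's memory
notifier chain. A minimal sketch of that mechanism, assuming the 2.6.27
memory-hotplug API (the names example_mem_notifier/example_mem_nb are
hypothetical; the real handler is ehea_mem_notifier in the diff below):

    #include <linux/memory.h>
    #include <linux/notifier.h>

    /* Illustrative handler: data points to a struct memory_notify
     * describing the memory block being onlined or offlined. */
    static int example_mem_notifier(struct notifier_block *nb,
                                    unsigned long action, void *data)
    {
        struct memory_notify *arg = data;

        switch (action) {
        case MEM_ONLINE:
            /* add arg->start_pfn .. arg->start_pfn + arg->nr_pages
             * to the driver's mapping */
            break;
        case MEM_GOING_OFFLINE:
            /* drop the range before the pages disappear */
            break;
        }
        return NOTIFY_OK;
    }

    static struct notifier_block example_mem_nb = {
        .notifier_call = example_mem_notifier,
    };

    /* registered once at module init:
     *     register_memory_notifier(&example_mem_nb);
     */
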
...

All kernel memory that is used for kernel/hardware data transfer must be
registered with firmware using "memory regions". 16GB hugepages may not be
part of a memory region due to firmware restrictions. This patch modifies
the walk_memory_resource callback function to filter out hugepages and add
only standard memory to the busmap, which is later used for MR registration.

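To make the filtering concrete: with EHEA_HUGEPAGESHIFT = 34 a hugepage
covers 16GB, so with 4KB pages it spans 1 << 22 page frames and a pfn can
only start a hugepage if it is 16GB-aligned. A standalone sketch of that
alignment test (names are illustrative; the driver's ehea_is_hugepage()
additionally checks compound_order() of the page):

    #define HUGEPAGE_SHIFT    34                      /* 16GB = 1UL << 34 */
    #define HUGEPAGE_SIZE     (1UL << HUGEPAGE_SHIFT)
    #define HUGEPAGE_PFN_MASK ((HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)

    /* All pfn bits below (HUGEPAGE_SHIFT - PAGE_SHIFT) must be zero
     * for the pfn to be 16GB-aligned. */
    static int pfn_may_start_hugepage(unsigned long pfn)
    {
        return (pfn & HUGEPAGE_PFN_MASK) == 0;
    }
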
...

Added a missing set_bit() to disable data transfer while a memory change
notification is being handled.

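The flag follows the usual atomic bit-flag pattern around the
re-registration (both halves appear in the diff below):

    set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);   /* quiesce transfers */
    /* ... update busmap, re-register memory regions ... */
    clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); /* resume transfers */
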
...

Signed-off-by: Thomas Klein <tklein@de.ibm.com>
Signed-off-by: Hannes Hering <hering2@de.ibm.com>
Signed-off-by: Olaf Hering <olh@suse.de>
---
 drivers/net/ehea/ehea.h      |    4 
 drivers/net/ehea/ehea_main.c |   27 ++++--
 drivers/net/ehea/ehea_phyp.c |    2 
 drivers/net/ehea/ehea_qmr.c  |  175 ++++++++++++++++++++++++++++++++++++-------
 drivers/net/ehea/ehea_qmr.h  |    5 +
 5 files changed, 172 insertions(+), 41 deletions(-)

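For orientation before the diff: the busmap is a three-level table
(top/dir/idx) indexed by memory-section number. A minimal lookup sketch
under assumed parameters (EHEA_MAP_ENTRIES and the real shift constants
live in ehea_qmr.h; the width used below is illustrative only):

    /* Hypothetical three-level lookup: a section index is split into
     * top/dir/idx fields; a missing level means the section is unmapped. */
    #define MAP_ENTRIES  16                /* illustrative table width */
    #define IDX_BITS     4                 /* log2(MAP_ENTRIES)        */

    static u64 busmap_lookup(struct ehea_bmap *bmap, unsigned long sec)
    {
        int top = (sec >> (2 * IDX_BITS)) & (MAP_ENTRIES - 1);
        int dir = (sec >> IDX_BITS) & (MAP_ENTRIES - 1);
        int idx = sec & (MAP_ENTRIES - 1);

        if (!bmap->top[top] || !bmap->top[top]->dir[dir])
            return 0;    /* section not registered */
        return bmap->top[top]->dir[dir]->ent[idx];
    }
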
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,13 +40,13 @@
 #include <asm/io.h>
 
 #define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0092"
+#define DRV_VERSION "EHEA_0094-02"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
 #define DLPAR_MEM_ADD 2
 #define DLPAR_MEM_REM 4
-#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD)
+#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 			  | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2869,7 +2869,7 @@ static void ehea_rereg_mrs(struct work_s
 	struct ehea_adapter *adapter;
 
 	mutex_lock(&dlpar_mem_lock);
-	ehea_info("LPAR memory enlarged - re-initializing driver");
+	ehea_info("LPAR memory changed - re-initializing driver");
 
 	list_for_each_entry(adapter, &adapter_list, list)
 		if (adapter->active_ports) {
@@ -2906,13 +2906,6 @@ static void ehea_rereg_mrs(struct work_s
 		}
 	}
 
-	ehea_destroy_busmap();
-	ret = ehea_create_busmap();
-	if (ret) {
-		ehea_error("creating ehea busmap failed");
-		goto out;
-	}
-
 	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 
 	list_for_each_entry(adapter, &adapter_list, list)
@@ -3525,9 +3518,23 @@ void ehea_crash_handler(void)
 static int ehea_mem_notifier(struct notifier_block *nb,
 			     unsigned long action, void *data)
 {
+	struct memory_notify *arg = data;
 	switch (action) {
-	case MEM_OFFLINE:
-		ehea_info("memory has been removed");
+	case MEM_CANCEL_OFFLINE:
+		ehea_info("memory offlining canceled");
+		/* Readd canceled memory block */
+	case MEM_ONLINE:
+		ehea_info("memory is going online");
+		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+			return NOTIFY_BAD;
+		ehea_rereg_mrs(NULL);
+		break;
+	case MEM_GOING_OFFLINE:
+		ehea_info("memory is going offline");
+		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+			return NOTIFY_BAD;
 		ehea_rereg_mrs(NULL);
 		break;
 	default:
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -535,7 +535,7 @@ u64 ehea_h_query_ehea(const u64 adapter_
 				       cb_logaddr,		/* R5 */
 				       0, 0, 0, 0, 0);	/* R6-R10 */
 #ifdef DEBUG
-	ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
+	ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
 #endif
 	return hret;
 }
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -567,7 +567,7 @@ static inline int ehea_calc_index(unsign
 static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
 				     int dir)
 {
-	if(!ehea_top_bmap->dir[dir]) {
+	if (!ehea_top_bmap->dir[dir]) {
 		ehea_top_bmap->dir[dir] =
 			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
 		if (!ehea_top_bmap->dir[dir])
@@ -578,7 +578,7 @@ static inline int ehea_init_top_bmap(str
 
 static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
 {
-	if(!ehea_bmap->top[top]) {
+	if (!ehea_bmap->top[top]) {
 		ehea_bmap->top[top] =
 			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
 		if (!ehea_bmap->top[top])
@@ -587,52 +587,171 @@ static inline int ehea_init_bmap(struct
 	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
 }
 
-static int ehea_create_busmap_callback(unsigned long pfn,
-				       unsigned long nr_pages, void *arg)
+static DEFINE_MUTEX(ehea_busmap_mutex);
+static unsigned long ehea_mr_len;
+
+#define EHEA_BUSMAP_ADD_SECT 1
+#define EHEA_BUSMAP_REM_SECT 0
+
+static void ehea_rebuild_busmap(void)
+{
+	u64 vaddr = EHEA_BUSMAP_START;
+	int top, dir, idx;
+
+	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+		struct ehea_top_bmap *ehea_top;
+		int valid_dir_entries = 0;
+
+		if (!ehea_bmap->top[top])
+			continue;
+		ehea_top = ehea_bmap->top[top];
+		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+			struct ehea_dir_bmap *ehea_dir;
+			int valid_entries = 0;
+
+			if (!ehea_top->dir[dir])
+				continue;
+			valid_dir_entries++;
+			ehea_dir = ehea_top->dir[dir];
+			for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
+				if (!ehea_dir->ent[idx])
+					continue;
+				valid_entries++;
+				ehea_dir->ent[idx] = vaddr;
+				vaddr += EHEA_SECTSIZE;
+			}
+			if (!valid_entries) {
+				ehea_top->dir[dir] = NULL;
+				kfree(ehea_dir);
+			}
+		}
+		if (!valid_dir_entries) {
+			ehea_bmap->top[top] = NULL;
+			kfree(ehea_top);
+		}
+	}
+}
+
+static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
 {
-	unsigned long i, mr_len, start_section, end_section;
+	unsigned long i, start_section, end_section;
+
+	if (!nr_pages)
+		return 0;
+
+	if (!ehea_bmap) {
+		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
+		if (!ehea_bmap)
+			return -ENOMEM;
+	}
+
 	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
 	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
-	mr_len = *(unsigned long *)arg;
+	/* Mark entries as valid or invalid only; address is assigned later */
+	for (i = start_section; i < end_section; i++) {
+		u64 flag;
+		int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
+		int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
+		int idx = i & EHEA_INDEX_MASK;
+
+		if (add) {
+			int ret = ehea_init_bmap(ehea_bmap, top, dir);
+			if (ret)
+				return ret;
+			flag = 1; /* valid */
+			ehea_mr_len += EHEA_SECTSIZE;
+		} else {
+			if (!ehea_bmap->top[top])
+				continue;
+			if (!ehea_bmap->top[top]->dir[dir])
+				continue;
+			flag = 0; /* invalid */
+			ehea_mr_len -= EHEA_SECTSIZE;
+		}
 
-	ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
-	if (!ehea_bmap)
-		return -ENOMEM;
+		ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
+	}
+	ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
+	return 0;
+}
 
-	for (i = start_section; i < end_section; i++) {
-		int ret;
-		int top, dir, idx;
-		u64 vaddr;
+int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
+{
+	int ret;
 
-		top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
-		dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
+	mutex_lock(&ehea_busmap_mutex);
+	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
+	mutex_unlock(&ehea_busmap_mutex);
+	return ret;
+}
 
-		ret = ehea_init_bmap(ehea_bmap, top, dir);
-		if(ret)
-			return ret;
+int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
+{
+	int ret;
 
-		idx = i & EHEA_INDEX_MASK;
-		vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE;
+	mutex_lock(&ehea_busmap_mutex);
+	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
+	mutex_unlock(&ehea_busmap_mutex);
+	return ret;
+}
 
-		ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr;
-	}
+static int ehea_is_hugepage(unsigned long pfn)
+{
+	int page_order;
 
-	mr_len += nr_pages * PAGE_SIZE;
-	*(unsigned long *)arg = mr_len;
+	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
+		return 0;
 
-	return 0;
+	page_order = compound_order(pfn_to_page(pfn));
+	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
+		return 0;
+
+	return 1;
 }
 
-static unsigned long ehea_mr_len;
+static int ehea_create_busmap_callback(unsigned long initial_pfn,
+				       unsigned long total_nr_pages, void *arg)
+{
+	int ret;
+	unsigned long pfn, start_pfn, end_pfn, nr_pages;
 
-static DEFINE_MUTEX(ehea_busmap_mutex);
+	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
+		return ehea_update_busmap(initial_pfn, total_nr_pages,
+					  EHEA_BUSMAP_ADD_SECT);
+
+	/* Given chunk is >= 16GB -> check for hugepages */
+	start_pfn = initial_pfn;
+	end_pfn = initial_pfn + total_nr_pages;
+	pfn = start_pfn;
+
+	while (pfn < end_pfn) {
+		if (ehea_is_hugepage(pfn)) {
+			/* Add mem found in front of the hugepage */
+			nr_pages = pfn - start_pfn;
+			ret = ehea_update_busmap(start_pfn, nr_pages,
+						 EHEA_BUSMAP_ADD_SECT);
+			if (ret)
+				return ret;
+
+			/* Skip the hugepage */
+			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
+			start_pfn = pfn;
+		} else
+			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
+	}
+
+	/* Add mem found behind the hugepage(s) */
+	nr_pages = pfn - start_pfn;
+	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
+}
 
 int ehea_create_busmap(void)
 {
 	int ret;
+
 	mutex_lock(&ehea_busmap_mutex);
 	ehea_mr_len = 0;
-	ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len,
+	ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
 				   ehea_create_busmap_callback);
 	mutex_unlock(&ehea_busmap_mutex);
 	return ret;
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -40,6 +40,9 @@
 #define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
 #define EHEA_SECTSIZE (1UL << 24)
 #define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
+#define EHEA_HUGEPAGESHIFT 34
+#define EHEA_HUGEPAGE_SIZE (1UL << EHEA_HUGEPAGESHIFT)
+#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
 
 #if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
 #error eHEA module cannot work if kernel sectionsize < ehea sectionsize
@@ -378,6 +381,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
 
 void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
 
+int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
+int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
 int ehea_create_busmap(void);
 void ehea_destroy_busmap(void);
 u64 ehea_map_vaddr(void *caddr);