/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 * http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include "edac_mc.h"
#include "edac_module.h"
#include <ras/ras_event.h>

#ifdef CONFIG_EDAC_ATOMIC_SCRUB
#include <asm/edac.h>
#else
#define edac_atomic_scrub(va, size) do { } while (0)
#endif

int edac_op_state = EDAC_OPSTATE_INVAL;
EXPORT_SYMBOL_GPL(edac_op_state);

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);

/*
 * Used to lock EDAC MC to just one module, avoiding two drivers (e.g.
 * apei/ghes and i7core_edac) being used at the same time.
 */
static const void *edac_mc_owner;

static struct bus_type mc_bus[EDAC_MAX_MCS];

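/*
 * edac_dimm_info_location - format the per-layer location of a DIMM
 *
 * Walks the layer hierarchy of the parent memory controller and appends
 * "<layer name> <position> " to @buf for each layer, stopping when @len
 * is exhausted. Returns the number of characters written.
 */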
unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
				 unsigned len)
{
	struct mem_ctl_info *mci = dimm->mci;
	int i, n, count = 0;
	char *p = buf;

	for (i = 0; i < mci->n_layers; i++) {
		n = snprintf(p, len, "%s %d ",
			     edac_layer_name[mci->layers[i].type],
			     dimm->location[i]);
		p += n;
		len -= n;
		count += n;
		if (!len)
			break;
	}

	return count;
}

#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
	edac_dbg(4, "  channel->chan_idx = %d\n", chan->chan_idx);
	edac_dbg(4, "    channel = %p\n", chan);
	edac_dbg(4, "    channel->csrow = %p\n", chan->csrow);
	edac_dbg(4, "    channel->dimm = %p\n", chan->dimm);
}

static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
{
	char location[80];

	edac_dimm_info_location(dimm, location, sizeof(location));

	edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
		 dimm->mci->csbased ? "rank" : "dimm",
		 number, location, dimm->csrow, dimm->cschannel);
	edac_dbg(4, "  dimm = %p\n", dimm);
	edac_dbg(4, "  dimm->label = '%s'\n", dimm->label);
	edac_dbg(4, "  dimm->nr_pages = 0x%x\n", dimm->nr_pages);
	edac_dbg(4, "  dimm->grain = %d\n", dimm->grain);
}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
	edac_dbg(4, "  csrow = %p\n", csrow);
	edac_dbg(4, "  csrow->first_page = 0x%lx\n", csrow->first_page);
	edac_dbg(4, "  csrow->last_page = 0x%lx\n", csrow->last_page);
	edac_dbg(4, "  csrow->page_mask = 0x%lx\n", csrow->page_mask);
	edac_dbg(4, "  csrow->nr_channels = %d\n", csrow->nr_channels);
	edac_dbg(4, "  csrow->channels = %p\n", csrow->channels);
	edac_dbg(4, "  csrow->mci = %p\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	edac_dbg(3, "\tmci = %p\n", mci);
	edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
	edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
	edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
		 mci->nr_csrows, mci->csrows);
	edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
		 mci->tot_dimms, mci->dimms);
	edac_dbg(3, "\tdev = %p\n", mci->pdev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 mci->mod_name, mci->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif				/* CONFIG_EDAC_DEBUG */

const char * const edac_mem_types[] = {
	[MEM_EMPTY]	= "Empty csrow",
	[MEM_RESERVED]	= "Reserved csrow type",
	[MEM_UNKNOWN]	= "Unknown csrow type",
	[MEM_FPM]	= "Fast page mode RAM",
	[MEM_EDO]	= "Extended data out RAM",
	[MEM_BEDO]	= "Burst Extended data out RAM",
	[MEM_SDR]	= "Single data rate SDRAM",
	[MEM_RDR]	= "Registered single data rate SDRAM",
	[MEM_DDR]	= "Double data rate SDRAM",
	[MEM_RDDR]	= "Registered Double data rate SDRAM",
	[MEM_RMBS]	= "Rambus DRAM",
	[MEM_DDR2]	= "Unbuffered DDR2 RAM",
	[MEM_FB_DDR2]	= "Fully buffered DDR2",
	[MEM_RDDR2]	= "Registered DDR2 RAM",
	[MEM_XDR]	= "Rambus XDR",
	[MEM_DDR3]	= "Unbuffered DDR3 RAM",
	[MEM_RDDR3]	= "Registered DDR3 RAM",
	[MEM_LRDDR3]	= "Load-Reduced DDR3 RAM",
	[MEM_DDR4]	= "Unbuffered DDR4 RAM",
	[MEM_RDDR4]	= "Registered DDR4 RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);

/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p: pointer to a pointer with the memory offset to be used. At
 *	return, this will be incremented to point to the next offset
 * @size: Size of the data structure to be reserved
 * @n_elems: Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is needed to keep advancing to the proper offsets in
 * memory when allocating a struct together with its embedded structs, as
 * edac_device_alloc_ctl_info() does, for example.
 *
 * At return, the pointer 'p' will be incremented to be used on a next call
 * to this function.
 */
void *edac_align_ptr(void **p, unsigned size, int n_elems)
{
	unsigned align, r;
	void *ptr = *p;

	*p += size * n_elems;

	/*
	 * 'p' can possibly be an unaligned item X such that sizeof(X) is
	 * 'size'. Adjust 'p' so that its alignment is at least as
	 * stringent as what the compiler would provide for X and return
	 * the aligned result.
	 * Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return (char *)ptr;

	/* align the running offset itself, not the address of the cursor */
	r = (unsigned long)ptr % align;

	if (r == 0)
		return (char *)ptr;

	*p += align - r;

	return (void *)(((unsigned long)ptr) + align - r);
}

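/*
 * _edac_mc_free - undo everything edac_mc_alloc() did: release each DIMM,
 * each channel array and csrow, and finally the mci structure itself.
 * Safe to call on a partially constructed mci, so it also serves as the
 * error path of edac_mc_alloc().
 */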
static void _edac_mc_free(struct mem_ctl_info *mci)
{
	int i, chn, row;
	struct csrow_info *csr;
	const unsigned int tot_dimms = mci->tot_dimms;
	const unsigned int tot_channels = mci->num_cschannel;
	const unsigned int tot_csrows = mci->nr_csrows;

	if (mci->dimms) {
		for (i = 0; i < tot_dimms; i++)
			kfree(mci->dimms[i]);
		kfree(mci->dimms);
	}
	if (mci->csrows) {
		for (row = 0; row < tot_csrows; row++) {
			csr = mci->csrows[row];
			if (csr) {
				if (csr->channels) {
					for (chn = 0; chn < tot_channels; chn++)
						kfree(csr->channels[chn]);
					kfree(csr->channels);
				}
				kfree(csr);
			}
		}
		kfree(mci->csrows);
	}
	kfree(mci);
}

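/*
 * edac_mc_alloc - allocate a mem_ctl_info structure together with its
 * layer array, per-layer error counters and private data in a single
 * allocation laid out by edac_align_ptr(); the csrow/channel and DIMM
 * structs are then allocated separately below.
 *
 * A hypothetical two-layer call, for illustration only (the variable and
 * struct names are made up, not taken from a real driver):
 *
 *	struct edac_mc_layer layers[2];
 *
 *	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
 *	layers[0].size = nr_csrows;
 *	layers[0].is_virt_csrow = true;
 *	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[1].size = nr_channels;
 *	layers[1].is_virt_csrow = false;
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_driver_pvt));
 */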
struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
				   unsigned n_layers,
				   struct edac_mc_layer *layers,
				   unsigned sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	struct csrow_info *csr;
	struct rank_info *chan;
	struct dimm_info *dimm;
	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
	unsigned pos[EDAC_MAX_LAYERS];
	unsigned size, tot_dimms = 1, count = 1;
	unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
	void *pvt, *p, *ptr = NULL;
	int i, j, row, chn, n, len, off;
	bool per_rank = false;

	BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
	/*
	 * Calculate the total amount of dimms and csrows/cschannels while
	 * in the old API emulation mode
	 */
	for (i = 0; i < n_layers; i++) {
		tot_dimms *= layers[i].size;
		if (layers[i].is_virt_csrow)
			tot_csrows *= layers[i].size;
		else
			tot_channels *= layers[i].size;

		if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}

	/* Figure out the offsets of the various items from the start of an mc
	 * structure. We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
	for (i = 0; i < n_layers; i++) {
		count *= layers[i].size;
		edac_dbg(4, "errcount layer %d size %d\n", i, count);
		ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		tot_errcount += 2 * count;
	}

	edac_dbg(4, "allocating %d error counters\n", tot_errcount);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	size = ((unsigned long)pvt) + sz_pvt;

	edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
		 size,
		 tot_dimms,
		 per_rank ? "ranks" : "dimms",
		 tot_csrows * tot_channels);

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
	for (i = 0; i < n_layers; i++) {
		mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
		mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
	}
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->tot_dimms = tot_dimms;
	mci->pvt_info = pvt;
	mci->n_layers = n_layers;
	mci->layers = layer;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->csbased = per_rank;

	/*
	 * Allocate and fill the csrow/channels structs
	 */
	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	if (!mci->csrows)
		goto error;
	for (row = 0; row < tot_csrows; row++) {
		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			goto error;
		mci->csrows[row] = csr;
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
					GFP_KERNEL);
		if (!csr->channels)
			goto error;

		for (chn = 0; chn < tot_channels; chn++) {
			chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
			if (!chan)
				goto error;
			csr->channels[chn] = chan;
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}

	/*
	 * Allocate and fill the dimm structs
	 */
	mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
	if (!mci->dimms)
		goto error;

	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	for (i = 0; i < tot_dimms; i++) {
		chan = mci->csrows[row]->channels[chn];
		off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
		if (off < 0 || off >= tot_dimms) {
			edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
			goto error;
		}

		dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
		if (!dimm)
			goto error;
		mci->dimms[off] = dimm;
		dimm->mci = mci;

		/*
		 * Copy DIMM location and initialize it.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = snprintf(p, len, "mc#%u", mc_num);
		p += n;
		len -= n;
		for (j = 0; j < n_layers; j++) {
			n = snprintf(p, len, "%s#%u",
				     edac_layer_name[layers[j].type],
				     pos[j]);
			p += n;
			len -= n;
			dimm->location[j] = pos[j];

			if (len <= 0)
				break;
		}

		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;

		/* Increment csrow location */
		if (layers[0].is_virt_csrow) {
			chn++;
			if (chn == tot_channels) {
				chn = 0;
				row++;
			}
		} else {
			row++;
			if (row == tot_csrows) {
				row = 0;
				chn++;
			}
		}

		/* Increment dimm location */
		for (j = n_layers - 1; j >= 0; j--) {
			pos[j]++;
			if (pos[j] < layers[j].size)
				break;
			pos[j] = 0;
		}
	}

	mci->op_state = OP_ALLOC;

	return mci;

error:
	_edac_mc_free(mci);

	return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);

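/*
 * edac_mc_free - free a mem_ctl_info returned by edac_mc_alloc(). If the
 * mci was never registered with sysfs, everything is torn down directly;
 * otherwise the actual freeing is deferred until the sysfs object is
 * dropped.
 */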
void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_dbg(1, "\n");

	/* If we're not yet registered with sysfs free only what was allocated
	 * in edac_mc_alloc().
	 */
	if (!device_is_registered(&mci->dev)) {
		_edac_mc_free(mci);
		return;
	}

	/* the mci instance is freed here, when the sysfs object is dropped */
	edac_unregister_sysfs(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);

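/* edac_has_mcs - return true if at least one memory controller is registered */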
bool edac_has_mcs(void)
{
	bool ret;

	mutex_lock(&mem_ctls_mutex);

	ret = list_empty(&mc_devices);

	mutex_unlock(&mem_ctls_mutex);

	return !ret;
}
EXPORT_SYMBOL_GPL(edac_has_mcs);

/* Caller must hold mem_ctls_mutex */
static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	edac_dbg(3, "\n");

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->pdev == dev)
			return mci;
	}

	return NULL;
}

/**
 * find_mci_by_dev - scan the list of controllers looking for the one that
 *	manages the 'dev' device
 * @dev: pointer to a struct device related to the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *ret;

	mutex_lock(&mem_ctls_mutex);
	ret = __find_mci_by_dev(dev);
	mutex_unlock(&mem_ctls_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);

/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	if (mci->op_state != OP_RUNNING_POLL) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Queue ourselves again. */
	edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}

/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *	user space has updated our poll period value, so reset our
 *	workq delays
 */
void edac_mc_reset_delay_period(unsigned long value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->op_state == OP_RUNNING_POLL)
			edac_mod_work(&mci->work, value);
	}
	mutex_unlock(&mem_ctls_mutex);
}

/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = __find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		    "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		    edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		    "bug in low-level driver: attempt to assign\n"
		    "    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}

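/*
 * del_mc_from_global_list - unlink mci from the global list; returns
 * true when the list becomes empty, so the caller can drop the module
 * ownership of the EDAC core.
 */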
static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);

	return list_empty(&mc_devices);
}

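/*
 * edac_mc_find - look up the registered mem_ctl_info with index @idx.
 * Returns NULL when no controller with that index exists. The list is
 * kept sorted by mc_idx, so the walk can stop early.
 */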
struct mem_ctl_info *edac_mc_find(int idx)
{
	struct mem_ctl_info *mci = NULL;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->mc_idx >= idx) {
			if (mci->mc_idx == idx)
				goto unlock;
			break;
		}
	}

	/* not found: don't leak the last list entry to the caller */
	mci = NULL;
unlock:
	mutex_unlock(&mem_ctls_mutex);
	return mci;
}
EXPORT_SYMBOL(edac_mc_find);

/* FIXME - should a warning be printed if no error detection? correction? */
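/*
 * edac_mc_add_mc_with_groups - insert @mci into the global list, create
 * its sysfs entries (plus the optional attribute @groups) and start the
 * polling work when the driver provides an edac_check() callback;
 * otherwise the controller is marked as interrupt driven. Returns 0 on
 * success.
 */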
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
			       const struct attribute_group **groups)
{
	int ret = -EINVAL;
	edac_dbg(0, "\n");

	if (mci->mc_idx >= EDAC_MAX_MCS) {
		pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
		return -ENODEV;
	}

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			struct csrow_info *csrow = mci->csrows[i];
			u32 nr_pages = 0;
			int j;

			for (j = 0; j < csrow->nr_channels; j++)
				nr_pages += csrow->channels[j]->dimm->nr_pages;
			if (!nr_pages)
				continue;
			edac_mc_dump_csrow(csrow);
			for (j = 0; j < csrow->nr_channels; j++)
				if (csrow->channels[j]->dimm->nr_pages)
					edac_mc_dump_channel(csrow->channels[j]);
		}
		for (i = 0; i < mci->tot_dimms; i++)
			if (mci->dimms[i]->nr_pages)
				edac_mc_dump_dimm(mci->dimms[i], i);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
		ret = -EPERM;
		goto fail0;
	}

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	mci->bus = &mc_bus[mci->mc_idx];

	if (edac_create_sysfs_mci_device(mci, groups)) {
		edac_mc_printk(mci, KERN_WARNING,
			       "failed to create sysfs device\n");
		goto fail1;
	}

	if (mci->edac_check) {
		mci->op_state = OP_RUNNING_POLL;

		INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
		edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));

	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO,
		       "Giving out device to module %s controller %s: DEV %s (%s)\n",
		       mci->mod_name, mci->ctl_name, mci->dev_name,
		       edac_op_state_to_string(mci->op_state));

	edac_mc_owner = mci->mod_name;

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);

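/*
 * edac_mc_del_mc - take the controller that manages @dev offline: unlink
 * it from the global list, stop the polling work and remove the sysfs
 * entries. Returns the mci pointer so the driver can free it, or NULL if
 * no controller manages @dev.
 */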
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = __find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	/* mark MCI offline: */
	mci->op_state = OP_OFFLINE;

	if (del_mc_from_global_list(mci))
		edac_mc_owner = NULL;

	mutex_unlock(&mem_ctls_mutex);

	if (mci->edac_check)
		edac_stop_work(&mci->work);

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		    "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		    mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);

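/*
 * edac_mc_scrub_block - software-scrub a block of memory: map the page
 * (kmap_atomic, with IRQs disabled for highmem) and run the
 * architecture-specific atomic read/write cycle over it so that
 * corrected data gets written back.
 */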
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	edac_dbg(3, "\n");

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	edac_atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}

/* FIXME - should return -1 */
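/*
 * edac_mc_find_csrow_by_page - return the index of the csrow whose page
 * range and page mask match @page, skipping csrows with no pages; logs
 * an error and returns -1 when no csrow matches.
 */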
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info **csrows = mci->csrows;
	int row, i, j, n;

	edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = csrows[i];
		n = 0;
		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;
			n += dimm->nr_pages;
		}
		if (n == 0)
			continue;

		edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
			 mci->mc_idx,
			 csrow->first_page, page, csrow->last_page,
			 csrow->page_mask);

		if ((page >= csrow->first_page) &&
		    (page <= csrow->last_page) &&
		    ((page & csrow->page_mask) ==
		     (csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			       "could not look up page error address %lx\n",
			       (unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);

const char *edac_layer_name[] = {
	[EDAC_MC_LAYER_BRANCH] = "branch",
	[EDAC_MC_LAYER_CHANNEL] = "channel",
	[EDAC_MC_LAYER_SLOT] = "slot",
	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
	[EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);

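/*
 * edac_inc_ce_error/edac_inc_ue_error - bump the MC-wide error counters
 * and, when the position within each layer is known, the flattened
 * per-layer counters as well; unknown locations only feed the "noinfo"
 * counters.
 */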
static void edac_inc_ce_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS],
			      const u16 count)
{
	int i, index = 0;

	mci->ce_mc += count;

	if (!enable_per_layer_report) {
		mci->ce_noinfo_count += count;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ce_per_layer[i][index] += count;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}

875}
876
877static void edac_inc_ue_error(struct mem_ctl_info *mci,
878 bool enable_per_layer_report,
9eb07a7f
MCC
879 const int pos[EDAC_MAX_LAYERS],
880 const u16 count)
4275be63
MCC
881{
882 int i, index = 0;
883
9eb07a7f 884 mci->ue_mc += count;
4275be63
MCC
885
886 if (!enable_per_layer_report) {
993f88f1 887 mci->ue_noinfo_count += count;
da9bb1d2
AC
888 return;
889 }
890
4275be63
MCC
891 for (i = 0; i < mci->n_layers; i++) {
892 if (pos[i] < 0)
893 break;
894 index += pos[i];
9eb07a7f 895 mci->ue_per_layer[i][index] += count;
a7d7d2e1 896
4275be63
MCC
897 if (i < mci->n_layers - 1)
898 index *= mci->layers[i + 1].size;
899 }
900}
da9bb1d2 901
4275be63 902static void edac_ce_error(struct mem_ctl_info *mci,
9eb07a7f 903 const u16 error_count,
4275be63
MCC
904 const int pos[EDAC_MAX_LAYERS],
905 const char *msg,
906 const char *location,
907 const char *label,
908 const char *detail,
909 const char *other_detail,
910 const bool enable_per_layer_report,
911 const unsigned long page_frame_number,
912 const unsigned long offset_in_page,
53f2d028 913 long grain)
4275be63
MCC
914{
915 unsigned long remapped_page;
f430d570
BP
916 char *msg_aux = "";
917
918 if (*msg)
919 msg_aux = " ";
4275be63
MCC
920
921 if (edac_mc_get_log_ce()) {
922 if (other_detail && *other_detail)
923 edac_mc_printk(mci, KERN_WARNING,
f430d570
BP
924 "%d CE %s%son %s (%s %s - %s)\n",
925 error_count, msg, msg_aux, label,
926 location, detail, other_detail);
4275be63
MCC
927 else
928 edac_mc_printk(mci, KERN_WARNING,
f430d570
BP
929 "%d CE %s%son %s (%s %s)\n",
930 error_count, msg, msg_aux, label,
931 location, detail);
4275be63 932 }
9eb07a7f 933 edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);
da9bb1d2 934
aa2064d7 935 if (mci->scrub_mode == SCRUB_SW_SRC) {
da9bb1d2 936 /*
4275be63
MCC
937 * Some memory controllers (called MCs below) can remap
938 * memory so that it is still available at a different
939 * address when PCI devices map into memory.
940 * MC's that can't do this, lose the memory where PCI
941 * devices are mapped. This mapping is MC-dependent
942 * and so we call back into the MC driver for it to
943 * map the MC page to a physical (CPU) page which can
944 * then be mapped to a virtual page - which can then
945 * be scrubbed.
946 */
da9bb1d2 947 remapped_page = mci->ctl_page_to_phys ?
052dfb45
DT
948 mci->ctl_page_to_phys(mci, page_frame_number) :
949 page_frame_number;
da9bb1d2 950
4275be63
MCC
951 edac_mc_scrub_block(remapped_page,
952 offset_in_page, grain);
da9bb1d2
AC
953 }
954}
955
static void edac_ue_error(struct mem_ctl_info *mci,
			  const u16 error_count,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report)
{
	char *msg_aux = "";

	if (*msg)
		msg_aux = " ";

	if (edac_mc_get_log_ue()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "%d UE %s%son %s (%s %s - %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail, other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "%d UE %s%son %s (%s %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail);
	}

	if (edac_mc_get_panic_on_ue()) {
		if (other_detail && *other_detail)
			panic("UE %s%son %s (%s%s - %s)\n",
			      msg, msg_aux, label, location, detail, other_detail);
		else
			panic("UE %s%son %s (%s%s)\n",
			      msg, msg_aux, label, location, detail);
	}

	edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}

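/*
 * edac_raw_mc_handle_error - second half of the error path: format the
 * memory-type dependent detail string from an already filled
 * edac_raw_error_desc and dispatch to the CE or UE handler. Drivers that
 * fill struct edac_raw_error_desc themselves (e.g. the ghes driver) can
 * call this directly instead of edac_mc_handle_error().
 */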
void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
			      struct mem_ctl_info *mci,
			      struct edac_raw_error_desc *e)
{
	char detail[80];
	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };

	/* Memory type dependent details about the error */
	if (type == HW_EVENT_ERR_CORRECTED) {
		snprintf(detail, sizeof(detail),
			 "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
			 e->page_frame_number, e->offset_in_page,
			 e->grain, e->syndrome);
		edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
			      detail, e->other_detail, e->enable_per_layer_report,
			      e->page_frame_number, e->offset_in_page, e->grain);
	} else {
		snprintf(detail, sizeof(detail),
			 "page:0x%lx offset:0x%lx grain:%ld",
			 e->page_frame_number, e->offset_in_page, e->grain);

		edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
			      detail, e->other_detail, e->enable_per_layer_report);
	}
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);

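/*
 * edac_mc_handle_error - main error-reporting entry point for MC drivers:
 * fills mci->error_desc, locates the DIMM label(s) matching the layer
 * positions, updates the counters, emits the mc_event tracepoint and
 * hands off to edac_raw_mc_handle_error().
 *
 * A hypothetical call from a driver's check routine, for illustration
 * only (the pfn/offset/syndrome/chan/rank variables are made up); pass -1
 * for any layer whose position is unknown:
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 *			     pfn, offset, syndrome,
 *			     chan, rank, -1,
 *			     "read error", "");
 */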
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
			  struct mem_ctl_info *mci,
			  const u16 error_count,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  const unsigned long syndrome,
			  const int top_layer,
			  const int mid_layer,
			  const int low_layer,
			  const char *msg,
			  const char *other_detail)
{
	char *p;
	int row = -1, chan = -1;
	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
	int i, n_labels = 0;
	u8 grain_bits;
	struct edac_raw_error_desc *e = &mci->error_desc;

	edac_dbg(3, "MC%d\n", mci->mc_idx);

	/* Fills the error report buffer */
	memset(e, 0, sizeof(*e));
	e->error_count = error_count;
	e->top_layer = top_layer;
	e->mid_layer = mid_layer;
	e->low_layer = low_layer;
	e->page_frame_number = page_frame_number;
	e->offset_in_page = offset_in_page;
	e->syndrome = syndrome;
	e->msg = msg;
	e->other_detail = other_detail;

	/*
	 * Check if the event report is consistent and if the memory
	 * location is known. If it is known, enable_per_layer_report will be
	 * true, the DIMM(s) label info will be filled and the per-layer
	 * error counters will be incremented.
	 */
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] >= (int)mci->layers[i].size) {

			edac_mc_printk(mci, KERN_ERR,
				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
				       edac_layer_name[mci->layers[i].type],
				       pos[i], mci->layers[i].size);
			/*
			 * Instead of just returning it, let's use what's
			 * known about the error. The increment routines and
			 * the DIMM filter logic will do the right thing by
			 * pointing the likely damaged DIMMs.
			 */
			pos[i] = -1;
		}
		if (pos[i] >= 0)
			e->enable_per_layer_report = true;
	}

	/*
	 * Get the dimm label/grain that applies to the match criteria.
	 * As the error algorithm may not be able to point to just one memory
	 * stick, the logic here will get all possible labels that could
	 * potentially be affected by the error.
	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
	 * to have only the MC channel and the MC dimm (also called "branch")
	 * but the channel is not known, as the memory is arranged in pairs,
	 * where each memory belongs to a separate channel within the same
	 * branch.
	 */
	p = e->label;
	*p = '\0';

	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];

		if (top_layer >= 0 && top_layer != dimm->location[0])
			continue;
		if (mid_layer >= 0 && mid_layer != dimm->location[1])
			continue;
		if (low_layer >= 0 && low_layer != dimm->location[2])
			continue;

		/* get the max grain, over the error match range */
		if (dimm->grain > e->grain)
			e->grain = dimm->grain;

		/*
		 * If the error is memory-controller wide, there's no need to
		 * seek for the affected DIMMs because the whole
		 * channel/memory controller/... may be affected.
		 * Also, don't show errors for empty DIMM slots.
		 */
		if (e->enable_per_layer_report && dimm->nr_pages) {
			if (n_labels >= EDAC_MAX_LABELS) {
				e->enable_per_layer_report = false;
				break;
			}
			n_labels++;
			if (p != e->label) {
				strcpy(p, OTHER_LABEL);
				p += strlen(OTHER_LABEL);
			}
			strcpy(p, dimm->label);
			p += strlen(p);
			*p = '\0';

			/*
			 * get csrow/channel of the DIMM, in order to allow
			 * incrementing the compat API counters
			 */
			edac_dbg(4, "%s csrows map: (%d,%d)\n",
				 mci->csbased ? "rank" : "dimm",
				 dimm->csrow, dimm->cschannel);
			if (row == -1)
				row = dimm->csrow;
			else if (row >= 0 && row != dimm->csrow)
				row = -2;

			if (chan == -1)
				chan = dimm->cschannel;
			else if (chan >= 0 && chan != dimm->cschannel)
				chan = -2;
		}
	}

	if (!e->enable_per_layer_report) {
		strcpy(e->label, "any memory");
	} else {
		edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
		if (p == e->label)
			strcpy(e->label, "unknown memory");
		if (type == HW_EVENT_ERR_CORRECTED) {
			if (row >= 0) {
				mci->csrows[row]->ce_count += error_count;
				if (chan >= 0)
					mci->csrows[row]->channels[chan]->ce_count += error_count;
			}
		} else
			if (row >= 0)
				mci->csrows[row]->ue_count += error_count;
	}

	/* Fill the RAM location data */
	p = e->location;

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			continue;

		p += sprintf(p, "%s:%d ",
			     edac_layer_name[mci->layers[i].type],
			     pos[i]);
	}
	if (p > e->location)
		*(p - 1) = '\0';

	/* Report the error via the trace interface */
	grain_bits = fls_long(e->grain) + 1;

	if (IS_ENABLED(CONFIG_RAS))
		trace_mc_event(type, e->msg, e->label, e->error_count,
			       mci->mc_idx, e->top_layer, e->mid_layer,
			       e->low_layer,
			       (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
			       grain_bits, e->syndrome, e->other_detail);

	edac_raw_mc_handle_error(type, mci, e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
4275be63 1193EXPORT_SYMBOL_GPL(edac_mc_handle_error);