// SPDX-License-Identifier: GPL-2.0
/*
 * Provide a pstore intermediate backend, organized into kernel memory
 * allocated zones that are then mapped and flushed into a single
 * contiguous region on a storage backend of some kind (block, mtd, etc).
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/printk.h>
#include <linux/fs.h>
#include <linux/pstore_zone.h>
#include <linux/kdev_t.h>
#include <linux/device.h>
#include <linux/namei.h>
#include <linux/fcntl.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include "internal.h"

/**
 * struct psz_buffer - header of zone to flush to storage
 *
 * @sig: signature to indicate header (PSZ_SIG xor PSZONE-type value)
 * @datalen: length of data in @data
 * @start: offset into @data where the stored bytes begin
 * @data: zone data.
 */
struct psz_buffer {
#define PSZ_SIG (0x43474244) /* DBGC */
	uint32_t sig;
	atomic_t datalen;
	atomic_t start;
	uint8_t data[];
};
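
/*
 * On storage, each zone occupies a fixed slot starting at zone->off:
 * the struct psz_buffer header (sig, datalen, start) immediately
 * followed by up to zone->buffer_size bytes of payload, i.e.:
 *
 *   zone->off: | sig | datalen | start | data[0 .. buffer_size - 1] |
 *
 * FLUSH_ALL below writes header plus payload in one call, while
 * FLUSH_META rewrites only the header.
 */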

/**
 * struct psz_kmsg_header - kmsg dump-specific header to flush to storage
 *
 * @magic: magic num for kmsg dump header
 * @time: kmsg dump trigger time
 * @compressed: whether compressed
 * @counter: kmsg dump counter
 * @reason: the kmsg dump reason (e.g. oops, panic, etc)
 * @data: pointer to log data
 *
 * This is a sub-header for a kmsg dump, trailing after &psz_buffer.
 */
struct psz_kmsg_header {
#define PSTORE_KMSG_HEADER_MAGIC 0x4dfc3ae5 /* Just a random number */
	uint32_t magic;
	struct timespec64 time;
	bool compressed;
	uint32_t counter;
	enum kmsg_dump_reason reason;
	uint8_t data[];
};

/**
 * struct pstore_zone - single stored buffer
 *
 * @off: zone offset of storage
 * @type: front-end type for this zone
 * @name: front-end name for this zone
 * @buffer: pointer to data buffer managed by this zone
 * @oldbuf: pointer to old data buffer
 * @buffer_size: bytes in @buffer->data
 * @should_recover: whether this zone should recover from storage
 * @dirty: whether the data in @buffer is dirty
 *
 * zone structure in memory.
 */
struct pstore_zone {
	loff_t off;
	const char *name;
	enum pstore_type_id type;

	struct psz_buffer *buffer;
	struct psz_buffer *oldbuf;
	size_t buffer_size;
	bool should_recover;
	atomic_t dirty;
};

/**
 * struct psz_context - all about running state of pstore/zone
 *
 * @kpszs: kmsg dump storage zones
 * @ppsz: pmsg storage zone
 * @cpsz: console storage zone
 * @fpszs: ftrace storage zones
 * @kmsg_max_cnt: max count of @kpszs
 * @kmsg_read_cnt: counter of total read kmsg dumps
 * @kmsg_write_cnt: counter of total kmsg dump writes
 * @pmsg_read_cnt: counter of total read pmsg zone
 * @console_read_cnt: counter of total read console zone
 * @ftrace_max_cnt: max count of @fpszs
 * @ftrace_read_cnt: counter of total read ftrace zones
 * @oops_counter: counter of oops dumps
 * @panic_counter: counter of panic dumps
 * @recovered: whether finished recovering data from storage
 * @on_panic: whether panic is happening
 * @pstore_zone_info_lock: lock to @pstore_zone_info
 * @pstore_zone_info: information from backend
 * @pstore: structure for pstore
 */
struct psz_context {
	struct pstore_zone **kpszs;
	struct pstore_zone *ppsz;
	struct pstore_zone *cpsz;
	struct pstore_zone **fpszs;
	unsigned int kmsg_max_cnt;
	unsigned int kmsg_read_cnt;
	unsigned int kmsg_write_cnt;
	unsigned int pmsg_read_cnt;
	unsigned int console_read_cnt;
	unsigned int ftrace_max_cnt;
	unsigned int ftrace_read_cnt;
	/*
	 * These counters should be calculated during recovery.
	 * They record the oops/panic times after crashes rather than boots.
	 */
	unsigned int oops_counter;
	unsigned int panic_counter;
	atomic_t recovered;
	atomic_t on_panic;

	/*
	 * pstore_zone_info_lock protects this entire structure during calls
	 * to register_pstore_zone()/unregister_pstore_zone().
	 */
	struct mutex pstore_zone_info_lock;
	struct pstore_zone_info *pstore_zone_info;
	struct pstore_info pstore;
};
static struct psz_context pstore_zone_cxt;

static void psz_flush_all_dirty_zones(struct work_struct *);
static DECLARE_DELAYED_WORK(psz_cleaner, psz_flush_all_dirty_zones);

/**
 * enum psz_flush_mode - flush mode for psz_zone_write()
 *
 * @FLUSH_NONE: do not flush to storage but update data on memory
 * @FLUSH_PART: just flush part of data including meta data to storage
 * @FLUSH_META: just flush meta data of zone to storage
 * @FLUSH_ALL: flush all of zone
 */
enum psz_flush_mode {
	FLUSH_NONE = 0,
	FLUSH_PART,
	FLUSH_META,
	FLUSH_ALL,
};
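
/*
 * Note the division of labor among these modes in this file:
 * psz_record_write() persists just-appended bytes with FLUSH_PART,
 * the erase and wrap paths rewrite only the header with FLUSH_META, and
 * psz_flush_dirty_zone()/psz_kmsg_write_record() push a whole zone with
 * FLUSH_ALL.
 */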

static inline int buffer_datalen(struct pstore_zone *zone)
{
	return atomic_read(&zone->buffer->datalen);
}

static inline int buffer_start(struct pstore_zone *zone)
{
	return atomic_read(&zone->buffer->start);
}

static inline bool is_on_panic(void)
{
	return atomic_read(&pstore_zone_cxt.on_panic);
}

static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf,
		size_t len, unsigned long off)
{
	if (!buf || !zone || !zone->buffer)
		return -EINVAL;
	if (off > zone->buffer_size)
		return -EINVAL;
	len = min_t(size_t, len, zone->buffer_size - off);
	memcpy(buf, zone->buffer->data + off, len);
	return len;
}

static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf,
		size_t len, unsigned long off)
{
	if (!buf || !zone || !zone->oldbuf)
		return -EINVAL;
	if (off > zone->buffer_size)
		return -EINVAL;
	len = min_t(size_t, len, zone->buffer_size - off);
	memcpy(buf, zone->oldbuf->data + off, len);
	return 0;
}

static int psz_zone_write(struct pstore_zone *zone,
		enum psz_flush_mode flush_mode, const char *buf,
		size_t len, unsigned long off)
{
	struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
	ssize_t wcnt = 0;
	ssize_t (*writeop)(const char *buf, size_t bytes, loff_t pos);
	size_t wlen;

	if (off > zone->buffer_size)
		return -EINVAL;

	wlen = min_t(size_t, len, zone->buffer_size - off);
	if (buf && wlen) {
		memcpy(zone->buffer->data + off, buf, wlen);
		atomic_set(&zone->buffer->datalen, wlen + off);
	}

	/* avoid damaging old records */
	if (!is_on_panic() && !atomic_read(&pstore_zone_cxt.recovered))
		goto dirty;

	writeop = is_on_panic() ? info->panic_write : info->write;
	if (!writeop)
		goto dirty;

	switch (flush_mode) {
	case FLUSH_NONE:
		if (unlikely(buf && wlen))
			goto dirty;
		return 0;
	case FLUSH_PART:
		wcnt = writeop((const char *)zone->buffer->data + off, wlen,
				zone->off + sizeof(*zone->buffer) + off);
		if (wcnt != wlen)
			goto dirty;
		fallthrough;
	case FLUSH_META:
		wlen = sizeof(struct psz_buffer);
		wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
		if (wcnt != wlen)
			goto dirty;
		break;
	case FLUSH_ALL:
		wlen = zone->buffer_size + sizeof(*zone->buffer);
		wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
		if (wcnt != wlen)
			goto dirty;
		break;
	}

	return 0;
dirty:
	/* no need to mark dirty if going to try next zone */
	if (wcnt == -ENOMSG)
		return -ENOMSG;
	atomic_set(&zone->dirty, true);
	/* flush dirty zones nicely */
	if (wcnt == -EBUSY && !is_on_panic())
		schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(500));
	return -EBUSY;
}

static int psz_flush_dirty_zone(struct pstore_zone *zone)
{
	int ret;

	if (unlikely(!zone))
		return -EINVAL;

	if (unlikely(!atomic_read(&pstore_zone_cxt.recovered)))
		return -EBUSY;

	if (!atomic_xchg(&zone->dirty, false))
		return 0;

	ret = psz_zone_write(zone, FLUSH_ALL, NULL, 0, 0);
	if (ret)
		atomic_set(&zone->dirty, true);
	return ret;
}

static int psz_flush_dirty_zones(struct pstore_zone **zones, unsigned int cnt)
{
	int i, ret;
	struct pstore_zone *zone;

	if (!zones)
		return -EINVAL;

	for (i = 0; i < cnt; i++) {
		zone = zones[i];
		if (!zone)
			return -EINVAL;
		ret = psz_flush_dirty_zone(zone);
		if (ret)
			return ret;
	}
	return 0;
}

static int psz_move_zone(struct pstore_zone *old, struct pstore_zone *new)
{
	const char *data = (const char *)old->buffer->data;
	int ret;

	ret = psz_zone_write(new, FLUSH_ALL, data, buffer_datalen(old), 0);
	if (ret) {
		atomic_set(&new->buffer->datalen, 0);
		atomic_set(&new->dirty, false);
		return ret;
	}
	atomic_set(&old->buffer->datalen, 0);
	return 0;
}

static void psz_flush_all_dirty_zones(struct work_struct *work)
{
	struct psz_context *cxt = &pstore_zone_cxt;
	int ret = 0;

	if (cxt->ppsz)
		ret |= psz_flush_dirty_zone(cxt->ppsz);
	if (cxt->cpsz)
		ret |= psz_flush_dirty_zone(cxt->cpsz);
	if (cxt->kpszs)
		ret |= psz_flush_dirty_zones(cxt->kpszs, cxt->kmsg_max_cnt);
	if (cxt->fpszs)
		ret |= psz_flush_dirty_zones(cxt->fpszs, cxt->ftrace_max_cnt);
	if (ret && cxt->pstore_zone_info)
		schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(1000));
}

static int psz_kmsg_recover_data(struct psz_context *cxt)
{
	struct pstore_zone_info *info = cxt->pstore_zone_info;
	struct pstore_zone *zone = NULL;
	struct psz_buffer *buf;
	unsigned long i;
	ssize_t rcnt;

	if (!info->read)
		return -EINVAL;

	for (i = 0; i < cxt->kmsg_max_cnt; i++) {
		zone = cxt->kpszs[i];
		if (unlikely(!zone))
			return -EINVAL;
		if (atomic_read(&zone->dirty)) {
			unsigned int wcnt = cxt->kmsg_write_cnt;
			struct pstore_zone *new = cxt->kpszs[wcnt];
			int ret;

			ret = psz_move_zone(zone, new);
			if (ret) {
				pr_err("move zone from %lu to %d failed\n",
						i, wcnt);
				return ret;
			}
			cxt->kmsg_write_cnt = (wcnt + 1) % cxt->kmsg_max_cnt;
		}
		if (!zone->should_recover)
			continue;
		buf = zone->buffer;
		rcnt = info->read((char *)buf, zone->buffer_size + sizeof(*buf),
				zone->off);
		if (rcnt != zone->buffer_size + sizeof(*buf))
			return (int)rcnt < 0 ? (int)rcnt : -EIO;
	}
	return 0;
}

static int psz_kmsg_recover_meta(struct psz_context *cxt)
{
	struct pstore_zone_info *info = cxt->pstore_zone_info;
	struct pstore_zone *zone;
	size_t rcnt, len;
	struct psz_buffer *buf;
	struct psz_kmsg_header *hdr;
	struct timespec64 time = { };
	unsigned long i;
	/*
	 * Recovery may happen during a panic, when we cannot allocate
	 * memory with kmalloc(), so use a local array instead.
	 */
	char buffer_header[sizeof(*buf) + sizeof(*hdr)] = {0};

	if (!info->read)
		return -EINVAL;

	len = sizeof(*buf) + sizeof(*hdr);
	buf = (struct psz_buffer *)buffer_header;
	for (i = 0; i < cxt->kmsg_max_cnt; i++) {
		zone = cxt->kpszs[i];
		if (unlikely(!zone))
			return -EINVAL;

		rcnt = info->read((char *)buf, len, zone->off);
		if (rcnt == -ENOMSG) {
			pr_debug("%s with id %lu may be broken, skip\n",
					zone->name, i);
			continue;
		} else if (rcnt != len) {
			pr_err("read %s with id %lu failed\n", zone->name, i);
			return (int)rcnt < 0 ? (int)rcnt : -EIO;
		}

		if (buf->sig != zone->buffer->sig) {
			pr_debug("no valid data in kmsg dump zone %lu\n", i);
			continue;
		}

		if (zone->buffer_size < atomic_read(&buf->datalen)) {
			pr_info("found overtop zone: %s: id %lu, off %lld, size %zu\n",
					zone->name, i, zone->off,
					zone->buffer_size);
			continue;
		}

		hdr = (struct psz_kmsg_header *)buf->data;
		if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC) {
			pr_info("found invalid zone: %s: id %lu, off %lld, size %zu\n",
					zone->name, i, zone->off,
					zone->buffer_size);
			continue;
		}

		/*
		 * We have found the newest zone; the next one must be the
		 * oldest or an unused zone, because we write to the zones
		 * one by one like a circle.
		 */
		if (hdr->time.tv_sec >= time.tv_sec) {
			time.tv_sec = hdr->time.tv_sec;
			cxt->kmsg_write_cnt = (i + 1) % cxt->kmsg_max_cnt;
		}

		if (hdr->reason == KMSG_DUMP_OOPS)
			cxt->oops_counter =
				max(cxt->oops_counter, hdr->counter);
		else if (hdr->reason == KMSG_DUMP_PANIC)
			cxt->panic_counter =
				max(cxt->panic_counter, hdr->counter);

		if (!atomic_read(&buf->datalen)) {
			pr_debug("found erased zone: %s: id %lu, off %lld, size %zu, datalen %d\n",
					zone->name, i, zone->off,
					zone->buffer_size,
					atomic_read(&buf->datalen));
			continue;
		}

		if (!is_on_panic())
			zone->should_recover = true;
		pr_debug("found nice zone: %s: id %lu, off %lld, size %zu, datalen %d\n",
				zone->name, i, zone->off,
				zone->buffer_size, atomic_read(&buf->datalen));
	}

	return 0;
}

static int psz_kmsg_recover(struct psz_context *cxt)
{
	int ret;

	if (!cxt->kpszs)
		return 0;

	ret = psz_kmsg_recover_meta(cxt);
	if (ret)
		goto recover_fail;

	ret = psz_kmsg_recover_data(cxt);
	if (ret)
		goto recover_fail;

	return 0;
recover_fail:
	pr_debug("psz_recover_kmsg failed\n");
	return ret;
}

static int psz_recover_zone(struct psz_context *cxt, struct pstore_zone *zone)
{
	struct pstore_zone_info *info = cxt->pstore_zone_info;
	struct psz_buffer *oldbuf, tmpbuf;
	int ret = 0;
	char *buf;
	ssize_t rcnt, len, start, off;

	if (!zone || zone->oldbuf)
		return 0;

	if (is_on_panic()) {
		/* save as much data as possible */
		psz_flush_dirty_zone(zone);
		return 0;
	}

	if (unlikely(!info->read))
		return -EINVAL;

	len = sizeof(struct psz_buffer);
	rcnt = info->read((char *)&tmpbuf, len, zone->off);
	if (rcnt != len) {
		pr_debug("read zone %s failed\n", zone->name);
		return (int)rcnt < 0 ? (int)rcnt : -EIO;
	}

	if (tmpbuf.sig != zone->buffer->sig) {
		pr_debug("no valid data in zone %s\n", zone->name);
		return 0;
	}

	if (zone->buffer_size < atomic_read(&tmpbuf.datalen) ||
		zone->buffer_size < atomic_read(&tmpbuf.start)) {
		pr_info("found overtop zone: %s: off %lld, size %zu\n",
				zone->name, zone->off, zone->buffer_size);
		/* just keep going */
		return 0;
	}

	if (!atomic_read(&tmpbuf.datalen)) {
		pr_debug("found erased zone: %s: off %lld, size %zu, datalen %d\n",
				zone->name, zone->off, zone->buffer_size,
				atomic_read(&tmpbuf.datalen));
		return 0;
	}

	pr_debug("found nice zone: %s: off %lld, size %zu, datalen %d\n",
			zone->name, zone->off, zone->buffer_size,
			atomic_read(&tmpbuf.datalen));

	len = atomic_read(&tmpbuf.datalen) + sizeof(*oldbuf);
	oldbuf = kzalloc(len, GFP_KERNEL);
	if (!oldbuf)
		return -ENOMEM;

	memcpy(oldbuf, &tmpbuf, sizeof(*oldbuf));
	buf = (char *)oldbuf + sizeof(*oldbuf);
	len = atomic_read(&oldbuf->datalen);
	start = atomic_read(&oldbuf->start);
	off = zone->off + sizeof(*oldbuf);

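	/*
	 * Linearize the ring buffer: on storage, bytes [start, len) are
	 * the oldest data and bytes [0, start) the newest, so read them
	 * into oldbuf->data in chronological order. E.g. with len == 8
	 * and start == 2, the 6 bytes at storage offset 2 come first,
	 * then the 2 bytes at offset 0. If the zone never wrapped,
	 * start == len and the first read is empty.
	 */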
	/* read the older part of the data, from @start to the end */
	rcnt = info->read(buf, len - start, off + start);
	if (rcnt != len - start) {
		pr_err("read zone %s failed\n", zone->name);
		ret = (int)rcnt < 0 ? (int)rcnt : -EIO;
		goto free_oldbuf;
	}

	/* read the newer part, wrapped around to the beginning */
	rcnt = info->read(buf + len - start, start, off);
	if (rcnt != start) {
		pr_err("read zone %s failed\n", zone->name);
		ret = (int)rcnt < 0 ? (int)rcnt : -EIO;
		goto free_oldbuf;
	}

	zone->oldbuf = oldbuf;
	psz_flush_dirty_zone(zone);
	return 0;

free_oldbuf:
	kfree(oldbuf);
	return ret;
}

static int psz_recover_zones(struct psz_context *cxt,
		struct pstore_zone **zones, unsigned int cnt)
{
	int ret;
	unsigned int i;
	struct pstore_zone *zone;

	if (!zones)
		return 0;

	for (i = 0; i < cnt; i++) {
		zone = zones[i];
		if (unlikely(!zone))
			continue;
		ret = psz_recover_zone(cxt, zone);
		if (ret)
			goto recover_fail;
	}

	return 0;
recover_fail:
	pr_debug("recover %s[%u] failed\n", zone->name, i);
	return ret;
}

/**
 * psz_recovery() - recover data from storage
 * @cxt: the context of pstore/zone
 *
 * Recovery means reading data back from storage after rebooting.
 *
 * Return: 0 on success, others on failure.
 */
static inline int psz_recovery(struct psz_context *cxt)
{
	int ret;

	if (atomic_read(&cxt->recovered))
		return 0;

	ret = psz_kmsg_recover(cxt);
	if (ret)
		goto out;

	ret = psz_recover_zone(cxt, cxt->ppsz);
	if (ret)
		goto out;

	ret = psz_recover_zone(cxt, cxt->cpsz);
	if (ret)
		goto out;

	ret = psz_recover_zones(cxt, cxt->fpszs, cxt->ftrace_max_cnt);

out:
	if (unlikely(ret))
		pr_err("recover failed\n");
	else {
		pr_debug("recover end!\n");
		atomic_set(&cxt->recovered, 1);
	}
	return ret;
}

static int psz_pstore_open(struct pstore_info *psi)
{
	struct psz_context *cxt = psi->data;

	cxt->kmsg_read_cnt = 0;
	cxt->pmsg_read_cnt = 0;
	cxt->console_read_cnt = 0;
	cxt->ftrace_read_cnt = 0;
	return 0;
}

static inline bool psz_old_ok(struct pstore_zone *zone)
{
	if (zone && zone->oldbuf && atomic_read(&zone->oldbuf->datalen))
		return true;
	return false;
}

static inline bool psz_ok(struct pstore_zone *zone)
{
	if (zone && zone->buffer && buffer_datalen(zone))
		return true;
	return false;
}

static inline int psz_kmsg_erase(struct psz_context *cxt,
		struct pstore_zone *zone, struct pstore_record *record)
{
	struct psz_buffer *buffer = zone->buffer;
	struct psz_kmsg_header *hdr =
		(struct psz_kmsg_header *)buffer->data;

	if (unlikely(!psz_ok(zone)))
		return 0;
	/* this zone is already updated, no need to erase */
	if (record->count != hdr->counter)
		return 0;

	atomic_set(&zone->buffer->datalen, 0);
	return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
}

static inline int psz_record_erase(struct psz_context *cxt,
		struct pstore_zone *zone)
{
	if (unlikely(!psz_old_ok(zone)))
		return 0;

	kfree(zone->oldbuf);
	zone->oldbuf = NULL;
	/*
	 * If there is new data in the zone buffer, the old data is already
	 * invalid, so there is no need to flush zeroes (erase) to the
	 * block device.
	 */
	if (!buffer_datalen(zone))
		return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
	psz_flush_dirty_zone(zone);
	return 0;
}

static int psz_pstore_erase(struct pstore_record *record)
{
	struct psz_context *cxt = record->psi->data;

	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		if (record->id >= cxt->kmsg_max_cnt)
			return -EINVAL;
		return psz_kmsg_erase(cxt, cxt->kpszs[record->id], record);
	case PSTORE_TYPE_PMSG:
		return psz_record_erase(cxt, cxt->ppsz);
	case PSTORE_TYPE_CONSOLE:
		return psz_record_erase(cxt, cxt->cpsz);
	case PSTORE_TYPE_FTRACE:
		if (record->id >= cxt->ftrace_max_cnt)
			return -EINVAL;
		return psz_record_erase(cxt, cxt->fpszs[record->id]);
	default:
		return -EINVAL;
	}
}

static void psz_write_kmsg_hdr(struct pstore_zone *zone,
		struct pstore_record *record)
{
	struct psz_context *cxt = record->psi->data;
	struct psz_buffer *buffer = zone->buffer;
	struct psz_kmsg_header *hdr =
		(struct psz_kmsg_header *)buffer->data;

	hdr->magic = PSTORE_KMSG_HEADER_MAGIC;
	hdr->compressed = record->compressed;
	hdr->time.tv_sec = record->time.tv_sec;
	hdr->time.tv_nsec = record->time.tv_nsec;
	hdr->reason = record->reason;
	if (hdr->reason == KMSG_DUMP_OOPS)
		hdr->counter = ++cxt->oops_counter;
	else if (hdr->reason == KMSG_DUMP_PANIC)
		hdr->counter = ++cxt->panic_counter;
	else
		hdr->counter = 0;
}

/*
 * In case a zone is broken, which may happen on MTD devices, try each
 * zone in turn, starting at cxt->kmsg_write_cnt.
 */
static inline int notrace psz_kmsg_write_record(struct psz_context *cxt,
		struct pstore_record *record)
{
	size_t size, hlen;
	struct pstore_zone *zone;
	unsigned int i;

	for (i = 0; i < cxt->kmsg_max_cnt; i++) {
		unsigned int zonenum, len;
		int ret;

		zonenum = (cxt->kmsg_write_cnt + i) % cxt->kmsg_max_cnt;
		zone = cxt->kpszs[zonenum];
		if (unlikely(!zone))
			return -ENOSPC;

		/* to avoid destroying old data, allocate a new buffer */
		len = zone->buffer_size + sizeof(*zone->buffer);
		zone->oldbuf = zone->buffer;
		zone->buffer = kzalloc(len, GFP_KERNEL);
		if (!zone->buffer) {
			zone->buffer = zone->oldbuf;
			return -ENOMEM;
		}
		zone->buffer->sig = zone->oldbuf->sig;

		pr_debug("write %s to zone id %d\n", zone->name, zonenum);
		psz_write_kmsg_hdr(zone, record);
		hlen = sizeof(struct psz_kmsg_header);
		size = min_t(size_t, record->size, zone->buffer_size - hlen);
		ret = psz_zone_write(zone, FLUSH_ALL, record->buf, size, hlen);
		if (likely(!ret || ret != -ENOMSG)) {
			cxt->kmsg_write_cnt = zonenum + 1;
			cxt->kmsg_write_cnt %= cxt->kmsg_max_cnt;
			/* no need to try next zone, free last zone buffer */
			kfree(zone->oldbuf);
			zone->oldbuf = NULL;
			return ret;
		}

		pr_debug("zone %u may be broken, try next dmesg zone\n",
				zonenum);
		kfree(zone->buffer);
		zone->buffer = zone->oldbuf;
		zone->oldbuf = NULL;
	}

	return -EBUSY;
}

static int notrace psz_kmsg_write(struct psz_context *cxt,
		struct pstore_record *record)
{
	int ret;

	/*
	 * Explicitly only take the first part of any new crash.
	 * If our buffer is larger than kmsg_bytes, this can never happen,
	 * and if our buffer is smaller than kmsg_bytes, we don't want the
	 * report split across multiple records.
	 */
	if (record->part != 1)
		return -ENOSPC;

	if (!cxt->kpszs)
		return -ENOSPC;

	ret = psz_kmsg_write_record(cxt, record);
	if (!ret && is_on_panic()) {
		/* ensure all data is flushed to storage on panic */
		pr_debug("try to flush other dirty zones\n");
		psz_flush_all_dirty_zones(NULL);
	}

	/* always return 0 as we have already handled it in the buffer */
	return 0;
}

static int notrace psz_record_write(struct pstore_zone *zone,
		struct pstore_record *record)
{
	size_t start, rem;
	bool is_full_data = false;
	char *buf;
	int cnt;

	if (!zone || !record)
		return -ENOSPC;

	if (atomic_read(&zone->buffer->datalen) >= zone->buffer_size)
		is_full_data = true;

	cnt = record->size;
	buf = record->buf;
	if (unlikely(cnt > zone->buffer_size)) {
		buf += cnt - zone->buffer_size;
		cnt = zone->buffer_size;
	}

	start = buffer_start(zone);
	rem = zone->buffer_size - start;
	if (unlikely(rem < cnt)) {
		psz_zone_write(zone, FLUSH_PART, buf, rem, start);
		buf += rem;
		cnt -= rem;
		start = 0;
		is_full_data = true;
	}
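
	/*
	 * For example, with buffer_size == 8, start == 6 and a 4-byte
	 * record: rem == 2 < 4, so 2 bytes are written at offsets 6-7
	 * above, the remaining 2 bytes land at offsets 0-1 below, and we
	 * end up with start == 2 and the zone marked full.
	 */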

	atomic_set(&zone->buffer->start, cnt + start);
	psz_zone_write(zone, FLUSH_PART, buf, cnt, start);

	/*
	 * psz_zone_write() sets datalen to start + cnt, which works as long
	 * as the actual data length is less than the buffer size. Once the
	 * data length exceeds the buffer size, the write wraps around to the
	 * beginning of the zone and start + cnt no longer reflects the real
	 * amount of data, so reset datalen to the buffer size in that case.
	 */
	if (is_full_data) {
		atomic_set(&zone->buffer->datalen, zone->buffer_size);
		psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
	}
	return 0;
}

static int notrace psz_pstore_write(struct pstore_record *record)
{
	struct psz_context *cxt = record->psi->data;

	if (record->type == PSTORE_TYPE_DMESG &&
			record->reason == KMSG_DUMP_PANIC)
		atomic_set(&cxt->on_panic, 1);

	/*
	 * If on panic, do not write any records except panic ones. This
	 * fixes the case where panic_write prints a log message that wakes
	 * up the console back-end.
	 */
	if (is_on_panic() && record->type != PSTORE_TYPE_DMESG)
		return -EBUSY;

	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		return psz_kmsg_write(cxt, record);
	case PSTORE_TYPE_CONSOLE:
		return psz_record_write(cxt->cpsz, record);
	case PSTORE_TYPE_PMSG:
		return psz_record_write(cxt->ppsz, record);
	case PSTORE_TYPE_FTRACE: {
		int zonenum = smp_processor_id();

		if (!cxt->fpszs)
			return -ENOSPC;
		return psz_record_write(cxt->fpszs[zonenum], record);
	}
	default:
		return -EINVAL;
	}
}

static struct pstore_zone *psz_read_next_zone(struct psz_context *cxt)
{
	struct pstore_zone *zone = NULL;

	while (cxt->kmsg_read_cnt < cxt->kmsg_max_cnt) {
		zone = cxt->kpszs[cxt->kmsg_read_cnt++];
		if (psz_ok(zone))
			return zone;
	}

	if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt)
		/*
		 * No need for psz_old_ok() here; psz_ftrace_read() checks it
		 * while combining the logs, and it has to traverse all zones
		 * anyway in case some of them hold no data.
		 */
		return cxt->fpszs[cxt->ftrace_read_cnt++];

	if (cxt->pmsg_read_cnt == 0) {
		cxt->pmsg_read_cnt++;
		zone = cxt->ppsz;
		if (psz_old_ok(zone))
			return zone;
	}

	if (cxt->console_read_cnt == 0) {
		cxt->console_read_cnt++;
		zone = cxt->cpsz;
		if (psz_old_ok(zone))
			return zone;
	}

	return NULL;
}

static int psz_kmsg_read_hdr(struct pstore_zone *zone,
		struct pstore_record *record)
{
	struct psz_buffer *buffer = zone->buffer;
	struct psz_kmsg_header *hdr =
		(struct psz_kmsg_header *)buffer->data;

	if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC)
		return -EINVAL;
	record->compressed = hdr->compressed;
	record->time.tv_sec = hdr->time.tv_sec;
	record->time.tv_nsec = hdr->time.tv_nsec;
	record->reason = hdr->reason;
	record->count = hdr->counter;
	return 0;
}

static ssize_t psz_kmsg_read(struct pstore_zone *zone,
		struct pstore_record *record)
{
	ssize_t size, hlen = 0;

	size = buffer_datalen(zone);
	/* Clear and skip this kmsg dump record if it has no valid header */
	if (psz_kmsg_read_hdr(zone, record)) {
		atomic_set(&zone->buffer->datalen, 0);
		atomic_set(&zone->dirty, 0);
		return -ENOMSG;
	}
	size -= sizeof(struct psz_kmsg_header);

	if (!record->compressed) {
		char *buf = kasprintf(GFP_KERNEL, "%s: Total %d times\n",
				kmsg_dump_reason_str(record->reason),
				record->count);
		if (!buf)
			return -ENOMEM;
		hlen = strlen(buf);
		record->buf = krealloc(buf, hlen + size, GFP_KERNEL);
		if (!record->buf) {
			kfree(buf);
			return -ENOMEM;
		}
	} else {
		record->buf = kmalloc(size, GFP_KERNEL);
		if (!record->buf)
			return -ENOMEM;
	}

	size = psz_zone_read_buffer(zone, record->buf + hlen, size,
			sizeof(struct psz_kmsg_header));
	if (unlikely(size < 0)) {
		kfree(record->buf);
		return -ENOMSG;
	}

	return size + hlen;
}

/* try to combine all ftrace zones */
static ssize_t psz_ftrace_read(struct pstore_zone *zone,
		struct pstore_record *record)
{
	struct psz_context *cxt;
	struct psz_buffer *buf;
	int ret;

	if (!zone || !record)
		return -ENOSPC;

	if (!psz_old_ok(zone))
		goto out;

	buf = (struct psz_buffer *)zone->oldbuf;
	if (!buf)
		return -ENOMSG;

	ret = pstore_ftrace_combine_log(&record->buf, &record->size,
			(char *)buf->data, atomic_read(&buf->datalen));
	if (unlikely(ret))
		return ret;

out:
	cxt = record->psi->data;
	if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt)
		/* then, read next ftrace zone */
		return -ENOMSG;
	record->id = 0;
	return record->size ? record->size : -ENOMSG;
}

static ssize_t psz_record_read(struct pstore_zone *zone,
		struct pstore_record *record)
{
	size_t len;
	struct psz_buffer *buf;

	if (!zone || !record)
		return -ENOSPC;

	buf = (struct psz_buffer *)zone->oldbuf;
	if (!buf)
		return -ENOMSG;

	len = atomic_read(&buf->datalen);
	record->buf = kmalloc(len, GFP_KERNEL);
	if (!record->buf)
		return -ENOMEM;

	if (unlikely(psz_zone_read_oldbuf(zone, record->buf, len, 0))) {
		kfree(record->buf);
		return -ENOMSG;
	}

	return len;
}

static ssize_t psz_pstore_read(struct pstore_record *record)
{
	struct psz_context *cxt = record->psi->data;
	ssize_t (*readop)(struct pstore_zone *zone,
			struct pstore_record *record);
	struct pstore_zone *zone;
	ssize_t ret;

	/* before read, we must recover from storage */
	ret = psz_recovery(cxt);
	if (ret)
		return ret;

next_zone:
	zone = psz_read_next_zone(cxt);
	if (!zone)
		return 0;

	record->type = zone->type;
	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		readop = psz_kmsg_read;
		record->id = cxt->kmsg_read_cnt - 1;
		break;
	case PSTORE_TYPE_FTRACE:
		readop = psz_ftrace_read;
		break;
	case PSTORE_TYPE_CONSOLE:
		fallthrough;
	case PSTORE_TYPE_PMSG:
		readop = psz_record_read;
		break;
	default:
		goto next_zone;
	}

	ret = readop(zone, record);
	if (ret == -ENOMSG)
		goto next_zone;
	return ret;
}

static struct psz_context pstore_zone_cxt = {
	.pstore_zone_info_lock =
		__MUTEX_INITIALIZER(pstore_zone_cxt.pstore_zone_info_lock),
	.recovered = ATOMIC_INIT(0),
	.on_panic = ATOMIC_INIT(0),
	.pstore = {
		.owner = THIS_MODULE,
		.open = psz_pstore_open,
		.read = psz_pstore_read,
		.write = psz_pstore_write,
		.erase = psz_pstore_erase,
	},
};

static void psz_free_zone(struct pstore_zone **pszone)
{
	struct pstore_zone *zone = *pszone;

	if (!zone)
		return;

	kfree(zone->buffer);
	kfree(zone);
	*pszone = NULL;
}

static void psz_free_zones(struct pstore_zone ***pszones, unsigned int *cnt)
{
	struct pstore_zone **zones = *pszones;

	if (!zones)
		return;

	while (*cnt > 0) {
		(*cnt)--;
		psz_free_zone(&(zones[*cnt]));
	}
	kfree(zones);
	*pszones = NULL;
}

static void psz_free_all_zones(struct psz_context *cxt)
{
	if (cxt->kpszs)
		psz_free_zones(&cxt->kpszs, &cxt->kmsg_max_cnt);
	if (cxt->ppsz)
		psz_free_zone(&cxt->ppsz);
	if (cxt->cpsz)
		psz_free_zone(&cxt->cpsz);
	if (cxt->fpszs)
		psz_free_zones(&cxt->fpszs, &cxt->ftrace_max_cnt);
}

static struct pstore_zone *psz_init_zone(enum pstore_type_id type,
		loff_t *off, size_t size)
{
	struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
	struct pstore_zone *zone;
	const char *name = pstore_type_to_name(type);

	if (!size)
		return NULL;

	if (*off + size > info->total_size) {
		pr_err("no room for %s (0x%zx@0x%llx over 0x%lx)\n",
			name, size, *off, info->total_size);
		return ERR_PTR(-ENOMEM);
	}

	zone = kzalloc(sizeof(struct pstore_zone), GFP_KERNEL);
	if (!zone)
		return ERR_PTR(-ENOMEM);

	zone->buffer = kmalloc(size, GFP_KERNEL);
	if (!zone->buffer) {
		kfree(zone);
		return ERR_PTR(-ENOMEM);
	}
	memset(zone->buffer, 0xFF, size);
	zone->off = *off;
	zone->name = name;
	zone->type = type;
	zone->buffer_size = size - sizeof(struct psz_buffer);
	zone->buffer->sig = type ^ PSZ_SIG;
	zone->oldbuf = NULL;
	atomic_set(&zone->dirty, 0);
	atomic_set(&zone->buffer->datalen, 0);
	atomic_set(&zone->buffer->start, 0);

	*off += size;

	pr_debug("pszone %s: off 0x%llx, %zu header, %zu data\n", zone->name,
			zone->off, sizeof(*zone->buffer), zone->buffer_size);
	return zone;
}

static struct pstore_zone **psz_init_zones(enum pstore_type_id type,
		loff_t *off, size_t total_size, ssize_t record_size,
		unsigned int *cnt)
{
	struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
	struct pstore_zone **zones, *zone;
	const char *name = pstore_type_to_name(type);
	int c, i;

	*cnt = 0;
	if (!total_size || !record_size)
		return NULL;

	if (*off + total_size > info->total_size) {
		pr_err("no room for zones %s (0x%zx@0x%llx over 0x%lx)\n",
			name, total_size, *off, info->total_size);
		return ERR_PTR(-ENOMEM);
	}

	c = total_size / record_size;
	zones = kcalloc(c, sizeof(*zones), GFP_KERNEL);
	if (!zones) {
		pr_err("allocate for zones %s failed\n", name);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < c; i++) {
		zone = psz_init_zone(type, off, record_size);
		if (!zone || IS_ERR(zone)) {
			pr_err("initialize zones %s failed\n", name);
			psz_free_zones(&zones, &i);
			return (void *)zone;
		}
		zones[i] = zone;
	}

	*cnt = c;
	return zones;
}

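/*
 * The back-end's total_size is carved up in allocation order below: one
 * pmsg zone, one console zone, ftrace_size split into one zone per CPU,
 * and all remaining space divided into kmsg_size-sized dump zones.
 */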
static int psz_alloc_zones(struct psz_context *cxt)
{
	struct pstore_zone_info *info = cxt->pstore_zone_info;
	loff_t off = 0;
	int err;
	size_t off_size = 0;

	off_size += info->pmsg_size;
	cxt->ppsz = psz_init_zone(PSTORE_TYPE_PMSG, &off, info->pmsg_size);
	if (IS_ERR(cxt->ppsz)) {
		err = PTR_ERR(cxt->ppsz);
		cxt->ppsz = NULL;
		goto free_out;
	}

	off_size += info->console_size;
	cxt->cpsz = psz_init_zone(PSTORE_TYPE_CONSOLE, &off,
			info->console_size);
	if (IS_ERR(cxt->cpsz)) {
		err = PTR_ERR(cxt->cpsz);
		cxt->cpsz = NULL;
		goto free_out;
	}

	off_size += info->ftrace_size;
	cxt->fpszs = psz_init_zones(PSTORE_TYPE_FTRACE, &off,
			info->ftrace_size,
			info->ftrace_size / nr_cpu_ids,
			&cxt->ftrace_max_cnt);
	if (IS_ERR(cxt->fpszs)) {
		err = PTR_ERR(cxt->fpszs);
		cxt->fpszs = NULL;
		goto free_out;
	}

	cxt->kpszs = psz_init_zones(PSTORE_TYPE_DMESG, &off,
			info->total_size - off_size,
			info->kmsg_size, &cxt->kmsg_max_cnt);
	if (IS_ERR(cxt->kpszs)) {
		err = PTR_ERR(cxt->kpszs);
		cxt->kpszs = NULL;
		goto free_out;
	}

	return 0;
free_out:
	psz_free_all_zones(cxt);
	return err;
}

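/*
 * Example back-end registration (a minimal sketch; "mybackend",
 * mybackend_read() and mybackend_write() are hypothetical, and the
 * sizes are only illustrative):
 *
 *	static struct pstore_zone_info info = {
 *		.name = "mybackend",
 *		.total_size = 1024 * 1024,
 *		.kmsg_size = 64 * 1024,
 *		.pmsg_size = 64 * 1024,
 *		.max_reason = KMSG_DUMP_OOPS,
 *		.read = mybackend_read,
 *		.write = mybackend_write,
 *	};
 *
 *	err = register_pstore_zone(&info);
 *
 * All sizes must satisfy the check_size() constraints enforced in
 * register_pstore_zone() below.
 */
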
/**
 * register_pstore_zone() - register to pstore/zone
 *
 * @info: back-end driver information. See &struct pstore_zone_info.
 *
 * Only one back-end can be registered at a time.
 *
 * Return: 0 on success, others on failure.
 */
int register_pstore_zone(struct pstore_zone_info *info)
{
	int err = -EINVAL;
	struct psz_context *cxt = &pstore_zone_cxt;

	if (info->total_size < 4096) {
		pr_warn("total_size must be >= 4096\n");
		return -EINVAL;
	}

	if (!info->kmsg_size && !info->pmsg_size && !info->console_size &&
	    !info->ftrace_size) {
		pr_warn("at least one record size must be non-zero\n");
		return -EINVAL;
	}

	if (!info->name || !info->name[0])
		return -EINVAL;

#define check_size(name, size) {					\
		if (info->name > 0 && info->name < (size)) {		\
			pr_err(#name " must be over %d\n", (size));	\
			return -EINVAL;					\
		}							\
		if (info->name & (size - 1)) {				\
			pr_err(#name " must be a multiple of %d\n",	\
					(size));			\
			return -EINVAL;					\
		}							\
	}

	check_size(total_size, 4096);
	check_size(kmsg_size, SECTOR_SIZE);
	check_size(pmsg_size, SECTOR_SIZE);
	check_size(console_size, SECTOR_SIZE);
	check_size(ftrace_size, SECTOR_SIZE);

#undef check_size
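
	/*
	 * For example, with SECTOR_SIZE of 512 the checks above accept
	 * kmsg_size = 64 * 1024 (a non-zero multiple of 512) but reject
	 * a value like 1000, which is not sector-aligned.
	 */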

	/*
	 * The @read and @write callbacks must be provided: without @read,
	 * pstore may fail to mount; without @write, pstore cannot remove
	 * record files.
	 */
	if (!info->read || !info->write) {
		pr_err("no valid general read/write interface\n");
		return -EINVAL;
	}

	mutex_lock(&cxt->pstore_zone_info_lock);
	if (cxt->pstore_zone_info) {
		pr_warn("'%s' already loaded: ignoring '%s'\n",
				cxt->pstore_zone_info->name, info->name);
		mutex_unlock(&cxt->pstore_zone_info_lock);
		return -EBUSY;
	}
	cxt->pstore_zone_info = info;

	pr_debug("register %s with properties:\n", info->name);
	pr_debug("\ttotal size : %ld Bytes\n", info->total_size);
	pr_debug("\tkmsg size : %ld Bytes\n", info->kmsg_size);
	pr_debug("\tpmsg size : %ld Bytes\n", info->pmsg_size);
	pr_debug("\tconsole size : %ld Bytes\n", info->console_size);
	pr_debug("\tftrace size : %ld Bytes\n", info->ftrace_size);

	err = psz_alloc_zones(cxt);
	if (err) {
		pr_err("alloc zones failed\n");
		goto fail_out;
	}

	if (info->kmsg_size) {
		cxt->pstore.bufsize = cxt->kpszs[0]->buffer_size -
			sizeof(struct psz_kmsg_header);
		cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
		if (!cxt->pstore.buf) {
			err = -ENOMEM;
			goto fail_free;
		}
	}
	cxt->pstore.data = cxt;

	pr_info("registered %s as backend for", info->name);
	cxt->pstore.max_reason = info->max_reason;
	cxt->pstore.name = info->name;
	if (info->kmsg_size) {
		cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
		pr_cont(" kmsg(%s",
				kmsg_dump_reason_str(cxt->pstore.max_reason));
		if (cxt->pstore_zone_info->panic_write)
			pr_cont(",panic_write");
		pr_cont(")");
	}
	if (info->pmsg_size) {
		cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
		pr_cont(" pmsg");
	}
	if (info->console_size) {
		cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
		pr_cont(" console");
	}
	if (info->ftrace_size) {
		cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
		pr_cont(" ftrace");
	}
	pr_cont("\n");

	err = pstore_register(&cxt->pstore);
	if (err) {
		pr_err("registering with pstore failed\n");
		goto fail_free;
	}
	mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);

	return 0;

fail_free:
	kfree(cxt->pstore.buf);
	cxt->pstore.buf = NULL;
	cxt->pstore.bufsize = 0;
	psz_free_all_zones(cxt);
fail_out:
	pstore_zone_cxt.pstore_zone_info = NULL;
	mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);
	return err;
}
EXPORT_SYMBOL_GPL(register_pstore_zone);

/**
 * unregister_pstore_zone() - unregister from pstore/zone
 *
 * @info: back-end driver information. See &struct pstore_zone_info.
 */
void unregister_pstore_zone(struct pstore_zone_info *info)
{
	struct psz_context *cxt = &pstore_zone_cxt;

	mutex_lock(&cxt->pstore_zone_info_lock);
	if (!cxt->pstore_zone_info) {
		mutex_unlock(&cxt->pstore_zone_info_lock);
		return;
	}

	/* Stop incoming writes from pstore. */
	pstore_unregister(&cxt->pstore);

	/* Flush any pending writes. */
	psz_flush_all_dirty_zones(NULL);
	flush_delayed_work(&psz_cleaner);

	/* Clean up allocations. */
	kfree(cxt->pstore.buf);
	cxt->pstore.buf = NULL;
	cxt->pstore.bufsize = 0;
	cxt->pstore_zone_info = NULL;

	psz_free_all_zones(cxt);

	/* Clear counters and zone state. */
	cxt->oops_counter = 0;
	cxt->panic_counter = 0;
	atomic_set(&cxt->recovered, 0);
	atomic_set(&cxt->on_panic, 0);

	mutex_unlock(&cxt->pstore_zone_info_lock);
}
EXPORT_SYMBOL_GPL(unregister_pstore_zone);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
MODULE_DESCRIPTION("Storage Manager for pstore/blk");