1 /*
2 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
3 * Initial release: Matias Bjorling <m@bjorling.me>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17 * USA.
18 *
19 */
20
21 #include <linux/blkdev.h>
22 #include <linux/blk-mq.h>
23 #include <linux/list.h>
24 #include <linux/types.h>
25 #include <linux/sem.h>
26 #include <linux/bitmap.h>
27 #include <linux/module.h>
28 #include <linux/miscdevice.h>
29 #include <linux/lightnvm.h>
30 #include <uapi/linux/lightnvm.h>
31
32 static LIST_HEAD(nvm_targets);
33 static LIST_HEAD(nvm_mgrs);
34 static LIST_HEAD(nvm_devices);
35 static DECLARE_RWSEM(nvm_lock);
36
37 static struct nvm_tgt_type *nvm_find_target_type(const char *name)
38 {
39 struct nvm_tgt_type *tt;
40
41 list_for_each_entry(tt, &nvm_targets, list)
42 if (!strcmp(name, tt->name))
43 return tt;
44
45 return NULL;
46 }
47
48 int nvm_register_target(struct nvm_tgt_type *tt)
49 {
50 int ret = 0;
51
52 down_write(&nvm_lock);
53 if (nvm_find_target_type(tt->name))
54 ret = -EEXIST;
55 else
56 list_add(&tt->list, &nvm_targets);
57 up_write(&nvm_lock);
58
59 return ret;
60 }
61 EXPORT_SYMBOL(nvm_register_target);
62
63 void nvm_unregister_target(struct nvm_tgt_type *tt)
64 {
65 if (!tt)
66 return;
67
68 down_write(&nvm_lock);
69 list_del(&tt->list);
70 up_write(&nvm_lock);
71 }
72 EXPORT_SYMBOL(nvm_unregister_target);
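
/*
 * Illustrative sketch (not part of this file): how a target module might
 * register itself with the core above.  Only the nvm_tgt_type fields that
 * core.c itself touches (name, version, make_rq, capacity, init, exit) are
 * shown; the example_* callbacks and module hooks are assumptions.
 *
 *	static struct nvm_tgt_type tt_example = {
 *		.name		= "example",
 *		.version	= {1, 0, 0},
 *		.make_rq	= example_make_rq,
 *		.capacity	= example_capacity,
 *		.init		= example_init,
 *		.exit		= example_exit,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return nvm_register_target(&tt_example);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		nvm_unregister_target(&tt_example);
 *	}
 */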
73
74 void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
75 dma_addr_t *dma_handler)
76 {
77 return dev->ops->dev_dma_alloc(dev->q, dev->ppalist_pool, mem_flags,
78 dma_handler);
79 }
80 EXPORT_SYMBOL(nvm_dev_dma_alloc);
81
82 void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
83 dma_addr_t dma_handler)
84 {
85 dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
86 }
87 EXPORT_SYMBOL(nvm_dev_dma_free);
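
/*
 * Illustrative pairing (sketch, assuming the caller tracks its own dev and
 * request state): a PPA list is allocated per request and released with the
 * matching helper once the request completes.
 *
 *	dma_addr_t dma;
 *	struct ppa_addr *ppas;
 *
 *	ppas = nvm_dev_dma_alloc(dev, GFP_KERNEL, &dma);
 *	if (!ppas)
 *		return -ENOMEM;
 *
 *	nvm_dev_dma_free(dev, ppas, dma);
 */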
88
89 static struct nvmm_type *nvm_find_mgr_type(const char *name)
90 {
91 struct nvmm_type *mt;
92
93 list_for_each_entry(mt, &nvm_mgrs, list)
94 if (!strcmp(name, mt->name))
95 return mt;
96
97 return NULL;
98 }
99
100 int nvm_register_mgr(struct nvmm_type *mt)
101 {
102 int ret = 0;
103
104 down_write(&nvm_lock);
105 if (nvm_find_mgr_type(mt->name))
106 ret = -EEXIST;
107 else
108 list_add(&mt->list, &nvm_mgrs);
109 up_write(&nvm_lock);
110
111 return ret;
112 }
113 EXPORT_SYMBOL(nvm_register_mgr);
114
115 void nvm_unregister_mgr(struct nvmm_type *mt)
116 {
117 if (!mt)
118 return;
119
120 down_write(&nvm_lock);
121 list_del(&mt->list);
122 up_write(&nvm_lock);
123 }
124 EXPORT_SYMBOL(nvm_unregister_mgr);
125
126 static struct nvm_dev *nvm_find_nvm_dev(const char *name)
127 {
128 struct nvm_dev *dev;
129
130 list_for_each_entry(dev, &nvm_devices, devices)
131 if (!strcmp(name, dev->name))
132 return dev;
133
134 return NULL;
135 }
136
137 struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
138 unsigned long flags)
139 {
140 return dev->mt->get_blk(dev, lun, flags);
141 }
142 EXPORT_SYMBOL(nvm_get_blk);
143
/* Assumes that all valid pages have already been moved to the block manager (bm) on release */
145 void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
146 {
147 return dev->mt->put_blk(dev, blk);
148 }
149 EXPORT_SYMBOL(nvm_put_blk);
150
151 int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
152 {
153 return dev->mt->submit_io(dev, rqd);
154 }
155 EXPORT_SYMBOL(nvm_submit_io);
156
157 int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
158 {
159 return dev->mt->erase_blk(dev, blk, 0);
160 }
161 EXPORT_SYMBOL(nvm_erase_blk);
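
/*
 * Illustrative sketch of the block lifecycle the helpers above provide to a
 * target: grab a free block from a lun, submit I/O against it, and hand it
 * back (erasing it first) once all of its valid pages have been relocated.
 * The nvm_rq setup and error handling are left to the target and are not
 * shown; "lun" and "rqd" are assumed to exist in the caller.
 *
 *	struct nvm_block *blk;
 *	int ret;
 *
 *	blk = nvm_get_blk(dev, lun, 0);
 *	if (!blk)
 *		return -ENOSPC;
 *
 *	ret = nvm_submit_io(dev, rqd);
 *
 *	nvm_erase_blk(dev, blk);
 *	nvm_put_blk(dev, blk);
 */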
162
163 static void nvm_core_free(struct nvm_dev *dev)
164 {
165 kfree(dev);
166 }
167
168 static int nvm_core_init(struct nvm_dev *dev)
169 {
170 struct nvm_id *id = &dev->identity;
171 struct nvm_id_group *grp = &id->groups[0];
172
173 /* device values */
174 dev->nr_chnls = grp->num_ch;
175 dev->luns_per_chnl = grp->num_lun;
176 dev->pgs_per_blk = grp->num_pg;
177 dev->blks_per_lun = grp->num_blk;
178 dev->nr_planes = grp->num_pln;
179 dev->sec_size = grp->csecs;
180 dev->oob_size = grp->sos;
181 dev->sec_per_pg = grp->fpg_sz / grp->csecs;
182 dev->addr_mode = id->ppat;
183 dev->addr_format = id->ppaf;
184
185 dev->plane_mode = NVM_PLANE_SINGLE;
186 dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
187
188 if (grp->mpos & 0x020202)
189 dev->plane_mode = NVM_PLANE_DOUBLE;
190 if (grp->mpos & 0x040404)
191 dev->plane_mode = NVM_PLANE_QUAD;
192
193 /* calculated values */
194 dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
195 dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
196 dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
197 dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
198
199 dev->total_blocks = dev->nr_planes *
200 dev->blks_per_lun *
201 dev->luns_per_chnl *
202 dev->nr_chnls;
203 dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
204 INIT_LIST_HEAD(&dev->online_targets);
205
206 return 0;
207 }
208
209 static void nvm_free(struct nvm_dev *dev)
210 {
211 if (!dev)
212 return;
213
214 if (dev->mt)
215 dev->mt->unregister_mgr(dev);
216
217 nvm_core_free(dev);
218 }
219
220 static int nvm_init(struct nvm_dev *dev)
221 {
222 struct nvmm_type *mt;
223 int ret = 0;
224
225 if (!dev->q || !dev->ops)
226 return -EINVAL;
227
228 if (dev->ops->identity(dev->q, &dev->identity)) {
229 pr_err("nvm: device could not be identified\n");
230 ret = -EINVAL;
231 goto err;
232 }
233
234 pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
235 dev->identity.ver_id, dev->identity.vmnt,
236 dev->identity.cgrps);
237
	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		ret = -EINVAL;
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.\n");
		ret = -EINVAL;
		goto err;
	}
247
248 ret = nvm_core_init(dev);
249 if (ret) {
250 pr_err("nvm: could not initialize core structures.\n");
251 goto err;
252 }
253
	/* register the device with a supported manager */
255 list_for_each_entry(mt, &nvm_mgrs, list) {
256 ret = mt->register_mgr(dev);
257 if (ret < 0)
258 goto err; /* initialization failed */
259 if (ret > 0) {
260 dev->mt = mt;
261 break; /* successfully initialized */
262 }
263 }
264
265 if (!ret) {
266 pr_info("nvm: no compatible manager found.\n");
267 return 0;
268 }
269
270 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
271 dev->name, dev->sec_per_pg, dev->nr_planes,
272 dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
273 dev->nr_chnls);
274 return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
279 }
280
281 static void nvm_exit(struct nvm_dev *dev)
282 {
283 if (dev->ppalist_pool)
284 dev->ops->destroy_dma_pool(dev->ppalist_pool);
285 nvm_free(dev);
286
287 pr_info("nvm: successfully unloaded\n");
288 }
289
290 int nvm_register(struct request_queue *q, char *disk_name,
291 struct nvm_dev_ops *ops)
292 {
293 struct nvm_dev *dev;
294 int ret;
295
296 if (!ops->identity)
297 return -EINVAL;
298
299 dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
300 if (!dev)
301 return -ENOMEM;
302
303 dev->q = q;
304 dev->ops = ops;
305 strncpy(dev->name, disk_name, DISK_NAME_LEN);
306
307 ret = nvm_init(dev);
308 if (ret)
309 goto err_init;
310
	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_nvm;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
								"ppalist");
		if (!dev->ppalist_pool) {
			pr_err("nvm: could not create ppa pool\n");
			ret = -ENOMEM;
			goto err_nvm;
		}
	}

	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_nvm:
	nvm_free(dev);
	return ret;
328 err_init:
329 kfree(dev);
330 return ret;
331 }
332 EXPORT_SYMBOL(nvm_register);
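
/*
 * Illustrative sketch (assumption, not taken from an in-tree driver): a
 * low-level device driver fills in a struct nvm_dev_ops and registers its
 * request queue under the disk name.  Only the ops members that core.c
 * dereferences are listed; the example_nvm_* callbacks, "ns" and "disk"
 * stand in for whatever state the driver keeps.
 *
 *	static struct nvm_dev_ops example_nvm_dev_ops = {
 *		.identity		= example_nvm_identity,
 *		.create_dma_pool	= example_nvm_create_dma_pool,
 *		.destroy_dma_pool	= example_nvm_destroy_dma_pool,
 *		.dev_dma_alloc		= example_nvm_dma_alloc,
 *		.dev_dma_free		= example_nvm_dma_free,
 *		.max_phys_sect		= 64,
 *	};
 *
 *	ret = nvm_register(ns->queue, disk->disk_name, &example_nvm_dev_ops);
 *	...
 *	nvm_unregister(disk->disk_name);
 */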
333
334 void nvm_unregister(char *disk_name)
335 {
336 struct nvm_dev *dev = nvm_find_nvm_dev(disk_name);
337
338 if (!dev) {
339 pr_err("nvm: could not find device %s to unregister\n",
340 disk_name);
341 return;
342 }
343
344 nvm_exit(dev);
345
346 down_write(&nvm_lock);
347 list_del(&dev->devices);
348 up_write(&nvm_lock);
349 }
350 EXPORT_SYMBOL(nvm_unregister);
351
352 static const struct block_device_operations nvm_fops = {
353 .owner = THIS_MODULE,
354 };
355
356 static int nvm_create_target(struct nvm_dev *dev,
357 struct nvm_ioctl_create *create)
358 {
359 struct nvm_ioctl_create_simple *s = &create->conf.s;
360 struct request_queue *tqueue;
361 struct nvmm_type *mt;
362 struct gendisk *tdisk;
363 struct nvm_tgt_type *tt;
364 struct nvm_target *t;
365 void *targetdata;
366 int ret = 0;
367
368 if (!dev->mt) {
		/* register the device with a supported NVM manager */
370 list_for_each_entry(mt, &nvm_mgrs, list) {
371 ret = mt->register_mgr(dev);
372 if (ret < 0)
373 return ret; /* initialization failed */
374 if (ret > 0) {
375 dev->mt = mt;
376 break; /* successfully initialized */
377 }
378 }
379
380 if (!ret) {
381 pr_info("nvm: no compatible nvm manager found.\n");
382 return -ENODEV;
383 }
384 }
385
386 tt = nvm_find_target_type(create->tgttype);
387 if (!tt) {
388 pr_err("nvm: target type %s not found\n", create->tgttype);
389 return -EINVAL;
390 }
391
392 down_write(&nvm_lock);
393 list_for_each_entry(t, &dev->online_targets, list) {
394 if (!strcmp(create->tgtname, t->disk->disk_name)) {
395 pr_err("nvm: target name already exists.\n");
396 up_write(&nvm_lock);
397 return -EINVAL;
398 }
399 }
400 up_write(&nvm_lock);
401
402 t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
403 if (!t)
404 return -ENOMEM;
405
406 tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
407 if (!tqueue)
408 goto err_t;
409 blk_queue_make_request(tqueue, tt->make_rq);
410
411 tdisk = alloc_disk(0);
412 if (!tdisk)
413 goto err_queue;
414
415 sprintf(tdisk->disk_name, "%s", create->tgtname);
416 tdisk->flags = GENHD_FL_EXT_DEVT;
417 tdisk->major = 0;
418 tdisk->first_minor = 0;
419 tdisk->fops = &nvm_fops;
420 tdisk->queue = tqueue;
421
422 targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
423 if (IS_ERR(targetdata))
424 goto err_init;
425
426 tdisk->private_data = targetdata;
427 tqueue->queuedata = targetdata;
428
429 blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
430
431 set_capacity(tdisk, tt->capacity(targetdata));
432 add_disk(tdisk);
433
434 t->type = tt;
435 t->disk = tdisk;
436
437 down_write(&nvm_lock);
438 list_add_tail(&t->list, &dev->online_targets);
439 up_write(&nvm_lock);
440
441 return 0;
442 err_init:
443 put_disk(tdisk);
444 err_queue:
445 blk_cleanup_queue(tqueue);
446 err_t:
447 kfree(t);
448 return -ENOMEM;
449 }
450
451 static void nvm_remove_target(struct nvm_target *t)
452 {
453 struct nvm_tgt_type *tt = t->type;
454 struct gendisk *tdisk = t->disk;
455 struct request_queue *q = tdisk->queue;
456
457 lockdep_assert_held(&nvm_lock);
458
459 del_gendisk(tdisk);
460 if (tt->exit)
461 tt->exit(tdisk->private_data);
462
463 blk_cleanup_queue(q);
464
465 put_disk(tdisk);
466
467 list_del(&t->list);
468 kfree(t);
469 }
470
471 static int __nvm_configure_create(struct nvm_ioctl_create *create)
472 {
473 struct nvm_dev *dev;
474 struct nvm_ioctl_create_simple *s;
475
476 dev = nvm_find_nvm_dev(create->dev);
477 if (!dev) {
478 pr_err("nvm: device not found\n");
479 return -EINVAL;
480 }
481
482 if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
483 pr_err("nvm: config type not valid\n");
484 return -EINVAL;
485 }
486 s = &create->conf.s;
487
488 if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
489 pr_err("nvm: lun out of bound (%u:%u > %u)\n",
490 s->lun_begin, s->lun_end, dev->nr_luns);
491 return -EINVAL;
492 }
493
494 return nvm_create_target(dev, create);
495 }
496
497 static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
498 {
499 struct nvm_target *t = NULL;
500 struct nvm_dev *dev;
501 int ret = -1;
502
503 down_write(&nvm_lock);
504 list_for_each_entry(dev, &nvm_devices, devices)
505 list_for_each_entry(t, &dev->online_targets, list) {
506 if (!strcmp(remove->tgtname, t->disk->disk_name)) {
507 nvm_remove_target(t);
508 ret = 0;
509 break;
510 }
511 }
512 up_write(&nvm_lock);
513
514 if (ret) {
515 pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
516 return -EINVAL;
517 }
518
519 return 0;
520 }
521
522 #ifdef CONFIG_NVM_DEBUG
523 static int nvm_configure_show(const char *val)
524 {
525 struct nvm_dev *dev;
526 char opcode, devname[DISK_NAME_LEN];
527 int ret;
528
	ret = sscanf(val, "%c %31s", &opcode, devname);
530 if (ret != 2) {
531 pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
532 return -EINVAL;
533 }
534
535 dev = nvm_find_nvm_dev(devname);
536 if (!dev) {
537 pr_err("nvm: device not found\n");
538 return -EINVAL;
539 }
540
541 if (!dev->mt)
542 return 0;
543
544 dev->mt->free_blocks_print(dev);
545
546 return 0;
547 }
548
549 static int nvm_configure_remove(const char *val)
550 {
551 struct nvm_ioctl_remove remove;
552 char opcode;
553 int ret;
554
	ret = sscanf(val, "%c %31s", &opcode, remove.tgtname);
556 if (ret != 2) {
557 pr_err("nvm: invalid command. Use \"d targetname\".\n");
558 return -EINVAL;
559 }
560
561 remove.flags = 0;
562
563 return __nvm_configure_remove(&remove);
564 }
565
566 static int nvm_configure_create(const char *val)
567 {
568 struct nvm_ioctl_create create;
569 char opcode;
570 int lun_begin, lun_end, ret;
571
	ret = sscanf(val, "%c %31s %31s %47s %u:%u", &opcode, create.dev,
						create.tgtname, create.tgttype,
						&lun_begin, &lun_end);
	if (ret != 6) {
		pr_err("nvm: invalid command. Use \"opcode device tgtname tgttype lun_begin:lun_end\".\n");
		return -EINVAL;
	}
579
580 create.flags = 0;
581 create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
582 create.conf.s.lun_begin = lun_begin;
583 create.conf.s.lun_end = lun_end;
584
585 return __nvm_configure_create(&create);
586 }
587
588
/* Exposes an administrative interface through /sys/module/lnvm/parameters/configure_debug */
590 static int nvm_configure_by_str_event(const char *val,
591 const struct kernel_param *kp)
592 {
593 char opcode;
594 int ret;
595
596 ret = sscanf(val, "%c", &opcode);
597 if (ret != 1) {
598 pr_err("nvm: string must have the format of \"cmd ...\"\n");
599 return -EINVAL;
600 }
601
602 switch (opcode) {
603 case 'a':
604 return nvm_configure_create(val);
605 case 'd':
606 return nvm_configure_remove(val);
607 case 's':
608 return nvm_configure_show(val);
609 default:
610 pr_err("nvm: invalid command\n");
611 return -EINVAL;
612 }
613
614 return 0;
615 }
616
static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
	int sz;
	struct nvm_dev *dev;

	sz = sprintf(buf, "available devices:\n");
	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		/* keep the output within the PAGE_SIZE parameter buffer */
		if (sz > 4095 - DISK_NAME_LEN - 2)
			break;
		sz += sprintf(buf + sz, " %32s\n", dev->name);
	}
	up_write(&nvm_lock);

	return sz;
}
634
635 static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
636 .set = nvm_configure_by_str_event,
637 .get = nvm_configure_get,
638 };
639
640 #undef MODULE_PARAM_PREFIX
641 #define MODULE_PARAM_PREFIX "lnvm."
642
643 module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
644 0644);
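
/*
 * With CONFIG_NVM_DEBUG the parameter above accepts one command per write,
 * matching the sscanf formats in the handlers:
 *
 *	# create target "tgt0" of type "rrpc" on device nvme0n1, luns 0-3
 *	echo "a nvme0n1 tgt0 rrpc 0:3" > /sys/module/lnvm/parameters/configure_debug
 *
 *	# delete target "tgt0"
 *	echo "d tgt0" > /sys/module/lnvm/parameters/configure_debug
 *
 *	# print free block information for device nvme0n1
 *	echo "s nvme0n1" > /sys/module/lnvm/parameters/configure_debug
 *
 * The device, target and type names above are examples only; "rrpc" is a
 * plausible target type and is not referenced by this file.
 */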
645
646 #endif /* CONFIG_NVM_DEBUG */
647
648 static long nvm_ioctl_info(struct file *file, void __user *arg)
649 {
650 struct nvm_ioctl_info *info;
651 struct nvm_tgt_type *tt;
652 int tgt_iter = 0;
653
654 if (!capable(CAP_SYS_ADMIN))
655 return -EPERM;
656
657 info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return PTR_ERR(info);
660
661 info->version[0] = NVM_VERSION_MAJOR;
662 info->version[1] = NVM_VERSION_MINOR;
663 info->version[2] = NVM_VERSION_PATCH;
664
665 down_write(&nvm_lock);
666 list_for_each_entry(tt, &nvm_targets, list) {
667 struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
668
669 tgt->version[0] = tt->version[0];
670 tgt->version[1] = tt->version[1];
671 tgt->version[2] = tt->version[2];
672 strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
673
674 tgt_iter++;
675 }
676
677 info->tgtsize = tgt_iter;
678 up_write(&nvm_lock);
679
	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
685 }
686
687 static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
688 {
689 struct nvm_ioctl_get_devices *devices;
690 struct nvm_dev *dev;
691 int i = 0;
692
693 if (!capable(CAP_SYS_ADMIN))
694 return -EPERM;
695
696 devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
697 if (!devices)
698 return -ENOMEM;
699
700 down_write(&nvm_lock);
701 list_for_each_entry(dev, &nvm_devices, devices) {
702 struct nvm_ioctl_device_info *info = &devices->info[i];
703
704 sprintf(info->devname, "%s", dev->name);
705 if (dev->mt) {
706 info->bmversion[0] = dev->mt->version[0];
707 info->bmversion[1] = dev->mt->version[1];
708 info->bmversion[2] = dev->mt->version[2];
709 sprintf(info->bmname, "%s", dev->mt->name);
710 } else {
711 sprintf(info->bmname, "none");
712 }
713
		i++;
		if (i == 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
719 }
720 up_write(&nvm_lock);
721
722 devices->nr_devices = i;
723
	if (copy_to_user(arg, devices, sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
729 }
730
731 static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
732 {
733 struct nvm_ioctl_create create;
734
735 if (!capable(CAP_SYS_ADMIN))
736 return -EPERM;
737
738 if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
739 return -EFAULT;
740
741 create.dev[DISK_NAME_LEN - 1] = '\0';
742 create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
743 create.tgtname[DISK_NAME_LEN - 1] = '\0';
744
745 if (create.flags != 0) {
746 pr_err("nvm: no flags supported\n");
747 return -EINVAL;
748 }
749
750 return __nvm_configure_create(&create);
751 }
752
753 static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
754 {
755 struct nvm_ioctl_remove remove;
756
757 if (!capable(CAP_SYS_ADMIN))
758 return -EPERM;
759
760 if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
761 return -EFAULT;
762
763 remove.tgtname[DISK_NAME_LEN - 1] = '\0';
764
765 if (remove.flags != 0) {
766 pr_err("nvm: no flags supported\n");
767 return -EINVAL;
768 }
769
770 return __nvm_configure_remove(&remove);
771 }
772
773 static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
774 {
775 void __user *argp = (void __user *)arg;
776
777 switch (cmd) {
778 case NVM_INFO:
779 return nvm_ioctl_info(file, argp);
780 case NVM_GET_DEVICES:
781 return nvm_ioctl_get_devices(file, argp);
782 case NVM_DEV_CREATE:
783 return nvm_ioctl_dev_create(file, argp);
784 case NVM_DEV_REMOVE:
785 return nvm_ioctl_dev_remove(file, argp);
786 }
	return -ENOTTY;
788 }
789
790 static const struct file_operations _ctl_fops = {
791 .open = nonseekable_open,
792 .unlocked_ioctl = nvm_ctl_ioctl,
793 .owner = THIS_MODULE,
794 .llseek = noop_llseek,
795 };
796
797 static struct miscdevice _nvm_misc = {
798 .minor = MISC_DYNAMIC_MINOR,
799 .name = "lightnvm",
800 .nodename = "lightnvm/control",
801 .fops = &_ctl_fops,
802 };
803
804 MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
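
/*
 * Illustrative userspace sketch: the control node is exposed as
 * /dev/lightnvm/control, and a target can be created with NVM_DEV_CREATE
 * using the uapi structures from <linux/lightnvm.h>.  Only the fields the
 * handlers above read are filled in; the device, target and type names are
 * example values, and error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/lightnvm.h>
 *
 *	struct nvm_ioctl_create c;
 *	int fd = open("/dev/lightnvm/control", O_RDWR);
 *
 *	memset(&c, 0, sizeof(c));
 *	strcpy(c.dev, "nvme0n1");
 *	strcpy(c.tgtname, "tgt0");
 *	strcpy(c.tgttype, "rrpc");
 *	c.flags = 0;
 *	c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
 *	c.conf.s.lun_begin = 0;
 *	c.conf.s.lun_end = 3;
 *
 *	ioctl(fd, NVM_DEV_CREATE, &c);
 */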
805
806 static int __init nvm_mod_init(void)
807 {
808 int ret;
809
810 ret = misc_register(&_nvm_misc);
811 if (ret)
		pr_err("nvm: misc_register failed for control device\n");
813
814 return ret;
815 }
816
817 static void __exit nvm_mod_exit(void)
818 {
819 misc_deregister(&_nvm_misc);
820 }
821
822 MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
823 MODULE_LICENSE("GPL v2");
824 MODULE_VERSION("0.1");
825 module_init(nvm_mod_init);
826 module_exit(nvm_mod_exit);