/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
extern const struct file_operations grsec_fops;
#endif

static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}
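/*
 * Worked example (assuming PAGE_SIZE == 4096): for start == 0x1ff0 and
 * size == 64, start & (PAGE_SIZE - 1) == 0xff0, so sz == 0x1000 - 0xff0
 * == 16 bytes remain in the current page, and min(16, 64) == 16 is
 * returned. The copy loops below therefore never cross a page boundary
 * in a single step.
 */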
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
#ifdef CONFIG_GRKERNSEC_KMEM
			gr_handle_mem_readwrite(from, to);
#endif
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#elif defined(CONFIG_GRKERNSEC_KMEM)
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 0;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;
	char *temp;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	temp = kmalloc(PAGE_SIZE, GFP_KERNEL|GFP_USERCOPY);
	if (!temp)
		return -ENOMEM;

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count)) {
			kfree(temp);
			return -EPERM;
		}

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr || probe_kernel_read(temp, ptr, sz)) {
			kfree(temp);
			return -EFAULT;
		}

		remaining = copy_to_user(buf, temp, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining) {
			kfree(temp);
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	kfree(temp);

	*ppos += read;
	return read;
}
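/*
 * Userspace sketch (illustrative, not part of this driver): the file
 * offset of /dev/mem is the physical address, so a pread() at offset
 * 0x9f000 (a hypothetical address) reads that physical page. This needs
 * sufficient privilege, and CONFIG_STRICT_DEVMEM or the grsec hooks may
 * still veto the range via range_is_allowed() above:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	unsigned char page[4096];
 *	ssize_t n = pread(fd, page, sizeof(page), 0x9f000);
 *	close(fd);
 */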
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* It's illegal to wrap around the end of the physical address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
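/*
 * Userspace sketch: mmap() on /dev/mem maps physical pages; the file
 * offset selects the physical base, and phys_mem_access_prot() above
 * typically makes a mapping of non-RAM addresses uncached. The
 * 0xfed00000 base below is purely a hypothetical MMIO address:
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	volatile void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, fd, 0xfed00000);
 */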
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		char *temp;

		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif

		temp = kmalloc(PAGE_SIZE, GFP_KERNEL|GFP_USERCOPY);
		if (!temp)
			return -ENOMEM;

		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);
			if (!virt_addr_valid(kbuf)) {
				/* don't leak the bounce buffer on error */
				kfree(temp);
				return -ENXIO;
			}

			if (probe_kernel_read(temp, kbuf, sz) ||
			    copy_to_user(buf, temp, sz)) {
				kfree(temp);
				return -EFAULT;
			}
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}

		kfree(temp);
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}

static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		void *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);
		if (!virt_addr_valid(ptr))
			return -ENXIO;

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
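/*
 * Userspace sketch: for /dev/port the file offset is the I/O port
 * number and each byte transferred is one inb()/outb(). Reading one
 * byte at offset 0x61 (on x86, the NMI status / PC-speaker port)
 * behaves like a single inb(0x61):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/port", O_RDONLY);
 *	unsigned char val;
 *	pread(fd, &val, 1, 0x61);
 */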
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	iov_iter_advance(from, count);
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}
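/*
 * Userspace sketch: any read from /dev/zero fills the whole buffer with
 * zero bytes; iov_iter_zero() above clears in at most PAGE_SIZE chunks
 * so that large reads remain preemptible and signal-interruptible:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/zero", O_RDONLY);
 *	char buf[8192];
 *	ssize_t n = read(fd, buf, sizeof(buf));	/* n == 8192, all zeroes */
 */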
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}
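/*
 * Userspace sketch: a MAP_PRIVATE mapping of /dev/zero is demand-zero
 * anonymous memory (mmap_zero() does nothing in that case), while
 * MAP_SHARED is backed by a shmem object via shmem_zero_setup():
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE, fd, 0);	/* like MAP_ANONYMOUS */
 */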
static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem
static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.read_iter	= read_iter_null,
	.write_iter	= write_iter_null,
	.splice_write	= splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.write_iter	= write_iter_zero,
	.mmap		= mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read_iter	= read_iter_zero,
	.write		= write_full,
};
static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	 [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, 0 },
#endif
	 [5] = { "zero", 0666, &zero_fops, 0 },
	 [7] = { "full", 0666, &full_fops, 0 },
	 [8] = { "random", 0666, &random_fops, 0 },
	 [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
	[13] = { "grsec", S_IRUSR | S_IWUGO, &grsec_fops, 0 },
#endif
};
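/*
 * The array index above is the minor number under MEM_MAJOR (1), so the
 * entries correspond to the conventional device nodes. Userspace sketch
 * of creating one by hand:
 *
 *	#include <sys/stat.h>
 *	#include <sys/sysmacros.h>
 *
 *	mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
 */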
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};
static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk(KERN_ERR "unable to get major %d for memory devs\n",
		       MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, "%s", devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);