git.ipfire.org Git - people/ms/linux.git/blobdiff - kernel/events/core.c
Importing "grsecurity-3.1-3.19.2-201503201903.patch"
[people/ms/linux.git] / kernel / events / core.c
index 19efcf13375a2960e6d8e6994a8aa660d6221253..7c05c93dfcfc8b563a7932807dfccba64d710e77 100644 (file)
@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
  *   0 - disallow raw tracepoint access for unpriv
  *   1 - disallow cpu events for unpriv
  *   2 - disallow kernel profiling for unpriv
+ *   3 - disallow all unpriv perf event use
  */
-int sysctl_perf_event_paranoid __read_mostly = 1;
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
+#else
+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
+#endif
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
 
        tmp *= sysctl_perf_cpu_time_max_percent;
        do_div(tmp, 100);
-       ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
+       ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
 }
 
 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
        }
 }
 
-static atomic64_t perf_event_id;
+static atomic64_unchecked_t perf_event_id;
 
 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
                              enum event_type_t event_type);
@@ -3102,7 +3109,7 @@ static void __perf_event_read(void *info)
 
 static inline u64 perf_event_count(struct perf_event *event)
 {
-       return local64_read(&event->count) + atomic64_read(&event->child_count);
+       return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
 }
 
 static u64 perf_event_read(struct perf_event *event)
@@ -3528,9 +3535,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
        mutex_lock(&event->child_mutex);
        total += perf_event_read(event);
        *enabled += event->total_time_enabled +
-                       atomic64_read(&event->child_total_time_enabled);
+                       atomic64_read_unchecked(&event->child_total_time_enabled);
        *running += event->total_time_running +
-                       atomic64_read(&event->child_total_time_running);
+                       atomic64_read_unchecked(&event->child_total_time_running);
 
        list_for_each_entry(child, &event->child_list, child_list) {
                total += perf_event_read(child);
@@ -3994,10 +4001,10 @@ void perf_event_update_userpage(struct perf_event *event)
                userpg->offset -= local64_read(&event->hw.prev_count);
 
        userpg->time_enabled = enabled +
-                       atomic64_read(&event->child_total_time_enabled);
+                       atomic64_read_unchecked(&event->child_total_time_enabled);
 
        userpg->time_running = running +
-                       atomic64_read(&event->child_total_time_running);
+                       atomic64_read_unchecked(&event->child_total_time_running);
 
        arch_perf_update_userpage(userpg, now);
 
@@ -4568,7 +4575,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
 
                /* Data. */
                sp = perf_user_stack_pointer(regs);
-               rem = __output_copy_user(handle, (void *) sp, dump_size);
+               rem = __output_copy_user(handle, (void __user *) sp, dump_size);
                dyn_size = dump_size - rem;
 
                perf_output_skip(handle, rem);
@@ -4659,11 +4666,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
        values[n++] = perf_event_count(event);
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
                values[n++] = enabled +
-                       atomic64_read(&event->child_total_time_enabled);
+                       atomic64_read_unchecked(&event->child_total_time_enabled);
        }
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
                values[n++] = running +
-                       atomic64_read(&event->child_total_time_running);
+                       atomic64_read_unchecked(&event->child_total_time_running);
        }
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(event);
@@ -6994,7 +7001,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        event->parent           = parent_event;
 
        event->ns               = get_pid_ns(task_active_pid_ns(current));
-       event->id               = atomic64_inc_return(&perf_event_id);
+       event->id               = atomic64_inc_return_unchecked(&perf_event_id);
 
        event->state            = PERF_EVENT_STATE_INACTIVE;
 
@@ -7275,6 +7282,11 @@ SYSCALL_DEFINE5(perf_event_open,
        if (flags & ~PERF_FLAG_ALL)
                return -EINVAL;
 
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
+       if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
+               return -EACCES;
+#endif
+
        err = perf_copy_attr(attr_uptr, &attr);
        if (err)
                return err;
@@ -7642,10 +7654,10 @@ static void sync_child_event(struct perf_event *child_event,
        /*
         * Add back the child's count to the parent's count:
         */
-       atomic64_add(child_val, &parent_event->child_count);
-       atomic64_add(child_event->total_time_enabled,
+       atomic64_add_unchecked(child_val, &parent_event->child_count);
+       atomic64_add_unchecked(child_event->total_time_enabled,
                     &parent_event->child_total_time_enabled);
-       atomic64_add(child_event->total_time_running,
+       atomic64_add_unchecked(child_event->total_time_running,
                     &parent_event->child_total_time_running);
 
        /*