git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
ipmi: Make the smi watcher be disabled immediately when not needed
Author: Corey Minyard <cminyard@mvista.com>
Wed, 24 Oct 2018 20:17:04 +0000 (15:17 -0500)
Committer: Corey Minyard <cminyard@mvista.com>
Sun, 10 Feb 2019 01:48:42 +0000 (19:48 -0600)
The code to tell the lower layer to enable or disable watching for
certain things was lazy in disabling; it waited until a timer tick
to see if a disable was necessary.  Not a really big deal, but it
could be improved.

Modify the code to enable and disable watching immediately and don't
do it from the background timer any more.

Signed-off-by: Corey Minyard <cminyard@mvista.com>
Tested-by: Kamlakant Patel <kamlakant.patel@cavium.com>
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_ssif.c
include/linux/ipmi_smi.h

index 2e008efa735f3682c3fd00aa08599a8ef517d35f..5bc84fbbeee55bbd956d9162ef8ae66cbd17a4de 100644 (file)
@@ -534,15 +534,20 @@ struct ipmi_smi {
        atomic_t         event_waiters;
        unsigned int     ticks_to_req_ev;
 
+       spinlock_t       watch_lock; /* For dealing with watch stuff below. */
+
        /* How many users are waiting for commands? */
-       atomic_t         command_waiters;
+       unsigned int     command_waiters;
 
        /* How many users are waiting for watchdogs? */
-       atomic_t         watchdog_waiters;
+       unsigned int     watchdog_waiters;
+
+       /* How many users are waiting for message responses? */
+       unsigned int     response_waiters;
 
        /*
         * Tells what the lower layer has last been asked to watch for,
-        * messages and/or watchdogs.  Protected by xmit_msgs_lock.
+        * messages and/or watchdogs.  Protected by watch_lock.
         */
        unsigned int     last_watch_mask;
 
@@ -938,6 +943,64 @@ static void deliver_err_response(struct ipmi_smi *intf,
        deliver_local_response(intf, msg);
 }
 
+static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+       unsigned long iflags;
+
+       if (!intf->handlers->set_need_watch)
+               return;
+
+       spin_lock_irqsave(&intf->watch_lock, iflags);
+       if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+               intf->response_waiters++;
+
+       if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+               intf->watchdog_waiters++;
+
+       if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+               intf->command_waiters++;
+
+       if ((intf->last_watch_mask & flags) != flags) {
+               intf->last_watch_mask |= flags;
+               intf->handlers->set_need_watch(intf->send_info,
+                                              intf->last_watch_mask);
+       }
+       spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
+static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+       unsigned long iflags;
+
+       if (!intf->handlers->set_need_watch)
+               return;
+
+       spin_lock_irqsave(&intf->watch_lock, iflags);
+       if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+               intf->response_waiters--;
+
+       if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+               intf->watchdog_waiters--;
+
+       if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+               intf->command_waiters--;
+
+       flags = 0;
+       if (intf->response_waiters)
+               flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+       if (intf->watchdog_waiters)
+               flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
+       if (intf->command_waiters)
+               flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
+
+       if (intf->last_watch_mask != flags) {
+               intf->last_watch_mask = flags;
+               intf->handlers->set_need_watch(intf->send_info,
+                                              intf->last_watch_mask);
+       }
+       spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
 /*
  * Find the next sequence number not being used and add the given
  * message with the given timeout to the sequence table.  This must be
@@ -981,6 +1044,7 @@ static int intf_next_seq(struct ipmi_smi      *intf,
                *seq = i;
                *seqid = intf->seq_table[i].seqid;
                intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+               smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
                need_waiter(intf);
        } else {
                rv = -EAGAIN;
@@ -1019,6 +1083,7 @@ static int intf_find_seq(struct ipmi_smi      *intf,
                                && (ipmi_addr_equal(addr, &msg->addr))) {
                        *recv_msg = msg;
                        intf->seq_table[seq].inuse = 0;
+                       smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
                        rv = 0;
                }
        }
@@ -1080,6 +1145,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
                struct seq_table *ent = &intf->seq_table[seq];
 
                ent->inuse = 0;
+               smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
                msg = ent->recv_msg;
                rv = 0;
        }
@@ -1091,30 +1157,6 @@ static int intf_err_seq(struct ipmi_smi *intf,
        return rv;
 }
 
-/* Must be called with xmit_msgs_lock held. */
-static void smi_tell_to_watch(struct ipmi_smi *intf,
-                             unsigned int flags,
-                             struct ipmi_smi_msg *smi_msg)
-{
-       if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) {
-               if (!smi_msg)
-                       return;
-
-               if (!smi_msg->needs_response)
-                       return;
-       }
-
-       if (!intf->handlers->set_need_watch)
-               return;
-
-       if ((intf->last_watch_mask & flags) == flags)
-               return;
-
-       intf->last_watch_mask |= flags;
-       intf->handlers->set_need_watch(intf->send_info,
-                                      intf->last_watch_mask);
-}
-
 int ipmi_create_user(unsigned int          if_num,
                     const struct ipmi_user_hndl *handler,
                     void                  *handler_data,
@@ -1175,12 +1217,9 @@ int ipmi_create_user(unsigned int          if_num,
        spin_lock_irqsave(&intf->seq_lock, flags);
        list_add_rcu(&new_user->link, &intf->users);
        spin_unlock_irqrestore(&intf->seq_lock, flags);
-       if (handler->ipmi_watchdog_pretimeout) {
+       if (handler->ipmi_watchdog_pretimeout)
                /* User wants pretimeouts, so make sure to watch for them. */
-               if (atomic_inc_return(&intf->watchdog_waiters) == 1)
-                       smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG,
-                                         NULL);
-       }
+               smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
        srcu_read_unlock(&ipmi_interfaces_srcu, index);
        *user = new_user;
        return 0;
@@ -1251,7 +1290,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
                user->handler->shutdown(user->handler_data);
 
        if (user->handler->ipmi_watchdog_pretimeout)
-               atomic_dec(&intf->watchdog_waiters);
+               smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
 
        if (user->gets_events)
                atomic_dec(&intf->event_waiters);
@@ -1264,6 +1303,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
                if (intf->seq_table[i].inuse
                    && (intf->seq_table[i].recv_msg->user == user)) {
                        intf->seq_table[i].inuse = 0;
+                       smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
                }
        }
@@ -1606,8 +1646,7 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
                goto out_unlock;
        }
 
-       if (atomic_inc_return(&intf->command_waiters) == 1)
-               smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS, NULL);
+       smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
 
        list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
 
@@ -1657,7 +1696,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
        synchronize_rcu();
        release_ipmi_user(user, index);
        while (rcvrs) {
-               atomic_dec(&intf->command_waiters);
+               smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
                rcvr = rcvrs;
                rcvrs = rcvr->next;
                kfree(rcvr);
@@ -1785,8 +1824,6 @@ static void smi_send(struct ipmi_smi *intf,
                spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
        smi_msg = smi_add_send_msg(intf, smi_msg, priority);
 
-       smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, smi_msg);
-
        if (!run_to_completion)
                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 
@@ -1986,9 +2023,6 @@ static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
                                ipmb_seq, broadcast,
                                source_address, source_lun);
 
-               /* We will be getting a response in the BMC message queue. */
-               smi_msg->needs_response = true;
-
                /*
                 * Copy the message into the recv message data, so we
                 * can retransmit it later if necessary.
@@ -2176,7 +2210,6 @@ static int i_ipmi_request(struct ipmi_user     *user,
                        goto out;
                }
        }
-       smi_msg->needs_response = false;
 
        rcu_read_lock();
        if (intf->in_shutdown) {
@@ -3390,9 +3423,8 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
        INIT_LIST_HEAD(&intf->xmit_msgs);
        INIT_LIST_HEAD(&intf->hp_xmit_msgs);
        spin_lock_init(&intf->events_lock);
+       spin_lock_init(&intf->watch_lock);
        atomic_set(&intf->event_waiters, 0);
-       atomic_set(&intf->watchdog_waiters, 0);
-       atomic_set(&intf->command_waiters, 0);
        intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
        INIT_LIST_HEAD(&intf->waiting_events);
        intf->waiting_events_count = 0;
@@ -4408,8 +4440,6 @@ static void smi_recv_tasklet(unsigned long val)
                }
        }
 
-       smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, newmsg);
-
        if (!run_to_completion)
                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
        if (newmsg)
@@ -4537,7 +4567,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
                              struct list_head *timeouts,
                              unsigned long timeout_period,
                              int slot, unsigned long *flags,
-                             unsigned int *watch_mask)
+                             bool *need_timer)
 {
        struct ipmi_recv_msg *msg;
 
@@ -4549,13 +4579,14 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 
        if (timeout_period < ent->timeout) {
                ent->timeout -= timeout_period;
-               *watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+               *need_timer = true;
                return;
        }
 
        if (ent->retries_left == 0) {
                /* The message has used all its retries. */
                ent->inuse = 0;
+               smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
                msg = ent->recv_msg;
                list_add_tail(&msg->link, timeouts);
                if (ent->broadcast)
@@ -4568,7 +4599,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
                struct ipmi_smi_msg *smi_msg;
                /* More retries, send again. */
 
-               *watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+               *need_timer = true;
 
                /*
                 * Start with the max timer, set to normal timer after
@@ -4613,20 +4644,20 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
        }
 }
 
-static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
-                                        unsigned long timeout_period)
+static bool ipmi_timeout_handler(struct ipmi_smi *intf,
+                                unsigned long timeout_period)
 {
        struct list_head     timeouts;
        struct ipmi_recv_msg *msg, *msg2;
        unsigned long        flags;
        int                  i;
-       unsigned int         watch_mask = 0;
+       bool                 need_timer = false;
 
        if (!intf->bmc_registered) {
                kref_get(&intf->refcount);
                if (!schedule_work(&intf->bmc_reg_work)) {
                        kref_put(&intf->refcount, intf_free);
-                       watch_mask |= IPMI_WATCH_MASK_INTERNAL;
+                       need_timer = true;
                }
        }
 
@@ -4646,7 +4677,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
                check_msg_timeout(intf, &intf->seq_table[i],
                                  &timeouts, timeout_period, i,
-                                 &flags, &watch_mask);
+                                 &flags, &need_timer);
        spin_unlock_irqrestore(&intf->seq_lock, flags);
 
        list_for_each_entry_safe(msg, msg2, &timeouts, link)
@@ -4677,7 +4708,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
 
        tasklet_schedule(&intf->recv_tasklet);
 
-       return watch_mask;
+       return need_timer;
 }
 
 static void ipmi_request_event(struct ipmi_smi *intf)
@@ -4697,9 +4728,8 @@ static atomic_t stop_operation;
 static void ipmi_timeout(struct timer_list *unused)
 {
        struct ipmi_smi *intf;
-       unsigned int watch_mask = 0;
+       bool need_timer = false;
        int index;
-       unsigned long flags;
 
        if (atomic_read(&stop_operation))
                return;
@@ -4712,28 +4742,14 @@ static void ipmi_timeout(struct timer_list *unused)
                                ipmi_request_event(intf);
                                intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
                        }
-                       watch_mask |= IPMI_WATCH_MASK_INTERNAL;
+                       need_timer = true;
                }
 
-               if (atomic_read(&intf->watchdog_waiters))
-                       watch_mask |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
-
-               if (atomic_read(&intf->command_waiters))
-                       watch_mask |= IPMI_WATCH_MASK_CHECK_COMMANDS;
-
-               watch_mask |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
-
-               spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
-               if (watch_mask != intf->last_watch_mask &&
-                                       intf->handlers->set_need_watch)
-                       intf->handlers->set_need_watch(intf->send_info,
-                                                      watch_mask);
-               intf->last_watch_mask = watch_mask;
-               spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+               need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
        }
        srcu_read_unlock(&ipmi_interfaces_srcu, index);
 
-       if (watch_mask)
+       if (need_timer)
                mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 }
 
index c81c84a723b6d774aec3b2366bf2d6ccbd12ea1c..ae99d6a14789e06769ca51c1299ef674c3565322 100644 (file)
@@ -1066,7 +1066,7 @@ static void set_need_watch(void *send_info, unsigned int watch_mask)
        unsigned long flags;
        int enable;
 
-       enable = !!(watch_mask & ~IPMI_WATCH_MASK_INTERNAL);
+       enable = !!watch_mask;
 
        atomic_set(&smi_info->need_watch, enable);
        spin_lock_irqsave(&smi_info->si_lock, flags);
index a1219af32105c5291d97305aaf4458b703f52c90..e4abaa8e22bc572323600ee6a5a8b63a9b906c99 100644 (file)
@@ -1129,7 +1129,7 @@ static void ssif_set_need_watch(void *send_info, unsigned int watch_mask)
 
        if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
                timeout = SSIF_WATCH_MSG_TIMEOUT;
-       else if (watch_mask & ~IPMI_WATCH_MASK_INTERNAL)
+       else if (watch_mask)
                timeout = SSIF_WATCH_WATCHDOG_TIMEOUT;
 
        flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
index da6abb06a5dc72d665428c5e96aea0a5bed76808..4dc66157d8723631b866947c96d8455b0a92f1f0 100644 (file)
@@ -32,14 +32,11 @@ struct ipmi_smi;
 
 /*
  * Flags for set_check_watch() below.  Tells if the SMI should be
- * waiting for watchdog timeouts, commands and/or messages.  There is
- * also an internal flag for the message handler, SMIs should ignore
- * it.
+ * waiting for watchdog timeouts, commands and/or messages.
  */
-#define IPMI_WATCH_MASK_INTERNAL       (1 << 0)
-#define IPMI_WATCH_MASK_CHECK_MESSAGES (1 << 1)
-#define IPMI_WATCH_MASK_CHECK_WATCHDOG (1 << 2)
-#define IPMI_WATCH_MASK_CHECK_COMMANDS (1 << 3)
+#define IPMI_WATCH_MASK_CHECK_MESSAGES (1 << 0)
+#define IPMI_WATCH_MASK_CHECK_WATCHDOG (1 << 1)
+#define IPMI_WATCH_MASK_CHECK_COMMANDS (1 << 2)
 
 /*
  * Messages to/from the lower layer.  The smi interface will take one
@@ -66,12 +63,6 @@ struct ipmi_smi_msg {
        int           rsp_size;
        unsigned char rsp[IPMI_MAX_MSG_LENGTH];
 
-       /*
-        * There should be a response message coming back in the BMC
-        * message queue.
-        */
-       bool needs_response;
-
        /*
         * Will be called when the system is done with the message
         * (presumably to free it).