Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 May 2020 20:06:56 +0000 (13:06 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 May 2020 20:06:56 +0000 (13:06 -0700)
Pull rdma fixes from Jason Gunthorpe:
 "A few minor bug fixes for user visible defects, and one regression:

   - Various bugs from static checkers and syzkaller

   - Add missing error checking in mlx4

   - Prevent RTNL lock recursion in i40iw

   - Fix segfault in cxgb4 in peer abort cases

   - Fix a regression added in 5.7 where the IB_EVENT_DEVICE_FATAL could
     be lost, and wasn't delivered to all the FDs"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/uverbs: Move IB_EVENT_DEVICE_FATAL to destroy_uobj
  RDMA/uverbs: Do not discard the IB_EVENT_DEVICE_FATAL event
  RDMA/iw_cxgb4: Fix incorrect function parameters
  RDMA/core: Fix double put of resource
  IB/core: Fix potential NULL pointer dereference in pkey cache
  IB/hfi1: Fix another case where pq is left on waitlist
  IB/i40iw: Remove bogus call to netdev_master_upper_dev_get()
  IB/mlx4: Test return value of calls to ib_get_cached_pkey
  RDMA/rxe: Always return ERR_PTR from rxe_create_mmap_info()
  i40iw: Fix error handling in i40iw_manage_arp_cache()
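
The two rxe commits above switch rxe_create_mmap_info() and its caller over to the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention so that the real errno, rather than a blanket -EINVAL, is propagated to the caller. A minimal sketch of that convention, using a hypothetical alloc_thing()/use_thing() pair rather than code from this series:

#include <linux/err.h>
#include <linux/slab.h>

struct thing {
	size_t size;
};

/* Hypothetical allocator: encode the errno in the returned pointer. */
static struct thing *alloc_thing(size_t size)
{
	struct thing *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return ERR_PTR(-ENOMEM);
	t->size = size;
	return t;
}

/* Hypothetical caller: unpack the encoded error and propagate it as-is. */
static int use_thing(size_t size)
{
	struct thing *t = alloc_thing(size);

	if (IS_ERR(t))
		return PTR_ERR(t);
	kfree(t);
	return 0;
}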

13 files changed:
drivers/infiniband/core/cache.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/rdma_core.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/uverbs_std_types_async_fd.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_hw.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/sw/rxe/rxe_mmap.c
drivers/infiniband/sw/rxe/rxe_queue.c

diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 717b798cddad4f7f7e5bceeadb46476565a9543f..a670209bbce6064135b499d567095f64caf47e01 100644
@@ -1553,8 +1553,11 @@ int ib_cache_setup_one(struct ib_device *device)
        if (err)
                return err;
 
-       rdma_for_each_port (device, p)
-               ib_cache_update(device, p, true);
+       rdma_for_each_port (device, p) {
+               err = ib_cache_update(device, p, true);
+               if (err)
+                       return err;
+       }
 
        return 0;
 }
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 9eec26d10d7b1884bf0d15aa7f2d6fa1fed40a77..e16105be2eb238934655aa186693317e54b1375c 100644
@@ -1292,11 +1292,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
        has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
 
        ret = fill_func(msg, has_cap_net_admin, res, port);
-
-       rdma_restrack_put(res);
        if (ret)
                goto err_free;
 
+       rdma_restrack_put(res);
        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 177333d8bcdaeeb3a8e455248715776e892adb40..bf8e149d31911a3463f5bf7abd704a44afaec031 100644
@@ -459,7 +459,8 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
        struct ib_uobject *uobj;
        struct file *filp;
 
-       if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release))
+       if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
+                   fd_type->fops->release != &uverbs_async_event_release))
                return ERR_PTR(-EINVAL);
 
        new_fd = get_unused_fd_flags(O_CLOEXEC);
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 7df71983212d6f9dfecb6ac8bc3455f29818e7db..3d189c7ee59e6663dd73c7387b6ef6b33e6b3edd 100644
@@ -219,6 +219,7 @@ void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
 void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file);
 void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue);
 void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
+int uverbs_async_event_release(struct inode *inode, struct file *filp);
 
 int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs);
 int ib_init_ucontext(struct uverbs_attr_bundle *attrs);
@@ -227,6 +228,9 @@ void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
                           struct ib_ucq_object *uobj);
 void ib_uverbs_release_uevent(struct ib_uevent_object *uobj);
 void ib_uverbs_release_file(struct kref *ref);
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+                            __u64 element, __u64 event,
+                            struct list_head *obj_list, u32 *counter);
 
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 17fc25db031145df82d7af89eade3d3e25d0b2e9..1bab8de14757416aef3af4254abfaea653811861 100644
@@ -346,7 +346,7 @@ const struct file_operations uverbs_async_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_async_event_read,
        .poll    = ib_uverbs_async_event_poll,
-       .release = uverbs_uobject_fd_release,
+       .release = uverbs_async_event_release,
        .fasync  = ib_uverbs_async_event_fasync,
        .llseek  = no_llseek,
 };
@@ -386,10 +386,9 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
        kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
 }
 
-static void
-ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
-                       __u64 element, __u64 event, struct list_head *obj_list,
-                       u32 *counter)
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+                            __u64 element, __u64 event,
+                            struct list_head *obj_list, u32 *counter)
 {
        struct ib_uverbs_event *entry;
        unsigned long flags;
@@ -1187,9 +1186,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                 */
                mutex_unlock(&uverbs_dev->lists_mutex);
 
-               ib_uverbs_async_handler(READ_ONCE(file->async_file), 0,
-                                       IB_EVENT_DEVICE_FATAL, NULL, NULL);
-
                uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
                kref_put(&file->ref, ib_uverbs_release_file);
 
diff --git a/drivers/infiniband/core/uverbs_std_types_async_fd.c b/drivers/infiniband/core/uverbs_std_types_async_fd.c
index 82ec0806b34bd61b6ebf4eb248b03f6e9fbbdd8b..61899eaf1f91ff6a12ae5220f6d1e887293fc05c 100644
@@ -26,10 +26,38 @@ static int uverbs_async_event_destroy_uobj(struct ib_uobject *uobj,
                container_of(uobj, struct ib_uverbs_async_event_file, uobj);
 
        ib_unregister_event_handler(&event_file->event_handler);
-       ib_uverbs_free_event_queue(&event_file->ev_queue);
+
+       if (why == RDMA_REMOVE_DRIVER_REMOVE)
+               ib_uverbs_async_handler(event_file, 0, IB_EVENT_DEVICE_FATAL,
+                                       NULL, NULL);
        return 0;
 }
 
+int uverbs_async_event_release(struct inode *inode, struct file *filp)
+{
+       struct ib_uverbs_async_event_file *event_file;
+       struct ib_uobject *uobj = filp->private_data;
+       int ret;
+
+       if (!uobj)
+               return uverbs_uobject_fd_release(inode, filp);
+
+       event_file =
+               container_of(uobj, struct ib_uverbs_async_event_file, uobj);
+
+       /*
+        * The async event FD has to deliver IB_EVENT_DEVICE_FATAL even after
+        * disassociation, so cleaning the event list must only happen after
+        * release. The user knows it has reached the end of the event stream
+        * when it sees IB_EVENT_DEVICE_FATAL.
+        */
+       uverbs_uobject_get(uobj);
+       ret = uverbs_uobject_fd_release(inode, filp);
+       ib_uverbs_free_event_queue(&event_file->ev_queue);
+       uverbs_uobject_put(uobj);
+       return ret;
+}
+
 DECLARE_UVERBS_NAMED_METHOD(
        UVERBS_METHOD_ASYNC_EVENT_ALLOC,
        UVERBS_ATTR_FD(UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index d69dece3b1d541ad3b834cc9ea128de7c9f20168..30e08bcc9afb53dfb45d83345939a2124c0b6eb6 100644
@@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
                        srqidx = ABORT_RSS_SRQIDX_G(
                                        be32_to_cpu(req->srqidx_status));
                        if (srqidx) {
-                               complete_cached_srq_buffers(ep,
-                                                           req->srqidx_status);
+                               complete_cached_srq_buffers(ep, srqidx);
                        } else {
                                /* Hold ep ref until finish_peer_abort() */
                                c4iw_get_ep(&ep->com);
@@ -3878,8 +3877,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                return 0;
        }
 
-       ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W,
-                       TCB_RQ_START_S);
+       ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
+                                         TCB_RQ_START_S);
 cleanup:
        pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
 
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 13e4203497b33770cd9407df8b327d43f2fffd99..a92346e88628bd0a9d214df2b58e0aaf4e891ec9 100644
@@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 
        set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
        pq->state = SDMA_PKT_Q_ACTIVE;
-       /* Send the first N packets in the request to buy us some time */
-       ret = user_sdma_send_pkts(req, pcount);
-       if (unlikely(ret < 0 && ret != -EBUSY))
-               goto free_req;
 
        /*
         * This is a somewhat blocking send implementation.
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index bb78d3280accdc7bd54a923db5e9a312217ceb12..fa7a5ff498c73bfd851030db4bfda3b010aca81b 100644
@@ -1987,7 +1987,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
        struct rtable *rt;
        struct neighbour *neigh;
        int rc = arpindex;
-       struct net_device *netdev = iwdev->netdev;
        __be32 dst_ipaddr = htonl(dst_ip);
        __be32 src_ipaddr = htonl(src_ip);
 
@@ -1997,9 +1996,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
                return rc;
        }
 
-       if (netif_is_bond_slave(netdev))
-               netdev = netdev_master_upper_dev_get(netdev);
-
        neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
 
        rcu_read_lock();
@@ -2065,7 +2061,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
 {
        struct neighbour *neigh;
        int rc = arpindex;
-       struct net_device *netdev = iwdev->netdev;
        struct dst_entry *dst;
        struct sockaddr_in6 dst_addr;
        struct sockaddr_in6 src_addr;
@@ -2086,9 +2081,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
                return rc;
        }
 
-       if (netif_is_bond_slave(netdev))
-               netdev = netdev_master_upper_dev_get(netdev);
-
        neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
 
        rcu_read_lock();
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 55a1fbf0e670c7fa1a775d94e35dd77ee07e0cba..ae8b97c3066575388c6d3381b0d76f03b5eb8910 100644
@@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
        int arp_index;
 
        arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
-       if (arp_index == -1)
+       if (arp_index < 0)
                return;
        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
        if (!cqp_request)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2f9f78912267d7eed77b387dd7d2835cd00d2b77..cf51e3cbd96919421927d4b5395a03fd5980082b 100644
@@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
        int send_size;
        int header_size;
        int spc;
+       int err;
        int i;
 
        if (wr->wr.opcode != IB_WR_SEND)
@@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 
        sqp->ud_header.lrh.virtual_lane    = 0;
        sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
-       ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+       err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+       if (err)
+               return err;
        sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
        if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
                sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
@@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
        }
        sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
        if (!sqp->qp.ibqp.qp_num)
-               ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
+               err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
+                                        &pkey);
        else
-               ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
+               err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
+                                        &pkey);
+       if (err)
+               return err;
+
        sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
        sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 48f48122ddcb8f8dc41d2692ab45e009514ef0e3..6a413d73b95dd84097a9fac9b2cdb3c29708a6d7 100644
@@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
 
        ip = kmalloc(sizeof(*ip), GFP_KERNEL);
        if (!ip)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        size = PAGE_ALIGN(size);
 
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index ff92704de32ff2a8fd98a2d01b0973a9aa217059..245040c3a35d0e03112ce1f407cf3962e94d1637 100644
@@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
 
        if (outbuf) {
                ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
-               if (!ip)
+               if (IS_ERR(ip)) {
+                       err = PTR_ERR(ip);
                        goto err1;
+               }
 
-               err = copy_to_user(outbuf, &ip->info, sizeof(ip->info));
-               if (err)
+               if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
+                       err = -EFAULT;
                        goto err2;
+               }
 
                spin_lock_bh(&rxe->pending_lock);
                list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
@@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
 err2:
        kfree(ip);
 err1:
-       return -EINVAL;
+       return err;
 }
 
 inline void rxe_queue_reset(struct rxe_queue *q)