From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: 15 Aug 2018
Subject: Re: linux-next: manual merge of the tip tree with the rdma tree

Hi all,

On Mon, 6 Aug 2018 14:53:31 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>
> Today's linux-next merge of the tip tree got a conflict in:
>
> drivers/infiniband/core/rdma_core.c
>
> between commit:
>
> 9867f5c6695f ("IB/uverbs: Convert 'bool exclusive' into an enum")
>
> from the rdma tree and commit:
>
> bfc18e389c7a ("atomics/treewide: Rename __atomic_add_unless() => atomic_fetch_add_unless()")
>
> from the tip tree.
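
An aside on the two sides of this conflict: the tip tree commit is a purely
mechanical rename. atomic_fetch_add_unless() keeps the old
__atomic_add_unless() semantics (add @a to @v unless @v == @u, returning the
previous value); the new name just matches the rest of the atomic_fetch_*()
family. A minimal userspace sketch of that contract, assuming the GCC/Clang
__atomic builtins rather than the kernel's own implementation:

/*
 * Sketch only: mirrors the atomic_fetch_add_unless() contract with a
 * compare-and-swap loop.  Adds "a" to *v unless *v holds "u"; always
 * returns the value *v held beforehand.
 */
static int fetch_add_unless(int *v, int a, int u)
{
	int old = __atomic_load_n(v, __ATOMIC_RELAXED);

	do {
		if (old == u)	/* forbidden value hit: skip the add */
			break;
	} while (!__atomic_compare_exchange_n(v, &old, old + a, false,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));
	return old;
}

The READ branch of the resolution below relies on exactly that return
convention: usecnt == -1 marks a write-locked object, so a -1 return from the
fetch-add is what becomes -EBUSY. The rdma tree commit, for its part, widens
the old "bool exclusive" into the three-way enum rdma_lookup_mode, which is
why the fix-up amounts to applying the rename inside the new switch.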
>
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non-trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging. You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
>
> --
> Cheers,
> Stephen Rothwell
>
> diff --cc drivers/infiniband/core/rdma_core.c
> index 4235b9ddc2ad,475910ffbcb6..000000000000
> --- a/drivers/infiniband/core/rdma_core.c
> +++ b/drivers/infiniband/core/rdma_core.c
> @@@ -122,206 -120,19 +122,206 @@@ static int uverbs_try_lock_object(struc
> * concurrently, setting the counter to zero is enough for releasing
> * this lock.
> */
> - if (!exclusive)
> + switch (mode) {
> + case UVERBS_LOOKUP_READ:
> - return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
> + return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
> -EBUSY : 0;
> + case UVERBS_LOOKUP_WRITE:
> + /* lock is exclusive */
> + return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
> + case UVERBS_LOOKUP_DESTROY:
> + return 0;
> + }
> + return 0;
> +}
> +
> +static void assert_uverbs_usecnt(struct ib_uobject *uobj,
> + enum rdma_lookup_mode mode)
> +{
> +#ifdef CONFIG_LOCKDEP
> + switch (mode) {
> + case UVERBS_LOOKUP_READ:
> + WARN_ON(atomic_read(&uobj->usecnt) <= 0);
> + break;
> + case UVERBS_LOOKUP_WRITE:
> + WARN_ON(atomic_read(&uobj->usecnt) != -1);
> + break;
> + case UVERBS_LOOKUP_DESTROY:
> + break;
> + }
> +#endif
> +}
> +
> +/*
> + * This must be called with the hw_destroy_rwsem locked for read or write;
> + * the uobject itself must also be locked for write.
> + *
> + * Upon return the HW object is guaranteed to be destroyed.
> + *
> + * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
> + * however the type's alloc_commit function cannot have been called and the
> + * uobject cannot be on the uobjects list.
> + *
> + * For RDMA_REMOVE_DESTROY the caller should be holding a kref (e.g. via
> + * rdma_lookup_get_uobject) and the object is left in a state where the caller
> + * needs to call rdma_lookup_put_uobject.
> + *
> + * For all other destroy modes this function internally unlocks the uobject
> + * and consumes the kref on the uobj.
> + */
> +static int uverbs_destroy_uobject(struct ib_uobject *uobj,
> + enum rdma_remove_reason reason)
> +{
> + struct ib_uverbs_file *ufile = uobj->ufile;
> + unsigned long flags;
> + int ret;
> +
> + lockdep_assert_held(&ufile->hw_destroy_rwsem);
> + assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
> +
> + if (uobj->object) {
> + ret = uobj->type->type_class->destroy_hw(uobj, reason);
> + if (ret) {
> + if (ib_is_destroy_retryable(ret, reason, uobj))
> + return ret;
> +
> + /* Nothing to be done, dangle the memory and move on */
> + WARN(true,
> + "ib_uverbs: failed to remove uobject id %d, driver err=%d",
> + uobj->id, ret);
> + }
> +
> + uobj->object = NULL;
> + }
> +
> + if (reason == RDMA_REMOVE_ABORT) {
> + WARN_ON(!list_empty(&uobj->list));
> + WARN_ON(!uobj->context);
> + uobj->type->type_class->alloc_abort(uobj);
> + }
> +
> + uobj->context = NULL;
> +
> + /*
> + * For DESTROY the usecnt is held write locked; the caller is expected
> + * to unlock and put the object when done with it. Only DESTROY
> + * can remove the IDR handle.
> + */
> + if (reason != RDMA_REMOVE_DESTROY)
> + atomic_set(&uobj->usecnt, 0);
> + else
> + uobj->type->type_class->remove_handle(uobj);
> +
> + if (!list_empty(&uobj->list)) {
> + spin_lock_irqsave(&ufile->uobjects_lock, flags);
> + list_del_init(&uobj->list);
> + spin_unlock_irqrestore(&ufile->uobjects_lock, flags);
> +
> + /*
> + * Pairs with the get in rdma_alloc_commit_uobject(), could
> + * destroy uobj.
> + */
> + uverbs_uobject_put(uobj);
> + }
>
> - /* lock is either WRITE or DESTROY - should be exclusive */
> - return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
> + /*
> + * When aborting, the stack kref remains owned by the core code, and is
> + * not transferred into the type. Pairs with the get in alloc_uobj
> + */
> + if (reason == RDMA_REMOVE_ABORT)
> + uverbs_uobject_put(uobj);
> +
> + return 0;
> }
>
> -static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
> +/*
> + * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
> + * sequence. It should only be used from command callbacks. On success the
> + * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
> + * version requires the caller to have already obtained a
> + * LOOKUP_DESTROY uobject kref.
> + */
> +int uobj_destroy(struct ib_uobject *uobj)
> +{
> + struct ib_uverbs_file *ufile = uobj->ufile;
> + int ret;
> +
> + down_read(&ufile->hw_destroy_rwsem);
> +
> + ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
> + if (ret)
> + goto out_unlock;
> +
> + ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY);
> + if (ret) {
> + atomic_set(&uobj->usecnt, 0);
> + goto out_unlock;
> + }
> +
> +out_unlock:
> + up_read(&ufile->hw_destroy_rwsem);
> + return ret;
> +}
> +
> +/*
> + * uobj_get_destroy destroys the HW object and returns a handle to the uobj
> + * with a NULL object pointer. The caller must pair this with
> + * uobj_put_destroy().
> + */
> +struct ib_uobject *__uobj_get_destroy(const struct uverbs_obj_type *type,
> + u32 id, struct ib_uverbs_file *ufile)
> +{
> + struct ib_uobject *uobj;
> + int ret;
> +
> + uobj = rdma_lookup_get_uobject(type, ufile, id, UVERBS_LOOKUP_DESTROY);
> + if (IS_ERR(uobj))
> + return uobj;
> +
> + ret = uobj_destroy(uobj);
> + if (ret) {
> + rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
> + return ERR_PTR(ret);
> + }
> +
> + return uobj;
> +}
> +
> +/*
> + * Does both uobj_get_destroy() and uobj_put_destroy(). Returns success_res
> + * on success (negative errno on failure). For use by callers that do not need
> + * the uobj.
> + */
> +int __uobj_perform_destroy(const struct uverbs_obj_type *type, u32 id,
> + struct ib_uverbs_file *ufile, int success_res)
> +{
> + struct ib_uobject *uobj;
> +
> + uobj = __uobj_get_destroy(type, id, ufile);
> + if (IS_ERR(uobj))
> + return PTR_ERR(uobj);
> +
> + /*
> + * FIXME: After destroy this is not safe. We no longer hold the rwsem
> + * so disassociation could have completed and unloaded the module that
> + * backs the uobj->type pointer.
> + */
> + rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
> + return success_res;
> +}
> +
> +/* alloc_uobj must be undone by uverbs_destroy_uobject() */
> +static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
> const struct uverbs_obj_type *type)
> {
> - struct ib_uobject *uobj = kzalloc(type->obj_size, GFP_KERNEL);
> + struct ib_uobject *uobj;
> + struct ib_ucontext *ucontext;
> +
> + ucontext = ib_uverbs_get_ucontext(ufile);
> + if (IS_ERR(ucontext))
> + return ERR_CAST(ucontext);
>
> + uobj = kzalloc(type->obj_size, GFP_KERNEL);
> if (!uobj)
> return ERR_PTR(-ENOMEM);
> /*
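
For readability, here is the resolved uverbs_try_lock_object() locking scheme
with the diff markers stripped; this is a condensed restatement of the hunk
above, not a separate implementation. usecnt == 0 means unlocked, a positive
count tracks shared (READ) holders, and -1 marks the exclusive (WRITE) holder:

static int uverbs_try_lock_object(struct ib_uobject *uobj,
				  enum rdma_lookup_mode mode)
{
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		/* shared: bump the count unless a writer (-1) holds it */
		return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
			-EBUSY : 0;
	case UVERBS_LOOKUP_WRITE:
		/* exclusive: 0 -> -1, fails if anyone else holds the object */
		return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
	case UVERBS_LOOKUP_DESTROY:
		/* no lock taken at this level */
		return 0;
	}
	return 0;
}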

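The comments in the resolution also pin down a pairing contract for
destruction: __uobj_get_destroy() hands back a uobj whose HW object is
already destroyed, and the caller must put it again with LOOKUP_WRITE. A
short caller sketch mirroring __uobj_perform_destroy() above (the handler
name is hypothetical; everything else follows the diff):

/* Hypothetical command handler: destroy the object identified by "id",
 * then drop the reference, following the pairing documented above.
 */
static int handle_destroy_cmd(const struct uverbs_obj_type *type, u32 id,
			      struct ib_uverbs_file *ufile)
{
	struct ib_uobject *uobj;

	/* takes a LOOKUP_DESTROY kref and destroys the HW object */
	uobj = __uobj_get_destroy(type, id, ufile);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/* ... read any final state out of uobj here ... */

	/* pairs with the write lock taken by uobj_destroy() */
	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
	return 0;
}

Note that the FIXME in __uobj_perform_destroy() about disassociation and
module unload after the rwsem is dropped applies equally to such a caller.
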
This is now a conflict between Linus' tree and the rdma tree.

--
Cheers,
Stephen Rothwell