dma-buf/sync-file: fix warning about fence containers

dma_fence_chain containers can show up in sync_files as well, resulting in
warnings that they can't be added to dma_fence_array containers when multiple
sync_files are merged together.

Solve this by using the dma_fence_unwrap iterator to dive into the contained
fences and then add them, flattened out, into a dma_fence_array.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220311110244.1245-2-christian.koenig@amd.com
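
For illustration only (not part of the patch): a minimal sketch of the unwrap-and-flatten pattern the reworked sync_file_merge() below relies on, assuming the dma_fence_unwrap_for_each() iterator from <linux/dma-fence-unwrap.h> that this patch starts using. The helper name flatten_fences() is hypothetical.

#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>

/*
 * Hypothetical helper, for illustration only: walk all leaf fences hidden
 * behind chain/array containers and return them as a flat, reference-counted
 * array, similar to what sync_file_merge() does for the fences of two
 * sync_files.
 */
static struct dma_fence **flatten_fences(struct dma_fence *head,
					 unsigned int *num_fences)
{
	struct dma_fence_unwrap iter;
	struct dma_fence **fences;
	struct dma_fence *f;
	unsigned int count = 0;

	/* First pass: count the contained fences. */
	dma_fence_unwrap_for_each(f, &iter, head)
		++count;

	fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	/* Second pass: take a reference on each contained fence. */
	count = 0;
	dma_fence_unwrap_for_each(f, &iter, head)
		fences[count++] = dma_fence_get(f);

	*num_fences = count;
	return fences;
}

The two-pass shape (count, then collect) is the same one the patch uses in sync_file_merge() and sync_file_ioctl_fence_info(); the actual merge additionally orders fences by context and keeps only the later fence per context.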
Author: Christian König
Date:   2022-03-11 10:32:26 +01:00
Commit: 519f490db0 (parent: 64a8f92fd7)

drivers/dma-buf/sync_file.c

@@ -5,6 +5,7 @@
  * Copyright (C) 2012 Google, Inc.
  */
 
+#include <linux/dma-fence-unwrap.h>
 #include <linux/export.h>
 #include <linux/file.h>
 #include <linux/fs.h>
@@ -172,20 +173,6 @@ static int sync_file_set_fence(struct sync_file *sync_file,
 	return 0;
 }
 
-static struct dma_fence **get_fences(struct sync_file *sync_file,
-				     int *num_fences)
-{
-	if (dma_fence_is_array(sync_file->fence)) {
-		struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
-
-		*num_fences = array->num_fences;
-		return array->fences;
-	}
-
-	*num_fences = 1;
-	return &sync_file->fence;
-}
-
 static void add_fence(struct dma_fence **fences,
 		      int *i, struct dma_fence *fence)
 {
@@ -210,86 +197,97 @@ static void add_fence(struct dma_fence **fences,
 static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 					 struct sync_file *b)
 {
+	struct dma_fence *a_fence, *b_fence, **fences;
+	struct dma_fence_unwrap a_iter, b_iter;
+	unsigned int index, num_fences;
 	struct sync_file *sync_file;
-	struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;
-	int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences;
 
 	sync_file = sync_file_alloc();
 	if (!sync_file)
 		return NULL;
 
-	a_fences = get_fences(a, &a_num_fences);
-	b_fences = get_fences(b, &b_num_fences);
-	if (a_num_fences > INT_MAX - b_num_fences)
-		goto err;
+	num_fences = 0;
+	dma_fence_unwrap_for_each(a_fence, &a_iter, a->fence)
+		++num_fences;
+	dma_fence_unwrap_for_each(b_fence, &b_iter, b->fence)
+		++num_fences;
 
-	num_fences = a_num_fences + b_num_fences;
+	if (num_fences > INT_MAX)
+		goto err_free_sync_file;
 
 	fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
 	if (!fences)
-		goto err;
+		goto err_free_sync_file;
 
 	/*
-	 * Assume sync_file a and b are both ordered and have no
-	 * duplicates with the same context.
+	 * We can't guarantee that fences in both a and b are ordered, but it is
+	 * still quite likely.
 	 *
-	 * If a sync_file can only be created with sync_file_merge
-	 * and sync_file_create, this is a reasonable assumption.
+	 * So attempt to order the fences as we pass over them and merge fences
+	 * with the same context.
 	 */
-	for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
-		struct dma_fence *pt_a = a_fences[i_a];
-		struct dma_fence *pt_b = b_fences[i_b];
 
-		if (pt_a->context < pt_b->context) {
-			add_fence(fences, &i, pt_a);
+	index = 0;
+	for (a_fence = dma_fence_unwrap_first(a->fence, &a_iter),
+	     b_fence = dma_fence_unwrap_first(b->fence, &b_iter);
+	     a_fence || b_fence; ) {
 
-			i_a++;
-		} else if (pt_a->context > pt_b->context) {
-			add_fence(fences, &i, pt_b);
+		if (!b_fence) {
+			add_fence(fences, &index, a_fence);
+			a_fence = dma_fence_unwrap_next(&a_iter);
 
-			i_b++;
+		} else if (!a_fence) {
+			add_fence(fences, &index, b_fence);
+			b_fence = dma_fence_unwrap_next(&b_iter);
+
+		} else if (a_fence->context < b_fence->context) {
+			add_fence(fences, &index, a_fence);
+			a_fence = dma_fence_unwrap_next(&a_iter);
+
+		} else if (b_fence->context < a_fence->context) {
+			add_fence(fences, &index, b_fence);
+			b_fence = dma_fence_unwrap_next(&b_iter);
+
+		} else if (__dma_fence_is_later(a_fence->seqno, b_fence->seqno,
+						a_fence->ops)) {
+			add_fence(fences, &index, a_fence);
+			a_fence = dma_fence_unwrap_next(&a_iter);
+			b_fence = dma_fence_unwrap_next(&b_iter);
+
 		} else {
-			if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno,
-						 pt_a->ops))
-				add_fence(fences, &i, pt_a);
-			else
-				add_fence(fences, &i, pt_b);
-
-			i_a++;
-			i_b++;
+			add_fence(fences, &index, b_fence);
+			a_fence = dma_fence_unwrap_next(&a_iter);
+			b_fence = dma_fence_unwrap_next(&b_iter);
 		}
 	}
 
-	for (; i_a < a_num_fences; i_a++)
-		add_fence(fences, &i, a_fences[i_a]);
+	if (index == 0)
+		add_fence(fences, &index, dma_fence_get_stub());
 
-	for (; i_b < b_num_fences; i_b++)
-		add_fence(fences, &i, b_fences[i_b]);
+	if (num_fences > index) {
+		struct dma_fence **tmp;
 
-	if (i == 0)
-		fences[i++] = dma_fence_get(a_fences[0]);
-
-	if (num_fences > i) {
-		nfences = krealloc_array(fences, i, sizeof(*fences), GFP_KERNEL);
-		if (!nfences)
-			goto err;
-
-		fences = nfences;
+		/* Keep going even when reducing the size failed */
+		tmp = krealloc_array(fences, index, sizeof(*fences),
+				     GFP_KERNEL);
+		if (tmp)
+			fences = tmp;
 	}
 
-	if (sync_file_set_fence(sync_file, fences, i) < 0)
-		goto err;
+	if (sync_file_set_fence(sync_file, fences, index) < 0)
+		goto err_put_fences;
 
 	strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
 	return sync_file;
 
-err:
-	while (i)
-		dma_fence_put(fences[--i]);
+err_put_fences:
+	while (index)
+		dma_fence_put(fences[--index]);
 	kfree(fences);
+
+err_free_sync_file:
 	fput(sync_file->file);
 	return NULL;
 }
 
 static int sync_file_release(struct inode *inode, struct file *file)
@@ -398,11 +396,13 @@ static int sync_fill_fence_info(struct dma_fence *fence,
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 				       unsigned long arg)
 {
-	struct sync_file_info info;
 	struct sync_fence_info *fence_info = NULL;
-	struct dma_fence **fences;
+	struct dma_fence_unwrap iter;
+	struct sync_file_info info;
+	unsigned int num_fences;
+	struct dma_fence *fence;
+	int ret;
 	__u32 size;
-	int num_fences, ret, i;
 
 	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
 		return -EFAULT;
@@ -410,7 +410,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 	if (info.flags || info.pad)
 		return -EINVAL;
 
-	fences = get_fences(sync_file, &num_fences);
+	num_fences = 0;
+	dma_fence_unwrap_for_each(fence, &iter, sync_file->fence)
+		++num_fences;
 
 	/*
 	 * Passing num_fences = 0 means that userspace doesn't want to
@@ -433,8 +435,11 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 	if (!fence_info)
 		return -ENOMEM;
 
-	for (i = 0; i < num_fences; i++) {
-		int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+	num_fences = 0;
+	dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) {
+		int status;
 
+		status = sync_fill_fence_info(fence, &fence_info[num_fences++]);
 		info.status = info.status <= 0 ? info.status : status;
 	}