Commit f54d1867 authored by Chris Wilson, committed by Daniel Vetter

dma-buf: Rename struct fence to dma_fence

I plan to usurp the short name of struct fence for a core kernel struct,
and so I need to rename the specialised fence/timeline for DMA
operations to make room.

A consensus was reached in
https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html
that making clear this fence applies to DMA operations was a good thing.
Since then the patch has grown a bit as usage increases, so hopefully it
remains a good thing!

v2: rebase, rerun spatch
v3: Compile on msm, spotted a manual fixup that I broke.
v4: Try again for msm, sorry Daniel

coccinelle script:
@@

@@
- struct fence
+ struct dma_fence
@@

@@
- struct fence_ops
+ struct dma_fence_ops
@@

@@
- struct fence_cb
+ struct dma_fence_cb
@@

@@
- struct fence_array
+ struct dma_fence_array
@@

@@
- enum fence_flag_bits
+ enum dma_fence_flag_bits
@@

@@
(
- fence_init
+ dma_fence_init
|
- fence_release
+ dma_fence_release
|
- fence_free
+ dma_fence_free
|
- fence_get
+ dma_fence_get
|
- fence_get_rcu
+ dma_fence_get_rcu
|
- fence_put
+ dma_fence_put
|
- fence_signal
+ dma_fence_signal
|
- fence_signal_locked
+ dma_fence_signal_locked
|
- fence_default_wait
+ dma_fence_default_wait
|
- fence_add_callback
+ dma_fence_add_callback
|
- fence_remove_callback
+ dma_fence_remove_callback
|
- fence_enable_sw_signaling
+ dma_fence_enable_sw_signaling
|
- fence_is_signaled_locked
+ dma_fence_is_signaled_locked
|
- fence_is_signaled
+ dma_fence_is_signaled
|
- fence_is_later
+ dma_fence_is_later
|
- fence_later
+ dma_fence_later
|
- fence_wait_timeout
+ dma_fence_wait_timeout
|
- fence_wait_any_timeout
+ dma_fence_wait_any_timeout
|
- fence_wait
+ dma_fence_wait
|
- fence_context_alloc
+ dma_fence_context_alloc
|
- fence_array_create
+ dma_fence_array_create
|
- to_fence_array
+ to_dma_fence_array
|
- fence_is_array
+ dma_fence_is_array
|
- trace_fence_emit
+ trace_dma_fence_emit
|
- FENCE_TRACE
+ DMA_FENCE_TRACE
|
- FENCE_WARN
+ DMA_FENCE_WARN
|
- FENCE_ERR
+ DMA_FENCE_ERR
)
 (
 ...
 )
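
For orientation only (this is not part of the commit), the effect of the semantic patch on a typical driver snippet would look roughly like the sketch below; example_get_fence() is a hypothetical helper standing in for however a driver obtains a fence, and is assumed to return a reference the caller must drop.

#include <linux/dma-fence.h>

/* Hypothetical helper: stands in for however a driver obtains a fence;
 * assumed to return a reference the caller must drop. */
extern struct dma_fence *example_get_fence(void);

static void example_signal_and_drop(void)
{
	/* Before this commit the same code used "struct fence" and the
	 * unprefixed fence_signal()/fence_put() helpers. */
	struct dma_fence *f = example_get_fence();

	dma_fence_signal(f);	/* mark the fence completed, run callbacks */
	dma_fence_put(f);	/* drop our reference */
}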

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk
parent 0fc4f78f
Showing changed files with 392 additions and 383 deletions
......@@ -6,7 +6,7 @@
This document serves as a guide for device drivers writers on what the
sync_file API is, and how drivers can support it. Sync file is the carrier of
the fences(struct fence) that are needed to synchronize between drivers or
the fences(struct dma_fence) that are needed to synchronize between drivers or
across process boundaries.
The sync_file API is meant to be used to send and receive fence information
......@@ -32,9 +32,9 @@ in-fences and out-fences
Sync files can go either to or from userspace. When a sync_file is sent from
the driver to userspace we call the fences it contains 'out-fences'. They are
related to a buffer that the driver is processing or is going to process, so
the driver creates an out-fence to be able to notify, through fence_signal(),
when it has finished using (or processing) that buffer. Out-fences are fences
that the driver creates.
the driver creates an out-fence to be able to notify, through
dma_fence_signal(), when it has finished using (or processing) that buffer.
Out-fences are fences that the driver creates.
On the other hand if the driver receives fence(s) through a sync_file from
userspace we call these fence(s) 'in-fences'. Receiveing in-fences means that
......@@ -47,7 +47,7 @@ Creating Sync Files
When a driver needs to send an out-fence userspace it creates a sync_file.
Interface:
struct sync_file *sync_file_create(struct fence *fence);
struct sync_file *sync_file_create(struct dma_fence *fence);
The caller pass the out-fence and gets back the sync_file. That is just the
first step, next it needs to install an fd on sync_file->file. So it gets an
......@@ -72,11 +72,11 @@ of the Sync File to the kernel. The kernel can then retrieve the fences
from it.
Interface:
struct fence *sync_file_get_fence(int fd);
struct dma_fence *sync_file_get_fence(int fd);
The returned reference is owned by the caller and must be disposed of
afterwards using fence_put(). In case of error, a NULL is returned instead.
afterwards using dma_fence_put(). In case of error, a NULL is returned instead.
References:
[1] struct sync_file in include/linux/sync_file.h
......
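
As an aside (not part of the patch), the sync_file flow described in the documentation hunk above would be used by a driver roughly as in the following sketch; example_export_fence() is a hypothetical function, and @out_fence is assumed to be an already-initialised fence owned by the caller.

#include <linux/dma-fence.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

/* Sketch: export an out-fence to userspace as a sync_file fd, following
 * the steps the documentation describes (create the sync_file, then
 * install an fd on sync_file->file). */
static int example_export_fence(struct dma_fence *out_fence)
{
	struct sync_file *sync_file;
	int fd;

	sync_file = sync_file_create(out_fence);
	if (!sync_file)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		fput(sync_file->file);
		return fd;
	}

	fd_install(fd, sync_file->file);
	return fd;
}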
......@@ -248,11 +248,11 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
driver.
config FENCE_TRACE
bool "Enable verbose FENCE_TRACE messages"
config DMA_FENCE_TRACE
bool "Enable verbose DMA_FENCE_TRACE messages"
depends on DMA_SHARED_BUFFER
help
Enable the FENCE_TRACE printks. This will add extra
Enable the DMA_FENCE_TRACE printks. This will add extra
spam to the console log, but will make it easier to diagnose
lockup related problems for dma-buffers shared across multiple
devices.
......
......@@ -7,7 +7,7 @@ config SYNC_FILE
select DMA_SHARED_BUFFER
---help---
The Sync File Framework adds explicit syncronization via
userspace. It enables send/receive 'struct fence' objects to/from
userspace. It enables send/receive 'struct dma_fence' objects to/from
userspace via Sync File fds for synchronization between drivers via
userspace components. It has been ported from Android.
......
obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
......@@ -25,7 +25,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/fence.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
......@@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
return base + offset;
}
static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
unsigned long flags;
......@@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
struct dma_buf *dmabuf;
struct reservation_object *resv;
struct reservation_object_list *fobj;
struct fence *fence_excl;
struct dma_fence *fence_excl;
unsigned long events;
unsigned shared_count, seq;
......@@ -187,20 +187,20 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
spin_unlock_irq(&dmabuf->poll.lock);
if (events & pevents) {
if (!fence_get_rcu(fence_excl)) {
if (!dma_fence_get_rcu(fence_excl)) {
/* force a recheck */
events &= ~pevents;
dma_buf_poll_cb(NULL, &dcb->cb);
} else if (!fence_add_callback(fence_excl, &dcb->cb,
} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
dma_buf_poll_cb)) {
events &= ~pevents;
fence_put(fence_excl);
dma_fence_put(fence_excl);
} else {
/*
* No callback queued, wake up any additional
* waiters.
*/
fence_put(fence_excl);
dma_fence_put(fence_excl);
dma_buf_poll_cb(NULL, &dcb->cb);
}
}
......@@ -222,9 +222,9 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
goto out;
for (i = 0; i < shared_count; ++i) {
struct fence *fence = rcu_dereference(fobj->shared[i]);
struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
if (!fence_get_rcu(fence)) {
if (!dma_fence_get_rcu(fence)) {
/*
* fence refcount dropped to zero, this means
* that fobj has been freed
......@@ -235,13 +235,13 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
dma_buf_poll_cb(NULL, &dcb->cb);
break;
}
if (!fence_add_callback(fence, &dcb->cb,
if (!dma_fence_add_callback(fence, &dcb->cb,
dma_buf_poll_cb)) {
fence_put(fence);
dma_fence_put(fence);
events &= ~POLLOUT;
break;
}
fence_put(fence);
dma_fence_put(fence);
}
/* No callback queued, wake up any additional waiters. */
......
/*
* fence-array: aggregate fences to be waited together
* dma-fence-array: aggregate fences to be waited together
*
* Copyright (C) 2016 Collabora Ltd
* Copyright (C) 2016 Advanced Micro Devices, Inc.
......@@ -19,35 +19,34 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/fence-array.h>
#include <linux/dma-fence-array.h>
static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
static const char *fence_array_get_driver_name(struct fence *fence)
static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
return "fence_array";
return "dma_fence_array";
}
static const char *fence_array_get_timeline_name(struct fence *fence)
static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
return "unbound";
}
static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
static void dma_fence_array_cb_func(struct dma_fence *f,
struct dma_fence_cb *cb)
{
struct fence_array_cb *array_cb =
container_of(cb, struct fence_array_cb, cb);
struct fence_array *array = array_cb->array;
struct dma_fence_array_cb *array_cb =
container_of(cb, struct dma_fence_array_cb, cb);
struct dma_fence_array *array = array_cb->array;
if (atomic_dec_and_test(&array->num_pending))
fence_signal(&array->base);
fence_put(&array->base);
dma_fence_signal(&array->base);
dma_fence_put(&array->base);
}
static bool fence_array_enable_signaling(struct fence *fence)
static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
struct fence_array *array = to_fence_array(fence);
struct fence_array_cb *cb = (void *)(&array[1]);
struct dma_fence_array *array = to_dma_fence_array(fence);
struct dma_fence_array_cb *cb = (void *)(&array[1]);
unsigned i;
for (i = 0; i < array->num_fences; ++i) {
......@@ -60,10 +59,10 @@ static bool fence_array_enable_signaling(struct fence *fence)
* until we signal the array as complete (but that is now
* insufficient).
*/
fence_get(&array->base);
if (fence_add_callback(array->fences[i], &cb[i].cb,
fence_array_cb_func)) {
fence_put(&array->base);
dma_fence_get(&array->base);
if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
dma_fence_array_cb_func)) {
dma_fence_put(&array->base);
if (atomic_dec_and_test(&array->num_pending))
return false;
}
......@@ -72,68 +71,70 @@ static bool fence_array_enable_signaling(struct fence *fence)
return true;
}
static bool fence_array_signaled(struct fence *fence)
static bool dma_fence_array_signaled(struct dma_fence *fence)
{
struct fence_array *array = to_fence_array(fence);
struct dma_fence_array *array = to_dma_fence_array(fence);
return atomic_read(&array->num_pending) <= 0;
}
static void fence_array_release(struct fence *fence)
static void dma_fence_array_release(struct dma_fence *fence)
{
struct fence_array *array = to_fence_array(fence);
struct dma_fence_array *array = to_dma_fence_array(fence);
unsigned i;
for (i = 0; i < array->num_fences; ++i)
fence_put(array->fences[i]);
dma_fence_put(array->fences[i]);
kfree(array->fences);
fence_free(fence);
dma_fence_free(fence);
}
const struct fence_ops fence_array_ops = {
.get_driver_name = fence_array_get_driver_name,
.get_timeline_name = fence_array_get_timeline_name,
.enable_signaling = fence_array_enable_signaling,
.signaled = fence_array_signaled,
.wait = fence_default_wait,
.release = fence_array_release,
const struct dma_fence_ops dma_fence_array_ops = {
.get_driver_name = dma_fence_array_get_driver_name,
.get_timeline_name = dma_fence_array_get_timeline_name,
.enable_signaling = dma_fence_array_enable_signaling,
.signaled = dma_fence_array_signaled,
.wait = dma_fence_default_wait,
.release = dma_fence_array_release,
};
EXPORT_SYMBOL(fence_array_ops);
EXPORT_SYMBOL(dma_fence_array_ops);
/**
* fence_array_create - Create a custom fence array
* dma_fence_array_create - Create a custom fence array
* @num_fences: [in] number of fences to add in the array
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
* @signal_on_any: [in] signal on any fence in the array
*
* Allocate a fence_array object and initialize the base fence with fence_init().
* Allocate a dma_fence_array object and initialize the base fence with
* dma_fence_init().
* In case of error it returns NULL.
*
* The caller should allocate the fences array with num_fences size
* and fill it with the fences it wants to add to the object. Ownership of this
* array is taken and fence_put() is used on each fence on release.
* array is taken and dma_fence_put() is used on each fence on release.
*
* If @signal_on_any is true the fence array signals if any fence in the array
* signals, otherwise it signals when all fences in the array signal.
*/
struct fence_array *fence_array_create(int num_fences, struct fence **fences,
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
bool signal_on_any)
{
struct fence_array *array;
struct dma_fence_array *array;
size_t size = sizeof(*array);
/* Allocate the callback structures behind the array. */
size += num_fences * sizeof(struct fence_array_cb);
size += num_fences * sizeof(struct dma_fence_array_cb);
array = kzalloc(size, GFP_KERNEL);
if (!array)
return NULL;
spin_lock_init(&array->lock);
fence_init(&array->base, &fence_array_ops, &array->lock,
dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
context, seqno);
array->num_fences = num_fences;
......@@ -142,4 +143,4 @@ struct fence_array *fence_array_create(int num_fences, struct fence **fences,
return array;
}
EXPORT_SYMBOL(fence_array_create);
EXPORT_SYMBOL(dma_fence_array_create);
......@@ -21,13 +21,13 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/fence.h>
#include <linux/dma-fence.h>
#define CREATE_TRACE_POINTS
#include <trace/events/fence.h>
#include <trace/events/dma_fence.h>
EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
/*
* fence context counter: each execution context should have its own
......@@ -35,37 +35,37 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
/**
* fence_context_alloc - allocate an array of fence contexts
* dma_fence_context_alloc - allocate an array of fence contexts
* @num: [in] amount of contexts to allocate
*
* This function will return the first index of the number of fences allocated.
* The fence context is used for setting fence->context to a unique number.
*/
u64 fence_context_alloc(unsigned num)
u64 dma_fence_context_alloc(unsigned num)
{
BUG_ON(!num);
return atomic64_add_return(num, &fence_context_counter) - num;
return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
EXPORT_SYMBOL(dma_fence_context_alloc);
/**
* fence_signal_locked - signal completion of a fence
* dma_fence_signal_locked - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
* fence_wait() calls and run all the callbacks added with
* fence_add_callback(). Can be called multiple times, but since a fence
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*
* Unlike fence_signal, this function must be called with fence->lock held.
* Unlike dma_fence_signal, this function must be called with fence->lock held.
*/
int fence_signal_locked(struct fence *fence)
int dma_fence_signal_locked(struct dma_fence *fence)
{
struct fence_cb *cur, *tmp;
struct dma_fence_cb *cur, *tmp;
int ret = 0;
lockdep_assert_held(fence->lock);
......@@ -78,15 +78,15 @@ int fence_signal_locked(struct fence *fence)
smp_mb__before_atomic();
}
if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
ret = -EINVAL;
/*
* we might have raced with the unlocked fence_signal,
* we might have raced with the unlocked dma_fence_signal,
* still run through all callbacks
*/
} else
trace_fence_signaled(fence);
trace_dma_fence_signaled(fence);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
list_del_init(&cur->node);
......@@ -94,19 +94,19 @@ int fence_signal_locked(struct fence *fence)
}
return ret;
}
EXPORT_SYMBOL(fence_signal_locked);
EXPORT_SYMBOL(dma_fence_signal_locked);
/**
* fence_signal - signal completion of a fence
* dma_fence_signal - signal completion of a fence
* @fence: the fence to signal
*
* Signal completion for software callbacks on a fence, this will unblock
* fence_wait() calls and run all the callbacks added with
* fence_add_callback(). Can be called multiple times, but since a fence
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from unsignaled to signaled state, it will only be effective
* the first time.
*/
int fence_signal(struct fence *fence)
int dma_fence_signal(struct dma_fence *fence)
{
unsigned long flags;
......@@ -118,13 +118,13 @@ int fence_signal(struct fence *fence)
smp_mb__before_atomic();
}
if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return -EINVAL;
trace_fence_signaled(fence);
trace_dma_fence_signaled(fence);
if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
struct fence_cb *cur, *tmp;
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
struct dma_fence_cb *cur, *tmp;
spin_lock_irqsave(fence->lock, flags);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
......@@ -135,10 +135,10 @@ int fence_signal(struct fence *fence)
}
return 0;
}
EXPORT_SYMBOL(fence_signal);
EXPORT_SYMBOL(dma_fence_signal);
/**
* fence_wait_timeout - sleep until the fence gets signaled
* dma_fence_wait_timeout - sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
......@@ -154,7 +154,7 @@ EXPORT_SYMBOL(fence_signal);
* freed before return, resulting in undefined behavior.
*/
signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
signed long ret;
......@@ -162,70 +162,71 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
return -EINVAL;
if (timeout == 0)
return fence_is_signaled(fence);
return dma_fence_is_signaled(fence);
trace_fence_wait_start(fence);
trace_dma_fence_wait_start(fence);
ret = fence->ops->wait(fence, intr, timeout);
trace_fence_wait_end(fence);
trace_dma_fence_wait_end(fence);
return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);
EXPORT_SYMBOL(dma_fence_wait_timeout);
void fence_release(struct kref *kref)
void dma_fence_release(struct kref *kref)
{
struct fence *fence =
container_of(kref, struct fence, refcount);
struct dma_fence *fence =
container_of(kref, struct dma_fence, refcount);
trace_fence_destroy(fence);
trace_dma_fence_destroy(fence);
BUG_ON(!list_empty(&fence->cb_list));
if (fence->ops->release)
fence->ops->release(fence);
else
fence_free(fence);
dma_fence_free(fence);
}
EXPORT_SYMBOL(fence_release);
EXPORT_SYMBOL(dma_fence_release);
void fence_free(struct fence *fence)
void dma_fence_free(struct dma_fence *fence)
{
kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);
EXPORT_SYMBOL(dma_fence_free);
/**
* fence_enable_sw_signaling - enable signaling on fence
* dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: [in] the fence to enable
*
* this will request for sw signaling to be enabled, to make the fence
* complete as soon as possible
*/
void fence_enable_sw_signaling(struct fence *fence)
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
trace_fence_enable_signal(fence);
if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
trace_dma_fence_enable_signal(fence);
spin_lock_irqsave(fence->lock, flags);
if (!fence->ops->enable_signaling(fence))
fence_signal_locked(fence);
dma_fence_signal_locked(fence);
spin_unlock_irqrestore(fence->lock, flags);
}
}
EXPORT_SYMBOL(fence_enable_sw_signaling);
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
* fence_add_callback - add a callback to be called when the fence
* dma_fence_add_callback - add a callback to be called when the fence
* is signaled
* @fence: [in] the fence to wait on
* @cb: [in] the callback to register
* @func: [in] the function to call
*
* cb will be initialized by fence_add_callback, no initialization
* cb will be initialized by dma_fence_add_callback, no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
......@@ -234,15 +235,15 @@ EXPORT_SYMBOL(fence_enable_sw_signaling);
* *not* call the callback)
*
* Add a software callback to the fence. Same restrictions apply to
* refcount as it does to fence_wait, however the caller doesn't need to
* refcount as it does to dma_fence_wait, however the caller doesn't need to
* keep a refcount to fence afterwards: when software access is enabled,
* the creator of the fence is required to keep the fence alive until
* after it signals with fence_signal. The callback itself can be called
* after it signals with dma_fence_signal. The callback itself can be called
* from irq context.
*
*/
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
fence_func_t func)
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
dma_fence_func_t func)
{
unsigned long flags;
int ret = 0;
......@@ -251,22 +252,23 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
if (WARN_ON(!fence || !func))
return -EINVAL;
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
INIT_LIST_HEAD(&cb->node);
return -ENOENT;
}
spin_lock_irqsave(fence->lock, flags);
was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags);
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
ret = -ENOENT;
else if (!was_set) {
trace_fence_enable_signal(fence);
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
fence_signal_locked(fence);
dma_fence_signal_locked(fence);
ret = -ENOENT;
}
}
......@@ -280,10 +282,10 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
return ret;
}
EXPORT_SYMBOL(fence_add_callback);
EXPORT_SYMBOL(dma_fence_add_callback);
/**
* fence_remove_callback - remove a callback from the signaling list
* dma_fence_remove_callback - remove a callback from the signaling list
* @fence: [in] the fence to wait on
* @cb: [in] the callback to remove
*
......@@ -298,7 +300,7 @@ EXPORT_SYMBOL(fence_add_callback);
* with a reference held to the fence.
*/
bool
fence_remove_callback(struct fence *fence, struct fence_cb *cb)
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
unsigned long flags;
bool ret;
......@@ -313,15 +315,15 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb)
return ret;
}
EXPORT_SYMBOL(fence_remove_callback);
EXPORT_SYMBOL(dma_fence_remove_callback);
struct default_wait_cb {
struct fence_cb base;
struct dma_fence_cb base;
struct task_struct *task;
};
static void
fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct default_wait_cb *wait =
container_of(cb, struct default_wait_cb, base);
......@@ -330,7 +332,7 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
}
/**
* fence_default_wait - default sleep until the fence gets signaled
* dma_fence_default_wait - default sleep until the fence gets signaled
* or until timeout elapses
* @fence: [in] the fence to wait on
* @intr: [in] if true, do an interruptible wait
......@@ -340,14 +342,14 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
* remaining timeout in jiffies on success.
*/
signed long
fence_default_wait(struct fence *fence, bool intr, signed long timeout)
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
struct default_wait_cb cb;
unsigned long flags;
signed long ret = timeout;
bool was_set;
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return timeout;
spin_lock_irqsave(fence->lock, flags);
......@@ -357,25 +359,26 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
goto out;
}
was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags);
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
goto out;
if (!was_set) {
trace_fence_enable_signal(fence);
trace_dma_fence_enable_signal(fence);
if (!fence->ops->enable_signaling(fence)) {
fence_signal_locked(fence);
dma_fence_signal_locked(fence);
goto out;
}
}
cb.base.func = fence_default_wait_cb;
cb.base.func = dma_fence_default_wait_cb;
cb.task = current;
list_add(&cb.base.node, &fence->cb_list);
while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
if (intr)
__set_current_state(TASK_INTERRUPTIBLE);
else
......@@ -397,23 +400,23 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
spin_unlock_irqrestore(fence->lock, flags);
return ret;
}
EXPORT_SYMBOL(fence_default_wait);
EXPORT_SYMBOL(dma_fence_default_wait);
static bool
fence_test_signaled_any(struct fence **fences, uint32_t count)
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count)
{
int i;
for (i = 0; i < count; ++i) {
struct fence *fence = fences[i];
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
struct dma_fence *fence = fences[i];
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return true;
}
return false;
}
/**
* fence_wait_any_timeout - sleep until any fence gets signaled
* dma_fence_wait_any_timeout - sleep until any fence gets signaled
* or until timeout elapses
* @fences: [in] array of fences to wait on
* @count: [in] number of fences to wait on
......@@ -429,7 +432,7 @@ fence_test_signaled_any(struct fence **fences, uint32_t count)
* fence might be freed before return, resulting in undefined behavior.
*/
signed long
fence_wait_any_timeout(struct fence **fences, uint32_t count,
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
bool intr, signed long timeout)
{
struct default_wait_cb *cb;
......@@ -441,7 +444,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
if (timeout == 0) {
for (i = 0; i < count; ++i)
if (fence_is_signaled(fences[i]))
if (dma_fence_is_signaled(fences[i]))
return 1;
return 0;
......@@ -454,16 +457,16 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
}
for (i = 0; i < count; ++i) {
struct fence *fence = fences[i];
struct dma_fence *fence = fences[i];
if (fence->ops->wait != fence_default_wait) {
if (fence->ops->wait != dma_fence_default_wait) {
ret = -EINVAL;
goto fence_rm_cb;
}
cb[i].task = current;
if (fence_add_callback(fence, &cb[i].base,
fence_default_wait_cb)) {
if (dma_fence_add_callback(fence, &cb[i].base,
dma_fence_default_wait_cb)) {
/* This fence is already signaled */
goto fence_rm_cb;
}
......@@ -475,7 +478,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
else
set_current_state(TASK_UNINTERRUPTIBLE);
if (fence_test_signaled_any(fences, count))
if (dma_fence_test_signaled_any(fences, count))
break;
ret = schedule_timeout(ret);
......@@ -488,33 +491,33 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count,
fence_rm_cb:
while (i-- > 0)
fence_remove_callback(fences[i], &cb[i].base);
dma_fence_remove_callback(fences[i], &cb[i].base);
err_free_cb:
kfree(cb);
return ret;
}
EXPORT_SYMBOL(fence_wait_any_timeout);
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
/**
* fence_init - Initialize a custom fence.
* dma_fence_init - Initialize a custom fence.
* @fence: [in] the fence to initialize
* @ops: [in] the fence_ops for operations on this fence
* @ops: [in] the dma_fence_ops for operations on this fence
* @lock: [in] the irqsafe spinlock to use for locking this fence
* @context: [in] the execution context this fence is run on
* @seqno: [in] a linear increasing sequence number for this context
*
* Initializes an allocated fence, the caller doesn't have to keep its
* refcount after committing with this fence, but it will need to hold a
* refcount again if fence_ops.enable_signaling gets called. This can
* refcount again if dma_fence_ops.enable_signaling gets called. This can
* be used for other implementing other types of fence.
*
* context and seqno are used for easy comparison between fences, allowing
* to check which fence is later by simply using fence_later.
* to check which fence is later by simply using dma_fence_later.
*/
void
fence_init(struct fence *fence, const struct fence_ops *ops,
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
......@@ -529,6 +532,6 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
fence->seqno = seqno;
fence->flags = 0UL;
trace_fence_init(fence);
trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(fence_init);
EXPORT_SYMBOL(dma_fence_init);
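
A brief illustrative sketch (not part of the patch) of the callback interface documented in the dma-fence.c hunk above: the callback may run from irq context, and dma_fence_add_callback() returns non-zero (e.g. -ENOENT) when the fence has already signalled, in which case no callback is queued. The example_waiter structure and helpers are hypothetical.

#include <linux/completion.h>
#include <linux/dma-fence.h>
#include <linux/kernel.h>

struct example_waiter {
	struct dma_fence_cb cb;
	struct completion done;
};

static void example_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct example_waiter *w = container_of(cb, struct example_waiter, cb);

	complete(&w->done);
}

static void example_wait_for_fence(struct dma_fence *fence,
				   struct example_waiter *w)
{
	init_completion(&w->done);

	/* Non-zero return means the fence already signalled and no
	 * callback was queued, so complete immediately. */
	if (dma_fence_add_callback(fence, &w->cb, example_fence_cb))
		complete(&w->done);

	wait_for_completion(&w->done);
}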
......@@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared);
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
struct reservation_object_list *fobj,
struct fence *fence)
struct dma_fence *fence)
{
u32 i;
fence_get(fence);
dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
for (i = 0; i < fobj->shared_count; ++i) {
struct fence *old_fence;
struct dma_fence *old_fence;
old_fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(obj));
......@@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
write_seqcount_end(&obj->seq);
preempt_enable();
fence_put(old_fence);
dma_fence_put(old_fence);
return;
}
}
......@@ -143,12 +143,12 @@ static void
reservation_object_add_shared_replace(struct reservation_object *obj,
struct reservation_object_list *old,
struct reservation_object_list *fobj,
struct fence *fence)
struct dma_fence *fence)
{
unsigned i;
struct fence *old_fence = NULL;
struct dma_fence *old_fence = NULL;
fence_get(fence);
dma_fence_get(fence);
if (!old) {
RCU_INIT_POINTER(fobj->shared[0], fence);
......@@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
fobj->shared_count = old->shared_count;
for (i = 0; i < old->shared_count; ++i) {
struct fence *check;
struct dma_fence *check;
check = rcu_dereference_protected(old->shared[i],
reservation_object_held(obj));
......@@ -196,7 +196,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
kfree_rcu(old, rcu);
if (old_fence)
fence_put(old_fence);
dma_fence_put(old_fence);
}
/**
......@@ -208,7 +208,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
* reservation_object_reserve_shared() has been called.
*/
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct fence *fence)
struct dma_fence *fence)
{
struct reservation_object_list *old, *fobj = obj->staged;
......@@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence);
* Add a fence to the exclusive slot. The obj->lock must be held.
*/
void reservation_object_add_excl_fence(struct reservation_object *obj,
struct fence *fence)
struct dma_fence *fence)
{
struct fence *old_fence = reservation_object_get_excl(obj);
struct dma_fence *old_fence = reservation_object_get_excl(obj);
struct reservation_object_list *old;
u32 i = 0;
......@@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
i = old->shared_count;
if (fence)
fence_get(fence);
dma_fence_get(fence);
preempt_disable();
write_seqcount_begin(&obj->seq);
......@@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
/* inplace update, no shared fences */
while (i--)
fence_put(rcu_dereference_protected(old->shared[i],
dma_fence_put(rcu_dereference_protected(old->shared[i],
reservation_object_held(obj)));
if (old_fence)
fence_put(old_fence);
dma_fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
......@@ -276,12 +276,12 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
* Zero or -errno
*/
int reservation_object_get_fences_rcu(struct reservation_object *obj,
struct fence **pfence_excl,
struct dma_fence **pfence_excl,
unsigned *pshared_count,
struct fence ***pshared)
struct dma_fence ***pshared)
{
struct fence **shared = NULL;
struct fence *fence_excl;
struct dma_fence **shared = NULL;
struct dma_fence *fence_excl;
unsigned int shared_count;
int ret = 1;
......@@ -296,12 +296,12 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
seq = read_seqcount_begin(&obj->seq);
fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl && !fence_get_rcu(fence_excl))
if (fence_excl && !dma_fence_get_rcu(fence_excl))
goto unlock;
fobj = rcu_dereference(obj->fence);
if (fobj) {
struct fence **nshared;
struct dma_fence **nshared;
size_t sz = sizeof(*shared) * fobj->shared_max;
nshared = krealloc(shared, sz,
......@@ -322,15 +322,15 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
for (i = 0; i < shared_count; ++i) {
shared[i] = rcu_dereference(fobj->shared[i]);
if (!fence_get_rcu(shared[i]))
if (!dma_fence_get_rcu(shared[i]))
break;
}
}
if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
while (i--)
fence_put(shared[i]);
fence_put(fence_excl);
dma_fence_put(shared[i]);
dma_fence_put(fence_excl);
goto unlock;
}
......@@ -368,7 +368,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)
{
struct fence *fence;
struct dma_fence *fence;
unsigned seq, shared_count, i = 0;
long ret = timeout;
......@@ -389,16 +389,17 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
shared_count = fobj->shared_count;
for (i = 0; i < shared_count; ++i) {
struct fence *lfence = rcu_dereference(fobj->shared[i]);
struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&lfence->flags))
continue;
if (!fence_get_rcu(lfence))
if (!dma_fence_get_rcu(lfence))
goto unlock_retry;
if (fence_is_signaled(lfence)) {
fence_put(lfence);
if (dma_fence_is_signaled(lfence)) {
dma_fence_put(lfence);
continue;
}
......@@ -408,15 +409,16 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
}
if (!shared_count) {
struct fence *fence_excl = rcu_dereference(obj->fence_excl);
struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl &&
!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
if (!fence_get_rcu(fence_excl))
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence_excl->flags)) {
if (!dma_fence_get_rcu(fence_excl))
goto unlock_retry;
if (fence_is_signaled(fence_excl))
fence_put(fence_excl);
if (dma_fence_is_signaled(fence_excl))
dma_fence_put(fence_excl);
else
fence = fence_excl;
}
......@@ -425,12 +427,12 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
rcu_read_unlock();
if (fence) {
if (read_seqcount_retry(&obj->seq, seq)) {
fence_put(fence);
dma_fence_put(fence);
goto retry;
}
ret = fence_wait_timeout(fence, intr, ret);
fence_put(fence);
ret = dma_fence_wait_timeout(fence, intr, ret);
dma_fence_put(fence);
if (ret > 0 && wait_all && (i + 1 < shared_count))
goto retry;
}
......@@ -444,18 +446,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
static inline int
reservation_object_test_signaled_single(struct fence *passed_fence)
reservation_object_test_signaled_single(struct dma_fence *passed_fence)
{
struct fence *fence, *lfence = passed_fence;
struct dma_fence *fence, *lfence = passed_fence;
int ret = 1;
if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
fence = fence_get_rcu(lfence);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
fence = dma_fence_get_rcu(lfence);
if (!fence)
return -1;
ret = !!fence_is_signaled(fence);
fence_put(fence);
ret = !!dma_fence_is_signaled(fence);
dma_fence_put(fence);
}
return ret;
}
......@@ -492,7 +494,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
shared_count = fobj->shared_count;
for (i = 0; i < shared_count; ++i) {
struct fence *fence = rcu_dereference(fobj->shared[i]);
struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
ret = reservation_object_test_signaled_single(fence);
if (ret < 0)
......@@ -506,7 +508,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
}
if (!shared_count) {
struct fence *fence_excl = rcu_dereference(obj->fence_excl);
struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl) {
ret = reservation_object_test_signaled_single(
......
......@@ -21,35 +21,35 @@
#include <linux/export.h>
#include <linux/seqno-fence.h>
static const char *seqno_fence_get_driver_name(struct fence *fence)
static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_driver_name(fence);
}
static const char *seqno_fence_get_timeline_name(struct fence *fence)
static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_timeline_name(fence);
}
static bool seqno_enable_signaling(struct fence *fence)
static bool seqno_enable_signaling(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->enable_signaling(fence);
}
static bool seqno_signaled(struct fence *fence)
static bool seqno_signaled(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
}
static void seqno_release(struct fence *fence)
static void seqno_release(struct dma_fence *fence)
{
struct seqno_fence *f = to_seqno_fence(fence);
......@@ -57,10 +57,10 @@ static void seqno_release(struct fence *fence)
if (f->ops->release)
f->ops->release(fence);
else
fence_free(&f->base);
dma_fence_free(&f->base);
}
static signed long seqno_wait(struct fence *fence, bool intr,
static signed long seqno_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct seqno_fence *f = to_seqno_fence(fence);
......@@ -68,7 +68,7 @@ static signed long seqno_wait(struct fence *fence, bool intr,
return f->ops->wait(fence, intr, timeout);
}
const struct fence_ops seqno_fence_ops = {
const struct dma_fence_ops seqno_fence_ops = {
.get_driver_name = seqno_fence_get_driver_name,
.get_timeline_name = seqno_fence_get_timeline_name,
.enable_signaling = seqno_enable_signaling,
......
......@@ -68,9 +68,9 @@ struct sw_sync_create_fence_data {
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
static const struct fence_ops timeline_fence_ops;
static const struct dma_fence_ops timeline_fence_ops;
static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
{
if (fence->ops != &timeline_fence_ops)
return NULL;
......@@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name)
return NULL;
kref_init(&obj->kref);
obj->context = fence_context_alloc(1);
obj->context = dma_fence_context_alloc(1);
strlcpy(obj->name, name, sizeof(obj->name));
INIT_LIST_HEAD(&obj->child_list_head);
......@@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
list_for_each_entry_safe(pt, next, &obj->active_list_head,
active_list) {
if (fence_is_signaled_locked(&pt->base))
if (dma_fence_is_signaled_locked(&pt->base))
list_del_init(&pt->active_list);
}
......@@ -179,7 +179,7 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
spin_lock_irqsave(&obj->child_list_lock, flags);
sync_timeline_get(obj);
fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
obj->context, value);
list_add_tail(&pt->child_list, &obj->child_list_head);
INIT_LIST_HEAD(&pt->active_list);
......@@ -187,22 +187,22 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
return pt;
}
static const char *timeline_fence_get_driver_name(struct fence *fence)
static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
{
return "sw_sync";
}
static const char *timeline_fence_get_timeline_name(struct fence *fence)
static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
{
struct sync_timeline *parent = fence_parent(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
return parent->name;
}
static void timeline_fence_release(struct fence *fence)
static void timeline_fence_release(struct dma_fence *fence)
{
struct sync_pt *pt = fence_to_sync_pt(fence);
struct sync_timeline *parent = fence_parent(fence);
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
......@@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence)
spin_unlock_irqrestore(fence->lock, flags);
sync_timeline_put(parent);
fence_free(fence);
dma_fence_free(fence);
}
static bool timeline_fence_signaled(struct fence *fence)
static bool timeline_fence_signaled(struct dma_fence *fence)
{
struct sync_timeline *parent = fence_parent(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
return (fence->seqno > parent->value) ? false : true;
}
static bool timeline_fence_enable_signaling(struct fence *fence)
static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
struct sync_pt *pt = fence_to_sync_pt(fence);
struct sync_timeline *parent = fence_parent(fence);
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
if (timeline_fence_signaled(fence))
return false;
......@@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence)
return true;
}
static void timeline_fence_value_str(struct fence *fence,
static void timeline_fence_value_str(struct dma_fence *fence,
char *str, int size)
{
snprintf(str, size, "%d", fence->seqno);
}
static void timeline_fence_timeline_value_str(struct fence *fence,
static void timeline_fence_timeline_value_str(struct dma_fence *fence,
char *str, int size)
{
struct sync_timeline *parent = fence_parent(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
snprintf(str, size, "%d", parent->value);
}
static const struct fence_ops timeline_fence_ops = {
static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
.enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
.wait = fence_default_wait,
.wait = dma_fence_default_wait,
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
......@@ -317,7 +317,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
sync_file = sync_file_create(&pt->base);
if (!sync_file) {
fence_put(&pt->base);
dma_fence_put(&pt->base);
err = -ENOMEM;
goto err;
}
......
......@@ -71,12 +71,13 @@ static const char *sync_status_str(int status)
return "error";
}
static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
static void sync_print_fence(struct seq_file *s,
struct dma_fence *fence, bool show)
{
int status = 1;
struct sync_timeline *parent = fence_parent(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
if (fence_is_signaled_locked(fence))
if (dma_fence_is_signaled_locked(fence))
status = fence->status;
seq_printf(s, " %s%sfence %s",
......@@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s,
int i;
seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
sync_status_str(!fence_is_signaled(sync_file->fence)));
sync_status_str(!dma_fence_is_signaled(sync_file->fence)));
if (fence_is_array(sync_file->fence)) {
struct fence_array *array = to_fence_array(sync_file->fence);
if (dma_fence_is_array(sync_file->fence)) {
struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
for (i = 0; i < array->num_fences; ++i)
sync_print_fence(s, array->fences[i], true);
......
......@@ -15,7 +15,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/fence.h>
#include <linux/dma-fence.h>
#include <linux/sync_file.h>
#include <uapi/linux/sync_file.h>
......@@ -45,10 +45,9 @@ struct sync_timeline {
struct list_head sync_timeline_list;
};
static inline struct sync_timeline *fence_parent(struct fence *fence)
static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
{
return container_of(fence->lock, struct sync_timeline,
child_list_lock);
return container_of(fence->lock, struct sync_timeline, child_list_lock);
}
/**
......@@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
* @active_list: sync timeline active child's list
*/
struct sync_pt {
struct fence base;
struct dma_fence base;
struct list_head child_list;
struct list_head active_list;
};
......
......@@ -54,7 +54,7 @@ static struct sync_file *sync_file_alloc(void)
return NULL;
}
static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct sync_file *sync_file;
......@@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
* takes ownership of @fence. The sync_file can be released with
* fput(sync_file->file). Returns the sync_file or NULL in case of error.
*/
struct sync_file *sync_file_create(struct fence *fence)
struct sync_file *sync_file_create(struct dma_fence *fence)
{
struct sync_file *sync_file;
......@@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence)
if (!sync_file)
return NULL;
sync_file->fence = fence_get(fence);
sync_file->fence = dma_fence_get(fence);
snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
......@@ -121,16 +121,16 @@ static struct sync_file *sync_file_fdget(int fd)
* Ensures @fd references a valid sync_file and returns a fence that
* represents all fence in the sync_file. On error NULL is returned.
*/
struct fence *sync_file_get_fence(int fd)
struct dma_fence *sync_file_get_fence(int fd)
{
struct sync_file *sync_file;
struct fence *fence;
struct dma_fence *fence;
sync_file = sync_file_fdget(fd);
if (!sync_file)
return NULL;
fence = fence_get(sync_file->fence);
fence = dma_fence_get(sync_file->fence);
fput(sync_file->file);
return fence;
......@@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd)
EXPORT_SYMBOL(sync_file_get_fence);
static int sync_file_set_fence(struct sync_file *sync_file,
struct fence **fences, int num_fences)
struct dma_fence **fences, int num_fences)
{
struct fence_array *array;
struct dma_fence_array *array;
/*
* The reference for the fences in the new sync_file and held
* in add_fence() during the merge procedure, so for num_fences == 1
* we already own a new reference to the fence. For num_fence > 1
* we own the reference of the fence_array creation.
* we own the reference of the dma_fence_array creation.
*/
if (num_fences == 1) {
sync_file->fence = fences[0];
kfree(fences);
} else {
array = fence_array_create(num_fences, fences,
fence_context_alloc(1), 1, false);
array = dma_fence_array_create(num_fences, fences,
dma_fence_context_alloc(1),
1, false);
if (!array)
return -ENOMEM;
......@@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file,
return 0;
}
static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
static struct dma_fence **get_fences(struct sync_file *sync_file,
int *num_fences)
{
if (fence_is_array(sync_file->fence)) {
struct fence_array *array = to_fence_array(sync_file->fence);
if (dma_fence_is_array(sync_file->fence)) {
struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
*num_fences = array->num_fences;
return array->fences;
......@@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
return &sync_file->fence;
}
static void add_fence(struct fence **fences, int *i, struct fence *fence)
static void add_fence(struct dma_fence **fences,
int *i, struct dma_fence *fence)
{
fences[*i] = fence;
if (!fence_is_signaled(fence)) {
fence_get(fence);
if (!dma_fence_is_signaled(fence)) {
dma_fence_get(fence);
(*i)++;
}
}
......@@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
struct sync_file *sync_file;
struct fence **fences, **nfences, **a_fences, **b_fences;
struct dma_fence **fences, **nfences, **a_fences, **b_fences;
int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
sync_file = sync_file_alloc();
......@@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
* and sync_file_create, this is a reasonable assumption.
*/
for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
struct fence *pt_a = a_fences[i_a];
struct fence *pt_b = b_fences[i_b];
struct dma_fence *pt_a = a_fences[i_a];
struct dma_fence *pt_b = b_fences[i_b];
if (pt_a->context < pt_b->context) {
add_fence(fences, &i, pt_a);
......@@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
add_fence(fences, &i, b_fences[i_b]);
if (i == 0)
fences[i++] = fence_get(a_fences[0]);
fences[i++] = dma_fence_get(a_fences[0]);
if (num_fences > i) {
nfences = krealloc(fences, i * sizeof(*fences),
......@@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref)
kref);
if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
fence_remove_callback(sync_file->fence, &sync_file->cb);
fence_put(sync_file->fence);
dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
dma_fence_put(sync_file->fence);
kfree(sync_file);
}
......@@ -307,12 +310,12 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
if (!poll_does_not_wait(wait) &&
!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
if (fence_add_callback(sync_file->fence, &sync_file->cb,
if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);
}
return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0;
}
static long sync_file_ioctl_merge(struct sync_file *sync_file,
......@@ -370,14 +373,14 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file,
return err;
}
static void sync_fill_fence_info(struct fence *fence,
static void sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
sizeof(info->obj_name));
strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
sizeof(info->driver_name));
if (fence_is_signaled(fence))
if (dma_fence_is_signaled(fence))
info->status = fence->status >= 0 ? 1 : fence->status;
else
info->status = 0;
......@@ -389,7 +392,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
{
struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
struct fence **fences;
struct dma_fence **fences;
__u32 size;
int num_fences, ret, i;
......@@ -429,7 +432,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
strlcpy(info.name, sync_file->name, sizeof(info.name));
info.status = fence_is_signaled(sync_file->fence);
info.status = dma_fence_is_signaled(sync_file->fence);
info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
......
......@@ -34,7 +34,7 @@
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>
#include <linux/dma-fence.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
......@@ -378,7 +378,7 @@ struct amdgpu_fence_driver {
struct timer_list fallback_timer;
unsigned num_fences_mask;
spinlock_t lock;
struct fence **fences;
struct dma_fence **fences;
};
/* some special values for the owner field */
......@@ -399,7 +399,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
......@@ -427,7 +427,7 @@ struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
struct fence *last_pt_update;
struct dma_fence *last_pt_update;
unsigned ref_count;
/* protected by vm mutex and spinlock */
......@@ -543,7 +543,7 @@ struct amdgpu_sa_bo {
struct amdgpu_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
struct fence *fence;
struct dma_fence *fence;
};
/*
......@@ -566,19 +566,19 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
*/
struct amdgpu_sync {
DECLARE_HASHTABLE(fences, 4);
struct fence *last_vm_update;
struct dma_fence *last_vm_update;
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct fence *f);
struct dma_fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
......@@ -703,10 +703,10 @@ struct amdgpu_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct amdgpu_bo *old_abo;
struct fence *excl;
struct dma_fence *excl;
unsigned shared_count;
struct fence **shared;
struct fence_cb cb;
struct dma_fence **shared;
struct dma_fence_cb cb;
bool async;
};
......@@ -742,7 +742,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f);
struct dma_fence **f);
struct amdgpu_ring {
struct amdgpu_device *adev;
......@@ -844,7 +844,7 @@ struct amdgpu_vm {
/* contains the page directory */
struct amdgpu_bo *page_directory;
unsigned max_pde_used;
struct fence *page_directory_fence;
struct dma_fence *page_directory_fence;
uint64_t last_eviction_counter;
/* array of page tables, one for each page directory entry */
......@@ -865,14 +865,14 @@ struct amdgpu_vm {
struct amdgpu_vm_id {
struct list_head list;
struct fence *first;
struct dma_fence *first;
struct amdgpu_sync active;
struct fence *last_flush;
struct dma_fence *last_flush;
atomic64_t owner;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
struct dma_fence *flushed_updates;
uint32_t current_gpu_reset_count;
......@@ -921,7 +921,7 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
......@@ -957,7 +957,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_ctx_ring {
uint64_t sequence;
struct fence **fences;
struct dma_fence **fences;
struct amd_sched_entity entity;
};
......@@ -966,7 +966,7 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
unsigned reset_counter;
spinlock_t ring_lock;
struct fence **fences;
struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
};
......@@ -982,8 +982,8 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct fence *fence);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct dma_fence *fence);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
......@@ -1181,10 +1181,10 @@ struct amdgpu_gfx {
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
struct fence *f);
struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ib, struct fence *last_vm_update,
struct amdgpu_job *job, struct fence **f);
struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
struct amdgpu_job *job, struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
......@@ -1225,7 +1225,7 @@ struct amdgpu_cs_parser {
struct amdgpu_bo_list *bo_list;
struct amdgpu_bo_list_entry vm_pd;
struct list_head validated;
struct fence *fence;
struct dma_fence *fence;
uint64_t bytes_moved_threshold;
uint64_t bytes_moved;
struct amdgpu_bo_list_entry *evictable;
......@@ -1245,7 +1245,7 @@ struct amdgpu_job {
struct amdgpu_ring *ring;
struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
struct fence *fence; /* the hw fence */
struct dma_fence *fence; /* the hw fence */
uint32_t preamble_status;
uint32_t num_ibs;
void *owner;
......
......@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
struct fence *fence = NULL;
struct dma_fence *fence = NULL;
int i, r;
start_jiffies = jiffies;
......@@ -43,17 +43,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
false);
if (r)
goto exit_do_move;
r = fence_wait(fence, false);
r = dma_fence_wait(fence, false);
if (r)
goto exit_do_move;
fence_put(fence);
dma_fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
if (fence)
fence_put(fence);
dma_fence_put(fence);
return r;
}
......
......@@ -719,7 +719,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
fence_put(parser->fence);
dma_fence_put(parser->fence);
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
......@@ -756,7 +756,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (p->bo_list) {
for (i = 0; i < p->bo_list->num_entries; i++) {
struct fence *f;
struct dma_fence *f;
/* ignore duplicates */
bo = p->bo_list->array[i].robj;
......@@ -956,7 +956,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
for (j = 0; j < num_deps; ++j) {
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx;
struct fence *fence;
struct dma_fence *fence;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
......@@ -978,7 +978,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
} else if (fence) {
r = amdgpu_sync_fence(adev, &p->job->sync,
fence);
fence_put(fence);
dma_fence_put(fence);
amdgpu_ctx_put(ctx);
if (r)
return r;
......@@ -1008,7 +1008,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->fence_ctx = entity->fence_context;
p->fence = fence_get(&job->base.s_fence->finished);
p->fence = dma_fence_get(&job->base.s_fence->finished);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
......@@ -1091,7 +1091,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
struct fence *fence;
struct dma_fence *fence;
long r;
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
......@@ -1107,8 +1107,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(fence))
r = PTR_ERR(fence);
else if (fence) {
r = fence_wait_timeout(fence, true, timeout);
fence_put(fence);
r = dma_fence_wait_timeout(fence, true, timeout);
dma_fence_put(fence);
} else
r = 1;
......
......@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
sizeof(struct fence*), GFP_KERNEL);
sizeof(struct dma_fence*), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
......@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
for (j = 0; j < amdgpu_sched_jobs; ++j)
fence_put(ctx->rings[i].fences[j]);
dma_fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
ctx->fences = NULL;
......@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
}
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct fence *fence)
struct dma_fence *fence)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
unsigned idx = 0;
struct fence *other = NULL;
struct dma_fence *other = NULL;
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
if (other) {
signed long r;
r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
}
fence_get(fence);
dma_fence_get(fence);
spin_lock(&ctx->ring_lock);
cring->fences[idx] = fence;
cring->sequence++;
spin_unlock(&ctx->ring_lock);
fence_put(other);
dma_fence_put(other);
return seq;
}
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
struct fence *fence;
struct dma_fence *fence;
spin_lock(&ctx->ring_lock);
......@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return NULL;
}
fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
spin_unlock(&ctx->ring_lock);
return fence;
......
......@@ -1599,7 +1599,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_funcs = NULL;
adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
......@@ -2193,7 +2193,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
struct fence **fence)
struct dma_fence **fence)
{
uint32_t domain;
int r;
......@@ -2312,30 +2312,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
if (need_full_reset && amdgpu_need_backup(adev)) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *bo, *tmp;
struct fence *fence = NULL, *next = NULL;
struct dma_fence *fence = NULL, *next = NULL;
DRM_INFO("recover vram bo from shadow\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
r = fence_wait(fence, false);
r = dma_fence_wait(fence, false);
if (r) {
WARN(r, "recovery from shadow isn't completed\n");
break;
}
}
fence_put(fence);
dma_fence_put(fence);
fence = next;
}
mutex_unlock(&adev->shadow_list_lock);
if (fence) {
r = fence_wait(fence, false);
r = dma_fence_wait(fence, false);
if (r)
WARN(r, "recovery from shadow isn't completed\n");
}
fence_put(fence);
dma_fence_put(fence);
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
......
......@@ -35,29 +35,29 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amdgpu_flip_work *work =
container_of(cb, struct amdgpu_flip_work, cb);
fence_put(f);
dma_fence_put(f);
schedule_work(&work->flip_work.work);
}
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
struct fence **f)
struct dma_fence **f)
{
struct fence *fence = *f;
struct dma_fence *fence = *f;
if (fence == NULL)
return false;
*f = NULL;
if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
return true;
fence_put(fence);
dma_fence_put(fence);
return false;
}
......@@ -244,9 +244,9 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
cleanup:
amdgpu_bo_unref(&work->old_abo);
fence_put(work->excl);
dma_fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
fence_put(work->shared[i]);
dma_fence_put(work->shared[i]);
kfree(work->shared);
kfree(work);
......
......@@ -48,7 +48,7 @@
*/
struct amdgpu_fence {
struct fence base;
struct dma_fence base;
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
......@@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void)
/*
* Cast helper
*/
static const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
......@@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
struct fence *old, **ptr;
struct dma_fence *old, **ptr;
uint32_t seq;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
......@@ -143,7 +143,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
seq = ++ring->fence_drv.sync_seq;
fence->ring = ring;
fence_init(&fence->base, &amdgpu_fence_ops,
dma_fence_init(&fence->base, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx,
seq);
......@@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
* emitting the fence would mess up the hardware ring buffer.
*/
old = rcu_dereference_protected(*ptr, 1);
if (old && !fence_is_signaled(old)) {
if (old && !dma_fence_is_signaled(old)) {
DRM_INFO("rcu slot is busy\n");
fence_wait(old, false);
dma_fence_wait(old, false);
}
rcu_assign_pointer(*ptr, fence_get(&fence->base));
rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base;
......@@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
seq &= drv->num_fences_mask;
do {
struct fence *fence, **ptr;
struct dma_fence *fence, **ptr;
++last_seq;
last_seq &= drv->num_fences_mask;
......@@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
r = fence_signal(fence);
r = dma_fence_signal(fence);
if (!r)
FENCE_TRACE(fence, "signaled from irq context\n");
DMA_FENCE_TRACE(fence, "signaled from irq context\n");
else
BUG();
fence_put(fence);
dma_fence_put(fence);
} while (last_seq != seq);
}
......@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
struct fence *fence, **ptr;
struct dma_fence *fence, **ptr;
int r;
if (!seq)
......@@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
rcu_read_lock();
fence = rcu_dereference(*ptr);
if (!fence || !fence_get_rcu(fence)) {
if (!fence || !dma_fence_get_rcu(fence)) {
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
r = fence_wait(fence, false);
fence_put(fence);
r = dma_fence_wait(fence, false);
dma_fence_put(fence);
return r;
}
......@@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
fence_put(ring->fence_drv.fences[j]);
dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
ring->fence_drv.fences = NULL;
ring->fence_drv.initialized = false;
......@@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
* Common fence implementation
*/
static const char *amdgpu_fence_get_driver_name(struct fence *fence)
static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
return "amdgpu";
}
static const char *amdgpu_fence_get_timeline_name(struct fence *f)
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
return (const char *)fence->ring->name;
......@@ -560,7 +560,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
static bool amdgpu_fence_enable_signaling(struct fence *f)
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
......@@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
return true;
}
......@@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
*/
static void amdgpu_fence_free(struct rcu_head *rcu)
{
struct fence *f = container_of(rcu, struct fence, rcu);
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amdgpu_fence *fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
}
......@@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
static void amdgpu_fence_release(struct fence *f)
static void amdgpu_fence_release(struct dma_fence *f)
{
call_rcu(&f->rcu, amdgpu_fence_free);
}
static const struct fence_ops amdgpu_fence_ops = {
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
.wait = fence_default_wait,
.wait = dma_fence_default_wait,
.release = amdgpu_fence_release,
};
......
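For readers skimming the rename: the driver-side pattern after this patch is unchanged apart from the dma_ prefix. A driver embeds a struct dma_fence, supplies a struct dma_fence_ops, and uses dma_fence_init()/dma_fence_signal()/dma_fence_put(), exactly as the amdgpu hunks above do. The following is a minimal sketch, not part of the patch; all foo_* names are hypothetical and only mirror the usage visible in the diff.

/*
 * Minimal illustrative sketch of a driver-private fence using the renamed
 * dma_fence API.  All foo_* identifiers are made up; the dma_fence_* calls
 * and the dma_fence_ops callbacks mirror the amdgpu usage shown above.
 */
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_fence {
	struct dma_fence base;	/* must embed struct dma_fence */
	spinlock_t lock;	/* lock handed to dma_fence_init() */
};

static const char *foo_get_driver_name(struct dma_fence *f)
{
	return "foo";
}

static const char *foo_get_timeline_name(struct dma_fence *f)
{
	return "foo.timeline";
}

static bool foo_enable_signaling(struct dma_fence *f)
{
	/* arm whatever interrupt or timer will later call dma_fence_signal() */
	return true;
}

static const struct dma_fence_ops foo_fence_ops = {
	.get_driver_name	= foo_get_driver_name,
	.get_timeline_name	= foo_get_timeline_name,
	.enable_signaling	= foo_enable_signaling,
	.wait			= dma_fence_default_wait,
};

static struct dma_fence *foo_fence_create(u64 context, unsigned int seqno)
{
	struct foo_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;

	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &foo_fence_ops, &fence->lock,
		       context, seqno);
	return &fence->base;
}

A producer would typically allocate a context once with dma_fence_context_alloc(), hand out references with dma_fence_get(), and call dma_fence_signal() followed by dma_fence_put() when the underlying DMA operation completes, matching the amdgpu fence paths in the hunks above.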