Commit 14aad287 authored by Stephen Rothwell

Merge remote-tracking branch 'rpmsg/for-next'

parents 1e12aa21 c8b62c88
@@ -73,3 +73,23 @@ Description:
This sysfs entry tells us whether the channel is a local
server channel that is announced (values are either
true or false).
What: /sys/bus/rpmsg/devices/.../driver_override
Date: April 2018
KernelVersion: 4.18
Contact: Bjorn Andersson <bjorn.andersson@linaro.org>
Description:
Every rpmsg device is a communication channel with a remote
processor. Channels are identified by a textual name (see
/sys/bus/rpmsg/devices/.../name above) and have a local
("source") rpmsg address, and remote ("destination") rpmsg
address.
The listening entity (or client) which communicates with a
remote processor is referred as rpmsg driver. The rpmsg device
and rpmsg driver are matched based on rpmsg device name and
rpmsg driver ID table.
This sysfs entry allows the rpmsg driver for a rpmsg device
to be specified which will override standard OF, ID table
and name matching.
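As a hedged illustration of the ABI described above (the channel name below is invented, and rpmsg_chrdev serves only as an example of an in-tree rpmsg driver), a userspace program might set the override like this:

    /* Sketch: write a driver name into driver_override; list
     * /sys/bus/rpmsg/devices to find real channel names. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/bus/rpmsg/devices/"
                    "virtio0.rpmsg-demo-channel.-1.1024/driver_override";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* On the next (re)bind this channel matches the rpmsg_chrdev
             * driver, bypassing OF, ID table and name matching. */
            if (write(fd, "rpmsg_chrdev", strlen("rpmsg_chrdev")) < 0)
                    perror("write");
            close(fd);
            return 0;
    }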
@@ -22,9 +22,15 @@ The edge is described by the following properties:
Definition: should specify the IRQ used by the remote processor to
signal this processor about communication related updates
- qcom,ipc:
- mboxes:
Usage: required
Value type: <prop-encoded-array>
Definition: reference to the associated doorbell in APCS, as described
in mailbox/mailbox.txt
- qcom,ipc:
Usage: required, unless mboxes is specified
Value type: <prop-encoded-array>
Definition: three entries specifying the outgoing ipc bit used for
signaling the remote processor:
- phandle to a syscon node representing the apcs registers
......
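For orientation, a minimal sketch of the two alternatives in DTS form, following the Example convention of binding documents (all labels, interrupt specifiers and cell values below are invented for illustration):

    /* New style: doorbell driven through the mailbox framework */
    smd-edge {
            interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
            mboxes = <&apcs 8>;             /* hypothetical APCS doorbell */
    };

    /* Legacy style: raw syscon reference, still accepted */
    smd-edge {
            interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
            qcom,ipc = <&apcs 8 12>;        /* syscon phandle, offset, bit */
    };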
@@ -71,10 +71,16 @@ static DEFINE_MUTEX(hwspinlock_tree_lock);
* This function attempts to lock an hwspinlock, and will immediately
* fail if the hwspinlock is already taken.
*
* Upon a successful return from this function, preemption (and possibly
* interrupts) is disabled, so the caller must not sleep, and is advised to
* release the hwspinlock as soon as possible. This is required in order to
* minimize remote cores polling on the hardware interconnect.
* Caution: if the mode is HWLOCK_RAW, the caller must serialize the section
* taking the hardware lock with a mutex or spinlock. In some scenarios the
* caller needs to perform time-consuming or sleepable operations while
* holding the hardware lock, and therefore needs a sleepable lock (such as
* a mutex) to protect them.
*
* If the mode is not HWLOCK_RAW, upon a successful return from this function,
* preemption (and possibly interrupts) is disabled, so the caller must not
* sleep, and is advised to release the hwspinlock as soon as possible. This is
* required in order to minimize remote cores polling on the hardware
* interconnect.
*
* The user decides whether local interrupts are disabled or not, and if yes,
* whether he wants their previous state to be saved. It is up to the user
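A minimal sketch of the usage pattern this comment prescribes (all names below are invented for illustration): a regular mutex serializes local callers around the HWLOCK_RAW acquisition, so sleepable work can safely be done while the hardware lock is held.

    #include <linux/hwspinlock.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_mutex);        /* hypothetical local guard */
    static struct hwspinlock *demo_hwlock;  /* requested elsewhere */

    static int demo_update(void)
    {
            int ret;

            mutex_lock(&demo_mutex);                /* sleepable, local-side lock */
            ret = hwspin_trylock_raw(demo_hwlock);  /* no preempt/IRQ changes */
            if (ret) {
                    mutex_unlock(&demo_mutex);
                    return ret;
            }
            /* ... time-consuming or sleepable work, guarded by demo_mutex ... */
            hwspin_unlock_raw(demo_hwlock);
            mutex_unlock(&demo_mutex);
            return 0;
    }

The same pattern applies to the hwspin_lock_timeout_raw() variant introduced further down.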
@@ -106,12 +112,20 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
* problems with hwspinlock usage (e.g. scheduler checks like
* 'scheduling while atomic' etc.)
*/
if (mode == HWLOCK_IRQSTATE)
switch (mode) {
case HWLOCK_IRQSTATE:
ret = spin_trylock_irqsave(&hwlock->lock, *flags);
else if (mode == HWLOCK_IRQ)
break;
case HWLOCK_IRQ:
ret = spin_trylock_irq(&hwlock->lock);
else
break;
case HWLOCK_RAW:
ret = 1; /* no local spinlock taken; the caller serializes */
break;
default:
ret = spin_trylock(&hwlock->lock);
break;
}
/* is lock already taken by another context on the local cpu ? */
if (!ret)
@@ -122,12 +136,20 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
/* if hwlock is already taken, undo spin_trylock_* and exit */
if (!ret) {
if (mode == HWLOCK_IRQSTATE)
switch (mode) {
case HWLOCK_IRQSTATE:
spin_unlock_irqrestore(&hwlock->lock, *flags);
else if (mode == HWLOCK_IRQ)
break;
case HWLOCK_IRQ:
spin_unlock_irq(&hwlock->lock);
else
break;
case HWLOCK_RAW:
/* Nothing to do */
break;
default:
spin_unlock(&hwlock->lock);
break;
}
return -EBUSY;
}
@@ -160,9 +182,14 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock);
* is already taken, the function will busy loop waiting for it to
* be released, but give up after @to msecs have elapsed.
*
* Upon a successful return from this function, preemption is disabled
* (and possibly local interrupts, too), so the caller must not sleep,
* and is advised to release the hwspinlock as soon as possible.
* Caution: if the mode is HWLOCK_RAW, the caller must serialize the section
* taking the hardware lock with a mutex or spinlock. In some scenarios the
* caller needs to perform time-consuming or sleepable operations while
* holding the hardware lock, and therefore needs a sleepable lock (such as
* a mutex) to protect them.
*
* If the mode is not HWLOCK_RAW, upon a successful return from this function,
* preemption is disabled (and possibly local interrupts, too), so the caller
* must not sleep, and is advised to release the hwspinlock as soon as possible.
* This is required in order to minimize remote cores polling on the
* hardware interconnect.
*
@@ -249,12 +276,20 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
hwlock->bank->ops->unlock(hwlock);
/* Undo the spin_trylock{_irq, _irqsave} called while locking */
if (mode == HWLOCK_IRQSTATE)
switch (mode) {
case HWLOCK_IRQSTATE:
spin_unlock_irqrestore(&hwlock->lock, *flags);
else if (mode == HWLOCK_IRQ)
break;
case HWLOCK_IRQ:
spin_unlock_irq(&hwlock->lock);
else
break;
case HWLOCK_RAW:
/* Nothing to do */
break;
default:
spin_unlock(&hwlock->lock);
break;
}
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
......
@@ -24,7 +24,6 @@ config IMX_REMOTEPROC
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
depends on HAS_DMA
depends on ARCH_OMAP4 || SOC_OMAP5
depends on OMAP_IOMMU
select MAILBOX
......
@@ -761,13 +761,11 @@ static int q6v5_start(struct rproc *rproc)
}
/* Assign MBA image access in DDR to q6 */
xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
qproc->mba_phys,
qproc->mba_size);
if (xfermemop_ret) {
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
qproc->mba_phys, qproc->mba_size);
if (ret) {
dev_err(qproc->dev,
"assigning Q6 access to mba memory failed: %d\n",
xfermemop_ret);
"assigning Q6 access to mba memory failed: %d\n", ret);
goto disable_active_clks;
}
@@ -1083,6 +1081,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
dev_err(qproc->dev, "unable to resolve mba region\n");
return ret;
}
of_node_put(node);
qproc->mba_phys = r.start;
qproc->mba_size = resource_size(&r);
@@ -1100,6 +1099,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
dev_err(qproc->dev, "unable to resolve mpss region\n");
return ret;
}
of_node_put(node);
qproc->mpss_phys = qproc->mpss_reloc = r.start;
qproc->mpss_size = resource_size(&r);
......
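Both one-line additions above apply the usual OF reference-counting rule: of_parse_phandle() returns a device node with an elevated refcount, which of_node_put() must drop once the node is no longer needed. A condensed sketch of the pattern (names abbreviated; in this sketch the reference is dropped immediately after use, which also covers the error path):

    node = of_parse_phandle(dev->of_node, "memory-region", 0);
    if (!node)
            return -EINVAL;
    ret = of_address_to_resource(node, 0, &r);
    of_node_put(node);      /* balance the reference taken by of_parse_phandle() */
    if (ret)
            return ret;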
@@ -1163,7 +1163,7 @@ int rproc_trigger_recovery(struct rproc *rproc)
if (ret)
return ret;
ret = rproc_stop(rproc, false);
ret = rproc_stop(rproc, true);
if (ret)
goto unlock_mutex;
@@ -1316,7 +1316,7 @@ void rproc_shutdown(struct rproc *rproc)
if (!atomic_dec_and_test(&rproc->power))
goto out;
ret = rproc_stop(rproc, true);
ret = rproc_stop(rproc, false);
if (ret) {
atomic_inc(&rproc->power);
goto out;
......
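Taken together, the two hunks above swap the boolean passed to rproc_stop(): recovery now reports a crashed remote, while a clean shutdown does not. Assuming the second parameter is the "crashed" flag (consistent with the remoteproc.h hunk at the end of this commit, which renames "graceful" to "crashed"), the corrected call sites read:

    /* sketch of the resulting call sites, not verbatim kernel code */
    ret = rproc_stop(rproc, true);          /* recovery: the remote crashed */
    ret = rproc_stop(rproc, false);         /* shutdown: orderly, requested stop */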
@@ -39,6 +39,7 @@ config RPMSG_QCOM_GLINK_SMEM
config RPMSG_QCOM_SMD
tristate "Qualcomm Shared Memory Driver (SMD)"
depends on MAILBOX
depends on QCOM_SMEM
select RPMSG
help
......
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
@@ -107,6 +108,8 @@ static const struct {
* @ipc_regmap: regmap handle holding the outgoing ipc register
* @ipc_offset: offset within @ipc_regmap of the register for ipc
* @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap
* @mbox_client: mailbox client handle
* @mbox_chan: apcs ipc mailbox channel handle
* @channels: list of all channels detected on this edge
* @channels_lock: guard for modifications of @channels
* @allocated: array of bitmaps representing already allocated channels
@@ -129,6 +132,9 @@ struct qcom_smd_edge {
int ipc_offset;
int ipc_bit;
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
struct list_head channels;
spinlock_t channels_lock;
@@ -366,7 +372,17 @@ static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
{
struct qcom_smd_edge *edge = channel->edge;
regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
if (edge->mbox_chan) {
/*
* We can ignore a failing mbox_send_message() as the only
* possible cause is that the FIFO in the framework is full of
* other writes to the same bit.
*/
mbox_send_message(edge->mbox_chan, NULL);
mbox_client_txdone(edge->mbox_chan, 0);
} else {
regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
}
}
/*
@@ -1326,27 +1342,37 @@ static int qcom_smd_parse_edge(struct device *dev,
key = "qcom,remote-pid";
of_property_read_u32(node, key, &edge->remote_pid);
syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
if (!syscon_np) {
dev_err(dev, "no qcom,ipc node\n");
return -ENODEV;
}
edge->mbox_client.dev = dev;
edge->mbox_client.knows_txdone = true;
edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
if (IS_ERR(edge->mbox_chan)) {
if (PTR_ERR(edge->mbox_chan) != -ENODEV)
return PTR_ERR(edge->mbox_chan);
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
if (IS_ERR(edge->ipc_regmap))
return PTR_ERR(edge->ipc_regmap);
edge->mbox_chan = NULL;
key = "qcom,ipc";
ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
if (ret < 0) {
dev_err(dev, "no offset in %s\n", key);
return -EINVAL;
}
syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
if (!syscon_np) {
dev_err(dev, "no qcom,ipc node\n");
return -ENODEV;
}
ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
if (ret < 0) {
dev_err(dev, "no bit in %s\n", key);
return -EINVAL;
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
if (IS_ERR(edge->ipc_regmap))
return PTR_ERR(edge->ipc_regmap);
key = "qcom,ipc";
ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
if (ret < 0) {
dev_err(dev, "no offset in %s\n", key);
return -EINVAL;
}
ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
if (ret < 0) {
dev_err(dev, "no bit in %s\n", key);
return -EINVAL;
}
}
ret = of_property_read_string(node, "label", &edge->name);
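Because removed and added lines are interleaved above, here is a condensed sketch of the resulting control flow (error messages and the offset/bit reads abbreviated): the mailbox channel is tried first, and only an -ENODEV from mbox_request_channel() triggers the legacy qcom,ipc fallback.

    edge->mbox_client.dev = dev;
    edge->mbox_client.knows_txdone = true;
    edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
    if (IS_ERR(edge->mbox_chan)) {
            /* any error other than a missing mboxes property is fatal */
            if (PTR_ERR(edge->mbox_chan) != -ENODEV)
                    return PTR_ERR(edge->mbox_chan);

            edge->mbox_chan = NULL;

            /* legacy fallback: resolve the syscon regmap from qcom,ipc,
             * then read the register offset and bit index from the same
             * property, as in the original code */
            syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
            if (!syscon_np)
                    return -ENODEV;
            edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
            if (IS_ERR(edge->ipc_regmap))
                    return PTR_ERR(edge->ipc_regmap);
    }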
@@ -1453,6 +1479,9 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
return edge;
unregister_dev:
if (!IS_ERR_OR_NULL(edge->mbox_chan))
mbox_free_channel(edge->mbox_chan);
device_unregister(&edge->dev);
return ERR_PTR(ret);
}
@@ -1481,6 +1510,7 @@ int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
if (ret)
dev_warn(&edge->dev, "can't remove smd device: %d\n", ret);
mbox_free_channel(edge->mbox_chan);
device_unregister(&edge->dev);
return 0;
......
@@ -581,4 +581,6 @@ static void rpmsg_chrdev_exit(void)
unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
}
module_exit(rpmsg_chrdev_exit);
MODULE_ALIAS("rpmsg:rpmsg_chrdev");
MODULE_LICENSE("GPL v2");
@@ -333,11 +333,49 @@ field##_show(struct device *dev, \
} \
static DEVICE_ATTR_RO(field);
#define rpmsg_string_attr(field, member) \
static ssize_t \
field##_store(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t sz) \
{ \
struct rpmsg_device *rpdev = to_rpmsg_device(dev); \
char *new, *old; \
\
new = kstrndup(buf, sz, GFP_KERNEL); \
if (!new) \
return -ENOMEM; \
new[strcspn(new, "\n")] = '\0'; \
\
device_lock(dev); \
old = rpdev->member; \
if (strlen(new)) { \
rpdev->member = new; \
} else { \
kfree(new); \
rpdev->member = NULL; \
} \
device_unlock(dev); \
\
kfree(old); \
\
return sz; \
} \
static ssize_t \
field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct rpmsg_device *rpdev = to_rpmsg_device(dev); \
\
return sprintf(buf, "%s\n", rpdev->member); \
} \
static DEVICE_ATTR_RW(field)
/* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */
rpmsg_show_attr(name, id.name, "%s\n");
rpmsg_show_attr(src, src, "0x%x\n");
rpmsg_show_attr(dst, dst, "0x%x\n");
rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n");
rpmsg_string_attr(driver_override, driver_override);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -359,6 +397,7 @@ static struct attribute *rpmsg_dev_attrs[] = {
&dev_attr_dst.attr,
&dev_attr_src.attr,
&dev_attr_announce.attr,
&dev_attr_driver_override.attr,
NULL,
};
ATTRIBUTE_GROUPS(rpmsg_dev);
......
@@ -24,6 +24,7 @@
/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW 0x03 /* No IRQ or preemption changes; caller serializes */
struct device;
struct device_node;
@@ -175,6 +176,25 @@ static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}
/**
* hwspin_trylock_raw() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
*
* This function attempts to lock an hwspinlock, and will immediately fail
* if the hwspinlock is already taken.
*
* Caution: the caller must serialize calls that take the hardware lock with a
* mutex or spinlock to avoid deadlock; doing so also lets the caller perform
* time-consuming or sleepable operations while the hardware lock is held.
*
* Returns 0 if we successfully locked the hwspinlock, -EBUSY if
* the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
*/
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
/**
* hwspin_trylock() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
@@ -242,6 +262,29 @@ int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}
/**
* hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
* @to: timeout value in msecs
*
* This function locks the underlying @hwlock. If the @hwlock
* is already taken, the function will busy loop waiting for it to
* be released, but give up after @to msecs have elapsed.
*
* Caution: the caller must serialize calls that take the hardware lock with a
* mutex or spinlock to avoid deadlock; doing so also lets the caller perform
* time-consuming or sleepable operations while the hardware lock is held.
*
* Returns 0 when the @hwlock was successfully taken, and an appropriate
* error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
* busy after @to msecs). The function will never sleep.
*/
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}
/**
* hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
@@ -301,6 +344,21 @@ static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}
/**
* hwspin_unlock_raw() - unlock hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
*
* This function will unlock a specific hwspinlock.
*
* @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before calling
* this function: it is a bug to call unlock on a @hwlock that is already
* unlocked.
*/
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}
/**
* hwspin_unlock() - unlock hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
......
@@ -569,7 +569,7 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
void rproc_add_subdev(struct rproc *rproc,
struct rproc_subdev *subdev,
int (*probe)(struct rproc_subdev *subdev),
void (*remove)(struct rproc_subdev *subdev, bool graceful));
void (*remove)(struct rproc_subdev *subdev, bool crashed));
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
......