diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index f4a59f5631e47921493130bd47b1b8e8c172ced0..5278d0560a4aa52612e0c57dbaba7034d1807eb3 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -71,10 +71,16 @@ static DEFINE_MUTEX(hwspinlock_tree_lock);
  * This function attempts to lock an hwspinlock, and will immediately
  * fail if the hwspinlock is already taken.
  *
- * Upon a successful return from this function, preemption (and possibly
- * interrupts) is disabled, so the caller must not sleep, and is advised to
- * release the hwspinlock as soon as possible. This is required in order to
- * minimize remote cores polling on the hardware interconnect.
+ * Caution: If the mode is HWLOCK_RAW, the caller must serialize the code
+ * path that takes the hardware lock with a mutex or spinlock. Callers that
+ * need time-consuming or sleepable operations while holding the hardware
+ * lock must use a sleepable lock (such as a mutex) for that protection.
+ *
+ * If the mode is not HWLOCK_RAW, upon a successful return from this function,
+ * preemption (and possibly interrupts) is disabled, so the caller must not
+ * sleep, and is advised to release the hwspinlock as soon as possible. This is
+ * required in order to minimize remote cores polling on the hardware
+ * interconnect.
  *
  * The user decides whether local interrupts are disabled or not, and if yes,
  * whether he wants their previous state to be saved. It is up to the user
@@ -113,6 +119,9 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 	case HWLOCK_IRQ:
 		ret = spin_trylock_irq(&hwlock->lock);
 		break;
+	case HWLOCK_RAW:
+		ret = 1;	/* no local lock is taken in raw mode */
+		break;
 	default:
 		ret = spin_trylock(&hwlock->lock);
 		break;
@@ -134,6 +143,9 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 		case HWLOCK_IRQ:
 			spin_unlock_irq(&hwlock->lock);
 			break;
+		case HWLOCK_RAW:
+			/* Nothing to do */
+			break;
 		default:
 			spin_unlock(&hwlock->lock);
 			break;
@@ -170,9 +182,14 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock);
  * is already taken, the function will busy loop waiting for it to
  * be released, but give up after @timeout msecs have elapsed.
  *
- * Upon a successful return from this function, preemption is disabled
- * (and possibly local interrupts, too), so the caller must not sleep,
- * and is advised to release the hwspinlock as soon as possible.
+ * Caution: If the mode is HWLOCK_RAW, the caller must serialize the code
+ * path that takes the hardware lock with a mutex or spinlock. Callers that
+ * need time-consuming or sleepable operations while holding the hardware
+ * lock must use a sleepable lock (such as a mutex) for that protection.
+ *
+ * If the mode is not HWLOCK_RAW, upon a successful return from this function,
+ * preemption is disabled (and possibly local interrupts, too), so the caller
+ * must not sleep, and is advised to release the hwspinlock as soon as possible.
  * This is required in order to minimize remote cores polling on the
  * hardware interconnect.
  *
@@ -266,6 +283,9 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 	case HWLOCK_IRQ:
 		spin_unlock_irq(&hwlock->lock);
 		break;
+	case HWLOCK_RAW:
+		/* Nothing to do */
+		break;
 	default:
 		spin_unlock(&hwlock->lock);
 		break;
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 859d673d98c80239715df9ef56c32672647e47a1..fe450ee58d553a3d89564e26f4e85a76cf5922f9 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -24,6 +24,7 @@
 /* hwspinlock mode argument */
 #define HWLOCK_IRQSTATE	0x01	/* Disable interrupts, save state */
 #define HWLOCK_IRQ	0x02	/* Disable interrupts, don't save state */
+#define HWLOCK_RAW	0x03	/* No local locking, caller serializes */
 
 struct device;
 struct device_node;
@@ -175,6 +176,25 @@ static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
 	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
 }
 
+/**
+ * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * Caution: The caller must serialize the code path that takes the hardware
+ * lock with a mutex or spinlock to avoid deadlock; this allows time-consuming
+ * or sleepable operations to be performed under the hardware lock.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
+{
+	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
+}
+
 /**
  * hwspin_trylock() - attempt to lock a specific hwspinlock
  * @hwlock: an hwspinlock which we want to trylock
@@ -242,6 +262,29 @@ int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
 	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
 }
 
+/**
+ * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up after @to msecs have elapsed.
+ *
+ * Caution: The caller must serialize the code path that takes the hardware
+ * lock with a mutex or spinlock to avoid deadlock; this allows time-consuming
+ * or sleepable operations to be performed under the hardware lock.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
+{
+	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
+}
+
 /**
  * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
  * @hwlock: the hwspinlock to be locked
@@ -301,6 +344,21 @@ static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
 	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
 }
 
+/**
+ * hwspin_unlock_raw() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
+{
+	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
+}
+
 /**
  * hwspin_unlock() - unlock hwspinlock
  * @hwlock: a previously-acquired hwspinlock which we want to unlock
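
Usage sketch (illustrative, not part of the patch): a hypothetical driver
serializes the HWLOCK_RAW path with a local mutex, as the kerneldoc above
requires, so it can sleep while the hardware lock is held. The my_dev
structure and my_dev_update() function are assumptions for illustration only.

#include <linux/hwspinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>

struct my_dev {
	struct hwspinlock *hwlock;	/* e.g. from hwspin_lock_request_specific() */
	struct mutex lock;		/* serializes local access to the raw hwlock */
};

static int my_dev_update(struct my_dev *dev)
{
	int ret;

	/* The mutex prevents local tasks from racing on the raw hwlock. */
	mutex_lock(&dev->lock);

	/* HWLOCK_RAW: preemption stays enabled, so sleeping is allowed. */
	ret = hwspin_lock_timeout_raw(dev->hwlock, 100);
	if (ret) {
		mutex_unlock(&dev->lock);
		return ret;
	}

	/* Time-consuming or sleepable work under the hardware lock. */
	msleep(10);

	hwspin_unlock_raw(dev->hwlock);
	mutex_unlock(&dev->lock);

	return 0;
}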