diff --git a/lib/lockref.c b/lib/lockref.c
index 677d036cf3c70d6d72b9aa5f108470dd890752cd..e294ae445c9ab4b7e97dd1a46efc7dc7d6a1848d 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -3,6 +3,14 @@
 
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
+/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
@@ -14,8 +22,9 @@
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {  	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
-		old.lock_count = cmpxchg64(&lockref->lock_count,		\
-					   old.lock_count, new.lock_count);	\
+		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
+						   old.lock_count,		\
+						   new.lock_count);		\
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\