diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f1cadad4e81cacfce23887098bf83e6c028f36a2..4575f368455d581589d594d6346efc6da6b7d83e 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -206,14 +206,14 @@ static void remove_from_engine(struct i915_request *rq)
 	 * check that the rq still belongs to the newly locked engine.
 	 */
 	locked = READ_ONCE(rq->engine);
-	spin_lock(&locked->active.lock);
+	spin_lock_irq(&locked->active.lock);
 	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
 		spin_unlock(&locked->active.lock);
 		spin_lock(&engine->active.lock);
 		locked = engine;
 	}
 	list_del(&rq->sched.link);
-	spin_unlock(&locked->active.lock);
+	spin_unlock_irq(&locked->active.lock);
 }
 
 bool i915_request_retire(struct i915_request *rq)
@@ -242,8 +242,6 @@ bool i915_request_retire(struct i915_request *rq)
 				  &i915_request_timeline(rq)->requests));
 	rq->ring->head = rq->postfix;
 
-	local_irq_disable();
-
 	/*
 	 * We only loosely track inflight requests across preemption,
 	 * and so we may find ourselves attempting to retire a _completed_
@@ -252,7 +250,7 @@ bool i915_request_retire(struct i915_request *rq)
 	 */
 	remove_from_engine(rq);
 
-	spin_lock(&rq->lock);
+	spin_lock_irq(&rq->lock);
 	i915_request_mark_complete(rq);
 	if (!i915_request_signaled(rq))
 		dma_fence_signal_locked(&rq->fence);
@@ -267,9 +265,7 @@ bool i915_request_retire(struct i915_request *rq)
 		__notify_execute_cb(rq);
 	}
 	GEM_BUG_ON(!list_empty(&rq->execute_cb));
-	spin_unlock(&rq->lock);
-
-	local_irq_enable();
+	spin_unlock_irq(&rq->lock);
 
 	remove_from_client(rq);
 	list_del(&rq->link);
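
The change above folds the local_irq_disable()/local_irq_enable() pair that
used to bracket remove_from_engine() and the rq->lock section into the lock
calls themselves, so interrupts are only disabled for the duration of each
critical section.  Note that in remove_from_engine() only the outermost
lock/unlock becomes the _irq variant; the lock-chasing loop in the middle
keeps plain spin_lock()/spin_unlock() because interrupts are already off
from the initial spin_lock_irq() and must stay off until the final
spin_unlock_irq().

As a minimal sketch of the transformation (using a hypothetical lock and
function names, not the i915 structures), the open-coded form is replaced
by the _irq helpers:

	static DEFINE_SPINLOCK(example_lock);

	static void before(void)
	{
		local_irq_disable();		/* irqs off across the section */
		spin_lock(&example_lock);
		/* ... critical section ... */
		spin_unlock(&example_lock);
		local_irq_enable();
	}

	static void after(void)
	{
		spin_lock_irq(&example_lock);	/* disables irqs, then takes the lock */
		/* ... critical section ... */
		spin_unlock_irq(&example_lock);	/* drops the lock, re-enables irqs */
	}

Both forms return with interrupts enabled, which assumes the caller runs
with interrupts enabled in the first place; a context that may already have
interrupts disabled would need spin_lock_irqsave()/spin_unlock_irqrestore()
instead.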