diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ee2c4d498d7d784645e86cae436ca0d7b9f803d1..5020265bfbd95dca36f30340c712aa9e28ebfb5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1043,6 +1043,7 @@ nouveau_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 3ba980d80a1a1231f1e860efb1758cda9f3fe93e..93ed43c413f0bbff28c393675b527376518c4319 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -22,6 +22,7 @@
 #include "nouveau_svm.h"
 #include "nouveau_drv.h"
 #include "nouveau_chan.h"
+#include "nouveau_dmem.h"
 
 #include <nvif/notify.h>
 #include <nvif/object.h>
@@ -104,6 +105,104 @@ struct nouveau_svmm {
 #define SVMM_ERR(s,f,a...)                                                     \
 	NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
 
+int
+nouveau_svmm_bind(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct drm_nouveau_svm_bind *args = data;
+	unsigned target, cmd, priority;
+	unsigned long addr, end, size;
+	struct mm_struct *mm;
+
+	args->va_start &= PAGE_MASK;
+	args->va_end &= PAGE_MASK;
+
+	/* Sanity check arguments */
+	if (args->reserved0 || args->reserved1)
+		return -EINVAL;
+	if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
+		return -EINVAL;
+	if (args->va_start >= args->va_end)
+		return -EINVAL;
+	if (!args->npages)
+		return -EINVAL;
+
+	cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
+	cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
+	switch (cmd) {
+	case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
+	priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;
+
+	/* FIXME: support CPU targets, i.e. all target values < GPU_VRAM */
+	target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
+	target &= NOUVEAU_SVM_BIND_TARGET_MASK;
+	switch (target) {
+	case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * FIXME: for now refuse a non-zero stride; the kernel migrate
+	 * function needs to learn about strides so that each device
+	 * driver does not have to reinvent that logic.
+	 */
+	if (args->stride)
+		return -EINVAL;
+
+	size = ((unsigned long)args->npages) << PAGE_SHIFT;
+	if ((args->va_start + size) <= args->va_start)
+		return -EINVAL;
+	if ((args->va_start + size) > args->va_end)
+		return -EINVAL;
+
+	/*
+	 * The arguments are sane. For now we only support the migrate
+	 * command, but commands such as memory policy (what to do on a
+	 * page fault) may be added later.
+	 */
+
+	mm = get_task_mm(current);
+	if (!mm)
+		return -EINVAL;
+	down_read(&mm->mmap_sem);
+
+	for (addr = args->va_start, end = args->va_start + size; addr < end;) {
+		struct vm_area_struct *vma;
+		unsigned long next;
+
+		vma = find_vma_intersection(mm, addr, end);
+		if (!vma)
+			break;
+
+		addr = max(addr, vma->vm_start);
+		next = min(vma->vm_end, end);
+		/* This is best effort, so we ignore errors. */
+		nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
+		addr = next;
+	}
+
+	/*
+	 * FIXME: return the number of pages we actually migrated; the
+	 * migrate API needs to be extended to return that information
+	 * so that we can report it to user space.
+	 */
+	args->result = 0;
+
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+
+	return 0;
+}
+
 /* Unlink channel instance from SVMM. */
 void
 nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
index 0379a1c316ca42f6f61d9ee346e02d0ac45d5280..e839d81894611cb4b0bec4eef6fd6944e1db3b6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
@@ -17,6 +17,7 @@ int nouveau_svmm_init(struct drm_device *, void *, struct drm_file *);
 void nouveau_svmm_fini(struct nouveau_svmm **);
 int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
 void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
+int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
 #else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
 static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
 static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
@@ -37,5 +38,11 @@ static inline int nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
 }
 
 static inline void nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst) {}
+
+static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
+				    struct drm_file *file)
+{
+	return -ENOSYS;
+}
 #endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
 #endif
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index afac182c80d8d5e504ae2dbcb16790e00fafaa07..9459a6e3bc1f4eee615f548e1617ac2d46114a40 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -134,6 +134,7 @@ struct drm_nouveau_gem_cpu_fini {
 #define DRM_NOUVEAU_GPUOBJ_FREE        0x06 /* deprecated */
 #define DRM_NOUVEAU_NVIF               0x07
 #define DRM_NOUVEAU_SVM_INIT           0x08
+#define DRM_NOUVEAU_SVM_BIND           0x09
 #define DRM_NOUVEAU_GEM_NEW            0x40
 #define DRM_NOUVEAU_GEM_PUSHBUF        0x41
 #define DRM_NOUVEAU_GEM_CPU_PREP       0x42
@@ -145,7 +146,47 @@ struct drm_nouveau_svm_init {
 	__u64 unmanaged_size;
 };
 
+struct drm_nouveau_svm_bind {
+	__u64 header;
+	__u64 va_start;
+	__u64 va_end;
+	__u64 npages;
+	__u64 stride;
+	__u64 result;
+	__u64 reserved0;
+	__u64 reserved1;
+};
+
+#define NOUVEAU_SVM_BIND_COMMAND_SHIFT          0
+#define NOUVEAU_SVM_BIND_COMMAND_BITS           8
+#define NOUVEAU_SVM_BIND_COMMAND_MASK           ((1 << 8) - 1)
+#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT         8
+#define NOUVEAU_SVM_BIND_PRIORITY_BITS          8
+#define NOUVEAU_SVM_BIND_PRIORITY_MASK          ((1 << 8) - 1)
+#define NOUVEAU_SVM_BIND_TARGET_SHIFT           16
+#define NOUVEAU_SVM_BIND_TARGET_BITS            32
+#define NOUVEAU_SVM_BIND_TARGET_MASK            0xffffffff
+
+/*
+ * Below is used to validate ioctl arguments; userspace can also use it to
+ * check that no bit is set beyond fields known to a given kernel version.
+ */
+#define NOUVEAU_SVM_BIND_VALID_BITS     48
+#define NOUVEAU_SVM_BIND_VALID_MASK     ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
+
+/*
+ * NOUVEAU_SVM_BIND_COMMAND__MIGRATE: synchronous migration to target memory.
+ * result: number of pages successfully migrated to the target memory.
+ */
+#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE               0
+
+/*
+ * NOUVEAU_SVM_BIND_TARGET__GPU_VRAM: target the GPU VRAM memory.
+ */
+#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM               (1UL << 31)
+
 #define DRM_IOCTL_NOUVEAU_SVM_INIT           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
+#define DRM_IOCTL_NOUVEAU_SVM_BIND           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
 
 #define DRM_IOCTL_NOUVEAU_GEM_NEW            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
 #define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF        DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
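
For reference, a minimal userspace sketch of driving the new ioctl. This is
illustrative, not part of the patch: the helper name is hypothetical, the file
descriptor is assumed to be an already-open (and, per DRM_AUTH, authenticated
or render-node) nouveau device fd, and 4 KiB pages are assumed.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include <drm/nouveau_drm.h>

/*
 * Hypothetical helper: synchronously migrate npages pages starting at
 * addr (page aligned) to GPU VRAM, best effort.
 */
static int svm_migrate_to_vram(int fd, uint64_t addr, uint64_t npages)
{
	struct drm_nouveau_svm_bind args;

	memset(&args, 0, sizeof(args)); /* reserved0/reserved1 must be zero */
	args.header =
		((__u64)NOUVEAU_SVM_BIND_COMMAND__MIGRATE <<
		 NOUVEAU_SVM_BIND_COMMAND_SHIFT) |
		((__u64)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM <<
		 NOUVEAU_SVM_BIND_TARGET_SHIFT);
	args.va_start = addr;
	args.va_end = addr + (npages << 12); /* assumes PAGE_SHIFT == 12 */
	args.npages = npages;
	args.stride = 0; /* non-zero stride is rejected for now */

	return ioctl(fd, DRM_IOCTL_NOUVEAU_SVM_BIND, &args);
}

Note the migration is best effort: the kernel skips holes in the range and
ignores per-VMA errors, and args.result is currently always zero (see the
FIXME in nouveau_svmm_bind()).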