Commit b2dbdf2c authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.13-rc5' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Nothing too earth shattering here, it just seems like lots of little
  things all over the place.

  msm has probably the larger amount of changes, but they all seem fine,
  otherwise, some rockchip, i915, etnaviv and exynos fixes, along with
  one nouveau regression fix for some older GPUs"

* tag 'drm-fixes-for-v4.13-rc5' of git://people.freedesktop.org/~airlied/linux: (35 commits)
  drm/nouveau/disp/nv04: avoid creation of output paths
  drm: make DRM_STM default n
  drm/exynos: forbid creating framebuffers from too small GEM buffers
  drm/etnaviv: Fix off-by-one error in reloc checking
  drm/i915: fix backlight invert for non-zero minimum brightness
  drm/i915/shrinker: Wrap need_resched() inside preempt-disable
  drm/i915/perf: fix flex eu registers programming
  drm/i915: Fix out-of-bounds array access in bdw_load_gamma_lut
  drm/i915/gvt: Change the max length of mmio_reg_rw from 4 to 8
  drm/i915/gvt: Initialize MMIO Block with HW state
  drm/rockchip: vop: report error when check resource error
  drm/rockchip: vop: round_up pitches to word align
  drm/rockchip: vop: fix NV12 video display error
  drm/rockchip: vop: fix iommu page fault when resume
  drm/i915/gvt: clean workload queue if error happened
  drm/i915/gvt: change resetting to resetting_eng
  drm/msm: gpu: don't abuse dma_alloc for non-DMA allocations
  drm/msm: gpu: call qcom_mdt interfaces only for ARCH_QCOM
  drm/msm/adreno: Prevent unclocked access when retrieving timestamps
  drm/msm: Remove __user from __u64 data types
  ...
parents 27df704d 46828dc7
......@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
{
struct sync_file *sync_file = file->private_data;
if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
if (test_bit(POLL_ENABLED, &sync_file->flags))
dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
dma_fence_put(sync_file->fence);
kfree(sync_file);
......@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
poll_wait(file, &sync_file->wq, wait);
if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
if (list_empty(&sync_file->cb.node) &&
!test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);
......
......@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* port@2 is the output port */
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
if (ret)
if (ret && ret != -ENODEV)
return ret;
/* Shut down GPIO is optional */
......
......@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
if (ret)
return ret;
if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
DRM_ERROR("relocation %u outside object", i);
if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
DRM_ERROR("relocation %u outside object\n", i);
return -EINVAL;
}
......
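For context on the off-by-one fix above: with the old ">=" test, the last offset at which a whole relocation entry still fits (size - sizeof(*ptr)) was wrongly rejected. A minimal standalone illustration, with made-up names and sizes rather than the driver's own:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical bounds check mirroring the fixed logic: an offset is valid
 * as long as a full entry of entry_size bytes still fits in obj_size. */
bool reloc_in_bounds(size_t reloc_offset, size_t obj_size, size_t entry_size)
{
	return !(reloc_offset > obj_size - entry_size);  /* ">", not ">=" */
}

int main(void)
{
	/* 4096-byte object, 8-byte entries: 4088 is the last valid offset. */
	printf("%d\n", reloc_in_bounds(4088, 4096, 8));  /* 1: now accepted */
	printf("%d\n", reloc_in_bounds(4089, 4096, 8));  /* 0: still rejected */
	return 0;
}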
......@@ -145,13 +145,19 @@ static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
struct drm_gem_object *obj;
struct drm_framebuffer *fb;
int i;
int ret;
for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
for (i = 0; i < info->num_planes; i++) {
unsigned int height = (i == 0) ? mode_cmd->height :
DIV_ROUND_UP(mode_cmd->height, info->vsub);
unsigned long size = height * mode_cmd->pitches[i] +
mode_cmd->offsets[i];
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
......@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
exynos_gem[i] = to_exynos_gem(obj);
if (size > exynos_gem[i]->size) {
i++;
ret = -EINVAL;
goto err;
}
}
fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
......
......@@ -46,6 +46,8 @@
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
static int context_switch_events[] = {
[RCS] = RCS_AS_CONTEXT_SWITCH,
[BCS] = BCS_AS_CONTEXT_SWITCH,
......@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_execlist *execlist =
&vgpu->execlist[workload->ring_id];
int ring_id = workload->ring_id;
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct intel_vgpu_workload *next_workload;
struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
struct list_head *next = workload_q_head(vgpu, ring_id)->next;
bool lite_restore = false;
int ret;
......@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
if (workload->status || vgpu->resetting)
if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
/* If workload->status is not successful, the HW GPU has hit a
 * GPU hang or something went wrong in i915/GVT, and GVT won't
 * inject a context switch interrupt into the guest, so to the
 * guest this error is effectively a vGPU hang. We therefore
 * emulate a vGPU hang: if there are pending workloads already
 * submitted by the guest, clean them up the way the HW GPU would.
 *
 * If we are in the middle of an engine reset, the pending
 * workloads won't be submitted to the HW GPU and will be
 * cleaned up later during the reset, so doing the workload
 * clean up here has no impact.
 **/
clean_workloads(vgpu, ENGINE_MASK(ring_id));
goto out;
}
if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
if (!list_empty(workload_q_head(vgpu, ring_id))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
......
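The block comment in the hunk above carries the reasoning; as a rough sketch with hypothetical types (not the GVT code), the decision it describes reduces to:

/* Sketch only: a failed workload, or one on an engine that is being
 * reset, is treated as a vGPU hang for that engine. */
struct vgpu_sketch { unsigned int resetting_eng; };

int should_flush_engine(const struct vgpu_sketch *vgpu,
			unsigned int engine_bit, int status)
{
	return status || (vgpu->resetting_eng & engine_bit);
}

If this returns true, the pending workloads queued by the guest for that engine are cleaned up (emulating a hang); otherwise the context-switch event is delivered to the guest as usual.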
......@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
struct intel_gvt_mmio_info *e;
struct gvt_mmio_block *block = gvt->mmio.mmio_block;
int num = gvt->mmio.num_mmio_block;
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
int i;
int i, j;
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
......@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
for (i = 0; i < num; i++, block++) {
for (j = 0; j < block->size; j += 4)
*(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
block->offset) + j));
}
memcpy(gvt->firmware.mmio, p, info->mmio_size);
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
......
......@@ -149,7 +149,7 @@ struct intel_vgpu {
bool active;
bool pv_notified;
bool failsafe;
bool resetting;
unsigned int resetting_eng;
void *sched_data;
struct vgpu_sched_ctl sched_ctl;
......@@ -195,6 +195,15 @@ struct intel_gvt_fence {
unsigned long vgpu_allocated_fence_num;
};
/* Special MMIO blocks. */
struct gvt_mmio_block {
unsigned int device;
i915_reg_t offset;
unsigned int size;
gvt_mmio_func read;
gvt_mmio_func write;
};
#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
......@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
unsigned int num_tracked_mmio;
};
......
......@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
return 0;
}
/* Special MMIO blocks. */
static struct gvt_mmio_block {
unsigned int device;
i915_reg_t offset;
unsigned int size;
gvt_mmio_func read;
gvt_mmio_func write;
} gvt_mmio_blocks[] = {
{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
pvinfo_mmio_read, pvinfo_mmio_write},
{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
};
static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
unsigned int offset)
{
unsigned long device = intel_gvt_get_device_type(gvt);
struct gvt_mmio_block *block = gvt_mmio_blocks;
struct gvt_mmio_block *block = gvt->mmio.mmio_block;
int num = gvt->mmio.num_mmio_block;
int i;
for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
for (i = 0; i < num; i++, block++) {
if (!(device & block->device))
continue;
if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
......@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
gvt->mmio.mmio_attribute = NULL;
}
/* Special MMIO blocks. */
static struct gvt_mmio_block mmio_blocks[] = {
{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
pvinfo_mmio_read, pvinfo_mmio_write},
{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
};
/**
* intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
* @gvt: GVT device
......@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
}
gvt->mmio.mmio_block = mmio_blocks;
gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
gvt_dbg_mmio("traced %u virtual mmio registers\n",
gvt->mmio.num_tracked_mmio);
return 0;
......@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
gvt_mmio_func func;
int ret;
if (WARN_ON(bytes > 4))
if (WARN_ON(bytes > 8))
return -EINVAL;
/*
......
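The find_mmio_block() hunk above is cut off mid-condition; purely as an assumption-labelled sketch with generic stand-in types (not the i915/GVT definitions), a block lookup of this shape is a simple range-containment scan:

#include <stddef.h>

/* Hypothetical stand-ins for the GVT block type; only offset/size matter. */
struct mmio_block_sketch {
	unsigned int start;	/* first MMIO offset covered by the block */
	unsigned int size;	/* length of the block in bytes */
};

/* Return the block containing 'offset', or NULL if it falls outside all
 * of them -- presumably what the truncated condition in the hunk tests. */
struct mmio_block_sketch *
find_block(struct mmio_block_sketch *blocks, size_t num, unsigned int offset)
{
	size_t i;

	for (i = 0; i < num; i++)
		if (offset >= blocks[i].start &&
		    offset < blocks[i].start + blocks[i].size)
			return &blocks[i];
	return NULL;
}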
......@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
i915_gem_request_put(fetch_and_zero(&workload->req));
if (!workload->status && !vgpu->resetting) {
if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
......
......@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
vgpu->id, dmlr, engine_mask);
vgpu->resetting = true;
vgpu->resetting_eng = resetting_eng;
intel_vgpu_stop_schedule(vgpu);
/*
......@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
mutex_lock(&gvt->lock);
}
intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
intel_vgpu_reset_execlist(vgpu, resetting_eng);
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
......@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
}
}
vgpu->resetting = false;
vgpu->resetting_eng = 0;
gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
gvt_dbg_core("------------------------------------------\n");
}
......
......@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
return true;
case MUTEX_TRYLOCK_FAILED:
*unlock = false;
preempt_disable();
do {
cpu_relax();
if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
case MUTEX_TRYLOCK_SUCCESS:
*unlock = true;
return true;
break;
}
} while (!need_resched());
preempt_enable();
return *unlock;
return false;
case MUTEX_TRYLOCK_SUCCESS:
*unlock = true;
return true;
}
BUG();
......
......@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
u32 *cs;
int i;
cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
......
......@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
}
/* Program the max register to clamp values > 1.0. */
i = lut_size - 1;
I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
drm_color_lut_extract(lut[i].red, 16));
I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
......
......@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
if (i915.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
return panel->backlight.max - val;
return panel->backlight.max - val + panel->backlight.min;
}
return val;
......
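A small worked example of the brightness-invert fix above (values are made up; min/max stand for panel->backlight.min/max): with a non-zero minimum, the old "max - val" can fall below the minimum, while "max - val + min" maps [min, max] onto itself in reverse.

#include <stdio.h>

unsigned int invert_old(unsigned int val, unsigned int max)
{
	return max - val;		/* val == max -> 0, below a non-zero min */
}

unsigned int invert_new(unsigned int val, unsigned int min, unsigned int max)
{
	return max - val + min;		/* val == max -> min, val == min -> max */
}

int main(void)
{
	/* e.g. min = 10, max = 100 */
	printf("old: %u %u\n", invert_old(100, 100), invert_old(10, 100));	   /* 0, 90 */
	printf("new: %u %u\n", invert_new(100, 10, 100), invert_new(10, 10, 100)); /* 10, 100 */
	return 0;
}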
......@@ -5,7 +5,7 @@ config DRM_MSM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
select QCOM_MDT_LOADER
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
......
......@@ -15,7 +15,7 @@
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
#include <linux/of_reserved_mem.h>
#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "msm_gem.h"
#include "msm_mmu.h"
......@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
const struct firmware *fw;
struct device_node *np;
struct resource r;
phys_addr_t mem_phys;
ssize_t mem_size;
void *mem_region = NULL;
int ret;
if (!IS_ENABLED(CONFIG_ARCH_QCOM))
return -EINVAL;
np = of_get_child_by_name(dev->of_node, "zap-shader");
if (!np)
return -ENODEV;
np = of_parse_phandle(np, "memory-region", 0);
if (!np)
return -EINVAL;
ret = of_address_to_resource(np, 0, &r);
if (ret)
return ret;
mem_phys = r.start;
mem_size = resource_size(&r);
/* Request the MDT file for the firmware */
ret = request_firmware(&fw, fwname, dev);
if (ret) {
......@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
}
/* Allocate memory for the firmware image */
mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
if (!mem_region) {
ret = -ENOMEM;
goto out;
......@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
out:
if (mem_region)
memunmap(mem_region);
release_firmware(fw);
return ret;
}
#else
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
return -ENODEV;
}
#endif
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
......@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
gpu->funcs->flush(gpu);
}
struct a5xx_hwcg {
static const struct {
u32 offset;
u32 value;
};
static const struct a5xx_hwcg a530_hwcg[] = {
} a5xx_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
......@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};
static const struct {
int (*test)(struct adreno_gpu *gpu);
const struct a5xx_hwcg *regs;
unsigned int count;
} a5xx_hwcg_regs[] = {
{ adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
};
static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
const struct a5xx_hwcg *regs, unsigned int count)
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
unsigned int i;
for (i = 0; i < count; i++)
gpu_write(gpu, regs[i].offset, regs[i].value);
for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
gpu_write(gpu, a5xx_hwcg[i].offset,
state ? a5xx_hwcg[i].value : 0);
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
}
static void a5xx_enable_hwcg(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
_a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
a5xx_hwcg_regs[i].count);
return;
}
}
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
static int a5xx_me_init(struct msm_gpu *gpu)
......@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
return ret;
}
/* Set up a child device to "own" the zap shader */
static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
{
struct device_node *node;
int ret;
if (dev->parent)
return 0;
/* Find the sub-node for the zap shader */
node = of_get_child_by_name(parent->of_node, "zap-shader");
if (!node) {
DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
return -ENODEV;
}
dev->parent = parent;
dev->of_node = node;
dev_set_name(dev, "adreno_zap_shader");
ret = device_register(dev);
if (ret) {
DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
goto out;
}
ret = of_reserved_mem_device_init(dev);
if (ret) {
DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
device_unregister(dev);
}
out:
if (ret)
dev->parent = NULL;
return ret;
}
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
......@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
return -ENODEV;
}
ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
if (!ret)
ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
adreno_gpu->info->zapfw);
ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
loaded = !ret;
......@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
/* Enable HWCG */
a5xx_enable_hwcg(gpu);
a5xx_set_hwcg(gpu, true);
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
......@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
if (a5xx_gpu->zap_dev.parent)
device_unregister(&a5xx_gpu->zap_dev);
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
......@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,