Commit 16e205cf authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.17-rc1' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "One omap, and one alsa pm fix (we merged the breaking patch via drm
  tree).

  Otherwise it's two bunches of amdgpu fixes, removing an unneeded file,
  some DC fixes, HDMI audio regression fix, and some vega12 fixes"

* tag 'drm-fixes-for-v4.17-rc1' of git://people.freedesktop.org/~airlied/linux: (27 commits)
  Revert "drm/amd/display: disable CRTCs with NULL FB on their primary plane (V2)"
  Revert "drm/amd/display: fix dereferencing possible ERR_PTR()"
  drm/amd/display: Fix regamma not affecting full-intensity color values
  drm/amd/display: Fix FBC text console corruption
  drm/amd/display: Only register backlight device if embedded panel connected
  drm/amd/display: fix brightness level after resume from suspend
  drm/amd/display: HDMI has no sound after Panel power off/on
  drm/amdgpu: add MP1 and THM hw ip base reg offset
  drm/amdgpu: fix null pointer panic with direct fw loading on gpu reset
  drm/radeon: add PX quirk for Asus K73TK
  drm/omap: fix crash if there's no video PLL
  drm/amdgpu: Fix memory leaks at amdgpu_init() error path
  drm/amdgpu: Fix PCIe lane width calculation
  drm/radeon: Fix PCIe lane width calculation
  drm/amdgpu/si: implement get/set pcie_lanes asic callback
  drm/amdgpu: Add support for SRBM selection v3
  Revert "drm/amdgpu: Don't change preferred domian when fallback GTT v5"
  drm/amd/powerplay: fix power reading on Fiji
  drm/amd/powerplay: Enable ACG SS feature
  drm/amdgpu/sdma: fix mask in emit_pipeline_sync
  ...
parents affb0280 a10beabb
@@ -890,6 +890,7 @@ struct amdgpu_gfx_funcs {
void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
+void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue);
};
struct amdgpu_ngg_buf {
@@ -1378,6 +1379,7 @@ enum amd_hw_ip_block_type {
ATHUB_HWIP,
NBIO_HWIP,
MP0_HWIP,
+MP1_HWIP,
UVD_HWIP,
VCN_HWIP = UVD_HWIP,
VCE_HWIP,
@@ -1387,6 +1389,7 @@ enum amd_hw_ip_block_type {
SMUIO_HWIP,
PWR_HWIP,
NBIF_HWIP,
+THM_HWIP,
MAX_HWIP
};
@@ -1812,6 +1815,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
+#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
/* Common functions */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
......
@@ -64,16 +64,21 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
#if defined(CONFIG_DEBUG_FS)
-static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
-size_t size, loff_t *pos)
+static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
+char __user *buf, size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
-bool pm_pg_lock, use_bank;
-unsigned instance_bank, sh_bank, se_bank;
+bool pm_pg_lock, use_bank, use_ring;
+unsigned instance_bank, sh_bank, se_bank, me, pipe, queue;
-if (size & 0x3 || *pos & 0x3)
+pm_pg_lock = use_bank = use_ring = false;
+instance_bank = sh_bank = se_bank = me = pipe = queue = 0;
+if (size & 0x3 || *pos & 0x3 ||
+((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
return -EINVAL;
/* are we reading registers for which a PG lock is necessary? */
@@ -91,8 +96,15 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
if (instance_bank == 0x3FF)
instance_bank = 0xFFFFFFFF;
use_bank = 1;
+} else if (*pos & (1ULL << 61)) {
+me = (*pos & GENMASK_ULL(33, 24)) >> 24;
+pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
+queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
+use_ring = 1;
} else {
-use_bank = 0;
+use_bank = use_ring = 0;
}
*pos &= (1UL << 22) - 1;
@@ -104,6 +116,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
sh_bank, instance_bank);
+} else if (use_ring) {
+mutex_lock(&adev->srbm_mutex);
+amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue);
}
if (pm_pg_lock)
@@ -115,8 +130,14 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
if (*pos > adev->rmmio_size)
goto end;
-value = RREG32(*pos >> 2);
-r = put_user(value, (uint32_t *)buf);
+if (read) {
+value = RREG32(*pos >> 2);
+r = put_user(value, (uint32_t *)buf);
+} else {
+r = get_user(value, (uint32_t *)buf);
+if (!r)
+WREG32(*pos >> 2, value);
+}
if (r) {
result = r;
goto end;
@@ -132,6 +153,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
if (use_bank) {
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
+} else if (use_ring) {
+amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0);
+mutex_unlock(&adev->srbm_mutex);
}
if (pm_pg_lock)
@@ -140,78 +164,17 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
return result;
}
+static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
+size_t size, loff_t *pos)
+{
+return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
+}
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
-struct amdgpu_device *adev = file_inode(f)->i_private;
-ssize_t result = 0;
-int r;
-bool pm_pg_lock, use_bank;
-unsigned instance_bank, sh_bank, se_bank;
-if (size & 0x3 || *pos & 0x3)
-return -EINVAL;
-/* are we reading registers for which a PG lock is necessary? */
-pm_pg_lock = (*pos >> 23) & 1;
-if (*pos & (1ULL << 62)) {
-se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
-sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
-instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
-if (se_bank == 0x3FF)
-se_bank = 0xFFFFFFFF;
-if (sh_bank == 0x3FF)
-sh_bank = 0xFFFFFFFF;
-if (instance_bank == 0x3FF)
-instance_bank = 0xFFFFFFFF;
-use_bank = 1;
-} else {
-use_bank = 0;
-}
-*pos &= (1UL << 22) - 1;
-if (use_bank) {
-if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
-(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
-return -EINVAL;
-mutex_lock(&adev->grbm_idx_mutex);
-amdgpu_gfx_select_se_sh(adev, se_bank,
-sh_bank, instance_bank);
-}
-if (pm_pg_lock)
-mutex_lock(&adev->pm.mutex);
-while (size) {
-uint32_t value;
-if (*pos > adev->rmmio_size)
-return result;
-r = get_user(value, (uint32_t *)buf);
-if (r)
-return r;
-WREG32(*pos >> 2, value);
-result += 4;
-buf += 4;
-*pos += 4;
-size -= 4;
-}
-if (use_bank) {
-amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-mutex_unlock(&adev->grbm_idx_mutex);
-}
-if (pm_pg_lock)
-mutex_unlock(&adev->pm.mutex);
-return result;
+return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
......
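For orientation (an editor's sketch, not part of the patch): the unified handler above treats the debugfs file offset as a small command word. Bit 62 selects SE/SH/instance banking, bit 61 selects me/pipe/queue banking (setting both is rejected with -EINVAL), bit 23 takes the PM PG lock, and the low 22 bits carry the register's byte offset. A minimal userspace sketch of composing such an offset; the helper name, the pread() usage, and the /sys/kernel/debug/dri/0/amdgpu_regs path are illustrative assumptions:

#include <stdint.h>

/* Mirrors the GENMASK_ULL(33,24)/(43,34)/(53,44) decode above. */
static uint64_t amdgpu_regs_pos(uint32_t reg_byte_off, uint64_t me,
                                uint64_t pipe, uint64_t queue)
{
        return (1ULL << 61) |                        /* use_ring banking */
               (me << 24) | (pipe << 34) | (queue << 44) |
               (reg_byte_off & ((1ULL << 22) - 1));  /* register offset */
}

/* Usage sketch:
 *   uint32_t val;
 *   pread(fd, &val, 4, amdgpu_regs_pos(reg << 2, 1, 0, 2));
 * where fd is the opened amdgpu_regs debugfs node.
 */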
@@ -922,6 +922,11 @@ static int __init amdgpu_init(void)
{
int r;
+if (vgacon_text_force()) {
+DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
+return -EINVAL;
+}
r = amdgpu_sync_init();
if (r)
goto error_sync;
@@ -930,10 +935,6 @@ static int __init amdgpu_init(void)
if (r)
goto error_fence;
-if (vgacon_text_force()) {
-DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
-return -EINVAL;
-}
DRM_INFO("amdgpu kernel modesetting enabled.\n");
driver = &kms_driver;
pdriver = &amdgpu_kms_pci_driver;
......
@@ -410,6 +410,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission)
{
+long timeout;
int r;
/* Check that num_hw_submission is a power of two */
@@ -433,11 +434,16 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
+/* for non-sriov case, no timeout enforce on compute ring */
+if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+&& !amdgpu_sriov_vf(ring->adev))
+timeout = MAX_SCHEDULE_TIMEOUT;
+else
+timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission, amdgpu_job_hang_limit,
-(ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ?
-MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(amdgpu_lockup_timeout),
-ring->name);
+timeout, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
......
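A quick worked example of what the new timeout variable holds (an illustration; HZ=250 and the 10000 ms amdgpu_lockup_timeout default are assumptions here, not part of the hunk):

/* Bare-metal compute ring: the scheduler never times the job out. */
timeout = MAX_SCHEDULE_TIMEOUT;        /* == LONG_MAX */

/* Any other ring, or compute under SR-IOV: the lockup timeout applies. */
timeout = msecs_to_jiffies(10000);     /* 10000 ms at HZ=250 -> 2500 jiffies */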
@@ -56,11 +56,23 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
alignment = PAGE_SIZE;
}
+retry:
r = amdgpu_bo_create(adev, size, alignment, initial_domain,
flags, type, resv, &bo);
if (r) {
-DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
-size, initial_domain, alignment, r);
+if (r != -ERESTARTSYS) {
+if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+goto retry;
+}
+if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+goto retry;
+}
+DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+size, initial_domain, alignment, r);
+}
return r;
}
*obj = &bo->gem_base;
......
@@ -356,7 +356,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
struct amdgpu_bo *bo;
unsigned long page_align;
size_t acc_size;
-u32 domains;
int r;
page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
@@ -418,23 +417,12 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
#endif
bo->tbo.bdev = &adev->mman.bdev;
-domains = bo->preferred_domains;
-retry:
-amdgpu_ttm_placement_from_domain(bo, domains);
+amdgpu_ttm_placement_from_domain(bo, domain);
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, &ctx, acc_size,
NULL, resv, &amdgpu_ttm_bo_destroy);
-if (unlikely(r && r != -ERESTARTSYS)) {
-if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
-bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-goto retry;
-} else if (domains != bo->preferred_domains) {
-domains = bo->allowed_domains;
-goto retry;
-}
-}
-if (unlikely(r))
+if (unlikely(r != 0))
return r;
if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
......
@@ -505,6 +505,9 @@ static int psp_resume(void *handle)
int psp_gpu_reset(struct amdgpu_device *adev)
{
+if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+return 0;
return psp_mode1_reset(&adev->psp);
}
......
@@ -859,7 +859,7 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
-amdgpu_ring_write(ring, 0xfffffff); /* mask */
+amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}
......
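Why the wider mask matters (an editor's illustration, not driver code): the poll packet keeps comparing the fetched memory value, ANDed with the mask, against the full 32-bit reference, so once fence sequence numbers pass 28 bits the old 0xfffffff mask strips bits the reference still contains and the compare misbehaves:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t seq = 0x10000000;  /* fence sequence past 28 bits */
        uint32_t mem = seq;         /* fence value the GPU polls */

        assert(((mem & 0x0fffffff) == seq) == 0); /* old mask: never matches */
        assert(((mem & 0xffffffff) == seq) == 1); /* new mask: matches */
        return 0;
}

The same one-nibble fix repeats below for sdma_v2_4, sdma_v3_0 and sdma_v4_0.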
@@ -3061,11 +3061,18 @@ static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
+u32 me, u32 pipe, u32 q)
+{
+DRM_INFO("Not implemented\n");
+}
static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v6_0_select_se_sh,
.read_wave_data = &gfx_v6_0_read_wave_data,
.read_wave_sgprs = &gfx_v6_0_read_wave_sgprs,
+.select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
};
static int gfx_v6_0_early_init(void *handle)
......
@@ -4270,11 +4270,18 @@ static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
+u32 me, u32 pipe, u32 q)
+{
+cik_srbm_select(adev, me, pipe, q, 0);
+}
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,
.read_wave_data = &gfx_v7_0_read_wave_data,
.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
+.select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
......
@@ -3475,6 +3475,12 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
WREG32(mmGRBM_GFX_INDEX, data);
}
+static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
+u32 me, u32 pipe, u32 q)
+{
+vi_srbm_select(adev, me, pipe, q, 0);
+}
static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
@@ -5442,6 +5448,7 @@ static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.select_se_sh = &gfx_v8_0_select_se_sh,
.read_wave_data = &gfx_v8_0_read_wave_data,
.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
+.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
};
static int gfx_v8_0_early_init(void *handle)
......
@@ -998,12 +998,19 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
+static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
+u32 me, u32 pipe, u32 q)
+{
+soc15_grbm_select(adev, me, pipe, q, 0);
+}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_0_select_se_sh,
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -2757,6 +2764,45 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
return 0;
}
+static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
+{
+struct amdgpu_device *adev = ring->adev;
+int j;
+/* disable the queue if it's active */
+if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
+WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
+for (j = 0; j < adev->usec_timeout; j++) {
+if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+break;
+udelay(1);
+}
+/* compare against the loop bound, not AMDGPU_MAX_USEC_TIMEOUT,
+ * so the timeout path triggers exactly when the poll expires */
+if (j == adev->usec_timeout) {
+DRM_DEBUG("KIQ dequeue request failed.\n");
+/* Manual disable if dequeue request times out */
+WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
+}
+WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0);
+}
+WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
+WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
+WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
+WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
+WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
+WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
+WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
+return 0;
+}
static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3010,7 +3056,6 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring
return r;
}
static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3033,6 +3078,20 @@ static int gfx_v9_0_hw_fini(void *handle)
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
return 0;
}
+/* Use deinitialize sequence from CAIL when unbinding device from driver,
+ * otherwise KIQ is hanging when binding back
+ */
+if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
+mutex_lock(&adev->srbm_mutex);
+soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
+adev->gfx.kiq.ring.pipe,
+adev->gfx.kiq.ring.queue, 0);
+gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
+soc15_grbm_select(adev, 0, 0, 0, 0);
+mutex_unlock(&adev->srbm_mutex);
+}
gfx_v9_0_cp_enable(adev, false);
gfx_v9_0_rlc_stop(adev);
......
@@ -837,7 +837,7 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
-amdgpu_ring_write(ring, 0xfffffff); /* mask */
+amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
......
@@ -1105,7 +1105,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
-amdgpu_ring_write(ring, 0xfffffff); /* mask */
+amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
......
@@ -1121,7 +1121,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
-amdgpu_ring_write(ring, 0xfffffff); /* mask */
+amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
......
@@ -1252,6 +1252,71 @@ static void si_invalidate_hdp(struct amdgpu_device *adev,
}
}
+static int si_get_pcie_lanes(struct amdgpu_device *adev)
+{
+u32 link_width_cntl;
+if (adev->flags & AMD_IS_APU)
+return 0;
+link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
+case LC_LINK_WIDTH_X1:
+return 1;
+case LC_LINK_WIDTH_X2:
+return 2;
+case LC_LINK_WIDTH_X4:
+return 4;
+case LC_LINK_WIDTH_X8:
+return 8;
+case LC_LINK_WIDTH_X0:
+case LC_LINK_WIDTH_X16:
+default:
+return 16;
+}
+}
+static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
+{
+u32 link_width_cntl, mask;
+if (adev->flags & AMD_IS_APU)
+return;
+switch (lanes) {
+case 0:
+mask = LC_LINK_WIDTH_X0;
+break;
+case 1:
+mask = LC_LINK_WIDTH_X1;
+break;
+case 2:
+mask = LC_LINK_WIDTH_X2;
+break;
+case 4:
+mask = LC_LINK_WIDTH_X4;
+break;
+case 8:
+mask = LC_LINK_WIDTH_X8;
+break;
+case 16:
+mask = LC_LINK_WIDTH_X16;
+break;
+default:
+DRM_ERROR("invalid pcie lane request: %d\n", lanes);
+return;
+}
+link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+link_width_cntl &= ~LC_LINK_WIDTH_MASK;
+link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
+link_width_cntl |= (LC_RECONFIG_NOW |
+LC_RECONFIG_ARC_MISSING_ESCAPE);
+WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+}
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
@@ -1262,6 +1327,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.get_xclk = &si_get_xclk,
.set_uvd_clocks = &si_set_uvd_clocks,
.set_vce_clocks = NULL,
+.get_pcie_lanes = &si_get_pcie_lanes,
+.set_pcie_lanes = &si_set_pcie_lanes,
.get_config_memsize = &si_get_config_memsize,
.flush_hdp = &si_flush_hdp,
.invalidate_hdp = &si_invalidate_hdp,
......
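With both callbacks wired into si_asic_funcs above, power-management code can query and request link widths through the generic hooks. A hedged sketch: amdgpu_set_pcie_lanes() appears in si_dpm.c below, while the direct ->get_pcie_lanes() call stands in for whatever getter wrapper the tree provides:

/* Read the current width, then ask for x8 if we are not already there. */
int cur = adev->asic_funcs->get_pcie_lanes(adev);   /* 1/2/4/8/16, 0 on APU */
if (cur != 8)
        amdgpu_set_pcie_lanes(adev, 8);             /* routed to si_set_pcie_lanes() */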
@@ -6372,9 +6372,9 @@ static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
{
u32 lane_width;
u32 new_lane_width =
-(amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
-(amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
amdgpu_set_pcie_lanes(adev, new_lane_width);
......
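The +1 is the whole fix: the ATOM_PPLIB caps field encodes the link width off by one, while amdgpu_set_pcie_lanes() expects an actual lane count (1/2/4/8/16, as si_set_pcie_lanes() above enforces). A worked example under that assumption:

u32 encoded = (caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
              ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;  /* e.g. 15 from the table */
u32 lanes = encoded + 1;                         /* 16 -> a x16 link */
amdgpu_set_pcie_lanes(adev, lanes);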
@@ -38,6 +38,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
@@ -49,7 +50,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
+adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
}
return 0;
}
......
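These per-IP base tables are what the SOC15 register helpers index. A sketch of how the new MP1/THM entries get used, assuming the usual soc15_common.h pattern of this era (mmFOO is a placeholder register name, not a real register):

#define SOC15_REG_OFFSET(ip, inst, reg) \
        (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

/* RREG32_SOC15(THM, 0, mmFOO) therefore resolves to an offset relative
 * to the THM_BASE segment registered above. */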
@@ -1403,6 +1403,28 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
return ret;
}
+static void register_backlight_device(struct amdgpu_display_manager *dm,
+struct dc_link *link)
+{
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
+link->type != dc_connection_none) {
+/* Even if registration failed, we should continue with
+ * DM initialization because not having a backlight control
+ * is better than a black screen.
+ */
+amdgpu_dm_register_backlight_device(dm);
+if (dm->backlight_dev)
+dm->backlight_link = link;
+}
+#endif
+}
/* In this architecture, the association
* connector -> encoder -> crtc
* is not really required. The crtc and connector will hold the
@@ -1456,6 +1478,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
+struct dc_link *link = NULL;
if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
DRM_ERROR(
@@ -1482,9 +1505,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
-if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
-DETECT_REASON_BOOT))
+link = dc_get_link_at_index(dm->dc, i);
+if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
+register_backlight_device(dm, link);
+}
}
/* Software is initialized. Now we can register interrupt handlers. */
@@ -2685,7 +2713,8 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
+link->type != dc_connection_none) {
amdgpu_dm_register_backlight_device(dm);
if (dm->backlight_dev) {
@@ -3561,6 +3590,7 @@ create_i2c(struct ddc_service *ddc_service,
return i2c;
}