// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 MediaTek Inc.
* Authors:
* Stanley Chu <stanley.chu@mediatek.com>
* Peter Wang <peter.wang@mediatek.com>
*/
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"
#include "ufs-mediatek-sip.h"
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
#undef CREATE_TRACE_POINTS
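/*
 * MAX_SUPP_MAC caps the advertised MAC (Max Active Commands) value.
 * MCQ_QUEUE_OFFSET() takes bits 23:16 of an MCQ queue capability word
 * and scales them by the 0x200-byte per-queue register stride.
 */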
#define MAX_SUPP_MAC 64
#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
{ .wmanufacturerid = UFS_ANY_VENDOR,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = "H9HQ21AFAMZDAR",
.quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
{}
};
static const struct of_device_id ufs_mtk_of_match[] = {
{ .compatible = "mediatek,mt8183-ufshci" },
{},
};
/*
* Details of UIC Errors
*/
static const char *const ufs_uic_err_str[] = {
"PHY Adapter Layer",
"Data Link Layer",
"Network Link Layer",
"Transport Link Layer",
"DME"
};
static const char *const ufs_uic_pa_err_str[] = {
"PHY error on Lane 0",
"PHY error on Lane 1",
"PHY error on Lane 2",
"PHY error on Lane 3",
"Generic PHY Adapter Error. This should be the LINERESET indication"
};
static const char *const ufs_uic_dl_err_str[] = {
"NAC_RECEIVED",
"TCx_REPLAY_TIMER_EXPIRED",
"AFCx_REQUEST_TIMER_EXPIRED",
"FCx_PROTECTION_TIMER_EXPIRED",
"CRC_ERROR",
"RX_BUFFER_OVERFLOW",
"MAX_FRAME_LENGTH_EXCEEDED",
"WRONG_SEQUENCE_NUMBER",
"AFC_FRAME_SYNTAX_ERROR",
"NAC_FRAME_SYNTAX_ERROR",
"EOF_SYNTAX_ERROR",
"FRAME_SYNTAX_ERROR",
"BAD_CTRL_SYMBOL_TYPE",
"PA_INIT_ERROR",
"PA_ERROR_IND_RECEIVED",
"PA_INIT"
};
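/* Helpers to query MediaTek-specific capability bits in host->caps */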
static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}
static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}
static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}
static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}
static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX);
}
static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM);
}
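/*
 * Enable or disable UniPro clock gating through the vendor-specific
 * VS_SAVEPOWERCONTROL and VS_DEBUGCLOCKENABLE DME attributes.
 */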
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
if (enable) {
ufshcd_dme_get(hba,
UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
tmp = tmp |
(1 << RX_SYMBOL_CLK_GATE_EN) |
(1 << SYS_CLK_GATE_EN) |
(1 << TX_CLK_GATE_EN);
ufshcd_dme_set(hba,
UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
ufshcd_dme_get(hba,
UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
ufshcd_dme_set(hba,
UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
} else {
ufshcd_dme_get(hba,
UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
(1 << SYS_CLK_GATE_EN) |
(1 << TX_CLK_GATE_EN));
ufshcd_dme_set(hba,
UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
ufshcd_dme_get(hba,
UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
ufshcd_dme_set(hba,
UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
}
}
static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
struct arm_smccc_res res;
ufs_mtk_crypto_ctrl(res, 1);
if (res.a0) {
dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
__func__, res.a0);
hba->caps &= ~UFSHCD_CAP_CRYPTO;
}
}
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
reset_control_assert(host->hci_reset);
reset_control_assert(host->crypto_reset);
reset_control_assert(host->unipro_reset);
reset_control_assert(host->mphy_reset);
usleep_range(100, 110);
reset_control_deassert(host->unipro_reset);
reset_control_deassert(host->crypto_reset);
reset_control_deassert(host->hci_reset);
reset_control_deassert(host->mphy_reset);
/* restore mphy setting after mphy reset */
if (host->mphy_reset)
ufs_mtk_mphy_ctrl(UFS_MPHY_RESTORE, res);
}
static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
struct reset_control **rc,
char *str)
{
*rc = devm_reset_control_get(hba->dev, str);
if (IS_ERR(*rc)) {
dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
str, PTR_ERR(*rc));
*rc = NULL;
}
}
static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
ufs_mtk_init_reset_control(hba, &host->hci_reset,
"hci_rst");
ufs_mtk_init_reset_control(hba, &host->unipro_reset,
"unipro_rst");
ufs_mtk_init_reset_control(hba, &host->crypto_reset,
"crypto_rst");
ufs_mtk_init_reset_control(hba, &host->mphy_reset,
"mphy_rst");
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (status == PRE_CHANGE) {
if (host->unipro_lpm) {
hba->vps->hba_enable_delay_us = 0;
} else {
hba->vps->hba_enable_delay_us = 600;
ufs_mtk_host_reset(hba);
}
if (hba->caps & UFSHCD_CAP_CRYPTO)
ufs_mtk_crypto_enable(hba);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
ufshcd_writel(hba, 0,
REG_AUTO_HIBERNATE_IDLE_TIMER);
hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
hba->ahit = 0;
}
/*
* Turn on CLK_CG early to bypass abnormal ERR_CHK signal
* to prevent host hang issue
*/
ufshcd_writel(hba,
ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
REG_UFS_XOUFS_CTRL);
}
return 0;
}
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct device *dev = hba->dev;
struct device_node *np = dev->of_node;
int err = 0;
host->mphy = devm_of_phy_get_by_index(dev, np, 0);
if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
/*
* The UFS driver may be probed before the phy driver.
* In that case, return -EPROBE_DEFER so the probe is retried.
*/
err = -EPROBE_DEFER;
dev_info(dev,
"%s: required phy hasn't probed yet. err = %d\n",
__func__, err);
} else if (IS_ERR(host->mphy)) {
err = PTR_ERR(host->mphy);
if (err != -ENODEV) {
dev_info(dev, "%s: PHY get failed %d\n", __func__,
err);
}
}
if (err)
host->mphy = NULL;
/*
* Allow unbound mphy because not every platform needs specific
* mphy control.
*/
if (err == -ENODEV)
err = 0;
return err;
}
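/*
 * Request or release the reference clock via REG_UFS_REFCLK_CTRL,
 * then poll until the controller acks (ack bit matching the req bit).
 */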
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
ktime_t timeout, time_checked;
u32 value;
if (host->ref_clk_enabled == on)
return 0;
ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
if (on) {
ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
} else {
ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
}
/* Wait for ack */
timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
do {
time_checked = ktime_get();
value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
/* Wait until the ack bit matches the req bit */
if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
goto out;
usleep_range(100, 200);
} while (ktime_before(time_checked, timeout));
dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
return -ETIMEDOUT;
out:
host->ref_clk_enabled = on;
if (on)
ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
return 0;
}
static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
u16 gating_us)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (hba->dev_info.clk_gating_wait_us) {
host->ref_clk_gating_wait_us =
hba->dev_info.clk_gating_wait_us;
} else {
host->ref_clk_gating_wait_us = gating_us;
}
host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}
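/*
 * Select which internal debug signals are routed to REG_UFS_PROBE;
 * the selector layout changed with IP version 0x36.
 */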
static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
} else {
ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
}
}
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
unsigned long retry_ms)
{
u64 timeout, time_checked;
u32 val, sm;
bool wait_idle;
/* cannot use plain ktime_get() in suspend */
timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
/* wait a specific time after check base */
udelay(10);
wait_idle = false;
do {
time_checked = ktime_get_mono_fast_ns();
ufs_mtk_dbg_sel(hba);
val = ufshcd_readl(hba, REG_UFS_PROBE);
sm = val & 0x1f;
/*
* if state is in H8 enter and H8 enter confirm
* wait until return to idle state.
*/
if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
wait_idle = true;
udelay(50);
continue;
} else if (!wait_idle)
break;
if (wait_idle && (sm == VS_HCE_BASE))
break;
} while (time_checked < timeout);
if (wait_idle && sm != VS_HCE_BASE)
dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}
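/* Poll the link state probed at REG_UFS_PROBE[31:28] until it matches @state */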
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
unsigned long max_wait_ms)
{
ktime_t timeout, time_checked;
u32 val;
timeout = ktime_add_ms(ktime_get(), max_wait_ms);
do {
time_checked = ktime_get();
ufs_mtk_dbg_sel(hba);
val = ufshcd_readl(hba, REG_UFS_PROBE);
val = val >> 28;
if (val == state)
return 0;
/* Sleep for max. 200us */
usleep_range(100, 200);
} while (ktime_before(time_checked, timeout));
return -ETIMEDOUT;
}
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct phy *mphy = host->mphy;
struct arm_smccc_res res;
int ret = 0;
if (!mphy || !(on ^ host->mphy_powered_on))
return 0;
if (on) {
if (ufs_mtk_is_va09_supported(hba)) {
ret = regulator_enable(host->reg_va09);
if (ret < 0)
goto out;
/* wait 200 us to stabilize VA09 */
usleep_range(200, 210);
ufs_mtk_va09_pwr_ctrl(res, 1);
}
phy_power_on(mphy);
} else {
phy_power_off(mphy);
if (ufs_mtk_is_va09_supported(hba)) {
ufs_mtk_va09_pwr_ctrl(res, 0);
ret = regulator_disable(host->reg_va09);
}
}
out:
if (ret) {
dev_info(hba->dev,
"failed to %s va09: %d\n",
on ? "enable" : "disable",
ret);
} else {
host->mphy_powered_on = on;
}
return ret;
}
static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
struct clk **clk_out)
{
struct clk *clk;
int err = 0;
clk = devm_clk_get(dev, name);
if (IS_ERR(clk))
err = PTR_ERR(clk);
else
*clk_out = clk;
return err;
}
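/*
 * Move the crypto engine clock between its low-power and performance
 * parents, raising or releasing the vcore voltage floor to match.
 */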
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_mtk_crypt_cfg *cfg;
struct regulator *reg;
int volt, ret;
if (!ufs_mtk_is_boost_crypt_enabled(hba))
return;
cfg = host->crypt;
volt = cfg->vcore_volt;
reg = cfg->reg_vcore;
ret = clk_prepare_enable(cfg->clk_crypt_mux);
if (ret) {
dev_info(hba->dev, "clk_prepare_enable(): %d\n",
ret);
return;
}
if (boost) {
ret = regulator_set_voltage(reg, volt, INT_MAX);
if (ret) {
dev_info(hba->dev,
"failed to set vcore to %d\n", volt);
goto out;
}
ret = clk_set_parent(cfg->clk_crypt_mux,
cfg->clk_crypt_perf);
if (ret) {
dev_info(hba->dev,
"failed to set clk_crypt_perf\n");
regulator_set_voltage(reg, 0, INT_MAX);
goto out;
}
} else {
ret = clk_set_parent(cfg->clk_crypt_mux,
cfg->clk_crypt_lp);
if (ret) {
dev_info(hba->dev,
"failed to set clk_crypt_lp\n");
goto out;
}
ret = regulator_set_voltage(reg, 0, INT_MAX);
if (ret) {
dev_info(hba->dev,
"failed to set vcore to MIN\n");
}
}
out:
clk_disable_unprepare(cfg->clk_crypt_mux);
}
static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
struct clk **clk)
{
int ret;
ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
if (ret) {
dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
name, ret);
}
return ret;
}
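/*
 * Gather the regulator, clocks and DT properties needed for crypto
 * boosting; on any failure the capability is simply left unset.
 */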
static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_mtk_crypt_cfg *cfg;
struct device *dev = hba->dev;
struct regulator *reg;
u32 volt;
host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
GFP_KERNEL);
if (!host->crypt)
goto disable_caps;
reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
if (IS_ERR(reg)) {
dev_info(dev, "failed to get dvfsrc-vcore: %ld",
PTR_ERR(reg));
goto disable_caps;
}
if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
&volt)) {
dev_info(dev, "failed to get boost-crypt-vcore-min");
goto disable_caps;
}
cfg = host->crypt;
if (ufs_mtk_init_host_clk(hba, "crypt_mux",
&cfg->clk_crypt_mux))
goto disable_caps;
if (ufs_mtk_init_host_clk(hba, "crypt_lp",
&cfg->clk_crypt_lp))
goto disable_caps;
if (ufs_mtk_init_host_clk(hba, "crypt_perf",
&cfg->clk_crypt_perf))
goto disable_caps;
cfg->reg_vcore = reg;
cfg->vcore_volt = volt;
host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
disable_caps:
return;
}
static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
host->reg_va09 = regulator_get(hba->dev, "va09");
if (IS_ERR(host->reg_va09))
dev_info(hba->dev, "failed to get va09");
else
host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct device_node *np = hba->dev->of_node;
if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
ufs_mtk_init_boost_crypt(hba);
if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
ufs_mtk_init_va09_pwr_ctrl(hba);
if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
host->caps |= UFS_MTK_CAP_DISABLE_AH8;
if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
host->caps |= UFS_MTK_CAP_BROKEN_VCC;
if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
if (of_property_read_bool(np, "mediatek,ufs-tx-skew-fix"))
host->caps |= UFS_MTK_CAP_TX_SKEW_FIX;
if (of_property_read_bool(np, "mediatek,ufs-disable-mcq"))
host->caps |= UFS_MTK_CAP_DISABLE_MCQ;
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
ufs_mtk_boost_crypt(hba, scale_up);
}
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (on) {
phy_power_on(host->mphy);
ufs_mtk_setup_ref_clk(hba, on);
if (!ufshcd_is_clkscaling_supported(hba))
ufs_mtk_scale_perf(hba, on);
} else {
if (!ufshcd_is_clkscaling_supported(hba))
ufs_mtk_scale_perf(hba, on);
ufs_mtk_setup_ref_clk(hba, on);
phy_power_off(host->mphy);
}
}
static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
u32 irq, i;
if (!is_mcq_enabled(hba))
return;
if (host->mcq_nr_intr == 0)
return;
for (i = 0; i < host->mcq_nr_intr; i++) {
irq = host->mcq_intr_info[i].irq;
disable_irq(irq);
}
host->is_mcq_intr_enabled = false;
}
static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
u32 irq, i;
if (!is_mcq_enabled(hba))
return;
if (host->mcq_nr_intr == 0)
return;
if (host->is_mcq_intr_enabled)
return;
for (i = 0; i < host->mcq_nr_intr; i++) {
irq = host->mcq_intr_info[i].irq;
enable_irq(irq);
}
host->is_mcq_intr_enabled = true;
}
/**
* ufs_mtk_setup_clocks - enables/disable clocks
* @hba: host controller instance
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
* Return: 0 on success, non-zero on failure.
*/
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
bool clk_pwr_off = false;
int ret = 0;
/*
* In case ufs_mtk_init() is not yet done, simply ignore.
* This ufs_mtk_setup_clocks() shall be called from
* ufs_mtk_init() after init is done.
*/
if (!host)
return 0;
if (!on && status == PRE_CHANGE) {
if (ufshcd_is_link_off(hba)) {
clk_pwr_off = true;
} else if (ufshcd_is_link_hibern8(hba) ||
(!ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_auto_hibern8_enabled(hba))) {
/*
 * Gate ref-clk and poweroff mphy if link state is in
 * OFF or Hibern8 by either Auto-Hibern8 or
 * ufshcd_link_state_transition().
 */
ret = ufs_mtk_wait_link_state(hba,
VS_LINK_HIBERN8,
15);
if (!ret)
clk_pwr_off = true;
}
if (clk_pwr_off)
ufs_mtk_pwr_ctrl(hba, false);
ufs_mtk_mcq_disable_irq(hba);
} else if (on && status == POST_CHANGE) {
ufs_mtk_pwr_ctrl(hba, true);
ufs_mtk_mcq_enable_irq(hba);
}
return ret;
}
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
int ret, ver = 0;
if (host->hw_ver.major)
return;
/* Set default (minimum) version anyway */
host->hw_ver.major = 2;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
if (!ret) {
if (ver >= UFS_UNIPRO_VER_1_8) {
host->hw_ver.major = 3;
/*
 * Fix HCI version for some platforms with
 * incorrect version
 */
if (hba->ufs_version < ufshci_version(3, 0))
hba->ufs_version = ufshci_version(3, 0);
}
}
}
static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
return hba->ufs_version;
}
/**
* ufs_mtk_init_clocks - Init mtk driver private clocks
*
* @hba: per adapter instance
*/
static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki, *clki_tmp;
/*
* Find private clocks and store them in struct ufs_mtk_clk.
* Remove "ufs_sel_min_src" and "ufs_sel_min_src" from list to avoid
* being switched on/off in clock gating.
*/
list_for_each_entry_safe(clki, clki_tmp, head, list) {
if (!strcmp(clki->name, "ufs_sel")) {
host->mclk.ufs_sel_clki = clki;
} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
host->mclk.ufs_sel_max_clki = clki;
clk_disable_unprepare(clki->clk);
list_del(&clki->list);
} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
host->mclk.ufs_sel_min_clki = clki;
clk_disable_unprepare(clki->clk);
list_del(&clki->list);
}
}
if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
!mclk->ufs_sel_min_clki) {
hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
dev_info(hba->dev,
"%s: Clk-scaling not ready. Feature disabled.",
__func__);
}
}
#define MAX_VCC_NAME 30
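/*
 * Resolve the VCC regulator name either from an SMC-provided index
 * ("vcc-optN") or from the device's UFS spec version ("vcc-ufsN").
 */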
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
struct ufs_vreg_info *info = &hba->vreg_info;
struct device_node *np = hba->dev->of_node;
struct device *dev = hba->dev;
char vcc_name[MAX_VCC_NAME];
struct arm_smccc_res res;
int err, ver;
if (hba->vreg_info.vcc)
return 0;
if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
ufs_mtk_get_vcc_num(res);
if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
else
return -ENODEV;
} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
} else {
return 0;
}
err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false);
if (err)
return err;
err = ufshcd_get_vreg(dev, info->vcc);
if (err)
return err;
err = regulator_enable(info->vcc->reg);
if (!err) {
info->vcc->enabled = true;
dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
}
return err;
}
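/*
 * UFS 3.0+ devices use VCCQ; earlier devices use VCCQ2. Keep the rail
 * in use always-on and release the unused one.
 */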
static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
struct ufs_vreg_info *info = &hba->vreg_info;
struct ufs_vreg **vreg_on, **vreg_off;
if (hba->dev_info.wspecversion >= 0x0300) {
vreg_on = &info->vccq;
vreg_off = &info->vccq2;
} else {
vreg_on = &info->vccq2;
vreg_off = &info->vccq;
}
if (*vreg_on)
(*vreg_on)->always_on = true;
if (*vreg_off) {
regulator_disable((*vreg_off)->reg);
devm_kfree(hba->dev, (*vreg_off)->name);
devm_kfree(hba->dev, *vreg_off);
*vreg_off = NULL;
}
}
static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct platform_device *pdev;
int i;
int irq;
host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
pdev = container_of(hba->dev, struct platform_device, dev);
if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
goto failed;
for (i = 0; i < host->mcq_nr_intr; i++) {
/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
irq = platform_get_irq(pdev, i + 1);
if (irq < 0) {
host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
goto failed;
}
host->mcq_intr_info[i].hba = hba;
host->mcq_intr_info[i].irq = irq;
dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
}
return;
failed:
/* invalidate irq info */
for (i = 0; i < host->mcq_nr_intr; i++)
host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
host->mcq_nr_intr = 0;
}
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
*
* Binds PHY with controller and powers up PHY enabling clocks
* and regulators.
*
* Return: -EPROBE_DEFER if binding fails, returns negative error
* on phy power up failure and returns zero on success.
*/
static int ufs_mtk_init(struct ufs_hba *hba)
{
const struct of_device_id *id;
struct device *dev = hba->dev;
struct ufs_mtk_host *host;
struct Scsi_Host *shost = hba->host;
struct arm_smccc_res res;
int err = 0;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
err = -ENOMEM;
dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
goto out;
}
host->hba = hba;
ufshcd_set_variant(hba, host);
id = of_match_device(ufs_mtk_of_match, dev);
if (!id) {
err = -EINVAL;
goto out;
}
/* Initialize host capability */
ufs_mtk_init_host_caps(hba);
ufs_mtk_init_mcq_irq(hba);
err = ufs_mtk_bind_mphy(hba);
if (err)
goto out_variant_clear;
ufs_mtk_init_reset(hba);
/* backup mphy setting if mphy can reset */
if (host->mphy_reset)
ufs_mtk_mphy_ctrl(UFS_MPHY_BACKUP, res);
/* Enable runtime autosuspend */
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
/* Enable clock-gating */
hba->caps |= UFSHCD_CAP_CLK_GATING;
/* Enable inline encryption */
hba->caps |= UFSHCD_CAP_CRYPTO;