cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ufshcd.c (273743B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Universal Flash Storage Host controller driver Core
      4 * Copyright (C) 2011-2013 Samsung India Software Operations
      5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
      6 *
      7 * Authors:
      8 *	Santosh Yaraganavi <santosh.sy@samsung.com>
      9 *	Vinayak Holikatti <h.vinayak@samsung.com>
     10 */
     11
     12#include <linux/async.h>
     13#include <linux/devfreq.h>
     14#include <linux/nls.h>
     15#include <linux/of.h>
     16#include <linux/bitfield.h>
     17#include <linux/blk-pm.h>
     18#include <linux/blkdev.h>
     19#include <linux/clk.h>
     20#include <linux/delay.h>
     21#include <linux/interrupt.h>
     22#include <linux/module.h>
     23#include <linux/regulator/consumer.h>
     24#include <scsi/scsi_cmnd.h>
     25#include <scsi/scsi_dbg.h>
     26#include <scsi/scsi_driver.h>
     27#include <scsi/scsi_eh.h>
     28#include "ufshcd-priv.h"
     29#include <ufs/ufs_quirks.h>
     30#include <ufs/unipro.h>
     31#include "ufs-sysfs.h"
     32#include "ufs-debugfs.h"
     33#include "ufs-fault-injection.h"
     34#include "ufs_bsg.h"
     35#include "ufshcd-crypto.h"
     36#include "ufshpb.h"
     37#include <asm/unaligned.h>
     38
     39#define CREATE_TRACE_POINTS
     40#include <trace/events/ufs.h>
     41
     42#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
     43				 UTP_TASK_REQ_COMPL |\
     44				 UFSHCD_ERROR_MASK)
     45/* UIC command timeout, unit: ms */
     46#define UIC_CMD_TIMEOUT	500
     47
     48/* NOP OUT retries waiting for NOP IN response */
     49#define NOP_OUT_RETRIES    10
     50/* Timeout after 50 msecs if NOP OUT hangs without response */
     51#define NOP_OUT_TIMEOUT    50 /* msecs */
     52
     53/* Query request retries */
     54#define QUERY_REQ_RETRIES 3
     55/* Query request timeout */
     56#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
     57
     58/* Task management command timeout */
     59#define TM_CMD_TIMEOUT	100 /* msecs */
     60
     61/* maximum number of retries for a general UIC command  */
     62#define UFS_UIC_COMMAND_RETRIES 3
     63
     64/* maximum number of link-startup retries */
     65#define DME_LINKSTARTUP_RETRIES 3
     66
     67/* Maximum retries for Hibern8 enter */
     68#define UIC_HIBERN8_ENTER_RETRIES 3
     69
     70/* maximum number of reset retries before giving up */
     71#define MAX_HOST_RESET_RETRIES 5
     72
     73/* Maximum number of error handler retries before giving up */
     74#define MAX_ERR_HANDLER_RETRIES 5
     75
     76/* Expose the flag value from utp_upiu_query.value */
     77#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
     78
     79/* Interrupt aggregation default timeout, unit: 40us */
     80#define INT_AGGR_DEF_TO	0x02
     81
     82/* default delay of autosuspend: 2000 ms */
     83#define RPM_AUTOSUSPEND_DELAY_MS 2000
     84
     85/* Default delay of RPM device flush delayed work */
     86#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
     87
     88/* Default value of wait time before gating device ref clock */
     89#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
     90
     91/* Polling time to wait for fDeviceInit */
     92#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
     93
     94#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
     95	({                                                              \
     96		int _ret;                                               \
     97		if (_on)                                                \
     98			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
     99		else                                                    \
    100			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
    101		_ret;                                                   \
    102	})
    103
    104#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
    105	size_t __len = (len);                                            \
    106	print_hex_dump(KERN_ERR, prefix_str,                             \
    107		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
    108		       16, 4, buf, __len, false);                        \
    109} while (0)
    110
    111int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
    112		     const char *prefix)
    113{
    114	u32 *regs;
    115	size_t pos;
    116
    117	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
    118		return -EINVAL;
    119
    120	regs = kzalloc(len, GFP_ATOMIC);
    121	if (!regs)
    122		return -ENOMEM;
    123
    124	for (pos = 0; pos < len; pos += 4) {
    125		if (offset == 0 &&
    126		    pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
    127		    pos <= REG_UIC_ERROR_CODE_DME)
    128			continue;
    129		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
    130	}
    131
    132	ufshcd_hex_dump(prefix, regs, len);
    133	kfree(regs);
    134
    135	return 0;
    136}
    137EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
    138
    139enum {
    140	UFSHCD_MAX_CHANNEL	= 0,
    141	UFSHCD_MAX_ID		= 1,
    142	UFSHCD_NUM_RESERVED	= 1,
    143	UFSHCD_CMD_PER_LUN	= 32 - UFSHCD_NUM_RESERVED,
    144	UFSHCD_CAN_QUEUE	= 32 - UFSHCD_NUM_RESERVED,
    145};
    146
    147static const char *const ufshcd_state_name[] = {
    148	[UFSHCD_STATE_RESET]			= "reset",
    149	[UFSHCD_STATE_OPERATIONAL]		= "operational",
    150	[UFSHCD_STATE_ERROR]			= "error",
    151	[UFSHCD_STATE_EH_SCHEDULED_FATAL]	= "eh_fatal",
    152	[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]	= "eh_non_fatal",
    153};
    154
    155/* UFSHCD error handling flags */
    156enum {
    157	UFSHCD_EH_IN_PROGRESS = (1 << 0),
    158};
    159
    160/* UFSHCD UIC layer error flags */
    161enum {
    162	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
    163	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
    164	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
    165	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
    166	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
    167	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
    168	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
    169};
    170
    171#define ufshcd_set_eh_in_progress(h) \
    172	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
    173#define ufshcd_eh_in_progress(h) \
    174	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
    175#define ufshcd_clear_eh_in_progress(h) \
    176	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
    177
    178struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
    179	[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
    180	[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
    181	[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
    182	[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
    183	[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
    184	[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
    185	/*
    186	 * For DeepSleep, the link is first put in hibern8 and then off.
    187	 * Leaving the link in hibern8 is not supported.
    188	 */
    189	[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
    190};
    191
    192static inline enum ufs_dev_pwr_mode
    193ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
    194{
    195	return ufs_pm_lvl_states[lvl].dev_state;
    196}
    197
    198static inline enum uic_link_state
    199ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
    200{
    201	return ufs_pm_lvl_states[lvl].link_state;
    202}
    203
    204static inline enum ufs_pm_level
    205ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
    206					enum uic_link_state link_state)
    207{
    208	enum ufs_pm_level lvl;
    209
    210	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
    211		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
    212			(ufs_pm_lvl_states[lvl].link_state == link_state))
    213			return lvl;
    214	}
    215
    216	/* if no match found, return the level 0 */
    217	return UFS_PM_LVL_0;
    218}
    219
    220static const struct ufs_dev_quirk ufs_fixups[] = {
    221	/* UFS cards deviations table */
    222	{ .wmanufacturerid = UFS_VENDOR_MICRON,
    223	  .model = UFS_ANY_MODEL,
    224	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
    225		   UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ },
    226	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
    227	  .model = UFS_ANY_MODEL,
    228	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
    229		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
    230		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
    231	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
    232	  .model = UFS_ANY_MODEL,
    233	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
    234	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
    235	  .model = "hB8aL1" /*H28U62301AMR*/,
    236	  .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
    237	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
    238	  .model = UFS_ANY_MODEL,
    239	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
    240	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
    241	  .model = "THGLF2G9C8KBADG",
    242	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
    243	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
    244	  .model = "THGLF2G9D8KBADG",
    245	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
    246	{}
    247};
    248
    249static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
    250static void ufshcd_async_scan(void *data, async_cookie_t cookie);
    251static int ufshcd_reset_and_restore(struct ufs_hba *hba);
    252static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
    253static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
    254static void ufshcd_hba_exit(struct ufs_hba *hba);
    255static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
    256static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
    257static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
    258static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
    259static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
    260static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
    261static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
    262static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
    263static irqreturn_t ufshcd_intr(int irq, void *__hba);
    264static int ufshcd_change_power_mode(struct ufs_hba *hba,
    265			     struct ufs_pa_layer_attr *pwr_mode);
    266static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
    267static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
    268static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
    269					 struct ufs_vreg *vreg);
    270static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
    271static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
    272static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
    273static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
    274static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
    275
    276static inline void ufshcd_enable_irq(struct ufs_hba *hba)
    277{
    278	if (!hba->is_irq_enabled) {
    279		enable_irq(hba->irq);
    280		hba->is_irq_enabled = true;
    281	}
    282}
    283
    284static inline void ufshcd_disable_irq(struct ufs_hba *hba)
    285{
    286	if (hba->is_irq_enabled) {
    287		disable_irq(hba->irq);
    288		hba->is_irq_enabled = false;
    289	}
    290}
    291
    292static inline void ufshcd_wb_config(struct ufs_hba *hba)
    293{
    294	if (!ufshcd_is_wb_allowed(hba))
    295		return;
    296
    297	ufshcd_wb_toggle(hba, true);
    298
    299	ufshcd_wb_toggle_flush_during_h8(hba, true);
    300	if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
    301		ufshcd_wb_toggle_flush(hba, true);
    302}
    303
    304static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
    305{
    306	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
    307		scsi_unblock_requests(hba->host);
    308}
    309
    310static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
    311{
    312	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
    313		scsi_block_requests(hba->host);
    314}
    315
    316static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
    317				      enum ufs_trace_str_t str_t)
    318{
    319	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
    320	struct utp_upiu_header *header;
    321
    322	if (!trace_ufshcd_upiu_enabled())
    323		return;
    324
    325	if (str_t == UFS_CMD_SEND)
    326		header = &rq->header;
    327	else
    328		header = &hba->lrb[tag].ucd_rsp_ptr->header;
    329
    330	trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
    331			  UFS_TSF_CDB);
    332}
    333
    334static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
    335					enum ufs_trace_str_t str_t,
    336					struct utp_upiu_req *rq_rsp)
    337{
    338	if (!trace_ufshcd_upiu_enabled())
    339		return;
    340
    341	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
    342			  &rq_rsp->qr, UFS_TSF_OSF);
    343}
    344
    345static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
    346				     enum ufs_trace_str_t str_t)
    347{
    348	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
    349
    350	if (!trace_ufshcd_upiu_enabled())
    351		return;
    352
    353	if (str_t == UFS_TM_SEND)
    354		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
    355				  &descp->upiu_req.req_header,
    356				  &descp->upiu_req.input_param1,
    357				  UFS_TSF_TM_INPUT);
    358	else
    359		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
    360				  &descp->upiu_rsp.rsp_header,
    361				  &descp->upiu_rsp.output_param1,
    362				  UFS_TSF_TM_OUTPUT);
    363}
    364
    365static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
    366					 struct uic_command *ucmd,
    367					 enum ufs_trace_str_t str_t)
    368{
    369	u32 cmd;
    370
    371	if (!trace_ufshcd_uic_command_enabled())
    372		return;
    373
    374	if (str_t == UFS_CMD_SEND)
    375		cmd = ucmd->command;
    376	else
    377		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
    378
    379	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
    380				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
    381				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
    382				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
    383}
    384
    385static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
    386				     enum ufs_trace_str_t str_t)
    387{
    388	u64 lba = 0;
    389	u8 opcode = 0, group_id = 0;
    390	u32 intr, doorbell;
    391	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
    392	struct scsi_cmnd *cmd = lrbp->cmd;
    393	struct request *rq = scsi_cmd_to_rq(cmd);
    394	int transfer_len = -1;
    395
    396	if (!cmd)
    397		return;
    398
    399	/* trace UPIU also */
    400	ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
    401	if (!trace_ufshcd_command_enabled())
    402		return;
    403
    404	opcode = cmd->cmnd[0];
    405
    406	if (opcode == READ_10 || opcode == WRITE_10) {
    407		/*
    408		 * Currently we only fully trace read(10) and write(10) commands
    409		 */
    410		transfer_len =
    411		       be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
    412		lba = scsi_get_lba(cmd);
    413		if (opcode == WRITE_10)
    414			group_id = lrbp->cmd->cmnd[6];
    415	} else if (opcode == UNMAP) {
    416		/*
    417		 * The number of Bytes to be unmapped beginning with the lba.
    418		 */
    419		transfer_len = blk_rq_bytes(rq);
    420		lba = scsi_get_lba(cmd);
    421	}
    422
    423	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
    424	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
    425	trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
    426			doorbell, transfer_len, intr, lba, opcode, group_id);
    427}
    428
    429static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
    430{
    431	struct ufs_clk_info *clki;
    432	struct list_head *head = &hba->clk_list_head;
    433
    434	if (list_empty(head))
    435		return;
    436
    437	list_for_each_entry(clki, head, list) {
    438		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
    439				clki->max_freq)
    440			dev_err(hba->dev, "clk: %s, rate: %u\n",
    441					clki->name, clki->curr_freq);
    442	}
    443}
    444
    445static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
    446			     char *err_name)
    447{
    448	int i;
    449	bool found = false;
    450	struct ufs_event_hist *e;
    451
    452	if (id >= UFS_EVT_CNT)
    453		return;
    454
    455	e = &hba->ufs_stats.event[id];
    456
    457	for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
    458		int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
    459
    460		if (e->tstamp[p] == 0)
    461			continue;
    462		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
    463			e->val[p], ktime_to_us(e->tstamp[p]));
    464		found = true;
    465	}
    466
    467	if (!found)
    468		dev_err(hba->dev, "No record of %s\n", err_name);
    469	else
    470		dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
    471}
    472
    473static void ufshcd_print_evt_hist(struct ufs_hba *hba)
    474{
    475	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
    476
    477	ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
    478	ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
    479	ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
    480	ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
    481	ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
    482	ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
    483			 "auto_hibern8_err");
    484	ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
    485	ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
    486			 "link_startup_fail");
    487	ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
    488	ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
    489			 "suspend_fail");
    490	ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
    491	ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
    492	ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
    493
    494	ufshcd_vops_dbg_register_dump(hba);
    495}
    496
    497static
    498void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
    499{
    500	struct ufshcd_lrb *lrbp;
    501	int prdt_length;
    502	int tag;
    503
    504	for_each_set_bit(tag, &bitmap, hba->nutrs) {
    505		lrbp = &hba->lrb[tag];
    506
    507		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
    508				tag, ktime_to_us(lrbp->issue_time_stamp));
    509		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
    510				tag, ktime_to_us(lrbp->compl_time_stamp));
    511		dev_err(hba->dev,
    512			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
    513			tag, (u64)lrbp->utrd_dma_addr);
    514
    515		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
    516				sizeof(struct utp_transfer_req_desc));
    517		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
    518			(u64)lrbp->ucd_req_dma_addr);
    519		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
    520				sizeof(struct utp_upiu_req));
    521		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
    522			(u64)lrbp->ucd_rsp_dma_addr);
    523		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
    524				sizeof(struct utp_upiu_rsp));
    525
    526		prdt_length = le16_to_cpu(
    527			lrbp->utr_descriptor_ptr->prd_table_length);
    528		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
    529			prdt_length /= sizeof(struct ufshcd_sg_entry);
    530
    531		dev_err(hba->dev,
    532			"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
    533			tag, prdt_length,
    534			(u64)lrbp->ucd_prdt_dma_addr);
    535
    536		if (pr_prdt)
    537			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
    538				sizeof(struct ufshcd_sg_entry) * prdt_length);
    539	}
    540}
    541
    542static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
    543{
    544	int tag;
    545
    546	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
    547		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
    548
    549		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
    550		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
    551	}
    552}
    553
    554static void ufshcd_print_host_state(struct ufs_hba *hba)
    555{
    556	struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
    557
    558	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
    559	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
    560		hba->outstanding_reqs, hba->outstanding_tasks);
    561	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
    562		hba->saved_err, hba->saved_uic_err);
    563	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
    564		hba->curr_dev_pwr_mode, hba->uic_link_state);
    565	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
    566		hba->pm_op_in_progress, hba->is_sys_suspended);
    567	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
    568		hba->auto_bkops_enabled, hba->host->host_self_blocked);
    569	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
    570	dev_err(hba->dev,
    571		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
    572		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
    573		hba->ufs_stats.hibern8_exit_cnt);
    574	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
    575		ktime_to_us(hba->ufs_stats.last_intr_ts),
    576		hba->ufs_stats.last_intr_status);
    577	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
    578		hba->eh_flags, hba->req_abort_count);
    579	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
    580		hba->ufs_version, hba->capabilities, hba->caps);
    581	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
    582		hba->dev_quirks);
    583	if (sdev_ufs)
    584		dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
    585			sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
    586
    587	ufshcd_print_clk_freqs(hba);
    588}
    589
    590/**
    591 * ufshcd_print_pwr_info - print power params as saved in hba
    592 * power info
    593 * @hba: per-adapter instance
    594 */
    595static void ufshcd_print_pwr_info(struct ufs_hba *hba)
    596{
    597	static const char * const names[] = {
    598		"INVALID MODE",
    599		"FAST MODE",
    600		"SLOW_MODE",
    601		"INVALID MODE",
    602		"FASTAUTO_MODE",
    603		"SLOWAUTO_MODE",
    604		"INVALID MODE",
    605	};
    606
    607	/*
     608	 * Use dev_dbg to avoid messages during runtime PM; otherwise user space
     609	 * writing the messages back to storage would trigger runtime resume,
     610	 * causing more messages and so on in a never-ending cycle.
    611	 */
    612	dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
    613		 __func__,
    614		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
    615		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
    616		 names[hba->pwr_info.pwr_rx],
    617		 names[hba->pwr_info.pwr_tx],
    618		 hba->pwr_info.hs_rate);
    619}
    620
    621static void ufshcd_device_reset(struct ufs_hba *hba)
    622{
    623	int err;
    624
    625	err = ufshcd_vops_device_reset(hba);
    626
    627	if (!err) {
    628		ufshcd_set_ufs_dev_active(hba);
    629		if (ufshcd_is_wb_allowed(hba)) {
    630			hba->dev_info.wb_enabled = false;
    631			hba->dev_info.wb_buf_flush_enabled = false;
    632		}
    633	}
    634	if (err != -EOPNOTSUPP)
    635		ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
    636}
    637
    638void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
    639{
    640	if (!us)
    641		return;
    642
    643	if (us < 10)
    644		udelay(us);
    645	else
    646		usleep_range(us, us + tolerance);
    647}
    648EXPORT_SYMBOL_GPL(ufshcd_delay_us);
    649
    650/**
    651 * ufshcd_wait_for_register - wait for register value to change
    652 * @hba: per-adapter interface
    653 * @reg: mmio register offset
    654 * @mask: mask to apply to the read register value
    655 * @val: value to wait for
    656 * @interval_us: polling interval in microseconds
    657 * @timeout_ms: timeout in milliseconds
    658 *
    659 * Return:
    660 * -ETIMEDOUT on error, zero on success.
    661 */
    662static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
    663				u32 val, unsigned long interval_us,
    664				unsigned long timeout_ms)
    665{
    666	int err = 0;
    667	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
    668
    669	/* ignore bits that we don't intend to wait on */
    670	val = val & mask;
    671
    672	while ((ufshcd_readl(hba, reg) & mask) != val) {
    673		usleep_range(interval_us, interval_us + 50);
    674		if (time_after(jiffies, timeout)) {
    675			if ((ufshcd_readl(hba, reg) & mask) != val)
    676				err = -ETIMEDOUT;
    677			break;
    678		}
    679	}
    680
    681	return err;
    682}
    683
    684/**
    685 * ufshcd_get_intr_mask - Get the interrupt bit mask
    686 * @hba: Pointer to adapter instance
    687 *
    688 * Returns interrupt bit mask per version
    689 */
    690static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
    691{
    692	if (hba->ufs_version == ufshci_version(1, 0))
    693		return INTERRUPT_MASK_ALL_VER_10;
    694	if (hba->ufs_version <= ufshci_version(2, 0))
    695		return INTERRUPT_MASK_ALL_VER_11;
    696
    697	return INTERRUPT_MASK_ALL_VER_21;
    698}
    699
    700/**
    701 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
    702 * @hba: Pointer to adapter instance
    703 *
    704 * Returns UFSHCI version supported by the controller
    705 */
    706static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
    707{
    708	u32 ufshci_ver;
    709
    710	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
    711		ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
    712	else
    713		ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
    714
    715	/*
     716	 * UFSHCI v1.x uses a different version scheme. In order to
     717	 * allow comparisons with the ufshci_version() function, we
     718	 * convert it to the same scheme as UFS 2.0+.
    719	 */
    720	if (ufshci_ver & 0x00010000)
    721		return ufshci_version(1, ufshci_ver & 0x00000100);
    722
    723	return ufshci_ver;
    724}
    725
    726/**
     727 * ufshcd_is_device_present - Check if any device is connected to
    728 *			      the host controller
    729 * @hba: pointer to adapter instance
    730 *
    731 * Returns true if device present, false if no device detected
    732 */
    733static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
    734{
    735	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
    736}
    737
    738/**
    739 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
    740 * @lrbp: pointer to local command reference block
    741 *
    742 * This function is used to get the OCS field from UTRD
    743 * Returns the OCS field in the UTRD
    744 */
    745static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
    746{
    747	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
    748}
    749
    750/**
    751 * ufshcd_utrl_clear() - Clear requests from the controller request list.
    752 * @hba: per adapter instance
    753 * @mask: mask with one bit set for each request to be cleared
    754 */
    755static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
    756{
    757	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
    758		mask = ~mask;
    759	/*
    760	 * From the UFSHCI specification: "UTP Transfer Request List CLear
    761	 * Register (UTRLCLR): This field is bit significant. Each bit
    762	 * corresponds to a slot in the UTP Transfer Request List, where bit 0
    763	 * corresponds to request slot 0. A bit in this field is set to ‘0’
    764	 * by host software to indicate to the host controller that a transfer
    765	 * request slot is cleared. The host controller
    766	 * shall free up any resources associated to the request slot
    767	 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
    768	 * host software indicates no change to request slots by setting the
    769	 * associated bits in this field to ‘1’. Bits in this field shall only
    770	 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
    771	 */
    772	ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
    773}
    774
    775/**
     776 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
    777 * @hba: per adapter instance
    778 * @pos: position of the bit to be cleared
    779 */
    780static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
    781{
    782	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
    783		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
    784	else
    785		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
    786}
    787
    788/**
    789 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
    790 * @reg: Register value of host controller status
    791 *
    792 * Returns integer, 0 on Success and positive value if failed
    793 */
    794static inline int ufshcd_get_lists_status(u32 reg)
    795{
    796	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
    797}
    798
    799/**
    800 * ufshcd_get_uic_cmd_result - Get the UIC command result
    801 * @hba: Pointer to adapter instance
    802 *
    803 * This function gets the result of UIC command completion
    804 * Returns 0 on success, non zero value on error
    805 */
    806static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
    807{
    808	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
    809	       MASK_UIC_COMMAND_RESULT;
    810}
    811
    812/**
    813 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
    814 * @hba: Pointer to adapter instance
    815 *
    816 * This function gets UIC command argument3
     817 * Returns the attribute value read from UIC command argument3
    818 */
    819static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
    820{
    821	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
    822}
    823
    824/**
    825 * ufshcd_get_req_rsp - returns the TR response transaction type
    826 * @ucd_rsp_ptr: pointer to response UPIU
    827 */
    828static inline int
    829ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
    830{
    831	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
    832}
    833
    834/**
    835 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
    836 * @ucd_rsp_ptr: pointer to response UPIU
    837 *
    838 * This function gets the response status and scsi_status from response UPIU
    839 * Returns the response result code.
    840 */
    841static inline int
    842ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
    843{
    844	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
    845}
    846
    847/*
    848 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
    849 *				from response UPIU
    850 * @ucd_rsp_ptr: pointer to response UPIU
    851 *
    852 * Return the data segment length.
    853 */
    854static inline unsigned int
    855ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
    856{
    857	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
    858		MASK_RSP_UPIU_DATA_SEG_LEN;
    859}
    860
    861/**
    862 * ufshcd_is_exception_event - Check if the device raised an exception event
    863 * @ucd_rsp_ptr: pointer to response UPIU
    864 *
    865 * The function checks if the device raised an exception event indicated in
    866 * the Device Information field of response UPIU.
    867 *
    868 * Returns true if exception is raised, false otherwise.
    869 */
    870static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
    871{
    872	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
    873			MASK_RSP_EXCEPTION_EVENT;
    874}
    875
    876/**
    877 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
    878 * @hba: per adapter instance
    879 */
    880static inline void
    881ufshcd_reset_intr_aggr(struct ufs_hba *hba)
    882{
    883	ufshcd_writel(hba, INT_AGGR_ENABLE |
    884		      INT_AGGR_COUNTER_AND_TIMER_RESET,
    885		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
    886}
    887
    888/**
    889 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
    890 * @hba: per adapter instance
    891 * @cnt: Interrupt aggregation counter threshold
    892 * @tmout: Interrupt aggregation timeout value
    893 */
    894static inline void
    895ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
    896{
    897	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
    898		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
    899		      INT_AGGR_TIMEOUT_VAL(tmout),
    900		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
    901}
    902
    903/**
    904 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
    905 * @hba: per adapter instance
    906 */
    907static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
    908{
    909	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
    910}
    911
    912/**
     913 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
     914 *			Setting the run-stop registers to 1 indicates to the
     915 *			host controller that it can process requests.
    916 * @hba: per adapter instance
    917 */
    918static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
    919{
    920	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
    921		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
    922	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
    923		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
    924}
    925
    926/**
    927 * ufshcd_hba_start - Start controller initialization sequence
    928 * @hba: per adapter instance
    929 */
    930static inline void ufshcd_hba_start(struct ufs_hba *hba)
    931{
    932	u32 val = CONTROLLER_ENABLE;
    933
    934	if (ufshcd_crypto_enable(hba))
    935		val |= CRYPTO_GENERAL_ENABLE;
    936
    937	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
    938}
    939
    940/**
    941 * ufshcd_is_hba_active - Get controller state
    942 * @hba: per adapter instance
    943 *
    944 * Returns true if and only if the controller is active.
    945 */
    946static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
    947{
    948	return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
    949}
    950
    951u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
    952{
     953	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
    954	if (hba->ufs_version <= ufshci_version(1, 1))
    955		return UFS_UNIPRO_VER_1_41;
    956	else
    957		return UFS_UNIPRO_VER_1_6;
    958}
    959EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
    960
    961static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
    962{
    963	/*
    964	 * If both host and device support UniPro ver1.6 or later, PA layer
    965	 * parameters tuning happens during link startup itself.
    966	 *
    967	 * We can manually tune PA layer parameters if either host or device
    968	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
    969	 * logic simple, we will only do manual tuning if local unipro version
    970	 * doesn't support ver1.6 or later.
    971	 */
    972	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
    973}
    974
    975/**
    976 * ufshcd_set_clk_freq - set UFS controller clock frequencies
    977 * @hba: per adapter instance
     978 * @scale_up: If True, set max possible frequency, otherwise set low frequency
    979 *
    980 * Returns 0 if successful
    981 * Returns < 0 for any other errors
    982 */
    983static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
    984{
    985	int ret = 0;
    986	struct ufs_clk_info *clki;
    987	struct list_head *head = &hba->clk_list_head;
    988
    989	if (list_empty(head))
    990		goto out;
    991
    992	list_for_each_entry(clki, head, list) {
    993		if (!IS_ERR_OR_NULL(clki->clk)) {
    994			if (scale_up && clki->max_freq) {
    995				if (clki->curr_freq == clki->max_freq)
    996					continue;
    997
    998				ret = clk_set_rate(clki->clk, clki->max_freq);
    999				if (ret) {
   1000					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
   1001						__func__, clki->name,
   1002						clki->max_freq, ret);
   1003					break;
   1004				}
   1005				trace_ufshcd_clk_scaling(dev_name(hba->dev),
   1006						"scaled up", clki->name,
   1007						clki->curr_freq,
   1008						clki->max_freq);
   1009
   1010				clki->curr_freq = clki->max_freq;
   1011
   1012			} else if (!scale_up && clki->min_freq) {
   1013				if (clki->curr_freq == clki->min_freq)
   1014					continue;
   1015
   1016				ret = clk_set_rate(clki->clk, clki->min_freq);
   1017				if (ret) {
   1018					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
   1019						__func__, clki->name,
   1020						clki->min_freq, ret);
   1021					break;
   1022				}
   1023				trace_ufshcd_clk_scaling(dev_name(hba->dev),
   1024						"scaled down", clki->name,
   1025						clki->curr_freq,
   1026						clki->min_freq);
   1027				clki->curr_freq = clki->min_freq;
   1028			}
   1029		}
   1030		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
   1031				clki->name, clk_get_rate(clki->clk));
   1032	}
   1033
   1034out:
   1035	return ret;
   1036}
   1037
   1038/**
   1039 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
   1040 * @hba: per adapter instance
   1041 * @scale_up: True if scaling up and false if scaling down
   1042 *
   1043 * Returns 0 if successful
   1044 * Returns < 0 for any other errors
   1045 */
   1046static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
   1047{
   1048	int ret = 0;
   1049	ktime_t start = ktime_get();
   1050
   1051	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
   1052	if (ret)
   1053		goto out;
   1054
   1055	ret = ufshcd_set_clk_freq(hba, scale_up);
   1056	if (ret)
   1057		goto out;
   1058
   1059	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
   1060	if (ret)
   1061		ufshcd_set_clk_freq(hba, !scale_up);
   1062
   1063out:
   1064	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
   1065			(scale_up ? "up" : "down"),
   1066			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
   1067	return ret;
   1068}
   1069
   1070/**
   1071 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
   1072 * @hba: per adapter instance
   1073 * @scale_up: True if scaling up and false if scaling down
   1074 *
   1075 * Returns true if scaling is required, false otherwise.
   1076 */
   1077static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
   1078					       bool scale_up)
   1079{
   1080	struct ufs_clk_info *clki;
   1081	struct list_head *head = &hba->clk_list_head;
   1082
   1083	if (list_empty(head))
   1084		return false;
   1085
   1086	list_for_each_entry(clki, head, list) {
   1087		if (!IS_ERR_OR_NULL(clki->clk)) {
   1088			if (scale_up && clki->max_freq) {
   1089				if (clki->curr_freq == clki->max_freq)
   1090					continue;
   1091				return true;
   1092			} else if (!scale_up && clki->min_freq) {
   1093				if (clki->curr_freq == clki->min_freq)
   1094					continue;
   1095				return true;
   1096			}
   1097		}
   1098	}
   1099
   1100	return false;
   1101}
   1102
   1103/*
   1104 * Determine the number of pending commands by counting the bits in the SCSI
   1105 * device budget maps. This approach has been selected because a bit is set in
   1106 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
   1107 * flag. The host_self_blocked flag can be modified by calling
   1108 * scsi_block_requests() or scsi_unblock_requests().
   1109 */
   1110static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
   1111{
   1112	struct scsi_device *sdev;
   1113	u32 pending = 0;
   1114
   1115	lockdep_assert_held(hba->host->host_lock);
   1116	__shost_for_each_device(sdev, hba->host)
   1117		pending += sbitmap_weight(&sdev->budget_map);
   1118
   1119	return pending;
   1120}
   1121
   1122static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
   1123					u64 wait_timeout_us)
   1124{
   1125	unsigned long flags;
   1126	int ret = 0;
   1127	u32 tm_doorbell;
   1128	u32 tr_pending;
   1129	bool timeout = false, do_last_check = false;
   1130	ktime_t start;
   1131
   1132	ufshcd_hold(hba, false);
   1133	spin_lock_irqsave(hba->host->host_lock, flags);
   1134	/*
   1135	 * Wait for all the outstanding tasks/transfer requests.
   1136	 * Verify by checking the doorbell registers are clear.
   1137	 */
   1138	start = ktime_get();
   1139	do {
   1140		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
   1141			ret = -EBUSY;
   1142			goto out;
   1143		}
   1144
   1145		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
   1146		tr_pending = ufshcd_pending_cmds(hba);
   1147		if (!tm_doorbell && !tr_pending) {
   1148			timeout = false;
   1149			break;
   1150		} else if (do_last_check) {
   1151			break;
   1152		}
   1153
   1154		spin_unlock_irqrestore(hba->host->host_lock, flags);
   1155		schedule();
   1156		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
   1157		    wait_timeout_us) {
   1158			timeout = true;
   1159			/*
    1160			 * We might have been scheduled out for a long time, so
    1161			 * make sure to check whether the doorbells have been
    1162			 * cleared by now.
   1163			 */
   1164			do_last_check = true;
   1165		}
   1166		spin_lock_irqsave(hba->host->host_lock, flags);
   1167	} while (tm_doorbell || tr_pending);
   1168
   1169	if (timeout) {
   1170		dev_err(hba->dev,
   1171			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
   1172			__func__, tm_doorbell, tr_pending);
   1173		ret = -EBUSY;
   1174	}
   1175out:
   1176	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1177	ufshcd_release(hba);
   1178	return ret;
   1179}
   1180
   1181/**
   1182 * ufshcd_scale_gear - scale up/down UFS gear
   1183 * @hba: per adapter instance
   1184 * @scale_up: True for scaling up gear and false for scaling down
   1185 *
   1186 * Returns 0 for success,
   1187 * Returns -EBUSY if scaling can't happen at this time
   1188 * Returns non-zero for any other errors
   1189 */
   1190static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
   1191{
   1192	int ret = 0;
   1193	struct ufs_pa_layer_attr new_pwr_info;
   1194
   1195	if (scale_up) {
   1196		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
   1197		       sizeof(struct ufs_pa_layer_attr));
   1198	} else {
   1199		memcpy(&new_pwr_info, &hba->pwr_info,
   1200		       sizeof(struct ufs_pa_layer_attr));
   1201
   1202		if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
   1203		    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
   1204			/* save the current power mode */
   1205			memcpy(&hba->clk_scaling.saved_pwr_info.info,
   1206				&hba->pwr_info,
   1207				sizeof(struct ufs_pa_layer_attr));
   1208
   1209			/* scale down gear */
   1210			new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
   1211			new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
   1212		}
   1213	}
   1214
   1215	/* check if the power mode needs to be changed or not? */
   1216	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
   1217	if (ret)
   1218		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
   1219			__func__, ret,
   1220			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
   1221			new_pwr_info.gear_tx, new_pwr_info.gear_rx);
   1222
   1223	return ret;
   1224}
   1225
   1226static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
   1227{
   1228	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
   1229	int ret = 0;
   1230	/*
   1231	 * make sure that there are no outstanding requests when
   1232	 * clock scaling is in progress
   1233	 */
   1234	ufshcd_scsi_block_requests(hba);
   1235	down_write(&hba->clk_scaling_lock);
   1236
   1237	if (!hba->clk_scaling.is_allowed ||
   1238	    ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
   1239		ret = -EBUSY;
   1240		up_write(&hba->clk_scaling_lock);
   1241		ufshcd_scsi_unblock_requests(hba);
   1242		goto out;
   1243	}
   1244
   1245	/* let's not get into low power until clock scaling is completed */
   1246	ufshcd_hold(hba, false);
   1247
   1248out:
   1249	return ret;
   1250}
   1251
   1252static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
   1253{
   1254	if (writelock)
   1255		up_write(&hba->clk_scaling_lock);
   1256	else
   1257		up_read(&hba->clk_scaling_lock);
   1258	ufshcd_scsi_unblock_requests(hba);
   1259	ufshcd_release(hba);
   1260}
   1261
   1262/**
   1263 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
   1264 * @hba: per adapter instance
    1265 * @scale_up: True for scaling up and false for scaling down
   1266 *
   1267 * Returns 0 for success,
   1268 * Returns -EBUSY if scaling can't happen at this time
   1269 * Returns non-zero for any other errors
   1270 */
   1271static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
   1272{
   1273	int ret = 0;
   1274	bool is_writelock = true;
   1275
   1276	ret = ufshcd_clock_scaling_prepare(hba);
   1277	if (ret)
   1278		return ret;
   1279
   1280	/* scale down the gear before scaling down clocks */
   1281	if (!scale_up) {
   1282		ret = ufshcd_scale_gear(hba, false);
   1283		if (ret)
   1284			goto out_unprepare;
   1285	}
   1286
   1287	ret = ufshcd_scale_clks(hba, scale_up);
   1288	if (ret) {
   1289		if (!scale_up)
   1290			ufshcd_scale_gear(hba, true);
   1291		goto out_unprepare;
   1292	}
   1293
   1294	/* scale up the gear after scaling up clocks */
   1295	if (scale_up) {
   1296		ret = ufshcd_scale_gear(hba, true);
   1297		if (ret) {
   1298			ufshcd_scale_clks(hba, false);
   1299			goto out_unprepare;
   1300		}
   1301	}
   1302
   1303	/* Enable Write Booster if we have scaled up else disable it */
   1304	downgrade_write(&hba->clk_scaling_lock);
   1305	is_writelock = false;
   1306	ufshcd_wb_toggle(hba, scale_up);
   1307
   1308out_unprepare:
   1309	ufshcd_clock_scaling_unprepare(hba, is_writelock);
   1310	return ret;
   1311}
   1312
   1313static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
   1314{
   1315	struct ufs_hba *hba = container_of(work, struct ufs_hba,
   1316					   clk_scaling.suspend_work);
   1317	unsigned long irq_flags;
   1318
   1319	spin_lock_irqsave(hba->host->host_lock, irq_flags);
   1320	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
   1321		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
   1322		return;
   1323	}
   1324	hba->clk_scaling.is_suspended = true;
   1325	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
   1326
   1327	__ufshcd_suspend_clkscaling(hba);
   1328}
   1329
   1330static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
   1331{
   1332	struct ufs_hba *hba = container_of(work, struct ufs_hba,
   1333					   clk_scaling.resume_work);
   1334	unsigned long irq_flags;
   1335
   1336	spin_lock_irqsave(hba->host->host_lock, irq_flags);
   1337	if (!hba->clk_scaling.is_suspended) {
   1338		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
   1339		return;
   1340	}
   1341	hba->clk_scaling.is_suspended = false;
   1342	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
   1343
   1344	devfreq_resume_device(hba->devfreq);
   1345}
   1346
   1347static int ufshcd_devfreq_target(struct device *dev,
   1348				unsigned long *freq, u32 flags)
   1349{
   1350	int ret = 0;
   1351	struct ufs_hba *hba = dev_get_drvdata(dev);
   1352	ktime_t start;
   1353	bool scale_up, sched_clk_scaling_suspend_work = false;
   1354	struct list_head *clk_list = &hba->clk_list_head;
   1355	struct ufs_clk_info *clki;
   1356	unsigned long irq_flags;
   1357
   1358	if (!ufshcd_is_clkscaling_supported(hba))
   1359		return -EINVAL;
   1360
   1361	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
   1362	/* Override with the closest supported frequency */
   1363	*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
   1364	spin_lock_irqsave(hba->host->host_lock, irq_flags);
   1365	if (ufshcd_eh_in_progress(hba)) {
   1366		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
   1367		return 0;
   1368	}
   1369
   1370	if (!hba->clk_scaling.active_reqs)
   1371		sched_clk_scaling_suspend_work = true;
   1372
   1373	if (list_empty(clk_list)) {
   1374		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
   1375		goto out;
   1376	}
   1377
   1378	/* Decide based on the rounded-off frequency and update */
   1379	scale_up = *freq == clki->max_freq;
   1380	if (!scale_up)
   1381		*freq = clki->min_freq;
   1382	/* Update the frequency */
   1383	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
   1384		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
   1385		ret = 0;
   1386		goto out; /* no state change required */
   1387	}
   1388	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
   1389
   1390	start = ktime_get();
   1391	ret = ufshcd_devfreq_scale(hba, scale_up);
   1392
   1393	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
   1394		(scale_up ? "up" : "down"),
   1395		ktime_to_us(ktime_sub(ktime_get(), start)), ret);
   1396
   1397out:
   1398	if (sched_clk_scaling_suspend_work)
   1399		queue_work(hba->clk_scaling.workq,
   1400			   &hba->clk_scaling.suspend_work);
   1401
   1402	return ret;
   1403}
   1404
   1405static int ufshcd_devfreq_get_dev_status(struct device *dev,
   1406		struct devfreq_dev_status *stat)
   1407{
   1408	struct ufs_hba *hba = dev_get_drvdata(dev);
   1409	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
   1410	unsigned long flags;
   1411	struct list_head *clk_list = &hba->clk_list_head;
   1412	struct ufs_clk_info *clki;
   1413	ktime_t curr_t;
   1414
   1415	if (!ufshcd_is_clkscaling_supported(hba))
   1416		return -EINVAL;
   1417
   1418	memset(stat, 0, sizeof(*stat));
   1419
   1420	spin_lock_irqsave(hba->host->host_lock, flags);
   1421	curr_t = ktime_get();
   1422	if (!scaling->window_start_t)
   1423		goto start_window;
   1424
   1425	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
   1426	/*
    1427	 * If the current frequency is 0, the ondemand governor assumes that
    1428	 * no initial frequency has been set and always requests to set the
    1429	 * maximum frequency.
   1430	 */
   1431	stat->current_frequency = clki->curr_freq;
   1432	if (scaling->is_busy_started)
   1433		scaling->tot_busy_t += ktime_us_delta(curr_t,
   1434				scaling->busy_start_t);
   1435
   1436	stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
   1437	stat->busy_time = scaling->tot_busy_t;
   1438start_window:
   1439	scaling->window_start_t = curr_t;
   1440	scaling->tot_busy_t = 0;
   1441
   1442	if (hba->outstanding_reqs) {
   1443		scaling->busy_start_t = curr_t;
   1444		scaling->is_busy_started = true;
   1445	} else {
   1446		scaling->busy_start_t = 0;
   1447		scaling->is_busy_started = false;
   1448	}
   1449	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1450	return 0;
   1451}
   1452
   1453static int ufshcd_devfreq_init(struct ufs_hba *hba)
   1454{
   1455	struct list_head *clk_list = &hba->clk_list_head;
   1456	struct ufs_clk_info *clki;
   1457	struct devfreq *devfreq;
   1458	int ret;
   1459
   1460	/* Skip devfreq if we don't have any clocks in the list */
   1461	if (list_empty(clk_list))
   1462		return 0;
   1463
   1464	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
   1465	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
   1466	dev_pm_opp_add(hba->dev, clki->max_freq, 0);
   1467
   1468	ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
   1469					 &hba->vps->ondemand_data);
   1470	devfreq = devfreq_add_device(hba->dev,
   1471			&hba->vps->devfreq_profile,
   1472			DEVFREQ_GOV_SIMPLE_ONDEMAND,
   1473			&hba->vps->ondemand_data);
   1474	if (IS_ERR(devfreq)) {
   1475		ret = PTR_ERR(devfreq);
   1476		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
   1477
   1478		dev_pm_opp_remove(hba->dev, clki->min_freq);
   1479		dev_pm_opp_remove(hba->dev, clki->max_freq);
   1480		return ret;
   1481	}
   1482
   1483	hba->devfreq = devfreq;
   1484
   1485	return 0;
   1486}
   1487
   1488static void ufshcd_devfreq_remove(struct ufs_hba *hba)
   1489{
   1490	struct list_head *clk_list = &hba->clk_list_head;
   1491	struct ufs_clk_info *clki;
   1492
   1493	if (!hba->devfreq)
   1494		return;
   1495
   1496	devfreq_remove_device(hba->devfreq);
   1497	hba->devfreq = NULL;
   1498
   1499	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
   1500	dev_pm_opp_remove(hba->dev, clki->min_freq);
   1501	dev_pm_opp_remove(hba->dev, clki->max_freq);
   1502}
   1503
   1504static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
   1505{
   1506	unsigned long flags;
   1507
   1508	devfreq_suspend_device(hba->devfreq);
   1509	spin_lock_irqsave(hba->host->host_lock, flags);
   1510	hba->clk_scaling.window_start_t = 0;
   1511	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1512}
   1513
   1514static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
   1515{
   1516	unsigned long flags;
   1517	bool suspend = false;
   1518
   1519	cancel_work_sync(&hba->clk_scaling.suspend_work);
   1520	cancel_work_sync(&hba->clk_scaling.resume_work);
   1521
   1522	spin_lock_irqsave(hba->host->host_lock, flags);
   1523	if (!hba->clk_scaling.is_suspended) {
   1524		suspend = true;
   1525		hba->clk_scaling.is_suspended = true;
   1526	}
   1527	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1528
   1529	if (suspend)
   1530		__ufshcd_suspend_clkscaling(hba);
   1531}
   1532
   1533static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
   1534{
   1535	unsigned long flags;
   1536	bool resume = false;
   1537
   1538	spin_lock_irqsave(hba->host->host_lock, flags);
   1539	if (hba->clk_scaling.is_suspended) {
   1540		resume = true;
   1541		hba->clk_scaling.is_suspended = false;
   1542	}
   1543	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1544
   1545	if (resume)
   1546		devfreq_resume_device(hba->devfreq);
   1547}
   1548
   1549static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
   1550		struct device_attribute *attr, char *buf)
   1551{
   1552	struct ufs_hba *hba = dev_get_drvdata(dev);
   1553
   1554	return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
   1555}
   1556
   1557static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
   1558		struct device_attribute *attr, const char *buf, size_t count)
   1559{
   1560	struct ufs_hba *hba = dev_get_drvdata(dev);
   1561	u32 value;
   1562	int err = 0;
   1563
   1564	if (kstrtou32(buf, 0, &value))
   1565		return -EINVAL;
   1566
   1567	down(&hba->host_sem);
   1568	if (!ufshcd_is_user_access_allowed(hba)) {
   1569		err = -EBUSY;
   1570		goto out;
   1571	}
   1572
   1573	value = !!value;
   1574	if (value == hba->clk_scaling.is_enabled)
   1575		goto out;
   1576
   1577	ufshcd_rpm_get_sync(hba);
   1578	ufshcd_hold(hba, false);
   1579
   1580	hba->clk_scaling.is_enabled = value;
   1581
   1582	if (value) {
   1583		ufshcd_resume_clkscaling(hba);
   1584	} else {
   1585		ufshcd_suspend_clkscaling(hba);
   1586		err = ufshcd_devfreq_scale(hba, true);
   1587		if (err)
   1588			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
   1589					__func__, err);
   1590	}
   1591
   1592	ufshcd_release(hba);
   1593	ufshcd_rpm_put_sync(hba);
   1594out:
   1595	up(&hba->host_sem);
   1596	return err ? err : count;
   1597}
   1598
   1599static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
   1600{
   1601	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
   1602	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
   1603	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
   1604	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
   1605	hba->clk_scaling.enable_attr.attr.mode = 0644;
   1606	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
   1607		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
   1608}
   1609
   1610static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
   1611{
   1612	if (hba->clk_scaling.enable_attr.attr.name)
   1613		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
   1614}
   1615
   1616static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
   1617{
   1618	char wq_name[sizeof("ufs_clkscaling_00")];
   1619
   1620	if (!ufshcd_is_clkscaling_supported(hba))
   1621		return;
   1622
   1623	if (!hba->clk_scaling.min_gear)
   1624		hba->clk_scaling.min_gear = UFS_HS_G1;
   1625
   1626	INIT_WORK(&hba->clk_scaling.suspend_work,
   1627		  ufshcd_clk_scaling_suspend_work);
   1628	INIT_WORK(&hba->clk_scaling.resume_work,
   1629		  ufshcd_clk_scaling_resume_work);
   1630
   1631	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
   1632		 hba->host->host_no);
   1633	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
   1634
   1635	hba->clk_scaling.is_initialized = true;
   1636}
   1637
   1638static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
   1639{
   1640	if (!hba->clk_scaling.is_initialized)
   1641		return;
   1642
   1643	ufshcd_remove_clk_scaling_sysfs(hba);
   1644	destroy_workqueue(hba->clk_scaling.workq);
   1645	ufshcd_devfreq_remove(hba);
   1646	hba->clk_scaling.is_initialized = false;
   1647}
   1648
   1649static void ufshcd_ungate_work(struct work_struct *work)
   1650{
   1651	int ret;
   1652	unsigned long flags;
   1653	struct ufs_hba *hba = container_of(work, struct ufs_hba,
   1654			clk_gating.ungate_work);
   1655
   1656	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
   1657
   1658	spin_lock_irqsave(hba->host->host_lock, flags);
   1659	if (hba->clk_gating.state == CLKS_ON) {
   1660		spin_unlock_irqrestore(hba->host->host_lock, flags);
   1661		goto unblock_reqs;
   1662	}
   1663
   1664	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1665	ufshcd_hba_vreg_set_hpm(hba);
   1666	ufshcd_setup_clocks(hba, true);
   1667
   1668	ufshcd_enable_irq(hba);
   1669
   1670	/* Exit from hibern8 */
   1671	if (ufshcd_can_hibern8_during_gating(hba)) {
   1672		/* Prevent gating in this path */
   1673		hba->clk_gating.is_suspended = true;
   1674		if (ufshcd_is_link_hibern8(hba)) {
   1675			ret = ufshcd_uic_hibern8_exit(hba);
   1676			if (ret)
   1677				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
   1678					__func__, ret);
   1679			else
   1680				ufshcd_set_link_active(hba);
   1681		}
   1682		hba->clk_gating.is_suspended = false;
   1683	}
   1684unblock_reqs:
   1685	ufshcd_scsi_unblock_requests(hba);
   1686}
   1687
   1688/**
   1689 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
   1690 * Also, exit from hibern8 mode and set the link as active.
   1691 * @hba: per adapter instance
   1692 * @async: This indicates whether caller should ungate clocks asynchronously.
   1693 */
   1694int ufshcd_hold(struct ufs_hba *hba, bool async)
   1695{
   1696	int rc = 0;
   1697	bool flush_result;
   1698	unsigned long flags;
   1699
   1700	if (!ufshcd_is_clkgating_allowed(hba) ||
   1701	    !hba->clk_gating.is_initialized)
   1702		goto out;
   1703	spin_lock_irqsave(hba->host->host_lock, flags);
   1704	hba->clk_gating.active_reqs++;
   1705
   1706start:
   1707	switch (hba->clk_gating.state) {
   1708	case CLKS_ON:
   1709		/*
   1710		 * Wait for the ungate work to complete if in progress.
   1711		 * Though the clocks may be in ON state, the link could
   1712		 * still be in hibern8 state if hibern8 is allowed
   1713		 * during clock gating.
   1714		 * Make sure we also exit hibern8 state, in addition to
   1715		 * the clocks being ON.
   1716		 */
   1717		if (ufshcd_can_hibern8_during_gating(hba) &&
   1718		    ufshcd_is_link_hibern8(hba)) {
   1719			if (async) {
   1720				rc = -EAGAIN;
   1721				hba->clk_gating.active_reqs--;
   1722				break;
   1723			}
   1724			spin_unlock_irqrestore(hba->host->host_lock, flags);
   1725			flush_result = flush_work(&hba->clk_gating.ungate_work);
   1726			if (hba->clk_gating.is_suspended && !flush_result)
   1727				goto out;
   1728			spin_lock_irqsave(hba->host->host_lock, flags);
   1729			goto start;
   1730		}
   1731		break;
   1732	case REQ_CLKS_OFF:
   1733		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
   1734			hba->clk_gating.state = CLKS_ON;
   1735			trace_ufshcd_clk_gating(dev_name(hba->dev),
   1736						hba->clk_gating.state);
   1737			break;
   1738		}
   1739		/*
   1740		 * If we are here, it means gating work is either done or
   1741		 * currently running. Hence, fall through to cancel gating
   1742		 * work and to enable clocks.
   1743		 */
   1744		fallthrough;
   1745	case CLKS_OFF:
   1746		hba->clk_gating.state = REQ_CLKS_ON;
   1747		trace_ufshcd_clk_gating(dev_name(hba->dev),
   1748					hba->clk_gating.state);
   1749		if (queue_work(hba->clk_gating.clk_gating_workq,
   1750			       &hba->clk_gating.ungate_work))
   1751			ufshcd_scsi_block_requests(hba);
   1752		/*
   1753		 * fall through to check if we should wait for this
   1754		 * work to be done or not.
   1755		 */
   1756		fallthrough;
   1757	case REQ_CLKS_ON:
   1758		if (async) {
   1759			rc = -EAGAIN;
   1760			hba->clk_gating.active_reqs--;
   1761			break;
   1762		}
   1763
   1764		spin_unlock_irqrestore(hba->host->host_lock, flags);
   1765		flush_work(&hba->clk_gating.ungate_work);
   1766		/* Make sure state is CLKS_ON before returning */
   1767		spin_lock_irqsave(hba->host->host_lock, flags);
   1768		goto start;
   1769	default:
   1770		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
   1771				__func__, hba->clk_gating.state);
   1772		break;
   1773	}
   1774	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1775out:
   1776	return rc;
   1777}
   1778EXPORT_SYMBOL_GPL(ufshcd_hold);
   1779
   1780static void ufshcd_gate_work(struct work_struct *work)
   1781{
   1782	struct ufs_hba *hba = container_of(work, struct ufs_hba,
   1783			clk_gating.gate_work.work);
   1784	unsigned long flags;
   1785	int ret;
   1786
   1787	spin_lock_irqsave(hba->host->host_lock, flags);
   1788	/*
   1789	 * If this work was scheduled for cancellation, the gating state
   1790	 * will have been marked as REQ_CLKS_ON. In that case, save time
   1791	 * by skipping the gating work and exit after changing the clock
   1792	 * state to CLKS_ON.
   1793	 */
   1794	if (hba->clk_gating.is_suspended ||
   1795		(hba->clk_gating.state != REQ_CLKS_OFF)) {
   1796		hba->clk_gating.state = CLKS_ON;
   1797		trace_ufshcd_clk_gating(dev_name(hba->dev),
   1798					hba->clk_gating.state);
   1799		goto rel_lock;
   1800	}
   1801
   1802	if (hba->clk_gating.active_reqs
   1803		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
   1804		|| hba->outstanding_reqs || hba->outstanding_tasks
   1805		|| hba->active_uic_cmd || hba->uic_async_done)
   1806		goto rel_lock;
   1807
   1808	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1809
   1810	/* put the link into hibern8 mode before turning off clocks */
   1811	if (ufshcd_can_hibern8_during_gating(hba)) {
   1812		ret = ufshcd_uic_hibern8_enter(hba);
   1813		if (ret) {
   1814			hba->clk_gating.state = CLKS_ON;
   1815			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
   1816					__func__, ret);
   1817			trace_ufshcd_clk_gating(dev_name(hba->dev),
   1818						hba->clk_gating.state);
   1819			goto out;
   1820		}
   1821		ufshcd_set_link_hibern8(hba);
   1822	}
   1823
   1824	ufshcd_disable_irq(hba);
   1825
   1826	ufshcd_setup_clocks(hba, false);
   1827
   1828	/* Put the host controller in low power mode if possible */
   1829	ufshcd_hba_vreg_set_lpm(hba);
   1830	/*
   1831	 * If this work was scheduled for cancellation, the gating state
   1832	 * will have been marked as REQ_CLKS_ON. In that case keep the
   1833	 * state as REQ_CLKS_ON, which anyway implies that clocks are off
	 1834	 * and a request to turn them on is pending. This way we keep
   1835	 * the state machine intact, which ultimately prevents the cancel
   1836	 * work from being run multiple times when new requests arrive
   1837	 * before the current cancel work is done.
   1838	 */
   1839	spin_lock_irqsave(hba->host->host_lock, flags);
   1840	if (hba->clk_gating.state == REQ_CLKS_OFF) {
   1841		hba->clk_gating.state = CLKS_OFF;
   1842		trace_ufshcd_clk_gating(dev_name(hba->dev),
   1843					hba->clk_gating.state);
   1844	}
   1845rel_lock:
   1846	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1847out:
   1848	return;
   1849}
   1850
   1851/* host lock must be held before calling this variant */
   1852static void __ufshcd_release(struct ufs_hba *hba)
   1853{
   1854	if (!ufshcd_is_clkgating_allowed(hba))
   1855		return;
   1856
   1857	hba->clk_gating.active_reqs--;
   1858
   1859	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
   1860	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
   1861	    hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
   1862	    hba->active_uic_cmd || hba->uic_async_done ||
   1863	    hba->clk_gating.state == CLKS_OFF)
   1864		return;
   1865
   1866	hba->clk_gating.state = REQ_CLKS_OFF;
   1867	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
   1868	queue_delayed_work(hba->clk_gating.clk_gating_workq,
   1869			   &hba->clk_gating.gate_work,
   1870			   msecs_to_jiffies(hba->clk_gating.delay_ms));
   1871}
   1872
   1873void ufshcd_release(struct ufs_hba *hba)
   1874{
   1875	unsigned long flags;
   1876
   1877	spin_lock_irqsave(hba->host->host_lock, flags);
   1878	__ufshcd_release(hba);
   1879	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1880}
   1881EXPORT_SYMBOL_GPL(ufshcd_release);
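
/*
 * Usage sketch (illustrative only, not part of this driver): a caller that
 * needs the controller clocks running brackets its work with
 * ufshcd_hold()/ufshcd_release(). The helper name example_do_hba_work() and
 * its body are hypothetical; only the hold/release pairing mirrors how this
 * file itself uses the API (see e.g. ufshcd_send_uic_cmd()).
 */
static void __maybe_unused example_do_hba_work(struct ufs_hba *hba)
{
	/* Block until clocks are on and the link has left hibern8. */
	ufshcd_hold(hba, false);

	/* ... access controller registers or queue work here ... */

	/* Drop the reference; gate_work is re-armed after delay_ms. */
	ufshcd_release(hba);
}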
   1882
   1883static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
   1884		struct device_attribute *attr, char *buf)
   1885{
   1886	struct ufs_hba *hba = dev_get_drvdata(dev);
   1887
   1888	return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
   1889}
   1890
   1891void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
   1892{
   1893	struct ufs_hba *hba = dev_get_drvdata(dev);
   1894	unsigned long flags;
   1895
   1896	spin_lock_irqsave(hba->host->host_lock, flags);
   1897	hba->clk_gating.delay_ms = value;
   1898	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1899}
   1900EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
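
/*
 * Usage sketch (hypothetical, not part of this driver): a vendor host driver
 * may override the default 150 ms gate delay from its own setup path via the
 * exported helper. The 10 ms value below is purely illustrative.
 */
static void __maybe_unused example_tune_clkgate_delay(struct ufs_hba *hba)
{
	/* Gate clocks 10 ms after the last request instead of 150 ms. */
	ufshcd_clkgate_delay_set(hba->dev, 10);
}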
   1901
   1902static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
   1903		struct device_attribute *attr, const char *buf, size_t count)
   1904{
   1905	unsigned long value;
   1906
   1907	if (kstrtoul(buf, 0, &value))
   1908		return -EINVAL;
   1909
   1910	ufshcd_clkgate_delay_set(dev, value);
   1911	return count;
   1912}
   1913
   1914static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
   1915		struct device_attribute *attr, char *buf)
   1916{
   1917	struct ufs_hba *hba = dev_get_drvdata(dev);
   1918
   1919	return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
   1920}
   1921
   1922static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
   1923		struct device_attribute *attr, const char *buf, size_t count)
   1924{
   1925	struct ufs_hba *hba = dev_get_drvdata(dev);
   1926	unsigned long flags;
   1927	u32 value;
   1928
   1929	if (kstrtou32(buf, 0, &value))
   1930		return -EINVAL;
   1931
   1932	value = !!value;
   1933
   1934	spin_lock_irqsave(hba->host->host_lock, flags);
   1935	if (value == hba->clk_gating.is_enabled)
   1936		goto out;
   1937
   1938	if (value)
   1939		__ufshcd_release(hba);
   1940	else
   1941		hba->clk_gating.active_reqs++;
   1942
   1943	hba->clk_gating.is_enabled = value;
   1944out:
   1945	spin_unlock_irqrestore(hba->host->host_lock, flags);
   1946	return count;
   1947}
   1948
   1949static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
   1950{
   1951	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
   1952	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
   1953	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
   1954	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
   1955	hba->clk_gating.delay_attr.attr.mode = 0644;
   1956	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
   1957		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
   1958
   1959	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
   1960	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
   1961	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
   1962	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
   1963	hba->clk_gating.enable_attr.attr.mode = 0644;
   1964	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
   1965		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
   1966}
   1967
   1968static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
   1969{
   1970	if (hba->clk_gating.delay_attr.attr.name)
   1971		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
   1972	if (hba->clk_gating.enable_attr.attr.name)
   1973		device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
   1974}
   1975
   1976static void ufshcd_init_clk_gating(struct ufs_hba *hba)
   1977{
   1978	char wq_name[sizeof("ufs_clk_gating_00")];
   1979
   1980	if (!ufshcd_is_clkgating_allowed(hba))
   1981		return;
   1982
   1983	hba->clk_gating.state = CLKS_ON;
   1984
   1985	hba->clk_gating.delay_ms = 150;
   1986	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
   1987	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
   1988
   1989	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
   1990		 hba->host->host_no);
   1991	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
   1992					WQ_MEM_RECLAIM | WQ_HIGHPRI);
   1993
   1994	ufshcd_init_clk_gating_sysfs(hba);
   1995
   1996	hba->clk_gating.is_enabled = true;
   1997	hba->clk_gating.is_initialized = true;
   1998}
   1999
   2000static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
   2001{
   2002	if (!hba->clk_gating.is_initialized)
   2003		return;
   2004
   2005	ufshcd_remove_clk_gating_sysfs(hba);
   2006
   2007	/* Ungate the clock if necessary. */
   2008	ufshcd_hold(hba, false);
   2009	hba->clk_gating.is_initialized = false;
   2010	ufshcd_release(hba);
   2011
   2012	destroy_workqueue(hba->clk_gating.clk_gating_workq);
   2013}
   2014
   2015/* Must be called with host lock acquired */
   2016static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
   2017{
   2018	bool queue_resume_work = false;
   2019	ktime_t curr_t = ktime_get();
   2020	unsigned long flags;
   2021
   2022	if (!ufshcd_is_clkscaling_supported(hba))
   2023		return;
   2024
   2025	spin_lock_irqsave(hba->host->host_lock, flags);
   2026	if (!hba->clk_scaling.active_reqs++)
   2027		queue_resume_work = true;
   2028
   2029	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
   2030		spin_unlock_irqrestore(hba->host->host_lock, flags);
   2031		return;
   2032	}
   2033
   2034	if (queue_resume_work)
   2035		queue_work(hba->clk_scaling.workq,
   2036			   &hba->clk_scaling.resume_work);
   2037
   2038	if (!hba->clk_scaling.window_start_t) {
   2039		hba->clk_scaling.window_start_t = curr_t;
   2040		hba->clk_scaling.tot_busy_t = 0;
   2041		hba->clk_scaling.is_busy_started = false;
   2042	}
   2043
   2044	if (!hba->clk_scaling.is_busy_started) {
   2045		hba->clk_scaling.busy_start_t = curr_t;
   2046		hba->clk_scaling.is_busy_started = true;
   2047	}
   2048	spin_unlock_irqrestore(hba->host->host_lock, flags);
   2049}
   2050
   2051static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
   2052{
   2053	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
   2054	unsigned long flags;
   2055
   2056	if (!ufshcd_is_clkscaling_supported(hba))
   2057		return;
   2058
   2059	spin_lock_irqsave(hba->host->host_lock, flags);
   2060	hba->clk_scaling.active_reqs--;
   2061	if (!hba->outstanding_reqs && scaling->is_busy_started) {
   2062		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
   2063					scaling->busy_start_t));
   2064		scaling->busy_start_t = 0;
   2065		scaling->is_busy_started = false;
   2066	}
   2067	spin_unlock_irqrestore(hba->host->host_lock, flags);
   2068}
   2069
   2070static inline int ufshcd_monitor_opcode2dir(u8 opcode)
   2071{
   2072	if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
   2073		return READ;
   2074	else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
   2075		return WRITE;
   2076	else
   2077		return -EINVAL;
   2078}
   2079
   2080static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
   2081						struct ufshcd_lrb *lrbp)
   2082{
   2083	struct ufs_hba_monitor *m = &hba->monitor;
   2084
   2085	return (m->enabled && lrbp && lrbp->cmd &&
   2086		(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
   2087		ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
   2088}
   2089
   2090static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
   2091{
   2092	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
   2093	unsigned long flags;
   2094
   2095	spin_lock_irqsave(hba->host->host_lock, flags);
   2096	if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
   2097		hba->monitor.busy_start_ts[dir] = ktime_get();
   2098	spin_unlock_irqrestore(hba->host->host_lock, flags);
   2099}
   2100
   2101static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
   2102{
   2103	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
   2104	unsigned long flags;
   2105
   2106	spin_lock_irqsave(hba->host->host_lock, flags);
   2107	if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
   2108		struct request *req = scsi_cmd_to_rq(lrbp->cmd);
   2109		struct ufs_hba_monitor *m = &hba->monitor;
   2110		ktime_t now, inc, lat;
   2111
   2112		now = lrbp->compl_time_stamp;
   2113		inc = ktime_sub(now, m->busy_start_ts[dir]);
   2114		m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
   2115		m->nr_sec_rw[dir] += blk_rq_sectors(req);
   2116
   2117		/* Update latencies */
   2118		m->nr_req[dir]++;
   2119		lat = ktime_sub(now, lrbp->issue_time_stamp);
   2120		m->lat_sum[dir] += lat;
   2121		if (m->lat_max[dir] < lat || !m->lat_max[dir])
   2122			m->lat_max[dir] = lat;
   2123		if (m->lat_min[dir] > lat || !m->lat_min[dir])
   2124			m->lat_min[dir] = lat;
   2125
   2126		m->nr_queued[dir]--;
   2127		/* Push forward the busy start of monitor */
   2128		m->busy_start_ts[dir] = now;
   2129	}
   2130	spin_unlock_irqrestore(hba->host->host_lock, flags);
   2131}
   2132
   2133/**
   2134 * ufshcd_send_command - Send SCSI or device management commands
   2135 * @hba: per adapter instance
   2136 * @task_tag: Task tag of the command
   2137 */
   2138static inline
   2139void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
   2140{
   2141	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
   2142	unsigned long flags;
   2143
   2144	lrbp->issue_time_stamp = ktime_get();
   2145	lrbp->compl_time_stamp = ktime_set(0, 0);
   2146	ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
   2147	ufshcd_clk_scaling_start_busy(hba);
   2148	if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
   2149		ufshcd_start_monitor(hba, lrbp);
   2150
   2151	spin_lock_irqsave(&hba->outstanding_lock, flags);
   2152	if (hba->vops && hba->vops->setup_xfer_req)
   2153		hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
   2154	__set_bit(task_tag, &hba->outstanding_reqs);
   2155	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
   2156	spin_unlock_irqrestore(&hba->outstanding_lock, flags);
   2157}
   2158
   2159/**
   2160 * ufshcd_copy_sense_data - Copy sense data in case of check condition
   2161 * @lrbp: pointer to local reference block
   2162 */
   2163static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
   2164{
   2165	u8 *const sense_buffer = lrbp->cmd->sense_buffer;
   2166	int len;
   2167
   2168	if (sense_buffer &&
   2169	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
   2170		int len_to_copy;
   2171
   2172		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
   2173		len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
   2174
   2175		memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
   2176		       len_to_copy);
   2177	}
   2178}
   2179
   2180/**
   2181 * ufshcd_copy_query_response() - Copy the Query Response and the data
   2182 * descriptor
   2183 * @hba: per adapter instance
   2184 * @lrbp: pointer to local reference block
   2185 */
   2186static
   2187int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
   2188{
   2189	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
   2190
   2191	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
   2192
   2193	/* Get the descriptor */
   2194	if (hba->dev_cmd.query.descriptor &&
   2195	    lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
   2196		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
   2197				GENERAL_UPIU_REQUEST_SIZE;
   2198		u16 resp_len;
   2199		u16 buf_len;
   2200
   2201		/* data segment length */
   2202		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
   2203						MASK_QUERY_DATA_SEG_LEN;
   2204		buf_len = be16_to_cpu(
   2205				hba->dev_cmd.query.request.upiu_req.length);
   2206		if (likely(buf_len >= resp_len)) {
   2207			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
   2208		} else {
   2209			dev_warn(hba->dev,
   2210				 "%s: rsp size %d is bigger than buffer size %d",
   2211				 __func__, resp_len, buf_len);
   2212			return -EINVAL;
   2213		}
   2214	}
   2215
   2216	return 0;
   2217}
   2218
   2219/**
   2220 * ufshcd_hba_capabilities - Read controller capabilities
   2221 * @hba: per adapter instance
   2222 *
   2223 * Return: 0 on success, negative on error.
   2224 */
   2225static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
   2226{
   2227	int err;
   2228
   2229	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
   2230
   2231	/* nutrs and nutmrs are 0 based values */
   2232	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
   2233	hba->nutmrs =
   2234	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
   2235	hba->reserved_slot = hba->nutrs - 1;
   2236
   2237	/* Read crypto capabilities */
   2238	err = ufshcd_hba_init_crypto_capabilities(hba);
   2239	if (err)
   2240		dev_err(hba->dev, "crypto setup failed\n");
   2241
   2242	return err;
   2243}
   2244
   2245/**
   2246 * ufshcd_ready_for_uic_cmd - Check if controller is ready
   2247 *                            to accept UIC commands
   2248 * @hba: per adapter instance
   2249 * Return true on success, else false
   2250 */
   2251static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
   2252{
   2253	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
   2254}
   2255
   2256/**
   2257 * ufshcd_get_upmcrs - Get the power mode change request status
   2258 * @hba: Pointer to adapter instance
   2259 *
   2260 * This function gets the UPMCRS field of HCS register
   2261 * Returns value of UPMCRS field
   2262 */
   2263static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
   2264{
   2265	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
   2266}
   2267
   2268/**
   2269 * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
   2270 * @hba: per adapter instance
   2271 * @uic_cmd: UIC command
   2272 */
   2273static inline void
   2274ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
   2275{
   2276	lockdep_assert_held(&hba->uic_cmd_mutex);
   2277
   2278	WARN_ON(hba->active_uic_cmd);
   2279
   2280	hba->active_uic_cmd = uic_cmd;
   2281
   2282	/* Write Args */
   2283	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
   2284	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
   2285	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
   2286
   2287	ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
   2288
   2289	/* Write UIC Cmd */
   2290	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
   2291		      REG_UIC_COMMAND);
   2292}
   2293
   2294/**
   2295 * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
   2296 * @hba: per adapter instance
   2297 * @uic_cmd: UIC command
   2298 *
   2299 * Returns 0 only on success.
   2300 */
   2301static int
   2302ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
   2303{
   2304	int ret;
   2305	unsigned long flags;
   2306
   2307	lockdep_assert_held(&hba->uic_cmd_mutex);
   2308
   2309	if (wait_for_completion_timeout(&uic_cmd->done,
   2310					msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
   2311		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
   2312	} else {
   2313		ret = -ETIMEDOUT;
   2314		dev_err(hba->dev,
   2315			"uic cmd 0x%x with arg3 0x%x completion timeout\n",
   2316			uic_cmd->command, uic_cmd->argument3);
   2317
   2318		if (!uic_cmd->cmd_active) {
   2319			dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
   2320				__func__);
   2321			ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
   2322		}
   2323	}
   2324
   2325	spin_lock_irqsave(hba->host->host_lock, flags);
   2326	hba->active_uic_cmd = NULL;
   2327	spin_unlock_irqrestore(hba->host->host_lock, flags);
   2328
   2329	return ret;
   2330}
   2331
   2332/**
   2333 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
   2334 * @hba: per adapter instance
   2335 * @uic_cmd: UIC command
   2336 * @completion: initialize the completion only if this is set to true
   2337 *
   2338 * Returns 0 only on success.
   2339 */
   2340static int
   2341__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
   2342		      bool completion)
   2343{
   2344	lockdep_assert_held(&hba->uic_cmd_mutex);
   2345	lockdep_assert_held(hba->host->host_lock);
   2346
   2347	if (!ufshcd_ready_for_uic_cmd(hba)) {
   2348		dev_err(hba->dev,
   2349			"Controller not ready to accept UIC commands\n");
   2350		return -EIO;
   2351	}
   2352
   2353	if (completion)
   2354		init_completion(&uic_cmd->done);
   2355
   2356	uic_cmd->cmd_active = 1;
   2357	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
   2358
   2359	return 0;
   2360}
   2361
   2362/**
   2363 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
   2364 * @hba: per adapter instance
   2365 * @uic_cmd: UIC command
   2366 *
   2367 * Returns 0 only on success.
   2368 */
   2369int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
   2370{
   2371	int ret;
   2372	unsigned long flags;
   2373
   2374	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
   2375		return 0;
   2376
   2377	ufshcd_hold(hba, false);
   2378	mutex_lock(&hba->uic_cmd_mutex);
   2379	ufshcd_add_delay_before_dme_cmd(hba);
   2380
   2381	spin_lock_irqsave(hba->host->host_lock, flags);
   2382	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
   2383	spin_unlock_irqrestore(hba->host->host_lock, flags);
   2384	if (!ret)
   2385		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
   2386
   2387	mutex_unlock(&hba->uic_cmd_mutex);
   2388
   2389	ufshcd_release(hba);
   2390	return ret;
   2391}
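
/*
 * Usage sketch (illustrative): reading a single UniPro attribute with
 * ufshcd_send_uic_cmd(). This mirrors what the DME helpers in this file do;
 * the helper name and the attr_sel/mib_val parameters are placeholders, and
 * UIC_CMD_DME_GET / UIC_ARG_MIB come from the UFSHCI/UniPro headers included
 * above.
 */
static int __maybe_unused example_dme_get(struct ufs_hba *hba, u32 attr_sel,
					  u32 *mib_val)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_GET;
	uic_cmd.argument1 = UIC_ARG_MIB(attr_sel);

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (!ret && mib_val)
		*mib_val = uic_cmd.argument3;

	return ret;
}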
   2392
   2393/**
   2394 * ufshcd_map_sg - Map scatter-gather list to prdt
   2395 * @hba: per adapter instance
   2396 * @lrbp: pointer to local reference block
   2397 *
   2398 * Returns 0 in case of success, non-zero value in case of failure
   2399 */
   2400static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
   2401{
   2402	struct ufshcd_sg_entry *prd_table;
   2403	struct scatterlist *sg;
   2404	struct scsi_cmnd *cmd;
   2405	int sg_segments;
   2406	int i;
   2407
   2408	cmd = lrbp->cmd;
   2409	sg_segments = scsi_dma_map(cmd);
   2410	if (sg_segments < 0)
   2411		return sg_segments;
   2412
   2413	if (sg_segments) {
   2414
   2415		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
   2416			lrbp->utr_descriptor_ptr->prd_table_length =
   2417				cpu_to_le16((sg_segments *
   2418					sizeof(struct ufshcd_sg_entry)));
   2419		else
   2420			lrbp->utr_descriptor_ptr->prd_table_length =
   2421				cpu_to_le16(sg_segments);
   2422
   2423		prd_table = lrbp->ucd_prdt_ptr;
   2424
   2425		scsi_for_each_sg(cmd, sg, sg_segments, i) {
   2426			const unsigned int len = sg_dma_len(sg);
   2427
   2428			/*
   2429			 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
   2430			 * based value that indicates the length, in bytes, of
   2431			 * the data block. A maximum of length of 256KB may
   2432			 * exist for any entry. Bits 1:0 of this field shall be
   2433			 * 11b to indicate Dword granularity. A value of '3'
   2434			 * indicates 4 bytes, '7' indicates 8 bytes, etc."
   2435			 */
   2436			WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
   2437			prd_table[i].size = cpu_to_le32(len - 1);
   2438			prd_table[i].addr = cpu_to_le64(sg->dma_address);
   2439			prd_table[i].reserved = 0;
   2440		}
   2441	} else {
   2442		lrbp->utr_descriptor_ptr->prd_table_length = 0;
   2443	}
   2444
   2445	return 0;
   2446}
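
/*
 * Worked example of the Data Byte Count encoding above (a sketch based on the
 * rule quoted from the UFSHCI spec): a 4 KiB scatterlist segment with
 * len = 0x1000 is programmed as prd_table[i].size = cpu_to_le32(len - 1) =
 * 0x0FFF, i.e. a zero-based count whose low two bits are 11b (Dword
 * granularity). The maximum 256 KiB segment would be encoded as 0x3FFFF.
 */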
   2447
   2448/**
   2449 * ufshcd_enable_intr - enable interrupts
   2450 * @hba: per adapter instance
   2451 * @intrs: interrupt bits
   2452 */
   2453static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
   2454{
   2455	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
   2456
   2457	if (hba->ufs_version == ufshci_version(1, 0)) {
   2458		u32 rw;
   2459		rw = set & INTERRUPT_MASK_RW_VER_10;
   2460		set = rw | ((set ^ intrs) & intrs);
   2461	} else {
   2462		set |= intrs;
   2463	}
   2464
   2465	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
   2466}
   2467
   2468/**
   2469 * ufshcd_disable_intr - disable interrupts
   2470 * @hba: per adapter instance
   2471 * @intrs: interrupt bits
   2472 */
   2473static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
   2474{
   2475	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
   2476
   2477	if (hba->ufs_version == ufshci_version(1, 0)) {
   2478		u32 rw;
   2479		rw = (set & INTERRUPT_MASK_RW_VER_10) &
   2480			~(intrs & INTERRUPT_MASK_RW_VER_10);
   2481		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
   2482
   2483	} else {
   2484		set &= ~intrs;
   2485	}
   2486
   2487	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
   2488}
   2489
   2490/**
   2491 * ufshcd_prepare_req_desc_hdr() - Fills the request header
   2492 * descriptor according to the request
   2493 * @lrbp: pointer to local reference block
   2494 * @upiu_flags: flags required in the header
   2495 * @cmd_dir: request's data direction
   2496 */
   2497static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
   2498			u8 *upiu_flags, enum dma_data_direction cmd_dir)
   2499{
   2500	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
   2501	u32 data_direction;
   2502	u32 dword_0;
   2503	u32 dword_1 = 0;
   2504	u32 dword_3 = 0;
   2505
   2506	if (cmd_dir == DMA_FROM_DEVICE) {
   2507		data_direction = UTP_DEVICE_TO_HOST;
   2508		*upiu_flags = UPIU_CMD_FLAGS_READ;
   2509	} else if (cmd_dir == DMA_TO_DEVICE) {
   2510		data_direction = UTP_HOST_TO_DEVICE;
   2511		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
   2512	} else {
   2513		data_direction = UTP_NO_DATA_TRANSFER;
   2514		*upiu_flags = UPIU_CMD_FLAGS_NONE;
   2515	}
   2516
   2517	dword_0 = data_direction | (lrbp->command_type
   2518				<< UPIU_COMMAND_TYPE_OFFSET);
   2519	if (lrbp->intr_cmd)
   2520		dword_0 |= UTP_REQ_DESC_INT_CMD;
   2521
   2522	/* Prepare crypto related dwords */
   2523	ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
   2524
   2525	/* Transfer request descriptor header fields */
   2526	req_desc->header.dword_0 = cpu_to_le32(dword_0);
   2527	req_desc->header.dword_1 = cpu_to_le32(dword_1);
   2528	/*
   2529	 * Assign an invalid value to the command status. The controller
   2530	 * updates OCS with the actual command status on command
   2531	 * completion.
   2532	 */
   2533	req_desc->header.dword_2 =
   2534		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
   2535	req_desc->header.dword_3 = cpu_to_le32(dword_3);
   2536
   2537	req_desc->prd_table_length = 0;
   2538}
   2539
   2540/**
   2541 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
   2542 * for scsi commands
   2543 * @lrbp: local reference block pointer
   2544 * @upiu_flags: flags
   2545 */
   2546static
   2547void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
   2548{
   2549	struct scsi_cmnd *cmd = lrbp->cmd;
   2550	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
   2551	unsigned short cdb_len;
   2552
   2553	/* command descriptor fields */
   2554	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
   2555				UPIU_TRANSACTION_COMMAND, upiu_flags,
   2556				lrbp->lun, lrbp->task_tag);
   2557	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
   2558				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
   2559
   2560	/* Total EHS length and Data segment length will be zero */
   2561	ucd_req_ptr->header.dword_2 = 0;
   2562
   2563	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
   2564
   2565	cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
   2566	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
   2567	memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
   2568
   2569	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
   2570}
   2571
   2572/**
   2573 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
   2574 * for query requests
   2575 * @hba: UFS hba
   2576 * @lrbp: local reference block pointer
   2577 * @upiu_flags: flags
   2578 */
   2579static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
   2580				struct ufshcd_lrb *lrbp, u8 upiu_flags)
   2581{
   2582	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
   2583	struct ufs_query *query = &hba->dev_cmd.query;
   2584	u16 len = be16_to_cpu(query->request.upiu_req.length);
   2585
   2586	/* Query request header */
   2587	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
   2588			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
   2589			lrbp->lun, lrbp->task_tag);
   2590	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
   2591			0, query->request.query_func, 0, 0);
   2592
   2593	/* Data segment length is only needed for WRITE_DESC */
   2594	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
   2595		ucd_req_ptr->header.dword_2 =
   2596			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
   2597	else
   2598		ucd_req_ptr->header.dword_2 = 0;
   2599
   2600	/* Copy the Query Request buffer as is */
   2601	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
   2602			QUERY_OSF_SIZE);
   2603
   2604	/* Copy the Descriptor */
   2605	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
   2606		memcpy(ucd_req_ptr + 1, query->descriptor, len);
   2607
   2608	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
   2609}
   2610
   2611static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
   2612{
   2613	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
   2614
   2615	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
   2616
   2617	/* command descriptor fields */
   2618	ucd_req_ptr->header.dword_0 =
   2619		UPIU_HEADER_DWORD(
   2620			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
   2621	/* clear rest of the fields of basic header */
   2622	ucd_req_ptr->header.dword_1 = 0;
   2623	ucd_req_ptr->header.dword_2 = 0;
   2624
   2625	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
   2626}
   2627
   2628/**
   2629 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
   2630 *			     for Device Management Purposes
   2631 * @hba: per adapter instance
   2632 * @lrbp: pointer to local reference block
   2633 */
   2634static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
   2635				      struct ufshcd_lrb *lrbp)
   2636{
   2637	u8 upiu_flags;
   2638	int ret = 0;
   2639
   2640	if (hba->ufs_version <= ufshci_version(1, 1))
   2641		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
   2642	else
   2643		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
   2644
   2645	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
   2646	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
   2647		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
   2648	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
   2649		ufshcd_prepare_utp_nop_upiu(lrbp);
   2650	else
   2651		ret = -EINVAL;
   2652
   2653	return ret;
   2654}
   2655
   2656/**
   2657 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
   2658 *			   for SCSI Purposes
   2659 * @hba: per adapter instance
   2660 * @lrbp: pointer to local reference block
   2661 */
   2662static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
   2663{
   2664	u8 upiu_flags;
   2665	int ret = 0;
   2666
   2667	if (hba->ufs_version <= ufshci_version(1, 1))
   2668		lrbp->command_type = UTP_CMD_TYPE_SCSI;
   2669	else
   2670		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
   2671
   2672	if (likely(lrbp->cmd)) {
   2673		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
   2674						lrbp->cmd->sc_data_direction);
   2675		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
   2676	} else {
   2677		ret = -EINVAL;
   2678	}
   2679
   2680	return ret;
   2681}
   2682
   2683/**
   2684 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
   2685 * @upiu_wlun_id: UPIU W-LUN id
   2686 *
   2687 * Returns SCSI W-LUN id
   2688 */
   2689static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
   2690{
   2691	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
   2692}
   2693
   2694static inline bool is_device_wlun(struct scsi_device *sdev)
   2695{
   2696	return sdev->lun ==
   2697		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
   2698}
   2699
   2700/*
   2701 * Associate the UFS controller queue with the default and poll HCTX types.
   2702 * Initialize the mq_map[] arrays.
   2703 */
   2704static int ufshcd_map_queues(struct Scsi_Host *shost)
   2705{
   2706	int i, ret;
   2707
   2708	for (i = 0; i < shost->nr_maps; i++) {
   2709		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
   2710
   2711		switch (i) {
   2712		case HCTX_TYPE_DEFAULT:
   2713		case HCTX_TYPE_POLL:
   2714			map->nr_queues = 1;
   2715			break;
   2716		case HCTX_TYPE_READ:
   2717			map->nr_queues = 0;
   2718			continue;
   2719		default:
   2720			WARN_ON_ONCE(true);
   2721		}
   2722		map->queue_offset = 0;
   2723		ret = blk_mq_map_queues(map);
   2724		WARN_ON_ONCE(ret);
   2725	}
   2726
   2727	return 0;
   2728}
   2729
   2730static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
   2731{
   2732	struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
   2733	struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
   2734	dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
   2735		i * sizeof(struct utp_transfer_cmd_desc);
   2736	u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
   2737				       response_upiu);
   2738	u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
   2739
   2740	lrb->utr_descriptor_ptr = utrdlp + i;
   2741	lrb->utrd_dma_addr = hba->utrdl_dma_addr +
   2742		i * sizeof(struct utp_transfer_req_desc);
   2743	lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
   2744	lrb->ucd_req_dma_addr = cmd_desc_element_addr;
   2745	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
   2746	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
   2747	lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
   2748	lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
   2749}
   2750
   2751/**
   2752 * ufshcd_queuecommand - main entry point for SCSI requests
   2753 * @host: SCSI host pointer
   2754 * @cmd: command from SCSI Midlayer
   2755 *
   2756 * Returns 0 for success, non-zero in case of failure
   2757 */
   2758static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
   2759{
   2760	struct ufs_hba *hba = shost_priv(host);
   2761	int tag = scsi_cmd_to_rq(cmd)->tag;
   2762	struct ufshcd_lrb *lrbp;
   2763	int err = 0;
   2764
   2765	WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
   2766
   2767	/*
   2768	 * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
   2769	 * calls.
   2770	 */
   2771	rcu_read_lock();
   2772
   2773	switch (hba->ufshcd_state) {
   2774	case UFSHCD_STATE_OPERATIONAL:
   2775		break;
   2776	case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
   2777		/*
   2778		 * SCSI error handler can call ->queuecommand() while UFS error
   2779		 * handler is in progress. Error interrupts could change the
   2780		 * state from UFSHCD_STATE_RESET to
   2781		 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
   2782		 * being issued in that case.
   2783		 */
   2784		if (ufshcd_eh_in_progress(hba)) {
   2785			err = SCSI_MLQUEUE_HOST_BUSY;
   2786			goto out;
   2787		}
   2788		break;
   2789	case UFSHCD_STATE_EH_SCHEDULED_FATAL:
   2790		/*
   2791		 * pm_runtime_get_sync() is used at error handling preparation
   2792		 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
   2793		 * PM ops, it can never be finished if we let SCSI layer keep
   2794		 * retrying it, which gets err handler stuck forever. Neither
   2795		 * can we let the scsi cmd pass through, because UFS is in bad
   2796		 * state, the scsi cmd may eventually time out, which will get
   2797		 * err handler blocked for too long. So, just fail the scsi cmd
   2798		 * sent from PM ops, err handler can recover PM error anyways.
   2799		 */
   2800		if (hba->pm_op_in_progress) {
   2801			hba->force_reset = true;
   2802			set_host_byte(cmd, DID_BAD_TARGET);
   2803			scsi_done(cmd);
   2804			goto out;
   2805		}
   2806		fallthrough;
   2807	case UFSHCD_STATE_RESET:
   2808		err = SCSI_MLQUEUE_HOST_BUSY;
   2809		goto out;
   2810	case UFSHCD_STATE_ERROR:
   2811		set_host_byte(cmd, DID_ERROR);
   2812		scsi_done(cmd);
   2813		goto out;
   2814	}
   2815
   2816	hba->req_abort_count = 0;
   2817
   2818	err = ufshcd_hold(hba, true);
   2819	if (err) {
   2820		err = SCSI_MLQUEUE_HOST_BUSY;
   2821		goto out;
   2822	}
   2823	WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
   2824		(hba->clk_gating.state != CLKS_ON));
   2825
   2826	lrbp = &hba->lrb[tag];
   2827	WARN_ON(lrbp->cmd);
   2828	lrbp->cmd = cmd;
   2829	lrbp->task_tag = tag;
   2830	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
   2831	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
   2832
   2833	ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
   2834
   2835	lrbp->req_abort_skip = false;
   2836
   2837	ufshpb_prep(hba, lrbp);
   2838
   2839	ufshcd_comp_scsi_upiu(hba, lrbp);
   2840
   2841	err = ufshcd_map_sg(hba, lrbp);
   2842	if (err) {
   2843		lrbp->cmd = NULL;
   2844		ufshcd_release(hba);
   2845		goto out;
   2846	}
   2847
   2848	ufshcd_send_command(hba, tag);
   2849
   2850out:
   2851	rcu_read_unlock();
   2852
   2853	if (ufs_trigger_eh()) {
   2854		unsigned long flags;
   2855
   2856		spin_lock_irqsave(hba->host->host_lock, flags);
   2857		ufshcd_schedule_eh_work(hba);
   2858		spin_unlock_irqrestore(hba->host->host_lock, flags);
   2859	}
   2860
   2861	return err;
   2862}
   2863
   2864static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
   2865		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
   2866{
   2867	lrbp->cmd = NULL;
   2868	lrbp->task_tag = tag;
   2869	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
   2870	lrbp->intr_cmd = true; /* No interrupt aggregation */
   2871	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
   2872	hba->dev_cmd.type = cmd_type;
   2873
   2874	return ufshcd_compose_devman_upiu(hba, lrbp);
   2875}
   2876
   2877/*
   2878 * Clear all the requests from the controller for which a bit has been set in
   2879 * @mask and wait until the controller confirms that these requests have been
   2880 * cleared.
   2881 */
   2882static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
   2883{
   2884	unsigned long flags;
   2885
   2886	/* clear outstanding transaction before retry */
   2887	spin_lock_irqsave(hba->host->host_lock, flags);
   2888	ufshcd_utrl_clear(hba, mask);
   2889	spin_unlock_irqrestore(hba->host->host_lock, flags);
   2890
   2891	/*
   2892	 * wait for h/w to clear corresponding bit in door-bell.
   2893	 * max. wait is 1 sec.
   2894	 */
   2895	return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
   2896					mask, ~mask, 1000, 1000);
   2897}
   2898
   2899static int
   2900ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
   2901{
   2902	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
   2903
   2904	/* Get the UPIU response */
   2905	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
   2906				UPIU_RSP_CODE_OFFSET;
   2907	return query_res->response;
   2908}
   2909
   2910/**
   2911 * ufshcd_dev_cmd_completion() - handles device management command responses
   2912 * @hba: per adapter instance
   2913 * @lrbp: pointer to local reference block
   2914 */
   2915static int
   2916ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
   2917{
   2918	int resp;
   2919	int err = 0;
   2920
   2921	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
   2922	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
   2923
   2924	switch (resp) {
   2925	case UPIU_TRANSACTION_NOP_IN:
   2926		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
   2927			err = -EINVAL;
   2928			dev_err(hba->dev, "%s: unexpected response %x\n",
   2929					__func__, resp);
   2930		}
   2931		break;
   2932	case UPIU_TRANSACTION_QUERY_RSP:
   2933		err = ufshcd_check_query_response(hba, lrbp);
   2934		if (!err)
   2935			err = ufshcd_copy_query_response(hba, lrbp);
   2936		break;
   2937	case UPIU_TRANSACTION_REJECT_UPIU:
   2938		/* TODO: handle Reject UPIU Response */
   2939		err = -EPERM;
   2940		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
   2941				__func__);
   2942		break;
   2943	default:
   2944		err = -EINVAL;
   2945		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
   2946				__func__, resp);
   2947		break;
   2948	}
   2949
   2950	return err;
   2951}
   2952
   2953static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
   2954		struct ufshcd_lrb *lrbp, int max_timeout)
   2955{
   2956	int err = 0;
   2957	unsigned long time_left;
   2958	unsigned long flags;
   2959
   2960	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
   2961			msecs_to_jiffies(max_timeout));
   2962
   2963	spin_lock_irqsave(hba->host->host_lock, flags);
   2964	hba->dev_cmd.complete = NULL;
   2965	if (likely(time_left)) {
   2966		err = ufshcd_get_tr_ocs(lrbp);
   2967		if (!err)
   2968			err = ufshcd_dev_cmd_completion(hba, lrbp);
   2969	}
   2970	spin_unlock_irqrestore(hba->host->host_lock, flags);
   2971
   2972	if (!time_left) {
   2973		err = -ETIMEDOUT;
   2974		dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
   2975			__func__, lrbp->task_tag);
   2976		if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag))
   2977			/* successfully cleared the command, retry if needed */
   2978			err = -EAGAIN;
   2979		/*
   2980		 * In case of an error, after clearing the doorbell,
   2981		 * we also need to clear the corresponding bit in
   2982		 * hba->outstanding_reqs.
   2983		 */
   2984		spin_lock_irqsave(&hba->outstanding_lock, flags);
   2985		__clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
   2986		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
   2987	}
   2988
   2989	return err;
   2990}
   2991
   2992/**
   2993 * ufshcd_exec_dev_cmd - API for sending device management requests
   2994 * @hba: UFS hba
   2995 * @cmd_type: specifies the type (NOP, Query...)
   2996 * @timeout: timeout in milliseconds
   2997 *
   2998 * NOTE: Since there is only one available tag for device management commands,
   2999 * it is expected that the caller holds the hba->dev_cmd.lock mutex.
   3000 */
   3001static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
   3002		enum dev_cmd_type cmd_type, int timeout)
   3003{
   3004	DECLARE_COMPLETION_ONSTACK(wait);
   3005	const u32 tag = hba->reserved_slot;
   3006	struct ufshcd_lrb *lrbp;
   3007	int err;
   3008
   3009	/* Protects use of hba->reserved_slot. */
   3010	lockdep_assert_held(&hba->dev_cmd.lock);
   3011
   3012	down_read(&hba->clk_scaling_lock);
   3013
   3014	lrbp = &hba->lrb[tag];
   3015	WARN_ON(lrbp->cmd);
   3016	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
   3017	if (unlikely(err))
   3018		goto out;
   3019
   3020	hba->dev_cmd.complete = &wait;
   3021
   3022	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
   3023
   3024	ufshcd_send_command(hba, tag);
   3025	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
   3026	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
   3027				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
   3028
   3029out:
   3030	up_read(&hba->clk_scaling_lock);
   3031	return err;
   3032}
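
/*
 * Usage sketch (illustrative): sending a NOP OUT device management command
 * under hba->dev_cmd.lock, as the note above requires. This mirrors the NOP
 * verification path elsewhere in this file (minus its retry loop); the helper
 * name is hypothetical.
 */
static int __maybe_unused example_send_nop(struct ufs_hba *hba)
{
	int err;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	return err;
}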
   3033
   3034/**
   3035 * ufshcd_init_query() - init the query response and request parameters
   3036 * @hba: per-adapter instance
   3037 * @request: address of the request pointer to be initialized
   3038 * @response: address of the response pointer to be initialized
   3039 * @opcode: operation to perform
   3040 * @idn: flag idn to access
   3041 * @index: LU number to access
   3042 * @selector: query/flag/descriptor further identification
   3043 */
   3044static inline void ufshcd_init_query(struct ufs_hba *hba,
   3045		struct ufs_query_req **request, struct ufs_query_res **response,
   3046		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
   3047{
   3048	*request = &hba->dev_cmd.query.request;
   3049	*response = &hba->dev_cmd.query.response;
   3050	memset(*request, 0, sizeof(struct ufs_query_req));
   3051	memset(*response, 0, sizeof(struct ufs_query_res));
   3052	(*request)->upiu_req.opcode = opcode;
   3053	(*request)->upiu_req.idn = idn;
   3054	(*request)->upiu_req.index = index;
   3055	(*request)->upiu_req.selector = selector;
   3056}
   3057
   3058static int ufshcd_query_flag_retry(struct ufs_hba *hba,
   3059	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
   3060{
   3061	int ret;
   3062	int retries;
   3063
   3064	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
   3065		ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
   3066		if (ret)
   3067			dev_dbg(hba->dev,
   3068				"%s: failed with error %d, retries %d\n",
   3069				__func__, ret, retries);
   3070		else
   3071			break;
   3072	}
   3073
   3074	if (ret)
   3075		dev_err(hba->dev,
   3076			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
   3077			__func__, opcode, idn, ret, retries);
   3078	return ret;
   3079}
   3080
   3081/**
   3082 * ufshcd_query_flag() - API function for sending flag query requests
   3083 * @hba: per-adapter instance
   3084 * @opcode: flag query to perform
   3085 * @idn: flag idn to access
   3086 * @index: flag index to access
   3087 * @flag_res: the flag value after the query request completes
   3088 *
   3089 * Returns 0 for success, non-zero in case of failure
   3090 */
   3091int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
   3092			enum flag_idn idn, u8 index, bool *flag_res)
   3093{
   3094	struct ufs_query_req *request = NULL;
   3095	struct ufs_query_res *response = NULL;
   3096	int err, selector = 0;
   3097	int timeout = QUERY_REQ_TIMEOUT;
   3098
   3099	BUG_ON(!hba);
   3100
   3101	ufshcd_hold(hba, false);
   3102	mutex_lock(&hba->dev_cmd.lock);
   3103	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
   3104			selector);
   3105
   3106	switch (opcode) {
   3107	case UPIU_QUERY_OPCODE_SET_FLAG:
   3108	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
   3109	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
   3110		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
   3111		break;
   3112	case UPIU_QUERY_OPCODE_READ_FLAG:
   3113		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
   3114		if (!flag_res) {
   3115			/* No dummy reads */
   3116			dev_err(hba->dev, "%s: Invalid argument for read request\n",
   3117					__func__);
   3118			err = -EINVAL;
   3119			goto out_unlock;
   3120		}
   3121		break;
   3122	default:
   3123		dev_err(hba->dev,
   3124			"%s: Expected query flag opcode but got = %d\n",
   3125			__func__, opcode);
   3126		err = -EINVAL;
   3127		goto out_unlock;
   3128	}
   3129
   3130	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
   3131
   3132	if (err) {
   3133		dev_err(hba->dev,
   3134			"%s: Sending flag query for idn %d failed, err = %d\n",
   3135			__func__, idn, err);
   3136		goto out_unlock;
   3137	}
   3138
   3139	if (flag_res)
   3140		*flag_res = (be32_to_cpu(response->upiu_res.value) &
   3141				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
   3142
   3143out_unlock:
   3144	mutex_unlock(&hba->dev_cmd.lock);
   3145	ufshcd_release(hba);
   3146	return err;
   3147}
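
/*
 * Usage sketch (illustrative): reading the fDeviceInit flag through the
 * retrying wrapper above. QUERY_FLAG_IDN_FDEVICEINIT comes from the UFS
 * headers; the helper name is hypothetical.
 */
static int __maybe_unused example_read_fdeviceinit(struct ufs_hba *hba,
						   bool *flag_res)
{
	return ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				       QUERY_FLAG_IDN_FDEVICEINIT, 0,
				       flag_res);
}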
   3148
   3149/**
   3150 * ufshcd_query_attr - API function for sending attribute requests
   3151 * @hba: per-adapter instance
   3152 * @opcode: attribute opcode
   3153 * @idn: attribute idn to access
   3154 * @index: index field
   3155 * @selector: selector field
   3156 * @attr_val: the attribute value after the query request completes
   3157 *
   3158 * Returns 0 for success, non-zero in case of failure
   3159 */
   3160int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
   3161		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
   3162{
   3163	struct ufs_query_req *request = NULL;
   3164	struct ufs_query_res *response = NULL;
   3165	int err;
   3166
   3167	BUG_ON(!hba);
   3168
   3169	if (!attr_val) {
   3170		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
   3171				__func__, opcode);
   3172		return -EINVAL;
   3173	}
   3174
   3175	ufshcd_hold(hba, false);
   3176
   3177	mutex_lock(&hba->dev_cmd.lock);
   3178	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
   3179			selector);
   3180
   3181	switch (opcode) {
   3182	case UPIU_QUERY_OPCODE_WRITE_ATTR:
   3183		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
   3184		request->upiu_req.value = cpu_to_be32(*attr_val);
   3185		break;
   3186	case UPIU_QUERY_OPCODE_READ_ATTR:
   3187		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
   3188		break;
   3189	default:
   3190		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
   3191				__func__, opcode);
   3192		err = -EINVAL;
   3193		goto out_unlock;
   3194	}
   3195
   3196	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
   3197
   3198	if (err) {
   3199		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
   3200				__func__, opcode, idn, index, err);
   3201		goto out_unlock;
   3202	}
   3203
   3204	*attr_val = be32_to_cpu(response->upiu_res.value);
   3205
   3206out_unlock:
   3207	mutex_unlock(&hba->dev_cmd.lock);
   3208	ufshcd_release(hba);
   3209	return err;
   3210}
   3211
   3212/**
   3213 * ufshcd_query_attr_retry() - API function for sending query
   3214 * attribute with retries
   3215 * @hba: per-adapter instance
   3216 * @opcode: attribute opcode
   3217 * @idn: attribute idn to access
   3218 * @index: index field
   3219 * @selector: selector field
   3220 * @attr_val: the attribute value after the query request
   3221 * completes
   3222 *
   3223 * Returns 0 for success, non-zero in case of failure
   3224 */
   3225int ufshcd_query_attr_retry(struct ufs_hba *hba,
   3226	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
   3227	u32 *attr_val)
   3228{
   3229	int ret = 0;
   3230	u32 retries;
   3231
   3232	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
   3233		ret = ufshcd_query_attr(hba, opcode, idn, index,
   3234						selector, attr_val);
   3235		if (ret)
   3236			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
   3237				__func__, ret, retries);
   3238		else
   3239			break;
   3240	}
   3241
   3242	if (ret)
   3243		dev_err(hba->dev,
    3244			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
   3245			__func__, idn, ret, QUERY_REQ_RETRIES);
   3246	return ret;
   3247}
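
        /*
         * Illustrative sketch: reading an attribute with automatic retries,
         * mirroring the bRefClkGatingWait read in
         * ufshcd_get_ref_clk_gating_wait() below.
         *
         *	u32 gating_wait = 0;
         *	int err;
         *
         *	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
         *				      QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME,
         *				      0, 0, &gating_wait);
         *	if (!err)
         *		dev_dbg(hba->dev, "bRefClkGatingWait = %u\n", gating_wait);
         */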
   3248
   3249static int __ufshcd_query_descriptor(struct ufs_hba *hba,
   3250			enum query_opcode opcode, enum desc_idn idn, u8 index,
   3251			u8 selector, u8 *desc_buf, int *buf_len)
   3252{
   3253	struct ufs_query_req *request = NULL;
   3254	struct ufs_query_res *response = NULL;
   3255	int err;
   3256
   3257	BUG_ON(!hba);
   3258
   3259	if (!desc_buf) {
   3260		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
   3261				__func__, opcode);
   3262		return -EINVAL;
   3263	}
   3264
   3265	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
   3266		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
   3267				__func__, *buf_len);
   3268		return -EINVAL;
   3269	}
   3270
   3271	ufshcd_hold(hba, false);
   3272
   3273	mutex_lock(&hba->dev_cmd.lock);
   3274	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
   3275			selector);
   3276	hba->dev_cmd.query.descriptor = desc_buf;
   3277	request->upiu_req.length = cpu_to_be16(*buf_len);
   3278
   3279	switch (opcode) {
   3280	case UPIU_QUERY_OPCODE_WRITE_DESC:
   3281		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
   3282		break;
   3283	case UPIU_QUERY_OPCODE_READ_DESC:
   3284		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
   3285		break;
   3286	default:
   3287		dev_err(hba->dev,
   3288				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
   3289				__func__, opcode);
   3290		err = -EINVAL;
   3291		goto out_unlock;
   3292	}
   3293
   3294	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
   3295
   3296	if (err) {
   3297		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
   3298				__func__, opcode, idn, index, err);
   3299		goto out_unlock;
   3300	}
   3301
   3302	*buf_len = be16_to_cpu(response->upiu_res.length);
   3303
   3304out_unlock:
   3305	hba->dev_cmd.query.descriptor = NULL;
   3306	mutex_unlock(&hba->dev_cmd.lock);
   3307	ufshcd_release(hba);
   3308	return err;
   3309}
   3310
   3311/**
   3312 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
   3313 * @hba: per-adapter instance
    3314 * @opcode: descriptor opcode
    3315 * @idn: descriptor idn to access
   3316 * @index: index field
   3317 * @selector: selector field
   3318 * @desc_buf: the buffer that contains the descriptor
   3319 * @buf_len: length parameter passed to the device
   3320 *
   3321 * Returns 0 for success, non-zero in case of failure.
   3322 * The buf_len parameter will contain, on return, the length parameter
   3323 * received on the response.
   3324 */
   3325int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
   3326				  enum query_opcode opcode,
   3327				  enum desc_idn idn, u8 index,
   3328				  u8 selector,
   3329				  u8 *desc_buf, int *buf_len)
   3330{
   3331	int err;
   3332	int retries;
   3333
   3334	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
   3335		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
   3336						selector, desc_buf, buf_len);
   3337		if (!err || err == -EINVAL)
   3338			break;
   3339	}
   3340
   3341	return err;
   3342}
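
        /*
         * Illustrative sketch: reading a complete descriptor. buf_len is an
         * in/out parameter: it carries the requested length in and the length
         * reported by the device out, which ufshcd_read_desc_param() below
         * relies on. QUERY_DESC_IDN_DEVICE is used only as an example IDN.
         *
         *	u8 desc_buf[QUERY_DESC_MAX_SIZE] = {};
         *	int buf_len = QUERY_DESC_MAX_SIZE;
         *	int err;
         *
         *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
         *					    QUERY_DESC_IDN_DEVICE, 0, 0,
         *					    desc_buf, &buf_len);
         */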
   3343
   3344/**
   3345 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
   3346 * @hba: Pointer to adapter instance
   3347 * @desc_id: descriptor idn value
   3348 * @desc_len: mapped desc length (out)
   3349 */
   3350void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
   3351				  int *desc_len)
   3352{
   3353	if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
   3354	    desc_id == QUERY_DESC_IDN_RFU_1)
   3355		*desc_len = 0;
   3356	else
   3357		*desc_len = hba->desc_size[desc_id];
   3358}
   3359EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
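
        /*
         * Illustrative sketch: looking up the cached descriptor length before
         * allocating a read buffer, as ufshcd_read_desc_param() below does.
         *
         *	int buff_len = 0;
         *
         *	ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_DEVICE, &buff_len);
         *	if (!buff_len)
         *		return -EINVAL;
         */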
   3360
   3361static void ufshcd_update_desc_length(struct ufs_hba *hba,
   3362				      enum desc_idn desc_id, int desc_index,
   3363				      unsigned char desc_len)
   3364{
   3365	if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
   3366	    desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
    3367		/* For UFS 3.1, the normal unit descriptor is 10 bytes larger than
    3368		 * the RPMB unit descriptor; however, both descriptors share the
    3369		 * same desc_idn. To cover both unit descriptors with one length,
    3370		 * we choose the normal unit descriptor length based on desc_index.
    3371		 */
   3372		hba->desc_size[desc_id] = desc_len;
   3373}
   3374
   3375/**
   3376 * ufshcd_read_desc_param - read the specified descriptor parameter
   3377 * @hba: Pointer to adapter instance
   3378 * @desc_id: descriptor idn value
   3379 * @desc_index: descriptor index
   3380 * @param_offset: offset of the parameter to read
   3381 * @param_read_buf: pointer to buffer where parameter would be read
   3382 * @param_size: sizeof(param_read_buf)
   3383 *
   3384 * Return 0 in case of success, non-zero otherwise
   3385 */
   3386int ufshcd_read_desc_param(struct ufs_hba *hba,
   3387			   enum desc_idn desc_id,
   3388			   int desc_index,
   3389			   u8 param_offset,
   3390			   u8 *param_read_buf,
   3391			   u8 param_size)
   3392{
   3393	int ret;
   3394	u8 *desc_buf;
   3395	int buff_len;
   3396	bool is_kmalloc = true;
   3397
   3398	/* Safety check */
   3399	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
   3400		return -EINVAL;
   3401
   3402	/* Get the length of descriptor */
   3403	ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
   3404	if (!buff_len) {
   3405		dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
   3406		return -EINVAL;
   3407	}
   3408
   3409	if (param_offset >= buff_len) {
   3410		dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
   3411			__func__, param_offset, desc_id, buff_len);
   3412		return -EINVAL;
   3413	}
   3414
   3415	/* Check whether we need temp memory */
   3416	if (param_offset != 0 || param_size < buff_len) {
   3417		desc_buf = kzalloc(buff_len, GFP_KERNEL);
   3418		if (!desc_buf)
   3419			return -ENOMEM;
   3420	} else {
   3421		desc_buf = param_read_buf;
   3422		is_kmalloc = false;
   3423	}
   3424
   3425	/* Request for full descriptor */
   3426	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
   3427					desc_id, desc_index, 0,
   3428					desc_buf, &buff_len);
   3429
   3430	if (ret) {
   3431		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
   3432			__func__, desc_id, desc_index, param_offset, ret);
   3433		goto out;
   3434	}
   3435
   3436	/* Sanity check */
   3437	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
   3438		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
   3439			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
   3440		ret = -EINVAL;
   3441		goto out;
   3442	}
   3443
   3444	/* Update descriptor length */
   3445	buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
   3446	ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
   3447
   3448	if (is_kmalloc) {
   3449		/* Make sure we don't copy more data than available */
   3450		if (param_offset >= buff_len)
   3451			ret = -EINVAL;
   3452		else
   3453			memcpy(param_read_buf, &desc_buf[param_offset],
   3454			       min_t(u32, param_size, buff_len - param_offset));
   3455	}
   3456out:
   3457	if (is_kmalloc)
   3458		kfree(desc_buf);
   3459	return ret;
   3460}
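
        /*
         * Illustrative sketch: reading a single parameter out of the device
         * descriptor. DEVICE_DESC_PARAM_SPEC_VER is assumed here as the example
         * offset; a bounce buffer is allocated internally when only part of the
         * descriptor is requested.
         *
         *	u8 spec_ver[2];
         *	int err;
         *
         *	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
         *				     DEVICE_DESC_PARAM_SPEC_VER,
         *				     spec_ver, sizeof(spec_ver));
         */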
   3461
   3462/**
   3463 * struct uc_string_id - unicode string
   3464 *
    3465 * @len: size of this descriptor, including the header
   3466 * @type: descriptor type
   3467 * @uc: unicode string character
   3468 */
   3469struct uc_string_id {
   3470	u8 len;
   3471	u8 type;
   3472	wchar_t uc[];
   3473} __packed;
   3474
   3475/* replace non-printable or non-ASCII characters with spaces */
   3476static inline char ufshcd_remove_non_printable(u8 ch)
   3477{
   3478	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
   3479}
   3480
   3481/**
   3482 * ufshcd_read_string_desc - read string descriptor
   3483 * @hba: pointer to adapter instance
   3484 * @desc_index: descriptor index
   3485 * @buf: pointer to buffer where descriptor would be read,
   3486 *       the caller should free the memory.
    3487 * @ascii: if true, convert from UTF-16 to a null-terminated
    3488 *         ASCII string
   3489 *
   3490 * Return:
   3491 * *      string size on success.
   3492 * *      -ENOMEM: on allocation failure
   3493 * *      -EINVAL: on a wrong parameter
   3494 */
   3495int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
   3496			    u8 **buf, bool ascii)
   3497{
   3498	struct uc_string_id *uc_str;
   3499	u8 *str;
   3500	int ret;
   3501
   3502	if (!buf)
   3503		return -EINVAL;
   3504
   3505	uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
   3506	if (!uc_str)
   3507		return -ENOMEM;
   3508
   3509	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
   3510				     (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
   3511	if (ret < 0) {
   3512		dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
   3513			QUERY_REQ_RETRIES, ret);
   3514		str = NULL;
   3515		goto out;
   3516	}
   3517
   3518	if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
   3519		dev_dbg(hba->dev, "String Desc is of zero length\n");
   3520		str = NULL;
   3521		ret = 0;
   3522		goto out;
   3523	}
   3524
   3525	if (ascii) {
   3526		ssize_t ascii_len;
   3527		int i;
   3528		/* remove header and divide by 2 to move from UTF16 to UTF8 */
   3529		ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
   3530		str = kzalloc(ascii_len, GFP_KERNEL);
   3531		if (!str) {
   3532			ret = -ENOMEM;
   3533			goto out;
   3534		}
   3535
   3536		/*
    3537		 * The descriptor contains a string in UTF-16 format;
    3538		 * convert it to UTF-8 so it can be displayed.
   3539		 */
   3540		ret = utf16s_to_utf8s(uc_str->uc,
   3541				      uc_str->len - QUERY_DESC_HDR_SIZE,
   3542				      UTF16_BIG_ENDIAN, str, ascii_len);
   3543
   3544		/* replace non-printable or non-ASCII characters with spaces */
   3545		for (i = 0; i < ret; i++)
   3546			str[i] = ufshcd_remove_non_printable(str[i]);
   3547
   3548		str[ret++] = '\0';
   3549
   3550	} else {
   3551		str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
   3552		if (!str) {
   3553			ret = -ENOMEM;
   3554			goto out;
   3555		}
   3556		ret = uc_str->len;
   3557	}
   3558out:
   3559	*buf = str;
   3560	kfree(uc_str);
   3561	return ret;
   3562}
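
        /*
         * Illustrative sketch: reading a string descriptor as ASCII. The index
         * would normally come from the device descriptor (the product name
         * index, for instance); "model_index" is assumed for illustration. The
         * returned buffer must be freed by the caller.
         *
         *	u8 *model = NULL;
         *	int ret;
         *
         *	ret = ufshcd_read_string_desc(hba, model_index, &model, true);
         *	if (ret > 0)
         *		dev_info(hba->dev, "product name: %s\n", model);
         *	kfree(model);
         */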
   3563
   3564/**
   3565 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
   3566 * @hba: Pointer to adapter instance
   3567 * @lun: lun id
   3568 * @param_offset: offset of the parameter to read
   3569 * @param_read_buf: pointer to buffer where parameter would be read
   3570 * @param_size: sizeof(param_read_buf)
   3571 *
   3572 * Return 0 in case of success, non-zero otherwise
   3573 */
   3574static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
   3575					      int lun,
   3576					      enum unit_desc_param param_offset,
   3577					      u8 *param_read_buf,
   3578					      u32 param_size)
   3579{
   3580	/*
   3581	 * Unit descriptors are only available for general purpose LUs (LUN id
   3582	 * from 0 to 7) and RPMB Well known LU.
   3583	 */
   3584	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
   3585		return -EOPNOTSUPP;
   3586
   3587	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
   3588				      param_offset, param_read_buf, param_size);
   3589}
   3590
   3591static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
   3592{
   3593	int err = 0;
   3594	u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
   3595
   3596	if (hba->dev_info.wspecversion >= 0x300) {
   3597		err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
   3598				QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
   3599				&gating_wait);
   3600		if (err)
   3601			dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
   3602					 err, gating_wait);
   3603
   3604		if (gating_wait == 0) {
   3605			gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
   3606			dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
   3607					 gating_wait);
   3608		}
   3609
   3610		hba->dev_info.clk_gating_wait_us = gating_wait;
   3611	}
   3612
   3613	return err;
   3614}
   3615
   3616/**
   3617 * ufshcd_memory_alloc - allocate memory for host memory space data structures
   3618 * @hba: per adapter instance
   3619 *
   3620 * 1. Allocate DMA memory for Command Descriptor array
    3621 *	Each command descriptor consists of Command UPIU, Response UPIU and PRDT
   3622 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
   3623 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
   3624 *	(UTMRDL)
    3625 * 4. Allocate memory for local reference block (lrb).
   3626 *
   3627 * Returns 0 for success, non-zero in case of failure
   3628 */
   3629static int ufshcd_memory_alloc(struct ufs_hba *hba)
   3630{
   3631	size_t utmrdl_size, utrdl_size, ucdl_size;
   3632
   3633	/* Allocate memory for UTP command descriptors */
   3634	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
   3635	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
   3636						  ucdl_size,
   3637						  &hba->ucdl_dma_addr,
   3638						  GFP_KERNEL);
   3639
   3640	/*
    3641	 * UFSHCI requires UTP command descriptors to be 128-byte aligned.
    3642	 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
    3643	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it is
    3644	 * aligned to 128 bytes as well.
   3645	 */
   3646	if (!hba->ucdl_base_addr ||
   3647	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
   3648		dev_err(hba->dev,
   3649			"Command Descriptor Memory allocation failed\n");
   3650		goto out;
   3651	}
   3652
   3653	/*
   3654	 * Allocate memory for UTP Transfer descriptors
   3655	 * UFSHCI requires 1024 byte alignment of UTRD
   3656	 */
   3657	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
   3658	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
   3659						   utrdl_size,
   3660						   &hba->utrdl_dma_addr,
   3661						   GFP_KERNEL);
   3662	if (!hba->utrdl_base_addr ||
   3663	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
   3664		dev_err(hba->dev,
   3665			"Transfer Descriptor Memory allocation failed\n");
   3666		goto out;
   3667	}
   3668
   3669	/*
   3670	 * Allocate memory for UTP Task Management descriptors
   3671	 * UFSHCI requires 1024 byte alignment of UTMRD
   3672	 */
   3673	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
   3674	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
   3675						    utmrdl_size,
   3676						    &hba->utmrdl_dma_addr,
   3677						    GFP_KERNEL);
   3678	if (!hba->utmrdl_base_addr ||
   3679	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
   3680		dev_err(hba->dev,
   3681		"Task Management Descriptor Memory allocation failed\n");
   3682		goto out;
   3683	}
   3684
   3685	/* Allocate memory for local reference block */
   3686	hba->lrb = devm_kcalloc(hba->dev,
   3687				hba->nutrs, sizeof(struct ufshcd_lrb),
   3688				GFP_KERNEL);
   3689	if (!hba->lrb) {
   3690		dev_err(hba->dev, "LRB Memory allocation failed\n");
   3691		goto out;
   3692	}
   3693	return 0;
   3694out:
   3695	return -ENOMEM;
   3696}
   3697
   3698/**
   3699 * ufshcd_host_memory_configure - configure local reference block with
   3700 *				memory offsets
   3701 * @hba: per adapter instance
   3702 *
   3703 * Configure Host memory space
   3704 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
   3705 * address.
   3706 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
   3707 * and PRDT offset.
   3708 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
   3709 * into local reference block.
   3710 */
   3711static void ufshcd_host_memory_configure(struct ufs_hba *hba)
   3712{
   3713	struct utp_transfer_req_desc *utrdlp;
   3714	dma_addr_t cmd_desc_dma_addr;
   3715	dma_addr_t cmd_desc_element_addr;
   3716	u16 response_offset;
   3717	u16 prdt_offset;
   3718	int cmd_desc_size;
   3719	int i;
   3720
   3721	utrdlp = hba->utrdl_base_addr;
   3722
   3723	response_offset =
   3724		offsetof(struct utp_transfer_cmd_desc, response_upiu);
   3725	prdt_offset =
   3726		offsetof(struct utp_transfer_cmd_desc, prd_table);
   3727
   3728	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
   3729	cmd_desc_dma_addr = hba->ucdl_dma_addr;
   3730
   3731	for (i = 0; i < hba->nutrs; i++) {
   3732		/* Configure UTRD with command descriptor base address */
   3733		cmd_desc_element_addr =
   3734				(cmd_desc_dma_addr + (cmd_desc_size * i));
   3735		utrdlp[i].command_desc_base_addr_lo =
   3736				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
   3737		utrdlp[i].command_desc_base_addr_hi =
   3738				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
   3739
   3740		/* Response upiu and prdt offset should be in double words */
   3741		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
   3742			utrdlp[i].response_upiu_offset =
   3743				cpu_to_le16(response_offset);
   3744			utrdlp[i].prd_table_offset =
   3745				cpu_to_le16(prdt_offset);
   3746			utrdlp[i].response_upiu_length =
   3747				cpu_to_le16(ALIGNED_UPIU_SIZE);
   3748		} else {
   3749			utrdlp[i].response_upiu_offset =
   3750				cpu_to_le16(response_offset >> 2);
   3751			utrdlp[i].prd_table_offset =
   3752				cpu_to_le16(prdt_offset >> 2);
   3753			utrdlp[i].response_upiu_length =
   3754				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
   3755		}
   3756
   3757		ufshcd_init_lrb(hba, &hba->lrb[i], i);
   3758	}
   3759}
   3760
   3761/**
   3762 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
   3763 * @hba: per adapter instance
   3764 *
    3765 * The UIC_CMD_DME_LINK_STARTUP command must be issued to the UniPro
    3766 * layer in order to initialize the UniPro link startup procedure.
    3767 * Once the UniPro links are up, the device connected to the controller
   3768 * is detected.
   3769 *
   3770 * Returns 0 on success, non-zero value on failure
   3771 */
   3772static int ufshcd_dme_link_startup(struct ufs_hba *hba)
   3773{
   3774	struct uic_command uic_cmd = {0};
   3775	int ret;
   3776
   3777	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
   3778
   3779	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
   3780	if (ret)
   3781		dev_dbg(hba->dev,
   3782			"dme-link-startup: error code %d\n", ret);
   3783	return ret;
   3784}
   3785/**
   3786 * ufshcd_dme_reset - UIC command for DME_RESET
   3787 * @hba: per adapter instance
   3788 *
    3789 * The DME_RESET command is issued in order to reset the UniPro stack.
   3790 * This function now deals with cold reset.
   3791 *
   3792 * Returns 0 on success, non-zero value on failure
   3793 */
   3794static int ufshcd_dme_reset(struct ufs_hba *hba)
   3795{
   3796	struct uic_command uic_cmd = {0};
   3797	int ret;
   3798
   3799	uic_cmd.command = UIC_CMD_DME_RESET;
   3800
   3801	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
   3802	if (ret)
   3803		dev_err(hba->dev,
   3804			"dme-reset: error code %d\n", ret);
   3805
   3806	return ret;
   3807}
   3808
   3809int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
   3810			       int agreed_gear,
   3811			       int adapt_val)
   3812{
   3813	int ret;
   3814
   3815	if (agreed_gear != UFS_HS_G4)
   3816		adapt_val = PA_NO_ADAPT;
   3817
   3818	ret = ufshcd_dme_set(hba,
   3819			     UIC_ARG_MIB(PA_TXHSADAPTTYPE),
   3820			     adapt_val);
   3821	return ret;
   3822}
   3823EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
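
        /*
         * Illustrative sketch: a vendor host driver selecting the HS-G4 adapt
         * type from its pwr_change_notify() callback. PA_INITIAL_ADAPT and the
         * dev_req_params pointer are assumptions made for the example.
         *
         *	ret = ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
         *					 PA_INITIAL_ADAPT);
         */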
   3824
   3825/**
   3826 * ufshcd_dme_enable - UIC command for DME_ENABLE
   3827 * @hba: per adapter instance
   3828 *
    3829 * The DME_ENABLE command is issued in order to enable the UniPro stack.
   3830 *
   3831 * Returns 0 on success, non-zero value on failure
   3832 */
   3833static int ufshcd_dme_enable(struct ufs_hba *hba)
   3834{
   3835	struct uic_command uic_cmd = {0};
   3836	int ret;
   3837
   3838	uic_cmd.command = UIC_CMD_DME_ENABLE;
   3839
   3840	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
   3841	if (ret)
   3842		dev_err(hba->dev,
   3843			"dme-enable: error code %d\n", ret);
   3844
   3845	return ret;
   3846}
   3847
   3848static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
   3849{
   3850	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
   3851	unsigned long min_sleep_time_us;
   3852
   3853	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
   3854		return;
   3855
   3856	/*
   3857	 * last_dme_cmd_tstamp will be 0 only for 1st call to
   3858	 * this function
   3859	 */
   3860	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
   3861		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
   3862	} else {
   3863		unsigned long delta =
   3864			(unsigned long) ktime_to_us(
   3865				ktime_sub(ktime_get(),
   3866				hba->last_dme_cmd_tstamp));
   3867
   3868		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
   3869			min_sleep_time_us =
   3870				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
   3871		else
   3872			return; /* no more delay required */
   3873	}
   3874
   3875	/* allow sleep for extra 50us if needed */
   3876	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
   3877}
   3878
   3879/**
   3880 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
   3881 * @hba: per adapter instance
   3882 * @attr_sel: uic command argument1
   3883 * @attr_set: attribute set type as uic command argument2
   3884 * @mib_val: setting value as uic command argument3
   3885 * @peer: indicate whether peer or local
   3886 *
   3887 * Returns 0 on success, non-zero value on failure
   3888 */
   3889int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
   3890			u8 attr_set, u32 mib_val, u8 peer)
   3891{
   3892	struct uic_command uic_cmd = {0};
   3893	static const char *const action[] = {
   3894		"dme-set",
   3895		"dme-peer-set"
   3896	};
   3897	const char *set = action[!!peer];
   3898	int ret;
   3899	int retries = UFS_UIC_COMMAND_RETRIES;
   3900
   3901	uic_cmd.command = peer ?
   3902		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
   3903	uic_cmd.argument1 = attr_sel;
   3904	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
   3905	uic_cmd.argument3 = mib_val;
   3906
   3907	do {
   3908		/* for peer attributes we retry upon failure */
   3909		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
   3910		if (ret)
   3911			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
   3912				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
   3913	} while (ret && peer && --retries);
   3914
   3915	if (ret)
   3916		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
   3917			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
   3918			UFS_UIC_COMMAND_RETRIES - retries);
   3919
   3920	return ret;
   3921}
   3922EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
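
        /*
         * Illustrative sketch: most callers in this file use the
         * ufshcd_dme_set()/ufshcd_dme_peer_set() convenience wrappers around
         * this function, as in ufshcd_change_power_mode() below:
         *
         *	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
         *	if (ret)
         *		dev_err(hba->dev, "failed to set PA_TXGEAR: %d\n", ret);
         */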
   3923
   3924/**
   3925 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
   3926 * @hba: per adapter instance
   3927 * @attr_sel: uic command argument1
   3928 * @mib_val: the value of the attribute as returned by the UIC command
   3929 * @peer: indicate whether peer or local
   3930 *
   3931 * Returns 0 on success, non-zero value on failure
   3932 */
   3933int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
   3934			u32 *mib_val, u8 peer)
   3935{
   3936	struct uic_command uic_cmd = {0};
   3937	static const char *const action[] = {
   3938		"dme-get",
   3939		"dme-peer-get"
   3940	};
   3941	const char *get = action[!!peer];
   3942	int ret;
   3943	int retries = UFS_UIC_COMMAND_RETRIES;
   3944	struct ufs_pa_layer_attr orig_pwr_info;
   3945	struct ufs_pa_layer_attr temp_pwr_info;
   3946	bool pwr_mode_change = false;
   3947
   3948	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
   3949		orig_pwr_info = hba->pwr_info;
   3950		temp_pwr_info = orig_pwr_info;
   3951
   3952		if (orig_pwr_info.pwr_tx == FAST_MODE ||
   3953		    orig_pwr_info.pwr_rx == FAST_MODE) {
   3954			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
   3955			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
   3956			pwr_mode_change = true;
   3957		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
   3958		    orig_pwr_info.pwr_rx == SLOW_MODE) {
   3959			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
   3960			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
   3961			pwr_mode_change = true;
   3962		}
   3963		if (pwr_mode_change) {
   3964			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
   3965			if (ret)
   3966				goto out;
   3967		}
   3968	}
   3969
   3970	uic_cmd.command = peer ?
   3971		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
   3972	uic_cmd.argument1 = attr_sel;
   3973
   3974	do {
   3975		/* for peer attributes we retry upon failure */
   3976		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
   3977		if (ret)
   3978			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
   3979				get, UIC_GET_ATTR_ID(attr_sel), ret);
   3980	} while (ret && peer && --retries);
   3981
   3982	if (ret)
   3983		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
   3984			get, UIC_GET_ATTR_ID(attr_sel),
   3985			UFS_UIC_COMMAND_RETRIES - retries);
   3986
   3987	if (mib_val && !ret)
   3988		*mib_val = uic_cmd.argument3;
   3989
   3990	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
   3991	    && pwr_mode_change)
   3992		ufshcd_change_power_mode(hba, &orig_pwr_info);
   3993out:
   3994	return ret;
   3995}
   3996EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
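
        /*
         * Illustrative sketch: local and peer attribute reads normally go
         * through the ufshcd_dme_get()/ufshcd_dme_peer_get() wrappers around
         * this function, as in ufshcd_get_max_pwr_mode() below:
         *
         *	u32 gear_rx = 0;
         *
         *	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear_rx);
         */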
   3997
   3998/**
   3999 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
   4000 * state) and waits for it to take effect.
   4001 *
   4002 * @hba: per adapter instance
   4003 * @cmd: UIC command to execute
   4004 *
    4005 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
    4006 * DME_HIBERNATE_EXIT take some time to take effect on both the host and
    4007 * device UniPro link, and hence their final completion is indicated by
    4008 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
    4009 * in addition to the normal UIC command completion status (UCCS). This
    4010 * function only returns after the relevant status bits indicate completion.
   4011 *
   4012 * Returns 0 on success, non-zero value on failure
   4013 */
   4014static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
   4015{
   4016	DECLARE_COMPLETION_ONSTACK(uic_async_done);
   4017	unsigned long flags;
   4018	u8 status;
   4019	int ret;
   4020	bool reenable_intr = false;
   4021
   4022	mutex_lock(&hba->uic_cmd_mutex);
   4023	ufshcd_add_delay_before_dme_cmd(hba);
   4024
   4025	spin_lock_irqsave(hba->host->host_lock, flags);
   4026	if (ufshcd_is_link_broken(hba)) {
   4027		ret = -ENOLINK;
   4028		goto out_unlock;
   4029	}
   4030	hba->uic_async_done = &uic_async_done;
   4031	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
   4032		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
   4033		/*
   4034		 * Make sure UIC command completion interrupt is disabled before
   4035		 * issuing UIC command.
   4036		 */
   4037		wmb();
   4038		reenable_intr = true;
   4039	}
   4040	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
   4041	spin_unlock_irqrestore(hba->host->host_lock, flags);
   4042	if (ret) {
   4043		dev_err(hba->dev,
   4044			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
   4045			cmd->command, cmd->argument3, ret);
   4046		goto out;
   4047	}
   4048
   4049	if (!wait_for_completion_timeout(hba->uic_async_done,
   4050					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
   4051		dev_err(hba->dev,
   4052			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
   4053			cmd->command, cmd->argument3);
   4054
   4055		if (!cmd->cmd_active) {
   4056			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
   4057				__func__);
   4058			goto check_upmcrs;
   4059		}
   4060
   4061		ret = -ETIMEDOUT;
   4062		goto out;
   4063	}
   4064
   4065check_upmcrs:
   4066	status = ufshcd_get_upmcrs(hba);
   4067	if (status != PWR_LOCAL) {
   4068		dev_err(hba->dev,
   4069			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
   4070			cmd->command, status);
   4071		ret = (status != PWR_OK) ? status : -1;
   4072	}
   4073out:
   4074	if (ret) {
   4075		ufshcd_print_host_state(hba);
   4076		ufshcd_print_pwr_info(hba);
   4077		ufshcd_print_evt_hist(hba);
   4078	}
   4079
   4080	spin_lock_irqsave(hba->host->host_lock, flags);
   4081	hba->active_uic_cmd = NULL;
   4082	hba->uic_async_done = NULL;
   4083	if (reenable_intr)
   4084		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
   4085	if (ret) {
   4086		ufshcd_set_link_broken(hba);
   4087		ufshcd_schedule_eh_work(hba);
   4088	}
   4089out_unlock:
   4090	spin_unlock_irqrestore(hba->host->host_lock, flags);
   4091	mutex_unlock(&hba->uic_cmd_mutex);
   4092
   4093	return ret;
   4094}
   4095
   4096/**
    4097 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
   4098 *				using DME_SET primitives.
   4099 * @hba: per adapter instance
    4100 * @mode: power mode value
   4101 *
   4102 * Returns 0 on success, non-zero value on failure
   4103 */
   4104static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
   4105{
   4106	struct uic_command uic_cmd = {0};
   4107	int ret;
   4108
   4109	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
   4110		ret = ufshcd_dme_set(hba,
   4111				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
   4112		if (ret) {
   4113			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
   4114						__func__, ret);
   4115			goto out;
   4116		}
   4117	}
   4118
   4119	uic_cmd.command = UIC_CMD_DME_SET;
   4120	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
   4121	uic_cmd.argument3 = mode;
   4122	ufshcd_hold(hba, false);
   4123	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
   4124	ufshcd_release(hba);
   4125
   4126out:
   4127	return ret;
   4128}
   4129
   4130int ufshcd_link_recovery(struct ufs_hba *hba)
   4131{
   4132	int ret;
   4133	unsigned long flags;
   4134
   4135	spin_lock_irqsave(hba->host->host_lock, flags);
   4136	hba->ufshcd_state = UFSHCD_STATE_RESET;
   4137	ufshcd_set_eh_in_progress(hba);
   4138	spin_unlock_irqrestore(hba->host->host_lock, flags);
   4139
   4140	/* Reset the attached device */
   4141	ufshcd_device_reset(hba);
   4142
   4143	ret = ufshcd_host_reset_and_restore(hba);
   4144
   4145	spin_lock_irqsave(hba->host->host_lock, flags);
   4146	if (ret)
   4147		hba->ufshcd_state = UFSHCD_STATE_ERROR;
   4148	ufshcd_clear_eh_in_progress(hba);
   4149	spin_unlock_irqrestore(hba->host->host_lock, flags);
   4150
   4151	if (ret)
   4152		dev_err(hba->dev, "%s: link recovery failed, err %d",
   4153			__func__, ret);
   4154
   4155	return ret;
   4156}
   4157EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
   4158
   4159int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
   4160{
   4161	int ret;
   4162	struct uic_command uic_cmd = {0};
   4163	ktime_t start = ktime_get();
   4164
   4165	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
   4166
   4167	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
   4168	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
   4169	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
   4170			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
   4171
   4172	if (ret)
   4173		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
   4174			__func__, ret);
   4175	else
   4176		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
   4177								POST_CHANGE);
   4178
   4179	return ret;
   4180}
   4181EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
   4182
   4183int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
   4184{
   4185	struct uic_command uic_cmd = {0};
   4186	int ret;
   4187	ktime_t start = ktime_get();
   4188
   4189	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
   4190
   4191	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
   4192	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
   4193	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
   4194			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
   4195
   4196	if (ret) {
   4197		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
   4198			__func__, ret);
   4199	} else {
   4200		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
   4201								POST_CHANGE);
   4202		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
   4203		hba->ufs_stats.hibern8_exit_cnt++;
   4204	}
   4205
   4206	return ret;
   4207}
   4208EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
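
        /*
         * Illustrative sketch: a host driver wrapping a vendor-specific
         * reconfiguration step with a manual hibern8 cycle. Error handling is
         * trimmed; a failed enter/exit already marks the link broken and
         * schedules error handling via ufshcd_uic_pwr_ctrl().
         *
         *	ret = ufshcd_uic_hibern8_enter(hba);
         *	if (ret)
         *		return ret;
         *	(reconfigure clocks/PHY here)
         *	ret = ufshcd_uic_hibern8_exit(hba);
         */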
   4209
   4210void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
   4211{
   4212	unsigned long flags;
   4213	bool update = false;
   4214
   4215	if (!ufshcd_is_auto_hibern8_supported(hba))
   4216		return;
   4217
   4218	spin_lock_irqsave(hba->host->host_lock, flags);
   4219	if (hba->ahit != ahit) {
   4220		hba->ahit = ahit;
   4221		update = true;
   4222	}
   4223	spin_unlock_irqrestore(hba->host->host_lock, flags);
   4224
   4225	if (update &&
   4226	    !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
   4227		ufshcd_rpm_get_sync(hba);
   4228		ufshcd_hold(hba, false);
   4229		ufshcd_auto_hibern8_enable(hba);
   4230		ufshcd_release(hba);
   4231		ufshcd_rpm_put_sync(hba);
   4232	}
   4233}
   4234EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
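
        /*
         * Illustrative sketch: programming a 150 ms auto-hibern8 idle timer.
         * The UFSHCI_AHIBERN8_* field macros are assumed to come from the
         * UFSHCI register definitions; FIELD_PREP() is from <linux/bitfield.h>.
         *
         *	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
         *		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK,
         *			      UFSHCI_AHIBERN8_SCALE_1_MS);
         *
         *	ufshcd_auto_hibern8_update(hba, ahit);
         */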
   4235
   4236void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
   4237{
   4238	if (!ufshcd_is_auto_hibern8_supported(hba))
   4239		return;
   4240
   4241	ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
   4242}
   4243
    4244/**
   4245 * ufshcd_init_pwr_info - setting the POR (power on reset)
   4246 * values in hba power info
   4247 * @hba: per-adapter instance
   4248 */
   4249static void ufshcd_init_pwr_info(struct ufs_hba *hba)
   4250{
   4251	hba->pwr_info.gear_rx = UFS_PWM_G1;
   4252	hba->pwr_info.gear_tx = UFS_PWM_G1;
   4253	hba->pwr_info.lane_rx = 1;
   4254	hba->pwr_info.lane_tx = 1;
   4255	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
   4256	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
   4257	hba->pwr_info.hs_rate = 0;
   4258}
   4259
   4260/**
   4261 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
   4262 * @hba: per-adapter instance
   4263 */
   4264static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
   4265{
   4266	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
   4267
   4268	if (hba->max_pwr_info.is_valid)
   4269		return 0;
   4270
   4271	pwr_info->pwr_tx = FAST_MODE;
   4272	pwr_info->pwr_rx = FAST_MODE;
   4273	pwr_info->hs_rate = PA_HS_MODE_B;
   4274
   4275	/* Get the connected lane count */
   4276	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
   4277			&pwr_info->lane_rx);
   4278	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
   4279			&pwr_info->lane_tx);
   4280
   4281	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
   4282		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
   4283				__func__,
   4284				pwr_info->lane_rx,
   4285				pwr_info->lane_tx);
   4286		return -EINVAL;
   4287	}
   4288
   4289	/*
   4290	 * First, get the maximum gears of HS speed.
   4291	 * If a zero value, it means there is no HSGEAR capability.
   4292	 * Then, get the maximum gears of PWM speed.
   4293	 */
   4294	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
   4295	if (!pwr_info->gear_rx) {
   4296		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
   4297				&pwr_info->gear_rx);
   4298		if (!pwr_info->gear_rx) {
   4299			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
   4300				__func__, pwr_info->gear_rx);
   4301			return -EINVAL;
   4302		}
   4303		pwr_info->pwr_rx = SLOW_MODE;
   4304	}
   4305
   4306	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
   4307			&pwr_info->gear_tx);
   4308	if (!pwr_info->gear_tx) {
   4309		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
   4310				&pwr_info->gear_tx);
   4311		if (!pwr_info->gear_tx) {
   4312			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
   4313				__func__, pwr_info->gear_tx);
   4314			return -EINVAL;
   4315		}
   4316		pwr_info->pwr_tx = SLOW_MODE;
   4317	}
   4318
   4319	hba->max_pwr_info.is_valid = true;
   4320	return 0;
   4321}
   4322
   4323static int ufshcd_change_power_mode(struct ufs_hba *hba,
   4324			     struct ufs_pa_layer_attr *pwr_mode)
   4325{
   4326	int ret;
   4327
   4328	/* if already configured to the requested pwr_mode */
   4329	if (!hba->force_pmc &&
   4330	    pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
   4331	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
   4332	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
   4333	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
   4334	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
   4335	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
   4336	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
   4337		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
   4338		return 0;
   4339	}
   4340
   4341	/*
    4342	 * Configure the following attributes for the power mode change:
   4343	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
   4344	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
   4345	 * - PA_HSSERIES
   4346	 */
   4347	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
   4348	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
   4349			pwr_mode->lane_rx);
   4350	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
   4351			pwr_mode->pwr_rx == FAST_MODE)
   4352		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
   4353	else
   4354		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);
   4355
   4356	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
   4357	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
   4358			pwr_mode->lane_tx);
   4359	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
   4360			pwr_mode->pwr_tx == FAST_MODE)
   4361		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
   4362	else
   4363		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);
   4364
   4365	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
   4366	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
   4367	    pwr_mode->pwr_rx == FAST_MODE ||
   4368	    pwr_mode->pwr_tx == FAST_MODE)
   4369		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
   4370						pwr_mode->hs_rate);
   4371
   4372	if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
   4373		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
   4374				DL_FC0ProtectionTimeOutVal_Default);
   4375		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
   4376				DL_TC0ReplayTimeOutVal_Default);
   4377		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
   4378				DL_AFC0ReqTimeOutVal_Default);
   4379		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
   4380				DL_FC1ProtectionTimeOutVal_Default);
   4381		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
   4382				DL_TC1ReplayTimeOutVal_Default);
   4383		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
   4384				DL_AFC1ReqTimeOutVal_Default);
   4385
   4386		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
   4387				DL_FC0ProtectionTimeOutVal_Default);
   4388		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
   4389				DL_TC0ReplayTimeOutVal_Default);
   4390		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
   4391				DL_AFC0ReqTimeOutVal_Default);
   4392	}
   4393
   4394	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
   4395			| pwr_mode->pwr_tx);
   4396
   4397	if (ret) {
   4398		dev_err(hba->dev,
   4399			"%s: power mode change failed %d\n", __func__, ret);
   4400	} else {
   4401		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
   4402								pwr_mode);
   4403
   4404		memcpy(&hba->pwr_info, pwr_mode,
   4405			sizeof(struct ufs_pa_layer_attr));
   4406	}
   4407
   4408	return ret;
   4409}
   4410
   4411/**
   4412 * ufshcd_config_pwr_mode - configure a new power mode
   4413 * @hba: per-adapter instance
   4414 * @desired_pwr_mode: desired power configuration
   4415 */
   4416int ufshcd_config_pwr_mode(struct ufs_hba *hba,
   4417		struct ufs_pa_layer_attr *desired_pwr_mode)
   4418{
   4419	struct ufs_pa_layer_attr final_params = { 0 };
   4420	int ret;
   4421
   4422	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
   4423					desired_pwr_mode, &final_params);
   4424
   4425	if (ret)
   4426		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
   4427
   4428	ret = ufshcd_change_power_mode(hba, &final_params);
   4429
   4430	return ret;
   4431}
   4432EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
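
        /*
         * Illustrative sketch: switching to the fastest mode negotiated at link
         * startup, the way the core typically uses this API once
         * ufshcd_get_max_pwr_mode() has populated hba->max_pwr_info.
         *
         *	if (!ufshcd_get_max_pwr_mode(hba)) {
         *		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
         *		if (ret)
         *			dev_err(hba->dev, "pwr mode change failed %d\n", ret);
         *	}
         */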
   4433
   4434/**
   4435 * ufshcd_complete_dev_init() - checks device readiness
   4436 * @hba: per-adapter instance
   4437 *
    4438 * Set the fDeviceInit flag and poll until the device clears it.
   4439 */
   4440static int ufshcd_complete_dev_init(struct ufs_hba *hba)
   4441{
   4442	int err;
   4443	bool flag_res = true;
   4444	ktime_t timeout;
   4445
   4446	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
   4447		QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
   4448	if (err) {
   4449		dev_err(hba->dev,
   4450			"%s setting fDeviceInit flag failed with error %d\n",
   4451			__func__, err);
   4452		goto out;
   4453	}
   4454
   4455	/* Poll fDeviceInit flag to be cleared */
   4456	timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
   4457	do {
   4458		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
   4459					QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
   4460		if (!flag_res)
   4461			break;
   4462		usleep_range(500, 1000);
   4463	} while (ktime_before(ktime_get(), timeout));
   4464
   4465	if (err) {
   4466		dev_err(hba->dev,
   4467				"%s reading fDeviceInit flag failed with error %d\n",
   4468				__func__, err);
   4469	} else if (flag_res) {
   4470		dev_err(hba->dev,
   4471				"%s fDeviceInit was not cleared by the device\n",
   4472				__func__);
   4473		err = -EBUSY;
   4474	}
   4475out:
   4476	return err;
   4477}
   4478
   4479/**
   4480 * ufshcd_make_hba_operational - Make UFS controller operational
   4481 * @hba: per adapter instance
   4482 *
   4483 * To bring UFS host controller to operational state,
   4484 * 1. Enable required interrupts
   4485 * 2. Configure interrupt aggregation
   4486 * 3. Program UTRL and UTMRL base address
   4487 * 4. Configure run-stop-registers
   4488 *
   4489 * Returns 0 on success, non-zero value on failure
   4490 */
   4491int ufshcd_make_hba_operational(struct ufs_hba *hba)
   4492{
   4493	int err = 0;
   4494	u32 reg;
   4495
   4496	/* Enable required interrupts */
   4497	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
   4498
   4499	/* Configure interrupt aggregation */
   4500	if (ufshcd_is_intr_aggr_allowed(hba))
   4501		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
   4502	else
   4503		ufshcd_disable_intr_aggr(hba);
   4504
   4505	/* Configure UTRL and UTMRL base address registers */
   4506	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
   4507			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
   4508	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
   4509			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
   4510	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
   4511			REG_UTP_TASK_REQ_LIST_BASE_L);
   4512	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
   4513			REG_UTP_TASK_REQ_LIST_BASE_H);
   4514
   4515	/*
   4516	 * Make sure base address and interrupt setup are updated before
   4517	 * enabling the run/stop registers below.
   4518	 */
   4519	wmb();
   4520
   4521	/*
   4522	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
   4523	 */
   4524	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
   4525	if (!(ufshcd_get_lists_status(reg))) {
   4526		ufshcd_enable_run_stop_reg(hba);
   4527	} else {
   4528		dev_err(hba->dev,
   4529			"Host controller not ready to process requests");
   4530		err = -EIO;
   4531	}
   4532
   4533	return err;
   4534}
   4535EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
   4536
   4537/**
   4538 * ufshcd_hba_stop - Send controller to reset state
   4539 * @hba: per adapter instance
   4540 */
   4541void ufshcd_hba_stop(struct ufs_hba *hba)
   4542{
   4543	unsigned long flags;
   4544	int err;
   4545
   4546	/*
   4547	 * Obtain the host lock to prevent that the controller is disabled
   4548	 * while the UFS interrupt handler is active on another CPU.
   4549	 */
   4550	spin_lock_irqsave(hba->host->host_lock, flags);
   4551	ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
   4552	spin_unlock_irqrestore(hba->host->host_lock, flags);
   4553
   4554	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
   4555					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
   4556					10, 1);
   4557	if (err)
   4558		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
   4559}
   4560EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
   4561
   4562/**
   4563 * ufshcd_hba_execute_hce - initialize the controller
   4564 * @hba: per adapter instance
   4565 *
    4566 * The controller resets itself and the controller firmware initialization
    4567 * sequence kicks off. When the controller is ready it will set
   4568 * the Host Controller Enable bit to 1.
   4569 *
   4570 * Returns 0 on success, non-zero value on failure
   4571 */
   4572static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
   4573{
   4574	int retry_outer = 3;
   4575	int retry_inner;
   4576
   4577start:
   4578	if (ufshcd_is_hba_active(hba))
   4579		/* change controller state to "reset state" */
   4580		ufshcd_hba_stop(hba);
   4581
   4582	/* UniPro link is disabled at this point */
   4583	ufshcd_set_link_off(hba);
   4584
   4585	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
   4586
   4587	/* start controller initialization sequence */
   4588	ufshcd_hba_start(hba);
   4589
   4590	/*
    4591	 * To initialize a UFS host controller, the HCE bit must be set to 1.
    4592	 * During initialization the HCE bit value changes from 1->0->1.
    4593	 * When the host controller completes the initialization sequence
    4594	 * it sets the HCE bit back to 1. The same HCE bit is read back
    4595	 * to check whether the controller has completed initialization.
    4596	 * Without this delay, the value HCE = 1 set by the previous
    4597	 * instruction might be read back prematurely.
    4598	 * This delay can be changed based on the controller.
   4599	 */
   4600	ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
   4601
   4602	/* wait for the host controller to complete initialization */
   4603	retry_inner = 50;
   4604	while (!ufshcd_is_hba_active(hba)) {
   4605		if (retry_inner) {
   4606			retry_inner--;
   4607		} else {
   4608			dev_err(hba->dev,
   4609				"Controller enable failed\n");
   4610			if (retry_outer) {
   4611				retry_outer--;
   4612				goto start;
   4613			}
   4614			return -EIO;
   4615		}
   4616		usleep_range(1000, 1100);
   4617	}
   4618
   4619	/* enable UIC related interrupts */
   4620	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
   4621
   4622	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
   4623
   4624	return 0;
   4625}
   4626
   4627int ufshcd_hba_enable(struct ufs_hba *hba)
   4628{
   4629	int ret;
   4630
   4631	if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
   4632		ufshcd_set_link_off(hba);
   4633		ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
   4634
   4635		/* enable UIC related interrupts */
   4636		ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
   4637		ret = ufshcd_dme_reset(hba);
   4638		if (!ret) {
   4639			ret = ufshcd_dme_enable(hba);
   4640			if (!ret)
   4641				ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
   4642			if (ret)
   4643				dev_err(hba->dev,
   4644					"Host controller enable failed with non-hce\n");
   4645		}
   4646	} else {
   4647		ret = ufshcd_hba_execute_hce(hba);
   4648	}
   4649
   4650	return ret;
   4651}
   4652EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
   4653
   4654static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
   4655{
   4656	int tx_lanes = 0, i, err = 0;
   4657
   4658	if (!peer)
   4659		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
   4660			       &tx_lanes);
   4661	else
   4662		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
   4663				    &tx_lanes);
   4664	for (i = 0; i < tx_lanes; i++) {
   4665		if (!peer)
   4666			err = ufshcd_dme_set(hba,
   4667				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
   4668					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
   4669					0);
   4670		else
   4671			err = ufshcd_dme_peer_set(hba,
   4672				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
   4673					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
   4674					0);
   4675		if (err) {
   4676			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
   4677				__func__, peer, i, err);
   4678			break;
   4679		}
   4680	}
   4681
   4682	return err;
   4683}
   4684
   4685static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
   4686{
   4687	return ufshcd_disable_tx_lcc(hba, true);
   4688}
   4689
   4690void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
   4691{
   4692	struct ufs_event_hist *e;
   4693
   4694	if (id >= UFS_EVT_CNT)
   4695		return;
   4696
   4697	e = &hba->ufs_stats.event[id];
   4698	e->val[e->pos] = val;
   4699	e->tstamp[e->pos] = ktime_get();
   4700	e->cnt += 1;
   4701	e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
   4702
   4703	ufshcd_vops_event_notify(hba, id, &val);
   4704}
   4705EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
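
        /*
         * Illustrative sketch: recording an event in the history ring, as the
         * link startup path below does on failure.
         *
         *	ufshcd_update_evt_hist(hba, UFS_EVT_LINK_STARTUP_FAIL, (u32)ret);
         */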
   4706
   4707/**
   4708 * ufshcd_link_startup - Initialize unipro link startup
   4709 * @hba: per adapter instance
   4710 *
   4711 * Returns 0 for success, non-zero in case of failure
   4712 */
   4713static int ufshcd_link_startup(struct ufs_hba *hba)
   4714{
   4715	int ret;
   4716	int retries = DME_LINKSTARTUP_RETRIES;
   4717	bool link_startup_again = false;
   4718
   4719	/*
    4720	 * If the UFS device isn't active then we will have to issue link startup
    4721	 * twice to make sure the device state moves to active.
   4722	 */
   4723	if (!ufshcd_is_ufs_dev_active(hba))
   4724		link_startup_again = true;
   4725
   4726link_startup:
   4727	do {
   4728		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
   4729
   4730		ret = ufshcd_dme_link_startup(hba);
   4731
   4732		/* check if device is detected by inter-connect layer */
   4733		if (!ret && !ufshcd_is_device_present(hba)) {
   4734			ufshcd_update_evt_hist(hba,
   4735					       UFS_EVT_LINK_STARTUP_FAIL,
   4736					       0);
   4737			dev_err(hba->dev, "%s: Device not present\n", __func__);
   4738			ret = -ENXIO;
   4739			goto out;
   4740		}
   4741
   4742		/*
   4743		 * DME link lost indication is only received when link is up,
   4744		 * but we can't be sure if the link is up until link startup
    4745		 * succeeds. So reset the local UniPro stack and try again.
   4746		 */
   4747		if (ret && ufshcd_hba_enable(hba)) {
   4748			ufshcd_update_evt_hist(hba,
   4749					       UFS_EVT_LINK_STARTUP_FAIL,
   4750					       (u32)ret);
   4751			goto out;
   4752		}
   4753	} while (ret && retries--);
   4754
   4755	if (ret) {
    4756		/* failed to get the link up... give up */
   4757		ufshcd_update_evt_hist(hba,
   4758				       UFS_EVT_LINK_STARTUP_FAIL,
   4759				       (u32)ret);
   4760		goto out;
   4761	}
   4762
   4763	if (link_startup_again) {
   4764		link_startup_again = false;
   4765		retries = DME_LINKSTARTUP_RETRIES;
   4766		goto link_startup;
   4767	}
   4768
   4769	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
   4770	ufshcd_init_pwr_info(hba);
   4771	ufshcd_print_pwr_info(hba);
   4772
   4773	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
   4774		ret = ufshcd_disable_device_tx_lcc(hba);
   4775		if (ret)
   4776			goto out;
   4777	}
   4778
   4779	/* Include any host controller configuration via UIC commands */
   4780	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
   4781	if (ret)
   4782		goto out;
   4783
   4784	/* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
   4785	ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
   4786	ret = ufshcd_make_hba_operational(hba);
   4787out:
   4788	if (ret) {
   4789		dev_err(hba->dev, "link startup failed %d\n", ret);
   4790		ufshcd_print_host_state(hba);
   4791		ufshcd_print_pwr_info(hba);
   4792		ufshcd_print_evt_hist(hba);
   4793	}
   4794	return ret;
   4795}
   4796
   4797/**
   4798 * ufshcd_verify_dev_init() - Verify device initialization
   4799 * @hba: per-adapter instance
   4800 *
   4801 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
    4802 * device's Transport Protocol (UTP) layer is ready after a reset.
    4803 * If the UTP layer at the device side is not initialized, it may
    4804 * not respond with a NOP IN UPIU within the %NOP_OUT_TIMEOUT timeout,
    4805 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
   4806 */
   4807static int ufshcd_verify_dev_init(struct ufs_hba *hba)
   4808{
   4809	int err = 0;
   4810	int retries;
   4811
   4812	ufshcd_hold(hba, false);
   4813	mutex_lock(&hba->dev_cmd.lock);
   4814	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
   4815		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
   4816					  hba->nop_out_timeout);
   4817
   4818		if (!err || err == -ETIMEDOUT)
   4819			break;
   4820
   4821		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
   4822	}
   4823	mutex_unlock(&hba->dev_cmd.lock);
   4824	ufshcd_release(hba);
   4825
   4826	if (err)
   4827		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
   4828	return err;
   4829}
   4830
   4831/**
   4832 * ufshcd_set_queue_depth - set lun queue depth
   4833 * @sdev: pointer to SCSI device
   4834 *
   4835 * Read bLUQueueDepth value and activate scsi tagged command
   4836 * queueing. For WLUN, queue depth is set to 1. For best-effort
    4837 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
    4838 * value that the host can queue.
   4839 */
   4840static void ufshcd_set_queue_depth(struct scsi_device *sdev)
   4841{
   4842	int ret = 0;
   4843	u8 lun_qdepth;
   4844	struct ufs_hba *hba;
   4845
   4846	hba = shost_priv(sdev->host);
   4847
   4848	lun_qdepth = hba->nutrs;
   4849	ret = ufshcd_read_unit_desc_param(hba,
   4850					  ufshcd_scsi_to_upiu_lun(sdev->lun),
   4851					  UNIT_DESC_PARAM_LU_Q_DEPTH,
   4852					  &lun_qdepth,
   4853					  sizeof(lun_qdepth));
   4854
    4855	/* Some WLUNs don't support the unit descriptor */
   4856	if (ret == -EOPNOTSUPP)
   4857		lun_qdepth = 1;
   4858	else if (!lun_qdepth)
   4859		/* eventually, we can figure out the real queue depth */
   4860		lun_qdepth = hba->nutrs;
   4861	else
   4862		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
   4863
   4864	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
   4865			__func__, lun_qdepth);
   4866	scsi_change_queue_depth(sdev, lun_qdepth);
   4867}
   4868
   4869/*
   4870 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
   4871 * @hba: per-adapter instance
   4872 * @lun: UFS device lun id
   4873 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
   4874 *
    4875 * Returns 0 in case of success, in which case the write protect status is
    4876 * returned in the @b_lu_write_protect parameter.
   4877 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
   4878 * Returns -EINVAL in case of invalid parameters passed to this function.
   4879 */
   4880static int ufshcd_get_lu_wp(struct ufs_hba *hba,
   4881			    u8 lun,
   4882			    u8 *b_lu_write_protect)
   4883{
   4884	int ret;
   4885
   4886	if (!b_lu_write_protect)
   4887		ret = -EINVAL;
   4888	/*
   4889	 * According to UFS device spec, RPMB LU can't be write
   4890	 * protected so skip reading bLUWriteProtect parameter for
   4891	 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
   4892	 */
   4893	else if (lun >= hba->dev_info.max_lu_supported)
   4894		ret = -ENOTSUPP;
   4895	else
   4896		ret = ufshcd_read_unit_desc_param(hba,
   4897					  lun,
   4898					  UNIT_DESC_PARAM_LU_WR_PROTECT,
   4899					  b_lu_write_protect,
   4900					  sizeof(*b_lu_write_protect));
   4901	return ret;
   4902}
   4903
   4904/**
   4905 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
   4906 * status
   4907 * @hba: per-adapter instance
   4908 * @sdev: pointer to SCSI device
   4909 *
   4910 */
   4911static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
   4912						    struct scsi_device *sdev)
   4913{
   4914	if (hba->dev_info.f_power_on_wp_en &&
   4915	    !hba->dev_info.is_lu_power_on_wp) {
   4916		u8 b_lu_write_protect;
   4917
   4918		if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
   4919				      &b_lu_write_protect) &&
   4920		    (b_lu_write_protect == UFS_LU_POWER_ON_WP))
   4921			hba->dev_info.is_lu_power_on_wp = true;
   4922	}
   4923}
   4924
   4925/**
    4926 * ufshcd_setup_links - associate links between the device WLUN and other LUNs
   4927 * @sdev: pointer to SCSI device
   4928 * @hba: pointer to ufs hba
   4929 */
   4930static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
   4931{
   4932	struct device_link *link;
   4933
   4934	/*
   4935	 * Device wlun is the supplier & rest of the luns are consumers.
   4936	 * This ensures that device wlun suspends after all other luns.
   4937	 */
   4938	if (hba->ufs_device_wlun) {
   4939		link = device_link_add(&sdev->sdev_gendev,
   4940				       &hba->ufs_device_wlun->sdev_gendev,
   4941				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
   4942		if (!link) {
   4943			dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
   4944				dev_name(&hba->ufs_device_wlun->sdev_gendev));
   4945			return;
   4946		}
   4947		hba->luns_avail--;
   4948		/* Ignore REPORT_LUN wlun probing */
   4949		if (hba->luns_avail == 1) {
   4950			ufshcd_rpm_put(hba);
   4951			return;
   4952		}
   4953	} else {
   4954		/*
    4955		 * The device WLUN itself is being probed here. The assumption
    4956		 * is that WLUNs are scanned before other LUNs.
   4957		 */
   4958		hba->luns_avail--;
   4959	}
   4960}
   4961
   4962/**
   4963 * ufshcd_slave_alloc - handle initial SCSI device configurations
   4964 * @sdev: pointer to SCSI device
   4965 *
   4966 * Returns success
   4967 */
   4968static int ufshcd_slave_alloc(struct scsi_device *sdev)
   4969{
   4970	struct ufs_hba *hba;
   4971
   4972	hba = shost_priv(sdev->host);
   4973
   4974	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
   4975	sdev->use_10_for_ms = 1;
   4976
   4977	/* DBD field should be set to 1 in mode sense(10) */
   4978	sdev->set_dbd_for_ms = 1;
   4979
   4980	/* allow SCSI layer to restart the device in case of errors */
   4981	sdev->allow_restart = 1;
   4982
   4983	/* REPORT SUPPORTED OPERATION CODES is not supported */
   4984	sdev->no_report_opcodes = 1;
   4985
   4986	/* WRITE_SAME command is not supported */
   4987	sdev->no_write_same = 1;
   4988
   4989	ufshcd_set_queue_depth(sdev);
   4990
   4991	ufshcd_get_lu_power_on_wp_status(hba, sdev);
   4992
   4993	ufshcd_setup_links(hba, sdev);
   4994
   4995	return 0;
   4996}
   4997
   4998/**
   4999 * ufshcd_change_queue_depth - change queue depth
   5000 * @sdev: pointer to SCSI device
   5001 * @depth: required depth to set
   5002 *
   5003 * Change queue depth and make sure the max. limits are not crossed.
   5004 */
   5005static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
   5006{
   5007	return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
   5008}
   5009
   5010static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
   5011{
   5012	/* skip well-known LU */
   5013	if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
   5014	    !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
   5015		return;
   5016
   5017	ufshpb_destroy_lu(hba, sdev);
   5018}
   5019
   5020static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
   5021{
   5022	/* skip well-known LU */
   5023	if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
   5024	    !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
   5025		return;
   5026
   5027	ufshpb_init_hpb_lu(hba, sdev);
   5028}
   5029
   5030/**
   5031 * ufshcd_slave_configure - adjust SCSI device configurations
   5032 * @sdev: pointer to SCSI device
   5033 */
   5034static int ufshcd_slave_configure(struct scsi_device *sdev)
   5035{
   5036	struct ufs_hba *hba = shost_priv(sdev->host);
   5037	struct request_queue *q = sdev->request_queue;
   5038
   5039	ufshcd_hpb_configure(hba, sdev);
   5040
   5041	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
   5042	if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
   5043		blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
   5044	/*
   5045	 * Block runtime-pm until all consumers are added.
   5046	 * Refer ufshcd_setup_links().
   5047	 */
   5048	if (is_device_wlun(sdev))
   5049		pm_runtime_get_noresume(&sdev->sdev_gendev);
   5050	else if (ufshcd_is_rpm_autosuspend_allowed(hba))
   5051		sdev->rpm_autosuspend = 1;
   5052	/*
   5053	 * Do not print messages during runtime PM to avoid never-ending cycles
   5054	 * of messages written back to storage by user space causing runtime
   5055	 * resume, causing more messages and so on.
   5056	 */
   5057	sdev->silence_suspend = 1;
   5058
   5059	ufshcd_crypto_register(hba, q);
   5060
   5061	return 0;
   5062}
   5063
   5064/**
   5065 * ufshcd_slave_destroy - remove SCSI device configurations
   5066 * @sdev: pointer to SCSI device
   5067 */
   5068static void ufshcd_slave_destroy(struct scsi_device *sdev)
   5069{
   5070	struct ufs_hba *hba;
   5071	unsigned long flags;
   5072
   5073	hba = shost_priv(sdev->host);
   5074
   5075	ufshcd_hpb_destroy(hba, sdev);
   5076
   5077	/* Drop the reference as it won't be needed anymore */
   5078	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
   5079		spin_lock_irqsave(hba->host->host_lock, flags);
   5080		hba->ufs_device_wlun = NULL;
   5081		spin_unlock_irqrestore(hba->host->host_lock, flags);
   5082	} else if (hba->ufs_device_wlun) {
   5083		struct device *supplier = NULL;
   5084
   5085		/* Ensure UFS Device WLUN exists and does not disappear */
   5086		spin_lock_irqsave(hba->host->host_lock, flags);
   5087		if (hba->ufs_device_wlun) {
   5088			supplier = &hba->ufs_device_wlun->sdev_gendev;
   5089			get_device(supplier);
   5090		}
   5091		spin_unlock_irqrestore(hba->host->host_lock, flags);
   5092
   5093		if (supplier) {
   5094			/*
   5095			 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
   5096			 * device will not have been registered but can still
   5097			 * have a device link holding a reference to the device.
   5098			 */
   5099			device_link_remove(&sdev->sdev_gendev, supplier);
   5100			put_device(supplier);
   5101		}
   5102	}
   5103}
   5104
   5105/**
   5106 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
   5107 * @lrbp: pointer to local reference block of completed command
   5108 * @scsi_status: SCSI command status
   5109 *
    5110 * Returns a value based on the SCSI command status
   5111 */
   5112static inline int
   5113ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
   5114{
   5115	int result = 0;
   5116
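        	/*
        	 * Note: the SCSI midlayer result word packs the host byte in
        	 * bits 16..23 and the SCSI status in the low byte, which is why
        	 * the cases below OR a DID_* code shifted by 16 with scsi_status.
        	 */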
   5117	switch (scsi_status) {
   5118	case SAM_STAT_CHECK_CONDITION:
   5119		ufshcd_copy_sense_data(lrbp);
   5120		fallthrough;
   5121	case SAM_STAT_GOOD:
   5122		result |= DID_OK << 16 | scsi_status;
   5123		break;
   5124	case SAM_STAT_TASK_SET_FULL:
   5125	case SAM_STAT_BUSY:
   5126	case SAM_STAT_TASK_ABORTED:
   5127		ufshcd_copy_sense_data(lrbp);
   5128		result |= scsi_status;
   5129		break;
   5130	default:
   5131		result |= DID_ERROR << 16;
   5132		break;
   5133	} /* end of switch */
   5134
   5135	return result;
   5136}
   5137
   5138/**
   5139 * ufshcd_transfer_rsp_status - Get overall status of the response
   5140 * @hba: per adapter instance
   5141 * @lrbp: pointer to local reference block of completed command
   5142 *
   5143 * Returns result of the command to notify SCSI midlayer
   5144 */
   5145static inline int
   5146ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
   5147{
   5148	int result = 0;
   5149	int scsi_status;
   5150	enum utp_ocs ocs;
   5151
   5152	/* overall command status of utrd */
   5153	ocs = ufshcd_get_tr_ocs(lrbp);
   5154
   5155	if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
   5156		if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
   5157					MASK_RSP_UPIU_RESULT)
   5158			ocs = OCS_SUCCESS;
   5159	}
   5160
   5161	switch (ocs) {
   5162	case OCS_SUCCESS:
   5163		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
   5164		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
   5165		switch (result) {
   5166		case UPIU_TRANSACTION_RESPONSE:
   5167			/*
   5168			 * get the response UPIU result to extract
   5169			 * the SCSI command status
   5170			 */
   5171			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
   5172
   5173			/*
   5174			 * get the result based on SCSI status response
   5175			 * to notify the SCSI midlayer of the command status
   5176			 */
   5177			scsi_status = result & MASK_SCSI_STATUS;
   5178			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
   5179
   5180			/*
    5181			 * Currently only the BKOPS exception event is supported,
    5182			 * so exception events can be ignored during power
    5183			 * management callbacks. The BKOPS exception event is not
    5184			 * expected to be raised in the runtime suspend callback
    5185			 * because urgent BKOPS is allowed there.
    5186			 * During system suspend BKOPS is forcefully disabled
    5187			 * anyway, and if urgent BKOPS is needed it will be
    5188			 * enabled on system resume. A long term solution could
    5189			 * be to abort the system suspend if the UFS device
    5190			 * needs urgent BKOPS.
   5191			 */
   5192			if (!hba->pm_op_in_progress &&
   5193			    !ufshcd_eh_in_progress(hba) &&
   5194			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
   5195				/* Flushed in suspend */
   5196				schedule_work(&hba->eeh_work);
   5197
   5198			if (scsi_status == SAM_STAT_GOOD)
   5199				ufshpb_rsp_upiu(hba, lrbp);
   5200			break;
   5201		case UPIU_TRANSACTION_REJECT_UPIU:
   5202			/* TODO: handle Reject UPIU Response */
   5203			result = DID_ERROR << 16;
   5204			dev_err(hba->dev,
   5205				"Reject UPIU not fully implemented\n");
   5206			break;
   5207		default:
   5208			dev_err(hba->dev,
   5209				"Unexpected request response code = %x\n",
   5210				result);
   5211			result = DID_ERROR << 16;
   5212			break;
   5213		}
   5214		break;
   5215	case OCS_ABORTED:
   5216		result |= DID_ABORT << 16;
   5217		break;
   5218	case OCS_INVALID_COMMAND_STATUS:
   5219		result |= DID_REQUEUE << 16;
   5220		break;
   5221	case OCS_INVALID_CMD_TABLE_ATTR:
   5222	case OCS_INVALID_PRDT_ATTR:
   5223	case OCS_MISMATCH_DATA_BUF_SIZE:
   5224	case OCS_MISMATCH_RESP_UPIU_SIZE:
   5225	case OCS_PEER_COMM_FAILURE:
   5226	case OCS_FATAL_ERROR:
   5227	case OCS_DEVICE_FATAL_ERROR:
   5228	case OCS_INVALID_CRYPTO_CONFIG:
   5229	case OCS_GENERAL_CRYPTO_ERROR:
   5230	default:
   5231		result |= DID_ERROR << 16;
   5232		dev_err(hba->dev,
   5233				"OCS error from controller = %x for tag %d\n",
   5234				ocs, lrbp->task_tag);
   5235		ufshcd_print_evt_hist(hba);
   5236		ufshcd_print_host_state(hba);
   5237		break;
   5238	} /* end of switch */
   5239
   5240	if ((host_byte(result) != DID_OK) &&
   5241	    (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
   5242		ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
   5243	return result;
   5244}
   5245
   5246static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
   5247					 u32 intr_mask)
   5248{
   5249	if (!ufshcd_is_auto_hibern8_supported(hba) ||
   5250	    !ufshcd_is_auto_hibern8_enabled(hba))
   5251		return false;
   5252
   5253	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
   5254		return false;
   5255
   5256	if (hba->active_uic_cmd &&
   5257	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
   5258	    hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
   5259		return false;
   5260
   5261	return true;
   5262}
   5263
   5264/**
   5265 * ufshcd_uic_cmd_compl - handle completion of uic command
   5266 * @hba: per adapter instance
   5267 * @intr_status: interrupt status generated by the controller
   5268 *
   5269 * Returns
   5270 *  IRQ_HANDLED - If interrupt is valid
   5271 *  IRQ_NONE    - If invalid interrupt
   5272 */
   5273static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
   5274{
   5275	irqreturn_t retval = IRQ_NONE;
   5276
   5277	spin_lock(hba->host->host_lock);
   5278	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
   5279		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
   5280
   5281	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
   5282		hba->active_uic_cmd->argument2 |=
   5283			ufshcd_get_uic_cmd_result(hba);
   5284		hba->active_uic_cmd->argument3 =
   5285			ufshcd_get_dme_attr_val(hba);
   5286		if (!hba->uic_async_done)
   5287			hba->active_uic_cmd->cmd_active = 0;
   5288		complete(&hba->active_uic_cmd->done);
   5289		retval = IRQ_HANDLED;
   5290	}
   5291
   5292	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
   5293		hba->active_uic_cmd->cmd_active = 0;
   5294		complete(hba->uic_async_done);
   5295		retval = IRQ_HANDLED;
   5296	}
   5297
   5298	if (retval == IRQ_HANDLED)
   5299		ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
   5300					     UFS_CMD_COMP);
   5301	spin_unlock(hba->host->host_lock);
   5302	return retval;
   5303}
   5304
   5305/* Release the resources allocated for processing a SCSI command. */
   5306static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
   5307				    struct ufshcd_lrb *lrbp)
   5308{
   5309	struct scsi_cmnd *cmd = lrbp->cmd;
   5310
   5311	scsi_dma_unmap(cmd);
   5312	lrbp->cmd = NULL;	/* Mark the command as completed. */
   5313	ufshcd_release(hba);
   5314	ufshcd_clk_scaling_update_busy(hba);
   5315}
   5316
   5317/**
   5318 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
   5319 * @hba: per adapter instance
   5320 * @completed_reqs: bitmask that indicates which requests to complete
   5321 */
   5322static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
   5323					unsigned long completed_reqs)
   5324{
   5325	struct ufshcd_lrb *lrbp;
   5326	struct scsi_cmnd *cmd;
   5327	int index;
   5328
   5329	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
   5330		lrbp = &hba->lrb[index];
   5331		lrbp->compl_time_stamp = ktime_get();
   5332		cmd = lrbp->cmd;
   5333		if (cmd) {
   5334			if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
   5335				ufshcd_update_monitor(hba, lrbp);
   5336			ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
   5337			cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
   5338			ufshcd_release_scsi_cmd(hba, lrbp);
   5339			/* Do not touch lrbp after scsi done */
   5340			scsi_done(cmd);
   5341		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
   5342			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
   5343			if (hba->dev_cmd.complete) {
   5344				ufshcd_add_command_trace(hba, index,
   5345							 UFS_DEV_COMP);
   5346				complete(hba->dev_cmd.complete);
   5347				ufshcd_clk_scaling_update_busy(hba);
   5348			}
   5349		}
   5350	}
   5351}
   5352
   5353/*
   5354 * Returns > 0 if one or more commands have been completed or 0 if no
   5355 * requests have been completed.
   5356 */
   5357static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
   5358{
   5359	struct ufs_hba *hba = shost_priv(shost);
   5360	unsigned long completed_reqs, flags;
   5361	u32 tr_doorbell;
   5362
   5363	spin_lock_irqsave(&hba->outstanding_lock, flags);
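        	/*
        	 * A request has completed once the controller has cleared its
        	 * doorbell bit while its bit is still set in outstanding_reqs.
        	 * E.g. outstanding_reqs == 0b1011 and tr_doorbell == 0b0010
        	 * yields completed_reqs == 0b1001.
        	 */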
   5364	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
   5365	completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
   5366	WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
   5367		  "completed: %#lx; outstanding: %#lx\n", completed_reqs,
   5368		  hba->outstanding_reqs);
   5369	hba->outstanding_reqs &= ~completed_reqs;
   5370	spin_unlock_irqrestore(&hba->outstanding_lock, flags);
   5371
   5372	if (completed_reqs)
   5373		__ufshcd_transfer_req_compl(hba, completed_reqs);
   5374
   5375	return completed_reqs;
   5376}
   5377
   5378/**
   5379 * ufshcd_transfer_req_compl - handle SCSI and query command completion
   5380 * @hba: per adapter instance
   5381 *
   5382 * Returns
   5383 *  IRQ_HANDLED - If interrupt is valid
   5384 *  IRQ_NONE    - If invalid interrupt
   5385 */
   5386static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
   5387{
    5388	/* Resetting the interrupt aggregation counters first and reading the
    5389	 * DOOR_BELL afterward allows us to handle all the completed requests.
    5390	 * In order to prevent starvation of other interrupts the DB is read
    5391	 * only once after the reset. The downside of this approach is the
    5392	 * possibility of a spurious interrupt if the device completes another
    5393	 * request after the aggregation reset but before the DB is read.
   5394	 */
   5395	if (ufshcd_is_intr_aggr_allowed(hba) &&
   5396	    !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
   5397		ufshcd_reset_intr_aggr(hba);
   5398
   5399	if (ufs_fail_completion())
   5400		return IRQ_HANDLED;
   5401
   5402	/*
   5403	 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
   5404	 * do not want polling to trigger spurious interrupt complaints.
   5405	 */
   5406	ufshcd_poll(hba->host, 0);
   5407
   5408	return IRQ_HANDLED;
   5409}
   5410
   5411int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
   5412{
   5413	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
   5414				       QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
   5415				       &ee_ctrl_mask);
   5416}
   5417
   5418int ufshcd_write_ee_control(struct ufs_hba *hba)
   5419{
   5420	int err;
   5421
   5422	mutex_lock(&hba->ee_ctrl_mutex);
   5423	err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
   5424	mutex_unlock(&hba->ee_ctrl_mutex);
   5425	if (err)
   5426		dev_err(hba->dev, "%s: failed to write ee control %d\n",
   5427			__func__, err);
   5428	return err;
   5429}
   5430
   5431int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
   5432			     u16 set, u16 clr)
   5433{
   5434	u16 new_mask, ee_ctrl_mask;
   5435	int err = 0;
   5436
   5437	mutex_lock(&hba->ee_ctrl_mutex);
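        	/*
        	 * Apply the requested set/clear bits to this caller's mask and
        	 * recombine it with the other mask. E.g. *mask == 0x05, set ==
        	 * 0x02 and clr == 0x01 give new_mask == 0x06, which OR-ed with
        	 * *other_mask forms the ee_ctrl_mask written to the device.
        	 */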
   5438	new_mask = (*mask & ~clr) | set;
   5439	ee_ctrl_mask = new_mask | *other_mask;
   5440	if (ee_ctrl_mask != hba->ee_ctrl_mask)
   5441		err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
   5442	/* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
   5443	if (!err) {
   5444		hba->ee_ctrl_mask = ee_ctrl_mask;
   5445		*mask = new_mask;
   5446	}
   5447	mutex_unlock(&hba->ee_ctrl_mutex);
   5448	return err;
   5449}
   5450
   5451/**
   5452 * ufshcd_disable_ee - disable exception event
   5453 * @hba: per-adapter instance
   5454 * @mask: exception event to disable
   5455 *
   5456 * Disables exception event in the device so that the EVENT_ALERT
   5457 * bit is not set.
   5458 *
   5459 * Returns zero on success, non-zero error value on failure.
   5460 */
   5461static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
   5462{
   5463	return ufshcd_update_ee_drv_mask(hba, 0, mask);
   5464}
   5465
   5466/**
   5467 * ufshcd_enable_ee - enable exception event
   5468 * @hba: per-adapter instance
   5469 * @mask: exception event to enable
   5470 *
    5471 * Enable the corresponding exception event in the device so that the
    5472 * device can alert the host in critical scenarios.
   5473 *
   5474 * Returns zero on success, non-zero error value on failure.
   5475 */
   5476static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
   5477{
   5478	return ufshcd_update_ee_drv_mask(hba, mask, 0);
   5479}
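
        /*
         * For example, ufshcd_disable_auto_bkops() below re-enables the
         * MASK_EE_URGENT_BKOPS exception so that the device can signal urgent
         * BKOPS to the host, while ufshcd_enable_auto_bkops() disables it
         * again because the device then manages BKOPS on its own.
         */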
   5480
   5481/**
   5482 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
   5483 * @hba: per-adapter instance
   5484 *
    5485 * Allow the device to manage background operations on its own. Enabling
    5486 * this might lead to inconsistent latencies during normal data transfers
    5487 * because the device is free to schedule its background operations in
    5488 * whatever way it chooses.
   5489 *
   5490 * Returns zero on success, non-zero on failure.
   5491 */
   5492static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
   5493{
   5494	int err = 0;
   5495
   5496	if (hba->auto_bkops_enabled)
   5497		goto out;
   5498
   5499	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
   5500			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
   5501	if (err) {
   5502		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
   5503				__func__, err);
   5504		goto out;
   5505	}
   5506
   5507	hba->auto_bkops_enabled = true;
   5508	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
   5509
   5510	/* No need of URGENT_BKOPS exception from the device */
   5511	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
   5512	if (err)
   5513		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
   5514				__func__, err);
   5515out:
   5516	return err;
   5517}
   5518
   5519/**
   5520 * ufshcd_disable_auto_bkops - block device in doing background operations
   5521 * @hba: per-adapter instance
   5522 *
    5523 * Disabling background operations improves command response latency but
    5524 * has the drawback that the device may move into a critical state in
    5525 * which it is no longer operable. Make sure to call
    5526 * ufshcd_enable_auto_bkops() whenever the host is idle so that BKOPS is
    5527 * managed effectively without any negative impact.
   5528 *
   5529 * Returns zero on success, non-zero on failure.
   5530 */
   5531static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
   5532{
   5533	int err = 0;
   5534
   5535	if (!hba->auto_bkops_enabled)
   5536		goto out;
   5537
   5538	/*
   5539	 * If host assisted BKOPs is to be enabled, make sure
   5540	 * urgent bkops exception is allowed.
   5541	 */
   5542	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
   5543	if (err) {
   5544		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
   5545				__func__, err);
   5546		goto out;
   5547	}
   5548
   5549	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
   5550			QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
   5551	if (err) {
   5552		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
   5553				__func__, err);
   5554		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
   5555		goto out;
   5556	}
   5557
   5558	hba->auto_bkops_enabled = false;
   5559	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
   5560	hba->is_urgent_bkops_lvl_checked = false;
   5561out:
   5562	return err;
   5563}
   5564
   5565/**
   5566 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
   5567 * @hba: per adapter instance
   5568 *
    5569 * After a device reset the device may toggle the BKOPS_EN flag back
    5570 * to its default value, so the s/w tracking variables must be updated
    5571 * as well. This function changes the auto-bkops state based on
    5572 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
   5573 */
   5574static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
   5575{
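        	/*
        	 * Force the s/w tracking flag to the opposite of the desired
        	 * state so that the ufshcd_{enable,disable}_auto_bkops() call
        	 * below does not take its early-return path and actually
        	 * re-sends the BKOPS_EN query to the device.
        	 */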
   5576	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
   5577		hba->auto_bkops_enabled = false;
   5578		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
   5579		ufshcd_enable_auto_bkops(hba);
   5580	} else {
   5581		hba->auto_bkops_enabled = true;
   5582		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
   5583		ufshcd_disable_auto_bkops(hba);
   5584	}
   5585	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
   5586	hba->is_urgent_bkops_lvl_checked = false;
   5587}
   5588
   5589static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
   5590{
   5591	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
   5592			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
   5593}
   5594
   5595/**
   5596 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
   5597 * @hba: per-adapter instance
   5598 * @status: bkops_status value
   5599 *
    5600 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
    5601 * flag in the device to permit background operations if the device
    5602 * bkops_status is greater than or equal to the "status" argument passed to
    5603 * this function; disable it otherwise.
   5604 *
   5605 * Returns 0 for success, non-zero in case of failure.
   5606 *
   5607 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
   5608 * to know whether auto bkops is enabled or disabled after this function
   5609 * returns control to it.
   5610 */
   5611static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
   5612			     enum bkops_status status)
   5613{
   5614	int err;
   5615	u32 curr_status = 0;
   5616
   5617	err = ufshcd_get_bkops_status(hba, &curr_status);
   5618	if (err) {
   5619		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
   5620				__func__, err);
   5621		goto out;
   5622	} else if (curr_status > BKOPS_STATUS_MAX) {
   5623		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
   5624				__func__, curr_status);
   5625		err = -EINVAL;
   5626		goto out;
   5627	}
   5628
   5629	if (curr_status >= status)
   5630		err = ufshcd_enable_auto_bkops(hba);
   5631	else
   5632		err = ufshcd_disable_auto_bkops(hba);
   5633out:
   5634	return err;
   5635}
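
        /*
         * Example, assuming the usual bkops_status ordering (NO_OP <
         * NON_CRITICAL < PERF_IMPACT < CRITICAL): with status ==
         * BKOPS_STATUS_PERF_IMPACT, a device reporting CRITICAL gets auto
         * bkops enabled, while one reporting NON_CRITICAL gets it disabled.
         */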
   5636
   5637/**
   5638 * ufshcd_urgent_bkops - handle urgent bkops exception event
   5639 * @hba: per-adapter instance
   5640 *
   5641 * Enable fBackgroundOpsEn flag in the device to permit background
   5642 * operations.
   5643 *
    5644 * If BKOPS is enabled, this function returns 0; it returns 1 if BKOPS is
    5645 * not enabled, and a negative error value for any other failure.
   5646 */
   5647static int ufshcd_urgent_bkops(struct ufs_hba *hba)
   5648{
   5649	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
   5650}
   5651
   5652static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
   5653{
   5654	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
   5655			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
   5656}
   5657
   5658static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
   5659{
   5660	int err;
   5661	u32 curr_status = 0;
   5662
   5663	if (hba->is_urgent_bkops_lvl_checked)
   5664		goto enable_auto_bkops;
   5665
   5666	err = ufshcd_get_bkops_status(hba, &curr_status);
   5667	if (err) {
   5668		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
   5669				__func__, err);
   5670		goto out;
   5671	}
   5672
   5673	/*
    5674	 * Some devices are seen to raise the urgent BKOPS exception event
    5675	 * even when the BKOPS status does not indicate a performance impact
    5676	 * or a critical state. Handle these devices by determining their
    5677	 * urgent bkops status at runtime.
   5678	 */
   5679	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
   5680		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
   5681				__func__, curr_status);
   5682		/* update the current status as the urgent bkops level */
   5683		hba->urgent_bkops_lvl = curr_status;
   5684		hba->is_urgent_bkops_lvl_checked = true;
   5685	}
   5686
   5687enable_auto_bkops:
   5688	err = ufshcd_enable_auto_bkops(hba);
   5689out:
   5690	if (err < 0)
   5691		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
   5692				__func__, err);
   5693}
   5694
   5695static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
   5696{
   5697	u32 value;
   5698
   5699	if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
   5700				QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
   5701		return;
   5702
   5703	dev_info(hba->dev, "exception Tcase %d\n", value - 80);
   5704
   5705	ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
   5706
   5707	/*
    5708	 * A placeholder for platform vendors to add whatever additional
    5709	 * steps are required.
   5710	 */
   5711}
   5712
   5713static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
   5714{
   5715	u8 index;
   5716	enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
   5717				   UPIU_QUERY_OPCODE_CLEAR_FLAG;
   5718
   5719	index = ufshcd_wb_get_query_index(hba);
   5720	return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
   5721}
   5722
   5723int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
   5724{
   5725	int ret;
   5726
   5727	if (!ufshcd_is_wb_allowed(hba))
   5728		return 0;
   5729
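        	/* Nothing to do if the requested state matches the current one. */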
   5730	if (!(enable ^ hba->dev_info.wb_enabled))
   5731		return 0;
   5732
   5733	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
   5734	if (ret) {
   5735		dev_err(hba->dev, "%s Write Booster %s failed %d\n",
   5736			__func__, enable ? "enable" : "disable", ret);
   5737		return ret;
   5738	}
   5739
   5740	hba->dev_info.wb_enabled = enable;
   5741	dev_info(hba->dev, "%s Write Booster %s\n",
   5742			__func__, enable ? "enabled" : "disabled");
   5743
   5744	return ret;
   5745}
   5746
   5747static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
   5748{
   5749	int ret;
   5750
   5751	ret = __ufshcd_wb_toggle(hba, set,
   5752			QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
   5753	if (ret) {
   5754		dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
   5755			__func__, set ? "enable" : "disable", ret);
   5756		return;
   5757	}
   5758	dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
   5759			__func__, set ? "enabled" : "disabled");
   5760}
   5761
   5762static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
   5763{
   5764	int ret;
   5765
   5766	if (!ufshcd_is_wb_allowed(hba) ||
   5767	    hba->dev_info.wb_buf_flush_enabled == enable)
   5768		return;
   5769
   5770	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
   5771	if (ret) {
   5772		dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
   5773			enable ? "enable" : "disable", ret);
   5774		return;
   5775	}
   5776
   5777	hba->dev_info.wb_buf_flush_enabled = enable;
   5778
   5779	dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
   5780			__func__, enable ? "enabled" : "disabled");
   5781}
   5782
   5783static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
   5784						u32 avail_buf)
   5785{
   5786	u32 cur_buf;
   5787	int ret;
   5788	u8 index;
   5789
   5790	index = ufshcd_wb_get_query_index(hba);
   5791	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
   5792					      QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
   5793					      index, 0, &cur_buf);
   5794	if (ret) {
   5795		dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
   5796			__func__, ret);
   5797		return false;
   5798	}
   5799
   5800	if (!cur_buf) {
   5801		dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
   5802			 cur_buf);
   5803		return false;
   5804	}
    5805	/* Keep flushing (and VCC on) only while the available buffer is below the threshold */
   5806	return avail_buf < hba->vps->wb_flush_threshold;
   5807}
   5808
   5809static void ufshcd_wb_force_disable(struct ufs_hba *hba)
   5810{
   5811	if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
   5812		ufshcd_wb_toggle_flush(hba, false);
   5813
   5814	ufshcd_wb_toggle_flush_during_h8(hba, false);
   5815	ufshcd_wb_toggle(hba, false);
   5816	hba->caps &= ~UFSHCD_CAP_WB_EN;
   5817
   5818	dev_info(hba->dev, "%s: WB force disabled\n", __func__);
   5819}
   5820
   5821static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
   5822{
   5823	u32 lifetime;
   5824	int ret;
   5825	u8 index;
   5826
   5827	index = ufshcd_wb_get_query_index(hba);
   5828	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
   5829				      QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
   5830				      index, 0, &lifetime);
   5831	if (ret) {
   5832		dev_err(hba->dev,
   5833			"%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
   5834			__func__, ret);
   5835		return false;
   5836	}
   5837
   5838	if (lifetime == UFS_WB_EXCEED_LIFETIME) {
   5839		dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
   5840			__func__, lifetime);
   5841		return false;
   5842	}
   5843
   5844	dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
   5845		__func__, lifetime);
   5846
   5847	return true;
   5848}
   5849
   5850static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
   5851{
   5852	int ret;
   5853	u32 avail_buf;
   5854	u8 index;
   5855
   5856	if (!ufshcd_is_wb_allowed(hba))
   5857		return false;
   5858
   5859	if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
   5860		ufshcd_wb_force_disable(hba);
   5861		return false;
   5862	}
   5863
   5864	/*
    5865	 * The UFS device needs VCC to be ON in order to flush.
    5866	 * With user-space reduction enabled, checking only the available
    5867	 * buffer is enough to decide whether to flush; the threshold
    5868	 * used here corresponds to the buffer being more than 90% full.
    5869	 * With user-space preservation enabled, the current buffer
    5870	 * should be checked too because the WB buffer size can shrink
    5871	 * as the disk fills up. This information is provided by the
    5872	 * dCurrentWriteBoosterBufferSize attribute. There is no point in
    5873	 * keeping VCC on when the current buffer is empty.
   5874	 */
   5875	index = ufshcd_wb_get_query_index(hba);
   5876	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
   5877				      QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
   5878				      index, 0, &avail_buf);
   5879	if (ret) {
   5880		dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
   5881			 __func__, ret);
   5882		return false;
   5883	}
   5884
   5885	if (!hba->dev_info.b_presrv_uspc_en)
   5886		return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
   5887
   5888	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
   5889}
   5890
   5891static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
   5892{
   5893	struct ufs_hba *hba = container_of(to_delayed_work(work),
   5894					   struct ufs_hba,
   5895					   rpm_dev_flush_recheck_work);
   5896	/*
    5897	 * To prevent unnecessary VCC power drain after the device finishes
    5898	 * the WriteBooster buffer flush or Auto BKOPS, force a runtime
    5899	 * resume after a certain delay so that the threshold is rechecked
    5900	 * on the next runtime suspend.
   5901	 */
   5902	ufshcd_rpm_get_sync(hba);
   5903	ufshcd_rpm_put_sync(hba);
   5904}
   5905
   5906/**
   5907 * ufshcd_exception_event_handler - handle exceptions raised by device
   5908 * @work: pointer to work data
   5909 *
   5910 * Read bExceptionEventStatus attribute from the device and handle the
   5911 * exception event accordingly.
   5912 */
   5913static void ufshcd_exception_event_handler(struct work_struct *work)
   5914{
   5915	struct ufs_hba *hba;
   5916	int err;
   5917	u32 status = 0;
   5918	hba = container_of(work, struct ufs_hba, eeh_work);
   5919
   5920	ufshcd_scsi_block_requests(hba);
   5921	err = ufshcd_get_ee_status(hba, &status);
   5922	if (err) {
   5923		dev_err(hba->dev, "%s: failed to get exception status %d\n",
   5924				__func__, err);
   5925		goto out;
   5926	}
   5927
   5928	trace_ufshcd_exception_event(dev_name(hba->dev), status);
   5929
   5930	if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
   5931		ufshcd_bkops_exception_event_handler(hba);
   5932
   5933	if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
   5934		ufshcd_temp_exception_event_handler(hba, status);
   5935
   5936	ufs_debugfs_exception_event(hba, status);
   5937out:
   5938	ufshcd_scsi_unblock_requests(hba);
   5939}
   5940
   5941/* Complete requests that have door-bell cleared */
   5942static void ufshcd_complete_requests(struct ufs_hba *hba)
   5943{
   5944	ufshcd_transfer_req_compl(hba);
   5945	ufshcd_tmc_handler(hba);
   5946}
   5947
   5948/**
    5949 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
    5950 *				to recover from DL NAC errors.
   5951 * @hba: per-adapter instance
   5952 *
   5953 * Returns true if error handling is required, false otherwise
   5954 */
   5955static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
   5956{
   5957	unsigned long flags;
   5958	bool err_handling = true;
   5959
   5960	spin_lock_irqsave(hba->host->host_lock, flags);
   5961	/*
    5962	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
    5963	 * device fatal errors and/or DL NAC & REPLAY timeout errors.
   5964	 */
   5965	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
   5966		goto out;
   5967
   5968	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
   5969	    ((hba->saved_err & UIC_ERROR) &&
   5970	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
   5971		goto out;
   5972
   5973	if ((hba->saved_err & UIC_ERROR) &&
   5974	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
   5975		int err;
   5976		/*
    5977		 * Wait for 50 ms to see whether any other errors turn up.
   5978		 */
   5979		spin_unlock_irqrestore(hba->host->host_lock, flags);
   5980		msleep(50);
   5981		spin_lock_irqsave(hba->host->host_lock, flags);
   5982
   5983		/*
    5984		 * Now check whether any other severe errors have been raised
    5985		 * besides the DL NAC error.
   5986		 */
   5987		if ((hba->saved_err & INT_FATAL_ERRORS) ||
   5988		    ((hba->saved_err & UIC_ERROR) &&
   5989		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
   5990			goto out;
   5991
   5992		/*
   5993		 * As DL NAC is the only error received so far, send out NOP
    5994		 * command to confirm whether the link is still active.
   5995		 *   - If we don't get any response then do error recovery.
   5996		 *   - If we get response then clear the DL NAC error bit.
   5997		 */
   5998
   5999		spin_unlock_irqrestore(hba->host->host_lock, flags);
   6000		err = ufshcd_verify_dev_init(hba);
   6001		spin_lock_irqsave(hba->host->host_lock, flags);
   6002
   6003		if (err)
   6004			goto out;
   6005
   6006		/* Link seems to be alive hence ignore the DL NAC errors */
   6007		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
   6008			hba->saved_err &= ~UIC_ERROR;
   6009		/* clear NAC error */
   6010		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
   6011		if (!hba->saved_uic_err)
   6012			err_handling = false;
   6013	}
   6014out:
   6015	spin_unlock_irqrestore(hba->host->host_lock, flags);
   6016	return err_handling;
   6017}
   6018
   6019/* host lock must be held before calling this func */
   6020static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
   6021{
   6022	return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
   6023	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
   6024}
   6025
   6026void ufshcd_schedule_eh_work(struct ufs_hba *hba)
   6027{
   6028	lockdep_assert_held(hba->host->host_lock);
   6029
   6030	/* handle fatal errors only when link is not in error state */
   6031	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
   6032		if (hba->force_reset || ufshcd_is_link_broken(hba) ||
   6033		    ufshcd_is_saved_err_fatal(hba))
   6034			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
   6035		else
   6036			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
   6037		queue_work(hba->eh_wq, &hba->eh_work);
   6038	}
   6039}
   6040
   6041static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
   6042{
   6043	down_write(&hba->clk_scaling_lock);
   6044	hba->clk_scaling.is_allowed = allow;
   6045	up_write(&hba->clk_scaling_lock);
   6046}
   6047
   6048static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
   6049{
   6050	if (suspend) {
   6051		if (hba->clk_scaling.is_enabled)
   6052			ufshcd_suspend_clkscaling(hba);
   6053		ufshcd_clk_scaling_allow(hba, false);
   6054	} else {
   6055		ufshcd_clk_scaling_allow(hba, true);
   6056		if (hba->clk_scaling.is_enabled)
   6057			ufshcd_resume_clkscaling(hba);
   6058	}
   6059}
   6060
   6061static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
   6062{
   6063	ufshcd_rpm_get_sync(hba);
   6064	if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
   6065	    hba->is_sys_suspended) {
   6066		enum ufs_pm_op pm_op;
   6067
   6068		/*
    6069		 * Don't assume anything about the resume path: if
    6070		 * resume fails, IRQs and clocks can be OFF, and power
    6071		 * supplies can be OFF or in LPM.
   6072		 */
   6073		ufshcd_setup_hba_vreg(hba, true);
   6074		ufshcd_enable_irq(hba);
   6075		ufshcd_setup_vreg(hba, true);
   6076		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
   6077		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
   6078		ufshcd_hold(hba, false);
   6079		if (!ufshcd_is_clkgating_allowed(hba))
   6080			ufshcd_setup_clocks(hba, true);
   6081		ufshcd_release(hba);
   6082		pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
   6083		ufshcd_vops_resume(hba, pm_op);
   6084	} else {
   6085		ufshcd_hold(hba, false);
   6086		if (ufshcd_is_clkscaling_supported(hba) &&
   6087		    hba->clk_scaling.is_enabled)
   6088			ufshcd_suspend_clkscaling(hba);
   6089		ufshcd_clk_scaling_allow(hba, false);
   6090	}
   6091	ufshcd_scsi_block_requests(hba);
   6092	/* Drain ufshcd_queuecommand() */
   6093	synchronize_rcu();
   6094	cancel_work_sync(&hba->eeh_work);
   6095}
   6096
   6097static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
   6098{
   6099	ufshcd_scsi_unblock_requests(hba);
   6100	ufshcd_release(hba);
   6101	if (ufshcd_is_clkscaling_supported(hba))
   6102		ufshcd_clk_scaling_suspend(hba, false);
   6103	ufshcd_rpm_put(hba);
   6104}
   6105
   6106static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
   6107{
   6108	return (!hba->is_powered || hba->shutting_down ||
   6109		!hba->ufs_device_wlun ||
   6110		hba->ufshcd_state == UFSHCD_STATE_ERROR ||
   6111		(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
   6112		   ufshcd_is_link_broken(hba))));
   6113}
   6114
   6115#ifdef CONFIG_PM
   6116static void ufshcd_recover_pm_error(struct ufs_hba *hba)
   6117{
   6118	struct Scsi_Host *shost = hba->host;
   6119	struct scsi_device *sdev;
   6120	struct request_queue *q;
   6121	int ret;
   6122
   6123	hba->is_sys_suspended = false;
   6124	/*
    6125	 * Set the RPM status of the wlun device to RPM_ACTIVE;
    6126	 * this also clears its runtime error.
   6127	 */
   6128	ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
   6129
   6130	/* hba device might have a runtime error otherwise */
   6131	if (ret)
   6132		ret = pm_runtime_set_active(hba->dev);
   6133	/*
    6134	 * If the wlun device had a runtime error, we also need to resume
    6135	 * the consumer scsi devices in case any of them failed to be
    6136	 * resumed due to the supplier runtime resume failure. This unblocks
    6137	 * blk_queue_enter() in case there are bios waiting inside it.
   6138	 */
   6139	if (!ret) {
   6140		shost_for_each_device(sdev, shost) {
   6141			q = sdev->request_queue;
   6142			if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
   6143				       q->rpm_status == RPM_SUSPENDING))
   6144				pm_request_resume(q->dev);
   6145		}
   6146	}
   6147}
   6148#else
   6149static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
   6150{
   6151}
   6152#endif
   6153
   6154static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
   6155{
   6156	struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
   6157	u32 mode;
   6158
   6159	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
   6160
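        	/*
        	 * PA_PWRMODE packs the TX power mode in the low bits and the RX
        	 * power mode at PWRMODE_RX_OFFSET; a restore is needed when
        	 * either field differs from the mode the driver last negotiated.
        	 */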
   6161	if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
   6162		return true;
   6163
   6164	if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
   6165		return true;
   6166
   6167	return false;
   6168}
   6169
   6170/**
   6171 * ufshcd_err_handler - handle UFS errors that require s/w attention
   6172 * @work: pointer to work structure
   6173 */
   6174static void ufshcd_err_handler(struct work_struct *work)
   6175{
   6176	int retries = MAX_ERR_HANDLER_RETRIES;
   6177	struct ufs_hba *hba;
   6178	unsigned long flags;
   6179	bool needs_restore;
   6180	bool needs_reset;
   6181	bool err_xfer;
   6182	bool err_tm;
   6183	int pmc_err;
   6184	int tag;
   6185
   6186	hba = container_of(work, struct ufs_hba, eh_work);
   6187
   6188	dev_info(hba->dev,
   6189		 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
   6190		 __func__, ufshcd_state_name[hba->ufshcd_state],
   6191		 hba->is_powered, hba->shutting_down, hba->saved_err,
   6192		 hba->saved_uic_err, hba->force_reset,
   6193		 ufshcd_is_link_broken(hba) ? "; link is broken" : "");
   6194
   6195	down(&hba->host_sem);
   6196	spin_lock_irqsave(hba->host->host_lock, flags);
   6197	if (ufshcd_err_handling_should_stop(hba)) {
   6198		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
   6199			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
   6200		spin_unlock_irqrestore(hba->host->host_lock, flags);
   6201		up(&hba->host_sem);
   6202		return;
   6203	}
   6204	ufshcd_set_eh_in_progress(hba);
   6205	spin_unlock_irqrestore(hba->host->host_lock, flags);
   6206	ufshcd_err_handling_prepare(hba);
   6207	/* Complete requests that have door-bell cleared by h/w */
   6208	ufshcd_complete_requests(hba);
   6209	spin_lock_irqsave(hba->host->host_lock, flags);
   6210again:
   6211	needs_restore = false;
   6212	needs_reset = false;
   6213	err_xfer = false;
   6214	err_tm = false;
   6215
   6216	if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
   6217		hba->ufshcd_state = UFSHCD_STATE_RESET;
   6218	/*
   6219	 * A full reset and restore might have happened after preparation
   6220	 * is finished, double check whether we should stop.
   6221	 */
   6222	if (ufshcd_err_handling_should_stop(hba))
   6223		goto skip_err_handling;
   6224
   6225	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
   6226		bool ret;
   6227
   6228		spin_unlock_irqrestore(hba->host->host_lock, flags);
   6229		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
   6230		ret = ufshcd_quirk_dl_nac_errors(hba);
   6231		spin_lock_irqsave(hba->host->host_lock, flags);
   6232		if (!ret && ufshcd_err_handling_should_stop(hba))
   6233			goto skip_err_handling;
   6234	}
   6235
   6236	if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
   6237	    (hba->saved_uic_err &&
   6238	     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
   6239		bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
   6240
   6241		spin_unlock_irqrestore(hba->host->host_lock, flags);
   6242		ufshcd_print_host_state(hba);
   6243		ufshcd_print_pwr_info(hba);
   6244		ufshcd_print_evt_hist(hba);
   6245		ufshcd_print_tmrs(hba, hba->outstanding_tasks);
   6246		ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
   6247		spin_lock_irqsave(hba->host->host_lock, flags);
   6248	}
   6249
   6250	/*
    6251	 * If a host reset is required then skip forcefully clearing the
    6252	 * pending transfers because they will get cleared during the
    6253	 * host reset and restore.
   6254	 */
   6255	if (hba->force_reset || ufshcd_is_link_broken(hba) ||
   6256	    ufshcd_is_saved_err_fatal(hba) ||
   6257	    ((hba->saved_err & UIC_ERROR) &&
   6258	     (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
   6259				    UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
   6260		needs_reset = true;
   6261		goto do_reset;
   6262	}
   6263
   6264	/*
    6265	 * If a LINERESET was caught, the UFS link might have been put into
    6266	 * PWM mode; check whether a power mode restore is needed.
   6267	 */
   6268	if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
   6269		hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
   6270		if (!hba->saved_uic_err)
   6271			hba->saved_err &= ~UIC_ERROR;
   6272		spin_unlock_irqrestore(hba->host->host_lock, flags);
   6273		if (ufshcd_is_pwr_mode_restore_needed(hba))
   6274			needs_restore = true;
   6275		spin_lock_irqsave(hba->host->host_lock, flags);
   6276		if (!hba->saved_err && !needs_restore)
   6277			goto skip_err_handling;
   6278	}
   6279
   6280	hba->silence_err_logs = true;
   6281	/* release lock as clear command might sleep */
   6282	spin_unlock_irqrestore(hba->host->host_lock, flags);
   6283	/* Clear pending transfer requests */
   6284	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
   6285		if (ufshcd_try_to_abort_task(hba, tag)) {
   6286			err_xfer = true;
   6287			goto lock_skip_pending_xfer_clear;
   6288		}
   6289		dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag,
   6290			hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1);
   6291	}
   6292
   6293	/* Clear pending task management requests */
   6294	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
   6295		if (ufshcd_clear_tm_cmd(hba, tag)) {
   6296			err_tm = true;
   6297			goto lock_skip_pending_xfer_clear;
   6298		}
   6299	}
   6300
   6301lock_skip_pending_xfer_clear:
   6302	/* Complete the requests that are cleared by s/w */
   6303	ufshcd_complete_requests(hba);
   6304
   6305	spin_lock_irqsave(hba->host->host_lock, flags);
   6306	hba->silence_err_logs = false;
   6307	if (err_xfer || err_tm) {
   6308		needs_reset = true;
   6309		goto do_reset;
   6310	}
   6311
   6312	/*
    6313	 * After all requests and tasks are cleared from the doorbell,
    6314	 * it is now safe to restore the power mode.
   6315	 */
   6316	if (needs_restore) {
   6317		spin_unlock_irqrestore(hba->host->host_lock, flags);
   6318		/*
   6319		 * Hold the scaling lock just in case dev cmds
   6320		 * are sent via bsg and/or sysfs.
   6321		 */
   6322		down_write(&hba->clk_scaling_lock);
   6323		hba->force_pmc = true;
   6324		pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
   6325		if (pmc_err) {
   6326			needs_reset = true;
   6327			dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
   6328					__func__, pmc_err);
   6329		}
   6330		hba->force_pmc = false;
   6331		ufshcd_print_pwr_info(hba);
   6332		up_write(&hba->clk_scaling_lock);
   6333		spin_lock_irqsave(hba->host->host_lock, flags);
   6334	}
   6335
   6336do_reset:
   6337	/* Fatal errors need reset */
   6338	if (needs_reset) {
   6339		int err;
   6340
   6341		hba->force_reset = false;
   6342		spin_unlock_irqrestore(hba->host->host_lock, flags);
   6343		err = ufshcd_reset_and_restore(hba);
   6344		if (err)
   6345			dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
   6346					__func__, err);
   6347		else
   6348			ufshcd_recover_pm_error(hba);
   6349		spin_lock_irqsave(hba->host->host_lock, flags);
   6350	}
   6351
   6352skip_err_handling:
   6353	if (!needs_reset) {
   6354		if (hba->ufshcd_state == UFSHCD_STATE_RESET)
   6355			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
   6356		if (hba->saved_err || hba->saved_uic_err)
   6357			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
   6358			    __func__, hba->saved_err, hba->saved_uic_err);
   6359	}
   6360	/* Exit in an operational state or dead */
   6361	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
   6362	    hba->ufshcd_state != UFSHCD_STATE_ERROR) {
   6363		if (--retries)
   6364			goto again;
   6365		hba->ufshcd_state = UFSHCD_STATE_ERROR;
   6366	}
   6367	ufshcd_clear_eh_in_progress(hba);
   6368	spin_unlock_irqrestore(hba->host->host_lock, flags);
   6369	ufshcd_err_handling_unprepare(hba);
   6370	up(&hba->host_sem);
   6371
   6372	dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
   6373		 ufshcd_state_name[hba->ufshcd_state]);
   6374}
   6375
   6376/**
   6377 * ufshcd_update_uic_error - check and set fatal UIC error flags.
   6378 * @hba: per-adapter instance
   6379 *
   6380 * Returns
   6381 *  IRQ_HANDLED - If interrupt is valid
   6382 *  IRQ_NONE    - If invalid interrupt
   6383 */
   6384static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
   6385{
   6386	u32 reg;
   6387	irqreturn_t retval = IRQ_NONE;
   6388
   6389	/* PHY layer error */
   6390	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
   6391	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
   6392	    (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
   6393		ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
   6394		/*
   6395		 * To know whether this error is fatal or not, DB timeout
   6396		 * must be checked but this error is handled separately.
   6397		 */
   6398		if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
   6399			dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
   6400					__func__);
   6401
   6402		/* Got a LINERESET indication. */
   6403		if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
   6404			struct uic_command *cmd = NULL;
   6405
   6406			hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
   6407			if (hba->uic_async_done && hba->active_uic_cmd)
   6408				cmd = hba->active_uic_cmd;
   6409			/*
   6410			 * Ignore the LINERESET during power mode change
   6411			 * operation via DME_SET command.
   6412			 */
   6413			if (cmd && (cmd->command == UIC_CMD_DME_SET))
   6414				hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
   6415		}
   6416		retval |= IRQ_HANDLED;
   6417	}
   6418
   6419	/* PA_INIT_ERROR is fatal and needs UIC reset */
   6420	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
   6421	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
   6422	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
   6423		ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
   6424
   6425		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
   6426			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
   6427		else if (hba->dev_quirks &
   6428				UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
   6429			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
   6430				hba->uic_error |=
   6431					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
   6432			else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
   6433				hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
   6434		}
   6435		retval |= IRQ_HANDLED;
   6436	}
   6437
    6438	/* UIC NL/TL/DME errors need a software retry */
   6439	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
   6440	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
   6441	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
   6442		ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
   6443		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
   6444		retval |= IRQ_HANDLED;
   6445	}
   6446
   6447	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
   6448	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
   6449	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
   6450		ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
   6451		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
   6452		retval |= IRQ_HANDLED;
   6453	}
   6454
   6455	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
   6456	if ((reg & UIC_DME_ERROR) &&
   6457	    (reg & UIC_DME_ERROR_CODE_MASK)) {
   6458		ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
   6459		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
   6460		retval |= IRQ_HANDLED;
   6461	}
   6462
   6463	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
   6464			__func__, hba->uic_error);
   6465	return retval;
   6466}
   6467
   6468/**
   6469 * ufshcd_check_errors - Check for errors that need s/w attention
   6470 * @hba: per-adapter instance
   6471 * @intr_status: interrupt status generated by the controller
   6472 *
   6473 * Returns
   6474 *  IRQ_HANDLED - If interrupt is valid
   6475 *  IRQ_NONE    - If invalid interrupt
   6476 */
   6477static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
   6478{
   6479	bool queue_eh_work = false;
   6480	irqreturn_t retval = IRQ_NONE;
   6481
   6482	spin_lock(hba->host->host_lock);
   6483	hba->errors |= UFSHCD_ERROR_MASK & intr_status;
   6484
   6485	if (hba->errors & INT_FATAL_ERRORS) {
   6486		ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
   6487				       hba->errors);
   6488		queue_eh_work = true;
   6489	}
   6490
   6491	if (hba->errors & UIC_ERROR) {
   6492		hba->uic_error = 0;
   6493		retval = ufshcd_update_uic_error(hba);
   6494		if (hba->uic_error)
   6495			queue_eh_work = true;
   6496	}
   6497
   6498	if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
   6499		dev_err(hba->dev,
   6500			"%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
   6501			__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
   6502			"Enter" : "Exit",
   6503			hba->errors, ufshcd_get_upmcrs(hba));
   6504		ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
   6505				       hba->errors);
   6506		ufshcd_set_link_broken(hba);
   6507		queue_eh_work = true;
   6508	}
   6509
   6510	if (queue_eh_work) {
   6511		/*
   6512		 * update the transfer error masks to sticky bits, let's do this
   6513		 * irrespective of current ufshcd_state.
   6514		 */
   6515		hba->saved_err |= hba->errors;
   6516		hba->saved_uic_err |= hba->uic_error;
   6517
   6518		/* dump controller state before resetting */
   6519		if ((hba->saved_err &
   6520		     (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
   6521		    (hba->saved_uic_err &&
   6522		     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
   6523			dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
   6524					__func__, hba->saved_err,
   6525					hba->saved_uic_err);
   6526			ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
   6527					 "host_regs: ");
   6528			ufshcd_print_pwr_info(hba);
   6529		}
   6530		ufshcd_schedule_eh_work(hba);
   6531		retval |= IRQ_HANDLED;
   6532	}
   6533	/*
    6534	 * If queue_eh_work is false, the remaining errors are either
    6535	 * non-fatal, in which case the host recovers by itself without
    6536	 * s/w intervention, or they will be handled by the SCSI core
    6537	 * layer.
   6538	 */
   6539	hba->errors = 0;
   6540	hba->uic_error = 0;
   6541	spin_unlock(hba->host->host_lock);
   6542	return retval;
   6543}
   6544
   6545/**
   6546 * ufshcd_tmc_handler - handle task management function completion
   6547 * @hba: per adapter instance
   6548 *
   6549 * Returns
   6550 *  IRQ_HANDLED - If interrupt is valid
   6551 *  IRQ_NONE    - If invalid interrupt
   6552 */
   6553static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
   6554{
   6555	unsigned long flags, pending, issued;
   6556	irqreturn_t ret = IRQ_NONE;
   6557	int tag;
   6558
   6559	spin_lock_irqsave(hba->host->host_lock, flags);
   6560	pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
   6561	issued = hba->outstanding_tasks & ~pending;
   6562	for_each_set_bit(tag, &issued, hba->nutmrs) {
   6563		struct request *req = hba->tmf_rqs[tag];
   6564		struct completion *c = req->end_io_data;
   6565
   6566		complete(c);
   6567		ret = IRQ_HANDLED;
   6568	}
   6569	spin_unlock_irqrestore(hba->host->host_lock, flags);
   6570
   6571	return ret;
   6572}
   6573
   6574/**
   6575 * ufshcd_sl_intr - Interrupt service routine
   6576 * @hba: per adapter instance
   6577 * @intr_status: contains interrupts generated by the controller
   6578 *
   6579 * Returns
   6580 *  IRQ_HANDLED - If interrupt is valid
   6581 *  IRQ_NONE    - If invalid interrupt
   6582 */
   6583static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
   6584{
   6585	irqreturn_t retval = IRQ_NONE;
   6586
   6587	if (intr_status & UFSHCD_UIC_MASK)
   6588		retval |= ufshcd_uic_cmd_compl(hba, intr_status);
   6589
   6590	if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
   6591		retval |= ufshcd_check_errors(hba, intr_status);
   6592
   6593	if (intr_status & UTP_TASK_REQ_COMPL)
   6594		retval |= ufshcd_tmc_handler(hba);
   6595
   6596	if (intr_status & UTP_TRANSFER_REQ_COMPL)
   6597		retval |= ufshcd_transfer_req_compl(hba);
   6598
   6599	return retval;
   6600}
   6601
   6602/**
   6603 * ufshcd_intr - Main interrupt service routine
   6604 * @irq: irq number
   6605 * @__hba: pointer to adapter instance
   6606 *
   6607 * Returns
   6608 *  IRQ_HANDLED - If interrupt is valid
   6609 *  IRQ_NONE    - If invalid interrupt
   6610 */
   6611static irqreturn_t ufshcd_intr(int irq, void *__hba)
   6612{
   6613	u32 intr_status, enabled_intr_status = 0;
   6614	irqreturn_t retval = IRQ_NONE;
   6615	struct ufs_hba *hba = __hba;
   6616	int retries = hba->nutrs;
   6617
   6618	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
   6619	hba->ufs_stats.last_intr_status = intr_status;
   6620	hba->ufs_stats.last_intr_ts = ktime_get();
   6621
   6622	/*
    6623	 * There can be at most hba->nutrs requests in flight and, in the
    6624	 * worst case, they finish one by one after the interrupt status has
    6625	 * been read. Re-read the interrupt status in a loop and handle the
    6626	 * requests until all of them have been processed before returning.
   6627	 */
   6628	while (intr_status && retries--) {
   6629		enabled_intr_status =
   6630			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
   6631		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
   6632		if (enabled_intr_status)
   6633			retval |= ufshcd_sl_intr(hba, enabled_intr_status);
   6634
   6635		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
   6636	}
   6637
   6638	if (enabled_intr_status && retval == IRQ_NONE &&
   6639	    (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
   6640	     hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
   6641		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
   6642					__func__,
   6643					intr_status,
   6644					hba->ufs_stats.last_intr_status,
   6645					enabled_intr_status);
   6646		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
   6647	}
   6648
   6649	return retval;
   6650}
   6651
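/**
 * ufshcd_clear_tm_cmd - clear a pending task management request in the host
 * @hba: per adapter instance
 * @tag: task tag of the request to be cleared
 *
 * Clears the UTP task management request list entry for @tag and polls the
 * task request doorbell for up to 1 sec until the controller has cleared it.
 *
 * Returns zero on success, non-zero on failure
 */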
   6652static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
   6653{
   6654	int err = 0;
   6655	u32 mask = 1 << tag;
   6656	unsigned long flags;
   6657
   6658	if (!test_bit(tag, &hba->outstanding_tasks))
   6659		goto out;
   6660
   6661	spin_lock_irqsave(hba->host->host_lock, flags);
   6662	ufshcd_utmrl_clear(hba, tag);
   6663	spin_unlock_irqrestore(hba->host->host_lock, flags);
   6664
   6665	/* poll for max. 1 sec to clear door bell register by h/w */
   6666	err = ufshcd_wait_for_register(hba,
   6667			REG_UTP_TASK_REQ_DOOR_BELL,
   6668			mask, 0, 1000, 1000);
   6669
   6670	dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
    6671		tag, err ? "failed" : "succeeded");
   6672
   6673out:
   6674	return err;
   6675}
   6676
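/**
 * __ufshcd_issue_tm_cmd - issue a task management request descriptor
 * @hba: per adapter instance
 * @treq: task request descriptor to send; the response is copied back into it
 * @tm_function: task management function opcode
 *
 * Allocates a free tag from the TMF queue, rings the task request doorbell
 * and waits up to TM_CMD_TIMEOUT for the completion.
 *
 * Returns zero on success, non-zero on failure (e.g. -ETIMEDOUT if the
 * device did not respond in time).
 */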
   6677static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
   6678		struct utp_task_req_desc *treq, u8 tm_function)
   6679{
   6680	struct request_queue *q = hba->tmf_queue;
   6681	struct Scsi_Host *host = hba->host;
   6682	DECLARE_COMPLETION_ONSTACK(wait);
   6683	struct request *req;
   6684	unsigned long flags;
   6685	int task_tag, err;
   6686
   6687	/*
   6688	 * blk_mq_alloc_request() is used here only to get a free tag.
   6689	 */
   6690	req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
   6691	if (IS_ERR(req))
   6692		return PTR_ERR(req);
   6693
   6694	req->end_io_data = &wait;
   6695	ufshcd_hold(hba, false);
   6696
   6697	spin_lock_irqsave(host->host_lock, flags);
   6698
   6699	task_tag = req->tag;
   6700	WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
   6701		  task_tag);
   6702	hba->tmf_rqs[req->tag] = req;
   6703	treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
   6704
   6705	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
   6706	ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
   6707
   6708	/* send command to the controller */
   6709	__set_bit(task_tag, &hba->outstanding_tasks);
   6710
   6711	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
   6712	/* Make sure that doorbell is committed immediately */
   6713	wmb();
   6714
   6715	spin_unlock_irqrestore(host->host_lock, flags);
   6716
   6717	ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
   6718
   6719	/* wait until the task management command is completed */
   6720	err = wait_for_completion_io_timeout(&wait,
   6721			msecs_to_jiffies(TM_CMD_TIMEOUT));
   6722	if (!err) {
   6723		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
   6724		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
   6725				__func__, tm_function);
   6726		if (ufshcd_clear_tm_cmd(hba, task_tag))
   6727			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
   6728					__func__, task_tag);
   6729		err = -ETIMEDOUT;
   6730	} else {
   6731		err = 0;
   6732		memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
   6733
   6734		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
   6735	}
   6736
   6737	spin_lock_irqsave(hba->host->host_lock, flags);
   6738	hba->tmf_rqs[req->tag] = NULL;
   6739	__clear_bit(task_tag, &hba->outstanding_tasks);
   6740	spin_unlock_irqrestore(hba->host->host_lock, flags);
   6741
   6742	ufshcd_release(hba);
   6743	blk_mq_free_request(req);
   6744
   6745	return err;
   6746}
   6747
   6748/**
   6749 * ufshcd_issue_tm_cmd - issues task management commands to controller
   6750 * @hba: per adapter instance
   6751 * @lun_id: LUN ID to which TM command is sent
   6752 * @task_id: task ID to which the TM command is applicable
   6753 * @tm_function: task management function opcode
   6754 * @tm_response: task management service response return value
   6755 *
   6756 * Returns non-zero value on error, zero on success.
   6757 */
   6758static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
   6759		u8 tm_function, u8 *tm_response)
   6760{
   6761	struct utp_task_req_desc treq = { { 0 }, };
   6762	enum utp_ocs ocs_value;
   6763	int err;
   6764
   6765	/* Configure task request descriptor */
   6766	treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
   6767	treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
   6768
   6769	/* Configure task request UPIU */
   6770	treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
   6771				  cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
   6772	treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
   6773
   6774	/*
   6775	 * The host shall provide the same value for LUN field in the basic
   6776	 * header and for Input Parameter.
   6777	 */
   6778	treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
   6779	treq.upiu_req.input_param2 = cpu_to_be32(task_id);
   6780
   6781	err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
   6782	if (err == -ETIMEDOUT)
   6783		return err;
   6784
   6785	ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
   6786	if (ocs_value != OCS_SUCCESS)
   6787		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
   6788				__func__, ocs_value);
   6789	else if (tm_response)
   6790		*tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
   6791				MASK_TM_SERVICE_RESP;
   6792	return err;
   6793}
   6794
   6795/**
   6796 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
   6797 * @hba:	per-adapter instance
   6798 * @req_upiu:	upiu request
   6799 * @rsp_upiu:	upiu reply
   6800 * @desc_buff:	pointer to descriptor buffer, NULL if NA
   6801 * @buff_len:	descriptor size, 0 if NA
   6802 * @cmd_type:	specifies the type (NOP, Query...)
   6803 * @desc_op:	descriptor operation
   6804 *
    6805 * These types of requests use the UTP Transfer Request Descriptor (UTRD).
    6806 * Therefore, they "ride" the device management infrastructure: they use its
    6807 * tag and work queues.
   6808 *
   6809 * Since there is only one available tag for device management commands,
   6810 * the caller is expected to hold the hba->dev_cmd.lock mutex.
   6811 */
   6812static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
   6813					struct utp_upiu_req *req_upiu,
   6814					struct utp_upiu_req *rsp_upiu,
   6815					u8 *desc_buff, int *buff_len,
   6816					enum dev_cmd_type cmd_type,
   6817					enum query_opcode desc_op)
   6818{
   6819	DECLARE_COMPLETION_ONSTACK(wait);
   6820	const u32 tag = hba->reserved_slot;
   6821	struct ufshcd_lrb *lrbp;
   6822	int err = 0;
   6823	u8 upiu_flags;
   6824
   6825	/* Protects use of hba->reserved_slot. */
   6826	lockdep_assert_held(&hba->dev_cmd.lock);
   6827
   6828	down_read(&hba->clk_scaling_lock);
   6829
   6830	lrbp = &hba->lrb[tag];
   6831	WARN_ON(lrbp->cmd);
   6832	lrbp->cmd = NULL;
   6833	lrbp->task_tag = tag;
   6834	lrbp->lun = 0;
   6835	lrbp->intr_cmd = true;
   6836	ufshcd_prepare_lrbp_crypto(NULL, lrbp);
   6837	hba->dev_cmd.type = cmd_type;
   6838
   6839	if (hba->ufs_version <= ufshci_version(1, 1))
   6840		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
   6841	else
   6842		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
   6843
   6844	/* update the task tag in the request upiu */
   6845	req_upiu->header.dword_0 |= cpu_to_be32(tag);
   6846
   6847	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
   6848
   6849	/* just copy the upiu request as it is */
   6850	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
   6851	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
   6852		/* The Data Segment Area is optional depending upon the query
    6853		 * function value. For WRITE DESCRIPTOR, the data segment
   6854		 * follows right after the tsf.
   6855		 */
   6856		memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
   6857		*buff_len = 0;
   6858	}
   6859
   6860	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
   6861
   6862	hba->dev_cmd.complete = &wait;
   6863
   6864	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
   6865
   6866	ufshcd_send_command(hba, tag);
   6867	/*
    6868	 * Ignore the return value here - ufshcd_check_query_response() is
    6869	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
    6870	 * Read the response directly, ignoring all errors.
   6871	 */
   6872	ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
   6873
   6874	/* just copy the upiu response as it is */
   6875	memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
   6876	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
   6877		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
   6878		u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
   6879			       MASK_QUERY_DATA_SEG_LEN;
   6880
   6881		if (*buff_len >= resp_len) {
   6882			memcpy(desc_buff, descp, resp_len);
   6883			*buff_len = resp_len;
   6884		} else {
   6885			dev_warn(hba->dev,
   6886				 "%s: rsp size %d is bigger than buffer size %d",
   6887				 __func__, resp_len, *buff_len);
   6888			*buff_len = 0;
   6889			err = -EINVAL;
   6890		}
   6891	}
   6892	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
   6893				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
   6894
   6895	up_read(&hba->clk_scaling_lock);
   6896	return err;
   6897}
   6898
   6899/**
   6900 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
   6901 * @hba:	per-adapter instance
   6902 * @req_upiu:	upiu request
   6903 * @rsp_upiu:	upiu reply - only 8 DW as we do not support scsi commands
   6904 * @msgcode:	message code, one of UPIU Transaction Codes Initiator to Target
   6905 * @desc_buff:	pointer to descriptor buffer, NULL if NA
   6906 * @buff_len:	descriptor size, 0 if NA
   6907 * @desc_op:	descriptor operation
   6908 *
   6909 * Supports UTP Transfer requests (nop and query), and UTP Task
   6910 * Management requests.
    6911 * It is up to the caller to fill the upiu content properly, as it will
    6912 * be copied without any further input validation.
   6913 */
   6914int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
   6915			     struct utp_upiu_req *req_upiu,
   6916			     struct utp_upiu_req *rsp_upiu,
   6917			     int msgcode,
   6918			     u8 *desc_buff, int *buff_len,
   6919			     enum query_opcode desc_op)
   6920{
   6921	int err;
   6922	enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
   6923	struct utp_task_req_desc treq = { { 0 }, };
   6924	enum utp_ocs ocs_value;
   6925	u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
   6926
   6927	switch (msgcode) {
   6928	case UPIU_TRANSACTION_NOP_OUT:
   6929		cmd_type = DEV_CMD_TYPE_NOP;
   6930		fallthrough;
   6931	case UPIU_TRANSACTION_QUERY_REQ:
   6932		ufshcd_hold(hba, false);
   6933		mutex_lock(&hba->dev_cmd.lock);
   6934		err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
   6935						   desc_buff, buff_len,
   6936						   cmd_type, desc_op);
   6937		mutex_unlock(&hba->dev_cmd.lock);
   6938		ufshcd_release(hba);
   6939
   6940		break;
   6941	case UPIU_TRANSACTION_TASK_REQ:
   6942		treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
   6943		treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
   6944
   6945		memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
   6946
   6947		err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
   6948		if (err == -ETIMEDOUT)
   6949			break;
   6950
   6951		ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
   6952		if (ocs_value != OCS_SUCCESS) {
   6953			dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
   6954				ocs_value);
   6955			break;
   6956		}
   6957
   6958		memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
   6959
   6960		break;
   6961	default:
   6962		err = -EINVAL;
   6963
   6964		break;
   6965	}
   6966
   6967	return err;
   6968}
   6969
   6970/**
   6971 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
   6972 * @cmd: SCSI command pointer
   6973 *
   6974 * Returns SUCCESS/FAILED
   6975 */
   6976static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
   6977{
   6978	unsigned long flags, pending_reqs = 0, not_cleared = 0;
   6979	struct Scsi_Host *host;
   6980	struct ufs_hba *hba;
   6981	u32 pos;
   6982	int err;
   6983	u8 resp = 0xF, lun;
   6984
   6985	host = cmd->device->host;
   6986	hba = shost_priv(host);
   6987
   6988	lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
   6989	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
   6990	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
   6991		if (!err)
   6992			err = resp;
   6993		goto out;
   6994	}
   6995
   6996	/* clear the commands that were pending for corresponding LUN */
   6997	spin_lock_irqsave(&hba->outstanding_lock, flags);
   6998	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
   6999		if (hba->lrb[pos].lun == lun)
   7000			__set_bit(pos, &pending_reqs);
   7001	hba->outstanding_reqs &= ~pending_reqs;
   7002	spin_unlock_irqrestore(&hba->outstanding_lock, flags);
   7003
   7004	if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
   7005		spin_lock_irqsave(&hba->outstanding_lock, flags);
   7006		not_cleared = pending_reqs &
   7007			ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
   7008		hba->outstanding_reqs |= not_cleared;
   7009		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
   7010
   7011		dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
   7012			__func__, not_cleared);
   7013	}
   7014	__ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);
   7015
   7016out:
   7017	hba->req_abort_count = 0;
   7018	ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
   7019	if (!err) {
   7020		err = SUCCESS;
   7021	} else {
   7022		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
   7023		err = FAILED;
   7024	}
   7025	return err;
   7026}
   7027
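/**
 * ufshcd_set_req_abort_skip - mark requests so that further aborts are skipped
 * @hba: per adapter instance
 * @bitmap: bitmap of outstanding request tags to mark
 *
 * Sets the req_abort_skip flag for every lrb in @bitmap so that a subsequent
 * abort of those requests fails fast instead of being retried.
 */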
   7028static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
   7029{
   7030	struct ufshcd_lrb *lrbp;
   7031	int tag;
   7032
   7033	for_each_set_bit(tag, &bitmap, hba->nutrs) {
   7034		lrbp = &hba->lrb[tag];
   7035		lrbp->req_abort_skip = true;
   7036	}
   7037}
   7038
   7039/**
   7040 * ufshcd_try_to_abort_task - abort a specific task
   7041 * @hba: Pointer to adapter instance
   7042 * @tag: Task tag/index to be aborted
   7043 *
    7044 * Abort the pending command in the device by sending a UFS_ABORT_TASK task
    7045 * management command, and in the host controller by clearing the door-bell
    7046 * register. There can be a race in which the controller sends the command to
    7047 * the device while the abort is issued. To avoid that, first issue
    7048 * UFS_QUERY_TASK to check if the command was really issued and then try to abort it.
   7049 *
   7050 * Returns zero on success, non-zero on failure
   7051 */
   7052static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
   7053{
   7054	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
   7055	int err = 0;
   7056	int poll_cnt;
   7057	u8 resp = 0xF;
   7058	u32 reg;
   7059
   7060	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
   7061		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
   7062				UFS_QUERY_TASK, &resp);
   7063		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
   7064			/* cmd pending in the device */
   7065			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
   7066				__func__, tag);
   7067			break;
   7068		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
   7069			/*
   7070			 * cmd not pending in the device, check if it is
   7071			 * in transition.
   7072			 */
   7073			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
   7074				__func__, tag);
   7075			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
   7076			if (reg & (1 << tag)) {
   7077				/* sleep for max. 200us to stabilize */
   7078				usleep_range(100, 200);
   7079				continue;
   7080			}
   7081			/* command completed already */
   7082			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
   7083				__func__, tag);
   7084			goto out;
   7085		} else {
   7086			dev_err(hba->dev,
   7087				"%s: no response from device. tag = %d, err %d\n",
   7088				__func__, tag, err);
   7089			if (!err)
   7090				err = resp; /* service response error */
   7091			goto out;
   7092		}
   7093	}
   7094
   7095	if (!poll_cnt) {
   7096		err = -EBUSY;
   7097		goto out;
   7098	}
   7099
   7100	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
   7101			UFS_ABORT_TASK, &resp);
   7102	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
   7103		if (!err) {
   7104			err = resp; /* service response error */
   7105			dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
   7106				__func__, tag, err);
   7107		}
   7108		goto out;
   7109	}
   7110
   7111	err = ufshcd_clear_cmds(hba, 1U << tag);
   7112	if (err)
   7113		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
   7114			__func__, tag, err);
   7115
   7116out:
   7117	return err;
   7118}
   7119
   7120/**
   7121 * ufshcd_abort - scsi host template eh_abort_handler callback
   7122 * @cmd: SCSI command pointer
   7123 *
   7124 * Returns SUCCESS/FAILED
   7125 */
   7126static int ufshcd_abort(struct scsi_cmnd *cmd)
   7127{
   7128	struct Scsi_Host *host = cmd->device->host;
   7129	struct ufs_hba *hba = shost_priv(host);
   7130	int tag = scsi_cmd_to_rq(cmd)->tag;
   7131	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
   7132	unsigned long flags;
   7133	int err = FAILED;
   7134	bool outstanding;
   7135	u32 reg;
   7136
   7137	WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
   7138
   7139	ufshcd_hold(hba, false);
   7140	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
   7141	/* If command is already aborted/completed, return FAILED. */
   7142	if (!(test_bit(tag, &hba->outstanding_reqs))) {
   7143		dev_err(hba->dev,
   7144			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
   7145			__func__, tag, hba->outstanding_reqs, reg);
   7146		goto release;
   7147	}
   7148
   7149	/* Print Transfer Request of aborted task */
   7150	dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
   7151
   7152	/*
   7153	 * Print detailed info about aborted request.
   7154	 * As more than one request might get aborted at the same time,
   7155	 * print full information only for the first aborted request in order
   7156	 * to reduce repeated printouts. For other aborted requests only print
   7157	 * basic details.
   7158	 */
   7159	scsi_print_command(cmd);
   7160	if (!hba->req_abort_count) {
   7161		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
   7162		ufshcd_print_evt_hist(hba);
   7163		ufshcd_print_host_state(hba);
   7164		ufshcd_print_pwr_info(hba);
   7165		ufshcd_print_trs(hba, 1 << tag, true);
   7166	} else {
   7167		ufshcd_print_trs(hba, 1 << tag, false);
   7168	}
   7169	hba->req_abort_count++;
   7170
   7171	if (!(reg & (1 << tag))) {
   7172		dev_err(hba->dev,
   7173		"%s: cmd was completed, but without a notifying intr, tag = %d",
   7174		__func__, tag);
   7175		__ufshcd_transfer_req_compl(hba, 1UL << tag);
   7176		goto release;
   7177	}
   7178
   7179	/*
    7180	 * Task abort to the device W-LUN is illegal. When this command
    7181	 * fails due to the spec violation, the next step of SCSI error
    7182	 * handling is to send an LU reset which, again, is a spec violation.
    7183	 * To avoid these unnecessary/illegal steps, first we clean up
    7184	 * the lrb taken by this cmd and re-set it in outstanding_reqs,
    7185	 * then queue the eh_work and bail.
   7186	 */
   7187	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
   7188		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
   7189
   7190		spin_lock_irqsave(host->host_lock, flags);
   7191		hba->force_reset = true;
   7192		ufshcd_schedule_eh_work(hba);
   7193		spin_unlock_irqrestore(host->host_lock, flags);
   7194		goto release;
   7195	}
   7196
   7197	/* Skip task abort in case previous aborts failed and report failure */
   7198	if (lrbp->req_abort_skip) {
   7199		dev_err(hba->dev, "%s: skipping abort\n", __func__);
   7200		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
   7201		goto release;
   7202	}
   7203
   7204	err = ufshcd_try_to_abort_task(hba, tag);
   7205	if (err) {
   7206		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
   7207		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
   7208		err = FAILED;
   7209		goto release;
   7210	}
   7211
   7212	/*
   7213	 * Clear the corresponding bit from outstanding_reqs since the command
   7214	 * has been aborted successfully.
   7215	 */
   7216	spin_lock_irqsave(&hba->outstanding_lock, flags);
   7217	outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
   7218	spin_unlock_irqrestore(&hba->outstanding_lock, flags);
   7219
   7220	if (outstanding)
   7221		ufshcd_release_scsi_cmd(hba, lrbp);
   7222
   7223	err = SUCCESS;
   7224
   7225release:
   7226	/* Matches the ufshcd_hold() call at the start of this function. */
   7227	ufshcd_release(hba);
   7228	return err;
   7229}
   7230
   7231/**
   7232 * ufshcd_host_reset_and_restore - reset and restore host controller
   7233 * @hba: per-adapter instance
   7234 *
   7235 * Note that host controller reset may issue DME_RESET to
   7236 * local and remote (device) Uni-Pro stack and the attributes
   7237 * are reset to default state.
   7238 *
   7239 * Returns zero on success, non-zero on failure
   7240 */
   7241static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
   7242{
   7243	int err;
   7244
   7245	/*
   7246	 * Stop the host controller and complete the requests
   7247	 * cleared by h/w
   7248	 */
   7249	ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
   7250	ufshcd_hba_stop(hba);
   7251	hba->silence_err_logs = true;
   7252	ufshcd_complete_requests(hba);
   7253	hba->silence_err_logs = false;
   7254
   7255	/* scale up clocks to max frequency before full reinitialization */
   7256	ufshcd_set_clk_freq(hba, true);
   7257
   7258	err = ufshcd_hba_enable(hba);
   7259
   7260	/* Establish the link again and restore the device */
   7261	if (!err)
   7262		err = ufshcd_probe_hba(hba, false);
   7263
   7264	if (err)
   7265		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
   7266	ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
   7267	return err;
   7268}
   7269
   7270/**
   7271 * ufshcd_reset_and_restore - reset and re-initialize host/device
   7272 * @hba: per-adapter instance
   7273 *
   7274 * Reset and recover device, host and re-establish link. This
   7275 * is helpful to recover the communication in fatal error conditions.
   7276 *
   7277 * Returns zero on success, non-zero on failure
   7278 */
   7279static int ufshcd_reset_and_restore(struct ufs_hba *hba)
   7280{
   7281	u32 saved_err = 0;
   7282	u32 saved_uic_err = 0;
   7283	int err = 0;
   7284	unsigned long flags;
   7285	int retries = MAX_HOST_RESET_RETRIES;
   7286
   7287	spin_lock_irqsave(hba->host->host_lock, flags);
   7288	do {
   7289		/*
    7290		 * This is a fresh start; cache and clear the saved errors first,
    7291		 * in case new errors are generated during reset and restore.
   7292		 */
   7293		saved_err |= hba->saved_err;
   7294		saved_uic_err |= hba->saved_uic_err;
   7295		hba->saved_err = 0;
   7296		hba->saved_uic_err = 0;
   7297		hba->force_reset = false;
   7298		hba->ufshcd_state = UFSHCD_STATE_RESET;
   7299		spin_unlock_irqrestore(hba->host->host_lock, flags);
   7300
   7301		/* Reset the attached device */
   7302		ufshcd_device_reset(hba);
   7303
   7304		err = ufshcd_host_reset_and_restore(hba);
   7305
   7306		spin_lock_irqsave(hba->host->host_lock, flags);
   7307		if (err)
   7308			continue;
   7309		/* Do not exit unless operational or dead */
   7310		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
   7311		    hba->ufshcd_state != UFSHCD_STATE_ERROR &&
   7312		    hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
   7313			err = -EAGAIN;
   7314	} while (err && --retries);
   7315
   7316	/*
    7317	 * Inform the SCSI mid-layer that we did a reset so that it can
    7318	 * handle Unit Attention properly.
   7319	 */
   7320	scsi_report_bus_reset(hba->host, 0);
   7321	if (err) {
   7322		hba->ufshcd_state = UFSHCD_STATE_ERROR;
   7323		hba->saved_err |= saved_err;
   7324		hba->saved_uic_err |= saved_uic_err;
   7325	}
   7326	spin_unlock_irqrestore(hba->host->host_lock, flags);
   7327
   7328	return err;
   7329}
   7330
   7331/**
   7332 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
   7333 * @cmd: SCSI command pointer
   7334 *
   7335 * Returns SUCCESS/FAILED
   7336 */
   7337static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
   7338{
   7339	int err = SUCCESS;
   7340	unsigned long flags;
   7341	struct ufs_hba *hba;
   7342
   7343	hba = shost_priv(cmd->device->host);
   7344
   7345	spin_lock_irqsave(hba->host->host_lock, flags);
   7346	hba->force_reset = true;
   7347	ufshcd_schedule_eh_work(hba);
   7348	dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
   7349	spin_unlock_irqrestore(hba->host->host_lock, flags);
   7350
   7351	flush_work(&hba->eh_work);
   7352
   7353	spin_lock_irqsave(hba->host->host_lock, flags);
   7354	if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
   7355		err = FAILED;
   7356	spin_unlock_irqrestore(hba->host->host_lock, flags);
   7357
   7358	return err;
   7359}
   7360
   7361/**
   7362 * ufshcd_get_max_icc_level - calculate the ICC level
   7363 * @sup_curr_uA: max. current supported by the regulator
   7364 * @start_scan: row at the desc table to start scan from
   7365 * @buff: power descriptor buffer
   7366 *
   7367 * Returns calculated max ICC level for specific regulator
   7368 */
   7369static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
   7370{
   7371	int i;
   7372	int curr_uA;
   7373	u16 data;
   7374	u16 unit;
   7375
   7376	for (i = start_scan; i >= 0; i--) {
   7377		data = get_unaligned_be16(&buff[2 * i]);
   7378		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
   7379						ATTR_ICC_LVL_UNIT_OFFSET;
   7380		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
   7381		switch (unit) {
   7382		case UFSHCD_NANO_AMP:
   7383			curr_uA = curr_uA / 1000;
   7384			break;
   7385		case UFSHCD_MILI_AMP:
   7386			curr_uA = curr_uA * 1000;
   7387			break;
   7388		case UFSHCD_AMP:
   7389			curr_uA = curr_uA * 1000 * 1000;
   7390			break;
   7391		case UFSHCD_MICRO_AMP:
   7392		default:
   7393			break;
   7394		}
   7395		if (sup_curr_uA >= curr_uA)
   7396			break;
   7397	}
   7398	if (i < 0) {
   7399		i = 0;
   7400		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
   7401	}
   7402
   7403	return (u32)i;
   7404}
   7405
   7406/**
   7407 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
    7408 * In case regulators are not initialized, we'll return 0
    7409 * @hba: per-adapter instance
    7410 * @desc_buf: power descriptor buffer to extract ICC levels from.
    7411 * @len: length of desc_buf
   7412 *
   7413 * Returns calculated ICC level
   7414 */
   7415static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
   7416							u8 *desc_buf, int len)
   7417{
   7418	u32 icc_level = 0;
   7419
   7420	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
   7421						!hba->vreg_info.vccq2) {
   7422		/*
    7423		 * Use dev_dbg to avoid messages during runtime PM: otherwise
    7424		 * messages written back to storage by user space would cause a
    7425		 * runtime resume, which would cause more messages and so on in
    7426		 * a never-ending cycle.
   7427		 */
   7428		dev_dbg(hba->dev,
   7429			"%s: Regulator capability was not set, actvIccLevel=%d",
   7430							__func__, icc_level);
   7431		goto out;
   7432	}
   7433
   7434	if (hba->vreg_info.vcc->max_uA)
   7435		icc_level = ufshcd_get_max_icc_level(
   7436				hba->vreg_info.vcc->max_uA,
   7437				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
   7438				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
   7439
   7440	if (hba->vreg_info.vccq->max_uA)
   7441		icc_level = ufshcd_get_max_icc_level(
   7442				hba->vreg_info.vccq->max_uA,
   7443				icc_level,
   7444				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
   7445
   7446	if (hba->vreg_info.vccq2->max_uA)
   7447		icc_level = ufshcd_get_max_icc_level(
   7448				hba->vreg_info.vccq2->max_uA,
   7449				icc_level,
   7450				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
   7451out:
   7452	return icc_level;
   7453}
   7454
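/**
 * ufshcd_set_active_icc_lvl - set the device's active ICC level
 * @hba: per-adapter instance
 *
 * Reads the power descriptor, derives the maximum active ICC level supported
 * by the regulators and writes it to the bActiveICCLevel attribute.
 */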
   7455static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
   7456{
   7457	int ret;
   7458	int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
   7459	u8 *desc_buf;
   7460	u32 icc_level;
   7461
   7462	desc_buf = kmalloc(buff_len, GFP_KERNEL);
   7463	if (!desc_buf)
   7464		return;
   7465
   7466	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
   7467				     desc_buf, buff_len);
   7468	if (ret) {
   7469		dev_err(hba->dev,
    7470			"%s: Failed reading power descriptor. len = %d ret = %d",
   7471			__func__, buff_len, ret);
   7472		goto out;
   7473	}
   7474
   7475	icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
   7476							 buff_len);
   7477	dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
   7478
   7479	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
   7480		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
   7481
   7482	if (ret)
   7483		dev_err(hba->dev,
   7484			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
   7485			__func__, icc_level, ret);
   7486
   7487out:
   7488	kfree(desc_buf);
   7489}
   7490
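/**
 * ufshcd_blk_pm_runtime_init - set up block layer runtime PM for a SCSI device
 * @sdev: SCSI device to be initialized
 *
 * Initializes block layer runtime PM for @sdev's request queue and, when
 * autosuspend is allowed for the device, sets the default autosuspend delay.
 */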
   7491static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
   7492{
   7493	scsi_autopm_get_device(sdev);
   7494	blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
   7495	if (sdev->rpm_autosuspend)
   7496		pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
   7497						 RPM_AUTOSUSPEND_DELAY_MS);
   7498	scsi_autopm_put_device(sdev);
   7499}
   7500
   7501/**
   7502 * ufshcd_scsi_add_wlus - Adds required W-LUs
   7503 * @hba: per-adapter instance
   7504 *
   7505 * UFS device specification requires the UFS devices to support 4 well known
   7506 * logical units:
   7507 *	"REPORT_LUNS" (address: 01h)
   7508 *	"UFS Device" (address: 50h)
   7509 *	"RPMB" (address: 44h)
   7510 *	"BOOT" (address: 30h)
   7511 * UFS device's power management needs to be controlled by "POWER CONDITION"
    7512 * field of the SSU (START STOP UNIT) command. But this "power condition" field
    7513 * will take effect only when it's sent to the "UFS device" well known logical
    7514 * unit, hence we require a scsi_device instance to represent this logical unit
    7515 * in order for the UFS host driver to send the SSU command for power management.
   7516 *
   7517 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
   7518 * Block) LU so user space process can control this LU. User space may also
   7519 * want to have access to BOOT LU.
   7520 *
    7521 * This function adds scsi device instances for each of the well known LUs
    7522 * (except the "REPORT LUNS" LU).
   7523 *
   7524 * Returns zero on success (all required W-LUs are added successfully),
   7525 * non-zero error value on failure (if failed to add any of the required W-LU).
   7526 */
   7527static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
   7528{
   7529	int ret = 0;
   7530	struct scsi_device *sdev_boot, *sdev_rpmb;
   7531
   7532	hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
   7533		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
   7534	if (IS_ERR(hba->ufs_device_wlun)) {
   7535		ret = PTR_ERR(hba->ufs_device_wlun);
   7536		hba->ufs_device_wlun = NULL;
   7537		goto out;
   7538	}
   7539	scsi_device_put(hba->ufs_device_wlun);
   7540
   7541	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
   7542		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
   7543	if (IS_ERR(sdev_rpmb)) {
   7544		ret = PTR_ERR(sdev_rpmb);
   7545		goto remove_ufs_device_wlun;
   7546	}
   7547	ufshcd_blk_pm_runtime_init(sdev_rpmb);
   7548	scsi_device_put(sdev_rpmb);
   7549
   7550	sdev_boot = __scsi_add_device(hba->host, 0, 0,
   7551		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
   7552	if (IS_ERR(sdev_boot)) {
   7553		dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
   7554	} else {
   7555		ufshcd_blk_pm_runtime_init(sdev_boot);
   7556		scsi_device_put(sdev_boot);
   7557	}
   7558	goto out;
   7559
   7560remove_ufs_device_wlun:
   7561	scsi_remove_device(hba->ufs_device_wlun);
   7562out:
   7563	return ret;
   7564}
   7565
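/**
 * ufshcd_wb_probe - probe the device's WriteBooster capability
 * @hba: per-adapter instance
 * @desc_buf: device descriptor buffer
 *
 * Checks whether the device advertises WriteBooster support, whether a WB
 * buffer is provisioned (shared or dedicated per-LU) and whether the WB
 * buffer lifetime is still available. If any check fails, the
 * UFSHCD_CAP_WB_EN capability is cleared.
 */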
   7566static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
   7567{
   7568	struct ufs_dev_info *dev_info = &hba->dev_info;
   7569	u8 lun;
   7570	u32 d_lu_wb_buf_alloc;
   7571	u32 ext_ufs_feature;
   7572
   7573	if (!ufshcd_is_wb_allowed(hba))
   7574		return;
   7575
   7576	/*
   7577	 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
   7578	 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
   7579	 * enabled
   7580	 */
   7581	if (!(dev_info->wspecversion >= 0x310 ||
   7582	      dev_info->wspecversion == 0x220 ||
   7583	     (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
   7584		goto wb_disabled;
   7585
   7586	if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
   7587	    DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
   7588		goto wb_disabled;
   7589
   7590	ext_ufs_feature = get_unaligned_be32(desc_buf +
   7591					DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
   7592
   7593	if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
   7594		goto wb_disabled;
   7595
   7596	/*
   7597	 * WB may be supported but not configured while provisioning. The spec
   7598	 * says, in dedicated wb buffer mode, a max of 1 lun would have wb
   7599	 * buffer configured.
   7600	 */
   7601	dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
   7602
   7603	dev_info->b_presrv_uspc_en =
   7604		desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
   7605
   7606	if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
   7607		if (!get_unaligned_be32(desc_buf +
   7608				   DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
   7609			goto wb_disabled;
   7610	} else {
   7611		for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
   7612			d_lu_wb_buf_alloc = 0;
   7613			ufshcd_read_unit_desc_param(hba,
   7614					lun,
   7615					UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
   7616					(u8 *)&d_lu_wb_buf_alloc,
   7617					sizeof(d_lu_wb_buf_alloc));
   7618			if (d_lu_wb_buf_alloc) {
   7619				dev_info->wb_dedicated_lu = lun;
   7620				break;
   7621			}
   7622		}
   7623
   7624		if (!d_lu_wb_buf_alloc)
   7625			goto wb_disabled;
   7626	}
   7627
   7628	if (!ufshcd_is_wb_buf_lifetime_available(hba))
   7629		goto wb_disabled;
   7630
   7631	return;
   7632
   7633wb_disabled:
   7634	hba->caps &= ~UFSHCD_CAP_WB_EN;
   7635}
   7636
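/**
 * ufshcd_temp_notif_probe - probe support for temperature notifications
 * @hba: per-adapter instance
 * @desc_buf: device descriptor buffer
 *
 * Enables the too-high/too-low temperature exception events and registers the
 * hwmon interface when both the host and the device support temperature
 * notification.
 */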
   7637static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf)
   7638{
   7639	struct ufs_dev_info *dev_info = &hba->dev_info;
   7640	u32 ext_ufs_feature;
   7641	u8 mask = 0;
   7642
   7643	if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
   7644		return;
   7645
   7646	ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
   7647
   7648	if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
   7649		mask |= MASK_EE_TOO_LOW_TEMP;
   7650
   7651	if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
   7652		mask |= MASK_EE_TOO_HIGH_TEMP;
   7653
   7654	if (mask) {
   7655		ufshcd_enable_ee(hba, mask);
   7656		ufs_hwmon_probe(hba, mask);
   7657	}
   7658}
   7659
   7660void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
   7661			     const struct ufs_dev_quirk *fixups)
   7662{
   7663	const struct ufs_dev_quirk *f;
   7664	struct ufs_dev_info *dev_info = &hba->dev_info;
   7665
   7666	if (!fixups)
   7667		return;
   7668
   7669	for (f = fixups; f->quirk; f++) {
   7670		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
   7671		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
   7672		     ((dev_info->model &&
   7673		       STR_PRFX_EQUAL(f->model, dev_info->model)) ||
   7674		      !strcmp(f->model, UFS_ANY_MODEL)))
   7675			hba->dev_quirks |= f->quirk;
   7676	}
   7677}
   7678EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
   7679
   7680static void ufs_fixup_device_setup(struct ufs_hba *hba)
   7681{
   7682	/* fix by general quirk table */
   7683	ufshcd_fixup_dev_quirks(hba, ufs_fixups);
   7684
   7685	/* allow vendors to fix quirks */
   7686	ufshcd_vops_fixup_dev_quirks(hba);
   7687}
   7688
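/**
 * ufs_get_device_desc - read the device descriptor and cache device info
 * @hba: per-adapter instance
 *
 * Reads the device descriptor and the product name string descriptor, caches
 * the manufacturer ID, specification version and model name in hba->dev_info,
 * applies device quirks and probes optional features (HPB, WriteBooster,
 * temperature notification).
 *
 * Returns zero on success, non-zero on failure
 */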
   7689static int ufs_get_device_desc(struct ufs_hba *hba)
   7690{
   7691	int err;
   7692	u8 model_index;
   7693	u8 b_ufs_feature_sup;
   7694	u8 *desc_buf;
   7695	struct ufs_dev_info *dev_info = &hba->dev_info;
   7696
   7697	desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
   7698	if (!desc_buf) {
   7699		err = -ENOMEM;
   7700		goto out;
   7701	}
   7702
   7703	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
   7704				     hba->desc_size[QUERY_DESC_IDN_DEVICE]);
   7705	if (err) {
   7706		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
   7707			__func__, err);
   7708		goto out;
   7709	}
   7710
   7711	/*
   7712	 * getting vendor (manufacturerID) and Bank Index in big endian
   7713	 * format
   7714	 */
   7715	dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
   7716				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
   7717
   7718	/* getting Specification Version in big endian format */
   7719	dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
   7720				      desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
   7721	b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
   7722
   7723	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
   7724
   7725	if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
   7726	    (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
   7727		bool hpb_en = false;
   7728
   7729		ufshpb_get_dev_info(hba, desc_buf);
   7730
   7731		if (!ufshpb_is_legacy(hba))
   7732			err = ufshcd_query_flag_retry(hba,
   7733						      UPIU_QUERY_OPCODE_READ_FLAG,
   7734						      QUERY_FLAG_IDN_HPB_EN, 0,
   7735						      &hpb_en);
   7736
   7737		if (ufshpb_is_legacy(hba) || (!err && hpb_en))
   7738			dev_info->hpb_enabled = true;
   7739	}
   7740
   7741	err = ufshcd_read_string_desc(hba, model_index,
   7742				      &dev_info->model, SD_ASCII_STD);
   7743	if (err < 0) {
   7744		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
   7745			__func__, err);
   7746		goto out;
   7747	}
   7748
   7749	hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
   7750		desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
   7751
   7752	ufs_fixup_device_setup(hba);
   7753
   7754	ufshcd_wb_probe(hba, desc_buf);
   7755
   7756	ufshcd_temp_notif_probe(hba, desc_buf);
   7757
   7758	/*
   7759	 * ufshcd_read_string_desc returns size of the string
   7760	 * reset the error value
   7761	 */
   7762	err = 0;
   7763
   7764out:
   7765	kfree(desc_buf);
   7766	return err;
   7767}
   7768
   7769static void ufs_put_device_desc(struct ufs_hba *hba)
   7770{
   7771	struct ufs_dev_info *dev_info = &hba->dev_info;
   7772
   7773	kfree(dev_info->model);
   7774	dev_info->model = NULL;
   7775}
   7776
   7777/**
   7778 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
   7779 * @hba: per-adapter instance
   7780 *
   7781 * PA_TActivate parameter can be tuned manually if UniPro version is less than
    7782 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
   7783 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
   7784 * the hibern8 exit latency.
   7785 *
   7786 * Returns zero on success, non-zero error value on failure.
   7787 */
   7788static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
   7789{
   7790	int ret = 0;
   7791	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
   7792
   7793	ret = ufshcd_dme_peer_get(hba,
   7794				  UIC_ARG_MIB_SEL(
   7795					RX_MIN_ACTIVATETIME_CAPABILITY,
   7796					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
   7797				  &peer_rx_min_activatetime);
   7798	if (ret)
   7799		goto out;
   7800
   7801	/* make sure proper unit conversion is applied */
   7802	tuned_pa_tactivate =
   7803		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
   7804		 / PA_TACTIVATE_TIME_UNIT_US);
   7805	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
   7806			     tuned_pa_tactivate);
   7807
   7808out:
   7809	return ret;
   7810}
   7811
   7812/**
   7813 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
   7814 * @hba: per-adapter instance
   7815 *
   7816 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
    7817 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
   7818 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
   7819 * This optimal value can help reduce the hibern8 exit latency.
   7820 *
   7821 * Returns zero on success, non-zero error value on failure.
   7822 */
   7823static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
   7824{
   7825	int ret = 0;
   7826	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
   7827	u32 max_hibern8_time, tuned_pa_hibern8time;
   7828
   7829	ret = ufshcd_dme_get(hba,
   7830			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
   7831					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
   7832				  &local_tx_hibern8_time_cap);
   7833	if (ret)
   7834		goto out;
   7835
   7836	ret = ufshcd_dme_peer_get(hba,
   7837				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
   7838					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
   7839				  &peer_rx_hibern8_time_cap);
   7840	if (ret)
   7841		goto out;
   7842
   7843	max_hibern8_time = max(local_tx_hibern8_time_cap,
   7844			       peer_rx_hibern8_time_cap);
   7845	/* make sure proper unit conversion is applied */
   7846	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
   7847				/ PA_HIBERN8_TIME_UNIT_US);
   7848	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
   7849			     tuned_pa_hibern8time);
   7850out:
   7851	return ret;
   7852}
   7853
   7854/**
   7855 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
   7856 * less than device PA_TACTIVATE time.
   7857 * @hba: per-adapter instance
   7858 *
   7859 * Some UFS devices require host PA_TACTIVATE to be lower than device
    7860 * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
   7861 * for such devices.
   7862 *
   7863 * Returns zero on success, non-zero error value on failure.
   7864 */
   7865static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
   7866{
   7867	int ret = 0;
   7868	u32 granularity, peer_granularity;
   7869	u32 pa_tactivate, peer_pa_tactivate;
   7870	u32 pa_tactivate_us, peer_pa_tactivate_us;
   7871	u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
   7872
   7873	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
   7874				  &granularity);
   7875	if (ret)
   7876		goto out;
   7877
   7878	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
   7879				  &peer_granularity);
   7880	if (ret)
   7881		goto out;
   7882
   7883	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
   7884	    (granularity > PA_GRANULARITY_MAX_VAL)) {
   7885		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
   7886			__func__, granularity);
   7887		return -EINVAL;
   7888	}
   7889
   7890	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
   7891	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
   7892		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
   7893			__func__, peer_granularity);
   7894		return -EINVAL;
   7895	}
   7896
   7897	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
   7898	if (ret)
   7899		goto out;
   7900
   7901	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
   7902				  &peer_pa_tactivate);
   7903	if (ret)
   7904		goto out;
   7905
   7906	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
   7907	peer_pa_tactivate_us = peer_pa_tactivate *
   7908			     gran_to_us_table[peer_granularity - 1];
   7909
   7910	if (pa_tactivate_us >= peer_pa_tactivate_us) {
   7911		u32 new_peer_pa_tactivate;
   7912
   7913		new_peer_pa_tactivate = pa_tactivate_us /
   7914				      gran_to_us_table[peer_granularity - 1];
   7915		new_peer_pa_tactivate++;
   7916		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
   7917					  new_peer_pa_tactivate);
   7918	}
   7919
   7920out:
   7921	return ret;
   7922}
   7923
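/**
 * ufshcd_tune_unipro_params - tune UniPro parameters and apply device quirks
 * @hba: per-adapter instance
 *
 * Tunes PA_TActivate and PA_Hibern8Time when required, applies vendor device
 * quirks and handles the PA_TACTIVATE related device quirks.
 */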
   7924static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
   7925{
   7926	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
   7927		ufshcd_tune_pa_tactivate(hba);
   7928		ufshcd_tune_pa_hibern8time(hba);
   7929	}
   7930
   7931	ufshcd_vops_apply_dev_quirks(hba);
   7932
   7933	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
   7934		/* set 1ms timeout for PA_TACTIVATE */
   7935		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
   7936
   7937	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
   7938		ufshcd_quirk_tune_host_pa_tactivate(hba);
   7939}
   7940
   7941static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
   7942{
   7943	hba->ufs_stats.hibern8_exit_cnt = 0;
   7944	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
   7945	hba->req_abort_count = 0;
   7946}
   7947
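/**
 * ufshcd_device_geo_params_init - initialize geometry based device parameters
 * @hba: per-adapter instance
 *
 * Reads the geometry descriptor to determine the maximum number of supported
 * logical units and, if the descriptor is large enough, the HPB geometry info.
 *
 * Returns zero on success, non-zero on failure
 */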
   7948static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
   7949{
   7950	int err;
   7951	size_t buff_len;
   7952	u8 *desc_buf;
   7953
   7954	buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
   7955	desc_buf = kmalloc(buff_len, GFP_KERNEL);
   7956	if (!desc_buf) {
   7957		err = -ENOMEM;
   7958		goto out;
   7959	}
   7960
   7961	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
   7962				     desc_buf, buff_len);
   7963	if (err) {
   7964		dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
   7965				__func__, err);
   7966		goto out;
   7967	}
   7968
   7969	if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
   7970		hba->dev_info.max_lu_supported = 32;
   7971	else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
   7972		hba->dev_info.max_lu_supported = 8;
   7973
   7974	if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
   7975		GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
   7976		ufshpb_get_geo_info(hba, desc_buf);
   7977
   7978out:
   7979	kfree(desc_buf);
   7980	return err;
   7981}
   7982
   7983struct ufs_ref_clk {
   7984	unsigned long freq_hz;
   7985	enum ufs_ref_clk_freq val;
   7986};
   7987
   7988static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
   7989	{19200000, REF_CLK_FREQ_19_2_MHZ},
   7990	{26000000, REF_CLK_FREQ_26_MHZ},
   7991	{38400000, REF_CLK_FREQ_38_4_MHZ},
   7992	{52000000, REF_CLK_FREQ_52_MHZ},
   7993	{0, REF_CLK_FREQ_INVAL},
   7994};
   7995
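/**
 * ufs_get_bref_clk_from_hz - map a reference clock rate to a bRefClkFreq value
 * @freq: reference clock frequency in Hz
 *
 * Returns the matching ufs_ref_clk_freq value, or REF_CLK_FREQ_INVAL if @freq
 * is not one of the supported reference clock frequencies.
 */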
   7996static enum ufs_ref_clk_freq
   7997ufs_get_bref_clk_from_hz(unsigned long freq)
   7998{
   7999	int i;
   8000
   8001	for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
   8002		if (ufs_ref_clk_freqs[i].freq_hz == freq)
   8003			return ufs_ref_clk_freqs[i].val;
   8004
   8005	return REF_CLK_FREQ_INVAL;
   8006}
   8007
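/**
 * ufshcd_parse_dev_ref_clk_freq - derive bRefClkFreq from the reference clock
 * @hba: per-adapter instance
 * @refclk: device reference clock
 *
 * Converts the rate of @refclk into the corresponding bRefClkFreq value and
 * stores it in hba->dev_ref_clk_freq, logging an error if the rate is invalid.
 */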
   8008void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
   8009{
   8010	unsigned long freq;
   8011
   8012	freq = clk_get_rate(refclk);
   8013
   8014	hba->dev_ref_clk_freq =
   8015		ufs_get_bref_clk_from_hz(freq);
   8016
   8017	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
   8018		dev_err(hba->dev,
   8019		"invalid ref_clk setting = %ld\n", freq);
   8020}
   8021
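/**
 * ufshcd_set_dev_ref_clk - program the bRefClkFreq attribute on the device
 * @hba: per-adapter instance
 *
 * Reads the current bRefClkFreq attribute and, if it differs from the host's
 * reference clock setting, writes the new value to the device.
 *
 * Returns zero on success, non-zero on failure
 */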
   8022static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
   8023{
   8024	int err;
   8025	u32 ref_clk;
   8026	u32 freq = hba->dev_ref_clk_freq;
   8027
   8028	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
   8029			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
   8030
   8031	if (err) {
   8032		dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
   8033			err);
   8034		goto out;
   8035	}
   8036
   8037	if (ref_clk == freq)
   8038		goto out; /* nothing to update */
   8039
   8040	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
   8041			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
   8042
   8043	if (err) {
   8044		dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
   8045			ufs_ref_clk_freqs[freq].freq_hz);
   8046		goto out;
   8047	}
   8048
   8049	dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
   8050			ufs_ref_clk_freqs[freq].freq_hz);
   8051
   8052out:
   8053	return err;
   8054}
   8055
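/**
 * ufshcd_device_params_init - initialize UFS device parameters
 * @hba: per-adapter instance
 *
 * Initializes the descriptor sizes, the geometry parameters, the device
 * descriptor based information, the reference clock gating wait time, the
 * power-on write protect state and the maximum supported power mode.
 *
 * Returns zero on success, non-zero on failure
 */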
   8056static int ufshcd_device_params_init(struct ufs_hba *hba)
   8057{
   8058	bool flag;
   8059	int ret, i;
   8060
   8061	 /* Init device descriptor sizes */
   8062	for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
   8063		hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
   8064
   8065	/* Init UFS geometry descriptor related parameters */
   8066	ret = ufshcd_device_geo_params_init(hba);
   8067	if (ret)
   8068		goto out;
   8069
   8070	/* Check and apply UFS device quirks */
   8071	ret = ufs_get_device_desc(hba);
   8072	if (ret) {
   8073		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
   8074			__func__, ret);
   8075		goto out;
   8076	}
   8077
   8078	ufshcd_get_ref_clk_gating_wait(hba);
   8079
   8080	if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
   8081			QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
   8082		hba->dev_info.f_power_on_wp_en = flag;
   8083
   8084	/* Probe maximum power mode co-supported by both UFS host and device */
   8085	if (ufshcd_get_max_pwr_mode(hba))
   8086		dev_err(hba->dev,
   8087			"%s: Failed getting max supported power mode\n",
   8088			__func__);
   8089out:
   8090	return ret;
   8091}
   8092
   8093/**
   8094 * ufshcd_add_lus - probe and add UFS logical units
   8095 * @hba: per-adapter instance
   8096 */
   8097static int ufshcd_add_lus(struct ufs_hba *hba)
   8098{
   8099	int ret;
   8100
   8101	/* Add required well known logical units to scsi mid layer */
   8102	ret = ufshcd_scsi_add_wlus(hba);
   8103	if (ret)
   8104		goto out;
   8105
   8106	/* Initialize devfreq after UFS device is detected */
   8107	if (ufshcd_is_clkscaling_supported(hba)) {
   8108		memcpy(&hba->clk_scaling.saved_pwr_info.info,
   8109			&hba->pwr_info,
   8110			sizeof(struct ufs_pa_layer_attr));
   8111		hba->clk_scaling.saved_pwr_info.is_valid = true;
   8112		hba->clk_scaling.is_allowed = true;
   8113
   8114		ret = ufshcd_devfreq_init(hba);
   8115		if (ret)
   8116			goto out;
   8117
   8118		hba->clk_scaling.is_enabled = true;
   8119		ufshcd_init_clk_scaling_sysfs(hba);
   8120	}
   8121
   8122	ufs_bsg_probe(hba);
   8123	ufshpb_init(hba);
   8124	scsi_scan_host(hba->host);
   8125	pm_runtime_put_sync(hba->dev);
   8126
   8127out:
   8128	return ret;
   8129}
   8130
   8131/**
   8132 * ufshcd_probe_hba - probe hba to detect device and initialize it
   8133 * @hba: per-adapter instance
   8134 * @init_dev_params: whether or not to call ufshcd_device_params_init().
   8135 *
   8136 * Execute link-startup and verify device initialization
   8137 */
   8138static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
   8139{
   8140	int ret;
   8141	unsigned long flags;
   8142	ktime_t start = ktime_get();
   8143
   8144	hba->ufshcd_state = UFSHCD_STATE_RESET;
   8145
   8146	ret = ufshcd_link_startup(hba);
   8147	if (ret)
   8148		goto out;
   8149
   8150	if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
   8151		goto out;
   8152
   8153	/* Debug counters initialization */
   8154	ufshcd_clear_dbg_ufs_stats(hba);
   8155
   8156	/* UniPro link is active now */
   8157	ufshcd_set_link_active(hba);
   8158
   8159	/* Verify device initialization by sending NOP OUT UPIU */
   8160	ret = ufshcd_verify_dev_init(hba);
   8161	if (ret)
   8162		goto out;
   8163
    8164	/* Initiate UFS initialization and wait until it completes */
   8165	ret = ufshcd_complete_dev_init(hba);
   8166	if (ret)
   8167		goto out;
   8168
   8169	/*
    8170	 * Initialize the UFS device parameters used by the driver; these
   8171	 * parameters are associated with UFS descriptors.
   8172	 */
   8173	if (init_dev_params) {
   8174		ret = ufshcd_device_params_init(hba);
   8175		if (ret)
   8176			goto out;
   8177	}
   8178
   8179	ufshcd_tune_unipro_params(hba);
   8180
   8181	/* UFS device is also active now */
   8182	ufshcd_set_ufs_dev_active(hba);
   8183	ufshcd_force_reset_auto_bkops(hba);
   8184
   8185	/* Gear up to HS gear if supported */
   8186	if (hba->max_pwr_info.is_valid) {
   8187		/*
   8188		 * Set the right value to bRefClkFreq before attempting to
   8189		 * switch to HS gears.
   8190		 */
   8191		if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
   8192			ufshcd_set_dev_ref_clk(hba);
   8193		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
   8194		if (ret) {
   8195			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
   8196					__func__, ret);
   8197			goto out;
   8198		}
   8199		ufshcd_print_pwr_info(hba);
   8200	}
   8201
   8202	/*
    8203	 * bActiveICCLevel is volatile for the UFS device (as per the latest v2.1
    8204	 * spec) and for removable UFS cards as well, hence always set the parameter.
    8205	 * Note: the error handler may issue a device reset, which resets
    8206	 * bActiveICCLevel as well, so it is always safe to set this here.
   8207	 */
   8208	ufshcd_set_active_icc_lvl(hba);
   8209
   8210	ufshcd_wb_config(hba);
   8211	if (hba->ee_usr_mask)
   8212		ufshcd_write_ee_control(hba);
   8213	/* Enable Auto-Hibernate if configured */
   8214	ufshcd_auto_hibern8_enable(hba);
   8215
   8216	ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT);
   8217out:
   8218	spin_lock_irqsave(hba->host->host_lock, flags);
   8219	if (ret)
   8220		hba->ufshcd_state = UFSHCD_STATE_ERROR;
   8221	else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
   8222		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
   8223	spin_unlock_irqrestore(hba->host->host_lock, flags);
   8224
   8225	trace_ufshcd_init(dev_name(hba->dev), ret,
   8226		ktime_to_us(ktime_sub(ktime_get(), start)),
   8227		hba->curr_dev_pwr_mode, hba->uic_link_state);
   8228	return ret;
   8229}
   8230
   8231/**
   8232 * ufshcd_async_scan - asynchronous execution for probing hba
   8233 * @data: data pointer to pass to this function
   8234 * @cookie: cookie data
   8235 */
   8236static void ufshcd_async_scan(void *data, async_cookie_t cookie)
   8237{
   8238	struct ufs_hba *hba = (struct ufs_hba *)data;
   8239	int ret;
   8240
   8241	down(&hba->host_sem);
   8242	/* Initialize hba, detect and initialize UFS device */
   8243	ret = ufshcd_probe_hba(hba, true);
   8244	up(&hba->host_sem);
   8245	if (ret)
   8246		goto out;
   8247
    8248	/* Probe and add UFS logical units */
   8249	ret = ufshcd_add_lus(hba);
   8250out:
   8251	/*
   8252	 * If we failed to initialize the device or the device is not
   8253	 * present, turn off the power/clocks etc.
   8254	 */
   8255	if (ret) {
   8256		pm_runtime_put_sync(hba->dev);
   8257		ufshcd_hba_exit(hba);
   8258	}
   8259}
   8260
   8261static const struct attribute_group *ufshcd_driver_groups[] = {
   8262	&ufs_sysfs_unit_descriptor_group,
   8263	&ufs_sysfs_lun_attributes_group,
   8264#ifdef CONFIG_SCSI_UFS_HPB
   8265	&ufs_sysfs_hpb_stat_group,
   8266	&ufs_sysfs_hpb_param_group,
   8267#endif
   8268	NULL,
   8269};
   8270
   8271static struct ufs_hba_variant_params ufs_hba_vps = {
   8272	.hba_enable_delay_us		= 1000,
   8273	.wb_flush_threshold		= UFS_WB_BUF_REMAIN_PERCENT(40),
   8274	.devfreq_profile.polling_ms	= 100,
   8275	.devfreq_profile.target		= ufshcd_devfreq_target,
   8276	.devfreq_profile.get_dev_status	= ufshcd_devfreq_get_dev_status,
   8277	.ondemand_data.upthreshold	= 70,
   8278	.ondemand_data.downdifferential	= 5,
   8279};
   8280
   8281static struct scsi_host_template ufshcd_driver_template = {
   8282	.module			= THIS_MODULE,
   8283	.name			= UFSHCD,
   8284	.proc_name		= UFSHCD,
   8285	.map_queues		= ufshcd_map_queues,
   8286	.queuecommand		= ufshcd_queuecommand,
   8287	.mq_poll		= ufshcd_poll,
   8288	.slave_alloc		= ufshcd_slave_alloc,
   8289	.slave_configure	= ufshcd_slave_configure,
   8290	.slave_destroy		= ufshcd_slave_destroy,
   8291	.change_queue_depth	= ufshcd_change_queue_depth,
   8292	.eh_abort_handler	= ufshcd_abort,
   8293	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
   8294	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
   8295	.this_id		= -1,
   8296	.sg_tablesize		= SG_ALL,
   8297	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
   8298	.can_queue		= UFSHCD_CAN_QUEUE,
   8299	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
   8300	.max_host_blocked	= 1,
   8301	.track_queue_depth	= 1,
   8302	.sdev_groups		= ufshcd_driver_groups,
   8303	.dma_boundary		= PAGE_SIZE - 1,
   8304	.rpm_autosuspend_delay	= RPM_AUTOSUSPEND_DELAY_MS,
   8305};
   8306
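        /**
         * ufshcd_config_vreg_load - set the load current for a UFS regulator
         * @dev: pointer to device handle
         * @vreg: regulator to configure
         * @ua: load to request, in microamperes
         *
         * Regulators without a configured current limit (max_uA) are skipped.
         *
         * Returns 0 on success, negative value on failure.
         */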
   8307static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
   8308				   int ua)
   8309{
   8310	int ret;
   8311
   8312	if (!vreg)
   8313		return 0;
   8314
   8315	/*
    8316	 * The "set_load" operation is only required on regulators that
    8317	 * explicitly configure a current limit. Otherwise a zero max_uA
    8318	 * may cause unexpected behavior when the regulator is enabled or
    8319	 * set to high power mode.
   8320	 */
   8321	if (!vreg->max_uA)
   8322		return 0;
   8323
   8324	ret = regulator_set_load(vreg->reg, ua);
   8325	if (ret < 0) {
   8326		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
   8327				__func__, vreg->name, ua, ret);
   8328	}
   8329
   8330	return ret;
   8331}
   8332
   8333static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
   8334					 struct ufs_vreg *vreg)
   8335{
   8336	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
   8337}
   8338
   8339static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
   8340					 struct ufs_vreg *vreg)
   8341{
   8342	if (!vreg)
   8343		return 0;
   8344
   8345	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
   8346}
   8347
   8348static int ufshcd_config_vreg(struct device *dev,
   8349		struct ufs_vreg *vreg, bool on)
   8350{
   8351	if (regulator_count_voltages(vreg->reg) <= 0)
   8352		return 0;
   8353
   8354	return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
   8355}
   8356
   8357static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
   8358{
   8359	int ret = 0;
   8360
   8361	if (!vreg || vreg->enabled)
   8362		goto out;
   8363
   8364	ret = ufshcd_config_vreg(dev, vreg, true);
   8365	if (!ret)
   8366		ret = regulator_enable(vreg->reg);
   8367
   8368	if (!ret)
   8369		vreg->enabled = true;
   8370	else
   8371		dev_err(dev, "%s: %s enable failed, err=%d\n",
   8372				__func__, vreg->name, ret);
   8373out:
   8374	return ret;
   8375}
   8376
   8377static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
   8378{
   8379	int ret = 0;
   8380
   8381	if (!vreg || !vreg->enabled || vreg->always_on)
   8382		goto out;
   8383
   8384	ret = regulator_disable(vreg->reg);
   8385
   8386	if (!ret) {
   8387		/* ignore errors on applying disable config */
   8388		ufshcd_config_vreg(dev, vreg, false);
   8389		vreg->enabled = false;
   8390	} else {
   8391		dev_err(dev, "%s: %s disable failed, err=%d\n",
   8392				__func__, vreg->name, ret);
   8393	}
   8394out:
   8395	return ret;
   8396}
   8397
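        /**
         * ufshcd_setup_vreg - enable or disable the UFS device power rails
         * @hba: per-adapter instance
         * @on: true to enable VCC, VCCQ and VCCQ2, false to disable them
         *
         * On failure all three rails are switched back off.
         *
         * Returns 0 on success, non-zero value on failure.
         */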
   8398static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
   8399{
   8400	int ret = 0;
   8401	struct device *dev = hba->dev;
   8402	struct ufs_vreg_info *info = &hba->vreg_info;
   8403
   8404	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
   8405	if (ret)
   8406		goto out;
   8407
   8408	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
   8409	if (ret)
   8410		goto out;
   8411
   8412	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
   8413
   8414out:
   8415	if (ret) {
   8416		ufshcd_toggle_vreg(dev, info->vccq2, false);
   8417		ufshcd_toggle_vreg(dev, info->vccq, false);
   8418		ufshcd_toggle_vreg(dev, info->vcc, false);
   8419	}
   8420	return ret;
   8421}
   8422
   8423static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
   8424{
   8425	struct ufs_vreg_info *info = &hba->vreg_info;
   8426
   8427	return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
   8428}
   8429
   8430static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
   8431{
   8432	int ret = 0;
   8433
   8434	if (!vreg)
   8435		goto out;
   8436
   8437	vreg->reg = devm_regulator_get(dev, vreg->name);
   8438	if (IS_ERR(vreg->reg)) {
   8439		ret = PTR_ERR(vreg->reg);
   8440		dev_err(dev, "%s: %s get failed, err=%d\n",
   8441				__func__, vreg->name, ret);
   8442	}
   8443out:
   8444	return ret;
   8445}
   8446
   8447static int ufshcd_init_vreg(struct ufs_hba *hba)
   8448{
   8449	int ret = 0;
   8450	struct device *dev = hba->dev;
   8451	struct ufs_vreg_info *info = &hba->vreg_info;
   8452
   8453	ret = ufshcd_get_vreg(dev, info->vcc);
   8454	if (ret)
   8455		goto out;
   8456
   8457	ret = ufshcd_get_vreg(dev, info->vccq);
   8458	if (!ret)
   8459		ret = ufshcd_get_vreg(dev, info->vccq2);
   8460out:
   8461	return ret;
   8462}
   8463
   8464static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
   8465{
   8466	struct ufs_vreg_info *info = &hba->vreg_info;
   8467
   8468	return ufshcd_get_vreg(hba->dev, info->vdd_hba);
   8469}
   8470
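        /**
         * ufshcd_setup_clocks - enable or disable the host controller clocks
         * @hba: per-adapter instance
         * @on: true to prepare and enable the clocks, false to disable them
         *
         * Clocks marked keep_link_active are left untouched while the link is
         * active. The vendor specific setup_clocks callback is invoked before
         * and after the change.
         *
         * Returns 0 on success, non-zero value on failure.
         */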
   8471static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
   8472{
   8473	int ret = 0;
   8474	struct ufs_clk_info *clki;
   8475	struct list_head *head = &hba->clk_list_head;
   8476	unsigned long flags;
   8477	ktime_t start = ktime_get();
   8478	bool clk_state_changed = false;
   8479
   8480	if (list_empty(head))
   8481		goto out;
   8482
   8483	ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
   8484	if (ret)
   8485		return ret;
   8486
   8487	list_for_each_entry(clki, head, list) {
   8488		if (!IS_ERR_OR_NULL(clki->clk)) {
   8489			/*
   8490			 * Don't disable clocks which are needed
   8491			 * to keep the link active.
   8492			 */
   8493			if (ufshcd_is_link_active(hba) &&
   8494			    clki->keep_link_active)
   8495				continue;
   8496
   8497			clk_state_changed = on ^ clki->enabled;
   8498			if (on && !clki->enabled) {
   8499				ret = clk_prepare_enable(clki->clk);
   8500				if (ret) {
   8501					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
   8502						__func__, clki->name, ret);
   8503					goto out;
   8504				}
   8505			} else if (!on && clki->enabled) {
   8506				clk_disable_unprepare(clki->clk);
   8507			}
   8508			clki->enabled = on;
   8509			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
   8510					clki->name, on ? "en" : "dis");
   8511		}
   8512	}
   8513
   8514	ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
   8515	if (ret)
   8516		return ret;
   8517
   8518out:
   8519	if (ret) {
   8520		list_for_each_entry(clki, head, list) {
   8521			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
   8522				clk_disable_unprepare(clki->clk);
   8523		}
   8524	} else if (!ret && on) {
   8525		spin_lock_irqsave(hba->host->host_lock, flags);
   8526		hba->clk_gating.state = CLKS_ON;
   8527		trace_ufshcd_clk_gating(dev_name(hba->dev),
   8528					hba->clk_gating.state);
   8529		spin_unlock_irqrestore(hba->host->host_lock, flags);
   8530	}
   8531
   8532	if (clk_state_changed)
   8533		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
   8534			(on ? "on" : "off"),
   8535			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
   8536	return ret;
   8537}
   8538
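        /**
         * ufshcd_init_clocks - get and configure the host controller clocks
         * @hba: per-adapter instance
         *
         * Look up every clock in clk_list_head, parse the device reference clock
         * frequency from the "ref_clk" entry and, where a maximum frequency is
         * given, set the clock to that rate.
         *
         * Returns 0 on success, non-zero value on failure.
         */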
   8539static int ufshcd_init_clocks(struct ufs_hba *hba)
   8540{
   8541	int ret = 0;
   8542	struct ufs_clk_info *clki;
   8543	struct device *dev = hba->dev;
   8544	struct list_head *head = &hba->clk_list_head;
   8545
   8546	if (list_empty(head))
   8547		goto out;
   8548
   8549	list_for_each_entry(clki, head, list) {
   8550		if (!clki->name)
   8551			continue;
   8552
   8553		clki->clk = devm_clk_get(dev, clki->name);
   8554		if (IS_ERR(clki->clk)) {
   8555			ret = PTR_ERR(clki->clk);
   8556			dev_err(dev, "%s: %s clk get failed, %d\n",
   8557					__func__, clki->name, ret);
   8558			goto out;
   8559		}
   8560
   8561		/*
   8562		 * Parse device ref clk freq as per device tree "ref_clk".
   8563		 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
   8564		 * in ufshcd_alloc_host().
   8565		 */
   8566		if (!strcmp(clki->name, "ref_clk"))
   8567			ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
   8568
   8569		if (clki->max_freq) {
   8570			ret = clk_set_rate(clki->clk, clki->max_freq);
   8571			if (ret) {
   8572				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
   8573					__func__, clki->name,
   8574					clki->max_freq, ret);
   8575				goto out;
   8576			}
   8577			clki->curr_freq = clki->max_freq;
   8578		}
   8579		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
   8580				clki->name, clk_get_rate(clki->clk));
   8581	}
   8582out:
   8583	return ret;
   8584}
   8585
   8586static int ufshcd_variant_hba_init(struct ufs_hba *hba)
   8587{
   8588	int err = 0;
   8589
   8590	if (!hba->vops)
   8591		goto out;
   8592
   8593	err = ufshcd_vops_init(hba);
   8594	if (err)
   8595		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
   8596			__func__, ufshcd_get_var_name(hba), err);
   8597out:
   8598	return err;
   8599}
   8600
   8601static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
   8602{
   8603	if (!hba->vops)
   8604		return;
   8605
   8606	ufshcd_vops_exit(hba);
   8607}
   8608
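        /**
         * ufshcd_hba_init - power up the host controller and its resources
         * @hba: per-adapter instance
         *
         * Initialize and enable the HBA regulator, the clocks, the UFS device
         * power rails and the vendor specific (variant) driver, undoing the
         * earlier steps if a later one fails.
         *
         * Returns 0 on success, non-zero value on failure.
         */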
   8609static int ufshcd_hba_init(struct ufs_hba *hba)
   8610{
   8611	int err;
   8612
   8613	/*
    8614	 * Handle host controller power separately from the UFS device power
    8615	 * rails, as this makes it easier to control host controller power
    8616	 * collapse, which is different from UFS device power collapse.
    8617	 * Also, enable the host controller power before we go ahead with the
    8618	 * rest of the initialization here.
   8619	 */
   8620	err = ufshcd_init_hba_vreg(hba);
   8621	if (err)
   8622		goto out;
   8623
   8624	err = ufshcd_setup_hba_vreg(hba, true);
   8625	if (err)
   8626		goto out;
   8627
   8628	err = ufshcd_init_clocks(hba);
   8629	if (err)
   8630		goto out_disable_hba_vreg;
   8631
   8632	err = ufshcd_setup_clocks(hba, true);
   8633	if (err)
   8634		goto out_disable_hba_vreg;
   8635
   8636	err = ufshcd_init_vreg(hba);
   8637	if (err)
   8638		goto out_disable_clks;
   8639
   8640	err = ufshcd_setup_vreg(hba, true);
   8641	if (err)
   8642		goto out_disable_clks;
   8643
   8644	err = ufshcd_variant_hba_init(hba);
   8645	if (err)
   8646		goto out_disable_vreg;
   8647
   8648	ufs_debugfs_hba_init(hba);
   8649
   8650	hba->is_powered = true;
   8651	goto out;
   8652
   8653out_disable_vreg:
   8654	ufshcd_setup_vreg(hba, false);
   8655out_disable_clks:
   8656	ufshcd_setup_clocks(hba, false);
   8657out_disable_hba_vreg:
   8658	ufshcd_setup_hba_vreg(hba, false);
   8659out:
   8660	return err;
   8661}
   8662
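        /**
         * ufshcd_hba_exit - release the resources acquired by ufshcd_hba_init()
         * @hba: per-adapter instance
         *
         * Tear down clock scaling and gating, the error handler workqueue, the
         * variant driver, the regulators and the clocks if the HBA is powered.
         */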
   8663static void ufshcd_hba_exit(struct ufs_hba *hba)
   8664{
   8665	if (hba->is_powered) {
   8666		ufshcd_exit_clk_scaling(hba);
   8667		ufshcd_exit_clk_gating(hba);
   8668		if (hba->eh_wq)
   8669			destroy_workqueue(hba->eh_wq);
   8670		ufs_debugfs_hba_exit(hba);
   8671		ufshcd_variant_hba_exit(hba);
   8672		ufshcd_setup_vreg(hba, false);
   8673		ufshcd_setup_clocks(hba, false);
   8674		ufshcd_setup_hba_vreg(hba, false);
   8675		hba->is_powered = false;
   8676		ufs_put_device_desc(hba);
   8677	}
   8678}
   8679
   8680/**
   8681 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
   8682 *			     power mode
   8683 * @hba: per adapter instance
   8684 * @pwr_mode: device power mode to set
   8685 *
   8686 * Returns 0 if requested power mode is set successfully
   8687 * Returns < 0 if failed to set the requested power mode
   8688 */
   8689static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
   8690				     enum ufs_dev_pwr_mode pwr_mode)
   8691{
   8692	unsigned char cmd[6] = { START_STOP };
   8693	struct scsi_sense_hdr sshdr;
   8694	struct scsi_device *sdp;
   8695	unsigned long flags;
   8696	int ret, retries;
   8697
   8698	spin_lock_irqsave(hba->host->host_lock, flags);
   8699	sdp = hba->ufs_device_wlun;
   8700	if (sdp) {
   8701		ret = scsi_device_get(sdp);
   8702		if (!ret && !scsi_device_online(sdp)) {
   8703			ret = -ENODEV;
   8704			scsi_device_put(sdp);
   8705		}
   8706	} else {
   8707		ret = -ENODEV;
   8708	}
   8709	spin_unlock_irqrestore(hba->host->host_lock, flags);
   8710
   8711	if (ret)
   8712		return ret;
   8713
   8714	/*
    8715	 * If SCSI commands fail, the SCSI mid-layer schedules SCSI error
    8716	 * handling, which would wait for the host to be resumed. Since we know
    8717	 * we are functional while we are here, skip host resume in the error
    8718	 * handling context.
   8719	 */
   8720	hba->host->eh_noresume = 1;
   8721
   8722	cmd[4] = pwr_mode << 4;
   8723
   8724	/*
    8725	 * This function is generally called from the power management
    8726	 * callbacks, hence set the RQF_PM flag so that it doesn't resume the
    8727	 * already suspended children.
   8728	 */
   8729	for (retries = 3; retries > 0; --retries) {
   8730		ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
   8731				START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
   8732		if (!scsi_status_is_check_condition(ret) ||
   8733				!scsi_sense_valid(&sshdr) ||
   8734				sshdr.sense_key != UNIT_ATTENTION)
   8735			break;
   8736	}
   8737	if (ret) {
   8738		sdev_printk(KERN_WARNING, sdp,
   8739			    "START_STOP failed for power mode: %d, result %x\n",
   8740			    pwr_mode, ret);
   8741		if (ret > 0) {
   8742			if (scsi_sense_valid(&sshdr))
   8743				scsi_print_sense_hdr(sdp, NULL, &sshdr);
   8744			ret = -EIO;
   8745		}
   8746	}
   8747
   8748	if (!ret)
   8749		hba->curr_dev_pwr_mode = pwr_mode;
   8750
   8751	scsi_device_put(sdp);
   8752	hba->host->eh_noresume = 0;
   8753	return ret;
   8754}
   8755
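        /**
         * ufshcd_link_state_transition - move the UniPro link to a new power state
         * @hba: per-adapter instance
         * @req_link_state: requested link state (active, Hibern8 or off)
         * @check_for_bkops: if set, do not turn the link off while auto-bkops is
         *		     enabled
         *
         * Returns 0 on success, non-zero value on failure.
         */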
   8756static int ufshcd_link_state_transition(struct ufs_hba *hba,
   8757					enum uic_link_state req_link_state,
   8758					int check_for_bkops)
   8759{
   8760	int ret = 0;
   8761
   8762	if (req_link_state == hba->uic_link_state)
   8763		return 0;
   8764
   8765	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
   8766		ret = ufshcd_uic_hibern8_enter(hba);
   8767		if (!ret) {
   8768			ufshcd_set_link_hibern8(hba);
   8769		} else {
   8770			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
   8771					__func__, ret);
   8772			goto out;
   8773		}
   8774	}
   8775	/*
   8776	 * If autobkops is enabled, link can't be turned off because
   8777	 * turning off the link would also turn off the device, except in the
   8778	 * case of DeepSleep where the device is expected to remain powered.
   8779	 */
   8780	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
   8781		 (!check_for_bkops || !hba->auto_bkops_enabled)) {
   8782		/*
    8783		 * Let's make sure that the link is in low power mode; we currently
    8784		 * do this by putting the link in Hibern8. The other way to put the
    8785		 * link in low power mode is to send a DME end point reset to the
    8786		 * device and then send the DME reset command to the local UniPro.
    8787		 * But putting the link in Hibern8 is much faster.
   8788		 *
   8789		 * Note also that putting the link in Hibern8 is a requirement
   8790		 * for entering DeepSleep.
   8791		 */
   8792		ret = ufshcd_uic_hibern8_enter(hba);
   8793		if (ret) {
   8794			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
   8795					__func__, ret);
   8796			goto out;
   8797		}
   8798		/*
   8799		 * Change controller state to "reset state" which
   8800		 * should also put the link in off/reset state
   8801		 */
   8802		ufshcd_hba_stop(hba);
   8803		/*
   8804		 * TODO: Check if we need any delay to make sure that
   8805		 * controller is reset
   8806		 */
   8807		ufshcd_set_link_off(hba);
   8808	}
   8809
   8810out:
   8811	return ret;
   8812}
   8813
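        /**
         * ufshcd_vreg_set_lpm - put the UFS device power rails in low power mode
         * @hba: per-adapter instance
         *
         * Depending on the device and link state this turns off VCC (and possibly
         * all rails) or puts VCCQ and VCCQ2 in LPM mode.
         */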
   8814static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
   8815{
   8816	bool vcc_off = false;
   8817
   8818	/*
    8819	 * It seems some UFS devices may keep drawing more than sleep current
    8820	 * (at least for 500us) from the UFS rails (especially from the VCCQ
    8821	 * rail). To avoid this situation, add a 2ms delay before putting
    8822	 * these UFS rails in LPM mode.
   8823	 */
   8824	if (!ufshcd_is_link_active(hba) &&
   8825	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
   8826		usleep_range(2000, 2100);
   8827
   8828	/*
   8829	 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
   8830	 * power.
   8831	 *
   8832	 * If UFS device and link is in OFF state, all power supplies (VCC,
   8833	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
   8834	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
   8835	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
   8836	 *
   8837	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
   8838	 * in low power state which would save some power.
   8839	 *
   8840	 * If Write Booster is enabled and the device needs to flush the WB
   8841	 * buffer OR if bkops status is urgent for WB, keep Vcc on.
   8842	 */
   8843	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
   8844	    !hba->dev_info.is_lu_power_on_wp) {
   8845		ufshcd_setup_vreg(hba, false);
   8846		vcc_off = true;
   8847	} else if (!ufshcd_is_ufs_dev_active(hba)) {
   8848		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
   8849		vcc_off = true;
   8850		if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
   8851			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
   8852			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
   8853		}
   8854	}
   8855
   8856	/*
    8857	 * Some UFS devices require a delay after the VCC power rail is turned off.
   8858	 */
   8859	if (vcc_off && hba->vreg_info.vcc &&
   8860		hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
   8861		usleep_range(5000, 5100);
   8862}
   8863
   8864#ifdef CONFIG_PM
   8865static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
   8866{
   8867	int ret = 0;
   8868
   8869	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
   8870	    !hba->dev_info.is_lu_power_on_wp) {
   8871		ret = ufshcd_setup_vreg(hba, true);
   8872	} else if (!ufshcd_is_ufs_dev_active(hba)) {
   8873		if (!ufshcd_is_link_active(hba)) {
   8874			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
   8875			if (ret)
   8876				goto vcc_disable;
   8877			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
   8878			if (ret)
   8879				goto vccq_lpm;
   8880		}
   8881		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
   8882	}
   8883	goto out;
   8884
   8885vccq_lpm:
   8886	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
   8887vcc_disable:
   8888	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
   8889out:
   8890	return ret;
   8891}
   8892#endif /* CONFIG_PM */
   8893
   8894static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
   8895{
   8896	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
   8897		ufshcd_setup_hba_vreg(hba, false);
   8898}
   8899
   8900static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
   8901{
   8902	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
   8903		ufshcd_setup_hba_vreg(hba, true);
   8904}
   8905
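        /**
         * __ufshcd_wl_suspend - suspend the UFS device W-LU
         * @hba: per-adapter instance
         * @pm_op: runtime, system or shutdown PM operation
         *
         * Put the UFS device and the UniPro link into the low power states
         * selected for @pm_op (from the rpm/spm level, or power-down with the
         * link off for shutdown).
         *
         * Returns 0 on success, non-zero value on failure.
         */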
   8906static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
   8907{
   8908	int ret = 0;
   8909	int check_for_bkops;
   8910	enum ufs_pm_level pm_lvl;
   8911	enum ufs_dev_pwr_mode req_dev_pwr_mode;
   8912	enum uic_link_state req_link_state;
   8913
   8914	hba->pm_op_in_progress = true;
   8915	if (pm_op != UFS_SHUTDOWN_PM) {
   8916		pm_lvl = pm_op == UFS_RUNTIME_PM ?
   8917			 hba->rpm_lvl : hba->spm_lvl;
   8918		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
   8919		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
   8920	} else {
   8921		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
   8922		req_link_state = UIC_LINK_OFF_STATE;
   8923	}
   8924
   8925	ufshpb_suspend(hba);
   8926
   8927	/*
    8928	 * If we can't transition into any of the low power modes,
   8929	 * just gate the clocks.
   8930	 */
   8931	ufshcd_hold(hba, false);
   8932	hba->clk_gating.is_suspended = true;
   8933
   8934	if (ufshcd_is_clkscaling_supported(hba))
   8935		ufshcd_clk_scaling_suspend(hba, true);
   8936
   8937	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
   8938			req_link_state == UIC_LINK_ACTIVE_STATE) {
   8939		goto vops_suspend;
   8940	}
   8941
   8942	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
   8943	    (req_link_state == hba->uic_link_state))
   8944		goto enable_scaling;
   8945
    8946	/* UFS device & link must be active before we enter this function */
   8947	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
   8948		ret = -EINVAL;
   8949		goto enable_scaling;
   8950	}
   8951
   8952	if (pm_op == UFS_RUNTIME_PM) {
   8953		if (ufshcd_can_autobkops_during_suspend(hba)) {
   8954			/*
   8955			 * The device is idle with no requests in the queue,
    8956			 * so allow background operations if the bkops status
    8957			 * shows that performance might be impacted.
   8958			 */
   8959			ret = ufshcd_urgent_bkops(hba);
   8960			if (ret)
   8961				goto enable_scaling;
   8962		} else {
   8963			/* make sure that auto bkops is disabled */
   8964			ufshcd_disable_auto_bkops(hba);
   8965		}
   8966		/*
    8967		 * If the device needs to do BKOPS or a WB buffer flush during
    8968		 * Hibern8, keep the device power mode as "active power mode"
    8969		 * and keep the VCC supply on.
   8970		 */
   8971		hba->dev_info.b_rpm_dev_flush_capable =
   8972			hba->auto_bkops_enabled ||
   8973			(((req_link_state == UIC_LINK_HIBERN8_STATE) ||
   8974			((req_link_state == UIC_LINK_ACTIVE_STATE) &&
   8975			ufshcd_is_auto_hibern8_enabled(hba))) &&
   8976			ufshcd_wb_need_flush(hba));
   8977	}
   8978
   8979	flush_work(&hba->eeh_work);
   8980
   8981	ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
   8982	if (ret)
   8983		goto enable_scaling;
   8984
   8985	if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
   8986		if (pm_op != UFS_RUNTIME_PM)
   8987			/* ensure that bkops is disabled */
   8988			ufshcd_disable_auto_bkops(hba);
   8989
   8990		if (!hba->dev_info.b_rpm_dev_flush_capable) {
   8991			ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
   8992			if (ret)
   8993				goto enable_scaling;
   8994		}
   8995	}
   8996
   8997	/*
   8998	 * In the case of DeepSleep, the device is expected to remain powered
   8999	 * with the link off, so do not check for bkops.
   9000	 */
   9001	check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
   9002	ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
   9003	if (ret)
   9004		goto set_dev_active;
   9005
   9006vops_suspend:
   9007	/*
    9008	 * Call the vendor specific suspend callback. As these callbacks may
    9009	 * access vendor specific host controller register space, call them
    9010	 * while the host clocks are still ON.
   9011	 */
   9012	ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
   9013	if (ret)
   9014		goto set_link_active;
   9015	goto out;
   9016
   9017set_link_active:
   9018	/*
   9019	 * Device hardware reset is required to exit DeepSleep. Also, for
   9020	 * DeepSleep, the link is off so host reset and restore will be done
   9021	 * further below.
   9022	 */
   9023	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
   9024		ufshcd_device_reset(hba);
   9025		WARN_ON(!ufshcd_is_link_off(hba));
   9026	}
   9027	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
   9028		ufshcd_set_link_active(hba);
   9029	else if (ufshcd_is_link_off(hba))
   9030		ufshcd_host_reset_and_restore(hba);
   9031set_dev_active:
   9032	/* Can also get here needing to exit DeepSleep */
   9033	if (ufshcd_is_ufs_dev_deepsleep(hba)) {
   9034		ufshcd_device_reset(hba);
   9035		ufshcd_host_reset_and_restore(hba);
   9036	}
   9037	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
   9038		ufshcd_disable_auto_bkops(hba);
   9039enable_scaling:
   9040	if (ufshcd_is_clkscaling_supported(hba))
   9041		ufshcd_clk_scaling_suspend(hba, false);
   9042
   9043	hba->dev_info.b_rpm_dev_flush_capable = false;
   9044out:
   9045	if (hba->dev_info.b_rpm_dev_flush_capable) {
   9046		schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
   9047			msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
   9048	}
   9049
   9050	if (ret) {
   9051		ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
   9052		hba->clk_gating.is_suspended = false;
   9053		ufshcd_release(hba);
   9054		ufshpb_resume(hba);
   9055	}
   9056	hba->pm_op_in_progress = false;
   9057	return ret;
   9058}
   9059
   9060#ifdef CONFIG_PM
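        /**
         * __ufshcd_wl_resume - resume the UFS device W-LU
         * @hba: per-adapter instance
         * @pm_op: runtime or system PM operation
         *
         * Bring the UniPro link and the UFS device back to the active state and
         * restore the auto-bkops, exception event and auto-hibernate settings.
         *
         * Returns 0 on success, non-zero value on failure.
         */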
   9061static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
   9062{
   9063	int ret;
   9064	enum uic_link_state old_link_state = hba->uic_link_state;
   9065
   9066	hba->pm_op_in_progress = true;
   9067
   9068	/*
    9069	 * Call the vendor specific resume callback. As these callbacks may
    9070	 * access vendor specific host controller register space, call them
    9071	 * when the host clocks are ON.
   9072	 */
   9073	ret = ufshcd_vops_resume(hba, pm_op);
   9074	if (ret)
   9075		goto out;
   9076
   9077	/* For DeepSleep, the only supported option is to have the link off */
   9078	WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
   9079
   9080	if (ufshcd_is_link_hibern8(hba)) {
   9081		ret = ufshcd_uic_hibern8_exit(hba);
   9082		if (!ret) {
   9083			ufshcd_set_link_active(hba);
   9084		} else {
   9085			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
   9086					__func__, ret);
   9087			goto vendor_suspend;
   9088		}
   9089	} else if (ufshcd_is_link_off(hba)) {
   9090		/*
   9091		 * A full initialization of the host and the device is
   9092		 * required since the link was put to off during suspend.
   9093		 * Note, in the case of DeepSleep, the device will exit
   9094		 * DeepSleep due to device reset.
   9095		 */
   9096		ret = ufshcd_reset_and_restore(hba);
   9097		/*
   9098		 * ufshcd_reset_and_restore() should have already
   9099		 * set the link state as active
   9100		 */
   9101		if (ret || !ufshcd_is_link_active(hba))
   9102			goto vendor_suspend;
   9103	}
   9104
   9105	if (!ufshcd_is_ufs_dev_active(hba)) {
   9106		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
   9107		if (ret)
   9108			goto set_old_link_state;
   9109	}
   9110
   9111	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
   9112		ufshcd_enable_auto_bkops(hba);
   9113	else
   9114		/*
    9115		 * If BKOPS is urgently needed at this moment, then keep
    9116		 * auto-bkops enabled, or else disable it.
   9117		 */
   9118		ufshcd_urgent_bkops(hba);
   9119
   9120	if (hba->ee_usr_mask)
   9121		ufshcd_write_ee_control(hba);
   9122
   9123	if (ufshcd_is_clkscaling_supported(hba))
   9124		ufshcd_clk_scaling_suspend(hba, false);
   9125
   9126	if (hba->dev_info.b_rpm_dev_flush_capable) {
   9127		hba->dev_info.b_rpm_dev_flush_capable = false;
   9128		cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
   9129	}
   9130
   9131	/* Enable Auto-Hibernate if configured */
   9132	ufshcd_auto_hibern8_enable(hba);
   9133
   9134	ufshpb_resume(hba);
   9135	goto out;
   9136
   9137set_old_link_state:
   9138	ufshcd_link_state_transition(hba, old_link_state, 0);
   9139vendor_suspend:
   9140	ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
   9141	ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
   9142out:
   9143	if (ret)
   9144		ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
   9145	hba->clk_gating.is_suspended = false;
   9146	ufshcd_release(hba);
   9147	hba->pm_op_in_progress = false;
   9148	return ret;
   9149}
   9150
   9151static int ufshcd_wl_runtime_suspend(struct device *dev)
   9152{
   9153	struct scsi_device *sdev = to_scsi_device(dev);
   9154	struct ufs_hba *hba;
   9155	int ret;
   9156	ktime_t start = ktime_get();
   9157
   9158	hba = shost_priv(sdev->host);
   9159
   9160	ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
   9161	if (ret)
   9162		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
   9163
   9164	trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
   9165		ktime_to_us(ktime_sub(ktime_get(), start)),
   9166		hba->curr_dev_pwr_mode, hba->uic_link_state);
   9167
   9168	return ret;
   9169}
   9170
   9171static int ufshcd_wl_runtime_resume(struct device *dev)
   9172{
   9173	struct scsi_device *sdev = to_scsi_device(dev);
   9174	struct ufs_hba *hba;
   9175	int ret = 0;
   9176	ktime_t start = ktime_get();
   9177
   9178	hba = shost_priv(sdev->host);
   9179
   9180	ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
   9181	if (ret)
   9182		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
   9183
   9184	trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
   9185		ktime_to_us(ktime_sub(ktime_get(), start)),
   9186		hba->curr_dev_pwr_mode, hba->uic_link_state);
   9187
   9188	return ret;
   9189}
   9190#endif
   9191
   9192#ifdef CONFIG_PM_SLEEP
   9193static int ufshcd_wl_suspend(struct device *dev)
   9194{
   9195	struct scsi_device *sdev = to_scsi_device(dev);
   9196	struct ufs_hba *hba;
   9197	int ret = 0;
   9198	ktime_t start = ktime_get();
   9199
   9200	hba = shost_priv(sdev->host);
   9201	down(&hba->host_sem);
   9202
   9203	if (pm_runtime_suspended(dev))
   9204		goto out;
   9205
   9206	ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
   9207	if (ret) {
    9208		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
   9209		up(&hba->host_sem);
   9210	}
   9211
   9212out:
   9213	if (!ret)
   9214		hba->is_sys_suspended = true;
   9215	trace_ufshcd_wl_suspend(dev_name(dev), ret,
   9216		ktime_to_us(ktime_sub(ktime_get(), start)),
   9217		hba->curr_dev_pwr_mode, hba->uic_link_state);
   9218
   9219	return ret;
   9220}
   9221
   9222static int ufshcd_wl_resume(struct device *dev)
   9223{
   9224	struct scsi_device *sdev = to_scsi_device(dev);
   9225	struct ufs_hba *hba;
   9226	int ret = 0;
   9227	ktime_t start = ktime_get();
   9228
   9229	hba = shost_priv(sdev->host);
   9230
   9231	if (pm_runtime_suspended(dev))
   9232		goto out;
   9233
   9234	ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
   9235	if (ret)
   9236		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
   9237out:
   9238	trace_ufshcd_wl_resume(dev_name(dev), ret,
   9239		ktime_to_us(ktime_sub(ktime_get(), start)),
   9240		hba->curr_dev_pwr_mode, hba->uic_link_state);
   9241	if (!ret)
   9242		hba->is_sys_suspended = false;
   9243	up(&hba->host_sem);
   9244	return ret;
   9245}
   9246#endif
   9247
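        /**
         * ufshcd_wl_shutdown - shutdown callback for the UFS device W-LU
         * @dev: device associated with the UFS device W-LU
         *
         * Quiesce all SCSI devices and put the UFS device and link into the
         * shutdown power state.
         */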
   9248static void ufshcd_wl_shutdown(struct device *dev)
   9249{
   9250	struct scsi_device *sdev = to_scsi_device(dev);
   9251	struct ufs_hba *hba;
   9252
   9253	hba = shost_priv(sdev->host);
   9254
   9255	down(&hba->host_sem);
   9256	hba->shutting_down = true;
   9257	up(&hba->host_sem);
   9258
   9259	/* Turn on everything while shutting down */
   9260	ufshcd_rpm_get_sync(hba);
   9261	scsi_device_quiesce(sdev);
   9262	shost_for_each_device(sdev, hba->host) {
   9263		if (sdev == hba->ufs_device_wlun)
   9264			continue;
   9265		scsi_device_quiesce(sdev);
   9266	}
   9267	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
   9268}
   9269
   9270/**
   9271 * ufshcd_suspend - helper function for suspend operations
   9272 * @hba: per adapter instance
   9273 *
    9274 * This function disables the irqs, turns off the clocks and puts the
    9275 * vreg and hba-vreg in LPM mode.
   9276 */
   9277static int ufshcd_suspend(struct ufs_hba *hba)
   9278{
   9279	int ret;
   9280
   9281	if (!hba->is_powered)
   9282		return 0;
   9283	/*
    9284	 * Disable the host irq, as there won't be any host controller
    9285	 * transactions expected till resume.
   9286	 */
   9287	ufshcd_disable_irq(hba);
   9288	ret = ufshcd_setup_clocks(hba, false);
   9289	if (ret) {
   9290		ufshcd_enable_irq(hba);
   9291		return ret;
   9292	}
   9293	if (ufshcd_is_clkgating_allowed(hba)) {
   9294		hba->clk_gating.state = CLKS_OFF;
   9295		trace_ufshcd_clk_gating(dev_name(hba->dev),
   9296					hba->clk_gating.state);
   9297	}
   9298
   9299	ufshcd_vreg_set_lpm(hba);
   9300	/* Put the host controller in low power mode if possible */
   9301	ufshcd_hba_vreg_set_lpm(hba);
   9302	return ret;
   9303}
   9304
   9305#ifdef CONFIG_PM
   9306/**
   9307 * ufshcd_resume - helper function for resume operations
   9308 * @hba: per adapter instance
   9309 *
   9310 * This function basically turns on the regulators, clocks and
   9311 * irqs of the hba.
   9312 *
   9313 * Returns 0 for success and non-zero for failure
   9314 */
   9315static int ufshcd_resume(struct ufs_hba *hba)
   9316{
   9317	int ret;
   9318
   9319	if (!hba->is_powered)
   9320		return 0;
   9321
   9322	ufshcd_hba_vreg_set_hpm(hba);
   9323	ret = ufshcd_vreg_set_hpm(hba);
   9324	if (ret)
   9325		goto out;
   9326
   9327	/* Make sure clocks are enabled before accessing controller */
   9328	ret = ufshcd_setup_clocks(hba, true);
   9329	if (ret)
   9330		goto disable_vreg;
   9331
   9332	/* enable the host irq as host controller would be active soon */
   9333	ufshcd_enable_irq(hba);
   9334	goto out;
   9335
   9336disable_vreg:
   9337	ufshcd_vreg_set_lpm(hba);
   9338out:
   9339	if (ret)
   9340		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
   9341	return ret;
   9342}
   9343#endif /* CONFIG_PM */
   9344
   9345#ifdef CONFIG_PM_SLEEP
   9346/**
   9347 * ufshcd_system_suspend - system suspend callback
   9348 * @dev: Device associated with the UFS controller.
   9349 *
   9350 * Executed before putting the system into a sleep state in which the contents
   9351 * of main memory are preserved.
   9352 *
   9353 * Returns 0 for success and non-zero for failure
   9354 */
   9355int ufshcd_system_suspend(struct device *dev)
   9356{
   9357	struct ufs_hba *hba = dev_get_drvdata(dev);
   9358	int ret = 0;
   9359	ktime_t start = ktime_get();
   9360
   9361	if (pm_runtime_suspended(hba->dev))
   9362		goto out;
   9363
   9364	ret = ufshcd_suspend(hba);
   9365out:
   9366	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
   9367		ktime_to_us(ktime_sub(ktime_get(), start)),
   9368		hba->curr_dev_pwr_mode, hba->uic_link_state);
   9369	return ret;
   9370}
   9371EXPORT_SYMBOL(ufshcd_system_suspend);
   9372
   9373/**
   9374 * ufshcd_system_resume - system resume callback
   9375 * @dev: Device associated with the UFS controller.
   9376 *
   9377 * Executed after waking the system up from a sleep state in which the contents
   9378 * of main memory were preserved.
   9379 *
   9380 * Returns 0 for success and non-zero for failure
   9381 */
   9382int ufshcd_system_resume(struct device *dev)
   9383{
   9384	struct ufs_hba *hba = dev_get_drvdata(dev);
   9385	ktime_t start = ktime_get();
   9386	int ret = 0;
   9387
   9388	if (pm_runtime_suspended(hba->dev))
   9389		goto out;
   9390
   9391	ret = ufshcd_resume(hba);
   9392
   9393out:
   9394	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
   9395		ktime_to_us(ktime_sub(ktime_get(), start)),
   9396		hba->curr_dev_pwr_mode, hba->uic_link_state);
   9397
   9398	return ret;
   9399}
   9400EXPORT_SYMBOL(ufshcd_system_resume);
   9401#endif /* CONFIG_PM_SLEEP */
   9402
   9403#ifdef CONFIG_PM
   9404/**
   9405 * ufshcd_runtime_suspend - runtime suspend callback
   9406 * @dev: Device associated with the UFS controller.
   9407 *
   9408 * Check the description of ufshcd_suspend() function for more details.
   9409 *
   9410 * Returns 0 for success and non-zero for failure
   9411 */
   9412int ufshcd_runtime_suspend(struct device *dev)
   9413{
   9414	struct ufs_hba *hba = dev_get_drvdata(dev);
   9415	int ret;
   9416	ktime_t start = ktime_get();
   9417
   9418	ret = ufshcd_suspend(hba);
   9419
   9420	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
   9421		ktime_to_us(ktime_sub(ktime_get(), start)),
   9422		hba->curr_dev_pwr_mode, hba->uic_link_state);
   9423	return ret;
   9424}
   9425EXPORT_SYMBOL(ufshcd_runtime_suspend);
   9426
   9427/**
   9428 * ufshcd_runtime_resume - runtime resume routine
   9429 * @dev: Device associated with the UFS controller.
   9430 *
    9431 * This function basically brings the controller
    9432 * to the active state. The following operations are done in this function:
   9433 *
   9434 * 1. Turn on all the controller related clocks
   9435 * 2. Turn ON VCC rail
   9436 */
   9437int ufshcd_runtime_resume(struct device *dev)
   9438{
   9439	struct ufs_hba *hba = dev_get_drvdata(dev);
   9440	int ret;
   9441	ktime_t start = ktime_get();
   9442
   9443	ret = ufshcd_resume(hba);
   9444
   9445	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
   9446		ktime_to_us(ktime_sub(ktime_get(), start)),
   9447		hba->curr_dev_pwr_mode, hba->uic_link_state);
   9448	return ret;
   9449}
   9450EXPORT_SYMBOL(ufshcd_runtime_resume);
   9451#endif /* CONFIG_PM */
   9452
   9453/**
   9454 * ufshcd_shutdown - shutdown routine
   9455 * @hba: per adapter instance
   9456 *
    9457 * This function turns off both the UFS device and UFS HBA
    9458 * regulators. It also disables the clocks.
   9459 *
   9460 * Returns 0 always to allow force shutdown even in case of errors.
   9461 */
   9462int ufshcd_shutdown(struct ufs_hba *hba)
   9463{
   9464	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
   9465		goto out;
   9466
   9467	pm_runtime_get_sync(hba->dev);
   9468
   9469	ufshcd_suspend(hba);
   9470out:
   9471	hba->is_powered = false;
   9472	/* allow force shutdown even in case of errors */
   9473	return 0;
   9474}
   9475EXPORT_SYMBOL(ufshcd_shutdown);
   9476
   9477/**
   9478 * ufshcd_remove - de-allocate SCSI host and host memory space
    9479 * ufshcd_remove - de-allocate the SCSI host and the host memory space
    9480 *		data structures
   9481 */
   9482void ufshcd_remove(struct ufs_hba *hba)
   9483{
   9484	if (hba->ufs_device_wlun)
   9485		ufshcd_rpm_get_sync(hba);
   9486	ufs_hwmon_remove(hba);
   9487	ufs_bsg_remove(hba);
   9488	ufshpb_remove(hba);
   9489	ufs_sysfs_remove_nodes(hba->dev);
   9490	blk_cleanup_queue(hba->tmf_queue);
   9491	blk_mq_free_tag_set(&hba->tmf_tag_set);
   9492	scsi_remove_host(hba->host);
   9493	/* disable interrupts */
   9494	ufshcd_disable_intr(hba, hba->intr_mask);
   9495	ufshcd_hba_stop(hba);
   9496	ufshcd_hba_exit(hba);
   9497}
   9498EXPORT_SYMBOL_GPL(ufshcd_remove);
   9499
   9500/**
   9501 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
   9502 * @hba: pointer to Host Bus Adapter (HBA)
   9503 */
   9504void ufshcd_dealloc_host(struct ufs_hba *hba)
   9505{
   9506	scsi_host_put(hba->host);
   9507}
   9508EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
   9509
   9510/**
   9511 * ufshcd_set_dma_mask - Set dma mask based on the controller
   9512 *			 addressing capability
   9513 * @hba: per adapter instance
   9514 *
   9515 * Returns 0 for success, non-zero for failure
   9516 */
   9517static int ufshcd_set_dma_mask(struct ufs_hba *hba)
   9518{
   9519	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
   9520		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
   9521			return 0;
   9522	}
   9523	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
   9524}
   9525
   9526/**
   9527 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
   9528 * @dev: pointer to device handle
   9529 * @hba_handle: driver private handle
   9530 * Returns 0 on success, non-zero value on failure
   9531 */
   9532int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
   9533{
   9534	struct Scsi_Host *host;
   9535	struct ufs_hba *hba;
   9536	int err = 0;
   9537
   9538	if (!dev) {
   9539		dev_err(dev,
    9540		"Invalid memory reference, dev is NULL\n");
   9541		err = -ENODEV;
   9542		goto out_error;
   9543	}
   9544
   9545	host = scsi_host_alloc(&ufshcd_driver_template,
   9546				sizeof(struct ufs_hba));
   9547	if (!host) {
   9548		dev_err(dev, "scsi_host_alloc failed\n");
   9549		err = -ENOMEM;
   9550		goto out_error;
   9551	}
   9552	host->nr_maps = HCTX_TYPE_POLL + 1;
   9553	hba = shost_priv(host);
   9554	hba->host = host;
   9555	hba->dev = dev;
   9556	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
   9557	hba->nop_out_timeout = NOP_OUT_TIMEOUT;
   9558	INIT_LIST_HEAD(&hba->clk_list_head);
   9559	spin_lock_init(&hba->outstanding_lock);
   9560
   9561	*hba_handle = hba;
   9562
   9563out_error:
   9564	return err;
   9565}
   9566EXPORT_SYMBOL(ufshcd_alloc_host);
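
        /*
         * Illustrative sketch of how a UFS glue driver is expected to pair
         * ufshcd_alloc_host() with ufshcd_init() (not lifted from any specific
         * driver; "mmio_base" and "irq" stand for whatever the glue driver has
         * mapped and parsed itself):
         *
         *	struct ufs_hba *hba;
         *	int err;
         *
         *	err = ufshcd_alloc_host(dev, &hba);
         *	if (err)
         *		return err;
         *	err = ufshcd_init(hba, mmio_base, irq);
         *	if (err)
         *		ufshcd_dealloc_host(hba);
         *	return err;
         */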
   9567
   9568/* This function exists because blk_mq_alloc_tag_set() requires this. */
   9569static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
   9570				     const struct blk_mq_queue_data *qd)
   9571{
   9572	WARN_ON_ONCE(true);
   9573	return BLK_STS_NOTSUPP;
   9574}
   9575
   9576static const struct blk_mq_ops ufshcd_tmf_ops = {
   9577	.queue_rq = ufshcd_queue_tmf,
   9578};
   9579
   9580/**
   9581 * ufshcd_init - Driver initialization routine
   9582 * @hba: per-adapter instance
   9583 * @mmio_base: base register address
   9584 * @irq: Interrupt line of device
   9585 * Returns 0 on success, non-zero value on failure
   9586 */
   9587int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
   9588{
   9589	int err;
   9590	struct Scsi_Host *host = hba->host;
   9591	struct device *dev = hba->dev;
   9592	char eh_wq_name[sizeof("ufs_eh_wq_00")];
   9593
   9594	/*
   9595	 * dev_set_drvdata() must be called before any callbacks are registered
   9596	 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
   9597	 * sysfs).
   9598	 */
   9599	dev_set_drvdata(dev, hba);
   9600
   9601	if (!mmio_base) {
   9602		dev_err(hba->dev,
    9603		"Invalid memory reference, mmio_base is NULL\n");
   9604		err = -ENODEV;
   9605		goto out_error;
   9606	}
   9607
   9608	hba->mmio_base = mmio_base;
   9609	hba->irq = irq;
   9610	hba->vps = &ufs_hba_vps;
   9611
   9612	err = ufshcd_hba_init(hba);
   9613	if (err)
   9614		goto out_error;
   9615
   9616	/* Read capabilities registers */
   9617	err = ufshcd_hba_capabilities(hba);
   9618	if (err)
   9619		goto out_disable;
   9620
   9621	/* Get UFS version supported by the controller */
   9622	hba->ufs_version = ufshcd_get_ufs_version(hba);
   9623
   9624	/* Get Interrupt bit mask per version */
   9625	hba->intr_mask = ufshcd_get_intr_mask(hba);
   9626
   9627	err = ufshcd_set_dma_mask(hba);
   9628	if (err) {
   9629		dev_err(hba->dev, "set dma mask failed\n");
   9630		goto out_disable;
   9631	}
   9632
   9633	/* Allocate memory for host memory space */
   9634	err = ufshcd_memory_alloc(hba);
   9635	if (err) {
   9636		dev_err(hba->dev, "Memory allocation failed\n");
   9637		goto out_disable;
   9638	}
   9639
   9640	/* Configure LRB */
   9641	ufshcd_host_memory_configure(hba);
   9642
   9643	host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
   9644	host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
   9645	host->max_id = UFSHCD_MAX_ID;
   9646	host->max_lun = UFS_MAX_LUNS;
   9647	host->max_channel = UFSHCD_MAX_CHANNEL;
   9648	host->unique_id = host->host_no;
   9649	host->max_cmd_len = UFS_CDB_SIZE;
   9650
   9651	hba->max_pwr_info.is_valid = false;
   9652
   9653	/* Initialize work queues */
   9654	snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
   9655		 hba->host->host_no);
   9656	hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
   9657	if (!hba->eh_wq) {
   9658		dev_err(hba->dev, "%s: failed to create eh workqueue\n",
   9659			__func__);
   9660		err = -ENOMEM;
   9661		goto out_disable;
   9662	}
   9663	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
   9664	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
   9665
   9666	sema_init(&hba->host_sem, 1);
   9667
   9668	/* Initialize UIC command mutex */
   9669	mutex_init(&hba->uic_cmd_mutex);
   9670
   9671	/* Initialize mutex for device management commands */
   9672	mutex_init(&hba->dev_cmd.lock);
   9673
   9674	/* Initialize mutex for exception event control */
   9675	mutex_init(&hba->ee_ctrl_mutex);
   9676
   9677	init_rwsem(&hba->clk_scaling_lock);
   9678
   9679	ufshcd_init_clk_gating(hba);
   9680
   9681	ufshcd_init_clk_scaling(hba);
   9682
   9683	/*
   9684	 * In order to avoid any spurious interrupt immediately after
   9685	 * registering UFS controller interrupt handler, clear any pending UFS
   9686	 * interrupt status and disable all the UFS interrupts.
   9687	 */
   9688	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
   9689		      REG_INTERRUPT_STATUS);
   9690	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
   9691	/*
   9692	 * Make sure that UFS interrupts are disabled and any pending interrupt
   9693	 * status is cleared before registering UFS interrupt handler.
   9694	 */
   9695	mb();
   9696
   9697	/* IRQ registration */
   9698	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
   9699	if (err) {
   9700		dev_err(hba->dev, "request irq failed\n");
   9701		goto out_disable;
   9702	} else {
   9703		hba->is_irq_enabled = true;
   9704	}
   9705
   9706	err = scsi_add_host(host, hba->dev);
   9707	if (err) {
   9708		dev_err(hba->dev, "scsi_add_host failed\n");
   9709		goto out_disable;
   9710	}
   9711
   9712	hba->tmf_tag_set = (struct blk_mq_tag_set) {
   9713		.nr_hw_queues	= 1,
   9714		.queue_depth	= hba->nutmrs,
   9715		.ops		= &ufshcd_tmf_ops,
   9716		.flags		= BLK_MQ_F_NO_SCHED,
   9717	};
   9718	err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
   9719	if (err < 0)
   9720		goto out_remove_scsi_host;
   9721	hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
   9722	if (IS_ERR(hba->tmf_queue)) {
   9723		err = PTR_ERR(hba->tmf_queue);
   9724		goto free_tmf_tag_set;
   9725	}
   9726	hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
   9727				    sizeof(*hba->tmf_rqs), GFP_KERNEL);
   9728	if (!hba->tmf_rqs) {
   9729		err = -ENOMEM;
   9730		goto free_tmf_queue;
   9731	}
   9732
   9733	/* Reset the attached device */
   9734	ufshcd_device_reset(hba);
   9735
   9736	ufshcd_init_crypto(hba);
   9737
   9738	/* Host controller enable */
   9739	err = ufshcd_hba_enable(hba);
   9740	if (err) {
   9741		dev_err(hba->dev, "Host controller enable failed\n");
   9742		ufshcd_print_evt_hist(hba);
   9743		ufshcd_print_host_state(hba);
   9744		goto free_tmf_queue;
   9745	}
   9746
   9747	/*
   9748	 * Set the default power management level for runtime and system PM.
   9749	 * Default power saving mode is to keep UFS link in Hibern8 state
   9750	 * and UFS device in sleep state.
   9751	 */
   9752	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
   9753						UFS_SLEEP_PWR_MODE,
   9754						UIC_LINK_HIBERN8_STATE);
   9755	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
   9756						UFS_SLEEP_PWR_MODE,
   9757						UIC_LINK_HIBERN8_STATE);
   9758
   9759	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
   9760			  ufshcd_rpm_dev_flush_recheck_work);
   9761
    9762	/* Set the default auto-hibernate idle timer value to 150 ms */
   9763	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
   9764		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
   9765			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
   9766	}
   9767
   9768	/* Hold auto suspend until async scan completes */
   9769	pm_runtime_get_sync(dev);
   9770	atomic_set(&hba->scsi_block_reqs_cnt, 0);
   9771	/*
    9772	 * We are assuming that the device wasn't put in a sleep/power-down
    9773	 * state during the boot stage, before the kernel took over.
   9774	 * This assumption helps avoid doing link startup twice during
   9775	 * ufshcd_probe_hba().
   9776	 */
   9777	ufshcd_set_ufs_dev_active(hba);
   9778
   9779	async_schedule(ufshcd_async_scan, hba);
   9780	ufs_sysfs_add_nodes(hba->dev);
   9781
   9782	device_enable_async_suspend(dev);
   9783	return 0;
   9784
   9785free_tmf_queue:
   9786	blk_cleanup_queue(hba->tmf_queue);
   9787free_tmf_tag_set:
   9788	blk_mq_free_tag_set(&hba->tmf_tag_set);
   9789out_remove_scsi_host:
   9790	scsi_remove_host(hba->host);
   9791out_disable:
   9792	hba->is_irq_enabled = false;
   9793	ufshcd_hba_exit(hba);
   9794out_error:
   9795	return err;
   9796}
   9797EXPORT_SYMBOL_GPL(ufshcd_init);
   9798
   9799void ufshcd_resume_complete(struct device *dev)
   9800{
   9801	struct ufs_hba *hba = dev_get_drvdata(dev);
   9802
   9803	if (hba->complete_put) {
   9804		ufshcd_rpm_put(hba);
   9805		hba->complete_put = false;
   9806	}
   9807}
   9808EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
   9809
   9810static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
   9811{
   9812	struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
   9813	enum ufs_dev_pwr_mode dev_pwr_mode;
   9814	enum uic_link_state link_state;
   9815	unsigned long flags;
   9816	bool res;
   9817
   9818	spin_lock_irqsave(&dev->power.lock, flags);
   9819	dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
   9820	link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
   9821	res = pm_runtime_suspended(dev) &&
   9822	      hba->curr_dev_pwr_mode == dev_pwr_mode &&
   9823	      hba->uic_link_state == link_state &&
   9824	      !hba->dev_info.b_rpm_dev_flush_capable;
   9825	spin_unlock_irqrestore(&dev->power.lock, flags);
   9826
   9827	return res;
   9828}
   9829
   9830int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
   9831{
   9832	struct ufs_hba *hba = dev_get_drvdata(dev);
   9833	int ret;
   9834
   9835	/*
    9836	 * SCSI assumes that runtime-pm and system-pm for SCSI drivers
    9837	 * are the same, and it doesn't wake up the device for system
    9838	 * suspend if it is runtime suspended. But UFS doesn't follow that.
    9839	 * Refer to ufshcd_resume_complete().
   9840	 */
   9841	if (hba->ufs_device_wlun) {
   9842		/* Prevent runtime suspend */
   9843		ufshcd_rpm_get_noresume(hba);
   9844		/*
   9845		 * Check if already runtime suspended in same state as system
   9846		 * suspend would be.
   9847		 */
   9848		if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
   9849			/* RPM state is not ok for SPM, so runtime resume */
   9850			ret = ufshcd_rpm_resume(hba);
   9851			if (ret < 0 && ret != -EACCES) {
   9852				ufshcd_rpm_put(hba);
   9853				return ret;
   9854			}
   9855		}
   9856		hba->complete_put = true;
   9857	}
   9858	return 0;
   9859}
   9860EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
   9861
   9862int ufshcd_suspend_prepare(struct device *dev)
   9863{
   9864	return __ufshcd_suspend_prepare(dev, true);
   9865}
   9866EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
   9867
   9868#ifdef CONFIG_PM_SLEEP
   9869static int ufshcd_wl_poweroff(struct device *dev)
   9870{
   9871	struct scsi_device *sdev = to_scsi_device(dev);
   9872	struct ufs_hba *hba = shost_priv(sdev->host);
   9873
   9874	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
   9875	return 0;
   9876}
   9877#endif
   9878
   9879static int ufshcd_wl_probe(struct device *dev)
   9880{
   9881	struct scsi_device *sdev = to_scsi_device(dev);
   9882
   9883	if (!is_device_wlun(sdev))
   9884		return -ENODEV;
   9885
   9886	blk_pm_runtime_init(sdev->request_queue, dev);
   9887	pm_runtime_set_autosuspend_delay(dev, 0);
   9888	pm_runtime_allow(dev);
   9889
    9890	return 0;
   9891}
   9892
   9893static int ufshcd_wl_remove(struct device *dev)
   9894{
   9895	pm_runtime_forbid(dev);
   9896	return 0;
   9897}
   9898
   9899static const struct dev_pm_ops ufshcd_wl_pm_ops = {
   9900#ifdef CONFIG_PM_SLEEP
   9901	.suspend = ufshcd_wl_suspend,
   9902	.resume = ufshcd_wl_resume,
   9903	.freeze = ufshcd_wl_suspend,
   9904	.thaw = ufshcd_wl_resume,
   9905	.poweroff = ufshcd_wl_poweroff,
   9906	.restore = ufshcd_wl_resume,
   9907#endif
   9908	SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
   9909};
   9910
   9911/*
    9912 * ufs_dev_wlun_template - describes the UFS device wlun
    9913 * ufs-device wlun - used to send PM commands
    9914 * All LUNs are consumers of the ufs-device wlun.
    9915 *
    9916 * Currently, no sd driver is present for wluns.
    9917 * Hence no specific PM operations are performed.
    9918 * Per the UFS design, SSU should be sent to the ufs-device wlun.
    9919 * Hence register a SCSI driver for UFS wluns only.
   9920 */
   9921static struct scsi_driver ufs_dev_wlun_template = {
   9922	.gendrv = {
   9923		.name = "ufs_device_wlun",
   9924		.owner = THIS_MODULE,
   9925		.probe = ufshcd_wl_probe,
   9926		.remove = ufshcd_wl_remove,
   9927		.pm = &ufshcd_wl_pm_ops,
   9928		.shutdown = ufshcd_wl_shutdown,
   9929	},
   9930};
   9931
   9932static int __init ufshcd_core_init(void)
   9933{
   9934	int ret;
   9935
   9936	/* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
   9937	static_assert(sizeof(struct utp_transfer_cmd_desc) ==
   9938		      2 * ALIGNED_UPIU_SIZE +
   9939			      SG_ALL * sizeof(struct ufshcd_sg_entry));
   9940
   9941	ufs_debugfs_init();
   9942
   9943	ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
   9944	if (ret)
   9945		ufs_debugfs_exit();
   9946	return ret;
   9947}
   9948
   9949static void __exit ufshcd_core_exit(void)
   9950{
   9951	ufs_debugfs_exit();
   9952	scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
   9953}
   9954
   9955module_init(ufshcd_core_init);
   9956module_exit(ufshcd_core_exit);
   9957
    9958MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
   9959MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
   9960MODULE_DESCRIPTION("Generic UFS host controller driver Core");
   9961MODULE_LICENSE("GPL");