// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller driver Core
 */

0012 #include <linux/async.h>
0013 #include <linux/devfreq.h>
0014 #include <linux/nls.h>
0015 #include <linux/of.h>
0016 #include <linux/bitfield.h>
0017 #include <linux/blk-pm.h>
0018 #include <linux/blkdev.h>
0019 #include <linux/clk.h>
0020 #include <linux/delay.h>
0021 #include <linux/interrupt.h>
0022 #include <linux/module.h>
0023 #include <linux/regulator/consumer.h>
0024 #include <scsi/scsi_cmnd.h>
0025 #include <scsi/scsi_dbg.h>
0026 #include <scsi/scsi_driver.h>
0027 #include <scsi/scsi_eh.h>
0028 #include "ufshcd-priv.h"
0029 #include <ufs/ufs_quirks.h>
0030 #include <ufs/unipro.h>
0031 #include "ufs-sysfs.h"
0032 #include "ufs-debugfs.h"
0033 #include "ufs-fault-injection.h"
0034 #include "ufs_bsg.h"
0035 #include "ufshcd-crypto.h"
0036 #include "ufshpb.h"
0037 #include <asm/unaligned.h>
0038
0039 #define CREATE_TRACE_POINTS
0040 #include <trace/events/ufs.h>
0041
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    50 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* Default runtime PM autosuspend delay, unit: ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */

#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do {			\
	size_t __len = (len);						\
	print_hex_dump(KERN_ERR, prefix_str,				\
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);			\
} while (0)
0107
0108 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
0109 const char *prefix)
0110 {
0111 u32 *regs;
0112 size_t pos;
0113
0114 if (offset % 4 != 0 || len % 4 != 0)
0115 return -EINVAL;
0116
0117 regs = kzalloc(len, GFP_ATOMIC);
0118 if (!regs)
0119 return -ENOMEM;
0120
0121 for (pos = 0; pos < len; pos += 4) {
0122 if (offset == 0 &&
0123 pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
0124 pos <= REG_UIC_ERROR_CODE_DME)
0125 continue;
0126 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
0127 }
0128
0129 ufshcd_hex_dump(prefix, regs, len);
0130 kfree(regs);
0131
0132 return 0;
0133 }
0134 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
0135
0136 enum {
0137 UFSHCD_MAX_CHANNEL = 0,
0138 UFSHCD_MAX_ID = 1,
0139 UFSHCD_NUM_RESERVED = 1,
0140 UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
0141 UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
0142 };
0143
0144 static const char *const ufshcd_state_name[] = {
0145 [UFSHCD_STATE_RESET] = "reset",
0146 [UFSHCD_STATE_OPERATIONAL] = "operational",
0147 [UFSHCD_STATE_ERROR] = "error",
0148 [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal",
0149 [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal",
0150 };

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
0158 enum {
0159 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0),
0160 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1),
0161 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2),
0162 UFSHCD_UIC_NL_ERROR = (1 << 3),
0163 UFSHCD_UIC_TL_ERROR = (1 << 4),
0164 UFSHCD_UIC_DME_ERROR = (1 << 5),
0165 UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6),
0166 };
0167
0168 #define ufshcd_set_eh_in_progress(h) \
0169 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
0170 #define ufshcd_eh_in_progress(h) \
0171 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
0172 #define ufshcd_clear_eh_in_progress(h) \
0173 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
0174
0175 const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
0176 [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
0177 [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
0178 [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
0179 [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
0180 [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
0181 [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
	/*
	 * For DeepSleep, the link is first put in hibern8 and then off.
	 * Leaving the link in hibern8 is not supported.
	 */
0186 [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
0187 };
0188
0189 static inline enum ufs_dev_pwr_mode
0190 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
0191 {
0192 return ufs_pm_lvl_states[lvl].dev_state;
0193 }
0194
0195 static inline enum uic_link_state
0196 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
0197 {
0198 return ufs_pm_lvl_states[lvl].link_state;
0199 }
0200
0201 static inline enum ufs_pm_level
0202 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
0203 enum uic_link_state link_state)
0204 {
0205 enum ufs_pm_level lvl;
0206
0207 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
0208 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
0209 (ufs_pm_lvl_states[lvl].link_state == link_state))
0210 return lvl;
0211 }

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
0215 }
0216
0217 static const struct ufs_dev_quirk ufs_fixups[] = {
	/* UFS cards deviations table */
0219 { .wmanufacturerid = UFS_VENDOR_MICRON,
0220 .model = UFS_ANY_MODEL,
0221 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
0222 UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ },
0223 { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
0224 .model = UFS_ANY_MODEL,
0225 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
0226 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
0227 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
0228 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
0229 .model = UFS_ANY_MODEL,
0230 .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
0231 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "hB8aL1",
0233 .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
0234 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
0235 .model = UFS_ANY_MODEL,
0236 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
0237 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
0238 .model = "THGLF2G9C8KBADG",
0239 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
0240 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
0241 .model = "THGLF2G9D8KBADG",
0242 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
0243 {}
0244 };
0245
0246 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
0247 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
0248 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
0249 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
0250 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
0251 static void ufshcd_hba_exit(struct ufs_hba *hba);
0252 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
0253 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
0254 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
0255 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
0256 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
0257 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
0258 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
0259 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
0260 static irqreturn_t ufshcd_intr(int irq, void *__hba);
0261 static int ufshcd_change_power_mode(struct ufs_hba *hba,
0262 struct ufs_pa_layer_attr *pwr_mode);
0263 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
0264 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
0265 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
0266 struct ufs_vreg *vreg);
0267 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
0268 static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
0269 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
0270 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
0271 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
0272
0273 static inline void ufshcd_enable_irq(struct ufs_hba *hba)
0274 {
0275 if (!hba->is_irq_enabled) {
0276 enable_irq(hba->irq);
0277 hba->is_irq_enabled = true;
0278 }
0279 }
0280
0281 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
0282 {
0283 if (hba->is_irq_enabled) {
0284 disable_irq(hba->irq);
0285 hba->is_irq_enabled = false;
0286 }
0287 }
0288
0289 static inline void ufshcd_wb_config(struct ufs_hba *hba)
0290 {
0291 if (!ufshcd_is_wb_allowed(hba))
0292 return;
0293
0294 ufshcd_wb_toggle(hba, true);
0295
0296 ufshcd_wb_toggle_flush_during_h8(hba, true);
0297 if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
0298 ufshcd_wb_toggle_flush(hba, true);
0299 }
0300
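/*
 * Reference-counted wrappers around scsi_block_requests()/
 * scsi_unblock_requests(): the SCSI midlayer is blocked when the counter goes
 * from 0 to 1 and only unblocked again once every block call has been
 * balanced by a matching unblock.
 */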
0301 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
0302 {
0303 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
0304 scsi_unblock_requests(hba->host);
0305 }
0306
0307 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
0308 {
0309 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
0310 scsi_block_requests(hba->host);
0311 }
0312
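/*
 * Trace the request or response UPIU for the given tag: for UFS_CMD_SEND the
 * request UPIU header and CDB are logged, otherwise the response UPIU header
 * is used.
 */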
0313 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
0314 enum ufs_trace_str_t str_t)
0315 {
0316 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
0317 struct utp_upiu_header *header;
0318
0319 if (!trace_ufshcd_upiu_enabled())
0320 return;
0321
0322 if (str_t == UFS_CMD_SEND)
0323 header = &rq->header;
0324 else
0325 header = &hba->lrb[tag].ucd_rsp_ptr->header;
0326
0327 trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
0328 UFS_TSF_CDB);
0329 }
0330
0331 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
0332 enum ufs_trace_str_t str_t,
0333 struct utp_upiu_req *rq_rsp)
0334 {
0335 if (!trace_ufshcd_upiu_enabled())
0336 return;
0337
0338 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
0339 &rq_rsp->qr, UFS_TSF_OSF);
0340 }
0341
0342 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
0343 enum ufs_trace_str_t str_t)
0344 {
0345 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
0346
0347 if (!trace_ufshcd_upiu_enabled())
0348 return;
0349
0350 if (str_t == UFS_TM_SEND)
0351 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
0352 &descp->upiu_req.req_header,
0353 &descp->upiu_req.input_param1,
0354 UFS_TSF_TM_INPUT);
0355 else
0356 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
0357 &descp->upiu_rsp.rsp_header,
0358 &descp->upiu_rsp.output_param1,
0359 UFS_TSF_TM_OUTPUT);
0360 }
0361
0362 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
0363 const struct uic_command *ucmd,
0364 enum ufs_trace_str_t str_t)
0365 {
0366 u32 cmd;
0367
0368 if (!trace_ufshcd_uic_command_enabled())
0369 return;
0370
0371 if (str_t == UFS_CMD_SEND)
0372 cmd = ucmd->command;
0373 else
0374 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
0375
0376 trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
0377 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
0378 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
0379 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
0380 }
0381
0382 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
0383 enum ufs_trace_str_t str_t)
0384 {
0385 u64 lba = 0;
0386 u8 opcode = 0, group_id = 0;
0387 u32 intr, doorbell;
0388 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
0389 struct scsi_cmnd *cmd = lrbp->cmd;
0390 struct request *rq = scsi_cmd_to_rq(cmd);
0391 int transfer_len = -1;
0392
0393 if (!cmd)
0394 return;
0395
0396
0397 ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
0398 if (!trace_ufshcd_command_enabled())
0399 return;
0400
0401 opcode = cmd->cmnd[0];
0402
0403 if (opcode == READ_10 || opcode == WRITE_10) {
		/*
		 * Currently we only fully trace read(10) and write(10) commands
		 */
0407 transfer_len =
0408 be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
0409 lba = scsi_get_lba(cmd);
0410 if (opcode == WRITE_10)
0411 group_id = lrbp->cmd->cmnd[6];
0412 } else if (opcode == UNMAP) {
		/*
		 * The number of Bytes to be unmapped beginning with the lba.
		 */
0416 transfer_len = blk_rq_bytes(rq);
0417 lba = scsi_get_lba(cmd);
0418 }
0419
0420 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
0421 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
0422 trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
0423 doorbell, transfer_len, intr, lba, opcode, group_id);
0424 }
0425
0426 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
0427 {
0428 struct ufs_clk_info *clki;
0429 struct list_head *head = &hba->clk_list_head;
0430
0431 if (list_empty(head))
0432 return;
0433
0434 list_for_each_entry(clki, head, list) {
0435 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
0436 clki->max_freq)
0437 dev_err(hba->dev, "clk: %s, rate: %u\n",
0438 clki->name, clki->curr_freq);
0439 }
0440 }
0441
0442 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
0443 const char *err_name)
0444 {
0445 int i;
0446 bool found = false;
0447 const struct ufs_event_hist *e;
0448
0449 if (id >= UFS_EVT_CNT)
0450 return;
0451
0452 e = &hba->ufs_stats.event[id];
0453
0454 for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
0455 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
0456
0457 if (e->tstamp[p] == 0)
0458 continue;
0459 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
0460 e->val[p], ktime_to_us(e->tstamp[p]));
0461 found = true;
0462 }
0463
0464 if (!found)
0465 dev_err(hba->dev, "No record of %s\n", err_name);
0466 else
0467 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
0468 }
0469
0470 static void ufshcd_print_evt_hist(struct ufs_hba *hba)
0471 {
0472 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
0473
0474 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
0475 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
0476 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
0477 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
0478 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
0479 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
0480 "auto_hibern8_err");
0481 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
0482 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
0483 "link_startup_fail");
0484 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
0485 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
0486 "suspend_fail");
0487 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
0488 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
0489 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
0490
0491 ufshcd_vops_dbg_register_dump(hba);
0492 }
0493
0494 static
0495 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
0496 {
0497 const struct ufshcd_lrb *lrbp;
0498 int prdt_length;
0499 int tag;
0500
0501 for_each_set_bit(tag, &bitmap, hba->nutrs) {
0502 lrbp = &hba->lrb[tag];
0503
0504 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
0505 tag, ktime_to_us(lrbp->issue_time_stamp));
0506 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
0507 tag, ktime_to_us(lrbp->compl_time_stamp));
0508 dev_err(hba->dev,
0509 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
0510 tag, (u64)lrbp->utrd_dma_addr);
0511
0512 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
0513 sizeof(struct utp_transfer_req_desc));
0514 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
0515 (u64)lrbp->ucd_req_dma_addr);
0516 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
0517 sizeof(struct utp_upiu_req));
0518 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
0519 (u64)lrbp->ucd_rsp_dma_addr);
0520 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
0521 sizeof(struct utp_upiu_rsp));
0522
0523 prdt_length = le16_to_cpu(
0524 lrbp->utr_descriptor_ptr->prd_table_length);
0525 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
0526 prdt_length /= sizeof(struct ufshcd_sg_entry);
0527
0528 dev_err(hba->dev,
0529 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
0530 tag, prdt_length,
0531 (u64)lrbp->ucd_prdt_dma_addr);
0532
0533 if (pr_prdt)
0534 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
0535 sizeof(struct ufshcd_sg_entry) * prdt_length);
0536 }
0537 }
0538
0539 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
0540 {
0541 int tag;
0542
0543 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
0544 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
0545
0546 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
0547 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
0548 }
0549 }
0550
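/* Dump the current host state, error flags, PM state and power info. */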
0551 static void ufshcd_print_host_state(struct ufs_hba *hba)
0552 {
0553 const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
0554
0555 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
0556 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
0557 hba->outstanding_reqs, hba->outstanding_tasks);
0558 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
0559 hba->saved_err, hba->saved_uic_err);
0560 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
0561 hba->curr_dev_pwr_mode, hba->uic_link_state);
0562 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
0563 hba->pm_op_in_progress, hba->is_sys_suspended);
0564 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
0565 hba->auto_bkops_enabled, hba->host->host_self_blocked);
0566 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
0567 dev_err(hba->dev,
0568 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
0569 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
0570 hba->ufs_stats.hibern8_exit_cnt);
0571 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
0572 ktime_to_us(hba->ufs_stats.last_intr_ts),
0573 hba->ufs_stats.last_intr_status);
0574 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
0575 hba->eh_flags, hba->req_abort_count);
0576 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
0577 hba->ufs_version, hba->capabilities, hba->caps);
0578 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
0579 hba->dev_quirks);
0580 if (sdev_ufs)
0581 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
0582 sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
0583
0584 ufshcd_print_clk_freqs(hba);
0585 }
0586
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
0592 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
0593 {
0594 static const char * const names[] = {
0595 "INVALID MODE",
0596 "FAST MODE",
0597 "SLOW_MODE",
0598 "INVALID MODE",
0599 "FASTAUTO_MODE",
0600 "SLOWAUTO_MODE",
0601 "INVALID MODE",
0602 };

	/*
	 * Using dev_dbg to avoid messages during runtime PM to avoid
	 * never-ending cycles of messages written back to storage by user space
	 * causing runtime resume, causing more messages and so on.
	 */
0609 dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
0610 __func__,
0611 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
0612 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
0613 names[hba->pwr_info.pwr_rx],
0614 names[hba->pwr_info.pwr_tx],
0615 hba->pwr_info.hs_rate);
0616 }
0617
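/*
 * Reset the UFS device through the vendor-specific device_reset vop. On
 * success the device is marked active and any WriteBooster state is cleared;
 * the result is recorded in the UFS_EVT_DEV_RESET event history unless the
 * vop is not implemented (-EOPNOTSUPP).
 */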
0618 static void ufshcd_device_reset(struct ufs_hba *hba)
0619 {
0620 int err;
0621
0622 err = ufshcd_vops_device_reset(hba);
0623
0624 if (!err) {
0625 ufshcd_set_ufs_dev_active(hba);
0626 if (ufshcd_is_wb_allowed(hba)) {
0627 hba->dev_info.wb_enabled = false;
0628 hba->dev_info.wb_buf_flush_enabled = false;
0629 }
0630 }
0631 if (err != -EOPNOTSUPP)
0632 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
0633 }
0634
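/**
 * ufshcd_delay_us - busy-wait or sleep for the requested time
 * @us: delay in microseconds
 * @tolerance: additional slack, in microseconds, allowed when sleeping
 *
 * Delays shorter than 10 us are busy-waited with udelay(), longer delays use
 * usleep_range().
 */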
0635 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
0636 {
0637 if (!us)
0638 return;
0639
0640 if (us < 10)
0641 udelay(us);
0642 else
0643 usleep_range(us, us + tolerance);
0644 }
0645 EXPORT_SYMBOL_GPL(ufshcd_delay_us);

/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return:
 * -ETIMEDOUT on error, zero on success.
 */
0659 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
0660 u32 val, unsigned long interval_us,
0661 unsigned long timeout_ms)
0662 {
0663 int err = 0;
0664 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
0665
	/* ignore bits that we don't intend to wait on */
	val = val & mask;
0668
0669 while ((ufshcd_readl(hba, reg) & mask) != val) {
0670 usleep_range(interval_us, interval_us + 50);
0671 if (time_after(jiffies, timeout)) {
0672 if ((ufshcd_readl(hba, reg) & mask) != val)
0673 err = -ETIMEDOUT;
0674 break;
0675 }
0676 }
0677
0678 return err;
0679 }

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
0687 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
0688 {
0689 if (hba->ufs_version == ufshci_version(1, 0))
0690 return INTERRUPT_MASK_ALL_VER_10;
0691 if (hba->ufs_version <= ufshci_version(2, 0))
0692 return INTERRUPT_MASK_ALL_VER_11;
0693
0694 return INTERRUPT_MASK_ALL_VER_21;
0695 }

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
0703 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
0704 {
0705 u32 ufshci_ver;
0706
0707 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
0708 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
0709 else
0710 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);

	/*
	 * UFSHCI v1.x uses a different version scheme, in order
	 * to allow the use of comparisons with the ufshci_version
	 * function, we convert it to the same scheme as ufs 2.0+.
	 */
0717 if (ufshci_ver & 0x00010000)
0718 return ufshci_version(1, ufshci_ver & 0x00000100);
0719
0720 return ufshci_ver;
0721 }

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
0730 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
0731 {
0732 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
0733 }

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
0742 static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
0743 {
0744 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
0745 }

/**
 * ufshcd_utrl_clear() - Clear requests from the controller request list.
 * @hba: per adapter instance
 * @mask: mask with one bit set for each request to be cleared
 */
0752 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
0753 {
0754 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
0755 mask = ~mask;

	/*
	 * From the UFSHCI specification: the UTP Transfer Request List Clear
	 * Register (UTRLCLR) is bit significant. Each bit corresponds to a
	 * slot in the UTP Transfer Request List, where bit 0 corresponds to
	 * request slot 0. Host software sets a bit to '0' to indicate to the
	 * host controller that a transfer request slot is cleared. The host
	 * controller shall free up any resources associated to the request
	 * slot immediately, and shall set the associated bit in UTRLDBR to
	 * '0'. The host controller shall also update UTRLCLR with the value
	 * '1'.
	 */
0769 ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
0770 }

/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
0777 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
0778 {
0779 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
0780 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
0781 else
0782 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
0783 }

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
0791 static inline int ufshcd_get_lists_status(u32 reg)
0792 {
0793 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
0794 }

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
0803 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
0804 {
0805 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
0806 MASK_UIC_COMMAND_RESULT;
0807 }

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
0816 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
0817 {
0818 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
0819 }

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
0825 static inline int
0826 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
0827 {
0828 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
0829 }

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
0838 static inline int
0839 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
0840 {
0841 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
0842 }

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
0851 static inline unsigned int
0852 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
0853 {
0854 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
0855 MASK_RSP_UPIU_DATA_SEG_LEN;
0856 }

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
0867 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
0868 {
0869 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
0870 MASK_RSP_EXCEPTION_EVENT;
0871 }

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
0877 static inline void
0878 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
0879 {
0880 ufshcd_writel(hba, INT_AGGR_ENABLE |
0881 INT_AGGR_COUNTER_AND_TIMER_RESET,
0882 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
0883 }

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
0891 static inline void
0892 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
0893 {
0894 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
0895 INT_AGGR_COUNTER_THLD_VAL(cnt) |
0896 INT_AGGR_TIMEOUT_VAL(tmout),
0897 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
0898 }

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
0904 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
0905 {
0906 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
0907 }

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
0915 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
0916 {
0917 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
0918 REG_UTP_TASK_REQ_LIST_RUN_STOP);
0919 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
0920 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
0921 }

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
0927 static inline void ufshcd_hba_start(struct ufs_hba *hba)
0928 {
0929 u32 val = CONTROLLER_ENABLE;
0930
0931 if (ufshcd_crypto_enable(hba))
0932 val |= CRYPTO_GENERAL_ENABLE;
0933
0934 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
0935 }

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns true if and only if the controller is active.
 */
0943 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
0944 {
0945 return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
0946 }
0947
0948 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
0949 {
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
0951 if (hba->ufs_version <= ufshci_version(1, 1))
0952 return UFS_UNIPRO_VER_1_41;
0953 else
0954 return UFS_UNIPRO_VER_1_6;
0955 }
0956 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
0957
0958 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
0959 {
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
0969 return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
0970 }

/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
0980 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
0981 {
0982 int ret = 0;
0983 struct ufs_clk_info *clki;
0984 struct list_head *head = &hba->clk_list_head;
0985
0986 if (list_empty(head))
0987 goto out;
0988
0989 list_for_each_entry(clki, head, list) {
0990 if (!IS_ERR_OR_NULL(clki->clk)) {
0991 if (scale_up && clki->max_freq) {
0992 if (clki->curr_freq == clki->max_freq)
0993 continue;
0994
0995 ret = clk_set_rate(clki->clk, clki->max_freq);
0996 if (ret) {
0997 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
0998 __func__, clki->name,
0999 clki->max_freq, ret);
1000 break;
1001 }
1002 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1003 "scaled up", clki->name,
1004 clki->curr_freq,
1005 clki->max_freq);
1006
1007 clki->curr_freq = clki->max_freq;
1008
1009 } else if (!scale_up && clki->min_freq) {
1010 if (clki->curr_freq == clki->min_freq)
1011 continue;
1012
1013 ret = clk_set_rate(clki->clk, clki->min_freq);
1014 if (ret) {
1015 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1016 __func__, clki->name,
1017 clki->min_freq, ret);
1018 break;
1019 }
1020 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1021 "scaled down", clki->name,
1022 clki->curr_freq,
1023 clki->min_freq);
1024 clki->curr_freq = clki->min_freq;
1025 }
1026 }
1027 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1028 clki->name, clk_get_rate(clki->clk));
1029 }
1030
1031 out:
1032 return ret;
1033 }

/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
1043 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1044 {
1045 int ret = 0;
1046 ktime_t start = ktime_get();
1047
1048 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1049 if (ret)
1050 goto out;
1051
1052 ret = ufshcd_set_clk_freq(hba, scale_up);
1053 if (ret)
1054 goto out;
1055
1056 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1057 if (ret)
1058 ufshcd_set_clk_freq(hba, !scale_up);
1059
1060 out:
1061 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1062 (scale_up ? "up" : "down"),
1063 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1064 return ret;
1065 }

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
1074 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1075 bool scale_up)
1076 {
1077 struct ufs_clk_info *clki;
1078 struct list_head *head = &hba->clk_list_head;
1079
1080 if (list_empty(head))
1081 return false;
1082
1083 list_for_each_entry(clki, head, list) {
1084 if (!IS_ERR_OR_NULL(clki->clk)) {
1085 if (scale_up && clki->max_freq) {
1086 if (clki->curr_freq == clki->max_freq)
1087 continue;
1088 return true;
1089 } else if (!scale_up && clki->min_freq) {
1090 if (clki->curr_freq == clki->min_freq)
1091 continue;
1092 return true;
1093 }
1094 }
1095 }
1096
1097 return false;
1098 }

/*
 * Determine the number of pending commands by counting the bits in the SCSI
 * device budget maps. This approach has been selected because a bit is set in
 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
 * flag. The host_self_blocked flag can be modified by calling
 * scsi_block_requests() or scsi_unblock_requests().
 */
1107 static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
1108 {
1109 const struct scsi_device *sdev;
1110 u32 pending = 0;
1111
1112 lockdep_assert_held(hba->host->host_lock);
1113 __shost_for_each_device(sdev, hba->host)
1114 pending += sbitmap_weight(&sdev->budget_map);
1115
1116 return pending;
1117 }
1118
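/*
 * Wait for the task management doorbell and all pending transfer requests to
 * clear, or until @wait_timeout_us elapses. Returns -EBUSY on timeout or if
 * the host leaves the OPERATIONAL state while waiting.
 */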
1119 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1120 u64 wait_timeout_us)
1121 {
1122 unsigned long flags;
1123 int ret = 0;
1124 u32 tm_doorbell;
1125 u32 tr_pending;
1126 bool timeout = false, do_last_check = false;
1127 ktime_t start;
1128
1129 ufshcd_hold(hba, false);
1130 spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
1135 start = ktime_get();
1136 do {
1137 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1138 ret = -EBUSY;
1139 goto out;
1140 }
1141
1142 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1143 tr_pending = ufshcd_pending_cmds(hba);
1144 if (!tm_doorbell && !tr_pending) {
1145 timeout = false;
1146 break;
1147 } else if (do_last_check) {
1148 break;
1149 }
1150
1151 spin_unlock_irqrestore(hba->host->host_lock, flags);
1152 schedule();
1153 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1154 wait_timeout_us) {
1155 timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared again to
			 * avoid false timeout.
			 */
1161 do_last_check = true;
1162 }
1163 spin_lock_irqsave(hba->host->host_lock, flags);
1164 } while (tm_doorbell || tr_pending);
1165
1166 if (timeout) {
1167 dev_err(hba->dev,
1168 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1169 __func__, tm_doorbell, tr_pending);
1170 ret = -EBUSY;
1171 }
1172 out:
1173 spin_unlock_irqrestore(hba->host->host_lock, flags);
1174 ufshcd_release(hba);
1175 return ret;
1176 }

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
1187 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1188 {
1189 int ret = 0;
1190 struct ufs_pa_layer_attr new_pwr_info;
1191
1192 if (scale_up) {
1193 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1194 sizeof(struct ufs_pa_layer_attr));
1195 } else {
1196 memcpy(&new_pwr_info, &hba->pwr_info,
1197 sizeof(struct ufs_pa_layer_attr));
1198
1199 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1200 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1201
1202 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1203 &hba->pwr_info,
1204 sizeof(struct ufs_pa_layer_attr));
1205
1206
1207 new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1208 new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1209 }
1210 }
1211
1212
1213 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1214 if (ret)
1215 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1216 __func__, ret,
1217 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1218 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1219
1220 return ret;
1221 }
1222
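/*
 * Block SCSI requests, take clk_scaling_lock for write and prevent clock
 * gating so that no new commands can be issued while clocks and gear are
 * being changed.
 */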
1223 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1224 {
1225 #define DOORBELL_CLR_TOUT_US (1000 * 1000)
1226 int ret = 0;
	/*
	 * Make sure that there are no outstanding requests when
	 * clock scaling is in progress.
	 */
1231 ufshcd_scsi_block_requests(hba);
1232 down_write(&hba->clk_scaling_lock);
1233
1234 if (!hba->clk_scaling.is_allowed ||
1235 ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1236 ret = -EBUSY;
1237 up_write(&hba->clk_scaling_lock);
1238 ufshcd_scsi_unblock_requests(hba);
1239 goto out;
1240 }
1241
	/* let's not get into low power until clock scaling is completed */
1243 ufshcd_hold(hba, false);
1244
1245 out:
1246 return ret;
1247 }
1248
1249 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
1250 {
1251 if (writelock)
1252 up_write(&hba->clk_scaling_lock);
1253 else
1254 up_read(&hba->clk_scaling_lock);
1255 ufshcd_scsi_unblock_requests(hba);
1256 ufshcd_release(hba);
1257 }

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
1268 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1269 {
1270 int ret = 0;
1271 bool is_writelock = true;
1272
1273 ret = ufshcd_clock_scaling_prepare(hba);
1274 if (ret)
1275 return ret;
1276
	/* scale down the gear before scaling down clocks */
1278 if (!scale_up) {
1279 ret = ufshcd_scale_gear(hba, false);
1280 if (ret)
1281 goto out_unprepare;
1282 }
1283
1284 ret = ufshcd_scale_clks(hba, scale_up);
1285 if (ret) {
1286 if (!scale_up)
1287 ufshcd_scale_gear(hba, true);
1288 goto out_unprepare;
1289 }
1290
	/* scale up the gear after scaling up clocks */
1292 if (scale_up) {
1293 ret = ufshcd_scale_gear(hba, true);
1294 if (ret) {
1295 ufshcd_scale_clks(hba, false);
1296 goto out_unprepare;
1297 }
1298 }
1299
	/* Enable Write Booster if we have scaled up else disable it */
1301 downgrade_write(&hba->clk_scaling_lock);
1302 is_writelock = false;
1303 ufshcd_wb_toggle(hba, scale_up);
1304
1305 out_unprepare:
1306 ufshcd_clock_scaling_unprepare(hba, is_writelock);
1307 return ret;
1308 }
1309
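/* devfreq suspend/resume helpers run from the clock-scaling workqueue */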
1310 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1311 {
1312 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1313 clk_scaling.suspend_work);
1314 unsigned long irq_flags;
1315
1316 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1317 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1318 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1319 return;
1320 }
1321 hba->clk_scaling.is_suspended = true;
1322 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1323
1324 __ufshcd_suspend_clkscaling(hba);
1325 }
1326
1327 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1328 {
1329 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1330 clk_scaling.resume_work);
1331 unsigned long irq_flags;
1332
1333 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1334 if (!hba->clk_scaling.is_suspended) {
1335 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1336 return;
1337 }
1338 hba->clk_scaling.is_suspended = false;
1339 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1340
1341 devfreq_resume_device(hba->devfreq);
1342 }
1343
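/*
 * devfreq target callback: round the requested frequency to the first clock's
 * min or max rate and scale clocks and gear accordingly.
 */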
1344 static int ufshcd_devfreq_target(struct device *dev,
1345 unsigned long *freq, u32 flags)
1346 {
1347 int ret = 0;
1348 struct ufs_hba *hba = dev_get_drvdata(dev);
1349 ktime_t start;
1350 bool scale_up, sched_clk_scaling_suspend_work = false;
1351 struct list_head *clk_list = &hba->clk_list_head;
1352 struct ufs_clk_info *clki;
1353 unsigned long irq_flags;
1354
1355 if (!ufshcd_is_clkscaling_supported(hba))
1356 return -EINVAL;
1357
1358 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1359
1360 *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1361 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1362 if (ufshcd_eh_in_progress(hba)) {
1363 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1364 return 0;
1365 }
1366
1367 if (!hba->clk_scaling.active_reqs)
1368 sched_clk_scaling_suspend_work = true;
1369
1370 if (list_empty(clk_list)) {
1371 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1372 goto out;
1373 }
1374
	/* Decide based on the rounded-off frequency and update */
1376 scale_up = *freq == clki->max_freq;
1377 if (!scale_up)
1378 *freq = clki->min_freq;
1379
1380 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1381 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1382 ret = 0;
1383 goto out;
1384 }
1385 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1386
1387 start = ktime_get();
1388 ret = ufshcd_devfreq_scale(hba, scale_up);
1389
1390 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1391 (scale_up ? "up" : "down"),
1392 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1393
1394 out:
1395 if (sched_clk_scaling_suspend_work)
1396 queue_work(hba->clk_scaling.workq,
1397 &hba->clk_scaling.suspend_work);
1398
1399 return ret;
1400 }
1401
1402 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1403 struct devfreq_dev_status *stat)
1404 {
1405 struct ufs_hba *hba = dev_get_drvdata(dev);
1406 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1407 unsigned long flags;
1408 struct list_head *clk_list = &hba->clk_list_head;
1409 struct ufs_clk_info *clki;
1410 ktime_t curr_t;
1411
1412 if (!ufshcd_is_clkscaling_supported(hba))
1413 return -EINVAL;
1414
1415 memset(stat, 0, sizeof(*stat));
1416
1417 spin_lock_irqsave(hba->host->host_lock, flags);
1418 curr_t = ktime_get();
1419 if (!scaling->window_start_t)
1420 goto start_window;
1421
1422 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	/*
	 * If current frequency is 0, then the ondemand governor considers
	 * there's no initial frequency set. And it always requests to set
	 * to max. frequency.
	 */
1428 stat->current_frequency = clki->curr_freq;
1429 if (scaling->is_busy_started)
1430 scaling->tot_busy_t += ktime_us_delta(curr_t,
1431 scaling->busy_start_t);
1432
1433 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1434 stat->busy_time = scaling->tot_busy_t;
1435 start_window:
1436 scaling->window_start_t = curr_t;
1437 scaling->tot_busy_t = 0;
1438
1439 if (hba->outstanding_reqs) {
1440 scaling->busy_start_t = curr_t;
1441 scaling->is_busy_started = true;
1442 } else {
1443 scaling->busy_start_t = 0;
1444 scaling->is_busy_started = false;
1445 }
1446 spin_unlock_irqrestore(hba->host->host_lock, flags);
1447 return 0;
1448 }
1449
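/*
 * Register the HBA with devfreq, using the min/max rates of the first clock
 * in clk_list_head as the two OPPs.
 */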
1450 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1451 {
1452 struct list_head *clk_list = &hba->clk_list_head;
1453 struct ufs_clk_info *clki;
1454 struct devfreq *devfreq;
1455 int ret;
1456
	/* Skip devfreq if we don't have any clocks in the list */
1458 if (list_empty(clk_list))
1459 return 0;
1460
1461 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1462 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1463 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1464
1465 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1466 &hba->vps->ondemand_data);
1467 devfreq = devfreq_add_device(hba->dev,
1468 &hba->vps->devfreq_profile,
1469 DEVFREQ_GOV_SIMPLE_ONDEMAND,
1470 &hba->vps->ondemand_data);
1471 if (IS_ERR(devfreq)) {
1472 ret = PTR_ERR(devfreq);
1473 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1474
1475 dev_pm_opp_remove(hba->dev, clki->min_freq);
1476 dev_pm_opp_remove(hba->dev, clki->max_freq);
1477 return ret;
1478 }
1479
1480 hba->devfreq = devfreq;
1481
1482 return 0;
1483 }
1484
1485 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1486 {
1487 struct list_head *clk_list = &hba->clk_list_head;
1488 struct ufs_clk_info *clki;
1489
1490 if (!hba->devfreq)
1491 return;
1492
1493 devfreq_remove_device(hba->devfreq);
1494 hba->devfreq = NULL;
1495
1496 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1497 dev_pm_opp_remove(hba->dev, clki->min_freq);
1498 dev_pm_opp_remove(hba->dev, clki->max_freq);
1499 }
1500
1501 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1502 {
1503 unsigned long flags;
1504
1505 devfreq_suspend_device(hba->devfreq);
1506 spin_lock_irqsave(hba->host->host_lock, flags);
1507 hba->clk_scaling.window_start_t = 0;
1508 spin_unlock_irqrestore(hba->host->host_lock, flags);
1509 }
1510
1511 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1512 {
1513 unsigned long flags;
1514 bool suspend = false;
1515
1516 cancel_work_sync(&hba->clk_scaling.suspend_work);
1517 cancel_work_sync(&hba->clk_scaling.resume_work);
1518
1519 spin_lock_irqsave(hba->host->host_lock, flags);
1520 if (!hba->clk_scaling.is_suspended) {
1521 suspend = true;
1522 hba->clk_scaling.is_suspended = true;
1523 }
1524 spin_unlock_irqrestore(hba->host->host_lock, flags);
1525
1526 if (suspend)
1527 __ufshcd_suspend_clkscaling(hba);
1528 }
1529
1530 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1531 {
1532 unsigned long flags;
1533 bool resume = false;
1534
1535 spin_lock_irqsave(hba->host->host_lock, flags);
1536 if (hba->clk_scaling.is_suspended) {
1537 resume = true;
1538 hba->clk_scaling.is_suspended = false;
1539 }
1540 spin_unlock_irqrestore(hba->host->host_lock, flags);
1541
1542 if (resume)
1543 devfreq_resume_device(hba->devfreq);
1544 }
1545
1546 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1547 struct device_attribute *attr, char *buf)
1548 {
1549 struct ufs_hba *hba = dev_get_drvdata(dev);
1550
1551 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
1552 }
1553
1554 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1555 struct device_attribute *attr, const char *buf, size_t count)
1556 {
1557 struct ufs_hba *hba = dev_get_drvdata(dev);
1558 u32 value;
1559 int err = 0;
1560
1561 if (kstrtou32(buf, 0, &value))
1562 return -EINVAL;
1563
1564 down(&hba->host_sem);
1565 if (!ufshcd_is_user_access_allowed(hba)) {
1566 err = -EBUSY;
1567 goto out;
1568 }
1569
1570 value = !!value;
1571 if (value == hba->clk_scaling.is_enabled)
1572 goto out;
1573
1574 ufshcd_rpm_get_sync(hba);
1575 ufshcd_hold(hba, false);
1576
1577 hba->clk_scaling.is_enabled = value;
1578
1579 if (value) {
1580 ufshcd_resume_clkscaling(hba);
1581 } else {
1582 ufshcd_suspend_clkscaling(hba);
1583 err = ufshcd_devfreq_scale(hba, true);
1584 if (err)
1585 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1586 __func__, err);
1587 }
1588
1589 ufshcd_release(hba);
1590 ufshcd_rpm_put_sync(hba);
1591 out:
1592 up(&hba->host_sem);
1593 return err ? err : count;
1594 }
1595
1596 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
1597 {
1598 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1599 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1600 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1601 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1602 hba->clk_scaling.enable_attr.attr.mode = 0644;
1603 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1604 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1605 }
1606
1607 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1608 {
1609 if (hba->clk_scaling.enable_attr.attr.name)
1610 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1611 }
1612
1613 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1614 {
1615 char wq_name[sizeof("ufs_clkscaling_00")];
1616
1617 if (!ufshcd_is_clkscaling_supported(hba))
1618 return;
1619
1620 if (!hba->clk_scaling.min_gear)
1621 hba->clk_scaling.min_gear = UFS_HS_G1;
1622
1623 INIT_WORK(&hba->clk_scaling.suspend_work,
1624 ufshcd_clk_scaling_suspend_work);
1625 INIT_WORK(&hba->clk_scaling.resume_work,
1626 ufshcd_clk_scaling_resume_work);
1627
1628 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1629 hba->host->host_no);
1630 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1631
1632 hba->clk_scaling.is_initialized = true;
1633 }
1634
1635 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1636 {
1637 if (!hba->clk_scaling.is_initialized)
1638 return;
1639
1640 ufshcd_remove_clk_scaling_sysfs(hba);
1641 destroy_workqueue(hba->clk_scaling.workq);
1642 ufshcd_devfreq_remove(hba);
1643 hba->clk_scaling.is_initialized = false;
1644 }
1645
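/*
 * Work item that ungates the clocks: re-enables host regulators, clocks and
 * the IRQ, exits hibern8 if it was entered as part of clock gating, and
 * finally unblocks SCSI requests.
 */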
1646 static void ufshcd_ungate_work(struct work_struct *work)
1647 {
1648 int ret;
1649 unsigned long flags;
1650 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1651 clk_gating.ungate_work);
1652
1653 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1654
1655 spin_lock_irqsave(hba->host->host_lock, flags);
1656 if (hba->clk_gating.state == CLKS_ON) {
1657 spin_unlock_irqrestore(hba->host->host_lock, flags);
1658 goto unblock_reqs;
1659 }
1660
1661 spin_unlock_irqrestore(hba->host->host_lock, flags);
1662 ufshcd_hba_vreg_set_hpm(hba);
1663 ufshcd_setup_clocks(hba, true);
1664
1665 ufshcd_enable_irq(hba);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
1671 if (ufshcd_is_link_hibern8(hba)) {
1672 ret = ufshcd_uic_hibern8_exit(hba);
1673 if (ret)
1674 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1675 __func__, ret);
1676 else
1677 ufshcd_set_link_active(hba);
1678 }
1679 hba->clk_gating.is_suspended = false;
1680 }
1681 unblock_reqs:
1682 ufshcd_scsi_unblock_requests(hba);
1683 }

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
1691 int ufshcd_hold(struct ufs_hba *hba, bool async)
1692 {
1693 int rc = 0;
1694 bool flush_result;
1695 unsigned long flags;
1696
1697 if (!ufshcd_is_clkgating_allowed(hba) ||
1698 !hba->clk_gating.is_initialized)
1699 goto out;
1700 spin_lock_irqsave(hba->host->host_lock, flags);
1701 hba->clk_gating.active_reqs++;
1702
1703 start:
1704 switch (hba->clk_gating.state) {
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
1714 if (ufshcd_can_hibern8_during_gating(hba) &&
1715 ufshcd_is_link_hibern8(hba)) {
1716 if (async) {
1717 rc = -EAGAIN;
1718 hba->clk_gating.active_reqs--;
1719 break;
1720 }
1721 spin_unlock_irqrestore(hba->host->host_lock, flags);
1722 flush_result = flush_work(&hba->clk_gating.ungate_work);
1723 if (hba->clk_gating.is_suspended && !flush_result)
1724 goto out;
1725 spin_lock_irqsave(hba->host->host_lock, flags);
1726 goto start;
1727 }
1728 break;
1729 case REQ_CLKS_OFF:
1730 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1731 hba->clk_gating.state = CLKS_ON;
1732 trace_ufshcd_clk_gating(dev_name(hba->dev),
1733 hba->clk_gating.state);
1734 break;
1735 }
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
1741 fallthrough;
1742 case CLKS_OFF:
1743 hba->clk_gating.state = REQ_CLKS_ON;
1744 trace_ufshcd_clk_gating(dev_name(hba->dev),
1745 hba->clk_gating.state);
1746 if (queue_work(hba->clk_gating.clk_gating_workq,
1747 &hba->clk_gating.ungate_work))
1748 ufshcd_scsi_block_requests(hba);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
1753 fallthrough;
1754 case REQ_CLKS_ON:
1755 if (async) {
1756 rc = -EAGAIN;
1757 hba->clk_gating.active_reqs--;
1758 break;
1759 }
1760
1761 spin_unlock_irqrestore(hba->host->host_lock, flags);
1762 flush_work(&hba->clk_gating.ungate_work);
1763
1764 spin_lock_irqsave(hba->host->host_lock, flags);
1765 goto start;
1766 default:
1767 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1768 __func__, hba->clk_gating.state);
1769 break;
1770 }
1771 spin_unlock_irqrestore(hba->host->host_lock, flags);
1772 out:
1773 return rc;
1774 }
1775 EXPORT_SYMBOL_GPL(ufshcd_hold);
1776
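/*
 * Delayed work item that gates the clocks once the controller has been idle
 * for clk_gating.delay_ms: the link is put in hibern8 (if allowed), the IRQ
 * and clocks are disabled and the host regulators go to low power mode.
 */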
1777 static void ufshcd_gate_work(struct work_struct *work)
1778 {
1779 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1780 clk_gating.gate_work.work);
1781 unsigned long flags;
1782 int ret;
1783
1784 spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
1791 if (hba->clk_gating.is_suspended ||
1792 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1793 hba->clk_gating.state = CLKS_ON;
1794 trace_ufshcd_clk_gating(dev_name(hba->dev),
1795 hba->clk_gating.state);
1796 goto rel_lock;
1797 }
1798
1799 if (hba->clk_gating.active_reqs
1800 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1801 || hba->outstanding_reqs || hba->outstanding_tasks
1802 || hba->active_uic_cmd || hba->uic_async_done)
1803 goto rel_lock;
1804
1805 spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
1808 if (ufshcd_can_hibern8_during_gating(hba)) {
1809 ret = ufshcd_uic_hibern8_enter(hba);
1810 if (ret) {
1811 hba->clk_gating.state = CLKS_ON;
1812 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1813 __func__, ret);
1814 trace_ufshcd_clk_gating(dev_name(hba->dev),
1815 hba->clk_gating.state);
1816 goto out;
1817 }
1818 ufshcd_set_link_hibern8(hba);
1819 }
1820
1821 ufshcd_disable_irq(hba);
1822
1823 ufshcd_setup_clocks(hba, false);
1824
	/* Put the host controller in low power mode if possible */
1826 ufshcd_hba_vreg_set_lpm(hba);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state to CLKS_OFF only if gate work is in
	 * REQ_CLKS_OFF state.
	 */
1836 spin_lock_irqsave(hba->host->host_lock, flags);
1837 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1838 hba->clk_gating.state = CLKS_OFF;
1839 trace_ufshcd_clk_gating(dev_name(hba->dev),
1840 hba->clk_gating.state);
1841 }
1842 rel_lock:
1843 spin_unlock_irqrestore(hba->host->host_lock, flags);
1844 out:
1845 return;
1846 }
1847
/* host lock must be held before calling this variant */
1849 static void __ufshcd_release(struct ufs_hba *hba)
1850 {
1851 if (!ufshcd_is_clkgating_allowed(hba))
1852 return;
1853
1854 hba->clk_gating.active_reqs--;
1855
1856 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1857 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1858 hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
1859 hba->active_uic_cmd || hba->uic_async_done ||
1860 hba->clk_gating.state == CLKS_OFF)
1861 return;
1862
1863 hba->clk_gating.state = REQ_CLKS_OFF;
1864 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1865 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1866 &hba->clk_gating.gate_work,
1867 msecs_to_jiffies(hba->clk_gating.delay_ms));
1868 }
1869
1870 void ufshcd_release(struct ufs_hba *hba)
1871 {
1872 unsigned long flags;
1873
1874 spin_lock_irqsave(hba->host->host_lock, flags);
1875 __ufshcd_release(hba);
1876 spin_unlock_irqrestore(hba->host->host_lock, flags);
1877 }
1878 EXPORT_SYMBOL_GPL(ufshcd_release);
1879
1880 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1881 struct device_attribute *attr, char *buf)
1882 {
1883 struct ufs_hba *hba = dev_get_drvdata(dev);
1884
1885 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
1886 }
1887
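/* Update the clock gating delay (in ms) under the host lock. */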
1888 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
1889 {
1890 struct ufs_hba *hba = dev_get_drvdata(dev);
1891 unsigned long flags;
1892
1893 spin_lock_irqsave(hba->host->host_lock, flags);
1894 hba->clk_gating.delay_ms = value;
1895 spin_unlock_irqrestore(hba->host->host_lock, flags);
1896 }
1897 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
1898
1899 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1900 struct device_attribute *attr, const char *buf, size_t count)
1901 {
1902 unsigned long value;
1903
1904 if (kstrtoul(buf, 0, &value))
1905 return -EINVAL;
1906
1907 ufshcd_clkgate_delay_set(dev, value);
1908 return count;
1909 }
1910
1911 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1912 struct device_attribute *attr, char *buf)
1913 {
1914 struct ufs_hba *hba = dev_get_drvdata(dev);
1915
1916 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
1917 }
1918
1919 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1920 struct device_attribute *attr, const char *buf, size_t count)
1921 {
1922 struct ufs_hba *hba = dev_get_drvdata(dev);
1923 unsigned long flags;
1924 u32 value;
1925
1926 if (kstrtou32(buf, 0, &value))
1927 return -EINVAL;
1928
1929 value = !!value;
1930
1931 spin_lock_irqsave(hba->host->host_lock, flags);
1932 if (value == hba->clk_gating.is_enabled)
1933 goto out;
1934
1935 if (value)
1936 __ufshcd_release(hba);
1937 else
1938 hba->clk_gating.active_reqs++;
1939
1940 hba->clk_gating.is_enabled = value;
1941 out:
1942 spin_unlock_irqrestore(hba->host->host_lock, flags);
1943 return count;
1944 }
1945
1946 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
1947 {
1948 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1949 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1950 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1951 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1952 hba->clk_gating.delay_attr.attr.mode = 0644;
1953 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1954 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1955
1956 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1957 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1958 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1959 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1960 hba->clk_gating.enable_attr.attr.mode = 0644;
1961 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1962 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1963 }
1964
1965 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
1966 {
1967 if (hba->clk_gating.delay_attr.attr.name)
1968 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1969 if (hba->clk_gating.enable_attr.attr.name)
1970 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1971 }
1972
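/*
 * Set up clock gating: default 150 ms gate delay, the gate/ungate work items,
 * a dedicated ordered workqueue and the clkgate_* sysfs attributes.
 */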
1973 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1974 {
1975 char wq_name[sizeof("ufs_clk_gating_00")];
1976
1977 if (!ufshcd_is_clkgating_allowed(hba))
1978 return;
1979
1980 hba->clk_gating.state = CLKS_ON;
1981
1982 hba->clk_gating.delay_ms = 150;
1983 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1984 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1985
1986 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1987 hba->host->host_no);
1988 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1989 WQ_MEM_RECLAIM | WQ_HIGHPRI);
1990
1991 ufshcd_init_clk_gating_sysfs(hba);
1992
1993 hba->clk_gating.is_enabled = true;
1994 hba->clk_gating.is_initialized = true;
1995 }
1996
1997 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1998 {
1999 if (!hba->clk_gating.is_initialized)
2000 return;
2001
2002 ufshcd_remove_clk_gating_sysfs(hba);
2003
	/* Ungate the clock if necessary. */
2005 ufshcd_hold(hba, false);
2006 hba->clk_gating.is_initialized = false;
2007 ufshcd_release(hba);
2008
2009 destroy_workqueue(hba->clk_gating.clk_gating_workq);
2010 }
2011
2012
2013 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2014 {
2015 bool queue_resume_work = false;
2016 ktime_t curr_t = ktime_get();
2017 unsigned long flags;
2018
2019 if (!ufshcd_is_clkscaling_supported(hba))
2020 return;
2021
2022 spin_lock_irqsave(hba->host->host_lock, flags);
2023 if (!hba->clk_scaling.active_reqs++)
2024 queue_resume_work = true;
2025
2026 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
2027 spin_unlock_irqrestore(hba->host->host_lock, flags);
2028 return;
2029 }
2030
2031 if (queue_resume_work)
2032 queue_work(hba->clk_scaling.workq,
2033 &hba->clk_scaling.resume_work);
2034
2035 if (!hba->clk_scaling.window_start_t) {
2036 hba->clk_scaling.window_start_t = curr_t;
2037 hba->clk_scaling.tot_busy_t = 0;
2038 hba->clk_scaling.is_busy_started = false;
2039 }
2040
2041 if (!hba->clk_scaling.is_busy_started) {
2042 hba->clk_scaling.busy_start_t = curr_t;
2043 hba->clk_scaling.is_busy_started = true;
2044 }
2045 spin_unlock_irqrestore(hba->host->host_lock, flags);
2046 }
2047
2048 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2049 {
2050 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2051 unsigned long flags;
2052
2053 if (!ufshcd_is_clkscaling_supported(hba))
2054 return;
2055
2056 spin_lock_irqsave(hba->host->host_lock, flags);
2057 hba->clk_scaling.active_reqs--;
2058 if (!hba->outstanding_reqs && scaling->is_busy_started) {
2059 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2060 scaling->busy_start_t));
2061 scaling->busy_start_t = 0;
2062 scaling->is_busy_started = false;
2063 }
2064 spin_unlock_irqrestore(hba->host->host_lock, flags);
2065 }
2066
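/* Map a SCSI READ/WRITE opcode to a READ or WRITE direction, or -EINVAL. */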
2067 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2068 {
2069 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2070 return READ;
2071 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2072 return WRITE;
2073 else
2074 return -EINVAL;
2075 }
2076
2077 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2078 struct ufshcd_lrb *lrbp)
2079 {
2080 const struct ufs_hba_monitor *m = &hba->monitor;
2081
2082 return (m->enabled && lrbp && lrbp->cmd &&
2083 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2084 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2085 }
2086
2087 static void ufshcd_start_monitor(struct ufs_hba *hba,
2088 const struct ufshcd_lrb *lrbp)
2089 {
2090 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2091 unsigned long flags;
2092
2093 spin_lock_irqsave(hba->host->host_lock, flags);
2094 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2095 hba->monitor.busy_start_ts[dir] = ktime_get();
2096 spin_unlock_irqrestore(hba->host->host_lock, flags);
2097 }
2098
2099 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
2100 {
2101 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2102 unsigned long flags;
2103
2104 spin_lock_irqsave(hba->host->host_lock, flags);
2105 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2106 const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
2107 struct ufs_hba_monitor *m = &hba->monitor;
2108 ktime_t now, inc, lat;
2109
2110 now = lrbp->compl_time_stamp;
2111 inc = ktime_sub(now, m->busy_start_ts[dir]);
2112 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2113 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2114
		/* Update latencies */
2116 m->nr_req[dir]++;
2117 lat = ktime_sub(now, lrbp->issue_time_stamp);
2118 m->lat_sum[dir] += lat;
2119 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2120 m->lat_max[dir] = lat;
2121 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2122 m->lat_min[dir] = lat;
2123
2124 m->nr_queued[dir]--;
2125
		/* Push forward the busy start of monitor */
2127 }
2128 spin_unlock_irqrestore(hba->host->host_lock, flags);
2129 }

/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
2136 static inline
2137 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
2138 {
2139 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2140 unsigned long flags;
2141
2142 lrbp->issue_time_stamp = ktime_get();
2143 lrbp->compl_time_stamp = ktime_set(0, 0);
2144 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2145 ufshcd_clk_scaling_start_busy(hba);
2146 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2147 ufshcd_start_monitor(hba, lrbp);
2148
2149 spin_lock_irqsave(&hba->outstanding_lock, flags);
2150 if (hba->vops && hba->vops->setup_xfer_req)
2151 hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
2152 __set_bit(task_tag, &hba->outstanding_reqs);
2153 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2154 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2155 }
2156
2157
2158
2159
2160
2161 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2162 {
2163 u8 *const sense_buffer = lrbp->cmd->sense_buffer;
2164 int len;
2165
2166 if (sense_buffer &&
2167 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
2168 int len_to_copy;
2169
2170 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2171 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2172
2173 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2174 len_to_copy);
2175 }
2176 }
2177
2178
2179
2180
2181
2182
2183
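/**
 * ufshcd_copy_query_response() - Copy the Query Response and, for READ_DESC
 * requests, the returned descriptor data into the caller's buffers
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 on success, -EINVAL if the response does not fit in the
 * descriptor buffer supplied with the query request.
 */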
2184 static
2185 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2186 {
2187 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2188
2189 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2190
2191
2192 if (hba->dev_cmd.query.descriptor &&
2193 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2194 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2195 GENERAL_UPIU_REQUEST_SIZE;
2196 u16 resp_len;
2197 u16 buf_len;
2198
2199
2200 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2201 MASK_QUERY_DATA_SEG_LEN;
2202 buf_len = be16_to_cpu(
2203 hba->dev_cmd.query.request.upiu_req.length);
2204 if (likely(buf_len >= resp_len)) {
2205 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2206 } else {
2207 dev_warn(hba->dev,
2208 "%s: rsp size %d is bigger than buffer size %d",
2209 __func__, resp_len, buf_len);
2210 return -EINVAL;
2211 }
2212 }
2213
2214 return 0;
2215 }
2216
2217
2218
2219
2220
2221
2222
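/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 *
 * Reads the capabilities register, derives the transfer and task management
 * queue depths plus the reserved device-command slot, and initializes the
 * crypto capabilities.
 *
 * Return: 0 on success, negative on error.
 */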
2223 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2224 {
2225 int err;
2226
2227 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2228 if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
2229 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
2230
2231
2232 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2233 hba->nutmrs =
2234 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2235 hba->reserved_slot = hba->nutrs - 1;
2236
2237
2238 err = ufshcd_hba_init_crypto_capabilities(hba);
2239 if (err)
2240 dev_err(hba->dev, "crypto setup failed\n");
2241
2242 return err;
2243 }
2244
2245
2246
2247
2248
2249
2250
2251 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2252 {
2253 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
2254 }
2255
2256
2257
2258
2259
2260
2261
2262
2263 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2264 {
2265 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2266 }
2267
2268
2269
2270
2271
2272
2273 static inline void
2274 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2275 {
2276 lockdep_assert_held(&hba->uic_cmd_mutex);
2277
2278 WARN_ON(hba->active_uic_cmd);
2279
2280 hba->active_uic_cmd = uic_cmd;
2281
2282
2283 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2284 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2285 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2286
2287 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2288
2289
2290 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2291 REG_UIC_COMMAND);
2292 }
2293
2294
2295
2296
2297
2298
2299
2300
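/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with hba->uic_cmd_mutex held.
 *
 * Return: the UIC command result on completion, -ETIMEDOUT otherwise.
 */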
2301 static int
2302 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2303 {
2304 int ret;
2305 unsigned long flags;
2306
2307 lockdep_assert_held(&hba->uic_cmd_mutex);
2308
2309 if (wait_for_completion_timeout(&uic_cmd->done,
2310 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2311 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2312 } else {
2313 ret = -ETIMEDOUT;
2314 dev_err(hba->dev,
2315 "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2316 uic_cmd->command, uic_cmd->argument3);
2317
2318 if (!uic_cmd->cmd_active) {
2319 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2320 __func__);
2321 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2322 }
2323 }
2324
2325 spin_lock_irqsave(hba->host->host_lock, flags);
2326 hba->active_uic_cmd = NULL;
2327 spin_unlock_irqrestore(hba->host->host_lock, flags);
2328
2329 return ret;
2330 }
2331
2332
2333
2334
2335
2336
2337
2338
2339
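/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: whether to initialize the completion before dispatching
 *
 * Must be called with hba->uic_cmd_mutex and the host lock held.
 *
 * Return: 0 only if success.
 */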
2340 static int
2341 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2342 bool completion)
2343 {
2344 lockdep_assert_held(&hba->uic_cmd_mutex);
2345 lockdep_assert_held(hba->host->host_lock);
2346
2347 if (!ufshcd_ready_for_uic_cmd(hba)) {
2348 dev_err(hba->dev,
2349 "Controller not ready to accept UIC commands\n");
2350 return -EIO;
2351 }
2352
2353 if (completion)
2354 init_completion(&uic_cmd->done);
2355
2356 uic_cmd->cmd_active = 1;
2357 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2358
2359 return 0;
2360 }
2361
2362
2363
2364
2365
2366
2367
2368
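/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Return: 0 only if success.
 */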
2369 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2370 {
2371 int ret;
2372 unsigned long flags;
2373
2374 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2375 return 0;
2376
2377 ufshcd_hold(hba, false);
2378 mutex_lock(&hba->uic_cmd_mutex);
2379 ufshcd_add_delay_before_dme_cmd(hba);
2380
2381 spin_lock_irqsave(hba->host->host_lock, flags);
2382 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2383 spin_unlock_irqrestore(hba->host->host_lock, flags);
2384 if (!ret)
2385 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2386
2387 mutex_unlock(&hba->uic_cmd_mutex);
2388
2389 ufshcd_release(hba);
2390 return ret;
2391 }
2392
2393
2394
2395
2396
2397
2398
2399
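/**
 * ufshcd_map_sg - Map the scatter-gather list to the PRDT
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 in case of success, non-zero value in case of failure.
 */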
2400 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2401 {
2402 struct ufshcd_sg_entry *prd_table;
2403 struct scatterlist *sg;
2404 struct scsi_cmnd *cmd;
2405 int sg_segments;
2406 int i;
2407
2408 cmd = lrbp->cmd;
2409 sg_segments = scsi_dma_map(cmd);
2410 if (sg_segments < 0)
2411 return sg_segments;
2412
2413 if (sg_segments) {
2414
2415 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2416 lrbp->utr_descriptor_ptr->prd_table_length =
2417 cpu_to_le16((sg_segments *
2418 sizeof(struct ufshcd_sg_entry)));
2419 else
2420 lrbp->utr_descriptor_ptr->prd_table_length =
2421 cpu_to_le16(sg_segments);
2422
2423 prd_table = lrbp->ucd_prdt_ptr;
2424
2425 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2426 const unsigned int len = sg_dma_len(sg);
2427
2428
2429
2430
2431
2432
2433
2434
2435
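/*
 * The UFSHCI specification limits the Data Byte Count of a single
 * PRDT entry to 256 KiB, hence the sanity check below.
 */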
2436 WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
2437 prd_table[i].size = cpu_to_le32(len - 1);
2438 prd_table[i].addr = cpu_to_le64(sg->dma_address);
2439 prd_table[i].reserved = 0;
2440 }
2441 } else {
2442 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2443 }
2444
2445 return 0;
2446 }
2447
2448
2449
2450
2451
2452
2453 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2454 {
2455 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2456
2457 if (hba->ufs_version == ufshci_version(1, 0)) {
2458 u32 rw = set & INTERRUPT_MASK_RW_VER_10;
2459
2460 set = rw | ((set ^ intrs) & intrs);
2461 } else {
2462 set |= intrs;
2463 }
2464
2465 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2466 }
2467
2468
2469
2470
2471
2472
2473 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2474 {
2475 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2476
2477 if (hba->ufs_version == ufshci_version(1, 0)) {
2478 u32 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2479 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2480
2481 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2482
2483 } else {
2484 set &= ~intrs;
2485 }
2486
2487 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2488 }
2489
2490
2491
2492
2493
2494
2495
2496
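/**
 * ufshcd_prepare_req_desc_hdr - Fill the UTP Transfer request descriptor
 * header according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requested data direction
 */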
2497 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2498 u8 *upiu_flags, enum dma_data_direction cmd_dir)
2499 {
2500 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2501 u32 data_direction;
2502 u32 dword_0;
2503 u32 dword_1 = 0;
2504 u32 dword_3 = 0;
2505
2506 if (cmd_dir == DMA_FROM_DEVICE) {
2507 data_direction = UTP_DEVICE_TO_HOST;
2508 *upiu_flags = UPIU_CMD_FLAGS_READ;
2509 } else if (cmd_dir == DMA_TO_DEVICE) {
2510 data_direction = UTP_HOST_TO_DEVICE;
2511 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2512 } else {
2513 data_direction = UTP_NO_DATA_TRANSFER;
2514 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2515 }
2516
2517 dword_0 = data_direction | (lrbp->command_type
2518 << UPIU_COMMAND_TYPE_OFFSET);
2519 if (lrbp->intr_cmd)
2520 dword_0 |= UTP_REQ_DESC_INT_CMD;
2521
2522
2523 ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2524
2525
2526 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2527 req_desc->header.dword_1 = cpu_to_le32(dword_1);
2528
2529
2530
2531
2532
2533 req_desc->header.dword_2 =
2534 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2535 req_desc->header.dword_3 = cpu_to_le32(dword_3);
2536
2537 req_desc->prd_table_length = 0;
2538 }
2539
2540
2541
2542
2543
2544
2545
2546 static
2547 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2548 {
2549 struct scsi_cmnd *cmd = lrbp->cmd;
2550 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2551 unsigned short cdb_len;
2552
2553
2554 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2555 UPIU_TRANSACTION_COMMAND, upiu_flags,
2556 lrbp->lun, lrbp->task_tag);
2557 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2558 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2559
2560
2561 ucd_req_ptr->header.dword_2 = 0;
2562
2563 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2564
2565 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2566 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2567 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2568
2569 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2570 }
2571
2572
2573
2574
2575
2576
2577
2578
2579 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2580 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2581 {
2582 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2583 struct ufs_query *query = &hba->dev_cmd.query;
2584 u16 len = be16_to_cpu(query->request.upiu_req.length);
2585
2586
2587 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2588 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2589 lrbp->lun, lrbp->task_tag);
2590 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2591 0, query->request.query_func, 0, 0);
2592
2593
2594 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2595 ucd_req_ptr->header.dword_2 =
2596 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2597 else
2598 ucd_req_ptr->header.dword_2 = 0;
2599
2600
2601 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2602 QUERY_OSF_SIZE);
2603
2604
2605 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2606 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2607
2608 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2609 }
2610
2611 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2612 {
2613 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2614
2615 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2616
2617
2618 ucd_req_ptr->header.dword_0 =
2619 UPIU_HEADER_DWORD(
2620 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2621
2622 ucd_req_ptr->header.dword_1 = 0;
2623 ucd_req_ptr->header.dword_2 = 0;
2624
2625 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2626 }
2627
2628
2629
2630
2631
2632
2633
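/**
 * ufshcd_compose_devman_upiu - Prepare a UPIU for a device management
 * command (NOP OUT or query request)
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; -EINVAL for an unsupported device command type.
 */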
2634 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2635 struct ufshcd_lrb *lrbp)
2636 {
2637 u8 upiu_flags;
2638 int ret = 0;
2639
2640 if (hba->ufs_version <= ufshci_version(1, 1))
2641 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2642 else
2643 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2644
2645 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2646 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2647 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2648 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2649 ufshcd_prepare_utp_nop_upiu(lrbp);
2650 else
2651 ret = -EINVAL;
2652
2653 return ret;
2654 }
2655
2656
2657
2658
2659
2660
2661
2662 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2663 {
2664 u8 upiu_flags;
2665 int ret = 0;
2666
2667 if (hba->ufs_version <= ufshci_version(1, 1))
2668 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2669 else
2670 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2671
2672 if (likely(lrbp->cmd)) {
2673 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2674 lrbp->cmd->sc_data_direction);
2675 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2676 } else {
2677 ret = -EINVAL;
2678 }
2679
2680 return ret;
2681 }
2682
2683
2684
2685
2686
2687
2688
2689 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2690 {
2691 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2692 }
2693
2694 static inline bool is_device_wlun(struct scsi_device *sdev)
2695 {
2696 return sdev->lun ==
2697 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2698 }
2699
2700
2701
2702
2703
2704 static int ufshcd_map_queues(struct Scsi_Host *shost)
2705 {
2706 int i, ret;
2707
2708 for (i = 0; i < shost->nr_maps; i++) {
2709 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
2710
2711 switch (i) {
2712 case HCTX_TYPE_DEFAULT:
2713 case HCTX_TYPE_POLL:
2714 map->nr_queues = 1;
2715 break;
2716 case HCTX_TYPE_READ:
2717 map->nr_queues = 0;
2718 continue;
2719 default:
2720 WARN_ON_ONCE(true);
2721 }
2722 map->queue_offset = 0;
2723 ret = blk_mq_map_queues(map);
2724 WARN_ON_ONCE(ret);
2725 }
2726
2727 return 0;
2728 }
2729
2730 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2731 {
2732 struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2733 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2734 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2735 i * sizeof(struct utp_transfer_cmd_desc);
2736 u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2737 response_upiu);
2738 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2739
2740 lrb->utr_descriptor_ptr = utrdlp + i;
2741 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2742 i * sizeof(struct utp_transfer_req_desc);
2743 lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2744 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2745 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2746 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2747 lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
2748 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2749 }
2750
2751
2752
2753
2754
2755
2756
2757
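/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Return: 0 for success, non-zero in case of failure.
 */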
2758 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2759 {
2760 struct ufs_hba *hba = shost_priv(host);
2761 int tag = scsi_cmd_to_rq(cmd)->tag;
2762 struct ufshcd_lrb *lrbp;
2763 int err = 0;
2764
2765 WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
2766
2767
2768
2769
2770
2771 rcu_read_lock();
2772
2773 switch (hba->ufshcd_state) {
2774 case UFSHCD_STATE_OPERATIONAL:
2775 break;
2776 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2777
2778
2779
2780
2781
2782
2783
2784 if (ufshcd_eh_in_progress(hba)) {
2785 err = SCSI_MLQUEUE_HOST_BUSY;
2786 goto out;
2787 }
2788 break;
2789 case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800 if (hba->pm_op_in_progress) {
2801 hba->force_reset = true;
2802 set_host_byte(cmd, DID_BAD_TARGET);
2803 scsi_done(cmd);
2804 goto out;
2805 }
2806 fallthrough;
2807 case UFSHCD_STATE_RESET:
2808 err = SCSI_MLQUEUE_HOST_BUSY;
2809 goto out;
2810 case UFSHCD_STATE_ERROR:
2811 set_host_byte(cmd, DID_ERROR);
2812 scsi_done(cmd);
2813 goto out;
2814 }
2815
2816 hba->req_abort_count = 0;
2817
2818 err = ufshcd_hold(hba, true);
2819 if (err) {
2820 err = SCSI_MLQUEUE_HOST_BUSY;
2821 goto out;
2822 }
2823 WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2824 (hba->clk_gating.state != CLKS_ON));
2825
2826 lrbp = &hba->lrb[tag];
2827 WARN_ON(lrbp->cmd);
2828 lrbp->cmd = cmd;
2829 lrbp->task_tag = tag;
2830 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2831 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2832
2833 ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
2834
2835 lrbp->req_abort_skip = false;
2836
2837 ufshpb_prep(hba, lrbp);
2838
2839 ufshcd_comp_scsi_upiu(hba, lrbp);
2840
2841 err = ufshcd_map_sg(hba, lrbp);
2842 if (err) {
2843 lrbp->cmd = NULL;
2844 ufshcd_release(hba);
2845 goto out;
2846 }
2847
2848 ufshcd_send_command(hba, tag);
2849
2850 out:
2851 rcu_read_unlock();
2852
2853 if (ufs_trigger_eh()) {
2854 unsigned long flags;
2855
2856 spin_lock_irqsave(hba->host->host_lock, flags);
2857 ufshcd_schedule_eh_work(hba);
2858 spin_unlock_irqrestore(hba->host->host_lock, flags);
2859 }
2860
2861 return err;
2862 }
2863
2864 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2865 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2866 {
2867 lrbp->cmd = NULL;
2868 lrbp->task_tag = tag;
2869 lrbp->lun = 0;
2870 lrbp->intr_cmd = true;
2871 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
2872 hba->dev_cmd.type = cmd_type;
2873
2874 return ufshcd_compose_devman_upiu(hba, lrbp);
2875 }
2876
2877
2878
2879
2880
2881
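/*
 * Clear the transfer requests whose bits are set in @mask and wait until
 * the controller confirms this by clearing the corresponding doorbell bits.
 */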
2882 static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
2883 {
2884 unsigned long flags;
2885
2886
2887 spin_lock_irqsave(hba->host->host_lock, flags);
2888 ufshcd_utrl_clear(hba, mask);
2889 spin_unlock_irqrestore(hba->host->host_lock, flags);
2890
2891
2892
2893
2894
2895 return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
2896 mask, ~mask, 1000, 1000);
2897 }
2898
2899 static int
2900 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2901 {
2902 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2903
2904
2905 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2906 UPIU_RSP_CODE_OFFSET;
2907 return query_res->response;
2908 }
2909
2910
2911
2912
2913
2914
2915 static int
2916 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2917 {
2918 int resp;
2919 int err = 0;
2920
2921 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2922 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2923
2924 switch (resp) {
2925 case UPIU_TRANSACTION_NOP_IN:
2926 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2927 err = -EINVAL;
2928 dev_err(hba->dev, "%s: unexpected response %x\n",
2929 __func__, resp);
2930 }
2931 break;
2932 case UPIU_TRANSACTION_QUERY_RSP:
2933 err = ufshcd_check_query_response(hba, lrbp);
2934 if (!err)
2935 err = ufshcd_copy_query_response(hba, lrbp);
2936 break;
2937 case UPIU_TRANSACTION_REJECT_UPIU:
2938
2939 err = -EPERM;
2940 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2941 __func__);
2942 break;
2943 default:
2944 err = -EINVAL;
2945 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2946 __func__, resp);
2947 break;
2948 }
2949
2950 return err;
2951 }
2952
2953 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2954 struct ufshcd_lrb *lrbp, int max_timeout)
2955 {
2956 unsigned long time_left = msecs_to_jiffies(max_timeout);
2957 unsigned long flags;
2958 bool pending;
2959 int err;
2960
2961 retry:
2962 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2963 time_left);
2964
2965 if (likely(time_left)) {
2966
2967
2968
2969
2970
2971 hba->dev_cmd.complete = NULL;
2972 err = ufshcd_get_tr_ocs(lrbp);
2973 if (!err)
2974 err = ufshcd_dev_cmd_completion(hba, lrbp);
2975 } else {
2976 err = -ETIMEDOUT;
2977 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2978 __func__, lrbp->task_tag);
2979 if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) {
2980
2981 err = -EAGAIN;
2982
2983
2984
2985
2986
2987 spin_lock_irqsave(&hba->outstanding_lock, flags);
2988 pending = test_bit(lrbp->task_tag,
2989 &hba->outstanding_reqs);
2990 if (pending) {
2991 hba->dev_cmd.complete = NULL;
2992 __clear_bit(lrbp->task_tag,
2993 &hba->outstanding_reqs);
2994 }
2995 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2996
2997 if (!pending) {
2998
2999
3000
3001
3002 time_left = 1;
3003 goto retry;
3004 }
3005 } else {
3006 dev_err(hba->dev, "%s: failed to clear tag %d\n",
3007 __func__, lrbp->task_tag);
3008 }
3009 }
3010
3011 return err;
3012 }
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
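/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba: UFS hba
 * @cmd_type: specifies the type (NOP, Query...)
 * @timeout: timeout in milliseconds
 *
 * NOTE: there is only one reserved tag for device management commands,
 * so the caller must hold hba->dev_cmd.lock.
 *
 * Return: 0 upon success; < 0 upon failure.
 */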
3023 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3024 enum dev_cmd_type cmd_type, int timeout)
3025 {
3026 DECLARE_COMPLETION_ONSTACK(wait);
3027 const u32 tag = hba->reserved_slot;
3028 struct ufshcd_lrb *lrbp;
3029 int err;
3030
3031
3032 lockdep_assert_held(&hba->dev_cmd.lock);
3033
3034 down_read(&hba->clk_scaling_lock);
3035
3036 lrbp = &hba->lrb[tag];
3037 WARN_ON(lrbp->cmd);
3038 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3039 if (unlikely(err))
3040 goto out;
3041
3042 hba->dev_cmd.complete = &wait;
3043
3044 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
3045
3046 ufshcd_send_command(hba, tag);
3047 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3048 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
3049 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
3050
3051 out:
3052 up_read(&hba->clk_scaling_lock);
3053 return err;
3054 }
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066 static inline void ufshcd_init_query(struct ufs_hba *hba,
3067 struct ufs_query_req **request, struct ufs_query_res **response,
3068 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3069 {
3070 *request = &hba->dev_cmd.query.request;
3071 *response = &hba->dev_cmd.query.response;
3072 memset(*request, 0, sizeof(struct ufs_query_req));
3073 memset(*response, 0, sizeof(struct ufs_query_res));
3074 (*request)->upiu_req.opcode = opcode;
3075 (*request)->upiu_req.idn = idn;
3076 (*request)->upiu_req.index = index;
3077 (*request)->upiu_req.selector = selector;
3078 }
3079
3080 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3081 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
3082 {
3083 int ret;
3084 int retries;
3085
3086 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3087 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
3088 if (ret)
3089 dev_dbg(hba->dev,
3090 "%s: failed with error %d, retries %d\n",
3091 __func__, ret, retries);
3092 else
3093 break;
3094 }
3095
3096 if (ret)
3097 dev_err(hba->dev,
3098 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n",
3099 __func__, opcode, idn, ret, retries);
3100 return ret;
3101 }
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
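/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @index: flag index to access
 * @flag_res: the flag value after the query request completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */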
3113 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3114 enum flag_idn idn, u8 index, bool *flag_res)
3115 {
3116 struct ufs_query_req *request = NULL;
3117 struct ufs_query_res *response = NULL;
3118 int err, selector = 0;
3119 int timeout = QUERY_REQ_TIMEOUT;
3120
3121 BUG_ON(!hba);
3122
3123 ufshcd_hold(hba, false);
3124 mutex_lock(&hba->dev_cmd.lock);
3125 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3126 selector);
3127
3128 switch (opcode) {
3129 case UPIU_QUERY_OPCODE_SET_FLAG:
3130 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3131 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3132 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3133 break;
3134 case UPIU_QUERY_OPCODE_READ_FLAG:
3135 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3136 if (!flag_res) {
3137
3138 dev_err(hba->dev, "%s: Invalid argument for read request\n",
3139 __func__);
3140 err = -EINVAL;
3141 goto out_unlock;
3142 }
3143 break;
3144 default:
3145 dev_err(hba->dev,
3146 "%s: Expected query flag opcode but got = %d\n",
3147 __func__, opcode);
3148 err = -EINVAL;
3149 goto out_unlock;
3150 }
3151
3152 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3153
3154 if (err) {
3155 dev_err(hba->dev,
3156 "%s: Sending flag query for idn %d failed, err = %d\n",
3157 __func__, idn, err);
3158 goto out_unlock;
3159 }
3160
3161 if (flag_res)
3162 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3163 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3164
3165 out_unlock:
3166 mutex_unlock(&hba->dev_cmd.lock);
3167 ufshcd_release(hba);
3168 return err;
3169 }
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
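/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */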
3182 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3183 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3184 {
3185 struct ufs_query_req *request = NULL;
3186 struct ufs_query_res *response = NULL;
3187 int err;
3188
3189 BUG_ON(!hba);
3190
3191 if (!attr_val) {
3192 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3193 __func__, opcode);
3194 return -EINVAL;
3195 }
3196
3197 ufshcd_hold(hba, false);
3198
3199 mutex_lock(&hba->dev_cmd.lock);
3200 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3201 selector);
3202
3203 switch (opcode) {
3204 case UPIU_QUERY_OPCODE_WRITE_ATTR:
3205 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3206 request->upiu_req.value = cpu_to_be32(*attr_val);
3207 break;
3208 case UPIU_QUERY_OPCODE_READ_ATTR:
3209 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3210 break;
3211 default:
3212 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3213 __func__, opcode);
3214 err = -EINVAL;
3215 goto out_unlock;
3216 }
3217
3218 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3219
3220 if (err) {
3221 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3222 __func__, opcode, idn, index, err);
3223 goto out_unlock;
3224 }
3225
3226 *attr_val = be32_to_cpu(response->upiu_res.value);
3227
3228 out_unlock:
3229 mutex_unlock(&hba->dev_cmd.lock);
3230 ufshcd_release(hba);
3231 return err;
3232 }
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247 int ufshcd_query_attr_retry(struct ufs_hba *hba,
3248 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3249 u32 *attr_val)
3250 {
3251 int ret = 0;
3252 u32 retries;
3253
3254 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3255 ret = ufshcd_query_attr(hba, opcode, idn, index,
3256 selector, attr_val);
3257 if (ret)
3258 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3259 __func__, ret, retries);
3260 else
3261 break;
3262 }
3263
3264 if (ret)
3265 dev_err(hba->dev,
3266 "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3267 __func__, idn, ret, QUERY_REQ_RETRIES);
3268 return ret;
3269 }
3270
3271 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3272 enum query_opcode opcode, enum desc_idn idn, u8 index,
3273 u8 selector, u8 *desc_buf, int *buf_len)
3274 {
3275 struct ufs_query_req *request = NULL;
3276 struct ufs_query_res *response = NULL;
3277 int err;
3278
3279 BUG_ON(!hba);
3280
3281 if (!desc_buf) {
3282 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3283 __func__, opcode);
3284 return -EINVAL;
3285 }
3286
3287 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3288 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3289 __func__, *buf_len);
3290 return -EINVAL;
3291 }
3292
3293 ufshcd_hold(hba, false);
3294
3295 mutex_lock(&hba->dev_cmd.lock);
3296 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3297 selector);
3298 hba->dev_cmd.query.descriptor = desc_buf;
3299 request->upiu_req.length = cpu_to_be16(*buf_len);
3300
3301 switch (opcode) {
3302 case UPIU_QUERY_OPCODE_WRITE_DESC:
3303 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3304 break;
3305 case UPIU_QUERY_OPCODE_READ_DESC:
3306 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3307 break;
3308 default:
3309 dev_err(hba->dev,
3310 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3311 __func__, opcode);
3312 err = -EINVAL;
3313 goto out_unlock;
3314 }
3315
3316 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3317
3318 if (err) {
3319 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3320 __func__, opcode, idn, index, err);
3321 goto out_unlock;
3322 }
3323
3324 *buf_len = be16_to_cpu(response->upiu_res.length);
3325
3326 out_unlock:
3327 hba->dev_cmd.query.descriptor = NULL;
3328 mutex_unlock(&hba->dev_cmd.lock);
3329 ufshcd_release(hba);
3330 return err;
3331 }
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
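/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 * @hba: per-adapter instance
 * @opcode: descriptor opcode (read or write)
 * @idn: descriptor idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device; on return it is updated
 *           with the length reported by the device
 *
 * The request is retried up to QUERY_REQ_RETRIES times unless it succeeds
 * or fails with -EINVAL.
 *
 * Return: 0 for success, non-zero in case of failure.
 */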
3347 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3348 enum query_opcode opcode,
3349 enum desc_idn idn, u8 index,
3350 u8 selector,
3351 u8 *desc_buf, int *buf_len)
3352 {
3353 int err;
3354 int retries;
3355
3356 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3357 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3358 selector, desc_buf, buf_len);
3359 if (!err || err == -EINVAL)
3360 break;
3361 }
3362
3363 return err;
3364 }
3365
3366
3367
3368
3369
3370
3371
3372 void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3373 int *desc_len)
3374 {
3375 if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3376 desc_id == QUERY_DESC_IDN_RFU_1)
3377 *desc_len = 0;
3378 else
3379 *desc_len = hba->desc_size[desc_id];
3380 }
3381 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3382
3383 static void ufshcd_update_desc_length(struct ufs_hba *hba,
3384 enum desc_idn desc_id, int desc_index,
3385 unsigned char desc_len)
3386 {
3387 if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
3388 desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3389
3390
3391
3392
3393
3394 hba->desc_size[desc_id] = desc_len;
3395 }
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
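/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return: 0 in case of success, non-zero otherwise.
 */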
3408 int ufshcd_read_desc_param(struct ufs_hba *hba,
3409 enum desc_idn desc_id,
3410 int desc_index,
3411 u8 param_offset,
3412 u8 *param_read_buf,
3413 u8 param_size)
3414 {
3415 int ret;
3416 u8 *desc_buf;
3417 int buff_len;
3418 bool is_kmalloc = true;
3419
3420
3421 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3422 return -EINVAL;
3423
3424
3425 ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3426 if (!buff_len) {
3427 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3428 return -EINVAL;
3429 }
3430
3431 if (param_offset >= buff_len) {
3432 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3433 __func__, param_offset, desc_id, buff_len);
3434 return -EINVAL;
3435 }
3436
3437
3438 if (param_offset != 0 || param_size < buff_len) {
3439 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3440 if (!desc_buf)
3441 return -ENOMEM;
3442 } else {
3443 desc_buf = param_read_buf;
3444 is_kmalloc = false;
3445 }
3446
3447
3448 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3449 desc_id, desc_index, 0,
3450 desc_buf, &buff_len);
3451
3452 if (ret) {
3453 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3454 __func__, desc_id, desc_index, param_offset, ret);
3455 goto out;
3456 }
3457
3458
3459 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3460 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3461 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3462 ret = -EINVAL;
3463 goto out;
3464 }
3465
3466
3467 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3468 ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
3469
3470 if (is_kmalloc) {
3471
3472 if (param_offset >= buff_len)
3473 ret = -EINVAL;
3474 else
3475 memcpy(param_read_buf, &desc_buf[param_offset],
3476 min_t(u32, param_size, buff_len - param_offset));
3477 }
3478 out:
3479 if (is_kmalloc)
3480 kfree(desc_buf);
3481 return ret;
3482 }
3483
3484
3485
3486
3487
3488
3489
3490
3491 struct uc_string_id {
3492 u8 len;
3493 u8 type;
3494 wchar_t uc[];
3495 } __packed;
3496
3497
3498 static inline char ufshcd_remove_non_printable(u8 ch)
3499 {
3500 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3501 }
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
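/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: on success points to a newly allocated buffer holding the
 *       descriptor (raw, or converted to a NUL terminated ASCII string
 *       when @ascii is true); the caller must kfree() it
 * @ascii: if true convert from unicode to ascii characters
 *
 * Return: string size on success, zero for an empty descriptor,
 * -ENOMEM or -EINVAL on failure.
 */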
3517 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3518 u8 **buf, bool ascii)
3519 {
3520 struct uc_string_id *uc_str;
3521 u8 *str;
3522 int ret;
3523
3524 if (!buf)
3525 return -EINVAL;
3526
3527 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3528 if (!uc_str)
3529 return -ENOMEM;
3530
3531 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3532 (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3533 if (ret < 0) {
3534 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3535 QUERY_REQ_RETRIES, ret);
3536 str = NULL;
3537 goto out;
3538 }
3539
3540 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3541 dev_dbg(hba->dev, "String Desc has zero payload length\n");
3542 str = NULL;
3543 ret = 0;
3544 goto out;
3545 }
3546
3547 if (ascii) {
3548 ssize_t ascii_len;
3549 int i;
3550
3551 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3552 str = kzalloc(ascii_len, GFP_KERNEL);
3553 if (!str) {
3554 ret = -ENOMEM;
3555 goto out;
3556 }
3557
3558
3559
3560
3561
3562 ret = utf16s_to_utf8s(uc_str->uc,
3563 uc_str->len - QUERY_DESC_HDR_SIZE,
3564 UTF16_BIG_ENDIAN, str, ascii_len);
3565
3566
3567 for (i = 0; i < ret; i++)
3568 str[i] = ufshcd_remove_non_printable(str[i]);
3569
3570 str[ret++] = '\0';
3571
3572 } else {
3573 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3574 if (!str) {
3575 ret = -ENOMEM;
3576 goto out;
3577 }
3578 ret = uc_str->len;
3579 }
3580 out:
3581 *buf = str;
3582 kfree(uc_str);
3583 return ret;
3584 }
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3597 int lun,
3598 enum unit_desc_param param_offset,
3599 u8 *param_read_buf,
3600 u32 param_size)
3601 {
3602
3603
3604
3605
3606 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
3607 return -EOPNOTSUPP;
3608
3609 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3610 param_offset, param_read_buf, param_size);
3611 }
3612
3613 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3614 {
3615 int err = 0;
3616 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3617
3618 if (hba->dev_info.wspecversion >= 0x300) {
3619 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3620 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3621 &gating_wait);
3622 if (err)
3623 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3624 err, gating_wait);
3625
3626 if (gating_wait == 0) {
3627 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3628 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3629 gating_wait);
3630 }
3631
3632 hba->dev_info.clk_gating_wait_us = gating_wait;
3633 }
3634
3635 return err;
3636 }
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
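/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * Allocates DMA-coherent memory for the command descriptors, the UTP
 * transfer and task management request lists, and the local reference
 * block array.
 *
 * Return: 0 for success, -ENOMEM in case of any allocation failure.
 */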
3651 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3652 {
3653 size_t utmrdl_size, utrdl_size, ucdl_size;
3654
3655
3656 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3657 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3658 ucdl_size,
3659 &hba->ucdl_dma_addr,
3660 GFP_KERNEL);
3661
3662
3663
3664
3665
3666
3667
3668 if (!hba->ucdl_base_addr ||
3669 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3670 dev_err(hba->dev,
3671 "Command Descriptor Memory allocation failed\n");
3672 goto out;
3673 }
3674
3675
3676
3677
3678
3679 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3680 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3681 utrdl_size,
3682 &hba->utrdl_dma_addr,
3683 GFP_KERNEL);
3684 if (!hba->utrdl_base_addr ||
3685 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3686 dev_err(hba->dev,
3687 "Transfer Descriptor Memory allocation failed\n");
3688 goto out;
3689 }
3690
3691
3692
3693
3694
3695 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3696 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3697 utmrdl_size,
3698 &hba->utmrdl_dma_addr,
3699 GFP_KERNEL);
3700 if (!hba->utmrdl_base_addr ||
3701 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3702 dev_err(hba->dev,
3703 "Task Management Descriptor Memory allocation failed\n");
3704 goto out;
3705 }
3706
3707
3708 hba->lrb = devm_kcalloc(hba->dev,
3709 hba->nutrs, sizeof(struct ufshcd_lrb),
3710 GFP_KERNEL);
3711 if (!hba->lrb) {
3712 dev_err(hba->dev, "LRB Memory allocation failed\n");
3713 goto out;
3714 }
3715 return 0;
3716 out:
3717 return -ENOMEM;
3718 }
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3734 {
3735 struct utp_transfer_req_desc *utrdlp;
3736 dma_addr_t cmd_desc_dma_addr;
3737 dma_addr_t cmd_desc_element_addr;
3738 u16 response_offset;
3739 u16 prdt_offset;
3740 int cmd_desc_size;
3741 int i;
3742
3743 utrdlp = hba->utrdl_base_addr;
3744
3745 response_offset =
3746 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3747 prdt_offset =
3748 offsetof(struct utp_transfer_cmd_desc, prd_table);
3749
3750 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3751 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3752
3753 for (i = 0; i < hba->nutrs; i++) {
3754
3755 cmd_desc_element_addr =
3756 (cmd_desc_dma_addr + (cmd_desc_size * i));
3757 utrdlp[i].command_desc_base_addr_lo =
3758 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3759 utrdlp[i].command_desc_base_addr_hi =
3760 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3761
3762
3763 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3764 utrdlp[i].response_upiu_offset =
3765 cpu_to_le16(response_offset);
3766 utrdlp[i].prd_table_offset =
3767 cpu_to_le16(prdt_offset);
3768 utrdlp[i].response_upiu_length =
3769 cpu_to_le16(ALIGNED_UPIU_SIZE);
3770 } else {
3771 utrdlp[i].response_upiu_offset =
3772 cpu_to_le16(response_offset >> 2);
3773 utrdlp[i].prd_table_offset =
3774 cpu_to_le16(prdt_offset >> 2);
3775 utrdlp[i].response_upiu_length =
3776 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3777 }
3778
3779 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3780 }
3781 }
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3795 {
3796 struct uic_command uic_cmd = {0};
3797 int ret;
3798
3799 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3800
3801 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3802 if (ret)
3803 dev_dbg(hba->dev,
3804 "dme-link-startup: error code %d\n", ret);
3805 return ret;
3806 }
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816 static int ufshcd_dme_reset(struct ufs_hba *hba)
3817 {
3818 struct uic_command uic_cmd = {0};
3819 int ret;
3820
3821 uic_cmd.command = UIC_CMD_DME_RESET;
3822
3823 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3824 if (ret)
3825 dev_err(hba->dev,
3826 "dme-reset: error code %d\n", ret);
3827
3828 return ret;
3829 }
3830
3831 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3832 int agreed_gear,
3833 int adapt_val)
3834 {
3835 int ret;
3836
3837 if (agreed_gear < UFS_HS_G4)
3838 adapt_val = PA_NO_ADAPT;
3839
3840 ret = ufshcd_dme_set(hba,
3841 UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3842 adapt_val);
3843 return ret;
3844 }
3845 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855 static int ufshcd_dme_enable(struct ufs_hba *hba)
3856 {
3857 struct uic_command uic_cmd = {0};
3858 int ret;
3859
3860 uic_cmd.command = UIC_CMD_DME_ENABLE;
3861
3862 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3863 if (ret)
3864 dev_err(hba->dev,
3865 "dme-enable: error code %d\n", ret);
3866
3867 return ret;
3868 }
3869
3870 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3871 {
3872 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3873 unsigned long min_sleep_time_us;
3874
3875 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3876 return;
3877
3878
3879
3880
3881
3882 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3883 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3884 } else {
3885 unsigned long delta = (unsigned long)ktime_to_us(
3886 ktime_sub(ktime_get(), hba->last_dme_cmd_tstamp));
3889
3890 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3891 min_sleep_time_us =
3892 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3893 else
3894 return;
3895 }
3896
3897
3898 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3899 }
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
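/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Return: 0 on success, non-zero value on failure.
 */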
3911 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3912 u8 attr_set, u32 mib_val, u8 peer)
3913 {
3914 struct uic_command uic_cmd = {0};
3915 static const char *const action[] = {
3916 "dme-set",
3917 "dme-peer-set"
3918 };
3919 const char *set = action[!!peer];
3920 int ret;
3921 int retries = UFS_UIC_COMMAND_RETRIES;
3922
3923 uic_cmd.command = peer ?
3924 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3925 uic_cmd.argument1 = attr_sel;
3926 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3927 uic_cmd.argument3 = mib_val;
3928
3929 do {
3930
3931 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3932 if (ret)
3933 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3934 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3935 } while (ret && peer && --retries);
3936
3937 if (ret)
3938 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3939 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3940 UFS_UIC_COMMAND_RETRIES - retries);
3941
3942 return ret;
3943 }
3944 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3956 u32 *mib_val, u8 peer)
3957 {
3958 struct uic_command uic_cmd = {0};
3959 static const char *const action[] = {
3960 "dme-get",
3961 "dme-peer-get"
3962 };
3963 const char *get = action[!!peer];
3964 int ret;
3965 int retries = UFS_UIC_COMMAND_RETRIES;
3966 struct ufs_pa_layer_attr orig_pwr_info;
3967 struct ufs_pa_layer_attr temp_pwr_info;
3968 bool pwr_mode_change = false;
3969
3970 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3971 orig_pwr_info = hba->pwr_info;
3972 temp_pwr_info = orig_pwr_info;
3973
3974 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3975 orig_pwr_info.pwr_rx == FAST_MODE) {
3976 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3977 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3978 pwr_mode_change = true;
3979 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3980 orig_pwr_info.pwr_rx == SLOW_MODE) {
3981 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3982 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3983 pwr_mode_change = true;
3984 }
3985 if (pwr_mode_change) {
3986 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3987 if (ret)
3988 goto out;
3989 }
3990 }
3991
3992 uic_cmd.command = peer ?
3993 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3994 uic_cmd.argument1 = attr_sel;
3995
3996 do {
3997
3998 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3999 if (ret)
4000 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4001 get, UIC_GET_ATTR_ID(attr_sel), ret);
4002 } while (ret && peer && --retries);
4003
4004 if (ret)
4005 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
4006 get, UIC_GET_ATTR_ID(attr_sel),
4007 UFS_UIC_COMMAND_RETRIES - retries);
4008
4009 if (mib_val && !ret)
4010 *mib_val = uic_cmd.argument3;
4011
4012 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4013 && pwr_mode_change)
4014 ufshcd_change_power_mode(hba, &orig_pwr_info);
4015 out:
4016 return ret;
4017 }
4018 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
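/**
 * ufshcd_uic_pwr_ctrl - execute a UIC command that changes the link
 * power state and wait for it to take effect
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * Such commands complete asynchronously, so completion is signalled via
 * hba->uic_async_done and the result is taken from the UPMCRS field of
 * the host controller status register. On failure the link is marked
 * broken and error handling is scheduled.
 *
 * Return: 0 on success, non-zero value on failure.
 */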
4036 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4037 {
4038 DECLARE_COMPLETION_ONSTACK(uic_async_done);
4039 unsigned long flags;
4040 u8 status;
4041 int ret;
4042 bool reenable_intr = false;
4043
4044 mutex_lock(&hba->uic_cmd_mutex);
4045 ufshcd_add_delay_before_dme_cmd(hba);
4046
4047 spin_lock_irqsave(hba->host->host_lock, flags);
4048 if (ufshcd_is_link_broken(hba)) {
4049 ret = -ENOLINK;
4050 goto out_unlock;
4051 }
4052 hba->uic_async_done = &uic_async_done;
4053 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4054 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4055
4056
4057
4058
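/*
 * Make sure the write that disables the UIC command completion
 * interrupt reaches the controller before the UIC command is issued.
 */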
4059 wmb();
4060 reenable_intr = true;
4061 }
4062 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4063 spin_unlock_irqrestore(hba->host->host_lock, flags);
4064 if (ret) {
4065 dev_err(hba->dev,
4066 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4067 cmd->command, cmd->argument3, ret);
4068 goto out;
4069 }
4070
4071 if (!wait_for_completion_timeout(hba->uic_async_done,
4072 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4073 dev_err(hba->dev,
4074 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4075 cmd->command, cmd->argument3);
4076
4077 if (!cmd->cmd_active) {
4078 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4079 __func__);
4080 goto check_upmcrs;
4081 }
4082
4083 ret = -ETIMEDOUT;
4084 goto out;
4085 }
4086
4087 check_upmcrs:
4088 status = ufshcd_get_upmcrs(hba);
4089 if (status != PWR_LOCAL) {
4090 dev_err(hba->dev,
4091 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4092 cmd->command, status);
4093 ret = (status != PWR_OK) ? status : -1;
4094 }
4095 out:
4096 if (ret) {
4097 ufshcd_print_host_state(hba);
4098 ufshcd_print_pwr_info(hba);
4099 ufshcd_print_evt_hist(hba);
4100 }
4101
4102 spin_lock_irqsave(hba->host->host_lock, flags);
4103 hba->active_uic_cmd = NULL;
4104 hba->uic_async_done = NULL;
4105 if (reenable_intr)
4106 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4107 if (ret) {
4108 ufshcd_set_link_broken(hba);
4109 ufshcd_schedule_eh_work(hba);
4110 }
4111 out_unlock:
4112 spin_unlock_irqrestore(hba->host->host_lock, flags);
4113 mutex_unlock(&hba->uic_cmd_mutex);
4114
4115 return ret;
4116 }
4117
4118
4119
4120
4121
4122
4123
4124
4125
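/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 *				using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Return: 0 on success, non-zero value on failure.
 */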
4126 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4127 {
4128 struct uic_command uic_cmd = {0};
4129 int ret;
4130
4131 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4132 ret = ufshcd_dme_set(hba,
4133 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4134 if (ret) {
4135 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4136 __func__, ret);
4137 goto out;
4138 }
4139 }
4140
4141 uic_cmd.command = UIC_CMD_DME_SET;
4142 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4143 uic_cmd.argument3 = mode;
4144 ufshcd_hold(hba, false);
4145 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4146 ufshcd_release(hba);
4147
4148 out:
4149 return ret;
4150 }
4151 EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
4152
4153 int ufshcd_link_recovery(struct ufs_hba *hba)
4154 {
4155 int ret;
4156 unsigned long flags;
4157
4158 spin_lock_irqsave(hba->host->host_lock, flags);
4159 hba->ufshcd_state = UFSHCD_STATE_RESET;
4160 ufshcd_set_eh_in_progress(hba);
4161 spin_unlock_irqrestore(hba->host->host_lock, flags);
4162
4163
4164 ufshcd_device_reset(hba);
4165
4166 ret = ufshcd_host_reset_and_restore(hba);
4167
4168 spin_lock_irqsave(hba->host->host_lock, flags);
4169 if (ret)
4170 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4171 ufshcd_clear_eh_in_progress(hba);
4172 spin_unlock_irqrestore(hba->host->host_lock, flags);
4173
4174 if (ret)
4175 dev_err(hba->dev, "%s: link recovery failed, err %d\n",
4176 __func__, ret);
4177
4178 return ret;
4179 }
4180 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4181
4182 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4183 {
4184 int ret;
4185 struct uic_command uic_cmd = {0};
4186 ktime_t start = ktime_get();
4187
4188 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4189
4190 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4191 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4192 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4193 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4194
4195 if (ret)
4196 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4197 __func__, ret);
4198 else
4199 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4200 POST_CHANGE);
4201
4202 return ret;
4203 }
4204 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
4205
4206 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4207 {
4208 struct uic_command uic_cmd = {0};
4209 int ret;
4210 ktime_t start = ktime_get();
4211
4212 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4213
4214 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4215 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4216 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4217 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4218
4219 if (ret) {
4220 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4221 __func__, ret);
4222 } else {
4223 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4224 POST_CHANGE);
4225 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4226 hba->ufs_stats.hibern8_exit_cnt++;
4227 }
4228
4229 return ret;
4230 }
4231 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4232
4233 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4234 {
4235 unsigned long flags;
4236 bool update = false;
4237
4238 if (!ufshcd_is_auto_hibern8_supported(hba))
4239 return;
4240
4241 spin_lock_irqsave(hba->host->host_lock, flags);
4242 if (hba->ahit != ahit) {
4243 hba->ahit = ahit;
4244 update = true;
4245 }
4246 spin_unlock_irqrestore(hba->host->host_lock, flags);
4247
4248 if (update &&
4249 !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
4250 ufshcd_rpm_get_sync(hba);
4251 ufshcd_hold(hba, false);
4252 ufshcd_auto_hibern8_enable(hba);
4253 ufshcd_release(hba);
4254 ufshcd_rpm_put_sync(hba);
4255 }
4256 }
4257 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4258
4259 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4260 {
4261 if (!ufshcd_is_auto_hibern8_supported(hba))
4262 return;
4263
4264 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4265 }
4266
4267
4268
4269
4270
4271
4272 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4273 {
4274 hba->pwr_info.gear_rx = UFS_PWM_G1;
4275 hba->pwr_info.gear_tx = UFS_PWM_G1;
4276 hba->pwr_info.lane_rx = 1;
4277 hba->pwr_info.lane_tx = 1;
4278 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4279 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4280 hba->pwr_info.hs_rate = 0;
4281 }
4282
4283
4284
4285
4286
4287 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4288 {
4289 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4290
4291 if (hba->max_pwr_info.is_valid)
4292 return 0;
4293
4294 if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
4295 pwr_info->pwr_tx = FASTAUTO_MODE;
4296 pwr_info->pwr_rx = FASTAUTO_MODE;
4297 } else {
4298 pwr_info->pwr_tx = FAST_MODE;
4299 pwr_info->pwr_rx = FAST_MODE;
4300 }
4301 pwr_info->hs_rate = PA_HS_MODE_B;
4302
4303
4304 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4305 &pwr_info->lane_rx);
4306 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4307 &pwr_info->lane_tx);
4308
4309 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4310 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4311 __func__,
4312 pwr_info->lane_rx,
4313 pwr_info->lane_tx);
4314 return -EINVAL;
4315 }
4316
4317
4318
4319
4320
4321
4322 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4323 if (!pwr_info->gear_rx) {
4324 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4325 &pwr_info->gear_rx);
4326 if (!pwr_info->gear_rx) {
4327 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4328 __func__, pwr_info->gear_rx);
4329 return -EINVAL;
4330 }
4331 pwr_info->pwr_rx = SLOW_MODE;
4332 }
4333
4334 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4335 &pwr_info->gear_tx);
4336 if (!pwr_info->gear_tx) {
4337 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4338 &pwr_info->gear_tx);
4339 if (!pwr_info->gear_tx) {
4340 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4341 __func__, pwr_info->gear_tx);
4342 return -EINVAL;
4343 }
4344 pwr_info->pwr_tx = SLOW_MODE;
4345 }
4346
4347 hba->max_pwr_info.is_valid = true;
4348 return 0;
4349 }
4350
4351 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4352 struct ufs_pa_layer_attr *pwr_mode)
4353 {
4354 int ret;
4355
4356
4357 if (!hba->force_pmc &&
4358 pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4359 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4360 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4361 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4362 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4363 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4364 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4365 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4366 return 0;
4367 }
4368
4369
4370
4371
4372
4373
4374
4375 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4376 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4377 pwr_mode->lane_rx);
4378 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4379 pwr_mode->pwr_rx == FAST_MODE)
4380 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
4381 else
4382 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);
4383
4384 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4385 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4386 pwr_mode->lane_tx);
4387 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4388 pwr_mode->pwr_tx == FAST_MODE)
4389 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
4390 else
4391 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);
4392
4393 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4394 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4395 pwr_mode->pwr_rx == FAST_MODE ||
4396 pwr_mode->pwr_tx == FAST_MODE)
4397 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4398 pwr_mode->hs_rate);
4399
4400 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4401 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4402 DL_FC0ProtectionTimeOutVal_Default);
4403 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4404 DL_TC0ReplayTimeOutVal_Default);
4405 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4406 DL_AFC0ReqTimeOutVal_Default);
4407 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4408 DL_FC1ProtectionTimeOutVal_Default);
4409 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4410 DL_TC1ReplayTimeOutVal_Default);
4411 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4412 DL_AFC1ReqTimeOutVal_Default);
4413
4414 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4415 DL_FC0ProtectionTimeOutVal_Default);
4416 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4417 DL_TC0ReplayTimeOutVal_Default);
4418 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4419 DL_AFC0ReqTimeOutVal_Default);
4420 }
4421
4422 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4423 | pwr_mode->pwr_tx);
4424
4425 if (ret) {
4426 dev_err(hba->dev,
4427 "%s: power mode change failed %d\n", __func__, ret);
4428 } else {
4429 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4430 pwr_mode);
4431
4432 memcpy(&hba->pwr_info, pwr_mode,
4433 sizeof(struct ufs_pa_layer_attr));
4434 }
4435
4436 return ret;
4437 }
4438
4439
4440
4441
4442
4443
4444 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4445 struct ufs_pa_layer_attr *desired_pwr_mode)
4446 {
4447 struct ufs_pa_layer_attr final_params = { 0 };
4448 int ret;
4449
4450 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4451 desired_pwr_mode, &final_params);
4452
4453 if (ret)
4454 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4455
4456 ret = ufshcd_change_power_mode(hba, &final_params);
4457
4458 return ret;
4459 }
4460 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4461
4462
4463
4464
4465
4466
4467
4468 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4469 {
4470 int err;
4471 bool flag_res = true;
4472 ktime_t timeout;
4473
4474 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4475 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4476 if (err) {
4477 dev_err(hba->dev,
4478 "%s setting fDeviceInit flag failed with error %d\n",
4479 __func__, err);
4480 goto out;
4481 }
4482
4483
4484 timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4485 do {
4486 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4487 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4488 if (!flag_res)
4489 break;
4490 usleep_range(500, 1000);
4491 } while (ktime_before(ktime_get(), timeout));
4492
4493 if (err) {
4494 dev_err(hba->dev,
4495 "%s reading fDeviceInit flag failed with error %d\n",
4496 __func__, err);
4497 } else if (flag_res) {
4498 dev_err(hba->dev,
4499 "%s fDeviceInit was not cleared by the device\n",
4500 __func__);
4501 err = -EBUSY;
4502 }
4503 out:
4504 return err;
4505 }
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
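/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure.
 */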
4519 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4520 {
4521 int err = 0;
4522 u32 reg;
4523
4524
4525 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4526
4527
4528 if (ufshcd_is_intr_aggr_allowed(hba))
4529 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4530 else
4531 ufshcd_disable_intr_aggr(hba);
4532
4533
4534 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4535 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4536 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4537 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4538 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4539 REG_UTP_TASK_REQ_LIST_BASE_L);
4540 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4541 REG_UTP_TASK_REQ_LIST_BASE_H);
4542
4543
4544
4545
4546
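/*
 * Make sure base address and interrupt setup are updated before
 * enabling the run/stop registers below.
 */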
4547 wmb();
4548
4549
4550
4551
4552 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4553 if (!(ufshcd_get_lists_status(reg))) {
4554 ufshcd_enable_run_stop_reg(hba);
4555 } else {
4556 dev_err(hba->dev,
4557 "Host controller not ready to process requests\n");
4558 err = -EIO;
4559 }
4560
4561 return err;
4562 }
4563 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4564
4565
4566
4567
4568
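/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 */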
4569 void ufshcd_hba_stop(struct ufs_hba *hba)
4570 {
4571 unsigned long flags;
4572 int err;
4573
4574
4575
4576
4577
4578 spin_lock_irqsave(hba->host->host_lock, flags);
4579 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4580 spin_unlock_irqrestore(hba->host->host_lock, flags);
4581
4582 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4583 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4584 10, 1);
4585 if (err)
4586 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4587 }
4588 EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
4589
4590
4591
4592
4593
4594
4595
4596
4597
4598
4599
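/**
 * ufshcd_hba_execute_hce - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure.
 */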
4600 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4601 {
4602 int retry_outer = 3;
4603 int retry_inner;
4604
4605 start:
4606 if (ufshcd_is_hba_active(hba))
4607
4608 ufshcd_hba_stop(hba);
4609
4610
4611 ufshcd_set_link_off(hba);
4612
4613 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4614
4615
4616 ufshcd_hba_start(hba);
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
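/*
 * Give the controller time to start up: wait the platform-specific
 * enable delay before polling the Host Controller Enable bit.
 */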
4628 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4629
4630
4631 retry_inner = 50;
4632 while (!ufshcd_is_hba_active(hba)) {
4633 if (retry_inner) {
4634 retry_inner--;
4635 } else {
4636 dev_err(hba->dev,
4637 "Controller enable failed\n");
4638 if (retry_outer) {
4639 retry_outer--;
4640 goto start;
4641 }
4642 return -EIO;
4643 }
4644 usleep_range(1000, 1100);
4645 }
4646
4647
4648 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4649
4650 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4651
4652 return 0;
4653 }
4654
4655 int ufshcd_hba_enable(struct ufs_hba *hba)
4656 {
4657 int ret;
4658
4659 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4660 ufshcd_set_link_off(hba);
4661 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4662
4663
4664 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4665 ret = ufshcd_dme_reset(hba);
4666 if (!ret) {
4667 ret = ufshcd_dme_enable(hba);
4668 if (!ret)
4669 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4670 if (ret)
4671 dev_err(hba->dev,
4672 "Host controller enable failed with non-hce\n");
4673 }
4674 } else {
4675 ret = ufshcd_hba_execute_hce(hba);
4676 }
4677
4678 return ret;
4679 }
4680 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4681
4682 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4683 {
4684 int tx_lanes = 0, i, err = 0;
4685
4686 if (!peer)
4687 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4688 &tx_lanes);
4689 else
4690 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4691 &tx_lanes);
4692 for (i = 0; i < tx_lanes; i++) {
4693 if (!peer)
4694 err = ufshcd_dme_set(hba,
4695 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4696 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4697 0);
4698 else
4699 err = ufshcd_dme_peer_set(hba,
4700 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4701 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4702 0);
4703 if (err) {
4704 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4705 __func__, peer, i, err);
4706 break;
4707 }
4708 }
4709
4710 return err;
4711 }
4712
4713 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4714 {
4715 return ufshcd_disable_tx_lcc(hba, true);
4716 }
4717
4718 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4719 {
4720 struct ufs_event_hist *e;
4721
4722 if (id >= UFS_EVT_CNT)
4723 return;
4724
4725 e = &hba->ufs_stats.event[id];
4726 e->val[e->pos] = val;
4727 e->tstamp[e->pos] = ktime_get();
4728 e->cnt += 1;
4729 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4730
4731 ufshcd_vops_event_notify(hba, id, &val);
4732 }
4733 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
4734
4735
4736
4737
4738
4739
4740
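/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure.
 */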
4741 static int ufshcd_link_startup(struct ufs_hba *hba)
4742 {
4743 int ret;
4744 int retries = DME_LINKSTARTUP_RETRIES;
4745 bool link_startup_again = false;
4746
4747
4748
4749
4750
4751 if (!ufshcd_is_ufs_dev_active(hba))
4752 link_startup_again = true;
4753
4754 link_startup:
4755 do {
4756 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4757
4758 ret = ufshcd_dme_link_startup(hba);
4759
4760
4761 if (!ret && !ufshcd_is_device_present(hba)) {
4762 ufshcd_update_evt_hist(hba,
4763 UFS_EVT_LINK_STARTUP_FAIL,
4764 0);
4765 dev_err(hba->dev, "%s: Device not present\n", __func__);
4766 ret = -ENXIO;
4767 goto out;
4768 }
4769
4770
4771
4772
4773
4774
4775 if (ret && retries && ufshcd_hba_enable(hba)) {
4776 ufshcd_update_evt_hist(hba,
4777 UFS_EVT_LINK_STARTUP_FAIL,
4778 (u32)ret);
4779 goto out;
4780 }
4781 } while (ret && retries--);
4782
4783 if (ret) {
4784
4785 ufshcd_update_evt_hist(hba,
4786 UFS_EVT_LINK_STARTUP_FAIL,
4787 (u32)ret);
4788 goto out;
4789 }
4790
4791 if (link_startup_again) {
4792 link_startup_again = false;
4793 retries = DME_LINKSTARTUP_RETRIES;
4794 goto link_startup;
4795 }
4796
4797
4798 ufshcd_init_pwr_info(hba);
4799 ufshcd_print_pwr_info(hba);
4800
4801 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4802 ret = ufshcd_disable_device_tx_lcc(hba);
4803 if (ret)
4804 goto out;
4805 }
4806
4807
4808 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4809 if (ret)
4810 goto out;
4811
4812
4813 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4814 ret = ufshcd_make_hba_operational(hba);
4815 out:
4816 if (ret) {
4817 dev_err(hba->dev, "link startup failed %d\n", ret);
4818 ufshcd_print_host_state(hba);
4819 ufshcd_print_pwr_info(hba);
4820 ufshcd_print_evt_hist(hba);
4821 }
4822 return ret;
4823 }
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
4834
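/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the device UTP layer is not initialized it may not respond within
 * the timeout, so the NOP OUT is retried up to NOP_OUT_RETRIES times.
 */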
4835 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4836 {
4837 int err = 0;
4838 int retries;
4839
4840 ufshcd_hold(hba, false);
4841 mutex_lock(&hba->dev_cmd.lock);
4842 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4843 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4844 hba->nop_out_timeout);
4845
4846 if (!err || err == -ETIMEDOUT)
4847 break;
4848
4849 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4850 }
4851 mutex_unlock(&hba->dev_cmd.lock);
4852 ufshcd_release(hba);
4853
4854 if (err)
4855 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4856 return err;
4857 }
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
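/**
 * ufshcd_set_queue_depth - set lun queue depth
 * @sdev: pointer to SCSI device
 *
 * Read bLUQueueDepth value and activate SCSI tagged command queueing.
 * If the unit descriptor is not available the queue depth is set to 1;
 * if bLUQueueDepth is zero the host's queue depth is used instead.
 */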
4868 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4869 {
4870 int ret = 0;
4871 u8 lun_qdepth;
4872 struct ufs_hba *hba;
4873
4874 hba = shost_priv(sdev->host);
4875
4876 lun_qdepth = hba->nutrs;
4877 ret = ufshcd_read_unit_desc_param(hba,
4878 ufshcd_scsi_to_upiu_lun(sdev->lun),
4879 UNIT_DESC_PARAM_LU_Q_DEPTH,
4880 &lun_qdepth,
4881 sizeof(lun_qdepth));
4882
4883
4884 if (ret == -EOPNOTSUPP)
4885 lun_qdepth = 1;
4886 else if (!lun_qdepth)
4887
4888 lun_qdepth = hba->nutrs;
4889 else
4890 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4891
4892 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4893 __func__, lun_qdepth);
4894 scsi_change_queue_depth(sdev, lun_qdepth);
4895 }
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
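/**
 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
 * @hba: per-adapter instance
 * @lun: UFS device LUN id
 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
 *
 * Returns 0 on success and fills in @b_lu_write_protect, otherwise a
 * negative error code (e.g. for LUNs that have no unit descriptor).
 */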
4908 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4909 u8 lun,
4910 u8 *b_lu_write_protect)
4911 {
4912 int ret;
4913
4914 if (!b_lu_write_protect)
4915 ret = -EINVAL;
4916
4917
4918
4919
4920
4921 else if (lun >= hba->dev_info.max_lu_supported)
4922 ret = -ENOTSUPP;
4923 else
4924 ret = ufshcd_read_unit_desc_param(hba,
4925 lun,
4926 UNIT_DESC_PARAM_LU_WR_PROTECT,
4927 b_lu_write_protect,
4928 sizeof(*b_lu_write_protect));
4929 return ret;
4930 }
4931
4932
4933
4934
4935
4936
4937
4938
4939 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4940 const struct scsi_device *sdev)
4941 {
4942 if (hba->dev_info.f_power_on_wp_en &&
4943 !hba->dev_info.is_lu_power_on_wp) {
4944 u8 b_lu_write_protect;
4945
4946 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4947 &b_lu_write_protect) &&
4948 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4949 hba->dev_info.is_lu_power_on_wp = true;
4950 }
4951 }
4952
4953
4954
4955
4956
4957
4958 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
4959 {
4960 struct device_link *link;
4961
4962
4963
4964
4965
4966 if (hba->ufs_device_wlun) {
4967 link = device_link_add(&sdev->sdev_gendev,
4968 &hba->ufs_device_wlun->sdev_gendev,
4969 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
4970 if (!link) {
4971 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
4972 dev_name(&hba->ufs_device_wlun->sdev_gendev));
4973 return;
4974 }
4975 hba->luns_avail--;
4976
4977 if (hba->luns_avail == 1) {
4978 ufshcd_rpm_put(hba);
4979 return;
4980 }
4981 } else {
4982
4983
4984
4985
4986 hba->luns_avail--;
4987 }
4988 }
4989
4990
4991
4992
4993
4994
4995
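/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Returns success
 */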
4996 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4997 {
4998 struct ufs_hba *hba;
4999
5000 hba = shost_priv(sdev->host);
5001
5002
5003 sdev->use_10_for_ms = 1;
5004
5005
5006 sdev->set_dbd_for_ms = 1;
5007
5008
5009 sdev->allow_restart = 1;
5010
5011
5012 sdev->no_report_opcodes = 1;
5013
5014
5015 sdev->no_write_same = 1;
5016
5017 ufshcd_set_queue_depth(sdev);
5018
5019 ufshcd_get_lu_power_on_wp_status(hba, sdev);
5020
5021 ufshcd_setup_links(hba, sdev);
5022
5023 return 0;
5024 }
5025
5026
5027
5028
5029
5030
5031
5032
5033 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
5034 {
5035 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
5036 }
5037
5038 static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
5039 {
5040
5041 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
5042 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
5043 return;
5044
5045 ufshpb_destroy_lu(hba, sdev);
5046 }
5047
5048 static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
5049 {
5050
5051 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
5052 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
5053 return;
5054
5055 ufshpb_init_hpb_lu(hba, sdev);
5056 }
5057
5058
5059
5060
5061
5062 static int ufshcd_slave_configure(struct scsi_device *sdev)
5063 {
5064 struct ufs_hba *hba = shost_priv(sdev->host);
5065 struct request_queue *q = sdev->request_queue;
5066
5067 ufshcd_hpb_configure(hba, sdev);
5068
5069 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5070 if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
5071 blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
5072
5073
5074
5075
5076 if (is_device_wlun(sdev))
5077 pm_runtime_get_noresume(&sdev->sdev_gendev);
5078 else if (ufshcd_is_rpm_autosuspend_allowed(hba))
5079 sdev->rpm_autosuspend = 1;
5080
5081
5082
5083
5084
5085 sdev->silence_suspend = 1;
5086
5087 ufshcd_crypto_register(hba, q);
5088
5089 return 0;
5090 }
5091
5092
5093
5094
5095
5096 static void ufshcd_slave_destroy(struct scsi_device *sdev)
5097 {
5098 struct ufs_hba *hba;
5099 unsigned long flags;
5100
5101 hba = shost_priv(sdev->host);
5102
5103 ufshcd_hpb_destroy(hba, sdev);
5104
5105
5106 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5107 spin_lock_irqsave(hba->host->host_lock, flags);
5108 hba->ufs_device_wlun = NULL;
5109 spin_unlock_irqrestore(hba->host->host_lock, flags);
5110 } else if (hba->ufs_device_wlun) {
5111 struct device *supplier = NULL;
5112
5113
5114 spin_lock_irqsave(hba->host->host_lock, flags);
5115 if (hba->ufs_device_wlun) {
5116 supplier = &hba->ufs_device_wlun->sdev_gendev;
5117 get_device(supplier);
5118 }
5119 spin_unlock_irqrestore(hba->host->host_lock, flags);
5120
5121 if (supplier) {
5122
5123
5124
5125
5126
5127 device_link_remove(&sdev->sdev_gendev, supplier);
5128 put_device(supplier);
5129 }
5130 }
5131 }
5132
5133
5134
5135
5136
5137
5138
5139
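/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */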
5140 static inline int
5141 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5142 {
5143 int result = 0;
5144
5145 switch (scsi_status) {
5146 case SAM_STAT_CHECK_CONDITION:
5147 ufshcd_copy_sense_data(lrbp);
5148 fallthrough;
5149 case SAM_STAT_GOOD:
5150 result |= DID_OK << 16 | scsi_status;
5151 break;
5152 case SAM_STAT_TASK_SET_FULL:
5153 case SAM_STAT_BUSY:
5154 case SAM_STAT_TASK_ABORTED:
5155 ufshcd_copy_sense_data(lrbp);
5156 result |= scsi_status;
5157 break;
5158 default:
5159 result |= DID_ERROR << 16;
5160 break;
5161 }
5162
5163 return result;
5164 }
5165
5166
5167
5168
5169
5170
5171
5172
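/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */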
5173 static inline int
5174 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
5175 {
5176 int result = 0;
5177 int scsi_status;
5178 enum utp_ocs ocs;
5179
5180
5181 ocs = ufshcd_get_tr_ocs(lrbp);
5182
5183 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5184 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
5185 MASK_RSP_UPIU_RESULT)
5186 ocs = OCS_SUCCESS;
5187 }
5188
5189 switch (ocs) {
5190 case OCS_SUCCESS:
5191 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
5192 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5193 switch (result) {
5194 case UPIU_TRANSACTION_RESPONSE:
5195
5196
5197
5198
5199 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
5200
5201
5202
5203
5204
5205 scsi_status = result & MASK_SCSI_STATUS;
5206 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220 if (!hba->pm_op_in_progress &&
5221 !ufshcd_eh_in_progress(hba) &&
5222 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5223
5224 schedule_work(&hba->eeh_work);
5225
5226 if (scsi_status == SAM_STAT_GOOD)
5227 ufshpb_rsp_upiu(hba, lrbp);
5228 break;
5229 case UPIU_TRANSACTION_REJECT_UPIU:
5230
5231 result = DID_ERROR << 16;
5232 dev_err(hba->dev,
5233 "Reject UPIU not fully implemented\n");
5234 break;
5235 default:
5236 dev_err(hba->dev,
5237 "Unexpected request response code = %x\n",
5238 result);
5239 result = DID_ERROR << 16;
5240 break;
5241 }
5242 break;
5243 case OCS_ABORTED:
5244 result |= DID_ABORT << 16;
5245 break;
5246 case OCS_INVALID_COMMAND_STATUS:
5247 result |= DID_REQUEUE << 16;
5248 break;
5249 case OCS_INVALID_CMD_TABLE_ATTR:
5250 case OCS_INVALID_PRDT_ATTR:
5251 case OCS_MISMATCH_DATA_BUF_SIZE:
5252 case OCS_MISMATCH_RESP_UPIU_SIZE:
5253 case OCS_PEER_COMM_FAILURE:
5254 case OCS_FATAL_ERROR:
5255 case OCS_DEVICE_FATAL_ERROR:
5256 case OCS_INVALID_CRYPTO_CONFIG:
5257 case OCS_GENERAL_CRYPTO_ERROR:
5258 default:
5259 result |= DID_ERROR << 16;
5260 dev_err(hba->dev,
5261 "OCS error from controller = %x for tag %d\n",
5262 ocs, lrbp->task_tag);
5263 ufshcd_print_evt_hist(hba);
5264 ufshcd_print_host_state(hba);
5265 break;
5266 }
5267
5268 if ((host_byte(result) != DID_OK) &&
5269 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
5270 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
5271 return result;
5272 }
5273
5274 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5275 u32 intr_mask)
5276 {
5277 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5278 !ufshcd_is_auto_hibern8_enabled(hba))
5279 return false;
5280
5281 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5282 return false;
5283
5284 if (hba->active_uic_cmd &&
5285 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5286 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5287 return false;
5288
5289 return true;
5290 }
5291
5292
5293
5294
5295
5296
5297
5298
5299
5300
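/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */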
5301 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5302 {
5303 irqreturn_t retval = IRQ_NONE;
5304
5305 spin_lock(hba->host->host_lock);
5306 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5307 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5308
5309 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5310 hba->active_uic_cmd->argument2 |=
5311 ufshcd_get_uic_cmd_result(hba);
5312 hba->active_uic_cmd->argument3 =
5313 ufshcd_get_dme_attr_val(hba);
5314 if (!hba->uic_async_done)
5315 hba->active_uic_cmd->cmd_active = 0;
5316 complete(&hba->active_uic_cmd->done);
5317 retval = IRQ_HANDLED;
5318 }
5319
5320 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5321 hba->active_uic_cmd->cmd_active = 0;
5322 complete(hba->uic_async_done);
5323 retval = IRQ_HANDLED;
5324 }
5325
5326 if (retval == IRQ_HANDLED)
5327 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5328 UFS_CMD_COMP);
5329 spin_unlock(hba->host->host_lock);
5330 return retval;
5331 }
5332
5333
5334 static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5335 struct ufshcd_lrb *lrbp)
5336 {
5337 struct scsi_cmnd *cmd = lrbp->cmd;
5338
5339 scsi_dma_unmap(cmd);
5340 lrbp->cmd = NULL;
5341 ufshcd_release(hba);
5342 ufshcd_clk_scaling_update_busy(hba);
5343 }
5344
5345
5346
5347
5348
5349
5350 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5351 unsigned long completed_reqs)
5352 {
5353 struct ufshcd_lrb *lrbp;
5354 struct scsi_cmnd *cmd;
5355 int index;
5356
5357 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5358 lrbp = &hba->lrb[index];
5359 lrbp->compl_time_stamp = ktime_get();
5360 cmd = lrbp->cmd;
5361 if (cmd) {
5362 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5363 ufshcd_update_monitor(hba, lrbp);
5364 ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
5365 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
5366 ufshcd_release_scsi_cmd(hba, lrbp);
5367
5368 scsi_done(cmd);
5369 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5370 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5371 if (hba->dev_cmd.complete) {
5372 ufshcd_add_command_trace(hba, index,
5373 UFS_DEV_COMP);
5374 complete(hba->dev_cmd.complete);
5375 ufshcd_clk_scaling_update_busy(hba);
5376 }
5377 }
5378 }
5379 }
5380
5381
5382
5383
5384
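/*
 * Returns > 0 if one or more commands have been completed or 0 if no
 * requests have been completed.
 */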
5385 static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
5386 {
5387 struct ufs_hba *hba = shost_priv(shost);
5388 unsigned long completed_reqs, flags;
5389 u32 tr_doorbell;
5390
5391 spin_lock_irqsave(&hba->outstanding_lock, flags);
5392 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5393 completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5394 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5395 "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5396 hba->outstanding_reqs);
5397 hba->outstanding_reqs &= ~completed_reqs;
5398 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5399
5400 if (completed_reqs)
5401 __ufshcd_transfer_req_compl(hba, completed_reqs);
5402
5403 return completed_reqs;
5404 }
5405
5406
5407
5408
5409
5410
5411
5412
5413
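/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */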
5414 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5415 {
5416
5417
5418
5419
5420
5421
5422
5423 if (ufshcd_is_intr_aggr_allowed(hba) &&
5424 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5425 ufshcd_reset_intr_aggr(hba);
5426
5427 if (ufs_fail_completion())
5428 return IRQ_HANDLED;
5429
5430
5431
5432
5433
5434 ufshcd_poll(hba->host, 0);
5435
5436 return IRQ_HANDLED;
5437 }
5438
5439 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
5440 {
5441 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5442 QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5443 &ee_ctrl_mask);
5444 }
5445
5446 int ufshcd_write_ee_control(struct ufs_hba *hba)
5447 {
5448 int err;
5449
5450 mutex_lock(&hba->ee_ctrl_mutex);
5451 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5452 mutex_unlock(&hba->ee_ctrl_mutex);
5453 if (err)
5454 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5455 __func__, err);
5456 return err;
5457 }
5458
5459 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
5460 const u16 *other_mask, u16 set, u16 clr)
5461 {
5462 u16 new_mask, ee_ctrl_mask;
5463 int err = 0;
5464
5465 mutex_lock(&hba->ee_ctrl_mutex);
5466 new_mask = (*mask & ~clr) | set;
5467 ee_ctrl_mask = new_mask | *other_mask;
5468 if (ee_ctrl_mask != hba->ee_ctrl_mask)
5469 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5470
5471 if (!err) {
5472 hba->ee_ctrl_mask = ee_ctrl_mask;
5473 *mask = new_mask;
5474 }
5475 mutex_unlock(&hba->ee_ctrl_mutex);
5476 return err;
5477 }
5478
5479
5480
5481
5482
5483
5484
5485
5486
5487
5488
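/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */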
5489 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5490 {
5491 return ufshcd_update_ee_drv_mask(hba, 0, mask);
5492 }
5493
5494
5495
5496
5497
5498
5499
5500
5501
5502
5503
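/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */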
5504 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5505 {
5506 return ufshcd_update_ee_drv_mask(hba, mask, 0);
5507 }
5508
5509
5510
5511
5512
5513
5514
5515
5516
5517
5518
5519
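/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device decides on its own when to run background operations.
 *
 * Returns zero on success, non-zero on failure.
 */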
5520 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5521 {
5522 int err = 0;
5523
5524 if (hba->auto_bkops_enabled)
5525 goto out;
5526
5527 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5528 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5529 if (err) {
5530 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5531 __func__, err);
5532 goto out;
5533 }
5534
5535 hba->auto_bkops_enabled = true;
5536 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5537
5538
5539 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5540 if (err)
5541 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5542 __func__, err);
5543 out:
5544 return err;
5545 }
5546
5547
5548
5549
5550
5551
5552
5553
5554
5555
5556
5557
5558
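/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency.
 * The urgent BKOPS exception event is enabled first so that the device
 * can still alert the host when background operations become critical.
 *
 * Returns zero on success, non-zero on failure.
 */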
5559 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5560 {
5561 int err = 0;
5562
5563 if (!hba->auto_bkops_enabled)
5564 goto out;
5565
5566
5567
5568
5569
5570 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5571 if (err) {
5572 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5573 __func__, err);
5574 goto out;
5575 }
5576
5577 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5578 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5579 if (err) {
5580 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5581 __func__, err);
5582 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5583 goto out;
5584 }
5585
5586 hba->auto_bkops_enabled = false;
5587 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5588 hba->is_urgent_bkops_lvl_checked = false;
5589 out:
5590 return err;
5591 }
5592
5593
5594
5595
5596
5597
5598
5599
5600
5601
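/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag to its
 * default value, so bring the driver's tracking state and the device
 * back in sync.
 */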
5602 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5603 {
5604 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5605 hba->auto_bkops_enabled = false;
5606 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5607 ufshcd_enable_auto_bkops(hba);
5608 } else {
5609 hba->auto_bkops_enabled = true;
5610 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5611 ufshcd_disable_auto_bkops(hba);
5612 }
5613 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5614 hba->is_urgent_bkops_lvl_checked = false;
5615 }
5616
5617 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5618 {
5619 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5620 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5621 }
5622
5623
5624
5625
5626
5627
5628
5629
5630
5631
5632
5633
5634
5635
5636
5637
5638
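/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and enable fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to @status, disable otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 */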
5639 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5640 enum bkops_status status)
5641 {
5642 int err;
5643 u32 curr_status = 0;
5644
5645 err = ufshcd_get_bkops_status(hba, &curr_status);
5646 if (err) {
5647 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5648 __func__, err);
5649 goto out;
5650 } else if (curr_status > BKOPS_STATUS_MAX) {
5651 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5652 __func__, curr_status);
5653 err = -EINVAL;
5654 goto out;
5655 }
5656
5657 if (curr_status >= status)
5658 err = ufshcd_enable_auto_bkops(hba);
5659 else
5660 err = ufshcd_disable_auto_bkops(hba);
5661 out:
5662 return err;
5663 }
5664
5665
5666
5667
5668
5669
5670
5671
5672
5673
5674
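/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations at the urgent level recorded for this device.
 *
 * Returns zero on success, non-zero on failure.
 */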
5675 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5676 {
5677 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5678 }
5679
5680 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5681 {
5682 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5683 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5684 }
5685
5686 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5687 {
5688 int err;
5689 u32 curr_status = 0;
5690
5691 if (hba->is_urgent_bkops_lvl_checked)
5692 goto enable_auto_bkops;
5693
5694 err = ufshcd_get_bkops_status(hba, &curr_status);
5695 if (err) {
5696 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5697 __func__, err);
5698 goto out;
5699 }
5700
5701
5702
5703
5704
5705
5706
5707 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5708 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5709 __func__, curr_status);
5710
5711 hba->urgent_bkops_lvl = curr_status;
5712 hba->is_urgent_bkops_lvl_checked = true;
5713 }
5714
5715 enable_auto_bkops:
5716 err = ufshcd_enable_auto_bkops(hba);
5717 out:
5718 if (err < 0)
5719 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5720 __func__, err);
5721 }
5722
5723 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
5724 {
5725 u32 value;
5726
5727 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5728 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
5729 return;
5730
5731 dev_info(hba->dev, "exception Tcase %d\n", value - 80);
5732
5733 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
5734
5735
5736
5737
5738
5739 }
5740
5741 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
5742 {
5743 u8 index;
5744 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
5745 UPIU_QUERY_OPCODE_CLEAR_FLAG;
5746
5747 index = ufshcd_wb_get_query_index(hba);
5748 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
5749 }
5750
5751 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
5752 {
5753 int ret;
5754
5755 if (!ufshcd_is_wb_allowed(hba))
5756 return 0;
5757
5758 if (!(enable ^ hba->dev_info.wb_enabled))
5759 return 0;
5760
5761 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
5762 if (ret) {
5763 dev_err(hba->dev, "%s Write Booster %s failed %d\n",
5764 __func__, enable ? "enable" : "disable", ret);
5765 return ret;
5766 }
5767
5768 hba->dev_info.wb_enabled = enable;
5769 dev_dbg(hba->dev, "%s Write Booster %s\n",
5770 __func__, enable ? "enabled" : "disabled");
5771
5772 return ret;
5773 }
5774
5775 static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5776 {
5777 int ret;
5778
5779 ret = __ufshcd_wb_toggle(hba, set,
5780 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
5781 if (ret) {
5782 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
5783 __func__, set ? "enable" : "disable", ret);
5784 return;
5785 }
5786 dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
5787 __func__, set ? "enabled" : "disabled");
5788 }
5789
5790 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5791 {
5792 int ret;
5793
5794 if (!ufshcd_is_wb_allowed(hba) ||
5795 hba->dev_info.wb_buf_flush_enabled == enable)
5796 return;
5797
5798 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
5799 if (ret) {
5800 dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
5801 enable ? "enable" : "disable", ret);
5802 return;
5803 }
5804
5805 hba->dev_info.wb_buf_flush_enabled = enable;
5806
5807 dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
5808 __func__, enable ? "enabled" : "disabled");
5809 }
5810
5811 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5812 u32 avail_buf)
5813 {
5814 u32 cur_buf;
5815 int ret;
5816 u8 index;
5817
5818 index = ufshcd_wb_get_query_index(hba);
5819 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5820 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5821 index, 0, &cur_buf);
5822 if (ret) {
5823 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5824 __func__, ret);
5825 return false;
5826 }
5827
5828 if (!cur_buf) {
5829 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5830 cur_buf);
5831 return false;
5832 }
5833
5834 return avail_buf < hba->vps->wb_flush_threshold;
5835 }
5836
5837 static void ufshcd_wb_force_disable(struct ufs_hba *hba)
5838 {
5839 if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
5840 ufshcd_wb_toggle_flush(hba, false);
5841
5842 ufshcd_wb_toggle_flush_during_h8(hba, false);
5843 ufshcd_wb_toggle(hba, false);
5844 hba->caps &= ~UFSHCD_CAP_WB_EN;
5845
5846 dev_info(hba->dev, "%s: WB force disabled\n", __func__);
5847 }
5848
5849 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
5850 {
5851 u32 lifetime;
5852 int ret;
5853 u8 index;
5854
5855 index = ufshcd_wb_get_query_index(hba);
5856 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5857 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
5858 index, 0, &lifetime);
5859 if (ret) {
5860 dev_err(hba->dev,
5861 "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
5862 __func__, ret);
5863 return false;
5864 }
5865
5866 if (lifetime == UFS_WB_EXCEED_LIFETIME) {
5867 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
5868 __func__, lifetime);
5869 return false;
5870 }
5871
5872 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
5873 __func__, lifetime);
5874
5875 return true;
5876 }
5877
5878 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5879 {
5880 int ret;
5881 u32 avail_buf;
5882 u8 index;
5883
5884 if (!ufshcd_is_wb_allowed(hba))
5885 return false;
5886
5887 if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
5888 ufshcd_wb_force_disable(hba);
5889 return false;
5890 }
5891
5892
5893
5894
5895
5896
5897
5898
5899
5900
5901
5902
5903 index = ufshcd_wb_get_query_index(hba);
5904 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5905 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5906 index, 0, &avail_buf);
5907 if (ret) {
5908 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5909 __func__, ret);
5910 return false;
5911 }
5912
5913 if (!hba->dev_info.b_presrv_uspc_en)
5914 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
5915
5916 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5917 }
5918
5919 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5920 {
5921 struct ufs_hba *hba = container_of(to_delayed_work(work),
5922 struct ufs_hba,
5923 rpm_dev_flush_recheck_work);
5924
5925
5926
5927
5928
5929
5930 ufshcd_rpm_get_sync(hba);
5931 ufshcd_rpm_put_sync(hba);
5932 }
5933
5934
5935
5936
5937
5938
5939
5940
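/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */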
5941 static void ufshcd_exception_event_handler(struct work_struct *work)
5942 {
5943 struct ufs_hba *hba;
5944 int err;
5945 u32 status = 0;
5946 hba = container_of(work, struct ufs_hba, eeh_work);
5947
5948 ufshcd_scsi_block_requests(hba);
5949 err = ufshcd_get_ee_status(hba, &status);
5950 if (err) {
5951 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5952 __func__, err);
5953 goto out;
5954 }
5955
5956 trace_ufshcd_exception_event(dev_name(hba->dev), status);
5957
5958 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
5959 ufshcd_bkops_exception_event_handler(hba);
5960
5961 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
5962 ufshcd_temp_exception_event_handler(hba, status);
5963
5964 ufs_debugfs_exception_event(hba, status);
5965 out:
5966 ufshcd_scsi_unblock_requests(hba);
5967 }
5968
5969
5970 static void ufshcd_complete_requests(struct ufs_hba *hba)
5971 {
5972 ufshcd_transfer_req_compl(hba);
5973 ufshcd_tmc_handler(hba);
5974 }
5975
5976
5977
5978
5979
5980
5981
5982
5983 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5984 {
5985 unsigned long flags;
5986 bool err_handling = true;
5987
5988 spin_lock_irqsave(hba->host->host_lock, flags);
5989
5990
5991
5992
5993 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5994 goto out;
5995
5996 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5997 ((hba->saved_err & UIC_ERROR) &&
5998 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5999 goto out;
6000
6001 if ((hba->saved_err & UIC_ERROR) &&
6002 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6003 int err;
6004
6005
6006
6007 spin_unlock_irqrestore(hba->host->host_lock, flags);
6008 msleep(50);
6009 spin_lock_irqsave(hba->host->host_lock, flags);
6010
6011
6012
6013
6014
6015 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6016 ((hba->saved_err & UIC_ERROR) &&
6017 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
6018 goto out;
6019
6020
6021
6022
6023
6024
6025
6026
6027 spin_unlock_irqrestore(hba->host->host_lock, flags);
6028 err = ufshcd_verify_dev_init(hba);
6029 spin_lock_irqsave(hba->host->host_lock, flags);
6030
6031 if (err)
6032 goto out;
6033
6034
6035 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6036 hba->saved_err &= ~UIC_ERROR;
6037
6038 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6039 if (!hba->saved_uic_err)
6040 err_handling = false;
6041 }
6042 out:
6043 spin_unlock_irqrestore(hba->host->host_lock, flags);
6044 return err_handling;
6045 }
6046
6047
6048 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
6049 {
6050 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
6051 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
6052 }
6053
6054 void ufshcd_schedule_eh_work(struct ufs_hba *hba)
6055 {
6056 lockdep_assert_held(hba->host->host_lock);
6057
6058
6059 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6060 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6061 ufshcd_is_saved_err_fatal(hba))
6062 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
6063 else
6064 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
6065 queue_work(hba->eh_wq, &hba->eh_work);
6066 }
6067 }
6068
6069 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
6070 {
6071 down_write(&hba->clk_scaling_lock);
6072 hba->clk_scaling.is_allowed = allow;
6073 up_write(&hba->clk_scaling_lock);
6074 }
6075
6076 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
6077 {
6078 if (suspend) {
6079 if (hba->clk_scaling.is_enabled)
6080 ufshcd_suspend_clkscaling(hba);
6081 ufshcd_clk_scaling_allow(hba, false);
6082 } else {
6083 ufshcd_clk_scaling_allow(hba, true);
6084 if (hba->clk_scaling.is_enabled)
6085 ufshcd_resume_clkscaling(hba);
6086 }
6087 }
6088
6089 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
6090 {
6091 ufshcd_rpm_get_sync(hba);
6092 if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
6093 hba->is_sys_suspended) {
6094 enum ufs_pm_op pm_op;
6095
6096
6097
6098
6099
6100
6101 ufshcd_setup_hba_vreg(hba, true);
6102 ufshcd_enable_irq(hba);
6103 ufshcd_setup_vreg(hba, true);
6104 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6105 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6106 ufshcd_hold(hba, false);
6107 if (!ufshcd_is_clkgating_allowed(hba))
6108 ufshcd_setup_clocks(hba, true);
6109 ufshcd_release(hba);
6110 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
6111 ufshcd_vops_resume(hba, pm_op);
6112 } else {
6113 ufshcd_hold(hba, false);
6114 if (ufshcd_is_clkscaling_supported(hba) &&
6115 hba->clk_scaling.is_enabled)
6116 ufshcd_suspend_clkscaling(hba);
6117 ufshcd_clk_scaling_allow(hba, false);
6118 }
6119 ufshcd_scsi_block_requests(hba);
6120
6121 synchronize_rcu();
6122 cancel_work_sync(&hba->eeh_work);
6123 }
6124
6125 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
6126 {
6127 ufshcd_scsi_unblock_requests(hba);
6128 ufshcd_release(hba);
6129 if (ufshcd_is_clkscaling_supported(hba))
6130 ufshcd_clk_scaling_suspend(hba, false);
6131 ufshcd_rpm_put(hba);
6132 }
6133
6134 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
6135 {
6136 return (!hba->is_powered || hba->shutting_down ||
6137 !hba->ufs_device_wlun ||
6138 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
6139 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
6140 ufshcd_is_link_broken(hba))));
6141 }
6142
6143 #ifdef CONFIG_PM
6144 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
6145 {
6146 struct Scsi_Host *shost = hba->host;
6147 struct scsi_device *sdev;
6148 struct request_queue *q;
6149 int ret;
6150
6151 hba->is_sys_suspended = false;
6152
6153
6154
6155
6156 ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
6157
6158
6159 if (ret)
6160 ret = pm_runtime_set_active(hba->dev);
6161
6162
6163
6164
6165
6166
6167 if (!ret) {
6168 shost_for_each_device(sdev, shost) {
6169 q = sdev->request_queue;
6170 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6171 q->rpm_status == RPM_SUSPENDING))
6172 pm_request_resume(q->dev);
6173 }
6174 }
6175 }
6176 #else
6177 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6178 {
6179 }
6180 #endif
6181
6182 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6183 {
6184 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6185 u32 mode;
6186
6187 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6188
6189 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6190 return true;
6191
6192 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6193 return true;
6194
6195 return false;
6196 }
6197
6198
6199
6200
6201
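/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */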
6202 static void ufshcd_err_handler(struct work_struct *work)
6203 {
6204 int retries = MAX_ERR_HANDLER_RETRIES;
6205 struct ufs_hba *hba;
6206 unsigned long flags;
6207 bool needs_restore;
6208 bool needs_reset;
6209 bool err_xfer;
6210 bool err_tm;
6211 int pmc_err;
6212 int tag;
6213
6214 hba = container_of(work, struct ufs_hba, eh_work);
6215
6216 dev_info(hba->dev,
6217 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6218 __func__, ufshcd_state_name[hba->ufshcd_state],
6219 hba->is_powered, hba->shutting_down, hba->saved_err,
6220 hba->saved_uic_err, hba->force_reset,
6221 ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6222
6223 down(&hba->host_sem);
6224 spin_lock_irqsave(hba->host->host_lock, flags);
6225 if (ufshcd_err_handling_should_stop(hba)) {
6226 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6227 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6228 spin_unlock_irqrestore(hba->host->host_lock, flags);
6229 up(&hba->host_sem);
6230 return;
6231 }
6232 ufshcd_set_eh_in_progress(hba);
6233 spin_unlock_irqrestore(hba->host->host_lock, flags);
6234 ufshcd_err_handling_prepare(hba);
6235
6236 ufshcd_complete_requests(hba);
6237 spin_lock_irqsave(hba->host->host_lock, flags);
6238 again:
6239 needs_restore = false;
6240 needs_reset = false;
6241 err_xfer = false;
6242 err_tm = false;
6243
6244 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6245 hba->ufshcd_state = UFSHCD_STATE_RESET;
6246
6247
6248
6249
6250 if (ufshcd_err_handling_should_stop(hba))
6251 goto skip_err_handling;
6252
6253 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6254 bool ret;
6255
6256 spin_unlock_irqrestore(hba->host->host_lock, flags);
6257
6258 ret = ufshcd_quirk_dl_nac_errors(hba);
6259 spin_lock_irqsave(hba->host->host_lock, flags);
6260 if (!ret && ufshcd_err_handling_should_stop(hba))
6261 goto skip_err_handling;
6262 }
6263
6264 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6265 (hba->saved_uic_err &&
6266 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6267 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6268
6269 spin_unlock_irqrestore(hba->host->host_lock, flags);
6270 ufshcd_print_host_state(hba);
6271 ufshcd_print_pwr_info(hba);
6272 ufshcd_print_evt_hist(hba);
6273 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6274 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
6275 spin_lock_irqsave(hba->host->host_lock, flags);
6276 }
6277
6278
6279
6280
6281
6282
6283 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6284 ufshcd_is_saved_err_fatal(hba) ||
6285 ((hba->saved_err & UIC_ERROR) &&
6286 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6287 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6288 needs_reset = true;
6289 goto do_reset;
6290 }
6291
6292
6293
6294
6295
6296 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6297 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6298 if (!hba->saved_uic_err)
6299 hba->saved_err &= ~UIC_ERROR;
6300 spin_unlock_irqrestore(hba->host->host_lock, flags);
6301 if (ufshcd_is_pwr_mode_restore_needed(hba))
6302 needs_restore = true;
6303 spin_lock_irqsave(hba->host->host_lock, flags);
6304 if (!hba->saved_err && !needs_restore)
6305 goto skip_err_handling;
6306 }
6307
6308 hba->silence_err_logs = true;
6309
6310 spin_unlock_irqrestore(hba->host->host_lock, flags);
6311
6312 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
6313 if (ufshcd_try_to_abort_task(hba, tag)) {
6314 err_xfer = true;
6315 goto lock_skip_pending_xfer_clear;
6316 }
6317 dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag,
6318 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1);
6319 }
6320
6321
6322 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6323 if (ufshcd_clear_tm_cmd(hba, tag)) {
6324 err_tm = true;
6325 goto lock_skip_pending_xfer_clear;
6326 }
6327 }
6328
6329 lock_skip_pending_xfer_clear:
6330
6331 ufshcd_complete_requests(hba);
6332
6333 spin_lock_irqsave(hba->host->host_lock, flags);
6334 hba->silence_err_logs = false;
6335 if (err_xfer || err_tm) {
6336 needs_reset = true;
6337 goto do_reset;
6338 }
6339
6340
6341
6342
6343
6344 if (needs_restore) {
6345 spin_unlock_irqrestore(hba->host->host_lock, flags);
6346
6347
6348
6349
6350 down_write(&hba->clk_scaling_lock);
6351 hba->force_pmc = true;
6352 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6353 if (pmc_err) {
6354 needs_reset = true;
6355 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6356 __func__, pmc_err);
6357 }
6358 hba->force_pmc = false;
6359 ufshcd_print_pwr_info(hba);
6360 up_write(&hba->clk_scaling_lock);
6361 spin_lock_irqsave(hba->host->host_lock, flags);
6362 }
6363
6364 do_reset:
6365
6366 if (needs_reset) {
6367 int err;
6368
6369 hba->force_reset = false;
6370 spin_unlock_irqrestore(hba->host->host_lock, flags);
6371 err = ufshcd_reset_and_restore(hba);
6372 if (err)
6373 dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6374 __func__, err);
6375 else
6376 ufshcd_recover_pm_error(hba);
6377 spin_lock_irqsave(hba->host->host_lock, flags);
6378 }
6379
6380 skip_err_handling:
6381 if (!needs_reset) {
6382 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6383 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6384 if (hba->saved_err || hba->saved_uic_err)
6385 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6386 __func__, hba->saved_err, hba->saved_uic_err);
6387 }
6388
6389 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6390 hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6391 if (--retries)
6392 goto again;
6393 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6394 }
6395 ufshcd_clear_eh_in_progress(hba);
6396 spin_unlock_irqrestore(hba->host->host_lock, flags);
6397 ufshcd_err_handling_unprepare(hba);
6398 up(&hba->host_sem);
6399
6400 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6401 ufshcd_state_name[hba->ufshcd_state]);
6402 }
6403
6404
6405
6406
6407
6408
6409
6410
6411
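/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */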
6412 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6413 {
6414 u32 reg;
6415 irqreturn_t retval = IRQ_NONE;
6416
6417
6418 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6419 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6420 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6421 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6422
6423
6424
6425
6426 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6427 dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6428 __func__);
6429
6430
6431 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6432 struct uic_command *cmd = NULL;
6433
6434 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6435 if (hba->uic_async_done && hba->active_uic_cmd)
6436 cmd = hba->active_uic_cmd;
6437
6438
6439
6440
6441 if (cmd && (cmd->command == UIC_CMD_DME_SET))
6442 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6443 }
6444 retval |= IRQ_HANDLED;
6445 }
6446
6447
6448 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6449 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6450 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6451 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6452
6453 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6454 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6455 else if (hba->dev_quirks &
6456 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6457 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6458 hba->uic_error |=
6459 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6460 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6461 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6462 }
6463 retval |= IRQ_HANDLED;
6464 }
6465
6466
6467 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6468 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6469 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6470 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6471 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6472 retval |= IRQ_HANDLED;
6473 }
6474
6475 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6476 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6477 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6478 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6479 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6480 retval |= IRQ_HANDLED;
6481 }
6482
6483 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6484 if ((reg & UIC_DME_ERROR) &&
6485 (reg & UIC_DME_ERROR_CODE_MASK)) {
6486 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6487 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6488 retval |= IRQ_HANDLED;
6489 }
6490
6491 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6492 __func__, hba->uic_error);
6493 return retval;
6494 }
6495
6496
6497
6498
6499
6500
6501
6502
6503
6504
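/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */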
6505 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
6506 {
6507 bool queue_eh_work = false;
6508 irqreturn_t retval = IRQ_NONE;
6509
6510 spin_lock(hba->host->host_lock);
6511 hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6512
6513 if (hba->errors & INT_FATAL_ERRORS) {
6514 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6515 hba->errors);
6516 queue_eh_work = true;
6517 }
6518
6519 if (hba->errors & UIC_ERROR) {
6520 hba->uic_error = 0;
6521 retval = ufshcd_update_uic_error(hba);
6522 if (hba->uic_error)
6523 queue_eh_work = true;
6524 }
6525
6526 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6527 dev_err(hba->dev,
6528 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6529 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6530 "Enter" : "Exit",
6531 hba->errors, ufshcd_get_upmcrs(hba));
6532 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6533 hba->errors);
6534 ufshcd_set_link_broken(hba);
6535 queue_eh_work = true;
6536 }
6537
6538 if (queue_eh_work) {
6539
6540
6541
6542
6543 hba->saved_err |= hba->errors;
6544 hba->saved_uic_err |= hba->uic_error;
6545
6546
6547 if ((hba->saved_err &
6548 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6549 (hba->saved_uic_err &&
6550 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6551 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6552 __func__, hba->saved_err,
6553 hba->saved_uic_err);
6554 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6555 "host_regs: ");
6556 ufshcd_print_pwr_info(hba);
6557 }
6558 ufshcd_schedule_eh_work(hba);
6559 retval |= IRQ_HANDLED;
6560 }
6561
6562
6563
6564
6565
6566
6567 hba->errors = 0;
6568 hba->uic_error = 0;
6569 spin_unlock(hba->host->host_lock);
6570 return retval;
6571 }
6572
6573
6574
6575
6576
6577
6578
6579
6580
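/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */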
6581 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6582 {
6583 unsigned long flags, pending, issued;
6584 irqreturn_t ret = IRQ_NONE;
6585 int tag;
6586
6587 spin_lock_irqsave(hba->host->host_lock, flags);
6588 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6589 issued = hba->outstanding_tasks & ~pending;
6590 for_each_set_bit(tag, &issued, hba->nutmrs) {
6591 struct request *req = hba->tmf_rqs[tag];
6592 struct completion *c = req->end_io_data;
6593
6594 complete(c);
6595 ret = IRQ_HANDLED;
6596 }
6597 spin_unlock_irqrestore(hba->host->host_lock, flags);
6598
6599 return ret;
6600 }
6601
6602
6603
6604
6605
6606
6607
6608
6609
6610
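/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */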
6611 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6612 {
6613 irqreturn_t retval = IRQ_NONE;
6614
6615 if (intr_status & UFSHCD_UIC_MASK)
6616 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6617
6618 if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6619 retval |= ufshcd_check_errors(hba, intr_status);
6620
6621 if (intr_status & UTP_TASK_REQ_COMPL)
6622 retval |= ufshcd_tmc_handler(hba);
6623
6624 if (intr_status & UTP_TRANSFER_REQ_COMPL)
6625 retval |= ufshcd_transfer_req_compl(hba);
6626
6627 return retval;
6628 }
6629
6630
6631
6632
6633
6634
6635
6636
6637
6638
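/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */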
6639 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6640 {
6641 u32 intr_status, enabled_intr_status = 0;
6642 irqreturn_t retval = IRQ_NONE;
6643 struct ufs_hba *hba = __hba;
6644 int retries = hba->nutrs;
6645
6646 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6647 hba->ufs_stats.last_intr_status = intr_status;
6648 hba->ufs_stats.last_intr_ts = ktime_get();
6649
6650
6651
6652
6653
6654
6655
6656 while (intr_status && retries--) {
6657 enabled_intr_status =
6658 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6659 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6660 if (enabled_intr_status)
6661 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6662
6663 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6664 }
6665
6666 if (enabled_intr_status && retval == IRQ_NONE &&
6667 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
6668 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
6669 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6670 __func__,
6671 intr_status,
6672 hba->ufs_stats.last_intr_status,
6673 enabled_intr_status);
6674 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6675 }
6676
6677 return retval;
6678 }
6679
6680 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6681 {
6682 int err = 0;
6683 u32 mask = 1 << tag;
6684 unsigned long flags;
6685
6686 if (!test_bit(tag, &hba->outstanding_tasks))
6687 goto out;
6688
6689 spin_lock_irqsave(hba->host->host_lock, flags);
6690 ufshcd_utmrl_clear(hba, tag);
6691 spin_unlock_irqrestore(hba->host->host_lock, flags);
6692
6693
6694 err = ufshcd_wait_for_register(hba,
6695 REG_UTP_TASK_REQ_DOOR_BELL,
6696 mask, 0, 1000, 1000);
6697
6698 dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
6699 tag, err ? "failed" : "succeeded");
6700
6701 out:
6702 return err;
6703 }
6704
6705 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6706 struct utp_task_req_desc *treq, u8 tm_function)
6707 {
6708 struct request_queue *q = hba->tmf_queue;
6709 struct Scsi_Host *host = hba->host;
6710 DECLARE_COMPLETION_ONSTACK(wait);
6711 struct request *req;
6712 unsigned long flags;
6713 int task_tag, err;
6714
6715
6716
6717
6718 req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
6719 if (IS_ERR(req))
6720 return PTR_ERR(req);
6721
6722 req->end_io_data = &wait;
6723 ufshcd_hold(hba, false);
6724
6725 spin_lock_irqsave(host->host_lock, flags);
6726
6727 task_tag = req->tag;
6728 WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
6729 task_tag);
6730 hba->tmf_rqs[req->tag] = req;
6731 treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
6732
6733 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6734 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
6735
6736
6737 __set_bit(task_tag, &hba->outstanding_tasks);
6738
6739 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
6740
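/* Make sure that doorbell is committed immediately */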
6741 wmb();
6742
6743 spin_unlock_irqrestore(host->host_lock, flags);
6744
6745 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
6746
6747
6748 err = wait_for_completion_io_timeout(&wait,
6749 msecs_to_jiffies(TM_CMD_TIMEOUT));
6750 if (!err) {
6751 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
6752 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6753 __func__, tm_function);
6754 if (ufshcd_clear_tm_cmd(hba, task_tag))
6755 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6756 __func__, task_tag);
6757 err = -ETIMEDOUT;
6758 } else {
6759 err = 0;
6760 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
6761
6762 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
6763 }
6764
6765 spin_lock_irqsave(hba->host->host_lock, flags);
6766 hba->tmf_rqs[req->tag] = NULL;
6767 __clear_bit(task_tag, &hba->outstanding_tasks);
6768 spin_unlock_irqrestore(hba->host->host_lock, flags);
6769
6770 ufshcd_release(hba);
6771 blk_mq_free_request(req);
6772
6773 return err;
6774 }
6775
6776
6777
6778
6779
6780
6781
6782
6783
6784
6785
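/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */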
6786 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6787 u8 tm_function, u8 *tm_response)
6788 {
6789 struct utp_task_req_desc treq = { { 0 }, };
6790 enum utp_ocs ocs_value;
6791 int err;
6792
6793
6794 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6795 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6796
6797
6798 treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6799 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6800 treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6801
6802
6803
6804
6805
6806 treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
6807 treq.upiu_req.input_param2 = cpu_to_be32(task_id);
6808
6809 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6810 if (err == -ETIMEDOUT)
6811 return err;
6812
6813 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6814 if (ocs_value != OCS_SUCCESS)
6815 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6816 __func__, ocs_value);
6817 else if (tm_response)
6818 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
6819 MASK_TM_SERVICE_RESP;
6820 return err;
6821 }
6822
6823
6824
6825
6826
6827
6828
6829
6830
6831
6832
6833
6834
6835
6836
6837
6838
6839
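/**
 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
 * @hba: per-adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply
 * @desc_buff: pointer to descriptor buffer, NULL if NA
 * @buff_len: descriptor size, 0 if NA
 * @cmd_type: specifies the type (NOP, Query...)
 * @desc_op: descriptor operation
 *
 * These requests use the UTP Transfer Request Descriptor, so they ride on
 * the device management infrastructure: its reserved tag and its locking.
 * Since there is only one available tag for device management commands,
 * the caller is expected to hold the hba->dev_cmd.lock mutex.
 */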
6840 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6841 struct utp_upiu_req *req_upiu,
6842 struct utp_upiu_req *rsp_upiu,
6843 u8 *desc_buff, int *buff_len,
6844 enum dev_cmd_type cmd_type,
6845 enum query_opcode desc_op)
6846 {
6847 DECLARE_COMPLETION_ONSTACK(wait);
6848 const u32 tag = hba->reserved_slot;
6849 struct ufshcd_lrb *lrbp;
6850 int err = 0;
6851 u8 upiu_flags;
6852
6853
6854 lockdep_assert_held(&hba->dev_cmd.lock);
6855
6856 down_read(&hba->clk_scaling_lock);
6857
6858 lrbp = &hba->lrb[tag];
6859 WARN_ON(lrbp->cmd);
6860 lrbp->cmd = NULL;
6861 lrbp->task_tag = tag;
6862 lrbp->lun = 0;
6863 lrbp->intr_cmd = true;
6864 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
6865 hba->dev_cmd.type = cmd_type;
6866
6867 if (hba->ufs_version <= ufshci_version(1, 1))
6868 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6869 else
6870 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6871
6872
6873 req_upiu->header.dword_0 |= cpu_to_be32(tag);
6874
6875 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6876
6877
6878 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6879 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6880
6881
6882
6883
6884 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6885 *buff_len = 0;
6886 }
6887
6888 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6889
6890 hba->dev_cmd.complete = &wait;
6891
6892 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
6893
6894 ufshcd_send_command(hba, tag);
6895
6896
6897
6898
6899
6900 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6901
6902
6903 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
6904 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6905 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6906 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6907 MASK_QUERY_DATA_SEG_LEN;
6908
6909 if (*buff_len >= resp_len) {
6910 memcpy(desc_buff, descp, resp_len);
6911 *buff_len = resp_len;
6912 } else {
6913 dev_warn(hba->dev,
6914 "%s: rsp size %d is bigger than buffer size %d",
6915 __func__, resp_len, *buff_len);
6916 *buff_len = 0;
6917 err = -EINVAL;
6918 }
6919 }
6920 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
6921 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
6922
6923 up_read(&hba->clk_scaling_lock);
6924 return err;
6925 }
6926
6927
6928
6929
6930
6931
6932
6933
6934
6935
6936
6937
6938
6939
6940
6941
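/**
 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
 * @hba:	per-adapter instance
 * @req_upiu:	upiu request
 * @rsp_upiu:	upiu reply
 * @msgcode:	message code, one of UPIU Transaction Codes Initiator to Target
 * @desc_buff:	pointer to descriptor buffer, NULL if NA
 * @buff_len:	descriptor size, 0 if NA
 * @desc_op:	descriptor operation
 *
 * Supports UTP Transfer requests (nop and query) and UTP Task Management
 * requests. It is up to the caller to fill the upiu conveying the actual
 * command and to validate the response.
 */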
6942 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6943 struct utp_upiu_req *req_upiu,
6944 struct utp_upiu_req *rsp_upiu,
6945 int msgcode,
6946 u8 *desc_buff, int *buff_len,
6947 enum query_opcode desc_op)
6948 {
6949 int err;
6950 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
6951 struct utp_task_req_desc treq = { { 0 }, };
6952 enum utp_ocs ocs_value;
6953 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6954
6955 switch (msgcode) {
6956 case UPIU_TRANSACTION_NOP_OUT:
6957 cmd_type = DEV_CMD_TYPE_NOP;
6958 fallthrough;
6959 case UPIU_TRANSACTION_QUERY_REQ:
6960 ufshcd_hold(hba, false);
6961 mutex_lock(&hba->dev_cmd.lock);
6962 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6963 desc_buff, buff_len,
6964 cmd_type, desc_op);
6965 mutex_unlock(&hba->dev_cmd.lock);
6966 ufshcd_release(hba);
6967
6968 break;
6969 case UPIU_TRANSACTION_TASK_REQ:
6970 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6971 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6972
6973 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
6974
6975 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6976 if (err == -ETIMEDOUT)
6977 break;
6978
6979 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6980 if (ocs_value != OCS_SUCCESS) {
6981 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6982 ocs_value);
6983 break;
6984 }
6985
6986 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
6987
6988 break;
6989 default:
6990 err = -EINVAL;
6991
6992 break;
6993 }
6994
6995 return err;
6996 }
6997
6998
6999
7000
7001
7002
7003
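/**
 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS or FAILED.
 */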
7004 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7005 {
7006 unsigned long flags, pending_reqs = 0, not_cleared = 0;
7007 struct Scsi_Host *host;
7008 struct ufs_hba *hba;
7009 u32 pos;
7010 int err;
7011 u8 resp = 0xF, lun;
7012
7013 host = cmd->device->host;
7014 hba = shost_priv(host);
7015
7016 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
7017 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
7018 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7019 if (!err)
7020 err = resp;
7021 goto out;
7022 }
7023
7024
7025 spin_lock_irqsave(&hba->outstanding_lock, flags);
7026 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
7027 if (hba->lrb[pos].lun == lun)
7028 __set_bit(pos, &pending_reqs);
7029 hba->outstanding_reqs &= ~pending_reqs;
7030 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7031
7032 if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
7033 spin_lock_irqsave(&hba->outstanding_lock, flags);
7034 not_cleared = pending_reqs &
7035 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7036 hba->outstanding_reqs |= not_cleared;
7037 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7038
7039 dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
7040 __func__, not_cleared);
7041 }
7042 __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);
7043
7044 out:
7045 hba->req_abort_count = 0;
7046 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
7047 if (!err) {
7048 err = SUCCESS;
7049 } else {
7050 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7051 err = FAILED;
7052 }
7053 return err;
7054 }
7055
7056 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
7057 {
7058 struct ufshcd_lrb *lrbp;
7059 int tag;
7060
7061 for_each_set_bit(tag, &bitmap, hba->nutrs) {
7062 lrbp = &hba->lrb[tag];
7063 lrbp->req_abort_skip = true;
7064 }
7065 }
7066
7067
7068
7069
7070
7071
7072
7073
7074
7075
7076
7077
7078
7079
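/**
 * ufshcd_try_to_abort_task - abort a specific task
 * @hba: Pointer to adapter instance
 * @tag: Task tag/index to be aborted
 *
 * Abort the pending command in the device by sending the UFS_ABORT_TASK task
 * management command, and in the host controller by clearing the door-bell
 * register. Because the controller may race with the abort while still
 * sending the command to the device, UFS_QUERY_TASK is issued first to check
 * whether the command is actually pending in the device.
 *
 * Returns zero on success, non-zero on failure.
 */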
7080 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
7081 {
7082 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7083 int err = 0;
7084 int poll_cnt;
7085 u8 resp = 0xF;
7086 u32 reg;
7087
7088 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
7089 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7090 UFS_QUERY_TASK, &resp);
7091 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
7092
7093 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
7094 __func__, tag);
7095 break;
7096 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7097
7098
7099
7100
7101 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
7102 __func__, tag);
7103 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7104 if (reg & (1 << tag)) {
7105
7106 usleep_range(100, 200);
7107 continue;
7108 }
7109
7110 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
7111 __func__, tag);
7112 goto out;
7113 } else {
7114 dev_err(hba->dev,
7115 "%s: no response from device. tag = %d, err %d\n",
7116 __func__, tag, err);
7117 if (!err)
7118 err = resp;
7119 goto out;
7120 }
7121 }
7122
7123 if (!poll_cnt) {
7124 err = -EBUSY;
7125 goto out;
7126 }
7127
7128 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7129 UFS_ABORT_TASK, &resp);
7130 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7131 if (!err) {
7132 err = resp;
7133 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
7134 __func__, tag, err);
7135 }
7136 goto out;
7137 }
7138
7139 err = ufshcd_clear_cmds(hba, 1U << tag);
7140 if (err)
7141 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
7142 __func__, tag, err);
7143
7144 out:
7145 return err;
7146 }
7147
7148
7149
7150
7151
7152
7153
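/**
 * ufshcd_abort - scsi host template eh_abort_handler callback
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS or FAILED.
 */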
7154 static int ufshcd_abort(struct scsi_cmnd *cmd)
7155 {
7156 struct Scsi_Host *host = cmd->device->host;
7157 struct ufs_hba *hba = shost_priv(host);
7158 int tag = scsi_cmd_to_rq(cmd)->tag;
7159 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7160 unsigned long flags;
7161 int err = FAILED;
7162 bool outstanding;
7163 u32 reg;
7164
7165 WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
7166
7167 ufshcd_hold(hba, false);
7168 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7169
7170 if (!(test_bit(tag, &hba->outstanding_reqs))) {
7171 dev_err(hba->dev,
7172 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7173 __func__, tag, hba->outstanding_reqs, reg);
7174 goto release;
7175 }
7176
7177
7178 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
7179
7180
7181
7182
7183
7184
7185
7186
7187 scsi_print_command(cmd);
7188 if (!hba->req_abort_count) {
7189 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7190 ufshcd_print_evt_hist(hba);
7191 ufshcd_print_host_state(hba);
7192 ufshcd_print_pwr_info(hba);
7193 ufshcd_print_trs(hba, 1 << tag, true);
7194 } else {
7195 ufshcd_print_trs(hba, 1 << tag, false);
7196 }
7197 hba->req_abort_count++;
7198
7199 if (!(reg & (1 << tag))) {
7200 dev_err(hba->dev,
7201 "%s: cmd was completed, but without a notifying intr, tag = %d",
7202 __func__, tag);
7203 __ufshcd_transfer_req_compl(hba, 1UL << tag);
7204 goto release;
7205 }
7206
7207
7208
7209
7210
7211
7212
7213
7214
7215 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7216 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
7217
7218 spin_lock_irqsave(host->host_lock, flags);
7219 hba->force_reset = true;
7220 ufshcd_schedule_eh_work(hba);
7221 spin_unlock_irqrestore(host->host_lock, flags);
7222 goto release;
7223 }
7224
7225
7226 if (lrbp->req_abort_skip) {
7227 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7228 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7229 goto release;
7230 }
7231
7232 err = ufshcd_try_to_abort_task(hba, tag);
7233 if (err) {
7234 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7235 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7236 err = FAILED;
7237 goto release;
7238 }
7239
7240
7241
7242
7243
7244 spin_lock_irqsave(&hba->outstanding_lock, flags);
7245 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7246 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7247
7248 if (outstanding)
7249 ufshcd_release_scsi_cmd(hba, lrbp);
7250
7251 err = SUCCESS;
7252
7253 release:
7254
7255 ufshcd_release(hba);
7256 return err;
7257 }
7258
7259
7260
7261
7262
7263
7264
7265
7266
7267
7268
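/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to the local and
 * remote (device) Uni-Pro stack, and the attributes are reset to their
 * default state.
 *
 * Returns zero on success, non-zero on failure.
 */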
7269 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7270 {
7271 int err;
7272
7273
7274
7275
7276
7277 ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
7278 ufshcd_hba_stop(hba);
7279 hba->silence_err_logs = true;
7280 ufshcd_complete_requests(hba);
7281 hba->silence_err_logs = false;
7282
7283
7284 ufshcd_scale_clks(hba, true);
7285
7286 err = ufshcd_hba_enable(hba);
7287
7288
7289 if (!err)
7290 err = ufshcd_probe_hba(hba, false);
7291
7292 if (err)
7293 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7294 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7295 return err;
7296 }
7297
7298
7299
7300
7301
7302
7303
7304
7305
7306
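/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover the device and host, and re-establish the link. This
 * is helpful to recover communication in fatal error cases.
 *
 * Returns zero on success, non-zero on failure.
 */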
7307 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7308 {
7309 u32 saved_err = 0;
7310 u32 saved_uic_err = 0;
7311 int err = 0;
7312 unsigned long flags;
7313 int retries = MAX_HOST_RESET_RETRIES;
7314
7315 spin_lock_irqsave(hba->host->host_lock, flags);
7316 do {
7317
7318
7319
7320
7321 saved_err |= hba->saved_err;
7322 saved_uic_err |= hba->saved_uic_err;
7323 hba->saved_err = 0;
7324 hba->saved_uic_err = 0;
7325 hba->force_reset = false;
7326 hba->ufshcd_state = UFSHCD_STATE_RESET;
7327 spin_unlock_irqrestore(hba->host->host_lock, flags);
7328
7329
7330 ufshcd_device_reset(hba);
7331
7332 err = ufshcd_host_reset_and_restore(hba);
7333
7334 spin_lock_irqsave(hba->host->host_lock, flags);
7335 if (err)
7336 continue;
7337
7338 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7339 hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7340 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7341 err = -EAGAIN;
7342 } while (err && --retries);
7343
7344
7345
7346
7347
7348 scsi_report_bus_reset(hba->host, 0);
7349 if (err) {
7350 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7351 hba->saved_err |= saved_err;
7352 hba->saved_uic_err |= saved_uic_err;
7353 }
7354 spin_unlock_irqrestore(hba->host->host_lock, flags);
7355
7356 return err;
7357 }
7358
7359
7360
7361
7362
7363
7364
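/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS or FAILED.
 */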
7365 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7366 {
7367 int err = SUCCESS;
7368 unsigned long flags;
7369 struct ufs_hba *hba;
7370
7371 hba = shost_priv(cmd->device->host);
7372
7373 spin_lock_irqsave(hba->host->host_lock, flags);
7374 hba->force_reset = true;
7375 ufshcd_schedule_eh_work(hba);
7376 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7377 spin_unlock_irqrestore(hba->host->host_lock, flags);
7378
7379 flush_work(&hba->eh_work);
7380
7381 spin_lock_irqsave(hba->host->host_lock, flags);
7382 if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7383 err = FAILED;
7384 spin_unlock_irqrestore(hba->host->host_lock, flags);
7385
7386 return err;
7387 }
7388
7389
7390
7391
7392
7393
7394
7395
7396
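/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row of the descriptor table to start the scan from
 * @buff: power descriptor buffer
 *
 * Returns the calculated max ICC level for the given regulator.
 */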
7397 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
7398 const char *buff)
7399 {
7400 int i;
7401 int curr_uA;
7402 u16 data;
7403 u16 unit;
7404
7405 for (i = start_scan; i >= 0; i--) {
7406 data = get_unaligned_be16(&buff[2 * i]);
7407 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7408 ATTR_ICC_LVL_UNIT_OFFSET;
7409 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7410 switch (unit) {
7411 case UFSHCD_NANO_AMP:
7412 curr_uA = curr_uA / 1000;
7413 break;
7414 case UFSHCD_MILI_AMP:
7415 curr_uA = curr_uA * 1000;
7416 break;
7417 case UFSHCD_AMP:
7418 curr_uA = curr_uA * 1000 * 1000;
7419 break;
7420 case UFSHCD_MICRO_AMP:
7421 default:
7422 break;
7423 }
7424 if (sup_curr_uA >= curr_uA)
7425 break;
7426 }
7427 if (i < 0) {
7428 i = 0;
7429 pr_err("%s: Couldn't find valid icc_level, using default %d", __func__, i);
7430 }
7431
7432 return (u32)i;
7433 }
7434
7435
7436
7437
7438
7439
7440
7441
7442
7443
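/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from
 * @len: length of desc_buf
 *
 * In case the regulators are not initialized, 0 is returned.
 *
 * Returns the calculated ICC level.
 */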
7444 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7445 const u8 *desc_buf, int len)
7446 {
7447 u32 icc_level = 0;
7448
7449 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7450 !hba->vreg_info.vccq2) {
7451
7452
7453
7454
7455
7456
7457 dev_dbg(hba->dev,
7458 "%s: Regulator capability was not set, actvIccLevel=%d",
7459 __func__, icc_level);
7460 goto out;
7461 }
7462
7463 if (hba->vreg_info.vcc->max_uA)
7464 icc_level = ufshcd_get_max_icc_level(
7465 hba->vreg_info.vcc->max_uA,
7466 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7467 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7468
7469 if (hba->vreg_info.vccq->max_uA)
7470 icc_level = ufshcd_get_max_icc_level(
7471 hba->vreg_info.vccq->max_uA,
7472 icc_level,
7473 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7474
7475 if (hba->vreg_info.vccq2->max_uA)
7476 icc_level = ufshcd_get_max_icc_level(
7477 hba->vreg_info.vccq2->max_uA,
7478 icc_level,
7479 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7480 out:
7481 return icc_level;
7482 }
7483
7484 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7485 {
7486 int ret;
7487 int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
7488 u8 *desc_buf;
7489 u32 icc_level;
7490
7491 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7492 if (!desc_buf)
7493 return;
7494
7495 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7496 desc_buf, buff_len);
7497 if (ret) {
7498 dev_err(hba->dev,
7499 "%s: Failed reading power descriptor. len = %d ret = %d",
7500 __func__, buff_len, ret);
7501 goto out;
7502 }
7503
7504 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7505 buff_len);
7506 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7507
7508 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7509 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7510
7511 if (ret)
7512 dev_err(hba->dev,
7513 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7514 __func__, icc_level, ret);
7515
7516 out:
7517 kfree(desc_buf);
7518 }
7519
7520 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7521 {
7522 scsi_autopm_get_device(sdev);
7523 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7524 if (sdev->rpm_autosuspend)
7525 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7526 RPM_AUTOSUSPEND_DELAY_MS);
7527 scsi_autopm_put_device(sdev);
7528 }
7529
7530
7531
7532
7533
7534
7535
7536
7537
7538
7539
7540
7541
7542
7543
7544
7545
7546
7547
7548
7549
7550
7551
7552
7553
7554
7555
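/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * The "UFS Device" W-LU is the target of the power management (START STOP
 * UNIT) commands, so its scsi_device is created first and kept in
 * hba->ufs_device_wlun; the RPMB and BOOT W-LUs are added afterwards. A
 * missing BOOT W-LU is reported but not treated as a fatal error.
 *
 * Returns zero on success (all required W-LUs were added successfully),
 * non-zero error value on failure.
 */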
7556 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7557 {
7558 int ret = 0;
7559 struct scsi_device *sdev_boot, *sdev_rpmb;
7560
7561 hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
7562 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7563 if (IS_ERR(hba->ufs_device_wlun)) {
7564 ret = PTR_ERR(hba->ufs_device_wlun);
7565 hba->ufs_device_wlun = NULL;
7566 goto out;
7567 }
7568 scsi_device_put(hba->ufs_device_wlun);
7569
7570 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7571 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7572 if (IS_ERR(sdev_rpmb)) {
7573 ret = PTR_ERR(sdev_rpmb);
7574 goto remove_ufs_device_wlun;
7575 }
7576 ufshcd_blk_pm_runtime_init(sdev_rpmb);
7577 scsi_device_put(sdev_rpmb);
7578
7579 sdev_boot = __scsi_add_device(hba->host, 0, 0,
7580 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7581 if (IS_ERR(sdev_boot)) {
7582 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
7583 } else {
7584 ufshcd_blk_pm_runtime_init(sdev_boot);
7585 scsi_device_put(sdev_boot);
7586 }
7587 goto out;
7588
7589 remove_ufs_device_wlun:
7590 scsi_remove_device(hba->ufs_device_wlun);
7591 out:
7592 return ret;
7593 }
7594
7595 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
7596 {
7597 struct ufs_dev_info *dev_info = &hba->dev_info;
7598 u8 lun;
7599 u32 d_lu_wb_buf_alloc;
7600 u32 ext_ufs_feature;
7601
7602 if (!ufshcd_is_wb_allowed(hba))
7603 return;
7604
7605
7606
7607
7608
7609
7610 if (!(dev_info->wspecversion >= 0x310 ||
7611 dev_info->wspecversion == 0x220 ||
7612 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7613 goto wb_disabled;
7614
7615 if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7616 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
7617 goto wb_disabled;
7618
7619 ext_ufs_feature = get_unaligned_be32(desc_buf +
7620 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7621
7622 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
7623 goto wb_disabled;
7624
7625
7626
7627
7628
7629
7630 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
7631
7632 dev_info->b_presrv_uspc_en =
7633 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7634
7635 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
7636 if (!get_unaligned_be32(desc_buf +
7637 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
7638 goto wb_disabled;
7639 } else {
7640 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7641 d_lu_wb_buf_alloc = 0;
7642 ufshcd_read_unit_desc_param(hba,
7643 lun,
7644 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7645 (u8 *)&d_lu_wb_buf_alloc,
7646 sizeof(d_lu_wb_buf_alloc));
7647 if (d_lu_wb_buf_alloc) {
7648 dev_info->wb_dedicated_lu = lun;
7649 break;
7650 }
7651 }
7652
7653 if (!d_lu_wb_buf_alloc)
7654 goto wb_disabled;
7655 }
7656
7657 if (!ufshcd_is_wb_buf_lifetime_available(hba))
7658 goto wb_disabled;
7659
7660 return;
7661
7662 wb_disabled:
7663 hba->caps &= ~UFSHCD_CAP_WB_EN;
7664 }
7665
7666 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
7667 {
7668 struct ufs_dev_info *dev_info = &hba->dev_info;
7669 u32 ext_ufs_feature;
7670 u8 mask = 0;
7671
7672 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
7673 return;
7674
7675 ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7676
7677 if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
7678 mask |= MASK_EE_TOO_LOW_TEMP;
7679
7680 if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
7681 mask |= MASK_EE_TOO_HIGH_TEMP;
7682
7683 if (mask) {
7684 ufshcd_enable_ee(hba, mask);
7685 ufs_hwmon_probe(hba, mask);
7686 }
7687 }
7688
7689 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
7690 const struct ufs_dev_quirk *fixups)
7691 {
7692 const struct ufs_dev_quirk *f;
7693 struct ufs_dev_info *dev_info = &hba->dev_info;
7694
7695 if (!fixups)
7696 return;
7697
7698 for (f = fixups; f->quirk; f++) {
7699 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7700 f->wmanufacturerid == UFS_ANY_VENDOR) &&
7701 ((dev_info->model &&
7702 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7703 !strcmp(f->model, UFS_ANY_MODEL)))
7704 hba->dev_quirks |= f->quirk;
7705 }
7706 }
7707 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
7708
7709 static void ufs_fixup_device_setup(struct ufs_hba *hba)
7710 {
7711
7712 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7713
7714
7715 ufshcd_vops_fixup_dev_quirks(hba);
7716 }
7717
7718 static int ufs_get_device_desc(struct ufs_hba *hba)
7719 {
7720 int err;
7721 u8 model_index;
7722 u8 b_ufs_feature_sup;
7723 u8 *desc_buf;
7724 struct ufs_dev_info *dev_info = &hba->dev_info;
7725
7726 desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7727 if (!desc_buf) {
7728 err = -ENOMEM;
7729 goto out;
7730 }
7731
7732 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7733 hba->desc_size[QUERY_DESC_IDN_DEVICE]);
7734 if (err) {
7735 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7736 __func__, err);
7737 goto out;
7738 }
7739
7740
7741
7742
7743
7744 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7745 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7746
7747
7748 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7749 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
7750 b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
7751
7752 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
7753
7754 if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
7755 (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
7756 bool hpb_en = false;
7757
7758 ufshpb_get_dev_info(hba, desc_buf);
7759
7760 if (!ufshpb_is_legacy(hba))
7761 err = ufshcd_query_flag_retry(hba,
7762 UPIU_QUERY_OPCODE_READ_FLAG,
7763 QUERY_FLAG_IDN_HPB_EN, 0,
7764 &hpb_en);
7765
7766 if (ufshpb_is_legacy(hba) || (!err && hpb_en))
7767 dev_info->hpb_enabled = true;
7768 }
7769
7770 err = ufshcd_read_string_desc(hba, model_index,
7771 &dev_info->model, SD_ASCII_STD);
7772 if (err < 0) {
7773 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
7774 __func__, err);
7775 goto out;
7776 }
7777
7778 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
7779 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
7780
7781 ufs_fixup_device_setup(hba);
7782
7783 ufshcd_wb_probe(hba, desc_buf);
7784
7785 ufshcd_temp_notif_probe(hba, desc_buf);
7786
7787
7788
7789
7790
7791 err = 0;
7792
7793 out:
7794 kfree(desc_buf);
7795 return err;
7796 }
7797
7798 static void ufs_put_device_desc(struct ufs_hba *hba)
7799 {
7800 struct ufs_dev_info *dev_info = &hba->dev_info;
7801
7802 kfree(dev_info->model);
7803 dev_info->model = NULL;
7804 }
7805
7806
7807
7808
7809
7810
7811
7812
7813
7814
7815
7816
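/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * Derive PA_TActivate from the peer's RX_MIN_ACTIVATETIME capability so the
 * local value matches what the attached device can support.
 *
 * Returns zero on success, non-zero error value on failure.
 */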
7817 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7818 {
7819 int ret = 0;
7820 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7821
7822 ret = ufshcd_dme_peer_get(hba,
7823 UIC_ARG_MIB_SEL(
7824 RX_MIN_ACTIVATETIME_CAPABILITY,
7825 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7826 &peer_rx_min_activatetime);
7827 if (ret)
7828 goto out;
7829
7830
7831 tuned_pa_tactivate =
7832 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7833 / PA_TACTIVATE_TIME_UNIT_US);
7834 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7835 tuned_pa_tactivate);
7836
7837 out:
7838 return ret;
7839 }
7840
7841
7842
7843
7844
7845
7846
7847
7848
7849
7850
7851
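/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * Set PA_Hibern8Time based on the maximum of the local TX_HIBERN8TIME
 * capability and the peer's RX_HIBERN8TIME capability.
 *
 * Returns zero on success, non-zero error value on failure.
 */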
7852 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7853 {
7854 int ret = 0;
7855 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7856 u32 max_hibern8_time, tuned_pa_hibern8time;
7857
7858 ret = ufshcd_dme_get(hba,
7859 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7860 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7861 &local_tx_hibern8_time_cap);
7862 if (ret)
7863 goto out;
7864
7865 ret = ufshcd_dme_peer_get(hba,
7866 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7867 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7868 &peer_rx_hibern8_time_cap);
7869 if (ret)
7870 goto out;
7871
7872 max_hibern8_time = max(local_tx_hibern8_time_cap,
7873 peer_rx_hibern8_time_cap);
7874
7875 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7876 / PA_HIBERN8_TIME_UNIT_US);
7877 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7878 tuned_pa_hibern8time);
7879 out:
7880 return ret;
7881 }
7882
7883
7884
7885
7886
7887
7888
7889
7890
7891
7892
7893
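/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 * less than device PA_TACTIVATE time.
 * @hba: per-adapter instance
 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk is enabled
 * for such devices, and this helper raises the peer's PA_TACTIVATE if
 * needed.
 *
 * Returns zero on success, non-zero error value on failure.
 */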
7894 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7895 {
7896 int ret = 0;
7897 u32 granularity, peer_granularity;
7898 u32 pa_tactivate, peer_pa_tactivate;
7899 u32 pa_tactivate_us, peer_pa_tactivate_us;
7900 static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7901
7902 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7903 &granularity);
7904 if (ret)
7905 goto out;
7906
7907 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7908 &peer_granularity);
7909 if (ret)
7910 goto out;
7911
7912 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7913 (granularity > PA_GRANULARITY_MAX_VAL)) {
7914 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7915 __func__, granularity);
7916 return -EINVAL;
7917 }
7918
7919 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7920 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7921 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7922 __func__, peer_granularity);
7923 return -EINVAL;
7924 }
7925
7926 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7927 if (ret)
7928 goto out;
7929
7930 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7931 &peer_pa_tactivate);
7932 if (ret)
7933 goto out;
7934
7935 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7936 peer_pa_tactivate_us = peer_pa_tactivate *
7937 gran_to_us_table[peer_granularity - 1];
7938
7939 if (pa_tactivate_us >= peer_pa_tactivate_us) {
7940 u32 new_peer_pa_tactivate;
7941
7942 new_peer_pa_tactivate = pa_tactivate_us /
7943 gran_to_us_table[peer_granularity - 1];
7944 new_peer_pa_tactivate++;
7945 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7946 new_peer_pa_tactivate);
7947 }
7948
7949 out:
7950 return ret;
7951 }
7952
7953 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7954 {
7955 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7956 ufshcd_tune_pa_tactivate(hba);
7957 ufshcd_tune_pa_hibern8time(hba);
7958 }
7959
7960 ufshcd_vops_apply_dev_quirks(hba);
7961
7962 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7963
7964 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7965
7966 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7967 ufshcd_quirk_tune_host_pa_tactivate(hba);
7968 }
7969
7970 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7971 {
7972 hba->ufs_stats.hibern8_exit_cnt = 0;
7973 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7974 hba->req_abort_count = 0;
7975 }
7976
7977 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7978 {
7979 int err;
7980 size_t buff_len;
7981 u8 *desc_buf;
7982
7983 buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
7984 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7985 if (!desc_buf) {
7986 err = -ENOMEM;
7987 goto out;
7988 }
7989
7990 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7991 desc_buf, buff_len);
7992 if (err) {
7993 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7994 __func__, err);
7995 goto out;
7996 }
7997
7998 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7999 hba->dev_info.max_lu_supported = 32;
8000 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
8001 hba->dev_info.max_lu_supported = 8;
8002
8003 if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
8004 GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
8005 ufshpb_get_geo_info(hba, desc_buf);
8006
8007 out:
8008 kfree(desc_buf);
8009 return err;
8010 }
8011
8012 struct ufs_ref_clk {
8013 unsigned long freq_hz;
8014 enum ufs_ref_clk_freq val;
8015 };
8016
8017 static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
8018 {19200000, REF_CLK_FREQ_19_2_MHZ},
8019 {26000000, REF_CLK_FREQ_26_MHZ},
8020 {38400000, REF_CLK_FREQ_38_4_MHZ},
8021 {52000000, REF_CLK_FREQ_52_MHZ},
8022 {0, REF_CLK_FREQ_INVAL},
8023 };
8024
8025 static enum ufs_ref_clk_freq
8026 ufs_get_bref_clk_from_hz(unsigned long freq)
8027 {
8028 int i;
8029
8030 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
8031 if (ufs_ref_clk_freqs[i].freq_hz == freq)
8032 return ufs_ref_clk_freqs[i].val;
8033
8034 return REF_CLK_FREQ_INVAL;
8035 }
8036
8037 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
8038 {
8039 unsigned long freq;
8040
8041 freq = clk_get_rate(refclk);
8042
8043 hba->dev_ref_clk_freq =
8044 ufs_get_bref_clk_from_hz(freq);
8045
8046 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
8047 dev_err(hba->dev,
8048 "invalid ref_clk setting = %lu\n", freq);
8049 }
8050
8051 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
8052 {
8053 int err;
8054 u32 ref_clk;
8055 u32 freq = hba->dev_ref_clk_freq;
8056
8057 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8058 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
8059
8060 if (err) {
8061 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
8062 err);
8063 goto out;
8064 }
8065
8066 if (ref_clk == freq)
8067 goto out;
8068
8069 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8070 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
8071
8072 if (err) {
8073 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
8074 ufs_ref_clk_freqs[freq].freq_hz);
8075 goto out;
8076 }
8077
8078 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
8079 ufs_ref_clk_freqs[freq].freq_hz);
8080
8081 out:
8082 return err;
8083 }
8084
8085 static int ufshcd_device_params_init(struct ufs_hba *hba)
8086 {
8087 bool flag;
8088 int ret, i;
8089
8090
8091 for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
8092 hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
8093
8094
8095 ret = ufshcd_device_geo_params_init(hba);
8096 if (ret)
8097 goto out;
8098
8099
8100 ret = ufs_get_device_desc(hba);
8101 if (ret) {
8102 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
8103 __func__, ret);
8104 goto out;
8105 }
8106
8107 ufshcd_get_ref_clk_gating_wait(hba);
8108
8109 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
8110 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
8111 hba->dev_info.f_power_on_wp_en = flag;
8112
8113
8114 if (ufshcd_get_max_pwr_mode(hba))
8115 dev_err(hba->dev,
8116 "%s: Failed getting max supported power mode\n",
8117 __func__);
8118 out:
8119 return ret;
8120 }
8121
8122
8123
8124
8125
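/**
 * ufshcd_add_lus - probe and add UFS logical units
 * @hba: per-adapter instance
 */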
8126 static int ufshcd_add_lus(struct ufs_hba *hba)
8127 {
8128 int ret;
8129
8130
8131 ret = ufshcd_scsi_add_wlus(hba);
8132 if (ret)
8133 goto out;
8134
8135
8136 if (ufshcd_is_clkscaling_supported(hba)) {
8137 memcpy(&hba->clk_scaling.saved_pwr_info.info,
8138 &hba->pwr_info,
8139 sizeof(struct ufs_pa_layer_attr));
8140 hba->clk_scaling.saved_pwr_info.is_valid = true;
8141 hba->clk_scaling.is_allowed = true;
8142
8143 ret = ufshcd_devfreq_init(hba);
8144 if (ret)
8145 goto out;
8146
8147 hba->clk_scaling.is_enabled = true;
8148 ufshcd_init_clk_scaling_sysfs(hba);
8149 }
8150
8151 ufs_bsg_probe(hba);
8152 ufshpb_init(hba);
8153 scsi_scan_host(hba->host);
8154 pm_runtime_put_sync(hba->dev);
8155
8156 out:
8157 return ret;
8158 }
8159
8160
8161
8162
8163
8164
8165
8166
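/**
 * ufshcd_probe_hba - probe hba to detect device and initialize it
 * @hba: per-adapter instance
 * @init_dev_params: whether or not to call ufshcd_device_params_init().
 *
 * Execute link-startup and verify device initialization.
 */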
8167 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
8168 {
8169 int ret;
8170 unsigned long flags;
8171 ktime_t start = ktime_get();
8172
8173 hba->ufshcd_state = UFSHCD_STATE_RESET;
8174
8175 ret = ufshcd_link_startup(hba);
8176 if (ret)
8177 goto out;
8178
8179 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
8180 goto out;
8181
8182
8183 ufshcd_clear_dbg_ufs_stats(hba);
8184
8185
8186 ufshcd_set_link_active(hba);
8187
8188
8189 ret = ufshcd_verify_dev_init(hba);
8190 if (ret)
8191 goto out;
8192
8193
8194 ret = ufshcd_complete_dev_init(hba);
8195 if (ret)
8196 goto out;
8197
8198
8199
8200
8201
8202 if (init_dev_params) {
8203 ret = ufshcd_device_params_init(hba);
8204 if (ret)
8205 goto out;
8206 }
8207
8208 ufshcd_tune_unipro_params(hba);
8209
8210
8211 ufshcd_set_ufs_dev_active(hba);
8212 ufshcd_force_reset_auto_bkops(hba);
8213
8214
8215 if (hba->max_pwr_info.is_valid) {
8216
8217
8218
8219
8220 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8221 ufshcd_set_dev_ref_clk(hba);
8222 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8223 if (ret) {
8224 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8225 __func__, ret);
8226 goto out;
8227 }
8228 ufshcd_print_pwr_info(hba);
8229 }
8230
8231
8232
8233
8234
8235
8236
8237 ufshcd_set_active_icc_lvl(hba);
8238
8239 ufshcd_wb_config(hba);
8240 if (hba->ee_usr_mask)
8241 ufshcd_write_ee_control(hba);
8242
8243 ufshcd_auto_hibern8_enable(hba);
8244
8245 ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT);
8246 out:
8247 spin_lock_irqsave(hba->host->host_lock, flags);
8248 if (ret)
8249 hba->ufshcd_state = UFSHCD_STATE_ERROR;
8250 else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
8251 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
8252 spin_unlock_irqrestore(hba->host->host_lock, flags);
8253
8254 trace_ufshcd_init(dev_name(hba->dev), ret,
8255 ktime_to_us(ktime_sub(ktime_get(), start)),
8256 hba->curr_dev_pwr_mode, hba->uic_link_state);
8257 return ret;
8258 }
8259
8260
8261
8262
8263
8264
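/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */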
8265 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8266 {
8267 struct ufs_hba *hba = (struct ufs_hba *)data;
8268 int ret;
8269
8270 down(&hba->host_sem);
8271
8272 ret = ufshcd_probe_hba(hba, true);
8273 up(&hba->host_sem);
8274 if (ret)
8275 goto out;
8276
8277
8278 ret = ufshcd_add_lus(hba);
8279 out:
8280
8281
8282
8283
8284 if (ret) {
8285 pm_runtime_put_sync(hba->dev);
8286 ufshcd_hba_exit(hba);
8287 }
8288 }
8289
8290 static const struct attribute_group *ufshcd_driver_groups[] = {
8291 &ufs_sysfs_unit_descriptor_group,
8292 &ufs_sysfs_lun_attributes_group,
8293 #ifdef CONFIG_SCSI_UFS_HPB
8294 &ufs_sysfs_hpb_stat_group,
8295 &ufs_sysfs_hpb_param_group,
8296 #endif
8297 NULL,
8298 };
8299
8300 static struct ufs_hba_variant_params ufs_hba_vps = {
8301 .hba_enable_delay_us = 1000,
8302 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
8303 .devfreq_profile.polling_ms = 100,
8304 .devfreq_profile.target = ufshcd_devfreq_target,
8305 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
8306 .ondemand_data.upthreshold = 70,
8307 .ondemand_data.downdifferential = 5,
8308 };
8309
8310 static struct scsi_host_template ufshcd_driver_template = {
8311 .module = THIS_MODULE,
8312 .name = UFSHCD,
8313 .proc_name = UFSHCD,
8314 .map_queues = ufshcd_map_queues,
8315 .queuecommand = ufshcd_queuecommand,
8316 .mq_poll = ufshcd_poll,
8317 .slave_alloc = ufshcd_slave_alloc,
8318 .slave_configure = ufshcd_slave_configure,
8319 .slave_destroy = ufshcd_slave_destroy,
8320 .change_queue_depth = ufshcd_change_queue_depth,
8321 .eh_abort_handler = ufshcd_abort,
8322 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
8323 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
8324 .this_id = -1,
8325 .sg_tablesize = SG_ALL,
8326 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
8327 .can_queue = UFSHCD_CAN_QUEUE,
8328 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
8329 .max_sectors = (1 << 20) / SECTOR_SIZE,
8330 .max_host_blocked = 1,
8331 .track_queue_depth = 1,
8332 .sdev_groups = ufshcd_driver_groups,
8333 .dma_boundary = PAGE_SIZE - 1,
8334 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
8335 };
8336
8337 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
8338 int ua)
8339 {
8340 int ret;
8341
8342 if (!vreg)
8343 return 0;
8344
8345
8346
8347
8348
8349
8350
8351 if (!vreg->max_uA)
8352 return 0;
8353
8354 ret = regulator_set_load(vreg->reg, ua);
8355 if (ret < 0) {
8356 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
8357 __func__, vreg->name, ua, ret);
8358 }
8359
8360 return ret;
8361 }
8362
8363 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
8364 struct ufs_vreg *vreg)
8365 {
8366 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
8367 }
8368
8369 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8370 struct ufs_vreg *vreg)
8371 {
8372 if (!vreg)
8373 return 0;
8374
8375 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
8376 }
8377
8378 static int ufshcd_config_vreg(struct device *dev,
8379 struct ufs_vreg *vreg, bool on)
8380 {
8381 if (regulator_count_voltages(vreg->reg) <= 0)
8382 return 0;
8383
8384 return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
8385 }
8386
8387 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8388 {
8389 int ret = 0;
8390
8391 if (!vreg || vreg->enabled)
8392 goto out;
8393
8394 ret = ufshcd_config_vreg(dev, vreg, true);
8395 if (!ret)
8396 ret = regulator_enable(vreg->reg);
8397
8398 if (!ret)
8399 vreg->enabled = true;
8400 else
8401 dev_err(dev, "%s: %s enable failed, err=%d\n",
8402 __func__, vreg->name, ret);
8403 out:
8404 return ret;
8405 }
8406
8407 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8408 {
8409 int ret = 0;
8410
8411 if (!vreg || !vreg->enabled || vreg->always_on)
8412 goto out;
8413
8414 ret = regulator_disable(vreg->reg);
8415
8416 if (!ret) {
8417
8418 ufshcd_config_vreg(dev, vreg, false);
8419 vreg->enabled = false;
8420 } else {
8421 dev_err(dev, "%s: %s disable failed, err=%d\n",
8422 __func__, vreg->name, ret);
8423 }
8424 out:
8425 return ret;
8426 }
8427
8428 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8429 {
8430 int ret = 0;
8431 struct device *dev = hba->dev;
8432 struct ufs_vreg_info *info = &hba->vreg_info;
8433
8434 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8435 if (ret)
8436 goto out;
8437
8438 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8439 if (ret)
8440 goto out;
8441
8442 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
8443
8444 out:
8445 if (ret) {
8446 ufshcd_toggle_vreg(dev, info->vccq2, false);
8447 ufshcd_toggle_vreg(dev, info->vccq, false);
8448 ufshcd_toggle_vreg(dev, info->vcc, false);
8449 }
8450 return ret;
8451 }
8452
8453 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8454 {
8455 struct ufs_vreg_info *info = &hba->vreg_info;
8456
8457 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
8458 }
8459
8460 int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8461 {
8462 int ret = 0;
8463
8464 if (!vreg)
8465 goto out;
8466
8467 vreg->reg = devm_regulator_get(dev, vreg->name);
8468 if (IS_ERR(vreg->reg)) {
8469 ret = PTR_ERR(vreg->reg);
8470 dev_err(dev, "%s: %s get failed, err=%d\n",
8471 __func__, vreg->name, ret);
8472 }
8473 out:
8474 return ret;
8475 }
8476 EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
8477
8478 static int ufshcd_init_vreg(struct ufs_hba *hba)
8479 {
8480 int ret = 0;
8481 struct device *dev = hba->dev;
8482 struct ufs_vreg_info *info = &hba->vreg_info;
8483
8484 ret = ufshcd_get_vreg(dev, info->vcc);
8485 if (ret)
8486 goto out;
8487
8488 ret = ufshcd_get_vreg(dev, info->vccq);
8489 if (!ret)
8490 ret = ufshcd_get_vreg(dev, info->vccq2);
8491 out:
8492 return ret;
8493 }
8494
8495 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8496 {
8497 struct ufs_vreg_info *info = &hba->vreg_info;
8498
8499 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8500 }
8501
8502 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
8503 {
8504 int ret = 0;
8505 struct ufs_clk_info *clki;
8506 struct list_head *head = &hba->clk_list_head;
8507 unsigned long flags;
8508 ktime_t start = ktime_get();
8509 bool clk_state_changed = false;
8510
8511 if (list_empty(head))
8512 goto out;
8513
8514 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8515 if (ret)
8516 return ret;
8517
8518 list_for_each_entry(clki, head, list) {
8519 if (!IS_ERR_OR_NULL(clki->clk)) {
8520
8521
8522
8523
8524 if (ufshcd_is_link_active(hba) &&
8525 clki->keep_link_active)
8526 continue;
8527
8528 clk_state_changed = on ^ clki->enabled;
8529 if (on && !clki->enabled) {
8530 ret = clk_prepare_enable(clki->clk);
8531 if (ret) {
8532 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8533 __func__, clki->name, ret);
8534 goto out;
8535 }
8536 } else if (!on && clki->enabled) {
8537 clk_disable_unprepare(clki->clk);
8538 }
8539 clki->enabled = on;
8540 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8541 clki->name, on ? "en" : "dis");
8542 }
8543 }
8544
8545 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8546 if (ret)
8547 return ret;
8548
8549 out:
8550 if (ret) {
8551 list_for_each_entry(clki, head, list) {
8552 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8553 clk_disable_unprepare(clki->clk);
8554 }
8555 } else if (on) {
8556 spin_lock_irqsave(hba->host->host_lock, flags);
8557 hba->clk_gating.state = CLKS_ON;
8558 trace_ufshcd_clk_gating(dev_name(hba->dev),
8559 hba->clk_gating.state);
8560 spin_unlock_irqrestore(hba->host->host_lock, flags);
8561 }
8562
8563 if (clk_state_changed)
8564 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8565 (on ? "on" : "off"),
8566 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
8567 return ret;
8568 }
8569
8570 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
8571 {
8572 u32 freq;
8573 int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
8574
8575 if (ret) {
8576 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
8577 return REF_CLK_FREQ_INVAL;
8578 }
8579
8580 return ufs_get_bref_clk_from_hz(freq);
8581 }
8582
8583 static int ufshcd_init_clocks(struct ufs_hba *hba)
8584 {
8585 int ret = 0;
8586 struct ufs_clk_info *clki;
8587 struct device *dev = hba->dev;
8588 struct list_head *head = &hba->clk_list_head;
8589
8590 if (list_empty(head))
8591 goto out;
8592
8593 list_for_each_entry(clki, head, list) {
8594 if (!clki->name)
8595 continue;
8596
8597 clki->clk = devm_clk_get(dev, clki->name);
8598 if (IS_ERR(clki->clk)) {
8599 ret = PTR_ERR(clki->clk);
8600 dev_err(dev, "%s: %s clk get failed, %d\n",
8601 __func__, clki->name, ret);
8602 goto out;
8603 }
8604
8605
8606
8607
8608
8609
8610 if (!strcmp(clki->name, "ref_clk"))
8611 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
8612
8613 if (clki->max_freq) {
8614 ret = clk_set_rate(clki->clk, clki->max_freq);
8615 if (ret) {
8616 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8617 __func__, clki->name,
8618 clki->max_freq, ret);
8619 goto out;
8620 }
8621 clki->curr_freq = clki->max_freq;
8622 }
8623 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8624 clki->name, clk_get_rate(clki->clk));
8625 }
8626 out:
8627 return ret;
8628 }
8629
8630 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8631 {
8632 int err = 0;
8633
8634 if (!hba->vops)
8635 goto out;
8636
8637 err = ufshcd_vops_init(hba);
8638 if (err)
8639 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
8640 __func__, ufshcd_get_var_name(hba), err);
8641 out:
8642 return err;
8643 }
8644
8645 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8646 {
8647 if (!hba->vops)
8648 return;
8649
8650 ufshcd_vops_exit(hba);
8651 }
8652
8653 static int ufshcd_hba_init(struct ufs_hba *hba)
8654 {
8655 int err;
8656
8657
8658
8659
8660
8661
8662
8663
8664 err = ufshcd_init_hba_vreg(hba);
8665 if (err)
8666 goto out;
8667
8668 err = ufshcd_setup_hba_vreg(hba, true);
8669 if (err)
8670 goto out;
8671
8672 err = ufshcd_init_clocks(hba);
8673 if (err)
8674 goto out_disable_hba_vreg;
8675
8676 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
8677 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
8678
8679 err = ufshcd_setup_clocks(hba, true);
8680 if (err)
8681 goto out_disable_hba_vreg;
8682
8683 err = ufshcd_init_vreg(hba);
8684 if (err)
8685 goto out_disable_clks;
8686
8687 err = ufshcd_setup_vreg(hba, true);
8688 if (err)
8689 goto out_disable_clks;
8690
8691 err = ufshcd_variant_hba_init(hba);
8692 if (err)
8693 goto out_disable_vreg;
8694
8695 ufs_debugfs_hba_init(hba);
8696
8697 hba->is_powered = true;
8698 goto out;
8699
8700 out_disable_vreg:
8701 ufshcd_setup_vreg(hba, false);
8702 out_disable_clks:
8703 ufshcd_setup_clocks(hba, false);
8704 out_disable_hba_vreg:
8705 ufshcd_setup_hba_vreg(hba, false);
8706 out:
8707 return err;
8708 }
8709
8710 static void ufshcd_hba_exit(struct ufs_hba *hba)
8711 {
8712 if (hba->is_powered) {
8713 ufshcd_exit_clk_scaling(hba);
8714 ufshcd_exit_clk_gating(hba);
8715 if (hba->eh_wq)
8716 destroy_workqueue(hba->eh_wq);
8717 ufs_debugfs_hba_exit(hba);
8718 ufshcd_variant_hba_exit(hba);
8719 ufshcd_setup_vreg(hba, false);
8720 ufshcd_setup_clocks(hba, false);
8721 ufshcd_setup_hba_vreg(hba, false);
8722 hba->is_powered = false;
8723 ufs_put_device_desc(hba);
8724 }
8725 }
8726
8727
8728
8729
8730
8731
8732
8733
8734
8735
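/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if the requested power mode was set successfully, a negative
 * errno otherwise.
 */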
8736 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8737 enum ufs_dev_pwr_mode pwr_mode)
8738 {
8739 unsigned char cmd[6] = { START_STOP };
8740 struct scsi_sense_hdr sshdr;
8741 struct scsi_device *sdp;
8742 unsigned long flags;
8743 int ret, retries;
8744 unsigned long deadline;
8745 int32_t remaining;
8746
8747 spin_lock_irqsave(hba->host->host_lock, flags);
8748 sdp = hba->ufs_device_wlun;
8749 if (sdp) {
8750 ret = scsi_device_get(sdp);
8751 if (!ret && !scsi_device_online(sdp)) {
8752 ret = -ENODEV;
8753 scsi_device_put(sdp);
8754 }
8755 } else {
8756 ret = -ENODEV;
8757 }
8758 spin_unlock_irqrestore(hba->host->host_lock, flags);
8759
8760 if (ret)
8761 return ret;
8762
8763
8764
8765
8766
8767
8768
8769 hba->host->eh_noresume = 1;
8770
8771 cmd[4] = pwr_mode << 4;
8772
8773
8774
8775
8776
8777
8778 deadline = jiffies + 10 * HZ;
8779 for (retries = 3; retries > 0; --retries) {
8780 ret = -ETIMEDOUT;
8781 remaining = deadline - jiffies;
8782 if (remaining <= 0)
8783 break;
8784 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8785 remaining / HZ, 0, 0, RQF_PM, NULL);
8786 if (!scsi_status_is_check_condition(ret) ||
8787 !scsi_sense_valid(&sshdr) ||
8788 sshdr.sense_key != UNIT_ATTENTION)
8789 break;
8790 }
8791 if (ret) {
8792 sdev_printk(KERN_WARNING, sdp,
8793 "START_STOP failed for power mode: %d, result %x\n",
8794 pwr_mode, ret);
8795 if (ret > 0) {
8796 if (scsi_sense_valid(&sshdr))
8797 scsi_print_sense_hdr(sdp, NULL, &sshdr);
8798 ret = -EIO;
8799 }
8800 }
8801
8802 if (!ret)
8803 hba->curr_dev_pwr_mode = pwr_mode;
8804
8805 scsi_device_put(sdp);
8806 hba->host->eh_noresume = 0;
8807 return ret;
8808 }
8809
8810 static int ufshcd_link_state_transition(struct ufs_hba *hba,
8811 enum uic_link_state req_link_state,
8812 int check_for_bkops)
8813 {
8814 int ret = 0;
8815
8816 if (req_link_state == hba->uic_link_state)
8817 return 0;
8818
8819 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8820 ret = ufshcd_uic_hibern8_enter(hba);
8821 if (!ret) {
8822 ufshcd_set_link_hibern8(hba);
8823 } else {
8824 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8825 __func__, ret);
8826 goto out;
8827 }
8828 }
8829
8830
8831
8832
8833
8834 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
8835 (!check_for_bkops || !hba->auto_bkops_enabled)) {
8836
8837
8838
8839
8840
8841
8842
8843
8844
8845
8846 ret = ufshcd_uic_hibern8_enter(hba);
8847 if (ret) {
8848 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8849 __func__, ret);
8850 goto out;
8851 }
8852
8853
8854
8855
8856 ufshcd_hba_stop(hba);
8857
8858
8859
8860
8861 ufshcd_set_link_off(hba);
8862 }
8863
8864 out:
8865 return ret;
8866 }
8867
8868 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8869 {
8870 bool vcc_off = false;
8871
8872
8873
8874
8875
8876
8877
8878 if (!ufshcd_is_link_active(hba) &&
8879 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8880 usleep_range(2000, 2100);
8881
8882
8883
8884
8885
8886
8887
8888
8889
8890
8891
8892
8893
8894
8895
8896
8897 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8898 !hba->dev_info.is_lu_power_on_wp) {
8899 ufshcd_setup_vreg(hba, false);
8900 vcc_off = true;
8901 } else if (!ufshcd_is_ufs_dev_active(hba)) {
8902 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8903 vcc_off = true;
8904 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
8905 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8906 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8907 }
8908 }
8909
8910
8911
8912
8913 if (vcc_off && hba->vreg_info.vcc &&
8914 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
8915 usleep_range(5000, 5100);
8916 }
8917
8918 #ifdef CONFIG_PM
8919 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8920 {
8921 int ret = 0;
8922
8923 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8924 !hba->dev_info.is_lu_power_on_wp) {
8925 ret = ufshcd_setup_vreg(hba, true);
8926 } else if (!ufshcd_is_ufs_dev_active(hba)) {
8927 if (!ufshcd_is_link_active(hba)) {
8928 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8929 if (ret)
8930 goto vcc_disable;
8931 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8932 if (ret)
8933 goto vccq_lpm;
8934 }
8935 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
8936 }
8937 goto out;
8938
8939 vccq_lpm:
8940 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8941 vcc_disable:
8942 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8943 out:
8944 return ret;
8945 }
8946 #endif
8947
8948 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8949 {
8950 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
8951 ufshcd_setup_hba_vreg(hba, false);
8952 }
8953
8954 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8955 {
8956 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
8957 ufshcd_setup_hba_vreg(hba, true);
8958 }
8959
8960 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8961 {
8962 int ret = 0;
8963 int check_for_bkops;
8964 enum ufs_pm_level pm_lvl;
8965 enum ufs_dev_pwr_mode req_dev_pwr_mode;
8966 enum uic_link_state req_link_state;
8967
8968 hba->pm_op_in_progress = true;
8969 if (pm_op != UFS_SHUTDOWN_PM) {
8970 pm_lvl = pm_op == UFS_RUNTIME_PM ?
8971 hba->rpm_lvl : hba->spm_lvl;
8972 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8973 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8974 } else {
8975 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8976 req_link_state = UIC_LINK_OFF_STATE;
8977 }
8978
8979 ufshpb_suspend(hba);
8980
8981
8982
8983
8984
8985 ufshcd_hold(hba, false);
8986 hba->clk_gating.is_suspended = true;
8987
8988 if (ufshcd_is_clkscaling_supported(hba))
8989 ufshcd_clk_scaling_suspend(hba, true);
8990
8991 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8992 req_link_state == UIC_LINK_ACTIVE_STATE) {
8993 goto vops_suspend;
8994 }
8995
8996 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8997 (req_link_state == hba->uic_link_state))
8998 goto enable_scaling;
8999
9000
9001 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
9002 ret = -EINVAL;
9003 goto enable_scaling;
9004 }
9005
9006 if (pm_op == UFS_RUNTIME_PM) {
9007 if (ufshcd_can_autobkops_during_suspend(hba)) {
9008
9009
9010
9011
9012
9013 ret = ufshcd_urgent_bkops(hba);
9014 if (ret)
9015 goto enable_scaling;
9016 } else {
9017
9018 ufshcd_disable_auto_bkops(hba);
9019 }
9020
9021
9022
9023
9024
9025 hba->dev_info.b_rpm_dev_flush_capable =
9026 hba->auto_bkops_enabled ||
9027 (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
9028 ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
9029 ufshcd_is_auto_hibern8_enabled(hba))) &&
9030 ufshcd_wb_need_flush(hba));
9031 }
9032
9033 flush_work(&hba->eeh_work);
9034
9035 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9036 if (ret)
9037 goto enable_scaling;
9038
9039 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
9040 if (pm_op != UFS_RUNTIME_PM)
9041
9042 ufshcd_disable_auto_bkops(hba);
9043
9044 if (!hba->dev_info.b_rpm_dev_flush_capable) {
9045 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
9046 if (ret)
9047 goto enable_scaling;
9048 }
9049 }
9050
9051
9052
9053
9054
9055 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
9056 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
9057 if (ret)
9058 goto set_dev_active;
9059
9060 vops_suspend:
9061
9062
9063
9064
9065
9066 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9067 if (ret)
9068 goto set_link_active;
9069 goto out;
9070
9071 set_link_active:
9072
9073
9074
9075
9076
9077 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9078 ufshcd_device_reset(hba);
9079 WARN_ON(!ufshcd_is_link_off(hba));
9080 }
9081 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
9082 ufshcd_set_link_active(hba);
9083 else if (ufshcd_is_link_off(hba))
9084 ufshcd_host_reset_and_restore(hba);
9085 set_dev_active:
9086
9087 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9088 ufshcd_device_reset(hba);
9089 ufshcd_host_reset_and_restore(hba);
9090 }
9091 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
9092 ufshcd_disable_auto_bkops(hba);
9093 enable_scaling:
9094 if (ufshcd_is_clkscaling_supported(hba))
9095 ufshcd_clk_scaling_suspend(hba, false);
9096
9097 hba->dev_info.b_rpm_dev_flush_capable = false;
9098 out:
9099 if (hba->dev_info.b_rpm_dev_flush_capable) {
9100 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
9101 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
9102 }
9103
9104 if (ret) {
9105 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
9106 hba->clk_gating.is_suspended = false;
9107 ufshcd_release(hba);
9108 ufshpb_resume(hba);
9109 }
9110 hba->pm_op_in_progress = false;
9111 return ret;
9112 }
9113
9114 #ifdef CONFIG_PM
9115 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9116 {
9117 int ret;
9118 enum uic_link_state old_link_state = hba->uic_link_state;
9119
9120 hba->pm_op_in_progress = true;
9121
9122
9123
9124
9125
9126
9127 ret = ufshcd_vops_resume(hba, pm_op);
9128 if (ret)
9129 goto out;
9130
9131
9132 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
9133
9134 if (ufshcd_is_link_hibern8(hba)) {
9135 ret = ufshcd_uic_hibern8_exit(hba);
9136 if (!ret) {
9137 ufshcd_set_link_active(hba);
9138 } else {
9139 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
9140 __func__, ret);
9141 goto vendor_suspend;
9142 }
9143 } else if (ufshcd_is_link_off(hba)) {
9144
9145
9146
9147
9148
9149
9150 ret = ufshcd_reset_and_restore(hba);
9151
9152
9153
9154
9155 if (ret || !ufshcd_is_link_active(hba))
9156 goto vendor_suspend;
9157 }
9158
9159 if (!ufshcd_is_ufs_dev_active(hba)) {
9160 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
9161 if (ret)
9162 goto set_old_link_state;
9163 }
9164
9165 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
9166 ufshcd_enable_auto_bkops(hba);
9167 else
9168
9169
9170
9171
9172 ufshcd_urgent_bkops(hba);
9173
9174 if (hba->ee_usr_mask)
9175 ufshcd_write_ee_control(hba);
9176
9177 if (ufshcd_is_clkscaling_supported(hba))
9178 ufshcd_clk_scaling_suspend(hba, false);
9179
9180 if (hba->dev_info.b_rpm_dev_flush_capable) {
9181 hba->dev_info.b_rpm_dev_flush_capable = false;
9182 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
9183 }
9184
9185
9186 ufshcd_auto_hibern8_enable(hba);
9187
9188 ufshpb_resume(hba);
9189 goto out;
9190
9191 set_old_link_state:
9192 ufshcd_link_state_transition(hba, old_link_state, 0);
9193 vendor_suspend:
9194 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9195 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9196 out:
9197 if (ret)
9198 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
9199 hba->clk_gating.is_suspended = false;
9200 ufshcd_release(hba);
9201 hba->pm_op_in_progress = false;
9202 return ret;
9203 }
9204
9205 static int ufshcd_wl_runtime_suspend(struct device *dev)
9206 {
9207 struct scsi_device *sdev = to_scsi_device(dev);
9208 struct ufs_hba *hba;
9209 int ret;
9210 ktime_t start = ktime_get();
9211
9212 hba = shost_priv(sdev->host);
9213
9214 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
9215 if (ret)
9216 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9217
9218 trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
9219 ktime_to_us(ktime_sub(ktime_get(), start)),
9220 hba->curr_dev_pwr_mode, hba->uic_link_state);
9221
9222 return ret;
9223 }
9224
9225 static int ufshcd_wl_runtime_resume(struct device *dev)
9226 {
9227 struct scsi_device *sdev = to_scsi_device(dev);
9228 struct ufs_hba *hba;
9229 int ret = 0;
9230 ktime_t start = ktime_get();
9231
9232 hba = shost_priv(sdev->host);
9233
9234 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
9235 if (ret)
9236 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9237
9238 trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
9239 ktime_to_us(ktime_sub(ktime_get(), start)),
9240 hba->curr_dev_pwr_mode, hba->uic_link_state);
9241
9242 return ret;
9243 }
9244 #endif
9245
9246 #ifdef CONFIG_PM_SLEEP
9247 static int ufshcd_wl_suspend(struct device *dev)
9248 {
9249 struct scsi_device *sdev = to_scsi_device(dev);
9250 struct ufs_hba *hba;
9251 int ret = 0;
9252 ktime_t start = ktime_get();
9253
9254 hba = shost_priv(sdev->host);
9255 down(&hba->host_sem);
9256
9257 if (pm_runtime_suspended(dev))
9258 goto out;
9259
9260 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
9261 if (ret) {
9262 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9263 up(&hba->host_sem);
9264 }
9265
9266 out:
9267 if (!ret)
9268 hba->is_sys_suspended = true;
9269 trace_ufshcd_wl_suspend(dev_name(dev), ret,
9270 ktime_to_us(ktime_sub(ktime_get(), start)),
9271 hba->curr_dev_pwr_mode, hba->uic_link_state);
9272
9273 return ret;
9274 }
9275
9276 static int ufshcd_wl_resume(struct device *dev)
9277 {
9278 struct scsi_device *sdev = to_scsi_device(dev);
9279 struct ufs_hba *hba;
9280 int ret = 0;
9281 ktime_t start = ktime_get();
9282
9283 hba = shost_priv(sdev->host);
9284
9285 if (pm_runtime_suspended(dev))
9286 goto out;
9287
9288 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
9289 if (ret)
9290 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9291 out:
9292 trace_ufshcd_wl_resume(dev_name(dev), ret,
9293 ktime_to_us(ktime_sub(ktime_get(), start)),
9294 hba->curr_dev_pwr_mode, hba->uic_link_state);
9295 if (!ret)
9296 hba->is_sys_suspended = false;
9297 up(&hba->host_sem);
9298 return ret;
9299 }
9300 #endif
9301
9302 static void ufshcd_wl_shutdown(struct device *dev)
9303 {
9304 struct scsi_device *sdev = to_scsi_device(dev);
9305 struct ufs_hba *hba;
9306
9307 hba = shost_priv(sdev->host);
9308
9309 down(&hba->host_sem);
9310 hba->shutting_down = true;
9311 up(&hba->host_sem);
9312
9313
9314 ufshcd_rpm_get_sync(hba);
9315 scsi_device_quiesce(sdev);
9316 shost_for_each_device(sdev, hba->host) {
9317 if (sdev == hba->ufs_device_wlun)
9318 continue;
9319 scsi_device_quiesce(sdev);
9320 }
9321 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
9322 }
9323
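/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 *
 * This function will disable the host controller interrupt, turn off the
 * clocks and put the device and host controller regulators in low power mode.
 */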
9331 static int ufshcd_suspend(struct ufs_hba *hba)
9332 {
9333 int ret;
9334
9335 if (!hba->is_powered)
9336 return 0;
9337
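/*
 * No host controller transactions are expected until resume, so disable
 * the host interrupt before turning off the clocks.
 */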
9341 ufshcd_disable_irq(hba);
9342 ret = ufshcd_setup_clocks(hba, false);
9343 if (ret) {
9344 ufshcd_enable_irq(hba);
9345 return ret;
9346 }
9347 if (ufshcd_is_clkgating_allowed(hba)) {
9348 hba->clk_gating.state = CLKS_OFF;
9349 trace_ufshcd_clk_gating(dev_name(hba->dev),
9350 hba->clk_gating.state);
9351 }
9352
9353 ufshcd_vreg_set_lpm(hba);
9354
9355 ufshcd_hba_vreg_set_lpm(hba);
9356 return ret;
9357 }
9358
9359 #ifdef CONFIG_PM
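/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 *
 * This function basically turns on the regulators, clocks and interrupts of
 * the host controller.
 *
 * Returns 0 for success and non-zero for failure.
 */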
9369 static int ufshcd_resume(struct ufs_hba *hba)
9370 {
9371 int ret;
9372
9373 if (!hba->is_powered)
9374 return 0;
9375
9376 ufshcd_hba_vreg_set_hpm(hba);
9377 ret = ufshcd_vreg_set_hpm(hba);
9378 if (ret)
9379 goto out;
9380
9381
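/* Make sure clocks are enabled before accessing the controller */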
9382 ret = ufshcd_setup_clocks(hba, true);
9383 if (ret)
9384 goto disable_vreg;
9385
9386
9387 ufshcd_enable_irq(hba);
9388 goto out;
9389
9390 disable_vreg:
9391 ufshcd_vreg_set_lpm(hba);
9392 out:
9393 if (ret)
9394 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
9395 return ret;
9396 }
9397 #endif
9398
9399 #ifdef CONFIG_PM_SLEEP
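/**
 * ufshcd_system_suspend - system suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed before putting the system into a sleep state in which the contents
 * of main memory are preserved.
 *
 * Returns 0 for success and non-zero for failure.
 */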
9409 int ufshcd_system_suspend(struct device *dev)
9410 {
9411 struct ufs_hba *hba = dev_get_drvdata(dev);
9412 int ret = 0;
9413 ktime_t start = ktime_get();
9414
9415 if (pm_runtime_suspended(hba->dev))
9416 goto out;
9417
9418 ret = ufshcd_suspend(hba);
9419 out:
9420 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9421 ktime_to_us(ktime_sub(ktime_get(), start)),
9422 hba->curr_dev_pwr_mode, hba->uic_link_state);
9423 return ret;
9424 }
9425 EXPORT_SYMBOL(ufshcd_system_suspend);
9426
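/**
 * ufshcd_system_resume - system resume callback
 * @dev: Device associated with the UFS controller.
 *
 * Executed after waking the system up from a sleep state in which the contents
 * of main memory were preserved.
 *
 * Returns 0 for success and non-zero for failure.
 */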
9436 int ufshcd_system_resume(struct device *dev)
9437 {
9438 struct ufs_hba *hba = dev_get_drvdata(dev);
9439 ktime_t start = ktime_get();
9440 int ret = 0;
9441
9442 if (pm_runtime_suspended(hba->dev))
9443 goto out;
9444
9445 ret = ufshcd_resume(hba);
9446
9447 out:
9448 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
9449 ktime_to_us(ktime_sub(ktime_get(), start)),
9450 hba->curr_dev_pwr_mode, hba->uic_link_state);
9451
9452 return ret;
9453 }
9454 EXPORT_SYMBOL(ufshcd_system_resume);
9455 #endif
9456
9457 #ifdef CONFIG_PM
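/**
 * ufshcd_runtime_suspend - runtime suspend callback
 * @dev: Device associated with the UFS controller.
 *
 * Check the description of ufshcd_suspend() for more details.
 *
 * Returns 0 for success and non-zero for failure.
 */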
9466 int ufshcd_runtime_suspend(struct device *dev)
9467 {
9468 struct ufs_hba *hba = dev_get_drvdata(dev);
9469 int ret;
9470 ktime_t start = ktime_get();
9471
9472 ret = ufshcd_suspend(hba);
9473
9474 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9475 ktime_to_us(ktime_sub(ktime_get(), start)),
9476 hba->curr_dev_pwr_mode, hba->uic_link_state);
9477 return ret;
9478 }
9479 EXPORT_SYMBOL(ufshcd_runtime_suspend);
9480
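/**
 * ufshcd_runtime_resume - runtime resume callback
 * @dev: Device associated with the UFS controller.
 *
 * This function brings the host controller clocks, regulators and interrupts
 * back to the active state. Check the description of ufshcd_resume() for more
 * details.
 *
 * Returns 0 for success and non-zero for failure.
 */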
9491 int ufshcd_runtime_resume(struct device *dev)
9492 {
9493 struct ufs_hba *hba = dev_get_drvdata(dev);
9494 int ret;
9495 ktime_t start = ktime_get();
9496
9497 ret = ufshcd_resume(hba);
9498
9499 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9500 ktime_to_us(ktime_sub(ktime_get(), start)),
9501 hba->curr_dev_pwr_mode, hba->uic_link_state);
9502 return ret;
9503 }
9504 EXPORT_SYMBOL(ufshcd_runtime_resume);
9505 #endif
9506
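/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * Turns off the host controller clocks and regulators if the UFS device has
 * already been powered off and its link has been turned off (see
 * ufshcd_wl_shutdown()).
 *
 * Always returns 0 so that shutdown is never blocked by errors.
 */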
9516 int ufshcd_shutdown(struct ufs_hba *hba)
9517 {
9518 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9519 ufshcd_suspend(hba);
9520
9521 hba->is_powered = false;
9522
9523 return 0;
9524 }
9525 EXPORT_SYMBOL(ufshcd_shutdown);
9526
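/**
 * ufshcd_remove - de-allocate SCSI host and host memory space data structures
 * @hba: per adapter instance
 */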
9532 void ufshcd_remove(struct ufs_hba *hba)
9533 {
9534 if (hba->ufs_device_wlun)
9535 ufshcd_rpm_get_sync(hba);
9536 ufs_hwmon_remove(hba);
9537 ufs_bsg_remove(hba);
9538 ufshpb_remove(hba);
9539 ufs_sysfs_remove_nodes(hba->dev);
9540 blk_mq_destroy_queue(hba->tmf_queue);
9541 blk_mq_free_tag_set(&hba->tmf_tag_set);
9542 scsi_remove_host(hba->host);
9543
9544 ufshcd_disable_intr(hba, hba->intr_mask);
9545 ufshcd_hba_stop(hba);
9546 ufshcd_hba_exit(hba);
9547 }
9548 EXPORT_SYMBOL_GPL(ufshcd_remove);
9549
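/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter handle
 */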
9554 void ufshcd_dealloc_host(struct ufs_hba *hba)
9555 {
9556 scsi_host_put(hba->host);
9557 }
9558 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
9559
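/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure.
 */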
9567 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9568 {
9569 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9570 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9571 return 0;
9572 }
9573 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9574 }
9575
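/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 *
 * Returns 0 on success, non-zero value on failure.
 */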
9582 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
9583 {
9584 struct Scsi_Host *host;
9585 struct ufs_hba *hba;
9586 int err = 0;
9587
9588 if (!dev) {
9589 dev_err(dev,
9590 "Invalid memory reference for dev is NULL\n");
9591 err = -ENODEV;
9592 goto out_error;
9593 }
9594
9595 host = scsi_host_alloc(&ufshcd_driver_template,
9596 sizeof(struct ufs_hba));
9597 if (!host) {
9598 dev_err(dev, "scsi_host_alloc failed\n");
9599 err = -ENOMEM;
9600 goto out_error;
9601 }
9602 host->nr_maps = HCTX_TYPE_POLL + 1;
9603 hba = shost_priv(host);
9604 hba->host = host;
9605 hba->dev = dev;
9606 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
9607 hba->nop_out_timeout = NOP_OUT_TIMEOUT;
9608 INIT_LIST_HEAD(&hba->clk_list_head);
9609 spin_lock_init(&hba->outstanding_lock);
9610
9611 *hba_handle = hba;
9612
9613 out_error:
9614 return err;
9615 }
9616 EXPORT_SYMBOL(ufshcd_alloc_host);
9617
9618
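/*
 * The TMF queue exists only so that task management tags can be allocated
 * through blk_mq; TMF requests are never dispatched through the block layer,
 * hence the WARN_ON_ONCE() below.
 */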
9619 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
9620 const struct blk_mq_queue_data *qd)
9621 {
9622 WARN_ON_ONCE(true);
9623 return BLK_STS_NOTSUPP;
9624 }
9625
9626 static const struct blk_mq_ops ufshcd_tmf_ops = {
9627 .queue_rq = ufshcd_queue_tmf,
9628 };
9629
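/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Returns 0 on success, non-zero value on failure.
 */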
9637 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
9638 {
9639 int err;
9640 struct Scsi_Host *host = hba->host;
9641 struct device *dev = hba->dev;
9642 char eh_wq_name[sizeof("ufs_eh_wq_00")];
9643
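/*
 * dev_set_drvdata() must be called before any callbacks are registered
 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
 * sysfs).
 */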
9649 dev_set_drvdata(dev, hba);
9650
9651 if (!mmio_base) {
9652 dev_err(hba->dev,
9653 "Invalid memory reference for mmio_base is NULL\n");
9654 err = -ENODEV;
9655 goto out_error;
9656 }
9657
9658 hba->mmio_base = mmio_base;
9659 hba->irq = irq;
9660 hba->vps = &ufs_hba_vps;
9661
9662 err = ufshcd_hba_init(hba);
9663 if (err)
9664 goto out_error;
9665
9666
9667 err = ufshcd_hba_capabilities(hba);
9668 if (err)
9669 goto out_disable;
9670
9671
9672 hba->ufs_version = ufshcd_get_ufs_version(hba);
9673
9674
9675 hba->intr_mask = ufshcd_get_intr_mask(hba);
9676
9677 err = ufshcd_set_dma_mask(hba);
9678 if (err) {
9679 dev_err(hba->dev, "set dma mask failed\n");
9680 goto out_disable;
9681 }
9682
9683
9684 err = ufshcd_memory_alloc(hba);
9685 if (err) {
9686 dev_err(hba->dev, "Memory allocation failed\n");
9687 goto out_disable;
9688 }
9689
9690
9691 ufshcd_host_memory_configure(hba);
9692
9693 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
9694 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
9695 host->max_id = UFSHCD_MAX_ID;
9696 host->max_lun = UFS_MAX_LUNS;
9697 host->max_channel = UFSHCD_MAX_CHANNEL;
9698 host->unique_id = host->host_no;
9699 host->max_cmd_len = UFS_CDB_SIZE;
9700
9701 hba->max_pwr_info.is_valid = false;
9702
9703
9704 snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
9705 hba->host->host_no);
9706 hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
9707 if (!hba->eh_wq) {
9708 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
9709 __func__);
9710 err = -ENOMEM;
9711 goto out_disable;
9712 }
9713 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
9714 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
9715
9716 sema_init(&hba->host_sem, 1);
9717
9718
9719 mutex_init(&hba->uic_cmd_mutex);
9720
9721
9722 mutex_init(&hba->dev_cmd.lock);
9723
9724
9725 mutex_init(&hba->ee_ctrl_mutex);
9726
9727 init_rwsem(&hba->clk_scaling_lock);
9728
9729 ufshcd_init_clk_gating(hba);
9730
9731 ufshcd_init_clk_scaling(hba);
9732
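/*
 * In order to avoid any spurious interrupt immediately after registering
 * the UFS controller interrupt handler, clear any pending UFS interrupt
 * status and disable all UFS interrupts.
 */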
9738 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
9739 REG_INTERRUPT_STATUS);
9740 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
9741
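/*
 * Make sure that UFS interrupts are disabled and any pending interrupt
 * status is cleared before registering the UFS interrupt handler.
 */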
9745 mb();
9746
9747
9748 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
9749 if (err) {
9750 dev_err(hba->dev, "request irq failed\n");
9751 goto out_disable;
9752 } else {
9753 hba->is_irq_enabled = true;
9754 }
9755
9756 err = scsi_add_host(host, hba->dev);
9757 if (err) {
9758 dev_err(hba->dev, "scsi_add_host failed\n");
9759 goto out_disable;
9760 }
9761
9762 hba->tmf_tag_set = (struct blk_mq_tag_set) {
9763 .nr_hw_queues = 1,
9764 .queue_depth = hba->nutmrs,
9765 .ops = &ufshcd_tmf_ops,
9766 .flags = BLK_MQ_F_NO_SCHED,
9767 };
9768 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
9769 if (err < 0)
9770 goto out_remove_scsi_host;
9771 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
9772 if (IS_ERR(hba->tmf_queue)) {
9773 err = PTR_ERR(hba->tmf_queue);
9774 goto free_tmf_tag_set;
9775 }
9776 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
9777 sizeof(*hba->tmf_rqs), GFP_KERNEL);
9778 if (!hba->tmf_rqs) {
9779 err = -ENOMEM;
9780 goto free_tmf_queue;
9781 }
9782
9783
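/* Reset the attached device */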
9784 ufshcd_device_reset(hba);
9785
9786 ufshcd_init_crypto(hba);
9787
9788
9789 err = ufshcd_hba_enable(hba);
9790 if (err) {
9791 dev_err(hba->dev, "Host controller enable failed\n");
9792 ufshcd_print_evt_hist(hba);
9793 ufshcd_print_host_state(hba);
9794 goto free_tmf_queue;
9795 }
9796
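/*
 * Set the default power management level for runtime and system PM:
 * keep the UFS link in Hibern8 state and the UFS device in sleep state.
 */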
9802 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9803 UFS_SLEEP_PWR_MODE,
9804 UIC_LINK_HIBERN8_STATE);
9805 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9806 UFS_SLEEP_PWR_MODE,
9807 UIC_LINK_HIBERN8_STATE);
9808
9809 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
9810 ufshcd_rpm_dev_flush_recheck_work);
9811
9812
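/* Set the default auto-hibernate idle timer to 150 ms */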
9813 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
9814 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
9815 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
9816 }
9817
9818
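/* Hold auto suspend until async scan completes */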
9819 pm_runtime_get_sync(dev);
9820 atomic_set(&hba->scsi_block_reqs_cnt, 0);
9821
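/*
 * Assume the device was not put in sleep/power-down state exclusively
 * during the boot stage before the kernel. This avoids doing link startup
 * twice during ufshcd_probe_hba().
 */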
9827 ufshcd_set_ufs_dev_active(hba);
9828
9829 async_schedule(ufshcd_async_scan, hba);
9830 ufs_sysfs_add_nodes(hba->dev);
9831
9832 device_enable_async_suspend(dev);
9833 return 0;
9834
9835 free_tmf_queue:
9836 blk_mq_destroy_queue(hba->tmf_queue);
9837 free_tmf_tag_set:
9838 blk_mq_free_tag_set(&hba->tmf_tag_set);
9839 out_remove_scsi_host:
9840 scsi_remove_host(hba->host);
9841 out_disable:
9842 hba->is_irq_enabled = false;
9843 ufshcd_hba_exit(hba);
9844 out_error:
9845 return err;
9846 }
9847 EXPORT_SYMBOL_GPL(ufshcd_init);
9848
9849 void ufshcd_resume_complete(struct device *dev)
9850 {
9851 struct ufs_hba *hba = dev_get_drvdata(dev);
9852
9853 if (hba->complete_put) {
9854 ufshcd_rpm_put(hba);
9855 hba->complete_put = false;
9856 }
9857 }
9858 EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
9859
9860 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
9861 {
9862 struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
9863 enum ufs_dev_pwr_mode dev_pwr_mode;
9864 enum uic_link_state link_state;
9865 unsigned long flags;
9866 bool res;
9867
9868 spin_lock_irqsave(&dev->power.lock, flags);
9869 dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
9870 link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
9871 res = pm_runtime_suspended(dev) &&
9872 hba->curr_dev_pwr_mode == dev_pwr_mode &&
9873 hba->uic_link_state == link_state &&
9874 !hba->dev_info.b_rpm_dev_flush_capable;
9875 spin_unlock_irqrestore(&dev->power.lock, flags);
9876
9877 return res;
9878 }
9879
9880 int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
9881 {
9882 struct ufs_hba *hba = dev_get_drvdata(dev);
9883 int ret;
9884
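/*
 * Make sure the device WLUN is resumed before the rest of the system
 * suspends; the runtime PM reference taken here is dropped in
 * ufshcd_resume_complete().
 */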
9891 if (hba->ufs_device_wlun) {
9892
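/* Prevent runtime suspend of the device WLUN */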
9893 ufshcd_rpm_get_noresume(hba);
9894
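/*
 * Skip the resume if the WLUN is already runtime suspended in the same
 * state that system suspend would put it in.
 */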
9898 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
9899
9900 ret = ufshcd_rpm_resume(hba);
9901 if (ret < 0 && ret != -EACCES) {
9902 ufshcd_rpm_put(hba);
9903 return ret;
9904 }
9905 }
9906 hba->complete_put = true;
9907 }
9908 return 0;
9909 }
9910 EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
9911
9912 int ufshcd_suspend_prepare(struct device *dev)
9913 {
9914 return __ufshcd_suspend_prepare(dev, true);
9915 }
9916 EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
9917
9918 #ifdef CONFIG_PM_SLEEP
9919 static int ufshcd_wl_poweroff(struct device *dev)
9920 {
9921 struct scsi_device *sdev = to_scsi_device(dev);
9922 struct ufs_hba *hba = shost_priv(sdev->host);
9923
9924 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
9925 return 0;
9926 }
9927 #endif
9928
9929 static int ufshcd_wl_probe(struct device *dev)
9930 {
9931 struct scsi_device *sdev = to_scsi_device(dev);
9932
9933 if (!is_device_wlun(sdev))
9934 return -ENODEV;
9935
9936 blk_pm_runtime_init(sdev->request_queue, dev);
9937 pm_runtime_set_autosuspend_delay(dev, 0);
9938 pm_runtime_allow(dev);
9939
9940 return 0;
9941 }
9942
9943 static int ufshcd_wl_remove(struct device *dev)
9944 {
9945 pm_runtime_forbid(dev);
9946 return 0;
9947 }
9948
9949 static const struct dev_pm_ops ufshcd_wl_pm_ops = {
9950 #ifdef CONFIG_PM_SLEEP
9951 .suspend = ufshcd_wl_suspend,
9952 .resume = ufshcd_wl_resume,
9953 .freeze = ufshcd_wl_suspend,
9954 .thaw = ufshcd_wl_resume,
9955 .poweroff = ufshcd_wl_poweroff,
9956 .restore = ufshcd_wl_resume,
9957 #endif
9958 SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
9959 };
9960
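/*
 * ufs_dev_wlun_template - SCSI driver for the UFS device well-known LUN,
 * which is used to send power management commands to the UFS device.
 */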
9971 static struct scsi_driver ufs_dev_wlun_template = {
9972 .gendrv = {
9973 .name = "ufs_device_wlun",
9974 .owner = THIS_MODULE,
9975 .probe = ufshcd_wl_probe,
9976 .remove = ufshcd_wl_remove,
9977 .pm = &ufshcd_wl_pm_ops,
9978 .shutdown = ufshcd_wl_shutdown,
9979 },
9980 };
9981
9982 static int __init ufshcd_core_init(void)
9983 {
9984 int ret;
9985
9986
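/* Verify that there are no gaps in struct utp_transfer_cmd_desc. */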
9987 static_assert(sizeof(struct utp_transfer_cmd_desc) ==
9988 2 * ALIGNED_UPIU_SIZE +
9989 SG_ALL * sizeof(struct ufshcd_sg_entry));
9990
9991 ufs_debugfs_init();
9992
9993 ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
9994 if (ret)
9995 ufs_debugfs_exit();
9996 return ret;
9997 }
9998
9999 static void __exit ufshcd_core_exit(void)
10000 {
10001 ufs_debugfs_exit();
10002 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
10003 }
10004
10005 module_init(ufshcd_core_init);
10006 module_exit(ufshcd_core_exit);
10007
10008 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
10009 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10010 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10011 MODULE_LICENSE("GPL");