/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Universal Flash Storage Host controller driver
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#ifndef _UFSHCD_H
#define _UFSHCD_H

#include <linux/bitfield.h>
#include <linux/blk-crypto-profile.h>
#include <linux/blk-mq.h>
#include <linux/devfreq.h>
#include <linux/pm_runtime.h>
#include <scsi/scsi_device.h>
#include <ufs/unipro.h>
#include <ufs/ufs.h>
#include <ufs/ufs_quirks.h>
#include <ufs/ufshci.h>

#define UFSHCD "ufshcd"

struct ufs_hba;

enum dev_cmd_type {
	DEV_CMD_TYPE_NOP		= 0x0,
	DEV_CMD_TYPE_QUERY		= 0x1,
};

enum ufs_event_type {
	/* uic specific errors */
	UFS_EVT_PA_ERR = 0,
	UFS_EVT_DL_ERR,
	UFS_EVT_NL_ERR,
	UFS_EVT_TL_ERR,
	UFS_EVT_DME_ERR,

	/* fatal errors */
	UFS_EVT_AUTO_HIBERN8_ERR,
	UFS_EVT_FATAL_ERR,
	UFS_EVT_LINK_STARTUP_FAIL,
	UFS_EVT_RESUME_ERR,
	UFS_EVT_SUSPEND_ERR,
	UFS_EVT_WL_SUSP_ERR,
	UFS_EVT_WL_RES_ERR,

	/* abnormal events */
	UFS_EVT_DEV_RESET,
	UFS_EVT_HOST_RESET,
	UFS_EVT_ABORT,

	UFS_EVT_CNT,
};

/**
 * struct uic_command - UIC command structure
 * @command: UIC command
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: Indicate if UIC command is outstanding
 * @done: UIC command completion
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	struct completion done;
};

/* Used to differentiate the power management options */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

/* Host <-> Device UniPro Link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
	UIC_LINK_BROKEN_STATE	= 3, /* Link is in broken state */
};

#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
				   UIC_LINK_BROKEN_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
				    UIC_LINK_BROKEN_STATE)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_set_ufs_dev_deepsleep(h) \
	((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_deepsleep(h) \
	((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE)
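
/*
 * Usage sketch (illustrative, not part of the original header): a PM path
 * typically checks the current device/link state with the ufshcd_is_*
 * helpers above and records a transition with the ufshcd_set_* helpers,
 * e.g. before entering a low power state:
 *
 *	if (ufshcd_is_ufs_dev_active(hba) && ufshcd_is_link_active(hba)) {
 *		ufshcd_set_ufs_dev_sleep(hba);
 *		ufshcd_set_link_hibern8(hba);
 *	}
 */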

/*
 * UFS Power management levels.
 * Each level is in increasing order of power savings, except DeepSleep
 * which is lower than PowerDown with power on but not PowerDown with
 * power off.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_6,
	UFS_PM_LVL_MAX
};

struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};

/**
 * struct ufshcd_lrb - local reference block
 * @utr_descriptor_ptr: UTRD address of the command
 * @ucd_req_ptr: UCD address of the command
 * @ucd_rsp_ptr: Response UPIU address for this command
 * @ucd_prdt_ptr: PRDT address of the command
 * @utrd_dma_addr: UTRD dma address for debug
 * @ucd_req_dma_addr: UPIU request dma address for debug
 * @ucd_rsp_dma_addr: UPIU response dma address for debug
 * @ucd_prdt_dma_addr: PRDT dma address for debug
 * @cmd: pointer to SCSI command
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, Query.
 * @task_tag: Task tag of the command
 * @lun: LUN of the command
 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
 * @issue_time_stamp: time stamp for debug purposes
 * @compl_time_stamp: time stamp for statistics
 * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
 * @data_unit_num: the data unit number for the first block for inline crypto
 * @req_abort_skip: skip request abort task flag
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun; /* UPIU LUN id field is only 8-bit wide */
	bool intr_cmd;
	ktime_t issue_time_stamp;
	ktime_t compl_time_stamp;
#ifdef CONFIG_SCSI_UFS_CRYPTO
	int crypto_key_slot;
	u64 data_unit_num;
#endif

	bool req_abort_skip;
};

/**
 * struct ufs_query - holds relevant data structures for query request
 * @request: request upiu and function
 * @descriptor: buffer for sending/receiving descriptor
 * @response: response upiu and response
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};

/**
 * struct ufs_dev_cmd - all associated fields with device management commands
 * @type: device management command type - Query, NOP OUT
 * @lock: lock to allow one command at a time
 * @complete: internal commands completion
 * @query: Device management query information
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	struct ufs_query query;
};

/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list headed by hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: min frequency that can be used for clock scaling
 * @curr_freq: indicates the current frequency that it is set to
 * @keep_link_active: indicates that the clk should not be disabled if
 *		      link is active
 * @enabled: variable to check against multiple enable/disable
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool keep_link_active;
	bool enabled;
};

enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};

struct ufs_pa_layer_attr {
	u32 gear_rx;
	u32 gear_tx;
	u32 lane_rx;
	u32 lane_tx;
	u32 pwr_rx;
	u32 pwr_tx;
	u32 hs_rate;
};

struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};

/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to cleanup everything done in init
 * @get_ufs_hci_version: called to get UFS HCI version
 * @clk_scale_notify: notifies that clks are scaled up/down
 * @setup_clocks: called before touching any of the controller registers
 * @hce_enable_notify: called before and after HCE enable bit is set to allow
 *                     variant specific Uni-Pro initialization.
 * @link_startup_notify: called before and after Link startup is carried out
 *                       to allow variant specific Uni-Pro initialization.
 * @pwr_change_notify: called before and after a power mode change is carried
 *                     out to allow vendor specific capabilities to be set.
 * @setup_xfer_req: called before any transfer request is issued
 * @setup_task_mgmt: called before any task management request is issued
 * @hibern8_notify: called around hibern8 enter/exit
 * @apply_dev_quirks: called to apply device specific quirks
 * @fixup_dev_quirks: called to modify device specific quirks
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: used to dump controller debug information
 * @phy_initialization: used to initialize phys
 * @device_reset: called to issue a reset pulse on the UFS device
 * @config_scaling_param: called to configure clock scaling parameters
 * @program_key: program or evict an inline encryption key
 * @event_notify: called to notify important events
 */
struct ufs_hba_variant_ops {
	const char *name;
	int	(*init)(struct ufs_hba *);
	void	(*exit)(struct ufs_hba *);
	u32	(*get_ufs_hci_version)(struct ufs_hba *);
	int	(*clk_scale_notify)(struct ufs_hba *, bool,
				    enum ufs_notify_change_status);
	int	(*setup_clocks)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int	(*hce_enable_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status);
	int	(*link_startup_notify)(struct ufs_hba *,
				       enum ufs_notify_change_status);
	int	(*pwr_change_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status status,
				     struct ufs_pa_layer_attr *,
				     struct ufs_pa_layer_attr *);
	void	(*setup_xfer_req)(struct ufs_hba *hba, int tag,
				  bool is_scsi_cmd);
	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void	(*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
				  enum ufs_notify_change_status);
	int	(*apply_dev_quirks)(struct ufs_hba *hba);
	void	(*fixup_dev_quirks)(struct ufs_hba *hba);
	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op,
			   enum ufs_notify_change_status);
	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
	void	(*dbg_register_dump)(struct ufs_hba *hba);
	int	(*phy_initialization)(struct ufs_hba *);
	int	(*device_reset)(struct ufs_hba *hba);
	void	(*config_scaling_param)(struct ufs_hba *hba,
					struct devfreq_dev_profile *profile,
					struct devfreq_simple_ondemand_data *data);
	int	(*program_key)(struct ufs_hba *hba,
			       const union ufs_crypto_cfg_entry *cfg, int slot);
	void	(*event_notify)(struct ufs_hba *hba,
				enum ufs_event_type evt, void *data);
};
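
/*
 * Example (hedged sketch, not from this file): a vendor host driver
 * normally implements only a subset of these callbacks and leaves the
 * rest NULL; the ufshcd_vops_*() wrappers near the end of this header
 * skip NULL hooks. The my_ufs_* names below are hypothetical:
 *
 *	static int my_ufs_init(struct ufs_hba *hba)
 *	{
 *		hba->caps |= UFSHCD_CAP_CLK_GATING;
 *		return 0;
 *	}
 *
 *	static const struct ufs_hba_variant_ops my_ufs_vops = {
 *		.name = "my-ufs",
 *		.init = my_ufs_init,
 *	};
 */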

/* clock gating state */
enum clk_gating_state {
	CLKS_OFF,
	CLKS_ON,
	REQ_CLKS_OFF,
	REQ_CLKS_ON,
};

/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after some delay as specified in
 *	delay_ms
 * @ungate_work: worker to turn on clocks that will be used in case of
 *	interrupt context
 * @state: the current clocks state
 * @delay_ms: gating delay in ms
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 *	during suspend/resume
 * @delay_attr: sysfs attribute to control delay_ms
 * @enable_attr: sysfs attribute to enable/disable clock gating
 * @is_enabled: Indicates the current status of clock gating
 * @is_initialized: Indicates whether clock gating is initialized or not
 * @active_reqs: number of requests that are pending and should be waited for
 *	completion before gating clocks.
 * @clk_gating_workq: workqueue for clock gating work.
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	bool is_initialized;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;
};

struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;
	bool is_valid;
};

/**
 * struct ufs_clk_scaling - UFS clock scaling related data
 * @active_reqs: number of requests that are pending. If this is zero when
 *	devfreq ->target() function is called then schedule "suspend_work" to
 *	suspend devfreq.
 * @tot_busy_t: Total busy time in current polling window
 * @window_start_t: Start time (in jiffies) of the current polling window
 * @busy_start_t: Start time of current busy period
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: UFS power mode may also be changed during scaling and this
 *	one keeps track of previous power mode.
 * @workq: workqueue to schedule devfreq suspend/resume work
 * @suspend_work: worker to suspend devfreq
 * @resume_work: worker to resume devfreq
 * @min_gear: lowest HS gear to scale down to
 * @is_enabled: tracks if scaling is currently enabled or not, controlled by
 *	clkscale_enable sysfs node
 * @is_allowed: tracks if scaling is currently allowed or not, used to block
 *	clock scaling which is not invoked from devfreq governor
 * @is_initialized: Indicates whether clock scaling is initialized or not
 * @is_busy_started: tracks if busy period has started or not
 * @is_suspended: tracks if devfreq is suspended or not
 */
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	ktime_t window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	u32 min_gear;
	bool is_enabled;
	bool is_allowed;
	bool is_initialized;
	bool is_busy_started;
	bool is_suspended;
};

#define UFS_EVENT_HIST_LENGTH 8
/**
 * struct ufs_event_hist - keeps history of errors
 * @pos: index to indicate cyclic buffer position
 * @val: cyclic buffer for registers value
 * @tstamp: cyclic buffer for time stamp
 * @cnt: error counter
 */
struct ufs_event_hist {
	int pos;
	u32 val[UFS_EVENT_HIST_LENGTH];
	ktime_t tstamp[UFS_EVENT_HIST_LENGTH];
	unsigned long long cnt;
};
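
/*
 * The history is a cyclic buffer: a new sample is stored at @pos, which
 * then advances modulo UFS_EVENT_HIST_LENGTH, while @cnt counts every
 * event ever recorded. A simplified sketch of the update done by
 * ufshcd_update_evt_hist() (the real code lives in ufshcd.c), for a
 * struct ufs_event_hist *e:
 *
 *	e->val[e->pos] = val;
 *	e->tstamp[e->pos] = ktime_get();
 *	e->cnt++;
 *	e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
 */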

/**
 * struct ufs_stats - keeps usage/err statistics
 * @last_intr_status: record the last interrupt status.
 * @last_intr_ts: record the last interrupt timestamp.
 * @hibern8_exit_cnt: Counter to keep track of number of exits,
 *	reset this after link-startup.
 * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
 *	Clear after the first successful command completion.
 * @event: array with event history.
 */
struct ufs_stats {
	u32 last_intr_status;
	ktime_t last_intr_ts;

	u32 hibern8_exit_cnt;
	ktime_t last_hibern8_exit_tstamp;
	struct ufs_event_hist event[UFS_EVT_CNT];
};

/**
 * enum ufshcd_state - UFS host controller state
 * @UFSHCD_STATE_RESET: Link is not operational
 * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can
 *	process SCSI commands.
 * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled.
 *	SCSI commands may be submitted to the controller.
 * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail
 *	newly submitted SCSI commands with error code DID_BAD_TARGET.
 * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery
 *	failed. Fail processing of newly submitted SCSI commands.
 */
enum ufshcd_state {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
	UFSHCD_STATE_EH_SCHEDULED_FATAL,
	UFSHCD_STATE_ERROR,
};

enum ufshcd_quirks {
	/* Interrupt aggregation support is broken */
	UFSHCD_QUIRK_BROKEN_INTR_AGGR			= 1 << 0,

	/*
	 * A delay before each DME command is required as the UniPro
	 * layer has shown instabilities.
	 */
	UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		= 1 << 1,

	/*
	 * If the UFS host controller has issues processing LCC (Line
	 * Control Command) coming from the device then enable this quirk.
	 * When this quirk is enabled, the host controller driver should
	 * disable LCC transmission on the UFS device (by clearing the
	 * device's TX_LCC_ENABLE attribute to 0).
	 */
	UFSHCD_QUIRK_BROKEN_LCC				= 1 << 2,

	/*
	 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
	 * inbound Link supports unterminated line in HS mode. Setting this
	 * attribute to 1 fixes moving to HS gear.
	 */
	UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		= 1 << 3,

	/*
	 * This quirk needs to be enabled if the host controller only allows
	 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
	 * SLOW AUTO).
	 */
	UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		= 1 << 4,

	/*
	 * This quirk needs to be enabled if the host controller doesn't
	 * advertise the correct version in the UFS_VER register. If this
	 * quirk is enabled, the standard UFS host driver will call the
	 * vendor specific ops (get_ufs_hci_version) to get the correct
	 * version.
	 */
	UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		= 1 << 5,

	/*
	 * Clear handling for transfer/task request list is just opposite.
	 */
	UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR		= 1 << 6,

	/*
	 * This quirk needs to be enabled if the host controller doesn't
	 * allow the interrupt aggregation timer and counter to be reset
	 * by s/w.
	 */
	UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR		= 1 << 7,

	/*
	 * This quirk needs to be enabled if the host controller cannot be
	 * enabled via the HCE register.
	 */
	UFSHCI_QUIRK_BROKEN_HCE				= 1 << 8,

	/*
	 * This quirk needs to be enabled if the host controller regards
	 * resolution of the values of PRDTO and PRDTL in UTRD as byte.
	 */
	UFSHCD_QUIRK_PRDT_BYTE_GRAN			= 1 << 9,

	/*
	 * This quirk needs to be enabled if the host controller reports
	 * OCS FATAL ERROR with device error through sense data.
	 */
	UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR		= 1 << 10,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * auto-hibernate capability but it doesn't work.
	 */
	UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8		= 1 << 11,

	/*
	 * This quirk needs to disable manual flush for write booster.
	 */
	UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL		= 1 << 12,

	/*
	 * This quirk needs to disable unipro timeout values
	 * before power mode change.
	 */
	UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING	= 1 << 13,

	/*
	 * This quirk allows only sg entries aligned with page size.
	 */
	UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE		= 1 << 14,

	/*
	 * This quirk needs to be enabled if the host controller does not
	 * support UIC command.
	 */
	UFSHCD_QUIRK_BROKEN_UIC_CMD			= 1 << 15,

	/*
	 * This quirk needs to be enabled if the host controller cannot
	 * support physical host configuration.
	 */
	UFSHCD_QUIRK_SKIP_PH_CONFIGURATION		= 1 << 16,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * 64-bit addressing supported capability but it doesn't work.
	 */
	UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS		= 1 << 17,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * auto-hibernate capability but it's FASTAUTO only.
	 */
	UFSHCD_QUIRK_HIBERN_FASTAUTO			= 1 << 18,
};

enum ufshcd_caps {
	/* Allow dynamic clk gating */
	UFSHCD_CAP_CLK_GATING				= 1 << 0,

	/* Allow hibern8 with clk gating */
	UFSHCD_CAP_HIBERN8_WITH_CLK_GATING		= 1 << 1,

	/* Allow dynamic clk scaling */
	UFSHCD_CAP_CLK_SCALING				= 1 << 2,

	/* Allow auto bkops to be enabled during runtime suspend */
	UFSHCD_CAP_AUTO_BKOPS_SUSPEND			= 1 << 3,

	/*
	 * This capability allows the host controller driver to use the UFS
	 * HCI's interrupt aggregation capability.
	 * CAUTION: Enabling this might reduce overall UFS throughput.
	 */
	UFSHCD_CAP_INTR_AGGR				= 1 << 4,

	/*
	 * This capability allows the device auto-bkops to be always enabled
	 * except during suspend (both runtime and system suspend).
	 * Enabling this capability means that the device will always be
	 * allowed to do background operations when it's active, but it might
	 * degrade the performance of ongoing read/write operations.
	 */
	UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,

	/*
	 * This capability allows the host controller driver to automatically
	 * enable runtime power management by itself instead of waiting
	 * for userspace to control the power management.
	 */
	UFSHCD_CAP_RPM_AUTOSUSPEND			= 1 << 6,

	/*
	 * This capability allows the host controller driver to turn on
	 * WriteBooster, if the underlying device supports it and is
	 * provisioned to be used. This would increase the write performance.
	 */
	UFSHCD_CAP_WB_EN				= 1 << 7,

	/*
	 * This capability allows the host controller driver to use the
	 * inline crypto engine, if it is present.
	 */
	UFSHCD_CAP_CRYPTO				= 1 << 8,

	/*
	 * This capability allows the controller regulators to be put into
	 * low power mode aggressively during clock gating.
	 * This would increase power savings.
	 */
	UFSHCD_CAP_AGGR_POWER_COLLAPSE			= 1 << 9,

	/*
	 * This capability allows the host controller driver to use DeepSleep,
	 * if it is supported by the UFS device. The host controller driver
	 * must support device hardware reset via the hba->device_reset()
	 * callback, in order to exit DeepSleep state.
	 */
	UFSHCD_CAP_DEEPSLEEP				= 1 << 10,

	/*
	 * This capability allows the host controller driver to use
	 * temperature notification if it is supported by the UFS device.
	 */
	UFSHCD_CAP_TEMP_NOTIF				= 1 << 11,
};
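
/*
 * Illustrative example: capabilities are opted into by the variant driver,
 * usually from its ->init() callback, by OR-ing flags into hba->caps
 * before the core consults them:
 *
 *	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_WB_EN |
 *		     UFSHCD_CAP_RPM_AUTOSUSPEND;
 */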

struct ufs_hba_variant_params {
	struct devfreq_dev_profile devfreq_profile;
	struct devfreq_simple_ondemand_data ondemand_data;
	u16 hba_enable_delay_us;
	u32 wb_flush_threshold;
};

#ifdef CONFIG_SCSI_UFS_HPB
/**
 * struct ufshpb_dev_info - UFSHPB device related info
 * @num_lu: the number of user logical units used to check whether all lu
 *          finished initialization
 * @rgn_size: device reported HPB region size
 * @srgn_size: device reported HPB sub-region size
 * @slave_conf_cnt: counter to check all lu finished initialization
 * @hpb_disabled: flag to check if HPB is disabled
 * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
 * @is_legacy: flag to check HPB 1.0
 * @control_mode: either host or device
 */
struct ufshpb_dev_info {
	int num_lu;
	int rgn_size;
	int srgn_size;
	atomic_t slave_conf_cnt;
	bool hpb_disabled;
	u8 max_hpb_single_cmd;
	bool is_legacy;
	u8 control_mode;
};
#endif

struct ufs_hba_monitor {
	unsigned long chunk_size;

	unsigned long nr_sec_rw[2];
	ktime_t total_busy[2];

	unsigned long nr_req[2];
	/* latencies */
	ktime_t lat_sum[2];
	ktime_t lat_max[2];
	ktime_t lat_min[2];

	u32 nr_queued[2];
	ktime_t busy_start_ts[2];

	ktime_t enabled_ts;
	bool enabled;
};

/**
 * struct ufs_hba - per adapter private structure
 * @mmio_base: UFSHCI base register address
 * @ucdl_base_addr: UFS Command Descriptor base address
 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
 * @utmrdl_base_addr: UTP Task Management Descriptor base address
 * @ucdl_dma_addr: UFS Command Descriptor DMA address
 * @utrdl_dma_addr: UTRDL DMA address
 * @utmrdl_dma_addr: UTMRDL DMA address
 * @host: Scsi_Host instance of the driver
 * @dev: device handle
 * @ufs_device_wlun: WLUN that controls the entire UFS device.
 * @hwmon_device: device instance registered with the hwmon core.
 * @curr_dev_pwr_mode: active UFS device power mode.
 * @uic_link_state: active state of the link to the UFS device.
 * @rpm_lvl: desired UFS power management level during runtime PM.
 * @spm_lvl: desired UFS power management level during system PM.
 * @pm_op_in_progress: whether or not a PM operation is in progress.
 * @ahit: value of Auto-Hibernate Idle Timer register.
 * @lrb: local reference block
 * @outstanding_tasks: Bits representing outstanding task requests
 * @outstanding_lock: Protects @outstanding_reqs.
 * @outstanding_reqs: Bits representing outstanding transfer requests
 * @capabilities: UFS Controller Capabilities
 * @nutrs: Transfer Request Queue depth supported by controller
 * @nutmrs: Task Management Queue depth supported by controller
 * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
 * @ufs_version: UFS Version to which controller complies
 * @vops: pointer to variant specific operations
 * @vps: pointer to variant specific parameters
 * @priv: pointer to variant specific private data
 * @irq: Irq number of the controller
 * @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
 * @dev_ref_clk_freq: reference clock frequency
 * @quirks: bitmask with information about deviations from the UFSHCI
 *	standard.
 * @dev_quirks: bitmask with information about deviations from the UFS
 *	standard.
 * @tmf_tag_set: TMF tag set.
 * @tmf_queue: Used to allocate TMF tags.
 * @tmf_rqs: array with pointers to TMF requests while these are in progress.
 * @active_uic_cmd: handle of active UIC command
 * @uic_cmd_mutex: mutex for UIC command
 * @uic_async_done: completion used during UIC processing
 * @ufshcd_state: UFSHCD state
 * @eh_flags: Error handling flags
 * @intr_mask: Interrupt Mask Bits
 * @ee_ctrl_mask: Exception event control mask
 * @ee_drv_mask: Exception event mask for driver
 * @ee_usr_mask: Exception event mask for user (set via debugfs)
 * @ee_ctrl_mutex: Used to serialize exception event information.
 * @is_powered: flag to check if HBA is powered
 * @shutting_down: flag to check if shutdown has been invoked
 * @host_sem: semaphore used to serialize concurrent contexts
 * @eh_wq: Workqueue that eh_work works on
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eeh_work: Worker to handle exception events
 * @errors: HBA errors
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky error mask
 * @saved_uic_err: sticky UIC error mask
 * @ufs_stats: various error counters
 * @force_reset: flag to force eh_work perform a full reset
 * @force_pmc: flag to force a power mode change
 * @silence_err_logs: flag to silence error logs
 * @dev_cmd: ufs device management command information
 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
 * @nop_out_timeout: NOP OUT timeout value
 * @dev_info: information about the UFS device
 * @auto_bkops_enabled: to track whether bkops is enabled in device
 * @vreg_info: UFS device voltage regulator information
 * @clk_list_head: UFS host controller clocks list node head
 * @req_abort_count: number of times ufshcd_abort() has been called
 * @lanes_per_direction: number of lanes per data direction between the UFS
 *	controller and the UFS device.
 * @pwr_info: holds current power mode
 * @max_pwr_info: keeps the device max valid pwm
 * @clk_gating: information related to clock gating
 * @caps: bitmask with information about UFS controller capabilities
 * @devfreq: frequency scaling information owned by the devfreq core
 * @clk_scaling: frequency scaling information owned by the UFS driver
 * @is_sys_suspended: whether or not the entire system has been suspended
 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
 *	device is known or not.
 * @clk_scaling_lock: used to serialize device commands and clock scaling
 * @desc_size: descriptor sizes reported by device
 * @scsi_block_reqs_cnt: reference counting for scsi block requests
 * @bsg_dev: struct device associated with the BSG queue
 * @bsg_queue: BSG queue associated with the UFS controller
 * @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
 *	management) after the UFS device has finished a WriteBooster buffer
 *	flush or auto BKOP.
 * @ufshpb_dev: information related to HPB (Host Performance Booster).
 * @monitor: statistics about UFS commands
 * @crypto_capabilities: Content of crypto capabilities register (0x100)
 * @crypto_cap_array: Array of crypto capabilities
 * @crypto_cfg_register: Start of the crypto cfg array
 * @crypto_profile: the crypto profile of this hba (if applicable)
 * @debugfs_root: UFS debugfs root directory
 * @debugfs_ee_work: used to restore ee_ctrl_mask after a delay
 * @debugfs_ee_rate_limit_ms: user configurable delay after which to restore
 *	ee_ctrl_mask
 * @luns_avail: number of regular and well known LUNs supported by the UFS
 *	device
 * @complete_put: whether or not to call ufshcd_rpm_put() from inside
 *	ufshcd_resume_complete()
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory reference */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory reference */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	struct scsi_device *ufs_device_wlun;

#ifdef CONFIG_SCSI_UFS_HWMON
	struct device *hwmon_device;
#endif

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	int pm_op_in_progress;

	/* Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;

	unsigned long outstanding_tasks;
	spinlock_t outstanding_lock;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 reserved_slot;
	u32 ufs_version;
	const struct ufs_hba_variant_ops *vops;
	struct ufs_hba_variant_params *vps;
	void *priv;
	unsigned int irq;
	bool is_irq_enabled;
	enum ufs_ref_clk_freq dev_ref_clk_freq;

	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	struct blk_mq_tag_set tmf_tag_set;
	struct request_queue *tmf_queue;
	struct request **tmf_rqs;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	enum ufshcd_state ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	u16 ee_drv_mask;
	u16 ee_usr_mask;
	struct mutex ee_ctrl_mutex;
	bool is_powered;
	bool shutting_down;
	struct semaphore host_sem;

	/* Work Queues */
	struct workqueue_struct *eh_wq;
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;
	struct ufs_stats ufs_stats;
	bool force_reset;
	bool force_pmc;
	bool silence_err_logs;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;
	int nop_out_timeout;

	/* Keeps information of the UFS device connected to host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	/* Number of requests aborts */
	int req_abort_count;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;

	struct rw_semaphore clk_scaling_lock;
	unsigned char desc_size[QUERY_DESC_IDN_MAX];
	atomic_t scsi_block_reqs_cnt;

	struct device bsg_dev;
	struct request_queue *bsg_queue;
	struct delayed_work rpm_dev_flush_recheck_work;

#ifdef CONFIG_SCSI_UFS_HPB
	struct ufshpb_dev_info ufshpb_dev;
#endif

	struct ufs_hba_monitor monitor;

#ifdef CONFIG_SCSI_UFS_CRYPTO
	union ufs_crypto_capabilities crypto_capabilities;
	union ufs_crypto_cap_entry *crypto_cap_array;
	u32 crypto_cfg_register;
	struct blk_crypto_profile crypto_profile;
#endif
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_root;
	struct delayed_work debugfs_ee_work;
	u32 debugfs_ee_rate_limit_ms;
#endif
	u32 luns_avail;
	bool complete_put;
};

/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_GATING;
}
static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
}
static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
}

static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
	return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
		!(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
}

static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
{
	return !!(ufshcd_is_link_hibern8(hba) &&
		  (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
}

static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
{
	return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
		!(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
}

static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
{
	return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit);
}
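
/*
 * Illustrative sketch: an Auto-Hibernate timer value is composed from the
 * timer and scale fields of the AHIT register (the mask names below are
 * assumed to come from ufs/ufshci.h) and applied with
 * ufshcd_auto_hibern8_update(), declared further below:
 *
 *	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
 *		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
 *
 *	ufshcd_auto_hibern8_update(hba, ahit);
 */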

static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_WB_EN;
}

#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))

/**
 * ufshcd_rmwl - perform read/modify/write for a controller register
 * @hba: per adapter instance
 * @mask: mask to apply on read value
 * @val: actual value to write
 * @reg: register address
 */
static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
{
	u32 tmp;

	tmp = ufshcd_readl(hba, reg);
	tmp &= ~mask;
	tmp |= (val & mask);
	ufshcd_writel(hba, tmp, reg);
}
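
/*
 * Usage sketch: update one field of a host register without disturbing
 * the remaining bits. MY_MODE_MASK, MY_MODE_HS and REG_MY_CONFIG are
 * hypothetical vendor-specific names used only for illustration:
 *
 *	ufshcd_rmwl(hba, MY_MODE_MASK, MY_MODE_HS, REG_MY_CONFIG);
 */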

int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);

static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}

/**
 * ufshcd_set_variant - set variant specific data to the hba
 * @hba: per adapter instance
 * @variant: pointer to variant specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}

/**
 * ufshcd_get_variant - get variant specific data from the hba
 * @hba: per adapter instance
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
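
/*
 * Illustrative pairing (the my_ufs_host type is hypothetical): a variant
 * driver stashes its private state in ->init() and retrieves it from any
 * later callback.
 *
 * In the ->init() callback:
 *
 *	struct my_ufs_host *host = devm_kzalloc(hba->dev, sizeof(*host),
 *						GFP_KERNEL);
 *
 *	ufshcd_set_variant(hba, host);
 *
 * In any later callback:
 *
 *	struct my_ufs_host *host = ufshcd_get_variant(hba);
 */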

#ifdef CONFIG_PM
extern int ufshcd_runtime_suspend(struct device *dev);
extern int ufshcd_runtime_resume(struct device *dev);
#endif
#ifdef CONFIG_PM_SLEEP
extern int ufshcd_system_suspend(struct device *dev);
extern int ufshcd_system_resume(struct device *dev);
#endif
extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
				      int agreed_gear,
				      int adapt_val);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
			struct ufs_pa_layer_attr *desired_pwr_mode);
extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0
#define DME_PEER	1
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */

static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_get(struct ufs_hba *hba,
				 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
				      u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}

static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
{
	return (pwr_info->pwr_rx == FAST_MODE ||
		pwr_info->pwr_rx == FASTAUTO_MODE) &&
	       (pwr_info->pwr_tx == FAST_MODE ||
		pwr_info->pwr_tx == FASTAUTO_MODE);
}

static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}
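
/*
 * Illustrative use of the DME helpers above: read a local UniPro
 * attribute and write a peer attribute. PA_ACTIVETXDATALANES and
 * PA_TXTERMINATION are standard MIB attributes from ufs/unipro.h;
 * error handling is omitted for brevity:
 *
 *	u32 lanes;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), &lanes);
 *	ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
 */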

/* Expose Query-Request API */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size);
int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
			    enum attr_idn idn, u8 index, u8 selector,
			    u32 *attr_val);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, u8 index, bool *flag_res);
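
/*
 * Illustrative sketch: reading the standard fDeviceInit flag through the
 * query API (the opcode and IDN constants come from ufs/ufs.h); a zero
 * return with flag_res == false indicates device initialization has
 * completed:
 *
 *	bool flag_res = true;
 *	int err;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 */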

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
			     const struct ufs_dev_quirk *fixups);
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii);

int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);

void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				  int *desc_length);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);

int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);

int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
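
/*
 * Illustrative glue (the my_ufs_pm_ops table is hypothetical): platform
 * drivers typically wire these PM entry points into a dev_pm_ops table:
 *
 *	static const struct dev_pm_ops my_ufs_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend,
 *					ufshcd_system_resume)
 *		SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend,
 *				   ufshcd_runtime_resume, NULL)
 *		.prepare	 = ufshcd_suspend_prepare,
 *		.complete	 = ufshcd_resume_complete,
 *	};
 */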

/* Wrapper functions for safely calling variant operations */
static inline int ufshcd_vops_init(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);

	return 0;
}

static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->phy_initialization)
		return hba->vops->phy_initialization(hba);

	return 0;
}

extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix);

int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
			     const u16 *other_mask, u16 set, u16 clr);

#endif /* End of Header */