// SPDX-License-Identifier: MIT
/*
 * Copyright © 2013-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_pcode.h"

static int gen6_check_mailbox_status(u32 mbox)
{
    switch (mbox & GEN6_PCODE_ERROR_MASK) {
    case GEN6_PCODE_SUCCESS:
        return 0;
    case GEN6_PCODE_UNIMPLEMENTED_CMD:
        return -ENODEV;
    case GEN6_PCODE_ILLEGAL_CMD:
        return -ENXIO;
    case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
    case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
        return -EOVERFLOW;
    case GEN6_PCODE_TIMEOUT:
        return -ETIMEDOUT;
    default:
        MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
        return 0;
    }
}

static int gen7_check_mailbox_status(u32 mbox)
{
    switch (mbox & GEN6_PCODE_ERROR_MASK) {
    case GEN6_PCODE_SUCCESS:
        return 0;
    case GEN6_PCODE_ILLEGAL_CMD:
        return -ENXIO;
    case GEN7_PCODE_TIMEOUT:
        return -ETIMEDOUT;
    case GEN7_PCODE_ILLEGAL_DATA:
        return -EINVAL;
    case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
        return -ENXIO;
    case GEN11_PCODE_LOCKED:
        return -EBUSY;
    case GEN11_PCODE_REJECTED:
        return -EACCES;
    case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
        return -EOVERFLOW;
    default:
        MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
        return 0;
    }
}

static int __snb_pcode_rw(struct intel_uncore *uncore, u32 mbox,
              u32 *val, u32 *val1,
              int fast_timeout_us, int slow_timeout_ms,
              bool is_read)
{
    lockdep_assert_held(&uncore->i915->sb_lock);

    /*
     * GEN6_PCODE_* are outside of the forcewake domain, we can use
     * intel_uncore_read/write_fw variants to reduce the amount of work
     * required when reading/writing.
     */

    if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
        return -EAGAIN;

    intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
    intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
    intel_uncore_write_fw(uncore,
                  GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

    if (__intel_wait_for_register_fw(uncore,
                     GEN6_PCODE_MAILBOX,
                     GEN6_PCODE_READY, 0,
                     fast_timeout_us,
                     slow_timeout_ms,
                     &mbox))
        return -ETIMEDOUT;

    if (is_read)
        *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
    if (is_read && val1)
        *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

    if (GRAPHICS_VER(uncore->i915) > 6)
        return gen7_check_mailbox_status(mbox);
    else
        return gen6_check_mailbox_status(mbox);
}

int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
    int err;

    mutex_lock(&uncore->i915->sb_lock);
    err = __snb_pcode_rw(uncore, mbox, val, val1, 500, 20, true);
    mutex_unlock(&uncore->i915->sb_lock);

    if (err) {
        drm_dbg(&uncore->i915->drm,
            "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
            mbox, __builtin_return_address(0), err);
    }

    return err;
}
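
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * read a value through the pcode mailbox. snb_pcode_read() takes the
 * sideband lock internally, writes *val as the request payload, and fills
 * *val from GEN6_PCODE_DATA (and *val1 from GEN6_PCODE_DATA1 when non-NULL).
 * The mailbox command below is a hypothetical placeholder; real callers pass
 * a GEN6_PCODE_* command defined in i915_reg.h.
 */
#define EXAMPLE_PCODE_READ_CMD 0x0 /* hypothetical mailbox command */

static int __maybe_unused example_pcode_read(struct intel_uncore *uncore)
{
    u32 val = 0; /* request payload on entry, reply dword on success */
    int err;

    err = snb_pcode_read(uncore, EXAMPLE_PCODE_READ_CMD, &val, NULL);
    if (err)
        return err;

    /* val now holds the GEN6_PCODE_DATA reply */
    return 0;
}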

int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
                int fast_timeout_us, int slow_timeout_ms)
{
    int err;

    mutex_lock(&uncore->i915->sb_lock);
    err = __snb_pcode_rw(uncore, mbox, &val, NULL,
                 fast_timeout_us, slow_timeout_ms, false);
    mutex_unlock(&uncore->i915->sb_lock);

    if (err) {
        drm_dbg(&uncore->i915->drm,
            "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
            val, mbox, __builtin_return_address(0), err);
    }

    return err;
}
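
/*
 * Illustrative sketch (not part of the original file): a mailbox write with
 * explicit timeouts. fast_timeout_us bounds the initial busy-wait in
 * __intel_wait_for_register_fw() and slow_timeout_ms bounds the subsequent
 * sleeping wait, so commands the firmware may take longer to complete can
 * pass a non-zero slow_timeout_ms. The snb_pcode_write() form used later in
 * snb_pcode_write_p() comes from intel_pcode.h and covers the common case.
 * The command below is a hypothetical placeholder.
 */
#define EXAMPLE_PCODE_WRITE_CMD 0x0 /* hypothetical mailbox command */

static int __maybe_unused example_pcode_write(struct intel_uncore *uncore, u32 val)
{
    /* busy-wait up to 500 us, then allow up to 20 ms of sleeping waits */
    return snb_pcode_write_timeout(uncore, EXAMPLE_PCODE_WRITE_CMD, val, 500, 20);
}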

static bool skl_pcode_try_request(struct intel_uncore *uncore, u32 mbox,
                  u32 request, u32 reply_mask, u32 reply,
                  u32 *status)
{
    *status = __snb_pcode_rw(uncore, mbox, &request, NULL, 500, 0, true);

    return (*status == 0) && ((request & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @uncore: uncore
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error, or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if this times out, for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
              u32 reply_mask, u32 reply, int timeout_base_ms)
{
    u32 status;
    int ret;

    mutex_lock(&uncore->i915->sb_lock);

#define COND \
    skl_pcode_try_request(uncore, mbox, request, reply_mask, reply, &status)

    /*
     * Prime the PCODE by doing a request first. Normally it guarantees
     * that a subsequent request, at most @timeout_base_ms later, succeeds.
     * _wait_for() doesn't guarantee when its passed condition is evaluated
     * first, so send the first request explicitly.
     */
    if (COND) {
        ret = 0;
        goto out;
    }
    ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
    if (!ret)
        goto out;

    /*
     * The above can time out if the number of requests was low (2 in the
     * worst case) _and_ PCODE was busy for some reason even after a
     * (queued) request and @timeout_base_ms delay. As a workaround retry
     * the poll with preemption disabled to maximize the number of
     * requests. Increase the timeout from @timeout_base_ms to 50ms to
     * account for interrupts that could reduce the number of these
     * requests, and for any quirks of the PCODE firmware that delay
     * the request completion.
     */
    drm_dbg_kms(&uncore->i915->drm,
            "PCODE timeout, retrying with preemption disabled\n");
    drm_WARN_ON_ONCE(&uncore->i915->drm, timeout_base_ms > 3);
    preempt_disable();
    ret = wait_for_atomic(COND, 50);
    preempt_enable();

out:
    mutex_unlock(&uncore->i915->sb_lock);
    return status ? status : ret;
#undef COND
}
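
/*
 * Illustrative sketch (not part of the original file): polling for an
 * acknowledgment bit with skl_pcode_request(). The request is resent until
 * (reply dword & reply_mask) == reply, mirroring the real caller in
 * intel_pcode_init() below. All constants here are hypothetical placeholders.
 */
#define EXAMPLE_STATUS_MBOX    0x0    /* hypothetical mailbox command */
#define EXAMPLE_STATUS_REQUEST 0x0    /* hypothetical request payload */
#define EXAMPLE_READY_BIT      BIT(0) /* hypothetical acknowledgment bit */

static int __maybe_unused example_pcode_wait_ready(struct intel_uncore *uncore)
{
    /* poll with preemption enabled for up to 3 ms, then the 50 ms atomic retry */
    return skl_pcode_request(uncore, EXAMPLE_STATUS_MBOX,
                 EXAMPLE_STATUS_REQUEST,
                 EXAMPLE_READY_BIT, EXAMPLE_READY_BIT, 3);
}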

int intel_pcode_init(struct intel_uncore *uncore)
{
    if (!IS_DGFX(uncore->i915))
        return 0;

    return skl_pcode_request(uncore, DG1_PCODE_STATUS,
                 DG1_UNCORE_GET_INIT_STATUS,
                 DG1_UNCORE_INIT_STATUS_COMPLETE,
                 DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
}

int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
{
    intel_wakeref_t wakeref;
    u32 mbox;
    int err;

    mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
        | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
        | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);

    with_intel_runtime_pm(uncore->rpm, wakeref)
        err = snb_pcode_read(uncore, mbox, val, NULL);

    return err;
}

int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val)
{
    intel_wakeref_t wakeref;
    u32 mbox;
    int err;

    mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
        | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
        | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);

    with_intel_runtime_pm(uncore->rpm, wakeref)
        err = snb_pcode_write(uncore, mbox, val);

    return err;
}
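
/*
 * Illustrative sketch (not part of the original file): the *_p() helpers
 * above pack a command plus two parameters into the mailbox register layout
 * (GEN6_PCODE_MB_COMMAND/PARAM1/PARAM2) and hold a runtime PM wakeref around
 * the access, so callers only supply the raw fields. The command and
 * parameter values below are hypothetical placeholders.
 */
#define EXAMPLE_PCODE_CMD    0x0 /* hypothetical mailbox command */
#define EXAMPLE_PCODE_PARAM1 0x0 /* hypothetical sub-command/parameter */

static int __maybe_unused example_pcode_read_param(struct intel_uncore *uncore,
                           u32 *out)
{
    /* PARAM2 is unused here and left as 0 */
    return snb_pcode_read_p(uncore, EXAMPLE_PCODE_CMD, EXAMPLE_PCODE_PARAM1, 0, out);
}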