0001 /*
0002  * Copyright © 2017 Intel Corporation
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice (including the next
0012  * paragraph) shall be included in all copies or substantial portions of the
0013  * Software.
0014  *
0015  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0016  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0017  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0018  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
0019  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
0020  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
0021  * IN THE SOFTWARE.
0022  *
0023  */
0024 
0025 #ifndef __INTEL_UNCORE_H__
0026 #define __INTEL_UNCORE_H__
0027 
0028 #include <linux/spinlock.h>
0029 #include <linux/notifier.h>
0030 #include <linux/hrtimer.h>
0031 #include <linux/io-64-nonatomic-lo-hi.h>
0032 #include <linux/types.h>
0033 
0034 #include "i915_reg_defs.h"
0035 
0036 struct drm_i915_private;
0037 struct intel_runtime_pm;
0038 struct intel_uncore;
0039 struct intel_gt;
0040 
/*
 * Book-keeping for unclaimed-MMIO detection (see the
 * UNCORE_HAS_*_UNCLAIMED flags below); referenced from
 * struct intel_uncore via the ->debug pointer.
 */
struct intel_uncore_mmio_debug {
	spinlock_t lock; /* lock is also taken in irq contexts */
	/* arming state for the unclaimed-mmio check, plus the value saved
	 * across suspend — semantics live in intel_uncore.c */
	int unclaimed_mmio_check;
	int saved_mmio_check;
	/* NOTE(review): presumably counts suspend transitions — confirm
	 * against intel_uncore_suspend()/resume() in intel_uncore.c */
	u32 suspend_count;
};
0047 
/*
 * Index of each forcewake domain. These double as the bit positions used
 * to build the enum forcewake_domains masks below, and as indices into
 * struct intel_uncore::fw_domain[].
 */
enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_GT,		/* also includes blitter engine */
	FW_DOMAIN_ID_MEDIA,
	FW_DOMAIN_ID_MEDIA_VDBOX0,
	FW_DOMAIN_ID_MEDIA_VDBOX1,
	FW_DOMAIN_ID_MEDIA_VDBOX2,
	FW_DOMAIN_ID_MEDIA_VDBOX3,
	FW_DOMAIN_ID_MEDIA_VDBOX4,
	FW_DOMAIN_ID_MEDIA_VDBOX5,
	FW_DOMAIN_ID_MEDIA_VDBOX6,
	FW_DOMAIN_ID_MEDIA_VDBOX7,
	FW_DOMAIN_ID_MEDIA_VEBOX0,
	FW_DOMAIN_ID_MEDIA_VEBOX1,
	FW_DOMAIN_ID_MEDIA_VEBOX2,
	FW_DOMAIN_ID_MEDIA_VEBOX3,

	/* must be last: the number of domain ids, not a domain itself */
	FW_DOMAIN_ID_COUNT
};
0067 
/*
 * Bitmask form of the forcewake domain ids above; values may be OR'ed
 * together to request multiple domains at once.
 */
enum forcewake_domains {
	FORCEWAKE_RENDER	= BIT(FW_DOMAIN_ID_RENDER),
	FORCEWAKE_GT		= BIT(FW_DOMAIN_ID_GT),
	FORCEWAKE_MEDIA		= BIT(FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_MEDIA_VDBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
	FORCEWAKE_MEDIA_VDBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
	FORCEWAKE_MEDIA_VDBOX2	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
	FORCEWAKE_MEDIA_VDBOX3	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
	FORCEWAKE_MEDIA_VDBOX4	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX4),
	FORCEWAKE_MEDIA_VDBOX5	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX5),
	FORCEWAKE_MEDIA_VDBOX6	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX6),
	FORCEWAKE_MEDIA_VDBOX7	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX7),
	FORCEWAKE_MEDIA_VEBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
	FORCEWAKE_MEDIA_VEBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),
	FORCEWAKE_MEDIA_VEBOX2	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX2),
	FORCEWAKE_MEDIA_VEBOX3	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX3),

	/* every domain bit set */
	FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1,
};
0087 
/* Hook for acquiring forcewake on a set of domains. */
struct intel_uncore_fw_get {
	void (*force_wake_get)(struct intel_uncore *uncore,
			       enum forcewake_domains domains);
};
0092 
/*
 * MMIO accessor vtable; the intel_uncore_read*()/intel_uncore_write*()
 * wrappers generated further down dispatch through these. Note there is
 * deliberately no mmio_writeq — see the 64-bit access warning below.
 */
struct intel_uncore_funcs {
	/* report the forcewake domains a read/write of register r needs,
	 * presumably backing intel_uncore_forcewake_for_reg() — confirm */
	enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
						  i915_reg_t r);
	enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
						   i915_reg_t r);

	/* sized reads; `trace` selects traced vs. untraced variants */
	u8 (*mmio_readb)(struct intel_uncore *uncore,
			 i915_reg_t r, bool trace);
	u16 (*mmio_readw)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);
	u32 (*mmio_readl)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);
	u64 (*mmio_readq)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);

	/* sized writes — intentionally no 64-bit variant */
	void (*mmio_writeb)(struct intel_uncore *uncore,
			    i915_reg_t r, u8 val, bool trace);
	void (*mmio_writew)(struct intel_uncore *uncore,
			    i915_reg_t r, u16 val, bool trace);
	void (*mmio_writel)(struct intel_uncore *uncore,
			    i915_reg_t r, u32 val, bool trace);
};
0115 
/*
 * A register-offset range and the forcewake domains required to access
 * registers inside it (bounds presumably inclusive — confirm against the
 * table definitions in intel_uncore.c).
 */
struct intel_forcewake_range {
	u32 start;
	u32 end;

	enum forcewake_domains domains;
};
0122 
/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */
struct i915_range {
	u32 start;
	u32 end;
};
0128 
/*
 * Wrapper around the MMIO register mapping for a GT, including the
 * forcewake bookkeeping used to keep the hardware awake around register
 * accesses.
 */
struct intel_uncore {
	void __iomem *regs; /* MMIO base; accessors add i915_mmio_reg_offset() */

	/* back-pointers to the owning device/GT and its runtime-pm state */
	struct drm_i915_private *i915;
	struct intel_gt *gt;
	struct intel_runtime_pm *rpm;

	spinlock_t lock; /** lock is also taken in irq contexts. */

	/* capability bits, queried via intel_uncore_has_*() below */
	unsigned int flags;
#define UNCORE_HAS_FORCEWAKE		BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED	BIT(1)
#define UNCORE_HAS_DBG_UNCLAIMED	BIT(2)
#define UNCORE_HAS_FIFO			BIT(3)

	/* table of register ranges and the forcewake domains they need */
	const struct intel_forcewake_range *fw_domains_table;
	unsigned int fw_domains_table_entries;

	/*
	 * Shadowed registers are special cases where we can safely write
	 * to the register *without* grabbing forcewake.
	 */
	const struct i915_range *shadowed_reg_table;
	unsigned int shadowed_reg_table_entries;

	struct notifier_block pmic_bus_access_nb;
	const struct intel_uncore_fw_get *fw_get_funcs;
	struct intel_uncore_funcs funcs; /* MMIO accessor vtable */

	unsigned int fifo_count; /* only meaningful with UNCORE_HAS_FIFO */

	/* bitmasks over enum forcewake_domains */
	enum forcewake_domains fw_domains; /* initialised domains */
	enum forcewake_domains fw_domains_active;
	enum forcewake_domains fw_domains_timer;
	enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */

	/* per-domain state, indexed by enum forcewake_domain_id (see
	 * for_each_fw_domain_masked() above/below) */
	struct intel_uncore_forcewake_domain {
		struct intel_uncore *uncore; /* back-pointer to owner */
		enum forcewake_domain_id id;
		enum forcewake_domains mask; /* presumably BIT(id) — confirm */
		unsigned int wake_count;
		bool active;
		/* NOTE(review): timer presumably defers domain release —
		 * confirm against intel_uncore.c */
		struct hrtimer timer;
		u32 __iomem *reg_set; /* forcewake request register */
		u32 __iomem *reg_ack; /* forcewake ack register */
	} *fw_domain[FW_DOMAIN_ID_COUNT];

	unsigned int user_forcewake_count;

	struct intel_uncore_mmio_debug *debug;
};
0180 
/*
 * Iterate over initialised fw domains.
 * tmp__ is scratch storage: mask__ is copied into it and consumed one set
 * bit at a time via __mask_next_bit(), each bit indexing
 * uncore__->fw_domain[].
 */
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
	for (tmp__ = (mask__); tmp__ ;) \
		for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])

/* As above, over every initialised domain of @uncore__. */
#define for_each_fw_domain(domain__, uncore__, tmp__) \
	for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
0188 
0189 static inline bool
0190 intel_uncore_has_forcewake(const struct intel_uncore *uncore)
0191 {
0192     return uncore->flags & UNCORE_HAS_FORCEWAKE;
0193 }
0194 
0195 static inline bool
0196 intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
0197 {
0198     return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
0199 }
0200 
0201 static inline bool
0202 intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
0203 {
0204     return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
0205 }
0206 
0207 static inline bool
0208 intel_uncore_has_fifo(const struct intel_uncore *uncore)
0209 {
0210     return uncore->flags & UNCORE_HAS_FIFO;
0211 }
0212 
/* Early/one-time setup and teardown of the uncore MMIO machinery. */
void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct intel_gt *gt);
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_cleanup_mmio(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct intel_uncore *uncore);

/* System / runtime power-management transitions. */
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);

/* Debug assertions on the current forcewake state. */
void assert_forcewakes_inactive(struct intel_uncore *uncore);
void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);

/*
 * Look up the forcewake domains needed for a FW_REG_READ and/or
 * FW_REG_WRITE access of @reg.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op);
#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

/* Reference-counted acquire/release of forcewake domains. */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains domains);
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains);

/*
 * Like above but the caller must manage the uncore.lock itself.
 * Must be used with intel_uncore_read_fw() and friends.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);

/* Userspace-visible forcewake references (e.g. for debugfs users). */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);

/*
 * Poll @reg until (value & mask) == value: first busy-wait up to
 * @fast_timeout_us, then sleep-wait up to @slow_timeout_ms. The last
 * value read is stored through @out_value when non-NULL.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value);
0268 static inline int
0269 intel_wait_for_register(struct intel_uncore *uncore,
0270             i915_reg_t reg,
0271             u32 mask,
0272             u32 value,
0273             unsigned int timeout_ms)
0274 {
0275     return __intel_wait_for_register(uncore, reg, mask, value, 2,
0276                      timeout_ms, NULL);
0277 }
0278 
/*
 * _fw variant of __intel_wait_for_register(); per the convention above,
 * presumably requires the caller to manage forcewake/uncore.lock itself —
 * confirm against the definition in intel_uncore.c.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value);
0286 static inline int
0287 intel_wait_for_register_fw(struct intel_uncore *uncore,
0288                i915_reg_t reg,
0289                u32 mask,
0290                u32 value,
0291                    unsigned int timeout_ms)
0292 {
0293     return __intel_wait_for_register_fw(uncore, reg, mask, value,
0294                         2, timeout_ms, NULL);
0295 }
0296 
/* register access functions */

/*
 * Generate __raw_uncore_read{8,16,32,64}() /
 * __raw_uncore_write{8,16,32,64}(): thin wrappers around
 * read{b,w,l,q}()/write{b,w,l,q}() at uncore->regs plus the register
 * offset. No tracing and no vtable dispatch — compare the
 * intel_uncore_read*() wrappers below.
 */
#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
					    i915_reg_t reg) \
{ \
	return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
					   i915_reg_t reg, u##x__ val) \
{ \
	write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write
0323 
/*
 * Generate the public intel_uncore_read*()/intel_uncore_write*()
 * accessors. These dispatch through the uncore->funcs vtable, with
 * trace__ selecting the traced or _notrace flavour.
 */
#define __uncore_read(name__, x__, s__, trace__) \
static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
					   i915_reg_t reg) \
{ \
	return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
}

#define __uncore_write(name__, x__, s__, trace__) \
static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
					 i915_reg_t reg, u##x__ val) \
{ \
	uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
}

__uncore_read(read8, 8, b, true)
__uncore_read(read16, 16, w, true)
__uncore_read(read, 32, l, true)
__uncore_read(read16_notrace, 16, w, false)
__uncore_read(read_notrace, 32, l, false)

__uncore_write(write8, 8, b, true)
__uncore_write(write16, 16, w, true)
__uncore_write(write, 32, l, true)
__uncore_write(write_notrace, 32, l, false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support intel_uncore_write64,
 * or uncore->funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
__uncore_read(read64, 64, q, true)
0364 
0365 static inline u64
0366 intel_uncore_read64_2x32(struct intel_uncore *uncore,
0367              i915_reg_t lower_reg, i915_reg_t upper_reg)
0368 {
0369     u32 upper, lower, old_upper, loop = 0;
0370     upper = intel_uncore_read(uncore, upper_reg);
0371     do {
0372         old_upper = upper;
0373         lower = intel_uncore_read(uncore, lower_reg);
0374         upper = intel_uncore_read(uncore, upper_reg);
0375     } while (upper != old_upper && loop++ < 2);
0376     return (u64)upper << 32 | lower;
0377 }
0378 
/* Posting reads: read a register back purely for its ordering side effect;
 * the returned value is discarded. */
#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))

#undef __uncore_read
#undef __uncore_write
0384 
/*
 * These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is
 * explicitly controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&uncore->lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&uncore->lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently
 * accessed by different clients (e.g. on Ivybridge). Access to registers
 * should therefore generally be serialised, by either the
 * dev_priv->uncore.lock or a more localised lock guarding all access to
 * that bank of registers.
 */
#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
0415 
0416 static inline void intel_uncore_rmw(struct intel_uncore *uncore,
0417                     i915_reg_t reg, u32 clear, u32 set)
0418 {
0419     u32 old, val;
0420 
0421     old = intel_uncore_read(uncore, reg);
0422     val = (old & ~clear) | set;
0423     if (val != old)
0424         intel_uncore_write(uncore, reg, val);
0425 }
0426 
0427 static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
0428                        i915_reg_t reg, u32 clear, u32 set)
0429 {
0430     u32 old, val;
0431 
0432     old = intel_uncore_read_fw(uncore, reg);
0433     val = (old & ~clear) | set;
0434     if (val != old)
0435         intel_uncore_write_fw(uncore, reg, val);
0436 }
0437 
0438 static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
0439                         i915_reg_t reg, u32 val,
0440                         u32 mask, u32 expected_val)
0441 {
0442     u32 reg_val;
0443 
0444     intel_uncore_write(uncore, reg, val);
0445     reg_val = intel_uncore_read(uncore, reg);
0446 
0447     return (reg_val & mask) != expected_val ? -EINVAL : 0;
0448 }
0449 
/*
 * Lowest-level 32-bit helpers taking a raw __iomem base instead of a
 * struct intel_uncore — for callers that only have the mapping itself.
 */
#define raw_reg_read(base, reg) \
	readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
	writel(value, base + i915_mmio_reg_offset(reg))
0454 
0455 #endif /* !__INTEL_UNCORE_H__ */