/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include <linux/delay.h>
#include <linux/iosys-map.h>
#include <linux/xarray.h>

#include "intel_guc_ct.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
#include "intel_uncore.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;
struct intel_guc_state_capture;

/**
 * struct intel_guc - Top level structure of GuC.
 *
 * It handles firmware loading and manages client pool. intel_guc owns an
 * i915_sched_engine for submission.
 */
struct intel_guc {
	/** @fw: the GuC firmware */
	struct intel_uc_fw fw;
	/** @log: sub-structure containing GuC log related data and objects */
	struct intel_guc_log log;
	/** @ct: the command transport communication channel */
	struct intel_guc_ct ct;
	/** @slpc: sub-structure containing SLPC related data and objects */
	struct intel_guc_slpc slpc;
	/** @capture: the error-state-capture module's data and objects */
	struct intel_guc_state_capture *capture;

	/** @sched_engine: Global engine used to submit requests to GuC */
	struct i915_sched_engine *sched_engine;
	/**
	 * @stalled_request: if GuC can't process a request for any reason, we
	 * save it until GuC restarts processing. No other request can be
	 * submitted until the stalled request is processed.
	 */
	struct i915_request *stalled_request;
	/**
	 * @submission_stall_reason: reason why submission is stalled
	 */
	enum {
		STALL_NONE,
		STALL_REGISTER_CONTEXT,
		STALL_MOVE_LRC_TAIL,
		STALL_ADD_REQUEST,
	} submission_stall_reason;

	/* intel_guc_recv interrupt related state */
	/** @irq_lock: protects GuC irq(s) */
	spinlock_t irq_lock;
	/**
	 * @msg_enabled_mask: mask of events that are processed when receiving
	 * an INTEL_GUC_ACTION_DEFAULT G2H message.
	 */
	unsigned int msg_enabled_mask;

	/**
	 * @outstanding_submission_g2h: number of outstanding GuC to Host
	 * responses related to GuC submission, used to determine if the GT is
	 * idle
	 */
	atomic_t outstanding_submission_g2h;

	/** @interrupts: pointers to GuC interrupt-managing functions. */
	struct {
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/**
	 * @submission_state: sub-structure for submission state protected by
	 * single lock
	 */
	struct {
		/**
		 * @submission_state.lock: protects everything in
		 * submission_state, ce->guc_id.id, and ce->guc_id.ref
		 * when transitioning in and out of zero
		 */
		spinlock_t lock;
		/**
		 * @submission_state.guc_ids: used to allocate new
		 * guc_ids, single-lrc
		 */
		struct ida guc_ids;
		/**
		 * @submission_state.num_guc_ids: Number of guc_ids, selftest
		 * feature to be able to reduce this number while testing.
		 */
		int num_guc_ids;
		/**
		 * @submission_state.guc_ids_bitmap: used to allocate
		 * new guc_ids, multi-lrc
		 */
		unsigned long *guc_ids_bitmap;
		/**
		 * @submission_state.guc_id_list: list of intel_context
		 * with valid guc_ids but no refs
		 */
		struct list_head guc_id_list;
		/**
		 * @submission_state.destroyed_contexts: list of contexts
		 * waiting to be destroyed (deregistered with the GuC)
		 */
		struct list_head destroyed_contexts;
		/**
		 * @submission_state.destroyed_worker: worker to deregister
		 * contexts, needed as we need to take a GT PM reference and
		 * can't from the destroy function as it might be in an atomic
		 * context (no sleeping)
		 */
		struct work_struct destroyed_worker;
		/**
		 * @submission_state.reset_fail_worker: worker to trigger
		 * a GT reset after an engine reset fails
		 */
		struct work_struct reset_fail_worker;
		/**
		 * @submission_state.reset_fail_mask: mask of engines that
		 * failed to reset
		 */
		intel_engine_mask_t reset_fail_mask;
	} submission_state;

	/**
	 * @submission_supported: tracks whether we support GuC submission on
	 * the current platform
	 */
	bool submission_supported;
	/** @submission_selected: tracks whether the user enabled GuC submission */
	bool submission_selected;
	/** @submission_initialized: tracks whether GuC submission has been initialised */
	bool submission_initialized;
	/**
	 * @rc_supported: tracks whether we support GuC rc on the current
	 * platform
	 */
	bool rc_supported;
	/** @rc_selected: tracks whether the user enabled GuC rc */
	bool rc_selected;

	/** @ads_vma: object allocated to hold the GuC ADS */
	struct i915_vma *ads_vma;
	/** @ads_map: contents of the GuC ADS */
	struct iosys_map ads_map;
	/** @ads_regset_size: size of the save/restore regsets in the ADS */
	u32 ads_regset_size;
	/**
	 * @ads_regset_count: number of save/restore registers in the ADS for
	 * each engine
	 */
	u32 ads_regset_count[I915_NUM_ENGINES];
	/** @ads_regset: save/restore regsets in the ADS */
	struct guc_mmio_reg *ads_regset;
	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
	u32 ads_golden_ctxt_size;
	/** @ads_capture_size: size of register lists in the ADS used for error capture */
	u32 ads_capture_size;
	/** @ads_engine_usage_size: size of engine usage in the ADS */
	u32 ads_engine_usage_size;

	/** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
	struct i915_vma *lrc_desc_pool_v69;
	/** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
	void *lrc_desc_pool_vaddr_v69;

	/**
	 * @context_lookup: used to resolve intel_context from guc_id, if a
	 * context is present in this structure it is registered with the GuC
	 */
	struct xarray context_lookup;

	/** @params: Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/** @notify_reg: register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/**
	 * @mmio_msg: notification bitmask that the GuC writes in one of its
	 * registers when the CT channel is disabled, to be processed when the
	 * channel is back up.
	 */
	u32 mmio_msg;

	/** @send_mutex: used to serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/**
	 * @timestamp: GT timestamp related data
	 */
	struct {
		/**
		 * @timestamp.lock: Lock protecting the below fields and
		 * the engine stats.
		 */
		spinlock_t lock;

		/**
		 * @timestamp.gt_stamp: 64-bit extended value of the GT
		 * timestamp.
		 */
		u64 gt_stamp;

		/**
		 * @timestamp.ping_delay: Period for polling the GT
		 * timestamp for overflow.
		 */
		unsigned long ping_delay;

		/**
		 * @timestamp.work: Periodic work to adjust GT timestamp,
		 * engine and context usage for overflows.
		 */
		struct delayed_work work;

		/**
		 * @timestamp.shift: Right shift value for the gpm timestamp
		 */
		u32 shift;

		/**
		 * @timestamp.last_stat_jiffies: jiffies at last actual
		 * stats collection time. We use this timestamp to ensure
		 * we don't oversample the stats because runtime power
		 * management events can trigger stats collection at much
		 * higher rates than required.
		 */
		unsigned long last_stat_jiffies;
	} timestamp;

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @number_guc_id_stolen: The number of guc_ids that have been stolen
	 */
	int number_guc_id_stolen;
#endif
};
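
/*
 * Usage sketch (illustrative, not part of the driver): an intel_context is
 * resolved from a guc_id carried in a G2H message via @context_lookup.
 * xa_lock_irqsave()/xa_load() are real xarray API; the surrounding variable
 * names are hypothetical.
 *
 *	struct intel_context *ce;
 *	unsigned long flags;
 *
 *	xa_lock_irqsave(&guc->context_lookup, flags);
 *	ce = xa_load(&guc->context_lookup, guc_id);
 *	xa_unlock_irqrestore(&guc->context_lookup, flags);
 */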

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
}

static
inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 g2h_len_dw)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
				 MAKE_SEND_FLAGS(g2h_len_dw));
}

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size, 0);
}
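
/*
 * Example of the send helpers in action, closely following what
 * intel_guc_auth_huc() in intel_guc.c does: a H2G request is a dword array
 * whose first element is the action opcode, followed by the action's
 * parameters.
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
 *		rsa_offset
 *	};
 *
 *	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 */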

static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
					   const u32 *action,
					   u32 len,
					   u32 g2h_len_dw,
					   bool loop)
{
	int err;
	unsigned int sleep_period_ms = 1;
	bool not_atomic = !in_atomic() && !irqs_disabled();

	/*
	 * FIXME: Have caller pass in if we are in an atomic context to avoid
	 * using in_atomic(). It is likely safe here as we check for irqs
	 * disabled which basically all the spin locks in the i915 now do.
	 */
	might_sleep_if(loop && not_atomic);

retry:
	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
	if (unlikely(err == -EBUSY && loop)) {
		if (likely(not_atomic)) {
			/* Exponential backoff while the CT channel is busy */
			if (msleep_interruptible(sleep_period_ms))
				return -EINTR;
			sleep_period_ms = sleep_period_ms << 1;
		} else {
			cpu_relax();
		}
		goto retry;
	}

	return err;
}
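
/*
 * Usage sketch (illustrative): callers that must get a H2G message out even
 * while the CT channel is backed up pass loop=true and, when a G2H reply is
 * expected, a non-zero g2h_len_dw so space for the reply is reserved. The
 * action array below is hypothetical; the G2H_LEN_DW_* macros live in
 * intel_guc_fwif.h.
 *
 *	u32 action[] = { ... };
 *
 *	ret = intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
 *				       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
 */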

static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	intel_guc_ct_event_handler(&guc->ct);
}

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude [0, ggtt.pin_bias) address space from
 * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}
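
/*
 * Illustrative arithmetic, assuming a hypothetical pin bias of 2 MiB (the
 * real value is platform dependent and comes from i915_ggtt_pin_bias()):
 * the window of GGTT addresses usable by the GuC would then be
 * [0x00200000, 0xFEE00000), i.e. [pin_bias, GUC_GGTT_TOP).
 */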

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_late(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);
int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);
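
/*
 * Usage sketch (illustrative): buffers shared with the GuC are typically
 * created with intel_guc_allocate_and_map_vma(), which pins the object above
 * the GGTT pin bias and returns a CPU mapping. Variable names below are
 * hypothetical.
 *
 *	struct i915_vma *vma;
 *	void *vaddr;
 *	int err;
 *
 *	err = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, &vaddr);
 *	if (err)
 *		return err;
 *	... write contents via vaddr, then hand
 *	intel_guc_ggtt_offset(guc, vma) to the GuC as the buffer address ...
 */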

static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}

static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_disable_interrupts(guc);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
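
/*
 * Usage sketch (illustrative): when communication with the GuC is
 * established, the driver enables the G2H messages it wants relayed to the
 * log handlers, e.g. (mask bits as defined in intel_guc_fwif.h):
 *
 *	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
 *				  INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
 */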

int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg, u32 len);
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
				     const u32 *msg, u32 len);
int intel_guc_context_reset_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
					 const u32 *msg, u32 len);
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);

struct intel_engine_cs *
intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance);

void intel_guc_find_hung_context(struct intel_engine_cs *engine);

int intel_guc_global_policies_update(struct intel_guc *guc);

void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);

void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);
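
/*
 * Note: the submission_reset functions above are invoked from the common GT
 * reset path (see gt/intel_reset.c) via the intel_uc layer: _prepare runs
 * before the HW reset, _reset re-submits or cancels in-flight contexts, and
 * _finish runs once the GuC is back up.
 */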

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

void intel_guc_write_barrier(struct intel_guc *guc);

#endif