/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Brad Volkin <bradley.d.volkin@intel.com>
 *
 */

#include <linux/highmem.h>

#include <drm/drm_cache.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"

#include "i915_cmd_parser.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
#include "i915_reg.h"

/**
 * DOC: batch buffer command parser
 *
 * Motivation:
 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
 * require userspace code to submit batches containing commands such as
 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
 * generations of the hardware will noop these commands in "unsecure" batches
 * (which includes all userspace batches submitted via i915) even though the
 * commands may be safe and represent the intended programming model of the
 * device.
 *
 * The software command parser is similar in operation to the command parsing
 * done in hardware for unsecure batches. However, the software parser allows
 * some operations that would be noop'd by hardware, if the parser determines
 * the operation is safe, and submits the batch as "secure" to prevent hardware
 * parsing.
 *
 * Threats:
 * At a high level, the hardware (and software) checks attempt to prevent
 * granting userspace undue privileges. There are three categories of privilege.
 *
 * First, commands which are explicitly defined as privileged or which should
 * only be used by the kernel driver. The parser rejects such commands.
 *
 * Second, commands which access registers. To support correct/enhanced
 * userspace functionality, particularly certain OpenGL extensions, the parser
 * provides a whitelist of registers which userspace may safely access.
 *
 * Third, commands which access privileged memory (e.g. GGTT, HWS page, etc).
 * The parser always rejects such commands.
 *
 * The majority of the problematic commands fall in the MI_* range, with only a
 * few specific commands on each engine (e.g. PIPE_CONTROL and MI_FLUSH_DW).
 *
 * Implementation:
 * Each engine maintains tables of commands and registers which the parser
 * uses in scanning batch buffers submitted to that engine.
 *
 * Since the set of commands that the parser must check for is significantly
 * smaller than the number of commands supported, the parser tables contain only
 * those commands required by the parser. This generally works because command
 * opcode ranges have standard command length encodings. So for commands that
 * the parser does not need to check, it can easily skip them. This is
 * implemented via a per-engine length decoding vfunc.
 *
 * Unfortunately, there are a number of commands that do not follow the standard
 * length encoding for their opcode range, primarily amongst the MI_* commands.
 * To handle this, the parser provides a way to define explicit "skip" entries
 * in the per-engine command tables.
 *
 * Other command table entries map fairly directly to the high level categories
 * mentioned above: rejected, register whitelist. The parser implements a number
 * of checks, including the privileged memory checks, via a general bitmasking
 * mechanism.
 */

/*
 * A command that requires special handling by the command parser.
 */
struct drm_i915_cmd_descriptor {
    /*
     * Flags describing how the command parser processes the command.
     *
     * CMD_DESC_FIXED: The command has a fixed length if this is set,
     *                 a length mask if not set
     * CMD_DESC_SKIP: The command is allowed but does not follow the
     *                standard length encoding for the opcode range in
     *                which it falls
     * CMD_DESC_REJECT: The command is never allowed
     * CMD_DESC_REGISTER: The command should be checked against the
     *                    register whitelist for the appropriate ring
     */
    u32 flags;
#define CMD_DESC_FIXED    (1<<0)
#define CMD_DESC_SKIP     (1<<1)
#define CMD_DESC_REJECT   (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK  (1<<4)

    /*
     * The command's unique identification bits and the bitmask to get them.
     * This isn't strictly the opcode field as defined in the spec and may
     * also include type, subtype, and/or subop fields.
     */
    struct {
        u32 value;
        u32 mask;
    } cmd;

    /*
     * The command's length. The command is either fixed length (i.e. does
     * not include a length field) or has a length field mask. The flag
     * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
     * a length mask. All command entries in a command table must include
     * length information.
     */
    union {
        u32 fixed;
        u32 mask;
    } length;

    /*
     * Describes where to find a register address in the command to check
     * against the ring's register whitelist. Only valid if flags has the
     * CMD_DESC_REGISTER bit set.
     *
     * A non-zero step value implies that the command may access multiple
     * registers in sequence (e.g. LRI); in that case step gives the
     * distance in dwords between individual offset fields.
     */
    struct {
        u32 offset;
        u32 mask;
        u32 step;
    } reg;

#define MAX_CMD_DESC_BITMASKS 3
    /*
     * Describes command checks where a particular dword is masked and
     * compared against an expected value. If the command does not match
     * the expected value, the parser rejects it. Only valid if flags has
     * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
     * are valid.
     *
     * If the check specifies a non-zero condition_mask then the parser
     * only performs the check when the bits specified by condition_mask
     * are non-zero.
     */
    struct {
        u32 offset;
        u32 mask;
        u32 expected;
        u32 condition_offset;
        u32 condition_mask;
    } bits[MAX_CMD_DESC_BITMASKS];
};
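
/*
 * For illustration: a descriptor matches a command header iff
 * ((header ^ cmd.value) & cmd.mask) == 0. With the gen7 MI encoding
 * (opcode in bits 31:23), the MI_STORE_REGISTER_MEM entry below carries
 * roughly .cmd = { 0x24 << 23, 0xFF800000 }, so a header of 0x12000001
 * (SRM with a low-order length bit set) still selects that descriptor,
 * while any other opcode cannot.
 */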

/*
 * A table of commands requiring special handling by the command parser.
 *
 * Each engine has an array of tables. Each table consists of an array of
 * command descriptors, which must be sorted with command opcodes in
 * ascending order.
 */
struct drm_i915_cmd_table {
    const struct drm_i915_cmd_descriptor *table;
    int count;
};

#define STD_MI_OPCODE_SHIFT  (32 - 9)
#define STD_3D_OPCODE_SHIFT  (32 - 16)
#define STD_2D_OPCODE_SHIFT  (32 - 10)
#define STD_MFX_OPCODE_SHIFT (32 - 16)
#define MIN_OPCODE_SHIFT 16

#define CMD(op, opm, f, lm, fl, ...)                \
    {                           \
        .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
        .cmd = { (op & ~0u << (opm)), ~0u << (opm) },   \
        .length = { (lm) },             \
        __VA_ARGS__                 \
    }
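
/*
 * For illustration, a sketch of the expansion (with STD_MI_OPCODE_SHIFT
 * == 23, so ~0u << 23 == 0xFF800000):
 *
 *     CMD(MI_NOOP, SMI, F, 1, S)
 *
 * becomes roughly
 *
 *     {
 *         .flags  = CMD_DESC_SKIP | CMD_DESC_FIXED,
 *         .cmd    = { MI_NOOP & 0xFF800000, 0xFF800000 },
 *         .length = { 1 },
 *     }
 *
 * i.e. a fixed-length, one-dword command that the parser skips.
 */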

/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_SHIFT
#define S3D STD_3D_OPCODE_SHIFT
#define S2D STD_2D_OPCODE_SHIFT
#define SMFX STD_MFX_OPCODE_SHIFT
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK

/*            Command                          Mask   Fixed Len   Action
          ---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
    CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
    CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
    CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      R  ),
    CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
    CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
    CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
    CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
    CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
    CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
          .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
    CMD(  MI_STORE_REGISTER_MEM,            SMI,    F,  3,     W | B,
          .reg = { .offset = 1, .mask = 0x007FFFFC },
          .bits = {{
            .offset = 0,
            .mask = MI_GLOBAL_GTT,
            .expected = 0,
          }},                              ),
    CMD(  MI_LOAD_REGISTER_MEM,             SMI,    F,  3,     W | B,
          .reg = { .offset = 1, .mask = 0x007FFFFC },
          .bits = {{
            .offset = 0,
            .mask = MI_GLOBAL_GTT,
            .expected = 0,
          }},                              ),
    /*
     * MI_BATCH_BUFFER_START requires some special handling. It's not
     * really a 'skip' action but it doesn't seem like it's worth adding
     * a new action. See intel_engine_cmd_parser().
     */
    CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
};
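
/*
 * Reading the MI_LOAD_REGISTER_IMM(1) row above: an LRI packet is one
 * header dword followed by (register offset, value) pairs, so the first
 * register offset sits in dword 1 and consecutive offsets are 2 dwords
 * apart; hence .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }.
 * The mask keeps bits 22:2, i.e. the dword-aligned register offset
 * itself, before the whitelist lookup.
 */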

static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
    CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
    CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
    CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
    CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
    CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
    CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
    CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
    CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
    CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
          .bits = {{
            .offset = 0,
            .mask = MI_GLOBAL_GTT,
            .expected = 0,
          }},                              ),
    CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0xFF,   R  ),
    CMD(  MI_CLFLUSH,                       SMI,   !F,  0x3FF,  B,
          .bits = {{
            .offset = 0,
            .mask = MI_GLOBAL_GTT,
            .expected = 0,
          }},                              ),
    CMD(  MI_REPORT_PERF_COUNT,             SMI,   !F,  0x3F,   B,
          .bits = {{
            .offset = 1,
            .mask = MI_REPORT_PERF_COUNT_GGTT,
            .expected = 0,
          }},                              ),
    CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
          .bits = {{
            .offset = 0,
            .mask = MI_GLOBAL_GTT,
            .expected = 0,
          }},                              ),
    CMD(  GFX_OP_3DSTATE_VF_STATISTICS,     S3D,    F,  1,      S  ),
    CMD(  PIPELINE_SELECT,                  S3D,    F,  1,      S  ),
    CMD(  MEDIA_VFE_STATE,                  S3D,   !F,  0xFFFF, B,
          .bits = {{
            .offset = 2,
            .mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
            .expected = 0,
          }},                              ),
    CMD(  GPGPU_OBJECT,                     S3D,   !F,  0xFF,   S  ),
    CMD(  GPGPU_WALKER,                     S3D,   !F,  0xFF,   S  ),
    CMD(  GFX_OP_3DSTATE_SO_DECL_LIST,      S3D,   !F,  0x1FF,  S  ),
    CMD(  GFX_OP_PIPE_CONTROL(5),           S3D,   !F,  0xFF,   B,
          .bits = {{
            .offset = 1,
            .mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
            .expected = 0,
          },
          {
            .offset = 1,
            .mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
                     PIPE_CONTROL_STORE_DATA_INDEX),
            .expected = 0,
            .condition_offset = 1,
            .condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
          }},                              ),
};

static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
    CMD(  MI_SET_PREDICATE,                 SMI,    F,  1,      S  ),
    CMD(  MI_RS_CONTROL,                    SMI,    F,  1,      S  ),
    CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
    CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
    CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
    CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   R  ),
    CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
    CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   W,
          .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),
    CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
    CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
    CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
    CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_VS,  S3D,   !F,  0x7FF,  S  ),
    CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_PS,  S3D,   !F,  0x7FF,  S  ),

    CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS,  S3D,   !F,  0x1FF,  S  ),
    CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS,  S3D,   !F,  0x1FF,  S  ),
    CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS,  S3D,   !F,  0x1FF,  S  ),
    CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS,  S3D,   !F,  0x1FF,  S  ),
    CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,   !F,  0x1FF,  S  ),
};

static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
    CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
    CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
    CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
          .bits = {{
            .offset = 0,
            .mask = MI_GLOBAL_GTT,
            .expected = 0,
          }},                              ),
    CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
    CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
          .bits = {{
            .offset = 0,
            .mask = MI_FLUSH_DW_NOTIFY,
            .expected = 0,
          },
          {
            .offset = 1,
            .mask = MI_FLUSH_DW_USE_GTT,
            .expected = 0,
            .condition_offset = 0,
            .condition_mask = MI_FLUSH_DW_OP_MASK,
          },
          {
            .offset = 0,
            .mask = MI_FLUSH_DW_STORE_INDEX,
            .expected = 0,
            .condition_offset = 0,
            .condition_mask = MI_FLUSH_DW_OP_MASK,
          }},                              ),
    CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
          .bits = {{
            .offset = 0,
            .mask = MI_GLOBAL_GTT,
            .expected = 0,
          }},                              ),
    /*
     * MFX_WAIT doesn't fit the way we handle length for most commands.
     * It has a length field but it uses a non-standard length bias.
     * It is always 1 dword though, so just treat it as fixed length.
     */
    CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
};

static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
    CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
    CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
    CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
          .bits = {{
            .offset = 0,
            .mask = MI_GLOBAL_GTT,
            .expected = 0,
          }},                              ),
    CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
    CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
          .bits = {{
            .offset = 0,
            .mask = MI_FLUSH_DW_NOTIFY,
            .expected = 0,
          },
          {
            .offset = 1,
            .mask = MI_FLUSH_DW_USE_GTT,
            .expected = 0,
            .condition_offset = 0,
            .condition_mask = MI_FLUSH_DW_OP_MASK,
          },
          {
            .offset = 0,
            .mask = MI_FLUSH_DW_STORE_INDEX,
            .expected = 0,
            .condition_offset = 0,
            .condition_mask = MI_FLUSH_DW_OP_MASK,
          }},                              ),
    CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
          .bits = {{
            .offset = 0,
            .mask = MI_GLOBAL_GTT,
            .expected = 0,
          }},                              ),
};

static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
    CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
    CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
          .bits = {{
            .offset = 0,
            .mask = MI_GLOBAL_GTT,
            .expected = 0,
          }},                              ),
    CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
    CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
          .bits = {{
            .offset = 0,
            .mask = MI_FLUSH_DW_NOTIFY,
            .expected = 0,
          },
          {
            .offset = 1,
            .mask = MI_FLUSH_DW_USE_GTT,
            .expected = 0,
            .condition_offset = 0,
            .condition_mask = MI_FLUSH_DW_OP_MASK,
          },
          {
            .offset = 0,
            .mask = MI_FLUSH_DW_STORE_INDEX,
            .expected = 0,
            .condition_offset = 0,
            .condition_mask = MI_FLUSH_DW_OP_MASK,
          }},                              ),
    CMD(  COLOR_BLT,                        S2D,   !F,  0x3F,   S  ),
    CMD(  SRC_COPY_BLT,                     S2D,   !F,  0x3F,   S  ),
};

static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
    CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   R  ),
    CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
};

/*
 * For Gen9 we can still rely on the h/w to enforce cmd security, and only
 * need to re-enforce the register access checks. We therefore only need to
 * teach the cmdparser how to find the end of each command, and identify
 * register accesses. The table doesn't need to reject any commands, and so
 * the only commands listed here are:
 *   1) Those that touch registers
 *   2) Those that do not have the default 8-bit length
 *
 * Note that the default MI length mask chosen for this table is 0xFF, not
 * the 0x3F used on older devices. This is because the vast majority of MI
 * cmds on Gen9 use a standard 8-bit Length field.
 * All the Gen9 blitter instructions are standard 0xFF length mask, and
 * none allow access to non-general registers, so in fact no BLT cmds are
 * included in the table at all.
 */
static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
    CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
    CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      S  ),
    CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      S  ),
    CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
    CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
    CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
    CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      S  ),
    CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
    CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   S  ),
    CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   S  ),
    CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  S  ),
    CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
          .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
    CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3FF,  S  ),
    CMD(  MI_STORE_REGISTER_MEM_GEN8,       SMI,    F,  4,      W,
          .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
    CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   S  ),
    CMD(  MI_LOAD_REGISTER_MEM_GEN8,        SMI,    F,  4,      W,
          .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
    CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   W,
          .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),

    /*
     * We allow BB_START but apply further checks. We just sanitize the
     * basic fields here.
     */
#define MI_BB_START_OPERAND_MASK   GENMASK(SMI-1, 0)
#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
    CMD(  MI_BATCH_BUFFER_START_GEN8,       SMI,   !F,  0xFF,   B,
          .bits = {{
            .offset = 0,
            .mask = MI_BB_START_OPERAND_MASK,
            .expected = MI_BB_START_OPERAND_EXPECT,
          }},                              ),
};

static const struct drm_i915_cmd_descriptor noop_desc =
    CMD(MI_NOOP, SMI, F, 1, S);

#undef CMD
#undef SMI
#undef S3D
#undef S2D
#undef SMFX
#undef F
#undef S
#undef R
#undef W
#undef B

static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
    { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
    { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
};

static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
    { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
    { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
    { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};

static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
    { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
    { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
};

static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
    { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
    { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
};

static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
    { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
    { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
};

static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
    { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
    { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
    { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};

static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
    { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
};

/*
 * Register whitelists, sorted by increasing register offset.
 */

/*
 * An individual whitelist entry granting access to register addr.  If
 * mask is non-zero the argument of immediate register writes will be
 * AND-ed with mask, and the command will be rejected if the result
 * doesn't match value.
 *
 * Registers with non-zero mask are only allowed to be written using
 * LRI.
 */
struct drm_i915_reg_descriptor {
    i915_reg_t addr;
    u32 mask;
    u32 value;
};

/* Convenience macro for adding 32-bit registers. */
#define REG32(_reg, ...) \
    { .addr = (_reg), __VA_ARGS__ }

#define REG32_IDX(_reg, idx) \
    { .addr = _reg(idx) }

/*
 * Convenience macro for adding 64-bit registers.
 *
 * Some registers that userspace accesses are 64 bits. The register
 * access commands only allow 32-bit accesses. Hence, we have to include
 * entries for both halves of the 64-bit registers.
 */
#define REG64(_reg) \
    { .addr = _reg }, \
    { .addr = _reg ## _UDW }

#define REG64_IDX(_reg, idx) \
    { .addr = _reg(idx) }, \
    { .addr = _reg ## _UDW(idx) }

#define REG64_BASE_IDX(_reg, base, idx) \
    { .addr = _reg(base, idx) }, \
    { .addr = _reg ## _UDW(base, idx) }
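
/*
 * Illustrative expansion: REG64(PS_DEPTH_COUNT) emits two adjacent
 * 32-bit whitelist entries,
 *
 *     { .addr = PS_DEPTH_COUNT },
 *     { .addr = PS_DEPTH_COUNT_UDW },
 *
 * covering both halves of the 64-bit counter, since the register access
 * commands can only move 32 bits at a time.
 */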

static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
    REG64(GPGPU_THREADS_DISPATCHED),
    REG64(HS_INVOCATION_COUNT),
    REG64(DS_INVOCATION_COUNT),
    REG64(IA_VERTICES_COUNT),
    REG64(IA_PRIMITIVES_COUNT),
    REG64(VS_INVOCATION_COUNT),
    REG64(GS_INVOCATION_COUNT),
    REG64(GS_PRIMITIVES_COUNT),
    REG64(CL_INVOCATION_COUNT),
    REG64(CL_PRIMITIVES_COUNT),
    REG64(PS_INVOCATION_COUNT),
    REG64(PS_DEPTH_COUNT),
    REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
    REG64_IDX(MI_PREDICATE_SRC0, RENDER_RING_BASE),
    REG64_IDX(MI_PREDICATE_SRC1, RENDER_RING_BASE),
    REG32(GEN7_3DPRIM_END_OFFSET),
    REG32(GEN7_3DPRIM_START_VERTEX),
    REG32(GEN7_3DPRIM_VERTEX_COUNT),
    REG32(GEN7_3DPRIM_INSTANCE_COUNT),
    REG32(GEN7_3DPRIM_START_INSTANCE),
    REG32(GEN7_3DPRIM_BASE_VERTEX),
    REG32(GEN7_GPGPU_DISPATCHDIMX),
    REG32(GEN7_GPGPU_DISPATCHDIMY),
    REG32(GEN7_GPGPU_DISPATCHDIMZ),
    REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
    REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
    REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
    REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
    REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3),
    REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0),
    REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1),
    REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2),
    REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3),
    REG32(GEN7_SO_WRITE_OFFSET(0)),
    REG32(GEN7_SO_WRITE_OFFSET(1)),
    REG32(GEN7_SO_WRITE_OFFSET(2)),
    REG32(GEN7_SO_WRITE_OFFSET(3)),
    REG32(GEN7_L3SQCREG1),
    REG32(GEN7_L3CNTLREG2),
    REG32(GEN7_L3CNTLREG3),
    REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};

static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 0),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 1),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 2),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 3),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 4),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 5),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 6),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 7),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 8),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 9),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 10),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 11),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 12),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 13),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 14),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 15),
    REG32(HSW_SCRATCH1,
          .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
          .value = 0),
    REG32(HSW_ROW_CHICKEN3,
          .mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
                    HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
          .value = 0),
};

static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
    REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
    REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
    REG32(BCS_SWCTRL),
    REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};

static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
    REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
    REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
    REG32(BCS_SWCTRL),
    REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
    REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 0),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 1),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 2),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 3),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 4),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 5),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 6),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 7),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 8),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 9),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 10),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 11),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 12),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 13),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 14),
    REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 15),
};

#undef REG64
#undef REG32

struct drm_i915_reg_table {
    const struct drm_i915_reg_descriptor *regs;
    int num_regs;
};

static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
    { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
};

static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
    { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
};

static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
    { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
    { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
};

static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
    { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
};

static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
    { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
};

static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
    u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
    u32 subclient =
        (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

    if (client == INSTR_MI_CLIENT)
        return 0x3F;
    else if (client == INSTR_RC_CLIENT) {
        if (subclient == INSTR_MEDIA_SUBCLIENT)
            return 0xFFFF;
        else
            return 0xFF;
    }

    DRM_DEBUG("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
    return 0;
}
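
/*
 * Worked example, assuming the usual gen7 header encodings from
 * intel_gpu_commands.h: GFX_OP_PIPE_CONTROL(6) has header 0x7A000004.
 * 0x7A000004 >> 29 == 0x3 == INSTR_RC_CLIENT, and the subclient field
 * (bits 28:27) is 0x3, not INSTR_MEDIA_SUBCLIENT, so the mask returned
 * is 0xFF: the low 8 bits of the header hold the (biased) length.
 */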

static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
    u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
    u32 subclient =
        (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
    u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;

    if (client == INSTR_MI_CLIENT)
        return 0x3F;
    else if (client == INSTR_RC_CLIENT) {
        if (subclient == INSTR_MEDIA_SUBCLIENT) {
            if (op == 6)
                return 0xFFFF;
            else
                return 0xFFF;
        } else
            return 0xFF;
    }

    DRM_DEBUG("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
    return 0;
}

static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
    u32 client = cmd_header >> INSTR_CLIENT_SHIFT;

    if (client == INSTR_MI_CLIENT)
        return 0x3F;
    else if (client == INSTR_BC_CLIENT)
        return 0xFF;

    DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
    return 0;
}

static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
{
    u32 client = cmd_header >> INSTR_CLIENT_SHIFT;

    if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
        return 0xFF;

    DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
    return 0;
}

static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
                 const struct drm_i915_cmd_table *cmd_tables,
                 int cmd_table_count)
{
    int i;
    bool ret = true;

    if (!cmd_tables || cmd_table_count == 0)
        return true;

    for (i = 0; i < cmd_table_count; i++) {
        const struct drm_i915_cmd_table *table = &cmd_tables[i];
        u32 previous = 0;
        int j;

        for (j = 0; j < table->count; j++) {
            const struct drm_i915_cmd_descriptor *desc =
                &table->table[j];
            u32 curr = desc->cmd.value & desc->cmd.mask;

            if (curr < previous) {
                drm_err(&engine->i915->drm,
                    "CMD: %s [%d] command table not sorted: "
                    "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
                    engine->name, engine->id,
                    i, j, curr, previous);
                ret = false;
            }

            previous = curr;
        }
    }

    return ret;
}

static bool check_sorted(const struct intel_engine_cs *engine,
             const struct drm_i915_reg_descriptor *reg_table,
             int reg_count)
{
    int i;
    u32 previous = 0;
    bool ret = true;

    for (i = 0; i < reg_count; i++) {
        u32 curr = i915_mmio_reg_offset(reg_table[i].addr);

        if (curr < previous) {
            drm_err(&engine->i915->drm,
                "CMD: %s [%d] register table not sorted: "
                "entry=%d reg=0x%08X prev=0x%08X\n",
                engine->name, engine->id,
                i, curr, previous);
            ret = false;
        }

        previous = curr;
    }

    return ret;
}

static bool validate_regs_sorted(struct intel_engine_cs *engine)
{
    int i;
    const struct drm_i915_reg_table *table;

    for (i = 0; i < engine->reg_table_count; i++) {
        table = &engine->reg_tables[i];
        if (!check_sorted(engine, table->regs, table->num_regs))
            return false;
    }

    return true;
}

struct cmd_node {
    const struct drm_i915_cmd_descriptor *desc;
    struct hlist_node node;
};

/*
 * Different command ranges have different numbers of bits for the opcode. For
 * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
 * problem is that, for example, MI commands use bits 22:16 for other fields
 * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
 * we mask a command from a batch it could hash to the wrong bucket due to
 * non-opcode bits being set. But if we don't include those bits, some 3D
 * commands may hash to the same bucket due to not including opcode bits that
 * make the command unique. For now, we will risk hashing to the same bucket.
 */
static inline u32 cmd_header_key(u32 x)
{
    switch (x >> INSTR_CLIENT_SHIFT) {
    default:
    case INSTR_MI_CLIENT:
        return x >> STD_MI_OPCODE_SHIFT;
    case INSTR_RC_CLIENT:
        return x >> STD_3D_OPCODE_SHIFT;
    case INSTR_BC_CLIENT:
        return x >> STD_2D_OPCODE_SHIFT;
    }
}
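
/*
 * Example: MI_BATCH_BUFFER_START is opcode 0x31 in bits 31:23, giving a
 * header of 0x18800000; cmd_header_key() returns 0x18800000 >> 23 ==
 * 0x31, so every BB_START variant, whatever its low-order flag bits,
 * lands in the same hash bucket as its descriptor.
 */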

static int init_hash_table(struct intel_engine_cs *engine,
               const struct drm_i915_cmd_table *cmd_tables,
               int cmd_table_count)
{
    int i, j;

    hash_init(engine->cmd_hash);

    for (i = 0; i < cmd_table_count; i++) {
        const struct drm_i915_cmd_table *table = &cmd_tables[i];

        for (j = 0; j < table->count; j++) {
            const struct drm_i915_cmd_descriptor *desc =
                &table->table[j];
            struct cmd_node *desc_node =
                kmalloc(sizeof(*desc_node), GFP_KERNEL);

            if (!desc_node)
                return -ENOMEM;

            desc_node->desc = desc;
            hash_add(engine->cmd_hash, &desc_node->node,
                 cmd_header_key(desc->cmd.value));
        }
    }

    return 0;
}

static void fini_hash_table(struct intel_engine_cs *engine)
{
    struct hlist_node *tmp;
    struct cmd_node *desc_node;
    int i;

    hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
        hash_del(&desc_node->node);
        kfree(desc_node);
    }
}

/**
 * intel_engine_init_cmd_parser() - set cmd parser related fields for an engine
 * @engine: the engine to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_engine_cs based on whether the platform requires software
 * command parsing.
 */
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
{
    const struct drm_i915_cmd_table *cmd_tables;
    int cmd_table_count;
    int ret;

    if (GRAPHICS_VER(engine->i915) != 7 && !(GRAPHICS_VER(engine->i915) == 9 &&
                         engine->class == COPY_ENGINE_CLASS))
        return 0;

    switch (engine->class) {
    case RENDER_CLASS:
        if (IS_HASWELL(engine->i915)) {
            cmd_tables = hsw_render_ring_cmd_table;
            cmd_table_count =
                ARRAY_SIZE(hsw_render_ring_cmd_table);
        } else {
            cmd_tables = gen7_render_cmd_table;
            cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
        }

        if (IS_HASWELL(engine->i915)) {
            engine->reg_tables = hsw_render_reg_tables;
            engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
        } else {
            engine->reg_tables = ivb_render_reg_tables;
            engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
        }
        engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
        break;
    case VIDEO_DECODE_CLASS:
        cmd_tables = gen7_video_cmd_table;
        cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
        engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
        break;
    case COPY_ENGINE_CLASS:
        engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
        if (GRAPHICS_VER(engine->i915) == 9) {
            cmd_tables = gen9_blt_cmd_table;
            cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
            engine->get_cmd_length_mask =
                gen9_blt_get_cmd_length_mask;

            /* BCS Engine unsafe without parser */
            engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
        } else if (IS_HASWELL(engine->i915)) {
            cmd_tables = hsw_blt_ring_cmd_table;
            cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
        } else {
            cmd_tables = gen7_blt_cmd_table;
            cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
        }

        if (GRAPHICS_VER(engine->i915) == 9) {
            engine->reg_tables = gen9_blt_reg_tables;
            engine->reg_table_count =
                ARRAY_SIZE(gen9_blt_reg_tables);
        } else if (IS_HASWELL(engine->i915)) {
            engine->reg_tables = hsw_blt_reg_tables;
            engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
        } else {
            engine->reg_tables = ivb_blt_reg_tables;
            engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
        }
        break;
    case VIDEO_ENHANCEMENT_CLASS:
        cmd_tables = hsw_vebox_cmd_table;
        cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
        /* VECS can use the same length_mask function as VCS */
        engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
        break;
    default:
        MISSING_CASE(engine->class);
        goto out;
    }

    if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
        drm_err(&engine->i915->drm,
            "%s: command descriptions are not sorted\n",
            engine->name);
        goto out;
    }
    if (!validate_regs_sorted(engine)) {
        drm_err(&engine->i915->drm,
            "%s: registers are not sorted\n", engine->name);
        goto out;
    }

    ret = init_hash_table(engine, cmd_tables, cmd_table_count);
    if (ret) {
        drm_err(&engine->i915->drm,
            "%s: initialisation failed!\n", engine->name);
        fini_hash_table(engine);
        goto out;
    }

    engine->flags |= I915_ENGINE_USING_CMD_PARSER;

out:
    if (intel_engine_requires_cmd_parser(engine) &&
        !intel_engine_using_cmd_parser(engine))
        return -EINVAL;

    return 0;
}

/**
 * intel_engine_cleanup_cmd_parser() - clean up cmd parser related fields
 * @engine: the engine to clean up
 *
 * Releases any resources related to command parsing that may have been
 * initialized for the specified engine.
 */
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
    if (!intel_engine_using_cmd_parser(engine))
        return;

    fini_hash_table(engine);
}

static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_engine_cs *engine,
          u32 cmd_header)
{
    struct cmd_node *desc_node;

    hash_for_each_possible(engine->cmd_hash, desc_node, node,
                   cmd_header_key(cmd_header)) {
        const struct drm_i915_cmd_descriptor *desc = desc_node->desc;

        if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
            return desc;
    }

    return NULL;
}

/*
 * Returns a pointer to a descriptor for the command specified by cmd_header.
 *
 * The caller must supply space for a default descriptor via the default_desc
 * parameter. If no descriptor for the specified command exists in the engine's
 * command parser tables, this function fills in default_desc based on the
 * engine's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *engine,
     u32 cmd_header,
     const struct drm_i915_cmd_descriptor *desc,
     struct drm_i915_cmd_descriptor *default_desc)
{
    u32 mask;

    if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
        return desc;

    desc = find_cmd_in_table(engine, cmd_header);
    if (desc)
        return desc;

    mask = engine->get_cmd_length_mask(cmd_header);
    if (!mask)
        return NULL;

    default_desc->cmd.value = cmd_header;
    default_desc->cmd.mask = ~0u << MIN_OPCODE_SHIFT;
    default_desc->length.mask = mask;
    default_desc->flags = CMD_DESC_SKIP;
    return default_desc;
}
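
/*
 * Fallback behaviour, illustrated: for a well-formed MI command that is
 * in no table, get_cmd_length_mask() yields 0x3F and the synthesized
 * descriptor is roughly
 *
 *     default_desc = (struct drm_i915_cmd_descriptor) {
 *         .flags  = CMD_DESC_SKIP,
 *         .cmd    = { header, 0xFFFF0000 },
 *         .length = { .mask = 0x3F },
 *     };
 *
 * (0xFFFF0000 being ~0u << MIN_OPCODE_SHIFT), letting the parser step
 * over the command without further checks.
 */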

static const struct drm_i915_reg_descriptor *
__find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
{
    int start = 0, end = count;

    while (start < end) {
        int mid = start + (end - start) / 2;
        int ret = addr - i915_mmio_reg_offset(table[mid].addr);

        if (ret < 0)
            end = mid;
        else if (ret > 0)
            start = mid + 1;
        else
            return &table[mid];
    }

    return NULL;
}

static const struct drm_i915_reg_descriptor *
find_reg(const struct intel_engine_cs *engine, u32 addr)
{
    const struct drm_i915_reg_table *table = engine->reg_tables;
    const struct drm_i915_reg_descriptor *reg = NULL;
    int count = engine->reg_table_count;

    for (; !reg && (count > 0); ++table, --count)
        reg = __find_reg(table->regs, table->num_regs, addr);

    return reg;
}

/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
               struct drm_i915_gem_object *src_obj,
               unsigned long offset, unsigned long length,
               bool *needs_clflush_after)
{
    unsigned int src_needs_clflush;
    unsigned int dst_needs_clflush;
    void *dst, *src;
    int ret;

    ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
    if (ret)
        return ERR_PTR(ret);

    dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
    i915_gem_object_finish_access(dst_obj);
    if (IS_ERR(dst))
        return dst;

    ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
    if (ret) {
        i915_gem_object_unpin_map(dst_obj);
        return ERR_PTR(ret);
    }

    src = ERR_PTR(-ENODEV);
    if (src_needs_clflush && i915_has_memcpy_from_wc()) {
        src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
        if (!IS_ERR(src)) {
            i915_unaligned_memcpy_from_wc(dst,
                              src + offset,
                              length);
            i915_gem_object_unpin_map(src_obj);
        }
    }
    if (IS_ERR(src)) {
        unsigned long x, n, remain;
        void *ptr;

        /*
         * We can avoid clflushing partial cachelines before the write
         * if we only ever write full cache-lines. Since we know that
         * both the source and destination are in multiples of
         * PAGE_SIZE, we can simply round up to the next cacheline.
         * We don't care about copying too much here as we only
         * validate up to the end of the batch.
         */
        remain = length;
        if (dst_needs_clflush & CLFLUSH_BEFORE)
            remain = round_up(remain,
                      boot_cpu_data.x86_clflush_size);

        ptr = dst;
        x = offset_in_page(offset);
        for (n = offset >> PAGE_SHIFT; remain; n++) {
            int len = min(remain, PAGE_SIZE - x);

            src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
            if (src_needs_clflush)
                drm_clflush_virt_range(src + x, len);
            memcpy(ptr, src + x, len);
            kunmap_atomic(src);

            ptr += len;
            remain -= len;
            x = 0;
        }
    }

    i915_gem_object_finish_access(src_obj);

    memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));

    /* dst_obj is returned with vmap pinned */
    *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;

    return dst;
}

static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc,
                   const u32 cmd)
{
    return desc->cmd.value == (cmd & desc->cmd.mask);
}

static bool check_cmd(const struct intel_engine_cs *engine,
              const struct drm_i915_cmd_descriptor *desc,
              const u32 *cmd, u32 length)
{
    if (desc->flags & CMD_DESC_SKIP)
        return true;

    if (desc->flags & CMD_DESC_REJECT) {
        DRM_DEBUG("CMD: Rejected command: 0x%08X\n", *cmd);
        return false;
    }

    if (desc->flags & CMD_DESC_REGISTER) {
        /*
         * Get the distance between individual register offset
         * fields if the command can perform more than one
         * access at a time.
         */
        const u32 step = desc->reg.step ? desc->reg.step : length;
        u32 offset;

        for (offset = desc->reg.offset; offset < length;
             offset += step) {
            const u32 reg_addr = cmd[offset] & desc->reg.mask;
            const struct drm_i915_reg_descriptor *reg =
                find_reg(engine, reg_addr);

            if (!reg) {
                DRM_DEBUG("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
                      reg_addr, *cmd, engine->name);
                return false;
            }

            /*
             * Check the value written to the register against the
             * allowed mask/value pair given in the whitelist entry.
             */
            if (reg->mask) {
                if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) {
                    DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
                          reg_addr);
                    return false;
                }

                if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) {
                    DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
                          reg_addr);
                    return false;
                }

                if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) &&
                    (offset + 2 > length ||
                     (cmd[offset + 1] & reg->mask) != reg->value)) {
                    DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n",
                          reg_addr);
                    return false;
                }
            }
        }
    }

    if (desc->flags & CMD_DESC_BITMASK) {
        int i;

        for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
            u32 dword;

            if (desc->bits[i].mask == 0)
                break;

            if (desc->bits[i].condition_mask != 0) {
                u32 offset =
                    desc->bits[i].condition_offset;
                u32 condition = cmd[offset] &
                    desc->bits[i].condition_mask;

                if (condition == 0)
                    continue;
            }

            if (desc->bits[i].offset >= length) {
                DRM_DEBUG("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
                      *cmd, engine->name);
                return false;
            }

            dword = cmd[desc->bits[i].offset] &
                desc->bits[i].mask;

            if (dword != desc->bits[i].expected) {
                DRM_DEBUG("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
                      *cmd,
                      desc->bits[i].mask,
                      desc->bits[i].expected,
                      dword, engine->name);
                return false;
            }
        }
    }

    return true;
}
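
/*
 * Example of the masked-register path above, using the hsw_render_regs
 * entry for HSW_SCRATCH1 (mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
 * value = 0): an LRI payload must have every bit other than the
 * atomics-disable bit clear or the command is rejected, and LRM/LRR to
 * the register are refused outright because their payloads cannot be
 * inspected statically.
 */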

static int check_bbstart(u32 *cmd, u32 offset, u32 length,
             u32 batch_length,
             u64 batch_addr,
             u64 shadow_addr,
             const unsigned long *jump_whitelist)
{
    u64 jump_offset, jump_target;
    u32 target_cmd_offset, target_cmd_index;

    /* For igt compatibility on older platforms */
    if (!jump_whitelist) {
        DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
        return -EACCES;
    }

    if (length != 3) {
        DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
              length);
        return -EINVAL;
    }

    jump_target = *(u64 *)(cmd + 1);
    jump_offset = jump_target - batch_addr;

    /*
     * Any underflow of jump_target is guaranteed to be outside the range
     * of a u32, so the >= test catches both too large and too small
     */
    if (jump_offset >= batch_length) {
        DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
              jump_target);
        return -EINVAL;
    }

    /*
     * This cannot overflow a u32 because we already checked jump_offset
     * is within the BB, and the batch_length is a u32
     */
    target_cmd_offset = lower_32_bits(jump_offset);
    target_cmd_index = target_cmd_offset / sizeof(u32);

    *(u64 *)(cmd + 1) = shadow_addr + target_cmd_offset;

    if (target_cmd_index == offset)
        return 0;

    if (IS_ERR(jump_whitelist))
        return PTR_ERR(jump_whitelist);

    if (!test_bit(target_cmd_index, jump_whitelist)) {
        DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
              jump_target);
        return -EINVAL;
    }

    return 0;
}
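
/*
 * A worked example with illustrative addresses: batch_addr = 0x100000,
 * batch_length = 0x1000, and a BB_START whose qword operand is
 * 0x100040. Then jump_offset = 0x40 < 0x1000, target_cmd_index = 16,
 * the shadow copy of the operand is rewritten to shadow_addr + 0x40,
 * and the jump is allowed only if bit 16 of jump_whitelist was set,
 * i.e. dword 16 began a previously parsed command. A target below
 * batch_addr wraps to a huge u64 offset and fails the same
 * >= batch_length test as a target past the end.
 */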

static unsigned long *alloc_whitelist(u32 batch_length)
{
    unsigned long *jmp;

    /*
     * We expect batch_length to be less than 256KiB for known users,
     * i.e. we need at most an 8KiB bitmap allocation which should be
     * reasonably cheap due to kmalloc caches.
     */

    /* Prefer to report transient allocation failure rather than hit oom */
    jmp = bitmap_zalloc(DIV_ROUND_UP(batch_length, sizeof(u32)),
                GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
    if (!jmp)
        return ERR_PTR(-ENOMEM);

    return jmp;
}
1421 
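/*
 * Sizing sketch for the comment above: one whitelist bit per batch dword.
 * Also note the ERR_PTR return: the -ENOMEM is deferred and only surfaces
 * if check_bbstart() actually needs the whitelist. Stand-alone, not driver
 * code.
 */
#include <stddef.h>
#include <stdint.h>

static size_t whitelist_bytes(uint32_t batch_length)
{
    size_t bits = (batch_length + sizeof(uint32_t) - 1) / sizeof(uint32_t);

    return (bits + 7) / 8;    /* 256 KiB batch -> 65536 bits -> 8 KiB */
}
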
1422 #define LENGTH_BIAS 2
1423 
1424 /**
1425  * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
1426  * @engine: the engine on which the batch is to execute
1427  * @batch: the batch buffer in question
1428  * @batch_offset: byte offset in the batch at which execution starts
1429  * @batch_length: length of the commands in @batch
1430  * @shadow: validated copy of the batch buffer in question
1431  * @trampoline: true if we need to trampoline into privileged execution
1432  *
1433  * Parses the specified batch buffer looking for privilege violations as
1434  * described in the overview.
1435  *
1436  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
1437  * if the batch appears legal but should use hardware parsing
1438  */
1439 
1440 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
1441                 struct i915_vma *batch,
1442                 unsigned long batch_offset,
1443                 unsigned long batch_length,
1444                 struct i915_vma *shadow,
1445                 bool trampoline)
1446 {
1447     u32 *cmd, *batch_end, offset = 0;
1448     struct drm_i915_cmd_descriptor default_desc = noop_desc;
1449     const struct drm_i915_cmd_descriptor *desc = &default_desc;
1450     bool needs_clflush_after = false;
1451     unsigned long *jump_whitelist;
1452     u64 batch_addr, shadow_addr;
1453     int ret = 0;
1454 
1455     GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
1456     GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
1457     GEM_BUG_ON(range_overflows_t(u64, batch_offset, batch_length,
1458                      batch->size));
1459     GEM_BUG_ON(!batch_length);
1460 
1461     cmd = copy_batch(shadow->obj, batch->obj,
1462              batch_offset, batch_length,
1463              &needs_clflush_after);
1464     if (IS_ERR(cmd)) {
1465         DRM_DEBUG("CMD: Failed to copy batch\n");
1466         return PTR_ERR(cmd);
1467     }
1468 
1469     jump_whitelist = NULL;
1470     if (!trampoline)
1471         /* Defer failure until attempted use */
1472         jump_whitelist = alloc_whitelist(batch_length);
1473 
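    /*
     * gen8_canonical_addr() sign-extends bit 47 into bits 48..63 (e.g.
     * 0x0000800000000000 becomes 0xffff800000000000), yielding the
     * canonical form that GPU address operands such as BB_START expect.
     */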
1474     shadow_addr = gen8_canonical_addr(shadow->node.start);
1475     batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
1476 
1477     /*
1478      * We use the batch length as the size because the shadow object is as
1479      * large or larger, and copy_batch() fills the extra space with MI_NOPs;
1480      * stopping at batch_length spares the parser from walking that padding.
1481      */
1482     batch_end = cmd + batch_length / sizeof(*batch_end);
1483     do {
1484         u32 length;
1485 
1486         if (*cmd == MI_BATCH_BUFFER_END)
1487             break;
1488 
1489         desc = find_cmd(engine, *cmd, desc, &default_desc);
1490         if (!desc) {
1491             DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd);
1492             ret = -EINVAL;
1493             break;
1494         }
1495 
1496         if (desc->flags & CMD_DESC_FIXED)
1497             length = desc->length.fixed;
1498         else
1499             length = (*cmd & desc->length.mask) + LENGTH_BIAS;
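        /*
         * e.g. a 4-dword MI command typically encodes 2 in its length
         * field; LENGTH_BIAS restores the total dword count.
         */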
1500 
1501         if ((batch_end - cmd) < length) {
1502             DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
1503                   *cmd,
1504                   length,
1505                   batch_end - cmd);
1506             ret = -EINVAL;
1507             break;
1508         }
1509 
1510         if (!check_cmd(engine, desc, cmd, length)) {
1511             ret = -EACCES;
1512             break;
1513         }
1514 
1515         if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) {
1516             ret = check_bbstart(cmd, offset, length, batch_length,
1517                         batch_addr, shadow_addr,
1518                         jump_whitelist);
1519             break;
1520         }
1521 
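        /*
         * Record this command's start offset so that a later BB_START
         * may legally jump back to it.
         */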
1522         if (!IS_ERR_OR_NULL(jump_whitelist))
1523             __set_bit(offset, jump_whitelist);
1524 
1525         cmd += length;
1526         offset += length;
1527         if (cmd >= batch_end) {
1528             DRM_DEBUG("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
1529             ret = -EINVAL;
1530             break;
1531         }
1532     } while (1);
1533 
1534     if (trampoline) {
1535         /*
1536          * With the trampoline, the shadow is executed twice.
1537          *
1538          *   1 - starting at offset 0, in privileged mode
1539          *   2 - starting at offset batch_length, as non-privileged
1540          *
1541          * Only if the batch is valid and safe to execute do we allow
1542          * the first, privileged execution to proceed. If not, we
1543          * terminate the first batch and use the second batch-buffer
1544          * entry to chain to the original unsafe, non-privileged batch,
1545          * leaving it to the HW to validate.
1546          */
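        /*
         * Resulting shadow layout (offsets illustrative):
         *
         *   [0, batch_length)  parsed copy, run first in privileged mode
         *   batch_length       MI_BATCH_BUFFER_END, so the second,
         *                      non-privileged pass ends immediately when
         *                      the parse succeeded; on -EACCES it is
         *                      replaced below by a BB_START chaining to
         *                      the original batch
         */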
1547         *batch_end = MI_BATCH_BUFFER_END;
1548 
1549         if (ret) {
1550             /* Batch unsafe to execute with privileges, cancel! */
1551             cmd = page_mask_bits(shadow->obj->mm.mapping);
1552             *cmd = MI_BATCH_BUFFER_END;
1553 
1554             /* If batch is unsafe but valid, jump to the original */
1555             if (ret == -EACCES) {
1556                 unsigned int flags;
1557 
1558                 flags = MI_BATCH_NON_SECURE_I965;
1559                 if (IS_HASWELL(engine->i915))
1560                     flags = MI_BATCH_NON_SECURE_HSW;
1561 
1562                 GEM_BUG_ON(!IS_GRAPHICS_VER(engine->i915, 6, 7));
1563                 __gen6_emit_bb_start(batch_end,
1564                              batch_addr,
1565                              flags);
1566 
1567                 ret = 0; /* allow execution */
1568             }
1569         }
1570     }
1571 
1572     i915_gem_object_flush_map(shadow->obj);
1573 
1574     if (!IS_ERR_OR_NULL(jump_whitelist))
1575         kfree(jump_whitelist);
1576     i915_gem_object_unpin_map(shadow->obj);
1577     return ret;
1578 }
1579 
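/*
 * Illustrative only: one plausible way a caller could branch on the return
 * codes documented above. submit_secure() and submit_nonsecure() are
 * hypothetical helpers, not i915 functions; the real consumer is the
 * execbuffer path, which carries more state than shown here.
 */
static int submit_secure(struct i915_vma *shadow);    /* hypothetical */
static int submit_nonsecure(struct i915_vma *batch);  /* hypothetical */

static int example_submit(struct intel_engine_cs *engine,
                          struct i915_vma *batch, unsigned long offset,
                          unsigned long length, struct i915_vma *shadow)
{
    int err;

    err = intel_engine_cmd_parser(engine, batch, offset, length,
                                  shadow, false);
    if (!err)
        return submit_secure(shadow);
    if (err == -EACCES)    /* legal batch, leave it to HW parsing */
        return submit_nonsecure(batch);

    return err;    /* violation or transient failure: reject */
}
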
1580 /**
1581  * i915_cmd_parser_get_version() - get the cmd parser version number
1582  * @dev_priv: i915 device private
1583  *
1584  * The cmd parser maintains a simple increasing integer version number suitable
1585  * for passing to userspace clients to determine what operations are permitted.
1586  *
1587  * Return: the current version number of the cmd parser
1588  */
1589 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
1590 {
1591     struct intel_engine_cs *engine;
1592     bool active = false;
1593 
1594     /* If the command parser is not enabled, report 0 - unsupported */
1595     for_each_uabi_engine(engine, dev_priv) {
1596         if (intel_engine_using_cmd_parser(engine)) {
1597             active = true;
1598             break;
1599         }
1600     }
1601     if (!active)
1602         return 0;
1603 
1604     /*
1605      * Command parser version history
1606      *
1607      * 1. Initial version. Checks batches and reports violations, but leaves
1608      *    hardware parsing enabled (so does not allow new use cases).
1609      * 2. Allow access to the MI_PREDICATE_SRC0 and
1610      *    MI_PREDICATE_SRC1 registers.
1611      * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
1612      * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
1613      * 5. GPGPU dispatch compute indirect registers.
1614      * 6. TIMESTAMP register and Haswell CS GPR registers.
1615      * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
1616      * 8. Don't report cmd_check() failures as EINVAL errors to userspace;
1617      *    rely on the HW to NOOP disallowed commands as it would without
1618      *    the parser enabled.
1619      * 9. Don't whitelist or handle oacontrol specially, as ownership
1620      *    for oacontrol state is moving to i915-perf.
1621      * 10. Support for Gen9 BCS Parsing
1622      */
1623     return 10;
1624 }
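
/*
 * Userspace sketch of querying this version number through the getparam
 * ioctl. I915_PARAM_CMD_PARSER_VERSION and DRM_IOCTL_I915_GETPARAM are the
 * real uapi names (header available via libdrm); error handling is trimmed.
 */
#include <sys/ioctl.h>
#include <i915_drm.h>

static int cmd_parser_version(int drm_fd)
{
    int value = 0;
    struct drm_i915_getparam gp = {
        .param = I915_PARAM_CMD_PARSER_VERSION,
        .value = &value,
    };

    if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
        return -1;

    return value;    /* 0 means the parser is not active on any engine */
}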