Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright (C) 2015, 2016 ARM Ltd.
0004  */
0005 #ifndef __KVM_ARM_VGIC_MMIO_H__
0006 #define __KVM_ARM_VGIC_MMIO_H__
0007 
/*
 * One entry in a vGIC MMIO register table: the register's offset, size
 * and permitted access widths, plus the handler callbacks that emulate
 * guest and userspace accesses to it.
 *
 * The anonymous unions let the same table layout serve both the
 * vcpu-based distributor/redistributor handlers and the ITS handlers
 * (struct vgic_its based); the code registering a table picks the
 * matching union member.
 */
struct vgic_register_region {
	unsigned int reg_offset;	/* byte offset of the register within its region */
	unsigned int len;		/* register length in bytes */
	unsigned int bits_per_irq;	/* bits of state per IRQ; 0 for fixed-length registers */
	unsigned int access_flags;	/* mask of allowed widths (VGIC_ACCESS_* flags) */
	/* guest MMIO read handler: plain vcpu flavour or ITS flavour */
	union {
		unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
				      unsigned int len);
		unsigned long (*its_read)(struct kvm *kvm, struct vgic_its *its,
					  gpa_t addr, unsigned int len);
	};
	/* guest MMIO write handler: plain vcpu flavour or ITS flavour */
	union {
		void (*write)(struct kvm_vcpu *vcpu, gpa_t addr,
			      unsigned int len, unsigned long val);
		void (*its_write)(struct kvm *kvm, struct vgic_its *its,
				  gpa_t addr, unsigned int len,
				  unsigned long val);
	};
	/*
	 * Optional userspace-access read override.
	 * NOTE(review): presumably the dispatch code falls back to ->read
	 * when this is NULL — confirm in vgic-mmio.c.
	 */
	unsigned long (*uaccess_read)(struct kvm_vcpu *vcpu, gpa_t addr,
				      unsigned int len);
	/* userspace-access write handlers; may fail, hence the int return */
	union {
		int (*uaccess_write)(struct kvm_vcpu *vcpu, gpa_t addr,
				     unsigned int len, unsigned long val);
		int (*uaccess_its_write)(struct kvm *kvm, struct vgic_its *its,
					 gpa_t addr, unsigned int len,
					 unsigned long val);
	};
};
0036 
/* I/O device ops implementing vGIC MMIO dispatch (defined in the .c file) */
extern const struct kvm_io_device_ops kvm_io_gic_ops;

/* Bit flags for vgic_register_region.access_flags: permitted access widths */
#define VGIC_ACCESS_8bit	1
#define VGIC_ACCESS_32bit	2
#define VGIC_ACCESS_64bit	4
0042 
/*
 * Address mask spanning the register space needed to hold the state of
 * all 1024 possible interrupts when each one occupies <bits> bits:
 * 1024 IRQs at <bits> bits each is <bits> * 128 bytes, and the mask is
 * one less than that size. <bits> must be a power of two.
 */
#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 128) - 1)
0049 
/*
 * (addr & mask) gives us the _byte_ offset for the INT ID.
 * We multiply this by 8 to get the _bit_ offset, then divide this by
 * the number of bits to learn the actual INT ID.
 * But instead of a division (which requires a "long long div" implementation),
 * we shift by the binary logarithm of <bits>.
 * This assumes that <bits> is a power of two.
 *
 * Example: with bits = 2, a byte offset of 0x10 is bit offset 128,
 * i.e. INT ID 64.
 */
#define VGIC_ADDR_TO_INTID(addr, bits)  (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
					8 >> ilog2(bits))
0060 
/*
 * Some VGIC registers store per-IRQ information, with a different number
 * of bits per IRQ. For those registers this macro is used.
 * The _WITH_LENGTH version instantiates registers with a fixed length
 * and is mutually exclusive with the _PER_IRQ version.
 *
 * Every argument is parenthesized on expansion so that an expression
 * argument (e.g. a computed <bpi>) cannot be re-associated by the
 * surrounding operators in the .len computation.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, ur, uw, bpi, acc)	\
	{								\
		.reg_offset = (off),					\
		.bits_per_irq = (bpi),					\
		.len = (bpi) * 1024 / 8,				\
		.access_flags = (acc),					\
		.read = (rd),						\
		.write = (wr),						\
		.uaccess_read = (ur),					\
		.uaccess_write = (uw),					\
	}
0078 
/*
 * Instantiate a register description with a fixed length in bytes
 * (bits_per_irq = 0); mutually exclusive with the _PER_IRQ variant.
 * Arguments are parenthesized on expansion as a macro-hygiene measure.
 */
#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc)		\
	{								\
		.reg_offset = (off),					\
		.bits_per_irq = 0,					\
		.len = (length),					\
		.access_flags = (acc),					\
		.read = (rd),						\
		.write = (wr),						\
	}
0088 
/*
 * Like REGISTER_DESC_WITH_LENGTH, but additionally wires up the
 * userspace-access (uaccess) read/write handlers.
 * Arguments are parenthesized on expansion as a macro-hygiene measure.
 */
#define REGISTER_DESC_WITH_LENGTH_UACCESS(off, rd, wr, urd, uwr, length, acc) \
	{								\
		.reg_offset = (off),					\
		.bits_per_irq = 0,					\
		.len = (length),					\
		.access_flags = (acc),					\
		.read = (rd),						\
		.write = (wr),						\
		.uaccess_read = (urd),					\
		.uaccess_write = (uwr),					\
	}
0100 
/*
 * Marshal a <len>-byte value between its raw MMIO-bus buffer form and a
 * host unsigned long.
 * NOTE(review): the names imply bus<->host byte-order handling; the
 * exact endianness rules live in the .c implementation — confirm there.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);

void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data);

/* Extract <num> bytes starting at byte <offset> from a 64-bit value. */
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num);

/* Replace <len> bytes at byte <offset> of <reg> with <val>; returns the updated register. */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val);

/*
 * Stub handlers for reserved/unimplemented registers, named after the
 * architectural access terms: RAZ (read-as-zero), RAO (read-as-one),
 * WI (write-ignored).
 */
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len);

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len);

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val);

/* write-ignore for userspace accesses; int return matches the uaccess_write slot */
int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val);
0123 
/* Per-IRQ interrupt-group bit accessors (from the names; one bit per IRQ) */
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu, gpa_t addr,
				   unsigned int len);

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val);

/*
 * Per-IRQ enable-bit accessors. The s/c prefixes mirror the GIC
 * set-enable/clear-enable register split; the uaccess variants serve
 * userspace writes and can fail, hence the int return.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val);

int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val);
0148 
/*
 * Per-IRQ pending-bit accessors, again following the GIC set/clear
 * (s/c) register naming. The separate uaccess read exists because
 * userspace reads may need different semantics from guest MMIO reads —
 * NOTE(review): confirm the exact difference in the .c implementation.
 */
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len);

unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len);

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val);

int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val);
0170 
/*
 * Per-IRQ active-bit accessors (set-active/clear-active pairs), with
 * separate userspace-access variants that may fail.
 */
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val);

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val);
0192 
/* Per-IRQ priority byte accessors */
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len);

void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

/*
 * Per-IRQ configuration accessors — presumably the edge/level trigger
 * config (GIC ICFGR-style naming); confirm in the .c implementation.
 */
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val);
0206 
/*
 * Userspace (device-attribute) access to a register of <dev>:
 * dispatches to the matching region's uaccess handlers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val);

/*
 * Read/write line-level state starting at <intid>.
 * NOTE(review): the u32 presumably packs one bit per IRQ for 32
 * consecutive IRQs — confirm against the implementation.
 */
u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u32 val);

/*
 * Initialize the distributor MMIO device for GICv2/GICv3.
 * NOTE(review): the unsigned int return looks like the region size —
 * confirm against the vgic-mmio-v2/v3 definitions.
 */
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);

/*
 * Sanitise guest-written base-register field values: vgic_sanitise_field
 * extracts the field selected by <field_mask>/<field_shift>, runs it
 * through <sanitise_fn> and merges the result back into <reg>.
 */
u64 vgic_sanitise_outer_cacheability(u64 reg);
u64 vgic_sanitise_inner_cacheability(u64 reg);
u64 vgic_sanitise_shareability(u64 reg);
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64));

/* Find the proper register handler entry given a certain address offset */
const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset);

#endif /* __KVM_ARM_VGIC_MMIO_H__ */