Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
0004  */
0005 
0006 #ifndef _KVM_PPC_BOOK3S_XIVE_H
0007 #define _KVM_PPC_BOOK3S_XIVE_H
0008 
0009 #ifdef CONFIG_KVM_XICS
0010 #include "book3s_xics.h"
0011 
0012 /*
0013  * The XIVE Interrupt source numbers are within the range 0 to
0014  * KVMPPC_XICS_NR_IRQS.
0015  */
0016 #define KVMPPC_XIVE_FIRST_IRQ   0
0017 #define KVMPPC_XIVE_NR_IRQS KVMPPC_XICS_NR_IRQS
0018 
0019 /*
0020  * State for one guest irq source.
0021  *
0022  * For each guest source we allocate a HW interrupt in the XIVE
0023  * which we use for all SW triggers. It will be unused for
0024  * pass-through but it's easier to keep around as the same
0025  * guest interrupt can alternatively be emulated or pass-through
0026  * if a physical device is hot unplugged and replaced with an
0027  * emulated one.
0028  *
0029  * This state structure is very similar to the XICS one with
0030  * additional XIVE specific tracking.
0031  */
struct kvmppc_xive_irq_state {
	bool valid;			/* Interrupt entry is valid */

	u32 number;			/* Guest IRQ number */
	u32 ipi_number;			/* XIVE IPI HW number */
	struct xive_irq_data ipi_data;	/* XIVE IPI associated data */
	u32 pt_number;			/* XIVE Pass-through number if any */
	struct xive_irq_data *pt_data;	/* XIVE Pass-through associated data */

	/* Targeting as set by guest */
	u8 guest_priority;		/* Guest set priority */
	u8 saved_priority;		/* Saved priority when masking */

	/* Actual targeting */
	u32 act_server;			/* Actual server */
	u8 act_priority;		/* Actual priority */

	/* Various state bits */
	bool in_eoi;			/* Synchronize with H_EOI */
	bool old_p;			/* P bit state when masking */
	bool old_q;			/* Q bit state when masking */
	bool lsi;			/* level-sensitive interrupt */
	bool asserted;			/* Only for emulated LSI: current state */

	/*
	 * Saved for migration state (see the lazy queue scan described
	 * in struct kvmppc_xive; presumably captured by that scan).
	 */
	bool in_queue;			/* irq found in a queue during save */
	bool saved_p;			/* P bit snapshot for migration */
	bool saved_q;			/* Q bit snapshot for migration */
	u8 saved_scan_prio;		/* priority the scan found it at */

	/* Xive native */
	u32 eisn;			/* Guest Effective IRQ number */
};
0065 
0066 /* Select the "right" interrupt (IPI vs. passthrough) */
0067 static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
0068                       u32 *out_hw_irq,
0069                       struct xive_irq_data **out_xd)
0070 {
0071     if (state->pt_number) {
0072         if (out_hw_irq)
0073             *out_hw_irq = state->pt_number;
0074         if (out_xd)
0075             *out_xd = state->pt_data;
0076     } else {
0077         if (out_hw_irq)
0078             *out_hw_irq = state->ipi_number;
0079         if (out_xd)
0080             *out_xd = &state->ipi_data;
0081     }
0082 }
0083 
/*
 * This corresponds to an "ICS" in XICS terminology: we use it
 * as a means to break up source information into multiple structures.
 */
struct kvmppc_xive_src_block {
	arch_spinlock_t lock;	/* serializes access to this block's sources */
	u16 id;			/* block id (irq >> KVMPPC_XICS_ICS_SHIFT) */
	struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
};
0093 
struct kvmppc_xive;

/*
 * Per-device-type operations (NOTE(review): presumably used to let the
 * XICS-on-XIVE and XIVE native devices differ — confirm against callers).
 */
struct kvmppc_xive_ops {
	int (*reset_mapped)(struct kvm *kvm, unsigned long guest_irq);
};
0099 
/* Bits for the kvmppc_xive::flags field */
#define KVMPPC_XIVE_FLAG_SINGLE_ESCALATION 0x1
#define KVMPPC_XIVE_FLAG_SAVE_RESTORE 0x2
0102 
/* Per-VM XIVE device state */
struct kvmppc_xive {
	struct kvm *kvm;		/* Owning VM */
	struct kvm_device *dev;		/* KVM device backing this instance */
	struct dentry *dentry;		/* NOTE(review): presumably a debugfs entry — confirm */

	/* VP block associated with the VM */
	u32 vp_base;

	/* Blocks of sources */
	struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1];
	u32 max_sbid;			/* Highest source block id in use */

	/*
	 * For state save, we lazily scan the queues on the first interrupt
	 * being migrated. We don't have a clean way to reset that flags
	 * so we keep track of the number of valid sources and how many of
	 * them were migrated so we can reset when all of them have been
	 * processed.
	 */
	u32 src_count;
	u32 saved_src_count;

	/*
	 * Some irqs are delayed on restore until the source is created,
	 * keep track here of how many of them
	 */
	u32 delayed_irqs;

	/* Which queues (priorities) are in use by the guest */
	u8  qmap;

	/* Queue orders */
	u32 q_order;
	u32 q_page_order;

	/* Flags (KVMPPC_XIVE_FLAG_*) */
	u8  flags;

	/* Number of entries in the VP block */
	u32 nr_servers;

	struct kvmppc_xive_ops *ops;	/* Device-type specific operations */
	struct address_space   *mapping;
	struct mutex mapping_lock;	/* presumably guards @mapping — confirm */
	struct mutex lock;		/* Global per-device lock */
};
0149 
/* Each VP has 8 queues (one per priority), see kvmppc_xive_vcpu::queues */
#define KVMPPC_XIVE_Q_COUNT 8
0151 
/* Per-vCPU XIVE state */
struct kvmppc_xive_vcpu {
	struct kvmppc_xive	*xive;	/* Back-pointer to the device */
	struct kvm_vcpu		*vcpu;	/* Back-pointer to the vCPU */
	bool			valid;	/* Entry is in use */

	/* Server number. This is the HW CPU ID from a guest perspective */
	u32			server_num;

	/*
	 * HW VP corresponding to this VCPU. This is the base of the VP
	 * block plus the server number.
	 */
	u32			vp_id;
	u32			vp_chip_id;
	u32			vp_cam;

	/* IPI used for sending ... IPIs */
	u32			vp_ipi;
	struct xive_irq_data	vp_ipi_data;

	/* Local emulation state */
	uint8_t			cppr;	/* guest CPPR */
	uint8_t			hw_cppr;/* Hardware CPPR */
	uint8_t			mfrr;	/* guest MFRR (IPI priority) */
	uint8_t			pending;	/* NOTE(review): looks like pending priorities — confirm */

	/* Each VP has 8 queues though we only provision some */
	struct xive_q		queues[KVMPPC_XIVE_Q_COUNT];
	u32			esc_virq[KVMPPC_XIVE_Q_COUNT];	/* escalation virqs */
	char			*esc_virq_names[KVMPPC_XIVE_Q_COUNT];

	/* Stash a delayed irq on restore from migration (see set_icp) */
	u32			delayed_irq;

	/* Stats: real-mode (rm) vs. virtual-mode (vm) hcall counters */
	u64			stat_rm_h_xirr;
	u64			stat_rm_h_ipoll;
	u64			stat_rm_h_cppr;
	u64			stat_rm_h_eoi;
	u64			stat_rm_h_ipi;
	u64			stat_vm_h_xirr;
	u64			stat_vm_h_ipoll;
	u64			stat_vm_h_cppr;
	u64			stat_vm_h_eoi;
	u64			stat_vm_h_ipi;
};
0198 
0199 static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
0200 {
0201     struct kvm_vcpu *vcpu = NULL;
0202     unsigned long i;
0203 
0204     kvm_for_each_vcpu(i, vcpu, kvm) {
0205         if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
0206             return vcpu;
0207     }
0208     return NULL;
0209 }
0210 
0211 static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive,
0212         u32 irq, u16 *source)
0213 {
0214     u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
0215     u16 src = irq & KVMPPC_XICS_SRC_MASK;
0216 
0217     if (source)
0218         *source = src;
0219     if (bid > KVMPPC_XICS_MAX_ICS_ID)
0220         return NULL;
0221     return xive->src_blocks[bid];
0222 }
0223 
/*
 * When the XIVE resources are allocated at the HW level, the VP
 * structures describing the vCPUs of a guest are distributed among
 * the chips to optimize the PowerBUS usage. For best performance, the
 * guest vCPUs can be pinned to match the VP structure distribution.
 *
 * Currently, the VP identifiers are deduced from the vCPU id using
 * the kvmppc_pack_vcpu_id() routine which is not incorrect but not
 * optimal either. If VSMT is used, the result is not continuous and
 * the constraints on HW resources described above cannot be met.
 */
0235 static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
0236 {
0237     return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
0238 }
0239 
0240 static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
0241 {
0242     struct kvm_vcpu *vcpu = NULL;
0243     unsigned long i;
0244 
0245     kvm_for_each_vcpu(i, vcpu, kvm) {
0246         if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
0247             return true;
0248     }
0249     return false;
0250 }
0251 
/*
 * Mapping between guest priorities and host priorities
 * is as follows.
 *
 * Guest requests for 0...6 are honored. Guest requests for anything
 * higher result in a priority of 6 being applied.
 *
 * A similar mapping is done for CPPR values.
 */
0261 static inline u8 xive_prio_from_guest(u8 prio)
0262 {
0263     if (prio == 0xff || prio < 6)
0264         return prio;
0265     return 6;
0266 }
0267 
/* Host->guest priority translation: currently the identity mapping */
static inline u8 xive_prio_to_guest(u8 prio)
{
	return prio;
}
0272 
/*
 * Read one entry from an event queue page.
 *
 * @qpage:  queue page of big-endian 32-bit entries; may be NULL
 * @msk:    index mask applied when advancing (queue size - 1)
 * @idx:    in/out read index into the queue
 * @toggle: in/out generation bit; flipped each time @idx wraps to 0
 *
 * Returns the low 31 bits of the entry, or 0 when the queue is absent
 * or the current slot's top bit equals @toggle (slot not yet valid for
 * this generation). On success the index is advanced past the entry.
 */
static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
{
	u32 cur;

	if (!qpage)
		return 0;
	cur = be32_to_cpup(qpage + *idx);
	/* Top bit matching the toggle means no new entry in this slot */
	if ((cur >> 31) == *toggle)
		return 0;
	*idx = (*idx + 1) & msk;
	if (*idx == 0)
		(*toggle) ^= 1;	/* wrapped: expect the opposite generation */
	return cur & 0x7fffffff;
}
0287 
/*
 * Common Xive routines for XICS-over-XIVE and XIVE native
 * (definitions live in the corresponding .c files)
 */
void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu);
/* debugfs/seq_file dump helpers */
int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu);
void kvmppc_xive_debug_show_sources(struct seq_file *m,
				    struct kvmppc_xive_src_block *sb);
/* Source block allocation and teardown */
struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
	struct kvmppc_xive *xive, int irq);
void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb);
int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio);
int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
				  bool single_escalation);
struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type);
void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
				    struct kvmppc_xive_vcpu *xc, int irq);
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp);
int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr);
bool kvmppc_xive_check_save_restore(struct kvm_vcpu *vcpu);
0307 
0308 static inline bool kvmppc_xive_has_single_escalation(struct kvmppc_xive *xive)
0309 {
0310     return xive->flags & KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;
0311 }
0312 
0313 #endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XIVE_H */