// SPDX-License-Identifier: GPL-2.0
#include <linux/bitmap.h>
#include <linux/workqueue.h>

#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_dev.h"
#include "nitrox_mbx.h"

#define RING_TO_VFNO(_x, _y)	((_x) / (_y))

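/*
 * mbx_msg_type - Mailbox message types
 */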
enum mbx_msg_type {
	MBX_MSG_TYPE_NOP,
	MBX_MSG_TYPE_REQ,
	MBX_MSG_TYPE_ACK,
	MBX_MSG_TYPE_NACK,
};

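/*
 * mbx_msg_opcode - Mailbox message opcodes
 */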
enum mbx_msg_opcode {
	MSG_OP_VF_MODE = 1,
	MSG_OP_VF_UP,
	MSG_OP_VF_DOWN,
	MSG_OP_CHIPID_VFID,
	MSG_OP_MCODE_INFO = 11,
};

struct pf2vf_work {
	struct nitrox_vfdev *vfdev;
	struct nitrox_device *ndev;
	struct work_struct pf2vf_resp;
};

static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring)
{
	u64 reg_addr;

	reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring);
	return nitrox_read_csr(ndev, reg_addr);
}

static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value,
				    int ring)
{
	u64 reg_addr;

	reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring);
	nitrox_write_csr(ndev, reg_addr, value);
}

static void pf2vf_send_response(struct nitrox_device *ndev,
				struct nitrox_vfdev *vfdev)
{
	union mbox_msg msg;

	msg.value = vfdev->msg.value;

	switch (vfdev->msg.opcode) {
	case MSG_OP_VF_MODE:
		msg.data = ndev->mode;
		break;
	case MSG_OP_VF_UP:
		vfdev->nr_queues = vfdev->msg.data;
		atomic_set(&vfdev->state, __NDEV_READY);
		break;
	case MSG_OP_CHIPID_VFID:
		msg.id.chipid = ndev->idx;
		msg.id.vfid = vfdev->vfno;
		break;
	case MSG_OP_VF_DOWN:
		vfdev->nr_queues = 0;
		atomic_set(&vfdev->state, __NDEV_NOT_READY);
		break;
	case MSG_OP_MCODE_INFO:
		msg.data = 0;
		msg.mcode_info.count = 2;
		msg.mcode_info.info = MCODE_TYPE_SE_SSL | (MCODE_TYPE_AE << 5);
		msg.mcode_info.next_se_grp = 1;
		msg.mcode_info.next_ae_grp = 1;
		break;
	default:
		msg.type = MBX_MSG_TYPE_NOP;
		break;
	}

	if (msg.type == MBX_MSG_TYPE_NOP)
		return;

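	/* send ACK to the VF and count the response */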
	msg.type = MBX_MSG_TYPE_ACK;
	pf2vf_write_mbox(ndev, msg.value, vfdev->ring);

	vfdev->msg.value = 0;
	atomic64_inc(&vfdev->mbx_resp);
}

static void pf2vf_resp_handler(struct work_struct *work)
{
	struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work,
						     pf2vf_resp);
	struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev;
	struct nitrox_device *ndev = pf2vf_resp->ndev;

	switch (vfdev->msg.type) {
	case MBX_MSG_TYPE_REQ:
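		/* send the response to the VF */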
		pf2vf_send_response(ndev, vfdev);
		break;
	case MBX_MSG_TYPE_ACK:
	case MBX_MSG_TYPE_NACK:
		break;
	}

	kfree(pf2vf_resp);
}

void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
{
	DECLARE_BITMAP(csr, BITS_PER_TYPE(u64));
	struct nitrox_vfdev *vfdev;
	struct pf2vf_work *pfwork;
	u64 value, reg_addr;
	u32 i;
	int vfno;

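	/* handle mailbox interrupts from rings 0..63 (low register) */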
	reg_addr = NPS_PKT_MBOX_INT_LO;
	value = nitrox_read_csr(ndev, reg_addr);
	bitmap_from_u64(csr, value);
	for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
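		/* get the vfno from the ring number */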
		vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
		vfdev = ndev->iov.vfdev + vfno;
		vfdev->ring = i;
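		/* fill the VF mailbox data */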
		vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
		pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
		if (!pfwork)
			continue;

		pfwork->vfdev = vfdev;
		pfwork->ndev = ndev;
		INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
		queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
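		/* clear the corresponding vf bit */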
		nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
	}

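	/* handle mailbox interrupts from rings 64..127 (high register) */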
	reg_addr = NPS_PKT_MBOX_INT_HI;
	value = nitrox_read_csr(ndev, reg_addr);
	bitmap_from_u64(csr, value);
	for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
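		/* get the vfno from the ring number */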
		vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
		vfdev = ndev->iov.vfdev + vfno;
		vfdev->ring = (i + 64);
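		/* fill the VF mailbox data */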
		vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);

		pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
		if (!pfwork)
			continue;

		pfwork->vfdev = vfdev;
		pfwork->ndev = ndev;
		INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
		queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
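		/* clear the corresponding vf bit */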
		nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
	}
}

int nitrox_mbox_init(struct nitrox_device *ndev)
{
	struct nitrox_vfdev *vfdev;
	int i;

	ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs,
				  sizeof(struct nitrox_vfdev), GFP_KERNEL);
	if (!ndev->iov.vfdev)
		return -ENOMEM;

	for (i = 0; i < ndev->iov.num_vfs; i++) {
		vfdev = ndev->iov.vfdev + i;
		vfdev->vfno = i;
	}

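	/* allocate pf2vf response workqueue */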
	ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
	if (!ndev->iov.pf2vf_wq) {
		kfree(ndev->iov.vfdev);
		/* clear the pointer so a later nitrox_mbox_cleanup()
		 * does not double-free it
		 */
		ndev->iov.vfdev = NULL;
		return -ENOMEM;
	}

	enable_pf2vf_mbox_interrupts(ndev);

	return 0;
}

void nitrox_mbox_cleanup(struct nitrox_device *ndev)
{
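	/* disable pf2vf mailbox interrupts */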
	disable_pf2vf_mbox_interrupts(ndev);

	if (ndev->iov.pf2vf_wq)
		destroy_workqueue(ndev->iov.pf2vf_wq);

	kfree(ndev->iov.vfdev);
	ndev->iov.pf2vf_wq = NULL;
	ndev->iov.vfdev = NULL;
}