#include <linux/pci.h>
#include <linux/module.h>
#include "net_driver.h"
#include "efx.h"
#include "efx_channels.h"
#include "nic.h"
#include "io.h"
#include "mcdi.h"
#include "filter.h"
#include "mcdi_pcol.h"
#include "farch_regs.h"
#include "siena_sriov.h"
#include "vfdi.h"
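
/* Number of longs required to track all the VIs in a VF */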
#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
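
/* Maximum number of RX queues supported per VF */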
#define VF_MAX_RX_QUEUES 63
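
/**
 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
 * @VF_TX_FILTER_OFF: Disabled
 * @VF_TX_FILTER_AUTO: Enabled if a MAC address is assigned to the VF and
 *	no more than two TX channels are allowed per VF.
 * @VF_TX_FILTER_ON: Enabled
 */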
enum efx_vf_tx_filter_mode {
	VF_TX_FILTER_OFF,
	VF_TX_FILTER_AUTO,
	VF_TX_FILTER_ON,
};
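
/**
 * struct siena_vf - Back-end resource and protocol state of a functional VF
 * @efx: The Efx NIC owning this VF
 * @pci_rid: The PCI requester ID for this VF
 * @pci_name: The PCI name (formatted address) of this VF
 * @index: Index of VF within its port and PF
 * @req: VFDI incoming request work item. Incoming USR_EV events are received
 *	by the NAPI handler, but must be handled by executing MCDI requests
 *	inside a work item.
 * @req_addr: VFDI incoming request DMA address (in VF's PCI address space)
 * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member
 * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member
 * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
 *	@status_lock
 * @busy: VFDI request queued to be processed or being processed. Receiving
 *	a VFDI request when @busy is set is an error condition.
 * @buf: Incoming VFDI requests are DMA'd from the VF into this buffer
 * @buftbl_base: Buffer table entries for this VF start at this index
 * @rx_filtering: Receive filtering has been requested by the VF driver
 * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request
 * @rx_filter_qid: VF-relative qid for RX filter requested by VF
 * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
 * @tx_filter_mode: Transmit MAC filtering mode
 * @tx_filter_id: Transmit MAC filter ID
 * @addr: The MAC address and outer VLAN tag of the VF
 * @status_addr: VF DMA address of page for &struct vfdi_status updates
 * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
 *	@peer_page_addrs and @peer_page_count from simultaneous
 *	updates by the VM and consumption by
 *	__efx_siena_sriov_push_vf_status()
 * @peer_page_addrs: Pointer to an array of guest pages for local addresses
 * @peer_page_count: Number of entries in @peer_page_addrs
 * @evq0_addrs: Array of guest pages backing evq0
 * @evq0_count: Number of entries in @evq0_addrs
 * @flush_waitq: Wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
 *	to wait for flush completions
 * @txq_lock: Mutex for TX queue allocation
 * @txq_mask: Mask of initialized transmit queues
 * @txq_count: Number of initialized transmit queues
 * @rxq_mask: Mask of initialized receive queues
 * @rxq_count: Number of initialized receive queues
 * @rxq_retry_mask: Mask of receive queues that need to be flushed again
 *	due to flush failure
 * @rxq_retry_count: Number of receive queues in @rxq_retry_mask
 * @reset_work: Work item to schedule a VF reset
 */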
struct siena_vf {
	struct efx_nic *efx;
	unsigned int pci_rid;
	char pci_name[13]; /* dddd:bb:dd.f */
	unsigned int index;
	struct work_struct req;
	u64 req_addr;
	int req_type;
	unsigned req_seqno;
	unsigned msg_seqno;
	bool busy;
	struct efx_buffer buf;
	unsigned buftbl_base;
	bool rx_filtering;
	enum efx_filter_flags rx_filter_flags;
	unsigned rx_filter_qid;
	int rx_filter_id;
	enum efx_vf_tx_filter_mode tx_filter_mode;
	int tx_filter_id;
	struct vfdi_endpoint addr;
	u64 status_addr;
	struct mutex status_lock;
	u64 *peer_page_addrs;
	unsigned peer_page_count;
	u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) /
		       EFX_BUF_SIZE];
	unsigned evq0_count;
	wait_queue_head_t flush_waitq;
	struct mutex txq_lock;
	unsigned long txq_mask[VI_MASK_LENGTH];
	unsigned txq_count;
	unsigned long rxq_mask[VI_MASK_LENGTH];
	unsigned rxq_count;
	unsigned long rxq_retry_mask[VI_MASK_LENGTH];
	atomic_t rxq_retry_count;
	struct work_struct reset_work;
};
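
/* A memory copy request to be executed by the MC's MEMCPY command.
 * If from_buf is non-NULL it supplies inline source data; otherwise
 * the source is the (from_rid, from_addr) PCIe address.
 */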
struct efx_memcpy_req {
	unsigned int from_rid;
	void *from_buf;
	u64 from_addr;
	unsigned int to_rid;
	u64 to_addr;
	unsigned length;
};
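
/**
 * struct efx_local_addr - A MAC address on the vswitch without a VF.
 *
 * Siena does not have a switch, so VFs can't transmit data to each
 * other. Instead the VFs must be made aware of the local addresses
 * on the vswitch, so that they can arrange for an alternative
 * software datapath to be used.
 *
 * @link: List head for insertion into the local address list
 * @addr: Ethernet address
 */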
struct efx_local_addr {
	struct list_head link;
	u8 addr[ETH_ALEN];
};
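
/**
 * struct efx_endpoint_page - A memory page, owned by a controller
 * process, that contains a copy of the controller's peer list.
 * @link: List head for insertion into nic_data->local_page_list
 * @ptr: Pointer to page data
 * @addr: DMA address of page
 */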
struct efx_endpoint_page {
	struct list_head link;
	void *ptr;
	dma_addr_t addr;
};
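
/* Buffer table entries are reserved txq0, rxq0, evq0, txq1, rxq1, evq1 */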
#define EFX_BUFTBL_TXQ_BASE(_vf, _qid)					\
	((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
#define EFX_BUFTBL_RXQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
#define EFX_BUFTBL_EVQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))

#define EFX_FIELD_MASK(_field)			\
	((1 << _field ## _WIDTH) - 1)

/* VFs can only use this many transmit channels */
static unsigned int vf_max_tx_channels = 2;
module_param(vf_max_tx_channels, uint, 0444);
MODULE_PARM_DESC(vf_max_tx_channels,
		 "Limit the number of TX channels VFs can use");

static int max_vfs = -1;
module_param(max_vfs, int, 0444);
MODULE_PARM_DESC(max_vfs,
		 "Reduce the number of VFs initialized by the driver");
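
/* All VFDI requests and peer-page updates are handled on this dedicated
 * single-threaded workqueue, since the handlers may sleep on MCDI and
 * the RTNL lock.
 */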
static struct workqueue_struct *vfdi_workqueue;

/* Translate a VF-relative VI index into an absolute VI index */
static unsigned abs_index(struct siena_vf *vf, unsigned index)
{
	return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}

static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
			       unsigned *vi_scale_out, unsigned *vf_total_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
	unsigned vi_scale, vf_total;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);

	rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf,
				      MC_CMD_SRIOV_IN_LEN, outbuf,
				      MC_CMD_SRIOV_OUT_LEN, &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_SRIOV_OUT_LEN)
		return -EIO;

	vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
	vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
	if (vi_scale > EFX_VI_SCALE_MAX)
		return -EOPNOTSUPP;

	if (vi_scale_out)
		*vi_scale_out = vi_scale;
	if (vf_total_out)
		*vf_total_out = vf_total;

	return 0;
}

static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	EFX_POPULATE_OWORD_2(reg,
			     FRF_CZ_USREV_DIS, enabled ? 0 : 1,
			     FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel);
	efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
}

static int efx_siena_sriov_memcpy(struct efx_nic *efx,
				  struct efx_memcpy_req *req,
				  unsigned int count)
{
	MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
	MCDI_DECLARE_STRUCT_PTR(record);
	unsigned int index, used;
	u64 from_addr;
	u32 from_rid;
	int rc;

	mb();	/* Finish writing source/reading dest before DMA starts */

	if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM))
		return -ENOBUFS;
	used = MC_CMD_MEMCPY_IN_LEN(count);

	for (index = 0; index < count; index++) {
		record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS,
			       count);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
			       req->to_rid);
		MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR,
			       req->to_addr);
		if (req->from_buf == NULL) {
			from_rid = req->from_rid;
			from_addr = req->from_addr;
		} else {
			if (WARN_ON(used + req->length >
				    MCDI_CTL_SDU_LEN_MAX_V1)) {
				rc = -ENOBUFS;
				goto out;
			}

			from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
			from_addr = used;
			memcpy(_MCDI_PTR(inbuf, used), req->from_buf,
			       req->length);
			used += req->length;
		}

		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
		MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR,
			       from_addr);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
			       req->length);

		++req;
	}

	rc = efx_siena_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
	mb();	/* Don't write source/read dest before DMA is complete */

	return rc;
}
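
/* The TX filter is entirely controlled by this driver, and is modified
 * underneath the feet of the VF
 */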
static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	if (vf->tx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->tx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n",
			  vf->pci_name, vf->tx_filter_id);
		vf->tx_filter_id = -1;
	}

	if (is_zero_ether_addr(vf->addr.mac_addr))
		return;

	/* Turn on TX filtering automatically if not explicitly
	 * enabled or disabled.
	 */
	if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2)
		vf->tx_filter_mode = VF_TX_FILTER_ON;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_tx(&filter, abs_index(vf, 0));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to migrate tx filter for vf %s\n",
			   vf->pci_name);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n",
			  vf->pci_name, rc);
		vf->tx_filter_id = rc;
	}
}
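
/* The RX filter is managed here on behalf of the VF driver */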
static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	if (vf->rx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->rx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n",
			  vf->pci_name, vf->rx_filter_id);
		vf->rx_filter_id = -1;
	}

	if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr))
		return;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED,
			   vf->rx_filter_flags,
			   abs_index(vf, vf->rx_filter_qid));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to insert rx filter for vf %s\n",
			   vf->pci_name);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n",
			  vf->pci_name, rc);
		vf->rx_filter_id = rc;
	}
}

static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct siena_nic_data *nic_data = efx->nic_data;

	efx_siena_sriov_reset_tx_filter(vf);
	efx_siena_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &nic_data->peer_work);
}
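
/* Push the peer list to this VF. The caller must hold status_lock to
 * interlock with VFDI requests, and they must be serialised against
 * manipulation of local_page_list, either by acquiring local_lock or
 * by running from efx_siena_sriov_peer_work()
 */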
static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct siena_nic_data *nic_data = efx->nic_data;
	struct vfdi_status *status = nic_data->vfdi_status.addr;
	struct efx_memcpy_req copy[4];
	struct efx_endpoint_page *epp;
	unsigned int pos, count;
	unsigned data_offset;
	efx_qword_t event;

	WARN_ON(!mutex_is_locked(&vf->status_lock));
	WARN_ON(!vf->status_addr);

	status->local = vf->addr;
	status->generation_end = ++status->generation_start;

	memset(copy, '\0', sizeof(copy));
	/* Write generation_start */
	copy[0].from_buf = &status->generation_start;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						     generation_start);
	copy[0].length = sizeof(status->generation_start);
	/* DMA the rest of the structure (excluding the generations). This
	 * assumes that the non-generation portion of vfdi_status is in
	 * one chunk starting at the version member.
	 */
	data_offset = offsetof(struct vfdi_status, version);
	copy[1].from_rid = efx->pci_dev->devfn;
	copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->status_addr + data_offset;
	copy[1].length = status->length - data_offset;

	/* Copy the peer pages */
	pos = 2;
	count = 0;
	list_for_each_entry(epp, &nic_data->local_page_list, link) {
		if (count == vf->peer_page_count) {
			/* The VF driver will know it needs to provide more
			 * pages because peer_count > peer_page_count.
			 */
			break;
		}
		copy[pos].from_buf = NULL;
		copy[pos].from_rid = efx->pci_dev->devfn;
		copy[pos].from_addr = epp->addr;
		copy[pos].to_rid = vf->pci_rid;
		copy[pos].to_addr = vf->peer_page_addrs[count];
		copy[pos].length = EFX_PAGE_SIZE;

		if (++pos == ARRAY_SIZE(copy)) {
			efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
			pos = 0;
		}
		++count;
	}

	/* Write generation_end */
	copy[pos].from_buf = &status->generation_end;
	copy[pos].to_rid = vf->pci_rid;
	copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						       generation_end);
	copy[pos].length = sizeof(status->generation_end);
	efx_siena_sriov_memcpy(efx, copy, pos + 1);

	/* Notify the guest */
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
			     VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
	++vf->msg_seqno;
	efx_farch_generate_event(efx,
				 EFX_VI_BASE + vf->index * efx_vf_size(efx),
				 &event);
}

/* Write a run of buffer table entries for a VF, or clear them if
 * addr is NULL.
 */
static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset,
				 u64 *addr, unsigned count)
{
	efx_qword_t buf;
	unsigned pos;

	for (pos = 0; pos < count; ++pos) {
		EFX_POPULATE_QWORD_3(buf,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF,
				     addr ? addr[pos] >> 12 : 0,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL,
				&buf, offset + pos);
	}
}

static bool bad_vf_index(struct efx_nic *efx, unsigned index)
{
	return index >= efx_vf_size(efx);
}

static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
{
	unsigned max_buf_count = max_entry_count *
		sizeof(efx_qword_t) / EFX_BUF_SIZE;

	return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
}
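
/* Check that VI specified by per-port index belongs to a VF.
 * Optionally set VF index and VI index within the VF.
 */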
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
			 struct siena_vf **vf_out, unsigned *rel_index_out)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	unsigned vf_i;

	if (abs_index < EFX_VI_BASE)
		return true;
	vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
	if (vf_i >= efx->vf_init_count)
		return true;

	if (vf_out)
		*vf_out = nic_data->vf + vf_i;
	if (rel_index_out)
		*rel_index_out = abs_index % efx_vf_size(efx);
	return false;
}

static int efx_vfdi_init_evq(struct siena_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_evq = req->u.init_evq.index;
	unsigned buf_count = req->u.init_evq.buf_count;
	unsigned abs_evq = abs_index(vf, vf_evq);
	unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq);
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) ||
	    bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
				  vf->pci_name, vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(buf_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);

	if (vf_evq == 0) {
		memcpy(vf->evq0_addrs, req->u.init_evq.addr,
		       buf_count * sizeof(u64));
		vf->evq0_count = buf_count;
	}

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_init_rxq(struct siena_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.init_rxq.index;
	unsigned vf_evq = req->u.init_rxq.evq;
	unsigned buf_count = req->u.init_rxq.buf_count;
	unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
	unsigned label;
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
	    vf_rxq >= VF_MAX_RX_QUEUES ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
				  "buf_count %d\n", vf->pci_name, vf_rxq,
				  vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}
	/* Count the queue only on a 0 -> 1 transition of its mask bit */
	if (!__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
		++vf->rxq_count;
	efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);

	label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_RX_DESCQ_LABEL, label,
			     FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_AZ_RX_DESCQ_JUMBO,
			     !!(req->u.init_rxq.flags &
				VFDI_RXQ_FLAG_SCATTER_EN),
			     FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
			 abs_index(vf, vf_rxq));

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_init_txq(struct siena_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_txq = req->u.init_txq.index;
	unsigned vf_evq = req->u.init_txq.evq;
	unsigned buf_count = req->u.init_txq.buf_count;
	unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
	unsigned label, eth_filt_en;
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
	    vf_txq >= vf_max_tx_channels ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
				  "buf_count %d\n", vf->pci_name, vf_txq,
				  vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	mutex_lock(&vf->txq_lock);
	/* Count the queue only on a 0 -> 1 transition of its mask bit */
	if (!__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
		++vf->txq_count;
	mutex_unlock(&vf->txq_lock);
	efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);

	eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;

	label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_8(reg,
			     FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
			     FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
			     FRF_AZ_TX_DESCQ_EN, 1,
			     FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_TX_DESCQ_LABEL, label,
			     FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_BZ_TX_NON_IP_DROP_DIS, 1);
	efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
			 abs_index(vf, vf_txq));

	return VFDI_RC_SUCCESS;
}

/* Returns true when efx_vfdi_fini_all_queues() should wake */
static bool efx_vfdi_flush_wake(struct siena_vf *vf)
{
	/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
	smp_mb();

	return (!vf->txq_count && !vf->rxq_count) ||
		atomic_read(&vf->rxq_retry_count);
}

static void efx_vfdi_flush_clear(struct siena_vf *vf)
{
	memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
	vf->txq_count = 0;
	memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
	vf->rxq_count = 0;
	memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
	atomic_set(&vf->rxq_retry_count, 0);
}

static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	efx_oword_t reg;
	unsigned count = efx_vf_size(efx);
	unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
	unsigned timeout = HZ;
	unsigned index, rxqs_count;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX);
	int rc;

	BUILD_BUG_ON(VF_MAX_RX_QUEUES >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	rtnl_lock();
	efx_siena_prepare_flush(efx);
	rtnl_unlock();

	/* Flush all the initialized queues */
	rxqs_count = 0;
	for (index = 0; index < count; ++index) {
		if (test_bit(index, vf->txq_mask)) {
			EFX_POPULATE_OWORD_2(reg,
					     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
					     FRF_AZ_TX_FLUSH_DESCQ,
					     vf_offset + index);
			efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
		}
		if (test_bit(index, vf->rxq_mask)) {
			MCDI_SET_ARRAY_DWORD(
				inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
				rxqs_count, vf_offset + index);
			rxqs_count++;
		}
	}

	atomic_set(&vf->rxq_retry_count, 0);
	while (timeout && (vf->rxq_count || vf->txq_count)) {
		rc = efx_siena_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
					MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
					NULL, 0, NULL);
		WARN_ON(rc < 0);

		timeout = wait_event_timeout(vf->flush_waitq,
					     efx_vfdi_flush_wake(vf),
					     timeout);
		rxqs_count = 0;
		for (index = 0; index < count; ++index) {
			if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
				atomic_dec(&vf->rxq_retry_count);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					rxqs_count, vf_offset + index);
				rxqs_count++;
			}
		}
	}

	rtnl_lock();
	efx_siena_finish_flush(efx);
	rtnl_unlock();

	/* Irrespective of success/failure, fini the queues */
	EFX_ZERO_OWORD(reg);
	for (index = 0; index < count; ++index) {
		efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
				 vf_offset + index);
	}
	efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL,
			     EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
	efx_vfdi_flush_clear(vf);

	vf->evq0_count = 0;

	return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}

static int efx_vfdi_insert_filter(struct siena_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct siena_nic_data *nic_data = efx->nic_data;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.mac_filter.rxq;
	unsigned flags;

	if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INSERT_FILTER from %s: rxq %d "
				  "flags 0x%x\n", vf->pci_name, vf_rxq,
				  req->u.mac_filter.flags);
		return VFDI_RC_EINVAL;
	}

	flags = 0;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
		flags |= EFX_FILTER_FLAG_RX_RSS;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
		flags |= EFX_FILTER_FLAG_RX_SCATTER;
	vf->rx_filter_flags = flags;
	vf->rx_filter_qid = vf_rxq;
	vf->rx_filtering = true;

	efx_siena_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &nic_data->peer_work);

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_remove_all_filters(struct siena_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct siena_nic_data *nic_data = efx->nic_data;

	vf->rx_filtering = false;
	efx_siena_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &nic_data->peer_work);

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_set_status_page(struct siena_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct siena_nic_data *nic_data = efx->nic_data;
	struct vfdi_req *req = vf->buf.addr;
	u64 page_count = req->u.set_status_page.peer_page_count;
	u64 max_page_count =
		(EFX_PAGE_SIZE -
		 offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0]))
		/ sizeof(req->u.set_status_page.peer_page_addr[0]);

	if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid SET_STATUS_PAGE from %s\n",
				  vf->pci_name);
		return VFDI_RC_EINVAL;
	}

	mutex_lock(&nic_data->local_lock);
	mutex_lock(&vf->status_lock);
	vf->status_addr = req->u.set_status_page.dma_addr;

	kfree(vf->peer_page_addrs);
	vf->peer_page_addrs = NULL;
	vf->peer_page_count = 0;

	if (page_count) {
		vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
					      GFP_KERNEL);
		if (vf->peer_page_addrs) {
			memcpy(vf->peer_page_addrs,
			       req->u.set_status_page.peer_page_addr,
			       page_count * sizeof(u64));
			vf->peer_page_count = page_count;
		}
	}

	__efx_siena_sriov_push_vf_status(vf);
	mutex_unlock(&vf->status_lock);
	mutex_unlock(&nic_data->local_lock);

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_clear_status_page(struct siena_vf *vf)
{
	mutex_lock(&vf->status_lock);
	vf->status_addr = 0;
	mutex_unlock(&vf->status_lock);

	return VFDI_RC_SUCCESS;
}

typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);

static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
	[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
	[VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
	[VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
	[VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
	[VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
	[VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
	[VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
	[VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
};

static void efx_siena_sriov_vfdi(struct work_struct *work)
{
	struct siena_vf *vf = container_of(work, struct siena_vf, req);
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	struct efx_memcpy_req copy[2];
	int rc;

	/* Copy the request page into the local address space */
	memset(copy, '\0', sizeof(copy));
	copy[0].from_rid = vf->pci_rid;
	copy[0].from_addr = vf->req_addr;
	copy[0].to_rid = efx->pci_dev->devfn;
	copy[0].to_addr = vf->buf.dma_addr;
	copy[0].length = EFX_PAGE_SIZE;
	rc = efx_siena_sriov_memcpy(efx, copy, 1);
	if (rc) {
		/* If we can't get the request, we can't reply to the caller */
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Unable to fetch VFDI request from %s rc %d\n",
				  vf->pci_name, -rc);
		vf->busy = false;
		return;
	}

	if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
		rc = vfdi_ops[req->op](vf);
		if (rc == 0) {
			netif_dbg(efx, hw, efx->net_dev,
				  "vfdi request %d from %s ok\n",
				  req->op, vf->pci_name);
		}
	} else {
		netif_dbg(efx, hw, efx->net_dev,
			  "ERROR: Unrecognised request %d from VF %s addr "
			  "%llx\n", req->op, vf->pci_name,
			  (unsigned long long)vf->req_addr);
		rc = VFDI_RC_EOPNOTSUPP;
	}

	/* Allow subsequent VF requests */
	vf->busy = false;
	smp_wmb();

	/* Respond to the request */
	req->rc = rc;
	req->op = VFDI_OP_RESPONSE;

	memset(copy, '\0', sizeof(copy));
	copy[0].from_buf = &req->rc;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc);
	copy[0].length = sizeof(req->rc);
	copy[1].from_buf = &req->op;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
	copy[1].length = sizeof(req->op);

	(void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
}
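
/* After a reset the event queues inside the guests no longer exist. Fill the
 * event ring in guest memory with VFDI reset events, then (re-initialise) the
 * event queue to raise an interrupt. The guest driver will then recover.
 */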
static void efx_siena_sriov_reset_vf(struct siena_vf *vf,
				     struct efx_buffer *buffer)
{
	struct efx_nic *efx = vf->efx;
	struct efx_memcpy_req copy_req[4];
	efx_qword_t event;
	unsigned int pos, count, k, buftbl, abs_evq;
	efx_oword_t reg;
	efx_dword_t ptr;
	int rc;

	BUG_ON(buffer->len != EFX_PAGE_SIZE);

	if (!vf->evq0_count)
		return;
	BUG_ON(vf->evq0_count & (vf->evq0_count - 1));

	mutex_lock(&vf->status_lock);
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, vf->msg_seqno,
			     VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
	vf->msg_seqno++;
	for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
		memcpy(buffer->addr + pos, &event, sizeof(event));

	for (pos = 0; pos < vf->evq0_count; pos += count) {
		count = min_t(unsigned, vf->evq0_count - pos,
			      ARRAY_SIZE(copy_req));
		for (k = 0; k < count; k++) {
			copy_req[k].from_buf = NULL;
			copy_req[k].from_rid = efx->pci_dev->devfn;
			copy_req[k].from_addr = buffer->dma_addr;
			copy_req[k].to_rid = vf->pci_rid;
			copy_req[k].to_addr = vf->evq0_addrs[pos + k];
			copy_req[k].length = EFX_PAGE_SIZE;
		}
		rc = efx_siena_sriov_memcpy(efx, copy_req, count);
		if (rc) {
			if (net_ratelimit())
				netif_err(efx, hw, efx->net_dev,
					  "ERROR: Unable to notify %s of reset"
					  ": %d\n", vf->pci_name, -rc);
			break;
		}
	}

	/* Reinitialise, arm and trigger evq0 */
	abs_evq = abs_index(vf, 0);
	buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
	efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
	EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
	efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq);

	mutex_unlock(&vf->status_lock);
}

static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
{
	struct siena_vf *vf = container_of(work, struct siena_vf, reset_work);
	struct efx_nic *efx = vf->efx;
	struct efx_buffer buf;

	if (!efx_siena_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
		efx_siena_sriov_reset_vf(vf, &buf);
		efx_siena_free_buffer(efx, &buf);
	}
}

static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx)
{
	netif_err(efx, drv, efx->net_dev,
		  "ERROR: IOV requires MSI-X and 1 additional interrupt vector. IOV disabled\n");
	efx->vf_count = 0;
}

static int efx_siena_sriov_probe_channel(struct efx_channel *channel)
{
	struct siena_nic_data *nic_data = channel->efx->nic_data;

	nic_data->vfdi_channel = channel;
	return 0;
}

static void
efx_siena_sriov_get_channel_name(struct efx_channel *channel,
				 char *buf, size_t len)
{
	snprintf(buf, len, "%s-iov", channel->efx->name);
}

static const struct efx_channel_type efx_siena_sriov_channel_type = {
	.handle_no_channel = efx_siena_sriov_handle_no_channel,
	.pre_probe = efx_siena_sriov_probe_channel,
	.post_remove = efx_siena_channel_dummy_op_void,
	.get_name = efx_siena_sriov_get_channel_name,
	/* no copy operation; channel must not be reallocated */
	.keep_eventq = true,
};

void efx_siena_sriov_probe(struct efx_nic *efx)
{
	unsigned count;

	if (!max_vfs)
		return;

	if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
		pci_info(efx->pci_dev, "no SR-IOV VFs probed\n");
		return;
	}
	if (count > 0 && count > max_vfs)
		count = max_vfs;

	/* The resource dimensioning code will reduce vf_count as appropriate */
	efx->vf_count = count;

	efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type;
}
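
/* Copy the list of individual addresses into the vfdi_status.peers
 * array and auxiliary pages, protected by local_lock. Drop that lock
 * and then broadcast the address list to every VF.
 */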
static void efx_siena_sriov_peer_work(struct work_struct *data)
{
	struct siena_nic_data *nic_data = container_of(data,
						       struct siena_nic_data,
						       peer_work);
	struct efx_nic *efx = nic_data->efx;
	struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
	struct siena_vf *vf;
	struct efx_local_addr *local_addr;
	struct vfdi_endpoint *peer;
	struct efx_endpoint_page *epp;
	struct list_head pages;
	unsigned int peer_space;
	unsigned int peer_count;
	unsigned int pos;

	mutex_lock(&nic_data->local_lock);

	/* Move the existing peer pages off the local_page_list */
	INIT_LIST_HEAD(&pages);
	list_splice_tail_init(&nic_data->local_page_list, &pages);

	/* Populate the VF addresses starting from entry 1 (entry 0 is
	 * the PF address)
	 */
	peer = vfdi_status->peers + 1;
	peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
	peer_count = 1;
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = nic_data->vf + pos;

		mutex_lock(&vf->status_lock);
		if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
			*peer++ = vf->addr;
			++peer_count;
			--peer_space;
			BUG_ON(peer_space == 0);
		}
		mutex_unlock(&vf->status_lock);
	}

	/* Fill the remaining addresses */
	list_for_each_entry(local_addr, &nic_data->local_addr_list, link) {
		ether_addr_copy(peer->mac_addr, local_addr->addr);
		peer->tci = 0;
		++peer;
		++peer_count;
		if (--peer_space == 0) {
			if (list_empty(&pages)) {
				epp = kmalloc(sizeof(*epp), GFP_KERNEL);
				if (!epp)
					break;
				epp->ptr = dma_alloc_coherent(
					&efx->pci_dev->dev, EFX_PAGE_SIZE,
					&epp->addr, GFP_KERNEL);
				if (!epp->ptr) {
					kfree(epp);
					break;
				}
			} else {
				epp = list_first_entry(
					&pages, struct efx_endpoint_page, link);
				list_del(&epp->link);
			}

			list_add_tail(&epp->link, &nic_data->local_page_list);
			peer = (struct vfdi_endpoint *)epp->ptr;
			peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
		}
	}
	vfdi_status->peer_count = peer_count;
	mutex_unlock(&nic_data->local_lock);

	/* Free any now unused endpoint pages */
	while (!list_empty(&pages)) {
		epp = list_first_entry(
			&pages, struct efx_endpoint_page, link);
		list_del(&epp->link);
		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
				  epp->ptr, epp->addr);
		kfree(epp);
	}

	/* Finally, push the pages */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = nic_data->vf + pos;

		mutex_lock(&vf->status_lock);
		if (vf->status_addr)
			__efx_siena_sriov_push_vf_status(vf);
		mutex_unlock(&vf->status_lock);
	}
}

static void efx_siena_sriov_free_local(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	struct efx_local_addr *local_addr;
	struct efx_endpoint_page *epp;

	while (!list_empty(&nic_data->local_addr_list)) {
		local_addr = list_first_entry(&nic_data->local_addr_list,
					      struct efx_local_addr, link);
		list_del(&local_addr->link);
		kfree(local_addr);
	}

	while (!list_empty(&nic_data->local_page_list)) {
		epp = list_first_entry(&nic_data->local_page_list,
				       struct efx_endpoint_page, link);
		list_del(&epp->link);
		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
				  epp->ptr, epp->addr);
		kfree(epp);
	}
}

static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
{
	unsigned index;
	struct siena_vf *vf;
	struct siena_nic_data *nic_data = efx->nic_data;

	nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf),
			       GFP_KERNEL);
	if (!nic_data->vf)
		return -ENOMEM;

	for (index = 0; index < efx->vf_count; ++index) {
		vf = nic_data->vf + index;

		vf->efx = efx;
		vf->index = index;
		vf->rx_filter_id = -1;
		vf->tx_filter_mode = VF_TX_FILTER_AUTO;
		vf->tx_filter_id = -1;
		INIT_WORK(&vf->req, efx_siena_sriov_vfdi);
		INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work);
		init_waitqueue_head(&vf->flush_waitq);
		mutex_init(&vf->status_lock);
		mutex_init(&vf->txq_lock);
	}

	return 0;
}

static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	struct siena_vf *vf;
	unsigned int pos;

	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = nic_data->vf + pos;

		efx_siena_free_buffer(efx, &vf->buf);
		kfree(vf->peer_page_addrs);
		vf->peer_page_addrs = NULL;
		vf->peer_page_count = 0;

		vf->evq0_count = 0;
	}
}

static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	struct siena_nic_data *nic_data = efx->nic_data;
	unsigned index, devfn, sriov, buftbl_base;
	u16 offset, stride;
	struct siena_vf *vf;
	int rc;

	sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
	if (!sriov)
		return -ENOENT;

	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);

	buftbl_base = nic_data->vf_buftbl_base;
	devfn = pci_dev->devfn + offset;
	for (index = 0; index < efx->vf_count; ++index) {
		vf = nic_data->vf + index;

		/* Reserve buffer entries */
		vf->buftbl_base = buftbl_base;
		buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx);

		vf->pci_rid = devfn;
		snprintf(vf->pci_name, sizeof(vf->pci_name),
			 "%04x:%02x:%02x.%d",
			 pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
			 PCI_SLOT(devfn), PCI_FUNC(devfn));

		rc = efx_siena_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
					    GFP_KERNEL);
		if (rc)
			goto fail;

		devfn += stride;
	}

	return 0;

fail:
	efx_siena_sriov_vfs_fini(efx);
	return rc;
}

int efx_siena_sriov_init(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct siena_nic_data *nic_data = efx->nic_data;
	struct vfdi_status *vfdi_status;
	int rc;

	/* Ensure there's room for vf_channel */
	BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE);
	/* Ensure that VI_BASE is aligned on VI_SCALE */
	BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1));

	if (efx->vf_count == 0)
		return 0;

	rc = efx_siena_sriov_cmd(efx, true, NULL, NULL);
	if (rc)
		goto fail_cmd;

	rc = efx_siena_alloc_buffer(efx, &nic_data->vfdi_status,
				    sizeof(*vfdi_status), GFP_KERNEL);
	if (rc)
		goto fail_status;
	vfdi_status = nic_data->vfdi_status.addr;
	memset(vfdi_status, 0, sizeof(*vfdi_status));
	vfdi_status->version = 1;
	vfdi_status->length = sizeof(*vfdi_status);
	vfdi_status->max_tx_channels = vf_max_tx_channels;
	vfdi_status->vi_scale = efx->vi_scale;
	vfdi_status->rss_rxq_count = efx->rss_spread;
	vfdi_status->peer_count = 1 + efx->vf_count;
	vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;

	rc = efx_siena_sriov_vf_alloc(efx);
	if (rc)
		goto fail_alloc;

	mutex_init(&nic_data->local_lock);
	INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work);
	INIT_LIST_HEAD(&nic_data->local_addr_list);
	INIT_LIST_HEAD(&nic_data->local_page_list);

	rc = efx_siena_sriov_vfs_init(efx);
	if (rc)
		goto fail_vfs;

	rtnl_lock();
	ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr);
	efx->vf_init_count = efx->vf_count;
	rtnl_unlock();

	efx_siena_sriov_usrev(efx, true);

	/* At this point we must be ready to accept VFDI requests */

	rc = pci_enable_sriov(efx->pci_dev, efx->vf_count);
	if (rc)
		goto fail_pci;

	netif_info(efx, probe, net_dev,
		   "enabled SR-IOV for %d VFs, %d VI per VF\n",
		   efx->vf_count, efx_vf_size(efx));
	return 0;

fail_pci:
	efx_siena_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();
	efx_siena_sriov_vfs_fini(efx);
fail_vfs:
	cancel_work_sync(&nic_data->peer_work);
	efx_siena_sriov_free_local(efx);
	kfree(nic_data->vf);
fail_alloc:
	efx_siena_free_buffer(efx, &nic_data->vfdi_status);
fail_status:
	efx_siena_sriov_cmd(efx, false, NULL, NULL);
fail_cmd:
	return rc;
}

void efx_siena_sriov_fini(struct efx_nic *efx)
{
	struct siena_vf *vf;
	unsigned int pos;
	struct siena_nic_data *nic_data = efx->nic_data;

	if (efx->vf_init_count == 0)
		return;

	/* Disable all interfaces to reconfiguration */
	BUG_ON(nic_data->vfdi_channel->enabled);
	efx_siena_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();

	/* Flush all reconfiguration work */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = nic_data->vf + pos;
		cancel_work_sync(&vf->req);
		cancel_work_sync(&vf->reset_work);
	}
	cancel_work_sync(&nic_data->peer_work);

	pci_disable_sriov(efx->pci_dev);

	/* Tear down back-end state */
	efx_siena_sriov_vfs_fini(efx);
	efx_siena_sriov_free_local(efx);
	kfree(nic_data->vf);
	efx_siena_free_buffer(efx, &nic_data->vfdi_status);
	efx_siena_sriov_cmd(efx, false, NULL, NULL);
}

void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct siena_vf *vf;
	unsigned qid, seq, type, data;

	qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);

	/* USR_EV_REG_VALUE is dword0, so the VFDI_EV fields can be
	 * extracted directly
	 */
	BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0);
	seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ);
	type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE);
	data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA);

	netif_vdbg(efx, hw, efx->net_dev,
		   "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
		   qid, seq, type, data);

	if (map_vi_index(efx, qid, &vf, NULL))
		return;
	if (vf->busy)
		goto error;

	if (type == VFDI_EV_TYPE_REQ_WORD0) {
		/* Resynchronise */
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->req_seqno = seq + 1;
		vf->req_addr = 0;
	} else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type)
		goto error;

	switch (vf->req_type) {
	case VFDI_EV_TYPE_REQ_WORD0:
	case VFDI_EV_TYPE_REQ_WORD1:
	case VFDI_EV_TYPE_REQ_WORD2:
		/* Each event carries 16 bits of the request address;
		 * req_type doubles as the word index since REQ_WORD0 == 0.
		 */
		vf->req_addr |= (u64)data << (vf->req_type << 4);
		++vf->req_type;
		return;

	case VFDI_EV_TYPE_REQ_WORD3:
		vf->req_addr |= (u64)data << 48;
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->busy = true;
		queue_work(vfdi_workqueue, &vf->req);
		return;
	}

error:
	if (net_ratelimit())
		netif_err(efx, hw, efx->net_dev,
			  "ERROR: Screaming VFDI request from %s\n",
			  vf->pci_name);
	/* Reset the request and sequence number */
	vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
	vf->req_seqno = seq + 1;
}

void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	struct siena_vf *vf;

	if (vf_i > efx->vf_init_count)
		return;
	vf = nic_data->vf + vf_i;
	netif_info(efx, hw, efx->net_dev,
		   "FLR on VF %s\n", vf->pci_name);

	vf->status_addr = 0;
	efx_vfdi_remove_all_filters(vf);
	efx_vfdi_flush_clear(vf);

	vf->evq0_count = 0;
}

int efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;

	if (!efx->vf_init_count)
		return 0;
	ether_addr_copy(vfdi_status->peers[0].mac_addr,
			efx->net_dev->dev_addr);
	queue_work(vfdi_workqueue, &nic_data->peer_work);

	return 0;
}

void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct siena_vf *vf;
	unsigned queue, qid;

	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (map_vi_index(efx, queue, &vf, &qid))
		return;

	if (!test_bit(qid, vf->txq_mask))
		return;

	__clear_bit(qid, vf->txq_mask);
	--vf->txq_count;

	if (efx_vfdi_flush_wake(vf))
		wake_up(&vf->flush_waitq);
}

void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct siena_vf *vf;
	unsigned ev_failed, queue, qid;

	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	ev_failed = EFX_QWORD_FIELD(*event,
				    FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (map_vi_index(efx, queue, &vf, &qid))
		return;
	if (!test_bit(qid, vf->rxq_mask))
		return;

	if (ev_failed) {
		set_bit(qid, vf->rxq_retry_mask);
		atomic_inc(&vf->rxq_retry_count);
	} else {
		__clear_bit(qid, vf->rxq_mask);
		--vf->rxq_count;
	}
	if (efx_vfdi_flush_wake(vf))
		wake_up(&vf->flush_waitq);
}
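
/* A descriptor fetch error means the VF's queues are broken;
 * schedule a VF reset.
 */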
void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
	struct siena_vf *vf;
	unsigned int rel;

	if (map_vi_index(efx, dmaq, &vf, &rel))
		return;

	if (net_ratelimit())
		netif_err(efx, hw, efx->net_dev,
			  "VF %d DMA Q %d reports descriptor fetch error.\n",
			  vf->index, rel);
	queue_work(vfdi_workqueue, &vf->reset_work);
}
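
/* Re-enable SR-IOV and reset every VF following an NIC reset.
 * The caller must hold the RTNL lock.
 */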
void efx_siena_sriov_reset(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	unsigned int vf_i;
	struct efx_buffer buf;
	struct siena_vf *vf;

	ASSERT_RTNL();

	if (efx->vf_init_count == 0)
		return;

	efx_siena_sriov_usrev(efx, true);
	(void)efx_siena_sriov_cmd(efx, true, NULL, NULL);

	if (efx_siena_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
		return;

	for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
		vf = nic_data->vf + vf_i;
		efx_siena_sriov_reset_vf(vf, &buf);
	}

	efx_siena_free_buffer(efx, &buf);
}

int efx_init_sriov(void)
{
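	/* A single-threaded workqueue is sufficient: the VFDI request and
	 * peer-update handlers spend almost all their time sleeping in MCDI.
	 */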
	vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
	if (!vfdi_workqueue)
		return -ENOMEM;
	return 0;
}

void efx_fini_sriov(void)
{
	destroy_workqueue(vfdi_workqueue);
}

int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	struct siena_vf *vf;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = nic_data->vf + vf_i;

	mutex_lock(&vf->status_lock);
	ether_addr_copy(vf->addr.mac_addr, mac);
	__efx_siena_sriov_update_vf_addr(vf);
	mutex_unlock(&vf->status_lock);

	return 0;
}

int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
				u16 vlan, u8 qos)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	struct siena_vf *vf;
	u16 tci;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = nic_data->vf + vf_i;

	mutex_lock(&vf->status_lock);
	tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
	vf->addr.tci = htons(tci);
	__efx_siena_sriov_update_vf_addr(vf);
	mutex_unlock(&vf->status_lock);

	return 0;
}

int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
				    bool spoofchk)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	struct siena_vf *vf;
	int rc;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = nic_data->vf + vf_i;

	mutex_lock(&vf->txq_lock);
	if (vf->txq_count == 0) {
		vf->tx_filter_mode =
			spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF;
		rc = 0;
	} else {
		/* This cannot be changed while TX queues are running */
		rc = -EBUSY;
	}
	mutex_unlock(&vf->txq_lock);
	return rc;
}

int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
				  struct ifla_vf_info *ivi)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	struct siena_vf *vf;
	u16 tci;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = nic_data->vf + vf_i;

	ivi->vf = vf_i;
	ether_addr_copy(ivi->mac, vf->addr.mac_addr);
	ivi->max_tx_rate = 0;
	ivi->min_tx_rate = 0;
	tci = ntohs(vf->addr.tci);
	ivi->vlan = tci & VLAN_VID_MASK;
	ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
	ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON;

	return 0;
}

bool efx_siena_sriov_wanted(struct efx_nic *efx)
{
	return efx->vf_count != 0;
}

int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs)
{
	return 0;
}