#include "ena_eth_com.h"

static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) &
		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

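	/* Make sure the rest of the descriptor is read only after the
	 * phase bit has been validated
	 */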
	dma_rmb();

	return cdesc;
}

static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			   io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

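	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */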
	wmb();

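	/* The descriptor line is complete; copy it to the device */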
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

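	/* Switch phase bit in case of wrap around */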
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) >
		     llq_info->desc_list_entry_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

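	/* The current bounce buffer holds descriptors: write it to the device
	 * and fetch a fresh one
	 */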
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

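	/* Switch phase bit in case of wrap around */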
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

static struct ena_eth_io_rx_cdesc_base *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		 idx * io_cq->cdesc_entry_size_in_bytes);
}

static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) &
			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			   io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return -EFAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

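	/* bits 0-9 of the mss */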
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;

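	/* bits 10-13 of the mss */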
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

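	/* Extended meta desc */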
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

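	/* When meta caching is disabled, the caller must provide valid meta
	 * for every packet and a meta descriptor is created each time
	 */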
	if (io_sq->disable_meta_caching) {
		if (unlikely(!ena_tx_ctx->meta_valid))
			return -EINVAL;

		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;

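		/* Cache the meta desc */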
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return 0;
}

static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		   ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		   ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

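	/* num_bufs + 1 to account for a possible meta descriptor */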
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Header size is too large %d max header: %d\n",
			   header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
		     !buffer_to_push)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Push header wasn't provided in LLQ mode\n");
		return -EINVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to create and store tx meta desc\n");
		return rc;
	}

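	/* If the caller doesn't want to send packets */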
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

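	/* Set first desc when we don't have meta descriptor */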
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

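	/* Bits 0-9 of the req_id */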
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

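	/* Bits 10-15 of the req_id */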
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
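		/* The first buffer reuses the descriptor that was set up above */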
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
					   "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

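	/* set the last desc indicator */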
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		   nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
			   ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return -EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
	} while (1);

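	/* Update SQ head ptr */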
	io_sq->next_to_comp += nb_hw_desc;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		   io_sq->qid, io_sq->next_to_comp);

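	/* Get rx flags from the last cdesc */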
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
		   __func__, io_sq->qid, req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}