/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source files
 *  to include common definitions.
 */

#ifndef _OCTEON_MAIN_H_
#define  _OCTEON_MAIN_H_

#include <linux/sched/signal.h>

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif

#define DRV_NAME "LiquidIO"

struct octeon_device_priv {
	/** Tasklet structures for this device. */
	struct tasklet_struct droq_tasklet;
	unsigned long napi_mask;
	struct octeon_device *dev;
};

/** This structure is used by the NIC driver to store the information
 * required to free the sk_buff when the packet has been fetched by
 * Octeon. The byte offsets below assume the worst case of a 64-bit
 * system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8.  Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16.  Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24.  Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32. Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-47. Piggybacked soft command, if any */
	struct octeon_soft_command *sc;
};

/* BQL-related functions */
int octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);

void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
				  struct octeon_droq *droq);

/** Swap 8B blocks */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}
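
/*
 * Illustrative usage sketch for octeon_swap_8B_data() (the buffer name and
 * size here are hypothetical, not taken from a specific caller): convert
 * each 64-bit word of a command buffer to big-endian byte order before
 * handing it to the hardware.
 *
 *	u64 cmd[4];
 *	...
 *	octeon_swap_8B_data(cmd, 4);	// each u64 is now big-endian
 */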

/**
 * \brief unmaps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * \brief maps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 * @param max_map_len maximum length of mapped memory
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		goto err_release_region;

	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		goto err_release_region;
	}
	oct->mmio[baridx].done = 1;

	return 0;

err_release_region:
	pci_release_region(oct->pci_dev, baridx * 2);
	return 1;
}
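
/*
 * Illustrative usage sketch for the two helpers above (the error handling
 * shown is an assumption, not copied from a specific caller): map BAR0
 * during probe and release it again on teardown. Passing 0 as max_map_len
 * maps the full BAR length.
 *
 *	if (octeon_map_pci_barx(oct, 0, 0))
 *		return 1;
 *	...
 *	octeon_unmap_pci_barx(oct, 0);
 */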

/* input parameters:
 * sc: pointer to a soft request
 * timeout: time, in milliseconds, that the application wants to wait for
 *          the response to the request.
 *          0: the request will wait until its response gets back
 *          from the firmware within LIO_SC_MAX_TMO_MS milliseconds.
 *          If the response does not return within
 *          LIO_SC_MAX_TMO_MS milliseconds, lio_process_ordered_list()
 *          will move the request to the zombie response list.
 *
 * return value:
 * 0: got the response from firmware for the sc request.
 * errno -EINTR: the user aborted the command.
 * errno -ETIME: the user-specified timeout value has expired.
 * errno -EBUSY: the response to the request did not return in a
 *               reasonable time (LIO_SC_MAX_TMO_MS).
 *               The sc will be moved to the zombie response list by
 *               lio_process_ordered_list().
 *
 * For a request with a non-zero return value, sc->caller_is_done
 * will be marked 1.
 * For a request with a zero return value, the requestor should mark
 * sc->caller_is_done with 1 after examining the response of sc.
 * lio_process_ordered_list() will free the soft command on behalf
 * of the soft command requestor.
 * This is to fix a possible race condition in which both the timeout
 * handling and lio_process_ordered_list()/the callback function try to
 * free the same sc structure.
 */
static inline int
wait_for_sc_completion_timeout(struct octeon_device *oct_dev,
			       struct octeon_soft_command *sc,
			       unsigned long timeout)
{
	int errno = 0;
	long timeout_jiff;

	if (timeout)
		timeout_jiff = msecs_to_jiffies(timeout);
	else
		timeout_jiff = MAX_SCHEDULE_TIMEOUT;

	timeout_jiff =
		wait_for_completion_interruptible_timeout(&sc->complete,
							  timeout_jiff);
	if (timeout_jiff == 0) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc is timeout\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -ETIME;
	} else if (timeout_jiff == -ERESTARTSYS) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc is interrupted\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -EINTR;
	} else if (sc->sc_status == OCTEON_REQUEST_TIMEOUT) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc has fatal timeout\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -EBUSY;
	}

	return errno;
}
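
/*
 * Illustrative usage sketch (the soft-command setup and response handling
 * are assumed and heavily simplified): wait for a previously submitted
 * soft command and follow the caller_is_done protocol documented above.
 *
 *	int retval = wait_for_sc_completion_timeout(oct, sc, 0);
 *
 *	if (retval)
 *		return retval;		// sc->caller_is_done already set
 *
 *	// ... examine sc->sc_status and the response payload here ...
 *	WRITE_ONCE(sc->caller_is_done, true);
 */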

#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
#endif
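
/*
 * The macros above round val up to the next multiple of 4, 8, 16 or 128
 * using a 32-bit mask, e.g. ROUNDUP8(13) == 16 and ROUNDUP8(16) == 16
 * (worked examples added here for illustration; values are assumed to
 * fit in 32 bits).
 */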

#endif /* _OCTEON_MAIN_H_ */