0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * Shared Memory Communications over RDMA (SMC-R) and RoCE
0004  *
0005  *  Definitions for SMC Connections, Link Groups and Links
0006  *
0007  *  Copyright IBM Corp. 2016
0008  *
0009  *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
0010  */
0011 
0012 #ifndef _SMC_CORE_H
0013 #define _SMC_CORE_H
0014 
0015 #include <linux/atomic.h>
0016 #include <linux/smc.h>
0017 #include <linux/pci.h>
0018 #include <rdma/ib_verbs.h>
0019 #include <net/genetlink.h>
0020 
0021 #include "smc.h"
0022 #include "smc_ib.h"
0023 
0024 #define SMC_RMBS_PER_LGR_MAX    255 /* max. # of RMBs per link group */
0025 
0026 struct smc_lgr_list {           /* list of link group definitions */
0027     struct list_head    list;
0028     spinlock_t      lock;   /* protects list of link groups */
0029     u32         num;    /* unique link group number */
0030 };
0031 
0032 enum smc_lgr_role {     /* possible roles of a link group */
0033     SMC_CLNT,   /* client */
0034     SMC_SERV    /* server */
0035 };
0036 
0037 enum smc_link_state {           /* possible states of a link */
0038     SMC_LNK_UNUSED,     /* link is unused */
0039     SMC_LNK_INACTIVE,   /* link is inactive */
0040     SMC_LNK_ACTIVATING, /* link is being activated */
0041     SMC_LNK_ACTIVE,     /* link is active */
0042 };
0043 
0044 #define SMC_WR_BUF_SIZE     48  /* size of work request buffer */
0045 #define SMC_WR_BUF_V2_SIZE  8192    /* size of v2 work request buffer */
0046 
0047 struct smc_wr_buf {
0048     u8  raw[SMC_WR_BUF_SIZE];
0049 };
0050 
0051 struct smc_wr_v2_buf {
0052     u8  raw[SMC_WR_BUF_V2_SIZE];
0053 };
0054 
0055 #define SMC_WR_REG_MR_WAIT_TIME (5 * HZ)/* wait time for ib_wr_reg_mr result */
0056 
0057 enum smc_wr_reg_state {
0058     POSTED,     /* ib_wr_reg_mr request posted */
0059     CONFIRMED,  /* ib_wr_reg_mr response: successful */
0060     FAILED      /* ib_wr_reg_mr response: failure */
0061 };
0062 
0063 struct smc_rdma_sge {               /* sges for RDMA writes */
0064     struct ib_sge       wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE];
0065 };
0066 
0067 #define SMC_MAX_RDMA_WRITES 2       /* max. # of RDMA writes per
0068                          * message send
0069                          */
0070 
0071 struct smc_rdma_sges {              /* sges per message send */
0072     struct smc_rdma_sge tx_rdma_sge[SMC_MAX_RDMA_WRITES];
0073 };
0074 
0075 struct smc_rdma_wr {                /* work requests per message
0076                          * send
0077                          */
0078     struct ib_rdma_wr   wr_tx_rdma[SMC_MAX_RDMA_WRITES];
0079 };
0080 
0081 #define SMC_LGR_ID_SIZE     4
0082 
0083 struct smc_link {
0084     struct smc_ib_device    *smcibdev;  /* ib-device */
0085     u8          ibport;     /* port - values 1 | 2 */
0086     struct ib_pd        *roce_pd;   /* IB protection domain,
0087                          * unique for every RoCE QP
0088                          */
0089     struct ib_qp        *roce_qp;   /* IB queue pair */
0090     struct ib_qp_attr   qp_attr;    /* IB queue pair attributes */
0091 
0092     struct smc_wr_buf   *wr_tx_bufs;    /* WR send payload buffers */
0093     struct ib_send_wr   *wr_tx_ibs; /* WR send meta data */
0094     struct ib_sge       *wr_tx_sges;    /* WR send gather meta data */
0095     struct smc_rdma_sges    *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/
0096     struct smc_rdma_wr  *wr_tx_rdmas;   /* WR RDMA WRITE */
0097     struct smc_wr_tx_pend   *wr_tx_pends;   /* WR send waiting for CQE */
0098     struct completion   *wr_tx_compl;   /* WR send CQE completion */
0099     /* the above wr_tx_* vectors have wr_tx_cnt elements and use the same index */
0100     struct ib_send_wr   *wr_tx_v2_ib;   /* WR send v2 meta data */
0101     struct ib_sge       *wr_tx_v2_sge;  /* WR send v2 gather meta data*/
0102     struct smc_wr_tx_pend   *wr_tx_v2_pend; /* WR send v2 waiting for CQE */
0103     dma_addr_t      wr_tx_dma_addr; /* DMA address of wr_tx_bufs */
0104     dma_addr_t      wr_tx_v2_dma_addr; /* DMA address of v2 tx buf*/
0105     atomic_long_t       wr_tx_id;   /* seq # of last sent WR */
0106     unsigned long       *wr_tx_mask;    /* bit mask of used indexes */
0107     u32         wr_tx_cnt;  /* number of WR send buffers */
0108     wait_queue_head_t   wr_tx_wait; /* wait for free WR send buf */
0109     atomic_t        wr_tx_refcnt;   /* tx refs to link */
0110 
0111     struct smc_wr_buf   *wr_rx_bufs;    /* WR recv payload buffers */
0112     struct ib_recv_wr   *wr_rx_ibs; /* WR recv meta data */
0113     struct ib_sge       *wr_rx_sges;    /* WR recv scatter meta data */
0114     /* above three vectors have wr_rx_cnt elements and use the same index */
0115     dma_addr_t      wr_rx_dma_addr; /* DMA address of wr_rx_bufs */
0116     dma_addr_t      wr_rx_v2_dma_addr; /* DMA address of v2 rx buf*/
0117     u64         wr_rx_id;   /* seq # of last recv WR */
0118     u64         wr_rx_id_compl; /* seq # of last completed WR */
0119     u32         wr_rx_cnt;  /* number of WR recv buffers */
0120     unsigned long       wr_rx_tstamp;   /* jiffies when last buf rx */
0121     wait_queue_head_t       wr_rx_empty_wait; /* wait for RQ empty */
0122 
0123     struct ib_reg_wr    wr_reg;     /* WR register memory region */
0124     wait_queue_head_t   wr_reg_wait;    /* wait for wr_reg result */
0125     atomic_t        wr_reg_refcnt;  /* reg refs to link */
0126     enum smc_wr_reg_state   wr_reg_state;   /* state of wr_reg request */
0127 
0128     u8          gid[SMC_GID_SIZE];/* gid matching used vlan id*/
0129     u8          sgid_index; /* gid index for vlan id      */
0130     u32         peer_qpn;   /* QP number of peer */
0131     enum ib_mtu     path_mtu;   /* used mtu */
0132     enum ib_mtu     peer_mtu;   /* mtu size of peer */
0133     u32         psn_initial;    /* QP tx initial packet seqno */
0134     u32         peer_psn;   /* QP rx initial packet seqno */
0135     u8          peer_mac[ETH_ALEN]; /* = gid[8:10||13:15] */
0136     u8          peer_gid[SMC_GID_SIZE]; /* gid of peer*/
0137     u8          link_id;    /* unique # within link group */
0138     u8          link_uid[SMC_LGR_ID_SIZE]; /* unique lnk id */
0139     u8          peer_link_uid[SMC_LGR_ID_SIZE]; /* peer uid */
0140     u8          link_idx;   /* index in lgr link array */
0141     u8          link_is_asym;   /* is link asymmetric? */
0142     u8          clearing : 1;   /* link is being cleared */
0143     refcount_t      refcnt;     /* link reference count */
0144     struct smc_link_group   *lgr;       /* parent link group */
0145     struct work_struct  link_down_wrk;  /* wrk to bring link down */
0146     char            ibname[IB_DEVICE_NAME_MAX]; /* ib device name */
0147     int         ndev_ifidx; /* network device ifindex */
0148 
0149     enum smc_link_state state;      /* state of link */
0150     struct delayed_work llc_testlink_wrk; /* testlink worker */
0151     struct completion   llc_testlink_resp; /* wait for rx of testlink */
0152     int         llc_testlink_time; /* testlink interval */
0153     atomic_t        conn_cnt; /* connections on this link */
0154 };
0155 
0156 /* The SMC protocol allows up to 8 parallel links per link group; for now
0157  * this implementation supports at most 3.
0158  */
0159 #define SMC_LINKS_PER_LGR_MAX   3
0160 #define SMC_SINGLE_LINK     0
0161 
0162 /* tx/rx buffer list element for sndbufs list and rmbs list of a lgr */
0163 struct smc_buf_desc {
0164     struct list_head    list;
0165     void            *cpu_addr;  /* virtual address of buffer */
0166     struct page     *pages;
0167     int         len;        /* length of buffer */
0168     u32         used;       /* currently used / unused */
0169     union {
0170         struct { /* SMC-R */
0171             struct sg_table sgt[SMC_LINKS_PER_LGR_MAX];
0172                     /* virtual buffer */
0173             struct ib_mr    *mr[SMC_LINKS_PER_LGR_MAX];
0174                     /* memory region: for rmb and
0175                      * vzalloced sndbuf
0176                      * incl. rkey provided to peer
0177                      * and lkey provided to local
0178                      */
0179             u32     order;  /* allocation order */
0180 
0181             u8      is_conf_rkey;
0182                     /* confirm_rkey done */
0183             u8      is_reg_mr[SMC_LINKS_PER_LGR_MAX];
0184                     /* mem region registered */
0185             u8      is_map_ib[SMC_LINKS_PER_LGR_MAX];
0186                     /* mem region mapped to lnk */
0187             u8      is_dma_need_sync;
0188             u8      is_reg_err;
0189                     /* buffer registration err */
0190             u8      is_vm;
0191                     /* virtually contiguous */
0192         };
0193         struct { /* SMC-D */
0194             unsigned short  sba_idx;
0195                     /* SBA index number */
0196             u64     token;
0197                     /* DMB token number */
0198             dma_addr_t  dma_addr;
0199                     /* DMA address */
0200         };
0201     };
0202 };
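/* Note: which arm of the union above is valid depends on whether the owning
 * link group is SMC-R or SMC-D; see the is_smcd flag in struct smc_link_group
 * further below.
 */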
0203 
0204 struct smc_rtoken {             /* address/key of remote RMB */
0205     u64         dma_addr;
0206     u32         rkey;
0207 };
0208 
0209 #define SMC_BUF_MIN_SIZE    16384   /* minimum size of an RMB */
0210 #define SMC_RMBE_SIZES      16  /* number of distinct RMBE sizes */
0211 /* In theory, the RFC states that the largest size is 512K,
0212  * i.e. compressed value 5 and thus 6 sizes (0..5), even though
0213  * struct smc_clc_msg_accept_confirm.rmbe_size is a 4-bit value (0..15)
0214  */
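/* Illustrative mapping of the compressed encoding referred to above: a
 * compressed value c corresponds to a buffer of SMC_BUF_MIN_SIZE << c bytes,
 * e.g. c = 0 -> 16KB, c = 1 -> 32KB, ..., c = 5 -> 512KB
 * (see smc_uncompress_bufsize() declared further below).
 */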
0215 
0216 struct smcd_dev;
0217 
0218 enum smc_lgr_type {             /* redundancy state of lgr */
0219     SMC_LGR_NONE,           /* no active links, lgr to be deleted */
0220     SMC_LGR_SINGLE,         /* 1 active RNIC on each peer */
0221     SMC_LGR_SYMMETRIC,      /* 2 active RNICs on each peer */
0222     SMC_LGR_ASYMMETRIC_PEER,    /* local has 2, peer 1 active RNICs */
0223     SMC_LGR_ASYMMETRIC_LOCAL,   /* local has 1, peer 2 active RNICs */
0224 };
0225 
0226 enum smcr_buf_type {        /* types of SMC-R sndbufs and RMBs */
0227     SMCR_PHYS_CONT_BUFS = 0,
0228     SMCR_VIRT_CONT_BUFS = 1,
0229     SMCR_MIXED_BUFS     = 2,
0230 };
0231 
0232 enum smc_llc_flowtype {
0233     SMC_LLC_FLOW_NONE   = 0,
0234     SMC_LLC_FLOW_ADD_LINK   = 2,
0235     SMC_LLC_FLOW_DEL_LINK   = 4,
0236     SMC_LLC_FLOW_REQ_ADD_LINK = 5,
0237     SMC_LLC_FLOW_RKEY   = 6,
0238 };
0239 
0240 struct smc_llc_qentry;
0241 
0242 struct smc_llc_flow {
0243     enum smc_llc_flowtype type;
0244     struct smc_llc_qentry *qentry;
0245 };
0246 
0247 struct smc_link_group {
0248     struct list_head    list;
0249     struct rb_root      conns_all;  /* connection tree */
0250     rwlock_t        conns_lock; /* protects conns_all */
0251     unsigned int        conns_num;  /* current # of connections */
0252     unsigned short      vlan_id;    /* vlan id of link group */
0253 
0254     struct list_head    sndbufs[SMC_RMBE_SIZES];/* tx buffers */
0255     struct mutex        sndbufs_lock;   /* protects tx buffers */
0256     struct list_head    rmbs[SMC_RMBE_SIZES];   /* rx buffers */
0257     struct mutex        rmbs_lock;  /* protects rx buffers */
0258 
0259     u8          id[SMC_LGR_ID_SIZE];    /* unique lgr id */
0260     struct delayed_work free_work;  /* delayed freeing of an lgr */
0261     struct work_struct  terminate_work; /* abnormal lgr termination */
0262     struct workqueue_struct *tx_wq;     /* wq for conn. tx workers */
0263     u8          sync_err : 1;   /* lgr no longer fits to peer */
0264     u8          terminating : 1;/* lgr is terminating */
0265     u8          freeing : 1;    /* lgr is being freed */
0266 
0267     refcount_t      refcnt;     /* lgr reference count */
0268     bool            is_smcd;    /* SMC-R or SMC-D */
0269     u8          smc_version;
0270     u8          negotiated_eid[SMC_MAX_EID_LEN];
0271     u8          peer_os;    /* peer operating system */
0272     u8          peer_smc_release;
0273     u8          peer_hostname[SMC_MAX_HOSTNAME_LEN];
0274     union {
0275         struct { /* SMC-R */
0276             enum smc_lgr_role   role;
0277                         /* client or server */
0278             struct smc_link     lnk[SMC_LINKS_PER_LGR_MAX];
0279                         /* smc link */
0280             struct smc_wr_v2_buf    *wr_rx_buf_v2;
0281                         /* WR v2 recv payload buffer */
0282             struct smc_wr_v2_buf    *wr_tx_buf_v2;
0283                         /* WR v2 send payload buffer */
0284             char            peer_systemid[SMC_SYSTEMID_LEN];
0285                         /* unique system_id of peer */
0286             struct smc_rtoken   rtokens[SMC_RMBS_PER_LGR_MAX]
0287                         [SMC_LINKS_PER_LGR_MAX];
0288                         /* remote addr/key pairs */
0289             DECLARE_BITMAP(rtokens_used_mask, SMC_RMBS_PER_LGR_MAX);
0290                         /* used rtoken elements */
0291             u8          next_link_id;
0292             enum smc_lgr_type   type;
0293             enum smcr_buf_type  buf_type;
0294                         /* redundancy state */
0295             u8          pnet_id[SMC_MAX_PNETID_LEN + 1];
0296                         /* pnet id of this lgr */
0297             struct list_head    llc_event_q;
0298                         /* queue for llc events */
0299             spinlock_t      llc_event_q_lock;
0300                         /* protects llc_event_q */
0301             struct mutex        llc_conf_mutex;
0302                         /* protects lgr reconfig. */
0303             struct work_struct  llc_add_link_work;
0304             struct work_struct  llc_del_link_work;
0305             struct work_struct  llc_event_work;
0306                         /* llc event worker */
0307             wait_queue_head_t   llc_flow_waiter;
0308                         /* w4 next llc event */
0309             wait_queue_head_t   llc_msg_waiter;
0310                         /* w4 next llc msg */
0311             struct smc_llc_flow llc_flow_lcl;
0312                         /* llc local control field */
0313             struct smc_llc_flow llc_flow_rmt;
0314                         /* llc remote control field */
0315             struct smc_llc_qentry   *delayed_event;
0316                         /* arrived when flow active */
0317             spinlock_t      llc_flow_lock;
0318                         /* protects llc flow */
0319             int         llc_testlink_time;
0320                         /* link keep alive time */
0321             u32         llc_termination_rsn;
0322                         /* rsn code for termination */
0323             u8          nexthop_mac[ETH_ALEN];
0324             u8          uses_gateway;
0325             __be32          saddr;
0326                         /* net namespace */
0327             struct net      *net;
0328         };
0329         struct { /* SMC-D */
0330             u64         peer_gid;
0331                         /* Peer GID (remote) */
0332             struct smcd_dev     *smcd;
0333                         /* ISM device for VLAN reg. */
0334             u8          peer_shutdown : 1;
0335                         /* peer triggered shutdown */
0336         };
0337     };
0338 };
0339 
0340 struct smc_clc_msg_local;
0341 
0342 #define GID_LIST_SIZE   2
0343 
0344 struct smc_gidlist {
0345     u8          len;
0346     u8          list[GID_LIST_SIZE][SMC_GID_SIZE];
0347 };
0348 
0349 struct smc_init_info_smcrv2 {
0350     /* Input fields */
0351     __be32          saddr;
0352     struct sock     *clc_sk;
0353     __be32          daddr;
0354 
0355     /* Output fields when saddr is set */
0356     struct smc_ib_device    *ib_dev_v2;
0357     u8          ib_port_v2;
0358     u8          ib_gid_v2[SMC_GID_SIZE];
0359 
0360     /* Additional output fields when clc_sk and daddr is set as well */
0361     u8          uses_gateway;
0362     u8          nexthop_mac[ETH_ALEN];
0363 
0364     struct smc_gidlist  gidlist;
0365 };
0366 
0367 struct smc_init_info {
0368     u8          is_smcd;
0369     u8          smc_type_v1;
0370     u8          smc_type_v2;
0371     u8          first_contact_peer;
0372     u8          first_contact_local;
0373     unsigned short      vlan_id;
0374     u32         rc;
0375     u8          negotiated_eid[SMC_MAX_EID_LEN];
0376     /* SMC-R */
0377     u8          smcr_version;
0378     u8          check_smcrv2;
0379     u8          peer_gid[SMC_GID_SIZE];
0380     u8          peer_mac[ETH_ALEN];
0381     u8          peer_systemid[SMC_SYSTEMID_LEN];
0382     struct smc_ib_device    *ib_dev;
0383     u8          ib_gid[SMC_GID_SIZE];
0384     u8          ib_port;
0385     u32         ib_clcqpn;
0386     struct smc_init_info_smcrv2 smcrv2;
0387     /* SMC-D */
0388     u64         ism_peer_gid[SMC_MAX_ISM_DEVS + 1];
0389     struct smcd_dev     *ism_dev[SMC_MAX_ISM_DEVS + 1];
0390     u16         ism_chid[SMC_MAX_ISM_DEVS + 1];
0391     u8          ism_offered_cnt; /* # of ISM devices offered */
0392     u8          ism_selected;    /* index of selected ISM dev*/
0393     u8          smcd_version;
0394 };
0395 
0396 /* Find the connection associated with the given alert token in the link group.
0397  * To use rbtrees we have to implement our own search core.
0398  * Requires @conns_lock
0399  * @token   alert token to search for
0400  * @lgr      link group to search in
0401  * Returns connection associated with token if found, NULL otherwise.
0402  */
0403 static inline struct smc_connection *smc_lgr_find_conn(
0404     u32 token, struct smc_link_group *lgr)
0405 {
0406     struct smc_connection *res = NULL;
0407     struct rb_node *node;
0408 
0409     node = lgr->conns_all.rb_node;
0410     while (node) {
0411         struct smc_connection *cur = rb_entry(node,
0412                     struct smc_connection, alert_node);
0413 
0414         if (cur->alert_token_local > token) {
0415             node = node->rb_left;
0416         } else {
0417             if (cur->alert_token_local < token) {
0418                 node = node->rb_right;
0419             } else {
0420                 res = cur;
0421                 break;
0422             }
0423         }
0424     }
0425 
0426     return res;
0427 }
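/* Illustrative use of smc_lgr_find_conn(); the caller must hold conns_lock as
 * noted above. The lock flavour shown here is an assumption for the example:
 *
 *	read_lock_bh(&lgr->conns_lock);
 *	conn = smc_lgr_find_conn(token, lgr);
 *	read_unlock_bh(&lgr->conns_lock);
 */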
0428 
0429 static inline bool smc_conn_lgr_valid(struct smc_connection *conn)
0430 {
0431     return conn->lgr && conn->alert_token_local;
0432 }
0433 
0434 /*
0435  * Returns true if the specified link is usable.
0436  *
0437  * Usable means the link is ready to receive RDMA messages, map memory
0438  * on the link, etc. It does not guarantee that RDMA messages can be sent
0439  * on this link; if sending RDMA messages is needed, use smc_link_sendable()
0440  */
0441 static inline bool smc_link_usable(struct smc_link *lnk)
0442 {
0443     if (lnk->state == SMC_LNK_UNUSED || lnk->state == SMC_LNK_INACTIVE)
0444         return false;
0445     return true;
0446 }
0447 
0448 /*
0449  * Returns true if the specified link is ready to receive AND send RDMA
0450  * messages.
0451  *
0452  * For the client side during first contact, the underlying QP may still be
0453  * in RESET or RTR while the link state is ACTIVATING, so the check in
0454  * smc_link_usable() is not strong enough. Code paths that need to send CDC
0455  * or LLC messages should use smc_link_sendable(); otherwise use smc_link_usable()
0456  */
0457 static inline bool smc_link_sendable(struct smc_link *lnk)
0458 {
0459     return smc_link_usable(lnk) &&
0460         lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
0461 }
0462 
0463 static inline bool smc_link_active(struct smc_link *lnk)
0464 {
0465     return lnk->state == SMC_LNK_ACTIVE;
0466 }
0467 
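/* Note on smc_gid_be16_convert() below: the GID is formatted as 8 groups of
 * 4 hex digits separated by colons, i.e. 39 characters plus the terminating
 * NUL, so @buf must provide at least 40 bytes. For example, a raw GID whose
 * first two bytes are fe 80 and whose last byte is 01 (all others zero) is
 * rendered as "fe80:0000:0000:0000:0000:0000:0000:0001".
 */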
0468 static inline void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
0469 {
0470     sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
0471         be16_to_cpu(((__be16 *)gid_raw)[0]),
0472         be16_to_cpu(((__be16 *)gid_raw)[1]),
0473         be16_to_cpu(((__be16 *)gid_raw)[2]),
0474         be16_to_cpu(((__be16 *)gid_raw)[3]),
0475         be16_to_cpu(((__be16 *)gid_raw)[4]),
0476         be16_to_cpu(((__be16 *)gid_raw)[5]),
0477         be16_to_cpu(((__be16 *)gid_raw)[6]),
0478         be16_to_cpu(((__be16 *)gid_raw)[7]));
0479 }
0480 
0481 struct smc_pci_dev {
0482     __u32       pci_fid;
0483     __u16       pci_pchid;
0484     __u16       pci_vendor;
0485     __u16       pci_device;
0486     __u8        pci_id[SMC_PCI_ID_STR_LEN];
0487 };
0488 
0489 static inline void smc_set_pci_values(struct pci_dev *pci_dev,
0490                       struct smc_pci_dev *smc_dev)
0491 {
0492     smc_dev->pci_vendor = pci_dev->vendor;
0493     smc_dev->pci_device = pci_dev->device;
0494     snprintf(smc_dev->pci_id, sizeof(smc_dev->pci_id), "%s",
0495          pci_name(pci_dev));
0496 #if IS_ENABLED(CONFIG_S390)
0497     { /* Set s390 specific PCI information */
0498     struct zpci_dev *zdev;
0499 
0500     zdev = to_zpci(pci_dev);
0501     smc_dev->pci_fid = zdev->fid;
0502     smc_dev->pci_pchid = zdev->pchid;
0503     }
0504 #endif
0505 }
0506 
0507 struct smc_sock;
0508 struct smc_clc_msg_accept_confirm;
0509 
0510 void smc_lgr_cleanup_early(struct smc_link_group *lgr);
0511 void smc_lgr_terminate_sched(struct smc_link_group *lgr);
0512 void smc_lgr_hold(struct smc_link_group *lgr);
0513 void smc_lgr_put(struct smc_link_group *lgr);
0514 void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport);
0515 void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport);
0516 void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
0517             unsigned short vlan);
0518 void smc_smcd_terminate_all(struct smcd_dev *dev);
0519 void smc_smcr_terminate_all(struct smc_ib_device *smcibdev);
0520 int smc_buf_create(struct smc_sock *smc, bool is_smcd);
0521 int smc_uncompress_bufsize(u8 compressed);
0522 int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_link *link,
0523                 struct smc_clc_msg_accept_confirm *clc);
0524 int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey);
0525 int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey);
0526 void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
0527             __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey);
0528 void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
0529              __be64 nw_vaddr, __be32 nw_rkey);
0530 void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn);
0531 void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn);
0532 int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini);
0533 
0534 void smc_conn_free(struct smc_connection *conn);
0535 int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini);
0536 void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
0537 int smc_core_init(void);
0538 void smc_core_exit(void);
0539 
0540 int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
0541            u8 link_idx, struct smc_init_info *ini);
0542 void smcr_link_clear(struct smc_link *lnk, bool log);
0543 void smcr_link_hold(struct smc_link *lnk);
0544 void smcr_link_put(struct smc_link *lnk);
0545 void smc_switch_link_and_count(struct smc_connection *conn,
0546                    struct smc_link *to_lnk);
0547 int smcr_buf_map_lgr(struct smc_link *lnk);
0548 int smcr_buf_reg_lgr(struct smc_link *lnk);
0549 void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
0550 void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
0551                 enum smc_lgr_type new_type, int asym_lnk_idx);
0552 int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *rmb_desc);
0553 struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
0554                   struct smc_link *from_lnk, bool is_dev_err);
0555 void smcr_link_down_cond(struct smc_link *lnk);
0556 void smcr_link_down_cond_sched(struct smc_link *lnk);
0557 int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb);
0558 int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);
0559 int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb);
0560 int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb);
0561 
0562 static inline struct smc_link_group *smc_get_lgr(struct smc_link *link)
0563 {
0564     return link->lgr;
0565 }
0566 #endif