// SPDX-License-Identifier: GPL-2.0
/* Shared Memory Communications Direct over ISM devices (SMC-D)
 *
 * Functions for ISM device.
 *
 * Copyright IBM Corp. 2018
 */

#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/page.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_netlink.h"

struct smcd_dev_list smcd_dev_list = {
	.list = LIST_HEAD_INIT(smcd_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
};

static bool smc_ism_v2_capable;
static u8 smc_ism_v2_system_eid[SMC_MAX_EID_LEN];

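/* Test if an ISM communication is possible - same CPC */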
int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
{
	return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0,
					   vlan_id);
}

void smc_ism_get_system_eid(u8 **eid)
{
	if (!smc_ism_v2_capable)
		*eid = NULL;
	else
		*eid = smc_ism_v2_system_eid;
}

u16 smc_ism_get_chid(struct smcd_dev *smcd)
{
	return smcd->ops->get_chid(smcd);
}

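/* HW supports ISM V2 and thus System EID is defined */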
bool smc_ism_is_v2_capable(void)
{
	return smc_ism_v2_capable;
}

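/* Set a connection using this DMBE. */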
void smc_ism_set_conn(struct smc_connection *conn)
{
	unsigned long flags;

	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn;
	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}

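/* Unset a connection using this DMBE. */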
void smc_ism_unset_conn(struct smc_connection *conn)
{
	unsigned long flags;

	if (!conn->rmb_desc)
		return;

	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL;
	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}

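/* Register a VLAN identifier with the ISM device. Use a reference count
 * and add a VLAN identifier only when the first DMB using this VLAN
 * is registered.
 */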
int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
	struct smc_ism_vlanid *new_vlan, *vlan;
	unsigned long flags;
	int rc = 0;

	if (!vlanid)			/* No valid vlan id */
		return -EINVAL;

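	/* create new vlan entry, in case we need it */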
	new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL);
	if (!new_vlan)
		return -ENOMEM;
	new_vlan->vlanid = vlanid;
	refcount_set(&new_vlan->refcnt, 1);

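	/* if there is an existing entry, increase count and return */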
	spin_lock_irqsave(&smcd->lock, flags);
	list_for_each_entry(vlan, &smcd->vlan, list) {
		if (vlan->vlanid == vlanid) {
			refcount_inc(&vlan->refcnt);
			kfree(new_vlan);
			goto out;
		}
	}

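	/* no existing entry found.
	 * add new entry to device; might fail, e.g., if HW limit reached
	 */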
	if (smcd->ops->add_vlan_id(smcd, vlanid)) {
		kfree(new_vlan);
		rc = -EIO;
		goto out;
	}
	list_add_tail(&new_vlan->list, &smcd->vlan);
out:
	spin_unlock_irqrestore(&smcd->lock, flags);
	return rc;
}

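/* Unregister a VLAN identifier with the ISM device. Use a reference count
 * and remove a VLAN identifier only when the last DMB using this VLAN
 * is unregistered.
 */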
int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
	struct smc_ism_vlanid *vlan;
	unsigned long flags;
	bool found = false;
	int rc = 0;

	if (!vlanid)			/* No valid vlan id */
		return -EINVAL;

	spin_lock_irqsave(&smcd->lock, flags);
	list_for_each_entry(vlan, &smcd->vlan, list) {
		if (vlan->vlanid == vlanid) {
			if (!refcount_dec_and_test(&vlan->refcnt))
				goto out;
			found = true;
			break;
		}
	}
	if (!found) {
		rc = -ENOENT;
		goto out;		/* VLAN id not in table */
	}

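	/* Found and the last reference just gone */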
	if (smcd->ops->del_vlan_id(smcd, vlanid))
		rc = -EIO;
	list_del(&vlan->list);
	kfree(vlan);
out:
	spin_unlock_irqrestore(&smcd->lock, flags);
	return rc;
}

int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
{
	struct smcd_dmb dmb;
	int rc = 0;

	if (!dmb_desc->dma_addr)
		return rc;

	memset(&dmb, 0, sizeof(dmb));
	dmb.dmb_tok = dmb_desc->token;
	dmb.sba_idx = dmb_desc->sba_idx;
	dmb.cpu_addr = dmb_desc->cpu_addr;
	dmb.dma_addr = dmb_desc->dma_addr;
	dmb.dmb_len = dmb_desc->len;
	rc = smcd->ops->unregister_dmb(smcd, &dmb);
	if (!rc || rc == ISM_ERROR) {
		dmb_desc->cpu_addr = NULL;
		dmb_desc->dma_addr = 0;
	}

	return rc;
}

int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
			 struct smc_buf_desc *dmb_desc)
{
	struct smcd_dmb dmb;
	int rc;

	memset(&dmb, 0, sizeof(dmb));
	dmb.dmb_len = dmb_len;
	dmb.sba_idx = dmb_desc->sba_idx;
	dmb.vlan_id = lgr->vlan_id;
	dmb.rgid = lgr->peer_gid;
	rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);
	if (!rc) {
		dmb_desc->sba_idx = dmb.sba_idx;
		dmb_desc->token = dmb.dmb_tok;
		dmb_desc->cpu_addr = dmb.cpu_addr;
		dmb_desc->dma_addr = dmb.dma_addr;
		dmb_desc->len = dmb.dmb_len;
	}
	return rc;
}

static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	struct smc_pci_dev smc_pci_dev;
	struct nlattr *port_attrs;
	struct nlattr *attrs;
	int use_cnt = 0;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_DEV_SMCD);
	if (!nlh)
		goto errmsg;
	attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCD);
	if (!attrs)
		goto errout;
	use_cnt = atomic_read(&smcd->lgr_cnt);
	if (nla_put_u32(skb, SMC_NLA_DEV_USE_CNT, use_cnt))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, use_cnt > 0))
		goto errattr;
	memset(&smc_pci_dev, 0, sizeof(smc_pci_dev));
	smc_set_pci_values(to_pci_dev(smcd->dev.parent), &smc_pci_dev);
	if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev.pci_fid))
		goto errattr;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev.pci_pchid))
		goto errattr;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev.pci_vendor))
		goto errattr;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev.pci_device))
		goto errattr;
	if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev.pci_id))
		goto errattr;

	port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT);
	if (!port_attrs)
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR, smcd->pnetid_by_user))
		goto errportattr;
	memcpy(smc_pnet, smcd->pnetid, SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
		goto errportattr;

	nla_nest_end(skb, port_attrs);
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errportattr:
	nla_nest_cancel(skb, port_attrs);
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	nlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static void smc_nl_prep_smcd_dev(struct smcd_dev_list *dev_list,
				 struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	int snum = cb_ctx->pos[0];
	struct smcd_dev *smcd;
	int num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcd, &dev_list->list, list) {
		if (num < snum)
			goto next;
		if (smc_nl_handle_smcd_dev(smcd, skb, cb))
			goto errout;
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
}

int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_prep_smcd_dev(&smcd_dev_list, skb, cb);
	return skb->len;
}

struct smc_ism_event_work {
	struct work_struct work;
	struct smcd_dev *smcd;
	struct smcd_event event;
};

#define ISM_EVENT_REQUEST		0x0001
#define ISM_EVENT_RESPONSE		0x0002
#define ISM_EVENT_REQUEST_IR		0x00000001
#define ISM_EVENT_CODE_SHUTDOWN	0x80
#define ISM_EVENT_CODE_TESTLINK	0x83

union smcd_sw_event_info {
	u64 info;
	struct {
		u8 uid[SMC_LGR_ID_SIZE];
		unsigned short vlan_id;
		u16 code;
	};
};

static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
{
	union smcd_sw_event_info ev_info;

	ev_info.info = wrk->event.info;
	switch (wrk->event.code) {
	case ISM_EVENT_CODE_SHUTDOWN:	/* Peer shut down DMBs */
		smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
		break;
	case ISM_EVENT_CODE_TESTLINK:	/* Activity timer */
		if (ev_info.code == ISM_EVENT_REQUEST) {
			ev_info.code = ISM_EVENT_RESPONSE;
			wrk->smcd->ops->signal_event(wrk->smcd,
						     wrk->event.tok,
						     ISM_EVENT_REQUEST_IR,
						     ISM_EVENT_CODE_TESTLINK,
						     ev_info.info);
		}
		break;
	}
}

int smc_ism_signal_shutdown(struct smc_link_group *lgr)
{
	int rc;
	union smcd_sw_event_info ev_info;

	if (lgr->peer_shutdown)
		return 0;

	memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
	ev_info.vlan_id = lgr->vlan_id;
	ev_info.code = ISM_EVENT_REQUEST;
	rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
					  ISM_EVENT_REQUEST_IR,
					  ISM_EVENT_CODE_SHUTDOWN,
					  ev_info.info);
	return rc;
}

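/* worker for SMC-D events */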
static void smc_ism_event_work(struct work_struct *work)
{
	struct smc_ism_event_work *wrk =
		container_of(work, struct smc_ism_event_work, work);

	switch (wrk->event.type) {
	case ISM_EVENT_GID:	/* GID event, token is peer GID */
		smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
		break;
	case ISM_EVENT_DMB:
		break;
	case ISM_EVENT_SWR:	/* Software defined event */
		smcd_handle_sw_event(wrk);
		break;
	}
	kfree(wrk);
}

static void smcd_release(struct device *dev)
{
	struct smcd_dev *smcd = container_of(dev, struct smcd_dev, dev);

	kfree(smcd->conn);
	kfree(smcd);
}

struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
				const struct smcd_ops *ops, int max_dmbs)
{
	struct smcd_dev *smcd;

	smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
	if (!smcd)
		return NULL;
	smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
			     GFP_KERNEL);
	if (!smcd->conn) {
		kfree(smcd);
		return NULL;
	}

	smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
						 WQ_MEM_RECLAIM, name);
	if (!smcd->event_wq) {
		kfree(smcd->conn);
		kfree(smcd);
		return NULL;
	}

	smcd->dev.parent = parent;
	smcd->dev.release = smcd_release;
	device_initialize(&smcd->dev);
	dev_set_name(&smcd->dev, name);
	smcd->ops = ops;
	if (smc_pnetid_by_dev_port(parent, 0, smcd->pnetid))
		smc_pnetid_by_table_smcd(smcd);

	spin_lock_init(&smcd->lock);
	spin_lock_init(&smcd->lgr_lock);
	INIT_LIST_HEAD(&smcd->vlan);
	INIT_LIST_HEAD(&smcd->lgr_list);
	init_waitqueue_head(&smcd->lgrs_deleted);
	return smcd;
}
EXPORT_SYMBOL_GPL(smcd_alloc_dev);

int smcd_register_dev(struct smcd_dev *smcd)
{
	int rc;

	mutex_lock(&smcd_dev_list.mutex);
	if (list_empty(&smcd_dev_list.list)) {
		u8 *system_eid = NULL;

		system_eid = smcd->ops->get_system_eid();
		if (system_eid[24] != '0' || system_eid[28] != '0') {
			smc_ism_v2_capable = true;
			memcpy(smc_ism_v2_system_eid, system_eid,
			       SMC_MAX_EID_LEN);
		}
	}
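	/* sort list: devices without pnetid before devices with pnetid */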
	if (smcd->pnetid[0])
		list_add_tail(&smcd->list, &smcd_dev_list.list);
	else
		list_add(&smcd->list, &smcd_dev_list.list);
	mutex_unlock(&smcd_dev_list.mutex);

	pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
			    dev_name(&smcd->dev), smcd->pnetid,
			    smcd->pnetid_by_user ? " (user defined)" : "");

	rc = device_add(&smcd->dev);
	if (rc) {
		mutex_lock(&smcd_dev_list.mutex);
		list_del(&smcd->list);
		mutex_unlock(&smcd_dev_list.mutex);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(smcd_register_dev);

void smcd_unregister_dev(struct smcd_dev *smcd)
{
	pr_warn_ratelimited("smc: removing smcd device %s\n",
			    dev_name(&smcd->dev));
	mutex_lock(&smcd_dev_list.mutex);
	list_del_init(&smcd->list);
	mutex_unlock(&smcd_dev_list.mutex);
	smcd->going_away = 1;
	smc_smcd_terminate_all(smcd);
	destroy_workqueue(smcd->event_wq);

	device_del(&smcd->dev);
}
EXPORT_SYMBOL_GPL(smcd_unregister_dev);

void smcd_free_dev(struct smcd_dev *smcd)
{
	put_device(&smcd->dev);
}
EXPORT_SYMBOL_GPL(smcd_free_dev);

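/* SMCD Device event handler. Called from ISM device interrupt handler.
 * Parameters are smcd device pointer,
 * - event->type (0 --> DMB, 1 --> GID),
 * - event->code (event code),
 * - event->tok (either DMB token when event type 0, or GID when event type 1)
 * - event->time (time of day)
 * - event->info (debug info).
 *
 * Context:
 * - Function called in IRQ context from ISM device driver event handler.
 */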
void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event)
{
	struct smc_ism_event_work *wrk;

	if (smcd->going_away)
		return;

	/* copy event to event work queue, and let it be handled there */
	wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
	if (!wrk)
		return;
	INIT_WORK(&wrk->work, smc_ism_event_work);
	wrk->smcd = smcd;
	wrk->event = *event;
	queue_work(smcd->event_wq, &wrk->work);
}
EXPORT_SYMBOL_GPL(smcd_handle_event);

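/* SMCD Device interrupt handler. Called from ISM device interrupt handler.
 * Parameters are the smcd device pointer, the DMB number, and the DMBE
 * bitmask. Find the connection and schedule the tasklet for this connection.
 *
 * Context:
 * - Function called in IRQ context from ISM device driver IRQ handler.
 */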
void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno, u16 dmbemask)
{
	struct smc_connection *conn = NULL;
	unsigned long flags;

	spin_lock_irqsave(&smcd->lock, flags);
	conn = smcd->conn[dmbno];
	if (conn && !conn->killed)
		tasklet_schedule(&conn->rx_tsklet);
	spin_unlock_irqrestore(&smcd->lock, flags);
}
EXPORT_SYMBOL_GPL(smcd_handle_irq);

void __init smc_ism_init(void)
{
	smc_ism_v2_capable = false;
	memset(smc_ism_v2_system_eid, 0, SMC_MAX_EID_LEN);
}