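/*
 * rxe_mcast.c implements multicast group support for the rxe driver.
 *
 * Multicast groups (struct rxe_mcg) are kept in a red-black tree
 * (rxe->mcg_tree) indexed by mgid. Each QP attached to a group is
 * represented by a struct rxe_mca entry on the group's qp_list.
 * The tree and the qp lists are protected by rxe->mcg_lock, while
 * dev_mc_add()/dev_mc_del() are called outside of that lock.
 */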
#include "rxe.h"
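
/**
 * rxe_mcast_add - add the multicast MAC address for an mgid to the netdev
 * @rxe: rxe device object
 * @mgid: multicast group id
 *
 * Maps the mgid to an ethernet multicast address and registers it
 * with the underlying net device.
 *
 * Returns: 0 on success else an error
 */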
static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
        unsigned char ll_addr[ETH_ALEN];

        ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);

        return dev_mc_add(rxe->ndev, ll_addr);
}
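
/**
 * rxe_mcast_del - delete the multicast MAC address for an mgid from the netdev
 * @rxe: rxe device object
 * @mgid: multicast group id
 *
 * Returns: 0 on success else an error
 */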
static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
{
        unsigned char ll_addr[ETH_ALEN];

        ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);

        return dev_mc_del(rxe->ndev, ll_addr);
}
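
/**
 * __rxe_insert_mcg - insert an mcg into the red-black tree (rxe->mcg_tree)
 * @mcg: mcg object with an embedded mgid
 *
 * Context: caller must hold rxe->mcg_lock
 */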
static void __rxe_insert_mcg(struct rxe_mcg *mcg)
{
        struct rb_root *tree = &mcg->rxe->mcg_tree;
        struct rb_node **link = &tree->rb_node;
        struct rb_node *node = NULL;
        struct rxe_mcg *tmp;
        int cmp;

        while (*link) {
                node = *link;
                tmp = rb_entry(node, struct rxe_mcg, node);

                cmp = memcmp(&tmp->mgid, &mcg->mgid, sizeof(mcg->mgid));
                if (cmp > 0)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        rb_link_node(&mcg->node, node, link);
        rb_insert_color(&mcg->node, tree);
}
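
/**
 * __rxe_remove_mcg - remove an mcg from the red-black tree
 * @mcg: mcg object with an embedded mgid
 *
 * Context: caller must hold rxe->mcg_lock
 */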
static void __rxe_remove_mcg(struct rxe_mcg *mcg)
{
        rb_erase(&mcg->node, &mcg->rxe->mcg_tree);
}
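
/**
 * __rxe_lookup_mcg - lookup mcg in rxe->mcg_tree while holding lock
 * @rxe: rxe device object
 * @mgid: multicast group id
 *
 * Context: caller must hold rxe->mcg_lock
 * Returns: the mcg on success (and takes a ref on it) else NULL
 */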
static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
                                        union ib_gid *mgid)
{
        struct rb_root *tree = &rxe->mcg_tree;
        struct rxe_mcg *mcg;
        struct rb_node *node;
        int cmp;

        node = tree->rb_node;

        while (node) {
                mcg = rb_entry(node, struct rxe_mcg, node);

                cmp = memcmp(&mcg->mgid, mgid, sizeof(*mgid));

                if (cmp > 0)
                        node = node->rb_left;
                else if (cmp < 0)
                        node = node->rb_right;
                else
                        break;
        }

        if (node) {
                kref_get(&mcg->ref_cnt);
                return mcg;
        }

        return NULL;
}
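
/**
 * rxe_lookup_mcg - look up an mcg in the red-black tree
 * @rxe: rxe device object
 * @mgid: multicast group id
 *
 * Returns: the mcg if found else NULL. The found mcg carries a
 * reference that the caller must drop with
 * kref_put(&mcg->ref_cnt, rxe_cleanup_mcg).
 */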
struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
        struct rxe_mcg *mcg;

        spin_lock_bh(&rxe->mcg_lock);
        mcg = __rxe_lookup_mcg(rxe, mgid);
        spin_unlock_bh(&rxe->mcg_lock);

        return mcg;
}
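
/**
 * __rxe_init_mcg - initialize a new mcg and insert it into rxe->mcg_tree
 * @rxe: rxe device object
 * @mgid: multicast group id
 * @mcg: new mcg object
 *
 * Context: caller must hold rxe->mcg_lock
 */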
static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
                           struct rxe_mcg *mcg)
{
        kref_init(&mcg->ref_cnt);
        memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
        INIT_LIST_HEAD(&mcg->qp_list);
        mcg->rxe = rxe;

        /* the caller holds the ref from kref_init() above. Take an
         * additional ref for the pointer stored in the red-black tree
         * by __rxe_insert_mcg(). Inserting the mcg makes it visible to
         * readers, so it must be done last after the object is ready.
         */
        kref_get(&mcg->ref_cnt);
        __rxe_insert_mcg(mcg);
}
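
/**
 * rxe_get_mcg - lookup or allocate an mcg
 * @rxe: rxe device object
 * @mgid: multicast group id
 *
 * Returns: the mcg (and takes a ref on it) on success,
 * else ERR_PTR(err)
 */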
static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
        struct rxe_mcg *mcg, *tmp;
        int err;

        if (rxe->attr.max_mcast_grp == 0)
                return ERR_PTR(-EINVAL);

        /* check to see if the mcg already exists */
        mcg = rxe_lookup_mcg(rxe, mgid);
        if (mcg)
                return mcg;

        /* check to see if the number of mcg's has reached the device limit */
        if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
                err = -ENOMEM;
                goto err_dec;
        }

        /* speculative alloc of a new mcg without holding the lock */
        mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
        if (!mcg) {
                err = -ENOMEM;
                goto err_dec;
        }

        spin_lock_bh(&rxe->mcg_lock);
        /* re-check to see if someone else just added it */
        tmp = __rxe_lookup_mcg(rxe, mgid);
        if (tmp) {
                spin_unlock_bh(&rxe->mcg_lock);
                atomic_dec(&rxe->mcg_num);
                kfree(mcg);
                return tmp;
        }

        __rxe_init_mcg(rxe, mgid, mcg);
        spin_unlock_bh(&rxe->mcg_lock);

        /* add mcast address outside of lock */
        err = rxe_mcast_add(rxe, mgid);
        if (!err)
                return mcg;

        /* the mcg was already inserted into the tree above and is
         * visible to readers; tear it down properly instead of
         * freeing it while it is still in the tree
         */
        spin_lock_bh(&rxe->mcg_lock);
        __rxe_destroy_mcg(mcg);
        spin_unlock_bh(&rxe->mcg_lock);
        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
        return ERR_PTR(err);

err_dec:
        atomic_dec(&rxe->mcg_num);
        return ERR_PTR(err);
}
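
/**
 * rxe_cleanup_mcg - cleanup mcg for kref_put
 * @kref: struct kref embedded in mcg
 *
 * Called when the last reference to the mcg is dropped; by then
 * the mcg has already been removed from the red-black tree.
 */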
void rxe_cleanup_mcg(struct kref *kref)
{
        struct rxe_mcg *mcg = container_of(kref, typeof(*mcg), ref_cnt);

        kfree(mcg);
}
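
/**
 * __rxe_destroy_mcg - destroy mcg object holding rxe->mcg_lock
 * @mcg: the mcg object
 *
 * Context: caller must hold rxe->mcg_lock and no qp's are attached.
 * The mcg is removed from the tree and the tree's reference is
 * dropped; the object is not freed while the caller still holds a ref.
 */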
static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
{
        struct rxe_dev *rxe = mcg->rxe;

        /* remove mcg from the red-black tree and drop the tree's ref */
        __rxe_remove_mcg(mcg);
        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

        atomic_dec(&rxe->mcg_num);
}
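
/**
 * rxe_destroy_mcg - destroy mcg object
 * @mcg: the mcg object
 *
 * Deletes the multicast MAC address from the net device and then
 * tears the mcg down while holding rxe->mcg_lock.
 */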
static void rxe_destroy_mcg(struct rxe_mcg *mcg)
{
        /* delete mcast address outside of lock */
        rxe_mcast_del(mcg->rxe, &mcg->mgid);

        spin_lock_bh(&mcg->rxe->mcg_lock);
        __rxe_destroy_mcg(mcg);
        spin_unlock_bh(&mcg->rxe->mcg_lock);
}
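
/**
 * __rxe_init_mca - initialize a new mca holding lock
 * @qp: qp object
 * @mcg: mcg object
 * @mca: empty space for the new mca
 *
 * Context: caller must hold references on qp and mcg, hold
 * rxe->mcg_lock and pass memory for the new mca
 *
 * Returns: 0 on success else an error
 */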
static int __rxe_init_mca(struct rxe_qp *qp, struct rxe_mcg *mcg,
                          struct rxe_mca *mca)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        int n;

        /* check against the device-wide attach limit */
        n = atomic_inc_return(&rxe->mcg_attach);
        if (n > rxe->attr.max_total_mcast_qp_attach) {
                atomic_dec(&rxe->mcg_attach);
                return -ENOMEM;
        }

        /* check against the per-group attach limit */
        n = atomic_inc_return(&mcg->qp_num);
        if (n > rxe->attr.max_mcast_qp_attach) {
                atomic_dec(&mcg->qp_num);
                atomic_dec(&rxe->mcg_attach);
                return -ENOMEM;
        }

        atomic_inc(&qp->mcg_num);

        rxe_get(qp);
        mca->qp = qp;

        list_add_tail(&mca->qp_list, &mcg->qp_list);

        return 0;
}
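
/**
 * rxe_attach_mcg - attach a qp to an mcg if not already attached
 * @mcg: mcg object
 * @qp: qp object
 *
 * Context: caller must hold references on qp and mcg
 * Returns: 0 on success else an error
 */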
static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
{
        struct rxe_dev *rxe = mcg->rxe;
        struct rxe_mca *mca, *tmp;
        int err;

        /* check to see if the qp is already a member of the group */
        spin_lock_bh(&rxe->mcg_lock);
        list_for_each_entry(mca, &mcg->qp_list, qp_list) {
                if (mca->qp == qp) {
                        spin_unlock_bh(&rxe->mcg_lock);
                        return 0;
                }
        }
        spin_unlock_bh(&rxe->mcg_lock);

        /* speculative alloc of a new mca without holding the lock */
        mca = kzalloc(sizeof(*mca), GFP_KERNEL);
        if (!mca)
                return -ENOMEM;

        spin_lock_bh(&rxe->mcg_lock);
        /* re-check to see if someone else just attached the qp */
        list_for_each_entry(tmp, &mcg->qp_list, qp_list) {
                if (tmp->qp == qp) {
                        kfree(mca);
                        err = 0;
                        goto out;
                }
        }

        err = __rxe_init_mca(qp, mcg, mca);
        if (err)
                kfree(mca);
out:
        spin_unlock_bh(&rxe->mcg_lock);
        return err;
}
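
/**
 * __rxe_cleanup_mca - cleanup mca object holding lock
 * @mca: mca object
 * @mcg: mcg object
 *
 * Context: caller must hold a reference to mcg and rxe->mcg_lock
 */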
static void __rxe_cleanup_mca(struct rxe_mca *mca, struct rxe_mcg *mcg)
{
        list_del(&mca->qp_list);

        atomic_dec(&mcg->qp_num);
        atomic_dec(&mcg->rxe->mcg_attach);
        atomic_dec(&mca->qp->mcg_num);
        rxe_put(mca->qp);

        kfree(mca);
}
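
/**
 * rxe_detach_mcg - detach a qp from an mcg
 * @mcg: mcg object
 * @qp: qp object
 *
 * Returns: 0 on success else an error if the qp is not attached
 */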
static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
{
        struct rxe_dev *rxe = mcg->rxe;
        struct rxe_mca *mca, *tmp;

        spin_lock_bh(&rxe->mcg_lock);
        list_for_each_entry_safe(mca, tmp, &mcg->qp_list, qp_list) {
                if (mca->qp == qp) {
                        __rxe_cleanup_mca(mca, mcg);

                        /* if the number of qp's attached to the
                         * mcast group falls to zero go ahead and
                         * tear it down. This will not free the
                         * object since we are still holding a ref
                         * from the caller
                         */
                        if (atomic_read(&mcg->qp_num) <= 0) {
                                __rxe_destroy_mcg(mcg);
                                spin_unlock_bh(&rxe->mcg_lock);

                                /* drop the mcast MAC address outside of
                                 * the lock to balance rxe_mcast_add();
                                 * mcg->mgid is still valid since the
                                 * caller holds a ref on mcg
                                 */
                                rxe_mcast_del(rxe, &mcg->mgid);
                                return 0;
                        }

                        spin_unlock_bh(&rxe->mcg_lock);
                        return 0;
                }
        }

        /* we didn't find the qp on the list */
        spin_unlock_bh(&rxe->mcg_lock);
        return -EINVAL;
}
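
/**
 * rxe_attach_mcast - attach a qp to a multicast group
 * @ibqp: (IB) qp object
 * @mgid: multicast group id
 * @mlid: multicast LID, not used for RoCE
 *
 * Entry point used by the ib core for multicast attach, wired up as
 * the .attach_mcast op in the driver's ib_device_ops (rxe_verbs.c).
 *
 * Returns: 0 on success else an error
 */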
int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibqp->device);
        struct rxe_qp *qp = to_rqp(ibqp);
        struct rxe_mcg *mcg;

        /* takes a ref on mcg if successful */
        mcg = rxe_get_mcg(rxe, mgid);
        if (IS_ERR(mcg))
                return PTR_ERR(mcg);

        err = rxe_attach_mcg(mcg, qp);

        /* if we failed to attach the first qp to mcg tear it down */
        if (atomic_read(&mcg->qp_num) == 0)
                rxe_destroy_mcg(mcg);

        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

        return err;
}
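
/**
 * rxe_detach_mcast - detach a qp from a multicast group
 * @ibqp: (IB) qp object
 * @mgid: multicast group id
 * @mlid: multicast LID, not used for RoCE
 *
 * The matching .detach_mcast entry point.
 *
 * Returns: 0 on success else an error
 */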
int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
        struct rxe_dev *rxe = to_rdev(ibqp->device);
        struct rxe_qp *qp = to_rqp(ibqp);
        struct rxe_mcg *mcg;
        int err;

        mcg = rxe_lookup_mcg(rxe, mgid);
        if (!mcg)
                return -EINVAL;

        err = rxe_detach_mcg(mcg, qp);
        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

        return err;
}