0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034 #include <linux/string.h>
0035 #include <linux/slab.h>
0036
0037 #include <rdma/ib_verbs.h>
0038 #include <rdma/ib_cache.h>
0039
0040 #include "mthca_dev.h"
0041
/*
 * Static-rate encodings used by Tavor-mode HCAs.  These are the values
 * written into (and read back from) the low bits of the AV msg_sr field;
 * tavor_rate_to_ib() below maps them back to IB rate enums.
 */
enum {
	MTHCA_RATE_TAVOR_FULL = 0,	/* full port speed */
	MTHCA_RATE_TAVOR_1X = 1,	/* 1X SDR (2.5 Gb/s) */
	MTHCA_RATE_TAVOR_4X = 2,	/* 4X SDR (10 Gb/s) */
	MTHCA_RATE_TAVOR_1X_DDR = 3	/* 1X DDR (5 Gb/s) */
};
0048
/*
 * Static-rate encodings used by mem-free HCAs.  Unlike the Tavor values,
 * these are fractions of the current port rate rather than absolute
 * speeds; see memfree_rate_to_ib() / ib_rate_to_memfree() below.
 */
enum {
	MTHCA_RATE_MEMFREE_FULL = 0,	/* full port rate */
	MTHCA_RATE_MEMFREE_QUARTER = 1,	/* 1/4 port rate */
	MTHCA_RATE_MEMFREE_EIGHTH = 2,	/* 1/8 port rate */
	MTHCA_RATE_MEMFREE_HALF = 3	/* 1/2 port rate */
};
0055
/*
 * Device-visible address vector layout (big-endian fields).  Instances
 * are either DMA-mapped (PCI pool / kmalloc) or copied into device DDR
 * with memcpy_toio() in mthca_create_ah(), so the field order and sizes
 * must match what the hardware expects -- do not reorder.
 */
struct mthca_av {
	__be32 port_pd;			/* port number (bits 31:24) | PD number */
	u8     reserved1;
	u8     g_slid;			/* bit 7: GRH present; low 7 bits: source path bits */
	__be16 dlid;			/* destination LID */
	u8     reserved2;
	u8     gid_index;		/* index into the per-device GID table */
	u8     msg_sr;			/* (3 << 4) max message size | static rate in low bits */
	u8     hop_limit;		/* GRH hop limit */
	__be32 sl_tclass_flowlabel;	/* SL (bits 31:28) | tclass (27:20) | flow label (19:0) */
	__be32 dgid[4];			/* destination GID (16 bytes) */
};
0068
0069 static enum ib_rate memfree_rate_to_ib(u8 mthca_rate, u8 port_rate)
0070 {
0071 switch (mthca_rate) {
0072 case MTHCA_RATE_MEMFREE_EIGHTH:
0073 return mult_to_ib_rate(port_rate >> 3);
0074 case MTHCA_RATE_MEMFREE_QUARTER:
0075 return mult_to_ib_rate(port_rate >> 2);
0076 case MTHCA_RATE_MEMFREE_HALF:
0077 return mult_to_ib_rate(port_rate >> 1);
0078 case MTHCA_RATE_MEMFREE_FULL:
0079 default:
0080 return mult_to_ib_rate(port_rate);
0081 }
0082 }
0083
0084 static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
0085 {
0086 switch (mthca_rate) {
0087 case MTHCA_RATE_TAVOR_1X: return IB_RATE_2_5_GBPS;
0088 case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS;
0089 case MTHCA_RATE_TAVOR_4X: return IB_RATE_10_GBPS;
0090 default: return mult_to_ib_rate(port_rate);
0091 }
0092 }
0093
/*
 * Translate a device static-rate encoding into an IB rate enum,
 * dispatching on HCA family (mem-free vs. Tavor) since the two use
 * different encodings.  @port is 1-based, hence the [port - 1] index.
 */
enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u32 port)
{
	if (mthca_is_memfree(dev)) {
		/*
		 * stat_rate_support == 0x3 with a nonzero rate is treated
		 * specially and always reported as 2.5 Gb/s -- presumably a
		 * quirk of older firmware; TODO confirm against FW docs.
		 */
		if (dev->limits.stat_rate_support == 0x3 && mthca_rate)
			return IB_RATE_2_5_GBPS;

		return memfree_rate_to_ib(mthca_rate, dev->rate[port - 1]);
	} else
		return tavor_rate_to_ib(mthca_rate, dev->rate[port - 1]);
}
0105
/*
 * Pick the mem-free static-rate encoding that throttles the current
 * port rate (@cur_rate) down to no more than the requested rate
 * (@req_rate).  Both are rate multipliers (units of 2.5 Gb/s).
 * Returns MTHCA_RATE_MEMFREE_FULL (0) when no throttling is needed.
 */
static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate)
{
	if (cur_rate <= req_rate)
		return 0;

	/*
	 * (cur_rate - 1) / req_rate is the ceiling of cur_rate/req_rate
	 * minus one, i.e. how many extra "slots" of delay are needed.
	 * 0 extra -> full rate, 1 -> half, 2-3 -> quarter, more -> eighth.
	 */
	switch ((cur_rate - 1) / req_rate) {
	case 0:	 return MTHCA_RATE_MEMFREE_FULL;
	case 1:	 return MTHCA_RATE_MEMFREE_HALF;
	case 2:
	case 3:	 return MTHCA_RATE_MEMFREE_QUARTER;
	default: return MTHCA_RATE_MEMFREE_EIGHTH;
	}
}
0123
0124 static u8 ib_rate_to_tavor(u8 static_rate)
0125 {
0126 switch (static_rate) {
0127 case IB_RATE_2_5_GBPS: return MTHCA_RATE_TAVOR_1X;
0128 case IB_RATE_5_GBPS: return MTHCA_RATE_TAVOR_1X_DDR;
0129 case IB_RATE_10_GBPS: return MTHCA_RATE_TAVOR_4X;
0130 default: return MTHCA_RATE_TAVOR_FULL;
0131 }
0132 }
0133
/*
 * Compute the device static-rate encoding for a requested IB rate on
 * @port (1-based).  Returns 0 (full rate) when no rate was requested or
 * the requested rate is at least the port's current rate; otherwise
 * converts using the encoding appropriate to the HCA family.
 */
u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u32 port)
{
	u8 rate;

	if (!static_rate || ib_rate_to_mult(static_rate) >= dev->rate[port - 1])
		return 0;

	if (mthca_is_memfree(dev))
		rate = ib_rate_to_memfree(ib_rate_to_mult(static_rate),
					  dev->rate[port - 1]);
	else
		rate = ib_rate_to_tavor(static_rate);

	/*
	 * If the firmware doesn't advertise support for the chosen
	 * encoding (stat_rate_support is a bitmask indexed by encoding),
	 * fall back to encoding 1 rather than program an unsupported one.
	 */
	if (!(dev->limits.stat_rate_support & (1 << rate)))
		rate = 1;

	return rate;
}
0152
/*
 * Create an address handle, filling in @ah and its address vector from
 * @ah_attr.  Three AV storage strategies are used, recorded in ah->type:
 *
 *   MTHCA_AH_KMALLOC  - mem-free HCAs: plain kmalloc, no DMA address.
 *   MTHCA_AH_ON_HCA   - Tavor with visible DDR and no special QPs:
 *                       a slot in on-device DDR, written via memcpy_toio.
 *   MTHCA_AH_PCI_POOL - fallback: DMA-coherent pool allocation.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 * Called from interrupt-capable context, hence GFP_ATOMIC throughout --
 * presumably; TODO confirm against callers.
 */
int mthca_create_ah(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct rdma_ah_attr *ah_attr,
		    struct mthca_ah *ah)
{
	u32 index = -1;		/* -1 (wrapped) doubles as "no DDR slot" */
	struct mthca_av *av = NULL;

	ah->type = MTHCA_AH_PCI_POOL;

	if (mthca_is_memfree(dev)) {
		ah->av = kmalloc(sizeof *ah->av, GFP_ATOMIC);
		if (!ah->av)
			return -ENOMEM;

		ah->type = MTHCA_AH_KMALLOC;
		av = ah->av;
	} else if (!atomic_read(&pd->sqp_count) &&
		 !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		index = mthca_alloc(&dev->av_table.alloc);

		/* fall back to PCI-pool allocation when no DDR slot is free */
		if (index == -1)
			goto on_hca_fail;

		/* staging buffer: built in host memory, copied to DDR below */
		av = kmalloc(sizeof *av, GFP_ATOMIC);
		if (!av)
			goto on_hca_fail;

		ah->type = MTHCA_AH_ON_HCA;
		ah->avdma  = dev->av_table.ddr_av_base +
			index * MTHCA_AV_SIZE;
	}

on_hca_fail:
	/* still PCI_POOL here means both paths above were skipped or failed */
	if (ah->type == MTHCA_AH_PCI_POOL) {
		ah->av = dma_pool_zalloc(dev->av_table.pool,
					 GFP_ATOMIC, &ah->avdma);
		if (!ah->av)
			return -ENOMEM;

		av = ah->av;
	}

	ah->key = pd->ntmr.ibmr.lkey;

	/* Fill in the device-format AV (all multi-byte fields big-endian). */
	av->port_pd = cpu_to_be32(pd->pd_num |
				  (rdma_ah_get_port_num(ah_attr) << 24));
	av->g_slid  = rdma_ah_get_path_bits(ah_attr);
	av->dlid    = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
	av->msg_sr  = (3 << 4) | /* 2K message */
		mthca_get_rate(dev, rdma_ah_get_static_rate(ah_attr),
			       rdma_ah_get_port_num(ah_attr));
	av->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28);
	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);

		/* bit 7 of g_slid flags GRH presence (see mthca_ah_grh_present) */
		av->g_slid |= 0x80;
		av->gid_index = (rdma_ah_get_port_num(ah_attr) - 1) *
			dev->limits.gid_table_len +
			grh->sgid_index;
		av->hop_limit = grh->hop_limit;
		av->sl_tclass_flowlabel |=
			cpu_to_be32((grh->traffic_class << 20) |
				    grh->flow_label);
		memcpy(av->dgid, grh->dgid.raw, 16);
	} else {
		/* Arbel workaround -- low byte of GID must be 2 */
		av->dgid[3] = cpu_to_be32(2);
	}

	if (0) {
		/* debug dump of the raw AV, compiled out by default */
		int j;

		mthca_dbg(dev, "Created UDAV at %p/%08lx:\n",
			  av, (unsigned long) ah->avdma);
		for (j = 0; j < 8; ++j)
			printk(KERN_DEBUG "  [%2x] %08x\n",
			       j * 4, be32_to_cpu(((__be32 *) av)[j]));
	}

	if (ah->type == MTHCA_AH_ON_HCA) {
		/* copy the staged AV into its DDR slot and drop the staging copy */
		memcpy_toio(dev->av_table.av_map + index * MTHCA_AV_SIZE,
			    av, MTHCA_AV_SIZE);
		kfree(av);
	}

	return 0;
}
0242
0243 int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
0244 {
0245 switch (ah->type) {
0246 case MTHCA_AH_ON_HCA:
0247 mthca_free(&dev->av_table.alloc,
0248 (ah->avdma - dev->av_table.ddr_av_base) /
0249 MTHCA_AV_SIZE);
0250 break;
0251
0252 case MTHCA_AH_PCI_POOL:
0253 dma_pool_free(dev->av_table.pool, ah->av, ah->avdma);
0254 break;
0255
0256 case MTHCA_AH_KMALLOC:
0257 kfree(ah->av);
0258 break;
0259 }
0260
0261 return 0;
0262 }
0263
0264 int mthca_ah_grh_present(struct mthca_ah *ah)
0265 {
0266 return !!(ah->av->g_slid & 0x80);
0267 }
0268
/*
 * Extract UD header fields (LRH and, if present, GRH) from @ah's
 * address vector into @header.  AVs stored in device DDR
 * (MTHCA_AH_ON_HCA) cannot be read back this way and yield -EINVAL.
 * Returns 0 on success.
 */
int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
		  struct ib_ud_header *header)
{
	if (ah->type == MTHCA_AH_ON_HCA)
		return -EINVAL;

	/* SL occupies the top 4 bits of sl_tclass_flowlabel */
	header->lrh.service_level   = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
	header->lrh.destination_lid = ah->av->dlid;
	header->lrh.source_lid      = cpu_to_be16(ah->av->g_slid & 0x7f);
	if (mthca_ah_grh_present(ah)) {
		header->grh.traffic_class =
			(be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
		/* flow label is the low 20 bits; mask in big-endian form */
		header->grh.flow_label    =
			ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		header->grh.hop_limit     = ah->av->hop_limit;
		header->grh.source_gid = ah->ibah.sgid_attr->gid;
		memcpy(header->grh.destination_gid.raw,
		       ah->av->dgid, 16);
	}

	return 0;
}
0291
/*
 * Decode @ibah's stored address vector back into an rdma_ah_attr.
 * AVs living in device DDR (MTHCA_AH_ON_HCA) have no host-readable
 * copy, so querying them is not supported (-ENOSYS).
 * Returns 0 on success.
 */
int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct mthca_ah *ah   = to_mah(ibah);
	struct mthca_dev *dev = to_mdev(ibah->device);
	/* port number was packed into bits 31:24 of port_pd at create time */
	u32 port_num = be32_to_cpu(ah->av->port_pd) >> 24;

	/* Only implemented for AVs we can read from host memory */
	if (ah->type == MTHCA_AH_ON_HCA)
		return -ENOSYS;

	memset(attr, 0, sizeof *attr);
	attr->type = ibah->type;
	rdma_ah_set_dlid(attr, be16_to_cpu(ah->av->dlid));
	rdma_ah_set_sl(attr, be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28);
	rdma_ah_set_port_num(attr, port_num);
	rdma_ah_set_static_rate(attr,
				mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
						 port_num));
	rdma_ah_set_path_bits(attr, ah->av->g_slid & 0x7F);
	if (mthca_ah_grh_present(ah)) {
		u32 tc_fl = be32_to_cpu(ah->av->sl_tclass_flowlabel);

		/* undo the per-port gid_index offset applied at create time */
		rdma_ah_set_grh(attr, NULL,
				tc_fl & 0xfffff,
				ah->av->gid_index &
				(dev->limits.gid_table_len - 1),
				ah->av->hop_limit,
				(tc_fl >> 20) & 0xff);
		rdma_ah_set_dgid_raw(attr, ah->av->dgid);
	}

	return 0;
}
0325
/*
 * Set up the Tavor-mode AV resources: the DDR-slot allocator, the
 * DMA-coherent pool used as fallback storage, and (when device DDR is
 * visible) an ioremap of the DDR AV region.  Mem-free HCAs allocate AVs
 * with kmalloc instead and need none of this.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released via the goto-cleanup chain below.
 */
int mthca_init_av_table(struct mthca_dev *dev)
{
	int err;

	if (mthca_is_memfree(dev))
		return 0;

	err = mthca_alloc_init(&dev->av_table.alloc,
			       dev->av_table.num_ddr_avs,
			       dev->av_table.num_ddr_avs - 1,
			       0);
	if (err)
		return err;

	dev->av_table.pool = dma_pool_create("mthca_av", &dev->pdev->dev,
					     MTHCA_AV_SIZE,
					     MTHCA_AV_SIZE, 0);
	if (!dev->av_table.pool)
		goto out_free_alloc;

	if (!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		/* map the AV region of device DDR (BAR 4, offset from ddr_start) */
		dev->av_table.av_map = ioremap(pci_resource_start(dev->pdev, 4) +
					       dev->av_table.ddr_av_base -
					       dev->ddr_start,
					       dev->av_table.num_ddr_avs *
					       MTHCA_AV_SIZE);
		if (!dev->av_table.av_map)
			goto out_free_pool;
	} else
		dev->av_table.av_map = NULL;

	return 0;

 out_free_pool:
	dma_pool_destroy(dev->av_table.pool);

 out_free_alloc:
	mthca_alloc_cleanup(&dev->av_table.alloc);
	return -ENOMEM;
}
0366
/*
 * Tear down the Tavor-mode AV resources in reverse order of
 * mthca_init_av_table().  No-op on mem-free HCAs, which never set
 * any of this up.
 */
void mthca_cleanup_av_table(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return;

	/* av_map is NULL when DDR was hidden and never mapped */
	if (dev->av_table.av_map)
		iounmap(dev->av_table.av_map);
	dma_pool_destroy(dev->av_table.pool);
	mthca_alloc_cleanup(&dev->av_table.alloc);
}