// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

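/* handle-list helpers: writers are serialized by the AE device's lock;
 * readers may traverse the list under RCU
 */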
static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(node, head);
	spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del_rcu(node);
	spin_unlock_irqrestore(lock, flags);
}

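/* allocate a page-backed buffer and record it in the descriptor control block */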
static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p = dev_alloc_pages(order);

	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (unlikely(!cb->priv))
		return;

	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)cb->priv);

	cb->priv = NULL;
}

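/* map/unmap the buffer behind a control block for DMA in the ring's direction */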
static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else if (cb->length)
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

static struct hnae_buf_ops hnae_bops = {
	.alloc_buffer = hnae_alloc_buffer,
	.free_buffer = hnae_free_buffer,
	.map_buffer = hnae_map_buffer,
	.unmap_buffer = hnae_unmap_buffer,
};

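/* match an AE class device against the fwnode supplied by the enet driver;
 * find_ae() returns the matching device, and the reference taken by
 * class_find_device() is dropped later via put_device()
 */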
static int __ae_match(struct device *dev, const void *data)
{
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

	if (dev_of_node(hdev->dev))
		return (data == &hdev->dev->of_node->fwnode);
	else if (is_acpi_node(hdev->dev->fwnode))
		return (data == hdev->dev->fwnode);

	dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
	return 0;
}

static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
	struct device *dev;

	WARN_ON(!fwnode);

	dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);

	return dev ? cls_to_ae_dev(dev) : NULL;
}

static void hnae_free_buffers(struct hnae_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hnae_free_buffer_detach(ring, i);
}

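/* allocate a buffer for every descriptor in the ring and map it for DMA */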
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j);
	return ret;
}

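/* free the descriptor array along with its DMA mapping */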
static void hnae_free_desc(struct hnae_ring *ring)
{
	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 ring_to_dma_dir(ring));
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

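/* allocate the descriptor array and map it for DMA, without buffers attached */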
static int hnae_alloc_desc(struct hnae_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
					     ring->desc, size,
					     ring_to_dma_dir(ring));
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

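/* finalize a ring; for rx rings this also frees the attached buffers */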
static void hnae_fini_ring(struct hnae_ring *ring)
{
	if (is_rx_ring(ring))
		hnae_free_buffers(ring);

	hnae_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

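/* initialize a ring; rx rings also get their buffers allocated and attached */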
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->q = q;
	ring->flags = flags;
	ring->coal_param = q->handle->coal_param;
	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
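
	/* for both tx and rx rings, next_to_use and next_to_clean start at 0 */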
	assert(ring->next_to_use == 0);
	assert(ring->next_to_clean == 0);

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hnae_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (is_rx_ring(ring)) {
		ret = hnae_alloc_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hnae_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
			   struct hnae_ae_dev *dev)
{
	int ret;

	q->dev = dev;
	q->handle = h;

	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
	if (ret)
		goto out;

	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
	if (ret)
		goto out_with_tx_ring;

	if (dev->ops->init_queue)
		dev->ops->init_queue(q);

	return 0;

out_with_tx_ring:
	hnae_fini_ring(&q->tx_ring);
out:
	return ret;
}

static void hnae_fini_queue(struct hnae_queue *q)
{
	if (q->dev->ops->fini_queue)
		q->dev->ops->fini_queue(q);

	hnae_fini_ring(&q->tx_ring);
	hnae_fini_ring(&q->rx_ring);
}
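
/* ae_chain - notifier chain called when an AE device registers */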
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
	if (raw_notifier_chain_unregister(&ae_chain, nb))
		dev_err(NULL, "notifier chain unregister failed\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);
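
/* tear down, reset, and reinitialize every queue of a handle */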
int hnae_reinit_handle(struct hnae_handle *handle)
{
	int i, j;
	int ret;

	for (i = 0; i < handle->q_num; i++)
		hnae_fini_queue(handle->qs[i]);

	if (handle->dev->ops->reset)
		handle->dev->ops->reset(handle);

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
		if (ret)
			goto out_when_init_queue;
	}
	return 0;
out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);
	return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);
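
/**
 * hnae_get_handle - get a handle from the AE
 * @owner_dev: the device that will use the handle
 * @fwnode: fwnode of the acceleration engine
 * @port_id: the id of the AE port to initialize
 * @bops: buffer operations of the corresponding NIC, or NULL to use the
 *        default page-based ops
 *
 * Return: handle pointer on success, ERR_PTR on failure
 */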
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct fwnode_handle *fwnode,
				    u32 port_id,
				    struct hnae_buf_ops *bops)
{
	struct hnae_ae_dev *dev;
	struct hnae_handle *handle;
	int i, j;
	int ret;

	dev = find_ae(fwnode);
	if (!dev)
		return ERR_PTR(-ENODEV);

	handle = dev->ops->get_handle(dev, port_id);
	if (IS_ERR(handle)) {
		put_device(&dev->cls_dev);
		return handle;
	}

	handle->dev = dev;
	handle->owner_dev = owner_dev;
	handle->bops = bops ? bops : &hnae_bops;
	handle->eport_id = port_id;

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], dev);
		if (ret)
			goto out_when_init_queue;
	}

	__module_get(dev->owner);

	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

	return handle;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);

	put_device(&dev->cls_dev);

	/* propagate the real error instead of assuming -ENOMEM */
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hnae_get_handle);
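
/* release a handle obtained from hnae_get_handle() */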
void hnae_put_handle(struct hnae_handle *h)
{
	struct hnae_ae_dev *dev = h->dev;
	int i;

	for (i = 0; i < h->q_num; i++)
		hnae_fini_queue(h->qs[i]);

	if (h->dev->ops->reset)
		h->dev->ops->reset(h);

	hnae_list_del(&dev->lock, &h->node);

	if (dev->ops->put_handle)
		dev->ops->put_handle(h);

	module_put(dev->owner);

	put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);

static void hnae_release(struct device *dev)
{
}
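
/**
 * hnae_ae_register - register an AE engine with the hnae framework
 * @hdev: the AE device
 * @owner: the module that provides this AE device
 *
 * NOTE: duplicate names are not checked
 */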
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
	static atomic_t id = ATOMIC_INIT(-1);
	int ret;

	if (!hdev->dev)
		return -ENODEV;

	if (!hdev->ops || !hdev->ops->get_handle ||
	    !hdev->ops->toggle_ring_irq ||
	    !hdev->ops->get_status || !hdev->ops->adjust_link)
		return -EINVAL;

	hdev->owner = owner;
	hdev->id = (int)atomic_inc_return(&id);
	hdev->cls_dev.parent = hdev->dev;
	hdev->cls_dev.class = hnae_class;
	hdev->cls_dev.release = hnae_release;
	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
	ret = device_register(&hdev->cls_dev);
	if (ret)
		return ret;

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&hdev->handle_list);
	spin_lock_init(&hdev->lock);

	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
	if (ret)
		dev_dbg(hdev->dev,
			"no notifier for AE: %s\n", hdev->name);

	return 0;
}
EXPORT_SYMBOL(hnae_ae_register);
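
/**
 * hnae_ae_unregister - unregister an AE engine from the hnae framework
 * @hdev: the AE device
 */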
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
	device_unregister(&hdev->cls_dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
	hnae_class = class_create(THIS_MODULE, "hnae");
	return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
	class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");