// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);

bool qede_rdma_supported(struct qede_dev *dev)
{
	return dev->dev_info.common.rdma_supported;
}

static void _qede_rdma_dev_add(struct qede_dev *edev)
{
	if (!qedr_drv)
		return;

	/* Leftovers from previous error recovery */
	edev->rdma_info.exp_recovery = false;
	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
						 edev->ndev);
}

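/* Set up the deferred-event infrastructure for this device: the event
 * list, the refcount/completion pair used to synchronize against
 * teardown, and the single-threaded workqueue that services events.
 */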
static int qede_rdma_create_wq(struct qede_dev *edev)
{
	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
	kref_init(&edev->rdma_info.refcnt);
	init_completion(&edev->rdma_info.event_comp);

	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
	if (!edev->rdma_info.rdma_wq) {
		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

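/* Flush the workqueue, then cancel and free any event nodes still sitting
 * on the device's event list.
 */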
static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
	struct list_head *head = &edev->rdma_info.rdma_event_list;
	struct qede_rdma_event_work *event_node;

	flush_workqueue(edev->rdma_info.rdma_wq);
	while (!list_empty(head)) {
		event_node = list_entry(head->next, struct qede_rdma_event_work,
					list);
		cancel_work_sync(&event_node->work);
		list_del(&event_node->list);
		kfree(event_node);
	}
}

static void qede_rdma_complete_event(struct kref *ref)
{
	struct qede_rdma_dev *rdma_dev =
		container_of(ref, struct qede_rdma_dev, refcnt);

	/* no more events will be added after this */
	complete(&rdma_dev->event_comp);
}

static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	/* Avoid race with add_event flow, make sure it finishes before
	 * we start accessing the list and cleaning up the work
	 */
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
	wait_for_completion(&edev->rdma_info.event_comp);

	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
	edev->rdma_info.rdma_wq = NULL;
}

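/* Register a qede device for RDMA: add it to the global list and, if the
 * qedr driver is already loaded, create its qedr counterpart. Nothing is
 * done during error recovery, since qedr was never fully stopped.
 */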
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
	int rc;

	if (!qede_rdma_supported(edev))
		return 0;

	/* Cannot start qedr while recovering since it wasn't fully stopped */
	if (recovery)
		return 0;

	rc = qede_rdma_create_wq(edev);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&edev->rdma_info.entry);
	mutex_lock(&qedr_dev_list_lock);
	list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
	_qede_rdma_dev_add(edev);
	mutex_unlock(&qedr_dev_list_lock);

	return rc;
}

static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
		qedr_drv->remove(edev->rdma_info.qedr_dev);
}

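/* Tear down the RDMA side of a qede device. On a normal removal the
 * workqueue is destroyed and the device is unlinked from the global list;
 * during error recovery only the qedr instance is removed and the device
 * is flagged so that later events are ignored.
 */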
void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
	if (!qede_rdma_supported(edev))
		return;

	/* Cannot remove qedr while recovering since it wasn't fully stopped */
	if (!recovery) {
		qede_rdma_destroy_wq(edev);
		mutex_lock(&qedr_dev_list_lock);
		if (!edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
		edev->rdma_info.qedr_dev = NULL;
		list_del(&edev->rdma_info.entry);
		mutex_unlock(&qedr_dev_list_lock);
	} else {
		if (!edev->rdma_info.exp_recovery) {
			mutex_lock(&qedr_dev_list_lock);
			_qede_rdma_dev_remove(edev);
			mutex_unlock(&qedr_dev_list_lock);
		}
		edev->rdma_info.exp_recovery = true;
	}
}

static void _qede_rdma_dev_open(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_close(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
	mutex_unlock(&qedr_dev_list_lock);
}

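/* Called by the qedr module when it loads. Attaches qedr to every qede
 * device discovered so far and sends an open notification for netdevs
 * that are already running.
 */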
int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);

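/* Called by the qedr module when it unloads. Detaches qedr from every
 * device that still has an active qedr instance.
 */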
void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		/* If device has experienced recovery it was already removed */
		if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);

static void qede_rdma_changeaddr(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}

static void qede_rdma_change_mtu(struct qede_dev *edev)
{
	if (qede_rdma_supported(edev)) {
		if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
			qedr_drv->notify(edev->rdma_info.qedr_dev,
					 QEDE_CHANGE_MTU);
	}
}

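/* Reuse an idle event node from the device's list or, failing that,
 * allocate a new one. GFP_ATOMIC is used since callers may not be able
 * to sleep.
 */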
static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
	struct qede_rdma_event_work *event_node = NULL;
	bool found = false;

	list_for_each_entry(event_node, &edev->rdma_info.rdma_event_list,
			    list) {
		if (!work_pending(&event_node->work)) {
			found = true;
			break;
		}
	}

	if (!found) {
		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
		if (!event_node) {
			DP_NOTICE(edev,
				  "qedr: Could not allocate memory for rdma work\n");
			return NULL;
		}
		list_add_tail(&event_node->list,
			      &edev->rdma_info.rdma_event_list);
	}

	return event_node;
}

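/* Workqueue handler: dispatch a deferred event to the corresponding
 * notification helper.
 */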
static void qede_rdma_handle_event(struct work_struct *work)
{
	struct qede_rdma_event_work *event_node;
	enum qede_rdma_event event;
	struct qede_dev *edev;

	event_node = container_of(work, struct qede_rdma_event_work, work);
	event = event_node->event;
	edev = event_node->ptr;

	switch (event) {
	case QEDE_UP:
		qede_rdma_dev_open(edev);
		break;
	case QEDE_DOWN:
		qede_rdma_dev_close(edev);
		break;
	case QEDE_CLOSE:
		qede_rdma_dev_shutdown(edev);
		break;
	case QEDE_CHANGE_ADDR:
		qede_rdma_changeaddr(edev);
		break;
	case QEDE_CHANGE_MTU:
		qede_rdma_change_mtu(edev);
		break;
	default:
		DP_NOTICE(edev, "Invalid rdma event %d", event);
	}
}

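/* Queue an event for deferred handling on the device's workqueue. The
 * reference taken here keeps qede_rdma_destroy_wq() from tearing down the
 * event list while the node is being allocated and queued.
 */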
static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	struct qede_rdma_event_work *event_node;

	/* If a recovery was experienced avoid adding the event */
	if (edev->rdma_info.exp_recovery)
		return;

	if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
		return;

	/* We don't want the cleanup flow to start while we're allocating and
	 * scheduling the work
	 */
	if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
		return; /* already being destroyed */

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		goto out;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);

out:
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
}

void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}

void qede_rdma_event_change_mtu(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_MTU);
}