// SPDX-License-Identifier: GPL-2.0+
/*
 * IBM Power Systems Virtual Management Channel (VMC) support.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/miscdevice.h>
#include <linux/sched/signal.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/vio.h>

#include "ibmvmc.h"

#define IBMVMC_DRIVER_VERSION "1.0"

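/*
 * Static global variables
 */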
static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);

static const char ibmvmc_driver_name[] = "ibmvmc";

static struct ibmvmc_struct ibmvmc;
static struct ibmvmc_hmc hmcs[MAX_HMCS];
static struct crq_server_adapter ibmvmc_adapter;

static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
static int ibmvmc_max_hmcs = DEFAULT_HMCS;
static int ibmvmc_max_mtu = DEFAULT_MTU;

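/**
 * h_copy_rdma - copy data between partitions using the H_COPY_RDMA hcall
 * @length:	length of the data to copy
 * @sliobn:	source liobn
 * @slioba:	source dma address
 * @dliobn:	destination liobn
 * @dlioba:	destination dma address
 */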
static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
			       u64 dliobn, u64 dlioba)
{
	long rc = 0;

	/* Ensure all writes to the source buffer are visible before the hcall */
	dma_wmb();
	pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx)\n",
		 length, sliobn, slioba, dliobn, dlioba);
	rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
				dliobn, dlioba);
	pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);

	return rc;
}

static inline void h_free_crq(uint32_t unit_address)
{
	long rc = 0;

	do {
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));

		rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
}

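/**
 * h_request_vmc - request a hypervisor virtual management channel device
 * @vmc_index:	filled in with the DRC index of the created VMC device
 *
 * Asks the hypervisor to create a VMC device for this partition, retrying
 * while the hypervisor reports busy.
 *
 * Return: hcall status (0 on success).
 */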
static inline long h_request_vmc(u32 *vmc_index)
{
	long rc = 0;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	do {
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));

		/* Call to request the VMC device from phyp */
		rc = plpar_hcall(H_REQUEST_VMC, retbuf);
		pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
		*vmc_index = retbuf[0];
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	return rc;
}

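/**
 * ibmvmc_handle_event - interrupt handler for CRQ events
 * @irq:	irq number being handled (unused)
 * @dev_instance: crq_server_adapter that received the interrupt
 *
 * Disables further interrupts and schedules ibmvmc_task to drain the CRQ.
 *
 * Return: always IRQ_HANDLED.
 */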
static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
{
	struct crq_server_adapter *adapter =
		(struct crq_server_adapter *)dev_instance;

	vio_disable_interrupts(to_vio_dev(adapter->dev));
	tasklet_schedule(&adapter->work_task);

	return IRQ_HANDLED;
}

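/**
 * ibmvmc_release_crq_queue - free the CRQ, its IRQ, tasklet, and reset task
 * @adapter:	crq_server_adapter struct
 */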
static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;

	free_irq(vdev->irq, (void *)adapter);
	tasklet_kill(&adapter->work_task);

	if (adapter->reset_task)
		kthread_stop(adapter->reset_task);

	h_free_crq(vdev->unit_address);
	dma_unmap_single(adapter->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs);
}

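/**
 * ibmvmc_reset_crq_queue - close, clear, and re-register the CRQ
 * @adapter:	crq_server_adapter struct
 *
 * Return: 0 on success, non-zero hcall status on failure.
 */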
static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;

	/* Close the CRQ */
	h_free_crq(vdev->unit_address);

	/* Clean out the queue */
	memset(queue->msgs, 0x00, PAGE_SIZE);
	queue->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == 2)
		/* Adapter is good, but other end is not ready */
		dev_warn(adapter->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);

	return rc;
}

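/**
 * crq_queue_next_crq - return the next pending CRQ element, if any
 * @queue:	crq_queue to inspect
 *
 * Return: pointer to the next valid CRQ element, or NULL if the queue
 * is empty.
 */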
static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
{
	struct ibmvmc_crq_msg *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;

		/* Ensure the read of the valid bit occurs before reading any
		 * other bits of the CRQ entry
		 */
		dma_rmb();
	} else {
		crq = NULL;
	}

	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}

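/**
 * ibmvmc_send_crq - send a CRQ element to the hypervisor
 * @adapter:	crq_server_adapter struct
 * @word1:	first 64 bits of the CRQ element
 * @word2:	second 64 bits of the CRQ element
 *
 * Return: 0 on success, non-zero hcall status on failure.
 */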
static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
			    u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	long rc = 0;

	dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
		vdev->unit_address, word1, word2);

	/*
	 * Ensure the command buffer is flushed to memory before handing it
	 * over to the other side to prevent it from fetching any stale data.
	 */
	dma_wmb();
	rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
	dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);

	return rc;
}

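/**
 * alloc_dma_buffer - allocate a kernel buffer and map it for DMA
 * @vdev:	vio_dev associated with the device
 * @size:	size of the buffer in bytes
 * @dma_handle:	filled in with the DMA address of the buffer
 *
 * Return: pointer to the buffer, or NULL on allocation or mapping failure.
 */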
static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
			      dma_addr_t *dma_handle)
{
	/* allocate memory */
	void *buffer = kzalloc(size, GFP_ATOMIC);

	if (!buffer) {
		*dma_handle = 0;
		return NULL;
	}

	/* DMA map */
	*dma_handle = dma_map_single(&vdev->dev, buffer, size,
				     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&vdev->dev, *dma_handle)) {
		*dma_handle = 0;
		kfree_sensitive(buffer);
		return NULL;
	}

	return buffer;
}

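/**
 * free_dma_buffer - unmap and free a buffer from alloc_dma_buffer
 * @vdev:	vio_dev associated with the device
 * @size:	size of the buffer in bytes
 * @vaddr:	kernel virtual address of the buffer
 * @dma_handle:	DMA address of the buffer
 */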
static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
			    dma_addr_t dma_handle)
{
	/* First, unmap */
	dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/* Now free the memory */
	kfree_sensitive(vaddr);
}

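/**
 * ibmvmc_get_valid_hmc_buffer - claim a free, valid buffer owned by this side
 * @hmc_index:	index of the HMC to search
 *
 * Return: pointer to the claimed buffer, marked in use, or NULL if none
 * is available.
 */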
static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_buffer *ret_buf = NULL;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index)
		return NULL;

	buffer = hmcs[hmc_index].buffer;

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid && buffer[i].free &&
		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
			buffer[i].free = 0;
			ret_buf = &buffer[i];
			break;
		}
	}

	return ret_buf;
}

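/**
 * ibmvmc_get_free_hmc_buffer - claim any free buffer owned by this side
 * @adapter:	crq_server_adapter struct
 * @hmc_index:	index of the HMC to search
 *
 * Unlike ibmvmc_get_valid_hmc_buffer, this does not require the buffer
 * to be marked valid.
 *
 * Return: pointer to the claimed buffer, or NULL if none is available.
 */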
static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
							u8 hmc_index)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_buffer *ret_buf = NULL;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
			 hmc_index);
		return NULL;
	}

	buffer = hmcs[hmc_index].buffer;

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].free &&
		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
			buffer[i].free = 0;
			ret_buf = &buffer[i];
			break;
		}
	}

	return ret_buf;
}

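/**
 * ibmvmc_free_hmc_buffer - mark an HMC buffer free under the HMC lock
 * @hmc:	ibmvmc_hmc that owns the buffer
 * @buffer:	ibmvmc_buffer to release
 */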
static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
				   struct ibmvmc_buffer *buffer)
{
	unsigned long flags;

	spin_lock_irqsave(&hmc->lock, flags);
	buffer->free = 1;
	spin_unlock_irqrestore(&hmc->lock, flags);
}

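/**
 * ibmvmc_count_hmc_buffers - count valid and free buffers for an HMC
 * @hmc_index:	index of the HMC to count
 * @valid:	filled in with the number of valid buffers
 * @free:	filled in with the number of free buffers
 */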
static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
				     unsigned int *free)
{
	struct ibmvmc_buffer *buffer;
	unsigned long i;
	unsigned long flags;

	if (hmc_index > ibmvmc.max_hmc_index)
		return;

	if (!valid || !free)
		return;

	*valid = 0; *free = 0;

	buffer = hmcs[hmc_index].buffer;
	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid) {
			*valid = *valid + 1;
			if (buffer[i].free)
				*free = *free + 1;
		}
	}

	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
}

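/**
 * ibmvmc_get_free_hmc - find a free HMC slot and move it to the initial state
 *
 * Return: pointer to the claimed ibmvmc_hmc, or NULL if all are in use.
 */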
static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
{
	unsigned long i;
	unsigned long flags;

	/*
	 * Find an available HMC
	 */
	for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
		spin_lock_irqsave(&hmcs[i].lock, flags);
		if (hmcs[i].state == ibmhmc_state_free) {
			hmcs[i].index = i;
			hmcs[i].state = ibmhmc_state_initial;
			spin_unlock_irqrestore(&hmcs[i].lock, flags);
			return &hmcs[i];
		}
		spin_unlock_irqrestore(&hmcs[i].lock, flags);
	}

	return NULL;
}

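/**
 * ibmvmc_return_hmc - reset an HMC connection and free its buffers
 * @hmc:		ibmvmc_hmc to return to the free state
 * @release_readers:	if true, invalidate the file session and wake up
 *			any blocked readers
 *
 * Return: 0 on success, -EIO if @hmc or its adapter is missing.
 */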
static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
{
	struct ibmvmc_buffer *buffer;
	struct crq_server_adapter *adapter;
	struct vio_dev *vdev;
	unsigned long i;
	unsigned long flags;

	if (!hmc || !hmc->adapter)
		return -EIO;

	if (release_readers) {
		if (hmc->file_session) {
			struct ibmvmc_file_session *session = hmc->file_session;

			session->valid = 0;
			wake_up_interruptible(&ibmvmc_read_wait);
		}
	}

	adapter = hmc->adapter;
	vdev = to_vio_dev(adapter->dev);

	spin_lock_irqsave(&hmc->lock, flags);
	hmc->index = 0;
	hmc->state = ibmhmc_state_free;
	hmc->queue_head = 0;
	hmc->queue_tail = 0;
	buffer = hmc->buffer;
	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid) {
			free_dma_buffer(vdev,
					ibmvmc.max_mtu,
					buffer[i].real_addr_local,
					buffer[i].dma_addr_local);
			dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
		}
		memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));

		hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
	}

	spin_unlock_irqrestore(&hmc->lock, flags);

	return 0;
}

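/**
 * ibmvmc_send_open - send an Interface Open message to the hypervisor
 * @buffer:	ibmvmc_buffer holding the HMC ID data for the partner
 * @hmc:	ibmvmc_hmc associated with this open request
 *
 * Copies the buffer to the partner via h_copy_rdma, moves the HMC to the
 * opening state, and sends the open CRQ.
 *
 * Return: 0 on success, -EIO on failure.
 */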
static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
			    struct ibmvmc_hmc *hmc)
{
	struct ibmvmc_crq_msg crq_msg;
	struct crq_server_adapter *adapter;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	if (!hmc || !hmc->adapter)
		return -EIO;

	adapter = hmc->adapter;

	dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
		(unsigned long)buffer->size, (unsigned long)adapter->liobn,
		(unsigned long)buffer->dma_addr_local,
		(unsigned long)adapter->riobn,
		(unsigned long)buffer->dma_addr_remote);

	rc = h_copy_rdma(buffer->size,
			 adapter->liobn,
			 buffer->dma_addr_local,
			 adapter->riobn,
			 buffer->dma_addr_remote);
	if (rc) {
		dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
			rc);
		return -EIO;
	}

	hmc->state = ibmhmc_state_opening;

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_OPEN;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}

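/**
 * ibmvmc_send_close - send an Interface Close message to the hypervisor
 * @hmc:	ibmvmc_hmc associated with this close request
 *
 * Return: 0 on success, -EIO if @hmc or its adapter is missing.
 */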
static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
{
	struct ibmvmc_crq_msg crq_msg;
	struct crq_server_adapter *adapter;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	if (!hmc || !hmc->adapter)
		return -EIO;

	adapter = hmc->adapter;

	dev_info(adapter->dev, "CRQ send: close\n");

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_CLOSE;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.rsvd = 0;
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}

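/**
 * ibmvmc_send_capabilities - send the Capabilities message to the hypervisor
 * @adapter:	crq_server_adapter struct
 *
 * Advertises this driver's max MTU, buffer pool size, HMC count, CRQ size,
 * and protocol version, then moves the driver to the capabilities state.
 *
 * Return: 0.
 */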
static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
{
	struct ibmvmc_admin_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_CAP;
	crq_msg.status = 0;
	crq_msg.rsvd[0] = 0;
	crq_msg.rsvd[1] = 0;
	crq_msg.max_hmc = ibmvmc_max_hmcs;
	crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
	crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
	crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
	crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	ibmvmc.state = ibmvmc_state_capabilities;

	return 0;
}

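/**
 * ibmvmc_send_add_buffer_resp - respond to an Add Buffer message
 * @adapter:	crq_server_adapter struct
 * @status:	status to report back to the hypervisor
 * @hmc_session: HMC session the original message targeted
 * @hmc_index:	HMC index the original message targeted
 * @buffer_id:	buffer ID from the original message
 *
 * Return: 0.
 */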
static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
				       u8 status, u8 hmc_session,
				       u8 hmc_index, u16 buffer_id)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_ADD_BUF_RESP;
	crq_msg.status = status;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc_session;
	crq_msg.hmc_index = hmc_index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return 0;
}

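/**
 * ibmvmc_send_rem_buffer_resp - respond to a Remove Buffer message
 * @adapter:	crq_server_adapter struct
 * @status:	status to report back to the hypervisor
 * @hmc_session: HMC session the original message targeted
 * @hmc_index:	HMC index the original message targeted
 * @buffer_id:	ID of the buffer being removed
 *
 * Return: 0.
 */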
static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
				       u8 status, u8 hmc_session,
				       u8 hmc_index, u16 buffer_id)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_REM_BUF_RESP;
	crq_msg.status = status;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc_session;
	crq_msg.hmc_index = hmc_index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return 0;
}

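/**
 * ibmvmc_send_msg - signal an outbound message buffer to the hypervisor
 * @adapter:	crq_server_adapter struct
 * @buffer:	ibmvmc_buffer holding the message payload
 * @hmc:	ibmvmc_hmc this message belongs to
 * @msg_len:	length of the payload in bytes
 *
 * Copies the payload to the partner via h_copy_rdma, transfers buffer
 * ownership to the hypervisor, and sends a signal CRQ.
 *
 * Return: 0 on success, non-zero h_copy_rdma status on failure.
 */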
static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_buffer *buffer,
			   struct ibmvmc_hmc *hmc, int msg_len)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
	rc = h_copy_rdma(msg_len,
			 adapter->liobn,
			 buffer->dma_addr_local,
			 adapter->riobn,
			 buffer->dma_addr_remote);
	if (rc) {
		dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
			rc);
		return rc;
	}

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_SIGNAL;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
	crq_msg.var3.msg_len = cpu_to_be32(msg_len);
	dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
		be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));

	buffer->owner = VMC_BUF_OWNER_HV;
	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}

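/**
 * ibmvmc_open - file open handler for the ibmvmc device node
 * @inode:	inode being opened
 * @file:	file struct for this open
 *
 * Allocates a per-open file session; the HMC itself is not reserved until
 * the VMC_IOCTL_SETHMCID ioctl is issued.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */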
static int ibmvmc_open(struct inode *inode, struct file *file)
{
	struct ibmvmc_file_session *session;

	pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
		 (unsigned long)inode, (unsigned long)file,
		 ibmvmc.state);

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return -ENOMEM;

	session->file = file;
	file->private_data = session;

	return 0;
}

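/**
 * ibmvmc_close - file release handler for the ibmvmc device node
 * @inode:	inode being released
 * @file:	file struct for this close
 *
 * Sends an Interface Close for the session's HMC, if one was opened.
 *
 * Return: 0 on success, negative errno on failure.
 */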
static int ibmvmc_close(struct inode *inode, struct file *file)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	int rc = 0;
	unsigned long flags;

	pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
		 (unsigned long)file, ibmvmc.state);

	session = file->private_data;
	if (!session)
		return -EIO;

	hmc = session->hmc;
	if (hmc) {
		if (!hmc->adapter)
			return -EIO;

		if (ibmvmc.state == ibmvmc_state_failed) {
			dev_warn(hmc->adapter->dev, "close: state_failed\n");
			return -EIO;
		}

		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->state >= ibmhmc_state_opening) {
			rc = ibmvmc_send_close(hmc);
			if (rc)
				dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
		}
		spin_unlock_irqrestore(&hmc->lock, flags);
	}

	kfree_sensitive(session);

	return rc;
}

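/**
 * ibmvmc_read - copy the next queued inbound message to user space
 * @file:	file struct for this session
 * @buf:	user buffer to copy into
 * @nbytes:	number of bytes requested
 * @ppos:	file position (unused)
 *
 * Blocks until a message is queued unless O_NONBLOCK is set.
 *
 * Return: number of bytes copied, or negative errno on failure.
 */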
static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
			   loff_t *ppos)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	struct crq_server_adapter *adapter;
	struct ibmvmc_buffer *buffer;
	ssize_t n;
	ssize_t retval = 0;
	unsigned long flags;
	DEFINE_WAIT(wait);

	pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
		 (unsigned long)file, (unsigned long)buf,
		 (unsigned long)nbytes);

	if (nbytes == 0)
		return 0;

	if (nbytes > ibmvmc.max_mtu) {
		pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
			(unsigned int)nbytes);
		return -EINVAL;
	}

	session = file->private_data;
	if (!session) {
		pr_warn("ibmvmc: read: no session\n");
		return -EIO;
	}

	hmc = session->hmc;
	if (!hmc) {
		pr_warn("ibmvmc: read: no hmc\n");
		return -EIO;
	}

	adapter = hmc->adapter;
	if (!adapter) {
		pr_warn("ibmvmc: read: no adapter\n");
		return -EIO;
	}

	do {
		prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->queue_tail != hmc->queue_head)
			/* Data is available; exit the loop holding the lock */
			break;

		spin_unlock_irqrestore(&hmc->lock, flags);

		if (!session->valid) {
			retval = -EBADFD;
			goto out;
		}
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		}

		schedule();

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
	} while (1);

	buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
	hmc->queue_tail++;
	if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
		hmc->queue_tail = 0;
	spin_unlock_irqrestore(&hmc->lock, flags);

	nbytes = min_t(size_t, nbytes, buffer->msg_len);
	n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
	dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
	ibmvmc_free_hmc_buffer(hmc, buffer);
	retval = nbytes;

	if (n) {
		dev_warn(adapter->dev, "read: copy to user failed.\n");
		retval = -EFAULT;
	}

out:
	finish_wait(&ibmvmc_read_wait, &wait);
	dev_dbg(adapter->dev, "read: out %ld\n", retval);
	return retval;
}

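/**
 * ibmvmc_poll - poll handler for the ibmvmc device node
 * @file:	file struct for this session
 * @wait:	poll_table from the poll call
 *
 * Return: POLLIN | POLLRDNORM when a message is queued, otherwise 0.
 */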
static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	unsigned int mask = 0;

	session = file->private_data;
	if (!session)
		return 0;

	hmc = session->hmc;
	if (!hmc)
		return 0;

	poll_wait(file, &ibmvmc_read_wait, wait);

	if (hmc->queue_head != hmc->queue_tail)
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

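/**
 * ibmvmc_write - queue a user-space message for transmission
 * @file:	file struct for this session
 * @buffer:	user buffer holding the message
 * @count:	number of bytes to send (at most the negotiated MTU)
 * @ppos:	file position (unused)
 *
 * Return: number of bytes accepted, or negative errno on failure.
 */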
static ssize_t ibmvmc_write(struct file *file, const char *buffer,
			    size_t count, loff_t *ppos)
{
	struct ibmvmc_buffer *vmc_buffer;
	struct ibmvmc_file_session *session;
	struct crq_server_adapter *adapter;
	struct ibmvmc_hmc *hmc;
	unsigned char *buf;
	unsigned long flags;
	size_t bytes;
	const char *p = buffer;
	size_t c = count;
	int ret = 0;

	session = file->private_data;
	if (!session)
		return -EIO;

	hmc = session->hmc;
	if (!hmc)
		return -EIO;

	spin_lock_irqsave(&hmc->lock, flags);
	if (hmc->state == ibmhmc_state_free) {
		/* HMC connection is not valid (possibly was reset under us) */
		ret = -EIO;
		goto out;
	}

	adapter = hmc->adapter;
	if (!adapter) {
		ret = -EIO;
		goto out;
	}

	if (count > ibmvmc.max_mtu) {
		dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
			 (unsigned long)count);
		ret = -EIO;
		goto out;
	}

	/* Waiting for the open_resp message to the open ioctl - retry */
	if (hmc->state == ibmhmc_state_opening) {
		ret = -EBUSY;
		goto out;
	}

	/* Make sure the ioctl() was called, the open msg sent, and that
	 * the HMC connection has not failed.
	 */
	if (hmc->state != ibmhmc_state_ready) {
		ret = -EIO;
		goto out;
	}

	vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
	if (!vmc_buffer) {
		/* No buffer available for the msg send, or we have not yet
		 * completed the open/open_resp sequence.  Retry until this is
		 * complete.
		 */
		ret = -EBUSY;
		goto out;
	}
	if (!vmc_buffer->real_addr_local) {
		dev_err(adapter->dev, "no buffer storage assigned\n");
		ret = -EIO;
		goto out;
	}
	buf = vmc_buffer->real_addr_local;

	while (c > 0) {
		bytes = min_t(size_t, c, vmc_buffer->size);

		bytes -= copy_from_user(buf, p, bytes);
		if (!bytes) {
			ret = -EFAULT;
			goto out;
		}
		c -= bytes;
		p += bytes;
	}
	if (p == buffer)
		goto out;

	file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file));
	mark_inode_dirty(file->f_path.dentry->d_inode);

	dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
		(unsigned long)file, (unsigned long)count);

	ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
	ret = p - buffer;
out:
	spin_unlock_irqrestore(&hmc->lock, flags);
	return (ssize_t)(ret);
}

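/**
 * ibmvmc_setup_hmc - reserve a free HMC for a file session
 * @session:	ibmvmc_file_session for the calling file descriptor
 *
 * Return: 0 on success, negative errno on failure.
 */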
static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
{
	struct ibmvmc_hmc *hmc;
	unsigned int valid, free, index;

	if (ibmvmc.state == ibmvmc_state_failed) {
		pr_warn("ibmvmc: Reserve HMC: state_failed\n");
		return -EIO;
	}

	if (ibmvmc.state < ibmvmc_state_ready) {
		pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
		return -EAGAIN;
	}

	/* Device is busy until capabilities have been exchanged and we
	 * have a generic buffer for each possible HMC connection.
	 */
	for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
		valid = 0;
		ibmvmc_count_hmc_buffers(index, &valid, &free);
		if (valid == 0) {
			pr_warn("ibmvmc: buffers not ready for index %d\n",
				index);
			return -ENOBUFS;
		}
	}

	/* Get an hmc object, and transition to ibmhmc_state_initial */
	hmc = ibmvmc_get_free_hmc();
	if (!hmc) {
		pr_warn("%s: free hmc not found\n", __func__);
		return -EBUSY;
	}

	hmc->session = hmc->session + 1;
	if (hmc->session == 0xff)
		hmc->session = 1;

	session->hmc = hmc;
	hmc->adapter = &ibmvmc_adapter;
	hmc->file_session = session;
	session->valid = 1;

	return 0;
}

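/**
 * ibmvmc_ioctl_sethmcid - handle the VMC_IOCTL_SETHMCID ioctl
 * @session:	ibmvmc_file_session for the calling file descriptor
 * @new_hmc_id:	user pointer to the HMC_ID_LEN-byte HMC ID
 *
 * Reserves an HMC for the session if needed, records the HMC ID, and
 * sends the Interface Open message carrying it.
 *
 * Return: 0 on success, negative errno on failure.
 */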
static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
				  unsigned char __user *new_hmc_id)
{
	struct ibmvmc_hmc *hmc;
	struct ibmvmc_buffer *buffer;
	size_t bytes;
	char print_buffer[HMC_ID_LEN + 1];
	unsigned long flags;
	long rc = 0;

	/* Reserve HMC session */
	hmc = session->hmc;
	if (!hmc) {
		rc = ibmvmc_setup_hmc(session);
		if (rc)
			return rc;

		hmc = session->hmc;
		if (!hmc) {
			pr_err("ibmvmc: setup_hmc success but no hmc\n");
			return -EIO;
		}
	}

	if (hmc->state != ibmhmc_state_initial) {
		pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
			hmc->state);
		return -EIO;
	}

	bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
	if (bytes)
		return -EFAULT;

	/* Send Interface Open message, plus HMC ID message */
	spin_lock_irqsave(&hmc->lock, flags);
	buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
	spin_unlock_irqrestore(&hmc->lock, flags);

	if (!buffer || !buffer->real_addr_local) {
		pr_warn("ibmvmc: sethmcid: no buffer available\n");
		return -EIO;
	}

	/* Make sure buffer is NUL terminated before trying to print it */
	memset(print_buffer, 0, HMC_ID_LEN + 1);
	strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
	pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);

	memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);

	rc = ibmvmc_send_open(buffer, hmc);

	return rc;
}

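/**
 * ibmvmc_ioctl_query - handle the VMC_IOCTL_QUERY ioctl
 * @session:	ibmvmc_file_session for the calling file descriptor
 * @ret_struct:	user pointer to receive the ibmvmc_query_struct
 *
 * Return: 0 on success, -EFAULT if the copy to user space fails.
 */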
static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
			       struct ibmvmc_query_struct __user *ret_struct)
{
	struct ibmvmc_query_struct query_struct;
	size_t bytes;

	memset(&query_struct, 0, sizeof(query_struct));
	query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
	query_struct.state = ibmvmc.state;
	query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;

	bytes = copy_to_user(ret_struct, &query_struct,
			     sizeof(query_struct));
	if (bytes)
		return -EFAULT;

	return 0;
}

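/**
 * ibmvmc_ioctl_requestvmc - handle the VMC_IOCTL_REQUESTVMC ioctl
 * @session:	ibmvmc_file_session for the calling file descriptor
 * @ret_vmc_index: user pointer to receive the VMC device's DRC index
 *
 * Asks the hypervisor to create a VMC device via H_REQUEST_VMC and maps
 * the hcall status to an errno.
 *
 * Return: 0 on success, negative errno on failure.
 */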
static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
				    u32 __user *ret_vmc_index)
{
	size_t bytes;
	long rc;
	u32 vmc_drc_index;

	/* Call to request the VMC device on phyp */
	rc = h_request_vmc(&vmc_drc_index);
	pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);

	if (rc == H_SUCCESS) {
		rc = 0;
	} else if (rc == H_FUNCTION) {
		pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
		return -EPERM;
	} else if (rc == H_AUTHORITY) {
		pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
		return -EPERM;
	} else if (rc == H_HARDWARE) {
		pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
		return -EIO;
	} else if (rc == H_RESOURCE) {
		pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
		return -ENODEV;
	} else if (rc == H_NOT_AVAILABLE) {
		pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
		return -EPERM;
	} else if (rc == H_PARAMETER) {
		pr_err("ibmvmc: requestvmc: invalid parameter\n");
		return -EINVAL;
	}

	/* Success, set the vmc index in global struct */
	ibmvmc.vmc_drc_index = vmc_drc_index;

	bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
			     sizeof(*ret_vmc_index));
	if (bytes) {
		pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
		return -EFAULT;
	}
	return rc;
}

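/**
 * ibmvmc_ioctl - ioctl dispatcher for the ibmvmc device node
 * @file:	file struct for this session
 * @cmd:	ioctl command
 * @arg:	user-space argument for the command
 *
 * Return: 0 on success, negative errno on failure.
 */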
static long ibmvmc_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	struct ibmvmc_file_session *session = file->private_data;

	pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
		 (unsigned long)file, cmd, arg,
		 (unsigned long)session);

	if (!session) {
		pr_warn("ibmvmc: ioctl: no session\n");
		return -EIO;
	}

	switch (cmd) {
	case VMC_IOCTL_SETHMCID:
		return ibmvmc_ioctl_sethmcid(session,
			(unsigned char __user *)arg);
	case VMC_IOCTL_QUERY:
		return ibmvmc_ioctl_query(session,
			(struct ibmvmc_query_struct __user *)arg);
	case VMC_IOCTL_REQUESTVMC:
		return ibmvmc_ioctl_requestvmc(session,
			(unsigned int __user *)arg);
	default:
		pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
		return -EINVAL;
	}
}

static const struct file_operations ibmvmc_fops = {
	.owner		= THIS_MODULE,
	.read		= ibmvmc_read,
	.write		= ibmvmc_write,
	.poll		= ibmvmc_poll,
	.unlocked_ioctl	= ibmvmc_ioctl,
	.open		= ibmvmc_open,
	.release	= ibmvmc_close,
};

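/**
 * ibmvmc_add_buffer - handle an Add Buffer message from the hypervisor
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg describing the hypervisor's buffer
 *
 * Validates the HMC index and buffer ID, allocates and maps a local DMA
 * buffer to pair with the hypervisor's buffer, and sends an Add Buffer
 * Response reporting the outcome.
 *
 * Return: 0 on success, -1 on failure.
 */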
static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
			     struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	buffer = &hmcs[hmc_index].buffer[buffer_id];

	if (buffer->real_addr_local || buffer->dma_addr_local) {
		dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
			 (unsigned long)buffer_id);
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
						   ibmvmc.max_mtu,
						   &buffer->dma_addr_local);

	if (!buffer->real_addr_local) {
		dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
	buffer->size = ibmvmc.max_mtu;
	buffer->owner = crq->var1.owner;
	buffer->free = 1;
	/* Must ensure valid==1 is observable only after all other fields are */
	dma_wmb();
	buffer->valid = 1;
	buffer->id = buffer_id;

	dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
	dev_dbg(adapter->dev, " index: %d, session: %d, buffer: 0x%x, owner: %d\n",
		hmc_index, hmc_session, buffer_id, buffer->owner);
	dev_dbg(adapter->dev, " local: 0x%x, remote: 0x%x\n",
		(u32)buffer->dma_addr_local,
		(u32)buffer->dma_addr_remote);
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
				    hmc_index, buffer_id);

	return rc;
}

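/**
 * ibmvmc_rem_buffer - handle a Remove Buffer message from the hypervisor
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg from the hypervisor
 *
 * The hypervisor requests that one HMC buffer be returned to hypervisor
 * ownership. The driver picks any free buffer it owns, frees its local
 * DMA storage, and reports the removed buffer ID (or VMC_MSG_NO_BUFFER
 * when none can be spared) in the response.
 *
 * Return: 0 on success, -1 on failure.
 */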
static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
			     struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id = 0;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
			 hmc_index);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
	if (!buffer) {
		dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
					    hmc_session, hmc_index,
					    VMC_INVALID_BUFFER_ID);
		return -1;
	}

	buffer_id = buffer->id;

	if (buffer->valid)
		free_dma_buffer(to_vio_dev(adapter->dev),
				ibmvmc.max_mtu,
				buffer->real_addr_local,
				buffer->dma_addr_local);

	memset(buffer, 0, sizeof(struct ibmvmc_buffer));
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
	ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
				    hmc_index, buffer_id);

	return rc;
}

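/**
 * ibmvmc_recv_msg - handle a Signal message carrying inbound HMC data
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg from the hypervisor
 *
 * Pulls the payload from the hypervisor's buffer into the paired local
 * buffer via h_copy_rdma, queues it for readers, and wakes them.
 *
 * Return: 0 on success, -1 on failure.
 */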
static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_hmc *hmc;
	unsigned long msg_len;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	/* Hypervisor writes CRQs directly into our memory in big endian */
	dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
		be64_to_cpu(*((unsigned long *)crq)),
		be64_to_cpu(*(((unsigned long *)crq) + 1)));

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);
	msg_len = be32_to_cpu(crq->var3.msg_len);

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	hmc = &hmcs[hmc_index];
	spin_lock_irqsave(&hmc->lock, flags);

	if (hmc->state == ibmhmc_state_free) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
			hmc->state);
		/* HMC connection is not valid (possibly was reset under us) */
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	buffer = &hmc->buffer[buffer_id];

	if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
		dev_err(adapter->dev, "Recv_msg: not valid, or not HV. 0x%x 0x%x\n",
			buffer->valid, buffer->owner);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* RDMA the data into the partition */
	rc = h_copy_rdma(msg_len,
			 adapter->riobn,
			 buffer->dma_addr_remote,
			 adapter->liobn,
			 buffer->dma_addr_local);

	dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
		(unsigned int)msg_len, (unsigned int)buffer_id,
		(unsigned int)hmc->queue_head, (unsigned int)hmc_index);
	buffer->msg_len = msg_len;
	buffer->free = 0;
	buffer->owner = VMC_BUF_OWNER_ALPHA;

	if (rc) {
		dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
			rc);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* Must be locked because read operates on the same data */
	hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
	hmc->queue_head++;
	if (hmc->queue_head == ibmvmc_max_buf_pool_size)
		hmc->queue_head = 0;

	if (hmc->queue_head == hmc->queue_tail)
		dev_err(adapter->dev, "outbound buffer queue wrapped.\n");

	spin_unlock_irqrestore(&hmc->lock, flags);

	wake_up_interruptible(&ibmvmc_read_wait);

	return 0;
}

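/**
 * ibmvmc_process_capabilities - handle the Capabilities Response message
 * @adapter:	crq_server_adapter struct
 * @crqp:	ibmvmc_crq_msg carrying the hypervisor's capabilities
 *
 * Checks protocol version compatibility, negotiates MTU, pool size, and
 * HMC count down to the common minimum, and marks the driver ready.
 */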
static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
					struct ibmvmc_crq_msg *crqp)
{
	struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;

	if ((be16_to_cpu(crq->version) >> 8) !=
	    (IBMVMC_PROTOCOL_VERSION >> 8)) {
		dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
			be16_to_cpu(crq->version),
			IBMVMC_PROTOCOL_VERSION);
		ibmvmc.state = ibmvmc_state_failed;
		return;
	}

	ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
	ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
					    be16_to_cpu(crq->pool_size));
	ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
	ibmvmc.state = ibmvmc_state_ready;

	dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
		 ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
		 ibmvmc.max_hmc_index);
}

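/**
 * ibmvmc_validate_hmc_session - check a CRQ message's HMC session ID
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg to validate
 *
 * Return: 0 when the session matches (or is 0, which always passes),
 * -1 otherwise.
 */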
static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
				       struct ibmvmc_crq_msg *crq)
{
	unsigned char hmc_index;

	hmc_index = crq->hmc_index;

	if (crq->hmc_session == 0)
		return 0;

	if (hmc_index > ibmvmc.max_hmc_index)
		return -1;

	if (hmcs[hmc_index].session != crq->hmc_session) {
		dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
			 hmcs[hmc_index].session, crq->hmc_session);
		return -1;
	}

	return 0;
}

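/**
 * ibmvmc_reset - drop all HMC connections and reset the adapter state
 * @adapter:	crq_server_adapter struct
 * @xport_event: true when triggered by a transport event; in that case
 *		the partner closed the CRQ and no CRQ reset needs to be
 *		scheduled on our side
 */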
static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
{
	int i;

	if (ibmvmc.state != ibmvmc_state_sched_reset) {
		dev_info(adapter->dev, "*** Reset to initial state.\n");
		for (i = 0; i < ibmvmc_max_hmcs; i++)
			ibmvmc_return_hmc(&hmcs[i], xport_event);

		if (xport_event) {
			/* CRQ was closed by the partner.  We don't need to do
			 * anything except set ourself to the correct state to
			 * handle init msgs.
			 */
			ibmvmc.state = ibmvmc_state_crqinit;
		} else {
			/* The partner did not close their CRQ - instead, we're
			 * closing the CRQ on our end.  Need to schedule this
			 * for process context, because CRQ reset may require a
			 * sleep.
			 *
			 * Setting ibmvmc.state here immediately prevents
			 * ibmvmc_open from completing until the reset
			 * completes in process context.
			 */
			ibmvmc.state = ibmvmc_state_sched_reset;
			dev_dbg(adapter->dev, "Device reset scheduled");
			wake_up_interruptible(&adapter->reset_wait_queue);
		}
	}
}

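/**
 * ibmvmc_reset_task - kthread that performs scheduled CRQ resets
 * @data:	crq_server_adapter struct
 *
 * Sleeps until ibmvmc_reset schedules a reset, then re-registers the CRQ
 * in process context and re-sends the CRQ init message.
 *
 * Return: 0 when the thread is stopped.
 */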
static int ibmvmc_reset_task(void *data)
{
	struct crq_server_adapter *adapter = data;
	int rc;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		wait_event_interruptible(adapter->reset_wait_queue,
			(ibmvmc.state == ibmvmc_state_sched_reset) ||
			kthread_should_stop());

		if (kthread_should_stop())
			break;

		dev_dbg(adapter->dev, "CRQ resetting in process context");
		tasklet_disable(&adapter->work_task);

		rc = ibmvmc_reset_crq_queue(adapter);

		if (rc != H_SUCCESS && rc != H_RESOURCE) {
			dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
				rc);
			ibmvmc.state = ibmvmc_state_failed;
		} else {
			ibmvmc.state = ibmvmc_state_crqinit;

			if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
			    != 0 && rc != H_RESOURCE)
				dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
		}

		vio_enable_interrupts(to_vio_dev(adapter->dev));
		tasklet_enable(&adapter->work_task);
	}

	return 0;
}

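/**
 * ibmvmc_process_open_resp - handle an Interface Open Response message
 * @crq:	ibmvmc_crq_msg from the hypervisor
 * @adapter:	crq_server_adapter struct
 *
 * On success the HMC ID buffer is returned to the free pool and the HMC
 * moves to the ready state; on failure the HMC is returned or failed.
 */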
static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
				     struct crq_server_adapter *adapter)
{
	unsigned char hmc_index;
	unsigned short buffer_id;

	hmc_index = crq->hmc_index;
	if (hmc_index > ibmvmc.max_hmc_index) {
		/* The hypervisor should never send an index above the
		 * negotiated maximum; treat it as a protocol failure.
		 */
		ibmvmc_reset(adapter, false);
		return;
	}

	if (crq->status) {
		dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
			 crq->status);
		ibmvmc_return_hmc(&hmcs[hmc_index], false);
		return;
	}

	if (hmcs[hmc_index].state == ibmhmc_state_opening) {
		buffer_id = be16_to_cpu(crq->var2.buffer_id);
		if (buffer_id >= ibmvmc.max_buffer_pool_size) {
			dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
				buffer_id);
			hmcs[hmc_index].state = ibmhmc_state_failed;
		} else {
			ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
					       &hmcs[hmc_index].buffer[buffer_id]);
			hmcs[hmc_index].state = ibmhmc_state_ready;
			dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
		}
	} else {
		dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
			 hmcs[hmc_index].state);
	}
}

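/**
 * ibmvmc_process_close_resp - handle an Interface Close Response message
 * @crq:	ibmvmc_crq_msg from the hypervisor
 * @adapter:	crq_server_adapter struct
 *
 * If the close succeeded the HMC is returned to the free pool; a failed
 * close triggers a full adapter reset.
 */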
static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
				      struct crq_server_adapter *adapter)
{
	unsigned char hmc_index;

	hmc_index = crq->hmc_index;
	if (hmc_index > ibmvmc.max_hmc_index) {
		ibmvmc_reset(adapter, false);
		return;
	}

	if (crq->status) {
		dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
			 crq->status);
		ibmvmc_reset(adapter, false);
		return;
	}

	ibmvmc_return_hmc(&hmcs[hmc_index], false);
}

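/**
 * ibmvmc_crq_process - dispatch a received VMC protocol message
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg to dispatch by type
 */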
static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
			       struct ibmvmc_crq_msg *crq)
{
	switch (crq->type) {
	case VMC_MSG_CAP_RESP:
		dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
			crq->type);
		if (ibmvmc.state == ibmvmc_state_capabilities)
			ibmvmc_process_capabilities(adapter, crq);
		else
			dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
				 ibmvmc.state);
		break;
	case VMC_MSG_OPEN_RESP:
		dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_process_open_resp(crq, adapter);
		break;
	case VMC_MSG_ADD_BUF:
		dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_add_buffer(adapter, crq);
		break;
	case VMC_MSG_REM_BUF:
		dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_rem_buffer(adapter, crq);
		break;
	case VMC_MSG_SIGNAL:
		dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_recv_msg(adapter, crq);
		break;
	case VMC_MSG_CLOSE_RESP:
		dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_process_close_resp(crq, adapter);
		break;
	case VMC_MSG_CAP:
	case VMC_MSG_OPEN:
	case VMC_MSG_CLOSE:
	case VMC_MSG_ADD_BUF_RESP:
	case VMC_MSG_REM_BUF_RESP:
		dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
			 crq->type);
		break;
	default:
		dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
			 crq->type);
		break;
	}
}

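/**
 * ibmvmc_handle_crq_init - handle a CRQ initialization message
 * @crq:	ibmvmc_crq_msg of the init or init-complete variety
 * @adapter:	crq_server_adapter struct
 *
 * Responds to the partner's init message and kicks off the capabilities
 * exchange once both sides are initialized.
 */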
static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
				   struct crq_server_adapter *adapter)
{
	switch (crq->type) {
	case 0x01:	/* Initialization message */
		dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
			ibmvmc.state);
		if (ibmvmc.state == ibmvmc_state_crqinit) {
			/* Send back a response */
			if (ibmvmc_send_crq(adapter, 0xC002000000000000,
					    0) == 0)
				ibmvmc_send_capabilities(adapter);
			else
				dev_err(adapter->dev, " Unable to send init rsp\n");
		} else {
			dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
				ibmvmc.state, ibmvmc.max_mtu);
		}

		break;
	case 0x02:	/* Initialization response */
		dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
			ibmvmc.state);
		if (ibmvmc.state == ibmvmc_state_crqinit)
			ibmvmc_send_capabilities(adapter);
		break;
	default:
		dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
			 (unsigned long)crq->type);
	}
}

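/**
 * ibmvmc_handle_crq - route a CRQ element by its valid/format byte
 * @crq:	ibmvmc_crq_msg to route
 * @adapter:	crq_server_adapter struct
 */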
static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
			      struct crq_server_adapter *adapter)
{
	switch (crq->valid) {
	case 0xC0:	/* initialization */
		ibmvmc_handle_crq_init(crq, adapter);
		break;
	case 0xFF:	/* Hypervisor telling us the connection is closed */
		dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
		ibmvmc_reset(adapter, true);
		break;
	case 0x80:	/* real payload */
		ibmvmc_crq_process(adapter, crq);
		break;
	default:
		dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
			 crq->valid);
		break;
	}
}

static void ibmvmc_task(unsigned long data)
{
	struct crq_server_adapter *adapter =
		(struct crq_server_adapter *)data;
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct ibmvmc_crq_msg *crq;
	int done = 0;

	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
			ibmvmc_handle_crq(crq, adapter);
			crq->valid = 0x00;
			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		}

		vio_enable_interrupts(vdev);
		crq = crq_queue_next_crq(&adapter->queue);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvmc_handle_crq(crq, adapter);
			crq->valid = 0x00;
			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		} else {
			done = 1;
		}
	}
}

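/**
 * ibmvmc_init_crq_queue - allocate, map, and register the CRQ with phyp
 * @adapter:	crq_server_adapter struct
 *
 * Also sets up the CRQ tasklet and the device interrupt handler.
 *
 * Return: 0 or H_RESOURCE on success, -ENOMEM on failure.
 */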
static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;
	int retrc = 0;

	queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;

	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(adapter->dev, queue->msg_token))
		goto map_failed;

	retrc = plpar_hcall_norets(H_REG_CRQ,
				   vdev->unit_address,
				   queue->msg_token, PAGE_SIZE);
	rc = retrc;

	if (rc == H_RESOURCE)
		rc = ibmvmc_reset_crq_queue(adapter);

	if (rc == 2) {
		dev_warn(adapter->dev, "Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		dev_err(adapter->dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);

	if (request_irq(vdev->irq,
			ibmvmc_handle_event,
			0, "ibmvmc", (void *)adapter) != 0) {
		dev_err(adapter->dev, "couldn't register irq 0x%x\n",
			vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc);
		goto req_irq_failed;
	}

	return retrc;

req_irq_failed:
	/* Cannot have any work since we either never got our IRQ registered,
	 * or never got interrupts enabled
	 */
	tasklet_kill(&adapter->work_task);
	h_free_crq(vdev->unit_address);
reg_crq_failed:
	dma_unmap_single(adapter->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)queue->msgs);
malloc_failed:
	return -ENOMEM;
}

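/* Fill in the liobn and riobn fields on the adapter */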
static int read_dma_window(struct vio_dev *vdev,
			   struct crq_server_adapter *adapter)
{
	const __be32 *dma_window;
	const __be32 *prop;

	/* The local (liobn) and remote (riobn) windows are both read from
	 * the ibm,my-dma-window property; the number of cells to skip
	 * between them comes from the #dma-address-cells and
	 * #dma-size-cells properties, with a fallback of one cell each.
	 */
	dma_window =
		(const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
						  NULL);
	if (!dma_window) {
		dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
		return -1;
	}

	adapter->liobn = be32_to_cpu(*dma_window);
	dma_window++;

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	/* dma_window should point to the second window now */
	adapter->riobn = be32_to_cpu(*dma_window);

	return 0;
}

static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct crq_server_adapter *adapter = &ibmvmc_adapter;
	int rc;

	dev_set_drvdata(&vdev->dev, NULL);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev = &vdev->dev;

	dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);

	rc = read_dma_window(vdev, adapter);
	if (rc != 0) {
		ibmvmc.state = ibmvmc_state_failed;
		return -1;
	}

	dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
		adapter->liobn, adapter->riobn);

	init_waitqueue_head(&adapter->reset_wait_queue);
	adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
	if (IS_ERR(adapter->reset_task)) {
		dev_err(adapter->dev, "Failed to start reset thread\n");
		ibmvmc.state = ibmvmc_state_failed;
		rc = PTR_ERR(adapter->reset_task);
		adapter->reset_task = NULL;
		return rc;
	}

	rc = ibmvmc_init_crq_queue(adapter);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
			rc);
		ibmvmc.state = ibmvmc_state_failed;
		goto crq_failed;
	}

	ibmvmc.state = ibmvmc_state_crqinit;

	/* Try to send an initialization message.  Note that this is allowed
	 * to fail if the other end is not active.  In that case we just wait
	 * for the other side to initialize.
	 */
	if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
	    rc != H_RESOURCE)
		dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");

	dev_set_drvdata(&vdev->dev, adapter);

	return 0;

crq_failed:
	kthread_stop(adapter->reset_task);
	adapter->reset_task = NULL;
	return -EPERM;
}

static void ibmvmc_remove(struct vio_dev *vdev)
{
	struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);

	dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
		 vdev->unit_address);
	ibmvmc_release_crq_queue(adapter);
}

static struct vio_device_id ibmvmc_device_table[] = {
	{ "ibm,vmc", "IBM,vmc" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);

static struct vio_driver ibmvmc_driver = {
	.name     = ibmvmc_driver_name,
	.id_table = ibmvmc_device_table,
	.probe    = ibmvmc_probe,
	.remove   = ibmvmc_remove,
};

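/**
 * ibmvmc_scrub_module_parms - clamp module parameters to their legal ranges
 *
 * Simple bounds checks on the module parameters; the actual values may be
 * negotiated down further during the capabilities exchange.
 */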
static void __init ibmvmc_scrub_module_parms(void)
{
	if (ibmvmc_max_mtu > MAX_MTU) {
		pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
		ibmvmc_max_mtu = MAX_MTU;
	} else if (ibmvmc_max_mtu < MIN_MTU) {
		pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
		ibmvmc_max_mtu = MIN_MTU;
	}

	if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
			MAX_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
	} else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
			MIN_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
	}

	if (ibmvmc_max_hmcs > MAX_HMCS) {
		pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
		ibmvmc_max_hmcs = MAX_HMCS;
	} else if (ibmvmc_max_hmcs < MIN_HMCS) {
		pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
		ibmvmc_max_hmcs = MIN_HMCS;
	}
}

static struct miscdevice ibmvmc_miscdev = {
	.name = ibmvmc_driver_name,
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &ibmvmc_fops,
};

static int __init ibmvmc_module_init(void)
{
	int rc, i, j;

	ibmvmc.state = ibmvmc_state_initial;
	pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);

	rc = misc_register(&ibmvmc_miscdev);
	if (rc) {
		pr_err("ibmvmc: misc registration failed\n");
		goto misc_register_failed;
	}
	pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
		ibmvmc_miscdev.minor);

	/* Initialize data structures */
	memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
	for (i = 0; i < MAX_HMCS; i++) {
		spin_lock_init(&hmcs[i].lock);
		hmcs[i].state = ibmhmc_state_free;
		for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
			hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
	}

	/* Sanity check module parms */
	ibmvmc_scrub_module_parms();

	/*
	 * Initialize some reasonable values.  Might be negotiated smaller
	 * values during the capabilities exchange.
	 */
	ibmvmc.max_mtu = ibmvmc_max_mtu;
	ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
	ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;

	rc = vio_register_driver(&ibmvmc_driver);

	if (rc) {
		pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
		goto vio_reg_failed;
	}

	return 0;

vio_reg_failed:
	misc_deregister(&ibmvmc_miscdev);
misc_register_failed:
	return rc;
}

static void __exit ibmvmc_module_exit(void)
{
	pr_info("ibmvmc: module exit\n");
	vio_unregister_driver(&ibmvmc_driver);
	misc_deregister(&ibmvmc_miscdev);
}

module_init(ibmvmc_module_init);
module_exit(ibmvmc_module_exit);

module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
		   int, 0644);
MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
MODULE_PARM_DESC(max_hmcs, "Max HMCs");
module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
MODULE_PARM_DESC(max_mtu, "Max MTU");

MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM VMC");
MODULE_VERSION(IBMVMC_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");