// SPDX-License-Identifier: GPL-2.0

/*
 * Goldfish "pipe" device driver: a fast communication channel between the
 * guest system and the QEMU-based emulator host. Userspace opens the misc
 * device and talks to a host-side pipe service through ordinary read(),
 * write() and poll() calls; the wire protocol inside a pipe is left
 * entirely to the host service.
 */
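
/*
 * Illustrative userspace usage (a minimal sketch, not part of this file;
 * the /dev/goldfish_pipe path is an assumption that matches the miscdevice
 * name registered below):
 *
 *	int fd = open("/dev/goldfish_pipe", O_RDWR);
 *	// connect to a host service, then exchange data
 *	write(fd, request, request_len);
 *	read(fd, reply, sizeof(reply));
 *	close(fd);
 */
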
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/acpi.h>
#include <linux/bug.h>
#include "goldfish_pipe_qemu.h"

/*
 * Update this when something changes in the driver's behavior so the host
 * can benefit from knowing it
 */
enum {
	PIPE_DRIVER_VERSION = 2,
	PIPE_CURRENT_DEVICE_VERSION = 2
};

enum {
	MAX_BUFFERS_PER_COMMAND = 336,
	MAX_SIGNALLED_PIPES = 64,
	INITIAL_PIPES_CAPACITY = 64
};

struct goldfish_pipe_dev;

/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
	s32 cmd;	/* PipeCmdCode, guest -> host */
	s32 id;		/* pipe id, guest -> host */
	s32 status;	/* command execution status, host -> guest */
	s32 reserved;	/* to pad to 64-bit boundary */
	union {
		/* Parameters for PIPE_CMD_{READ,WRITE} */
		struct {
			/* number of buffers, guest -> host */
			u32 buffers_count;
			/* number of consumed bytes, host -> guest */
			s32 consumed_size;
			/* buffer pointers, guest -> host */
			u64 ptrs[MAX_BUFFERS_PER_COMMAND];
			/* buffer sizes, guest -> host */
			u32 sizes[MAX_BUFFERS_PER_COMMAND];
		} rw_params;
	};
};
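
/*
 * Size sketch for the command structure above, assuming 4 KiB pages: the
 * fixed header is 4 * 4 = 16 bytes and rw_params adds 4 + 4 + 336 * 8 +
 * 336 * 4 = 4040 bytes, 4056 bytes in total. This is why the open path can
 * assert the command fits into one page (BUILD_BUG_ON below) and allocate
 * it with __get_free_page().
 */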

/* A single signalled pipe information */
struct signalled_pipe_buffer {
	u32 id;
	u32 flags;
};

/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
	u64 command_buffer_ptr;
	u32 rw_params_max_count;
};

/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
	struct open_command_param open_command_params;
	struct signalled_pipe_buffer
		signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
};

/* This data type models a given pipe instance */
struct goldfish_pipe {
	/* pipe ID - index into goldfish_pipe_dev::pipes array */
	u32 id;

	/*
	 * The wake flags pipe is waiting for.
	 * Note: not protected with any lock, uses atomic operations
	 * and barriers to make it thread-safe.
	 */
	unsigned long flags;

	/*
	 * Wake flags the host has signalled.
	 * Protected by goldfish_pipe_dev::lock.
	 */
	unsigned long signalled_flags;

	/* A pointer to command buffer */
	struct goldfish_pipe_command *command_buffer;

	/*
	 * Doubly linked list of signalled pipes.
	 * Protected by goldfish_pipe_dev::lock.
	 */
	struct goldfish_pipe *prev_signalled;
	struct goldfish_pipe *next_signalled;

	/*
	 * A pipe's own lock. Protects the following:
	 *  - *command_buffer - makes sure a command can safely write its
	 *    parameters to the host and read the results back.
	 */
	struct mutex lock;

	/* A wake queue for sleeping until host signals an event */
	wait_queue_head_t wake_queue;

	/* Pointer to the parent goldfish_pipe_dev instance */
	struct goldfish_pipe_dev *dev;

	/* A buffer of pages, too large to fit into a stack frame */
	struct page *pages[MAX_BUFFERS_PER_COMMAND];
};

/*
 * The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	/* A magic number to check if this is an instance of this struct */
	void *magic;

	/*
	 * Global device spinlock. Protects the following members:
	 *  - pipes, pipes_capacity
	 *  - [*pipes, *pipes + pipes_capacity) - array data
	 *  - first_signalled_pipe,
	 *      goldfish_pipe::prev_signalled,
	 *      goldfish_pipe::next_signalled,
	 *      goldfish_pipe::signalled_flags - all signalled-related fields,
	 *      in all allocated pipes
	 *  - open_command_params - PIPE_CMD_OPEN-related buffers
	 *
	 * It looks like a lot of different fields, but the trick is that
	 * the only operation that happens often is the signalled pipes array
	 * manipulation. That's why it's OK for now to keep the rest of the
	 * fields under the same lock. If we notice too much contention
	 * because of PIPE_CMD_OPEN, then we should add a separate lock there.
	 */
	spinlock_t lock;

	/*
	 * Array of the pipes of |pipes_capacity| elements,
	 * indexed by goldfish_pipe::id
	 */
	struct goldfish_pipe **pipes;
	u32 pipes_capacity;

	/* Pointers to the buffers host uses for interaction with this driver */
	struct goldfish_pipe_dev_buffers *buffers;

	/* Head of a doubly linked list of signalled pipes */
	struct goldfish_pipe *first_signalled_pipe;

	/* ptr to platform device's device struct */
	struct device *pdev_dev;

	/* Some device-specific data */
	int irq;
	int version;
	unsigned char __iomem *base;

	struct miscdevice miscdev;
};

/*
 * Execute a command on the given pipe. The host processes the command
 * synchronously during the PIPE_REG_CMD write, so command_buffer->status
 * already holds the result when writel() returns.
 * Must be called with pipe->lock held.
 */
static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
				    enum PipeCmdCode cmd)
{
	pipe->command_buffer->cmd = cmd;
	/* failure by default */
	pipe->command_buffer->status = PIPE_ERROR_INVAL;
	writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
	return pipe->command_buffer->status;
}

static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
	int status;

	if (mutex_lock_interruptible(&pipe->lock))
		return PIPE_ERROR_IO;
	status = goldfish_pipe_cmd_locked(pipe, cmd);
	mutex_unlock(&pipe->lock);
	return status;
}

/*
 * This function converts an error code returned by the emulator through
 * the PIPE_REG_STATUS i/o register into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

static int goldfish_pin_pages(unsigned long first_page,
			      unsigned long last_page,
			      unsigned int last_page_size,
			      int is_write,
			      struct page *pages[MAX_BUFFERS_PER_COMMAND],
			      unsigned int *iter_last_page_size)
{
	int ret;
	int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;

	if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
		requested_pages = MAX_BUFFERS_PER_COMMAND;
		*iter_last_page_size = PAGE_SIZE;
	} else {
		*iter_last_page_size = last_page_size;
	}

	ret = pin_user_pages_fast(first_page, requested_pages,
				  !is_write ? FOLL_WRITE : 0,
				  pages);
	if (ret <= 0)
		return -EFAULT;
	if (ret < requested_pages)
		*iter_last_page_size = PAGE_SIZE;

	return ret;
}

/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(struct page **pages,
			       int pages_count,
			       unsigned long address,
			       unsigned long address_end,
			       unsigned long first_page,
			       unsigned long last_page,
			       unsigned int iter_last_page_size,
			       int is_write,
			       struct goldfish_pipe_command *command)
{
	/*
	 * Process the first page separately - it's the only page that
	 * needs special handling for its start address.
	 */
	unsigned long xaddr = page_to_phys(pages[0]);
	unsigned long xaddr_prev = xaddr;
	int buffer_idx = 0;
	int i = 1;
	int size_on_page = first_page == last_page
			? (int)(address_end - address)
			: (PAGE_SIZE - (address & ~PAGE_MASK));
	command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
	command->rw_params.sizes[0] = size_on_page;
	for (; i < pages_count; ++i) {
		xaddr = page_to_phys(pages[i]);
		size_on_page = (i == pages_count - 1) ?
			iter_last_page_size : PAGE_SIZE;
		if (xaddr == xaddr_prev + PAGE_SIZE) {
			/* Physically adjacent to the previous page - merge */
			command->rw_params.sizes[buffer_idx] += size_on_page;
		} else {
			++buffer_idx;
			command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
			command->rw_params.sizes[buffer_idx] = size_on_page;
		}
		xaddr_prev = xaddr;
	}
	command->rw_params.buffers_count = buffer_idx + 1;
}
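
/*
 * Worked example of the merging above (illustrative physical addresses,
 * assuming PAGE_SIZE = 0x1000): a three-page buffer starting at page offset
 * 0x100, pinned to physical pages 0x5000, 0x6000 and 0x9000, yields
 *
 *	ptrs[0] = 0x5100, sizes[0] = 0xf00 + 0x1000   (pages 0 and 1 merged)
 *	ptrs[1] = 0x9000, sizes[1] = iter_last_page_size
 *	buffers_count = 2
 *
 * so physically adjacent pages cost only one buffer entry in the command.
 */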

static int transfer_max_buffers(struct goldfish_pipe *pipe,
				unsigned long address,
				unsigned long address_end,
				int is_write,
				unsigned long last_page,
				unsigned int last_page_size,
				s32 *consumed_size,
				int *status)
{
	unsigned long first_page = address & PAGE_MASK;
	unsigned int iter_last_page_size;
	int pages_count;

	/* Serialize access to the pipe command buffers */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	pages_count = goldfish_pin_pages(first_page, last_page,
					 last_page_size, is_write,
					 pipe->pages, &iter_last_page_size);
	if (pages_count < 0) {
		mutex_unlock(&pipe->lock);
		return pages_count;
	}

	populate_rw_params(pipe->pages, pages_count, address, address_end,
			   first_page, last_page, iter_last_page_size,
			   is_write, pipe->command_buffer);

	/* Transfer the data */
	*status = goldfish_pipe_cmd_locked(pipe,
				is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);

	*consumed_size = pipe->command_buffer->rw_params.consumed_size;

	unpin_user_pages_dirty_lock(pipe->pages, pages_count,
				    !is_write && *consumed_size > 0);

	mutex_unlock(&pipe->lock);
	return 0;
}

static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
	u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;

	set_bit(wake_bit, &pipe->flags);

	/* Tell the emulator we're going to wait for a wake event */
	goldfish_pipe_cmd(pipe,
		is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);

	while (test_bit(wake_bit, &pipe->flags)) {
		if (wait_event_interruptible(pipe->wake_queue,
					     !test_bit(wake_bit, &pipe->flags)))
			return -ERESTARTSYS;

		if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
			return -EIO;
	}

	return 0;
}

static ssize_t goldfish_pipe_read_write(struct file *filp,
					char __user *buffer,
					size_t bufflen,
					int is_write)
{
	struct goldfish_pipe *pipe = filp->private_data;
	int count = 0, ret = -EINVAL;
	unsigned long address, address_end, last_page;
	unsigned int last_page_size;

	/* If the emulator already closed the pipe, no need to go further */
	if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
		return -EIO;
	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;
	/* Check the buffer range for access */
	if (unlikely(!access_ok(buffer, bufflen)))
		return -EFAULT;

	address = (unsigned long)buffer;
	address_end = address + bufflen;
	last_page = (address_end - 1) & PAGE_MASK;
	last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;
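	/*
	 * Worked example for the address math above (illustrative values,
	 * assuming PAGE_SIZE = 0x1000): for buffer = 0x10000100 and
	 * bufflen = 0x2000, address_end = 0x10002100, so the last byte
	 * lives at 0x100020ff; last_page = 0x10002000 and last_page_size =
	 * 0xff + 1 = 0x100 bytes used on that final page.
	 */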

	while (address < address_end) {
		s32 consumed_size;
		int status;

		ret = transfer_max_buffers(pipe, address, address_end,
					   is_write, last_page,
					   last_page_size, &consumed_size,
					   &status);
		if (ret < 0)
			break;

		if (consumed_size > 0) {
			/*
			 * No matter what the status is, we've transferred
			 * something.
			 */
			count += consumed_size;
			address += consumed_size;
		}
		if (status > 0)
			continue;
		if (status == 0) {
			/* EOF */
			ret = 0;
			break;
		}
		if (count > 0) {
			/*
			 * An error occurred, but we already transferred
			 * something on one of the previous iterations.
			 * The status will be reported back to the caller
			 * on the next call to this function, so just return
			 * what we have transferred so far.
			 */
			if (status != PIPE_ERROR_AGAIN)
				dev_err_ratelimited(pipe->dev->pdev_dev,
					"backend error %d on %s\n",
					status, is_write ? "write" : "read");
			break;
		}

		/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
		    (filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		status = wait_for_host_signal(pipe, is_write);
		if (status < 0)
			return status;
	}

	if (count > 0)
		return count;
	return ret;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
				  size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen,
					/* is_write */ 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
				   const char __user *buffer, size_t bufflen,
				   loff_t *ppos)
{
	/* cast away the const */
	char __user *no_const_buffer = (char __user *)buffer;

	return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
					/* is_write */ 1);
}

static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	__poll_t mask = 0;
	int status;

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
	if (status < 0)
		return -ERESTARTSYS;

	if (status & PIPE_POLL_IN)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (status & PIPE_POLL_OUT)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (status & PIPE_POLL_HUP)
		mask |= EPOLLHUP;
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= EPOLLERR;

	return mask;
}
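
/*
 * Illustrative userspace poll loop (a sketch, not part of this file; fd is
 * assumed to be an open /dev/goldfish_pipe descriptor, matching the
 * miscdevice name registered below):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, buf, sizeof(buf));
 *
 * PIPE_CMD_POLL asks the host which directions are ready, so the EPOLL*
 * bits returned above reflect the host-side pipe state.
 */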

static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
				       u32 id, u32 flags)
{
	struct goldfish_pipe *pipe;

	if (WARN_ON(id >= dev->pipes_capacity))
		return;

	pipe = dev->pipes[id];
	if (!pipe)
		return;
	pipe->signalled_flags |= flags;

	if (pipe->prev_signalled || pipe->next_signalled ||
		dev->first_signalled_pipe == pipe)
		return;	/* already in the list */
	pipe->next_signalled = dev->first_signalled_pipe;
	if (dev->first_signalled_pipe)
		dev->first_signalled_pipe->prev_signalled = pipe;
	dev->first_signalled_pipe = pipe;
}

static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
					  struct goldfish_pipe *pipe)
{
	if (pipe->prev_signalled)
		pipe->prev_signalled->next_signalled = pipe->next_signalled;
	if (pipe->next_signalled)
		pipe->next_signalled->prev_signalled = pipe->prev_signalled;
	if (pipe == dev->first_signalled_pipe)
		dev->first_signalled_pipe = pipe->next_signalled;
	pipe->prev_signalled = NULL;
	pipe->next_signalled = NULL;
}

static struct goldfish_pipe *signalled_pipes_pop_front(
		struct goldfish_pipe_dev *dev, int *wakes)
{
	struct goldfish_pipe *pipe;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	pipe = dev->first_signalled_pipe;
	if (pipe) {
		*wakes = pipe->signalled_flags;
		pipe->signalled_flags = 0;
		/*
		 * This is an optimized version of
		 * signalled_pipes_remove_locked() - we want to make it as
		 * fast as possible to wake the sleeping pipe operations
		 * faster.
		 */
		dev->first_signalled_pipe = pipe->next_signalled;
		if (dev->first_signalled_pipe)
			dev->first_signalled_pipe->prev_signalled = NULL;
		pipe->next_signalled = NULL;
	}

	spin_unlock_irqrestore(&dev->lock, flags);
	return pipe;
}

static irqreturn_t goldfish_interrupt_task(int irq, void *dev_addr)
{
	/* Iterate over the signalled pipes and wake them one by one */
	struct goldfish_pipe_dev *dev = dev_addr;
	struct goldfish_pipe *pipe;
	int wakes;

	while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
		if (wakes & PIPE_WAKE_CLOSED) {
			pipe->flags = 1 << BIT_CLOSED_ON_HOST;
		} else {
			if (wakes & PIPE_WAKE_READ)
				clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
			if (wakes & PIPE_WAKE_WRITE)
				clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
		}
		/*
		 * wake_up_interruptible() implies a write barrier, so don't
		 * explicitly add another one here.
		 */
		wake_up_interruptible(&pipe->wake_queue);
	}
	return IRQ_HANDLED;
}

static void goldfish_pipe_device_deinit(struct platform_device *pdev,
					struct goldfish_pipe_dev *dev);

/*
 * The general idea of the (threaded) interrupt handling:
 *
 *  1. device raises an interrupt if there's at least one signalled pipe
 *  2. IRQ handler reads the signalled pipes and their count from the device
 *     (the device writes them into the shared signalled_pipe_buffers)
 *  3. IRQ handler adds all returned pipes to the device's signalled pipes
 *     list
 *  4. IRQ handler returns IRQ_WAKE_THREAD so goldfish_interrupt_task() runs
 *     in a threaded context and wakes the sleeping pipe operations
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	u32 count;
	u32 i;
	unsigned long flags;
	struct goldfish_pipe_dev *dev = dev_id;

	if (dev->magic != &goldfish_pipe_device_deinit)
		return IRQ_NONE;

	/* Request the signalled pipes from the device */
	spin_lock_irqsave(&dev->lock, flags);

	count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
	if (count == 0) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_NONE;
	}
	if (count > MAX_SIGNALLED_PIPES)
		count = MAX_SIGNALLED_PIPES;

	for (i = 0; i < count; ++i)
		signalled_pipes_add_locked(dev,
			dev->buffers->signalled_pipe_buffers[i].id,
			dev->buffers->signalled_pipe_buffers[i].flags);

	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_WAKE_THREAD;
}

static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
{
	int id;

	for (id = 0; id < dev->pipes_capacity; ++id)
		if (!dev->pipes[id])
			return id;

	{
		/*
		 * Reallocate the array.
		 * Since get_free_pipe_id_locked runs with interrupts
		 * disabled, we don't want to make calls that could sleep.
		 */
		u32 new_capacity = 2 * dev->pipes_capacity;
		struct goldfish_pipe **pipes =
			kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
		if (!pipes)
			return -ENOMEM;
		memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
		kfree(dev->pipes);
		dev->pipes = pipes;
		id = dev->pipes_capacity;
		dev->pipes_capacity = new_capacity;
	}
	return id;
}

/* A helper function to get the instance of goldfish_pipe_dev from file */
static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
{
	struct miscdevice *miscdev = file->private_data;

	return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
}

/**
 *	goldfish_pipe_open - open a channel to the emulator
 *	@inode: inode of the device
 *	@file: file struct of the opener
 *
 *	Create a new pipe link between the emulator and the opening
 *	application. Each new request produces a new pipe.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
	unsigned long flags;
	int id;
	int status;

	/* Allocate new pipe kernel object */
	struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);

	if (!pipe)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Command buffer needs to be allocated on its own page to make sure
	 * it is physically contiguous in the host's address space.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
	pipe->command_buffer =
		(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
	if (!pipe->command_buffer) {
		status = -ENOMEM;
		goto err_pipe;
	}

	spin_lock_irqsave(&dev->lock, flags);

	id = get_free_pipe_id_locked(dev);
	if (id < 0) {
		status = id;
		goto err_id_locked;
	}

	dev->pipes[id] = pipe;
	pipe->id = id;
	pipe->command_buffer->id = id;

	/* Now tell the emulator we're opening a new pipe. */
	dev->buffers->open_command_params.rw_params_max_count =
			MAX_BUFFERS_PER_COMMAND;
	dev->buffers->open_command_params.command_buffer_ptr =
			(u64)(unsigned long)__pa(pipe->command_buffer);
	status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (status < 0)
		goto err_cmd;
	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;

err_cmd:
	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[id] = NULL;
err_id_locked:
	spin_unlock_irqrestore(&dev->lock, flags);
	free_page((unsigned long)pipe->command_buffer);
err_pipe:
	kfree(pipe);
	return status;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	unsigned long flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;

	/* The guest is closing the channel, so tell the emulator right now */
	goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);

	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[pipe->id] = NULL;
	signalled_pipes_remove_locked(dev, pipe);
	spin_unlock_irqrestore(&dev->lock, flags);

	filp->private_data = NULL;
	free_page((unsigned long)pipe->command_buffer);
	kfree(pipe);
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static void init_miscdevice(struct miscdevice *miscdev)
{
	memset(miscdev, 0, sizeof(*miscdev));

	miscdev->minor = MISC_DYNAMIC_MINOR;
	miscdev->name = "goldfish_pipe";
	miscdev->fops = &goldfish_pipe_fops;
}

static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
{
	const unsigned long paddr = __pa(addr);

	writel(upper_32_bits(paddr), porth);
	writel(lower_32_bits(paddr), portl);
}

static int goldfish_pipe_device_init(struct platform_device *pdev,
				     struct goldfish_pipe_dev *dev)
{
	int err;

	err = devm_request_threaded_irq(&pdev->dev, dev->irq,
					goldfish_pipe_interrupt,
					goldfish_interrupt_task,
					IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
		return err;
	}

	init_miscdevice(&dev->miscdev);
	err = misc_register(&dev->miscdev);
	if (err) {
		dev_err(&pdev->dev, "unable to register v2 device\n");
		return err;
	}

	dev->pdev_dev = &pdev->dev;
	dev->first_signalled_pipe = NULL;
	dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
	dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
			     GFP_KERNEL);
	if (!dev->pipes) {
		misc_deregister(&dev->miscdev);
		return -ENOMEM;
	}

	/*
	 * We're going to pass two buffers, open_command_params and
	 * signalled_pipe_buffers, to the host. This means each of those
	 * buffers needs to be contained in a single physical page. The
	 * easiest choice is to just allocate a page and place the buffers
	 * in it.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
	dev->buffers = (struct goldfish_pipe_dev_buffers *)
		__get_free_page(GFP_KERNEL);
	if (!dev->buffers) {
		kfree(dev->pipes);
		misc_deregister(&dev->miscdev);
		return -ENOMEM;
	}

	/* Send the buffer addresses to the host */
	write_pa_addr(&dev->buffers->signalled_pipe_buffers,
		      dev->base + PIPE_REG_SIGNAL_BUFFER,
		      dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);

	writel(MAX_SIGNALLED_PIPES,
	       dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);

	write_pa_addr(&dev->buffers->open_command_params,
		      dev->base + PIPE_REG_OPEN_BUFFER,
		      dev->base + PIPE_REG_OPEN_BUFFER_HIGH);

	platform_set_drvdata(pdev, dev);
	return 0;
}

static void goldfish_pipe_device_deinit(struct platform_device *pdev,
					struct goldfish_pipe_dev *dev)
{
	misc_deregister(&dev->miscdev);
	kfree(dev->pipes);
	free_page((unsigned long)dev->buffers);
}

static int goldfish_pipe_probe(struct platform_device *pdev)
{
	struct resource *r;
	struct goldfish_pipe_dev *dev;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->magic = &goldfish_pipe_device_deinit;
	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (!dev->base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0)
		return dev->irq;

	/*
	 * Exchange the versions with the host device.
	 *
	 * Note: the v1 driver used to not report its version, so we write it
	 * before reading the device version back: this lets the host
	 * implementation detect the old driver (no version write before the
	 * read).
	 */
	writel(PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
	dev->version = readl(dev->base + PIPE_REG_VERSION);
	if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
		return -EINVAL;

	return goldfish_pipe_device_init(pdev, dev);
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);

	goldfish_pipe_device_deinit(pdev, dev);
	return 0;
}

static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
	{ "GFSH0003", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);

static const struct of_device_id goldfish_pipe_of_match[] = {
	{ .compatible = "google,android-pipe", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);

static struct platform_driver goldfish_pipe_driver = {
	.probe = goldfish_pipe_probe,
	.remove = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe",
		.of_match_table = goldfish_pipe_of_match,
		.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
	}
};

module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL v2");