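/*
 * Core maple bus functionality
 *
 * Bus driver for the SEGA Dreamcast maple peripheral bus, which links
 * controllers, VMUs and other peripherals to the console over up to
 * four ports, each supporting one base unit and up to five sub-units.
 */
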
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>

MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

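/* mutex to protect the queue of waiting packets */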
static DEFINE_MUTEX(maple_wlist_lock);

static struct maple_driver maple_unsupported_device;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

static bool checked[MAPLE_PORTS];
static bool empty[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];

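/**
 * maple_driver_register - register a maple bus driver
 * @drv: maple driver to register
 *
 * Registers the passed-in driver on the maple bus, setting the bus
 * field so that devices with a matching function ID can be probed.
 */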
int maple_driver_register(struct maple_driver *drv)
{
	if (!drv)
		return -EINVAL;

	drv->drv.bus = &maple_bus_type;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);

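/**
 * maple_driver_unregister - unregister a maple bus driver
 * @drv: maple driver to unregister
 *
 * Cleans up after maple_driver_register(); to be called from the exit
 * path of any module driver.
 */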
void maple_driver_unregister(struct maple_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_unregister);

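/* set hardware registers to enable the next round of DMA */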
static void maple_dma_reset(void)
{
	__raw_writel(MAPLE_MAGIC, MAPLE_RESET);
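	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */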
	__raw_writel(1, MAPLE_TRIGTYPE);
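	/*
	 * Maple system register:
	 * bits 31-16: timeout in units of 20ns
	 * bit 12: hard trigger
	 * bits 9-8: 00 for 2 Mbps, 01 for 1 Mbps
	 * bits 3-0: delay (in 1.3ms units) between VBLANK and DMA start
	 */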
	__raw_writel(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
	__raw_writel(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
	__raw_writel(1, MAPLE_ENABLE);
}

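/**
 * maple_getcond_callback - set up handling of MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */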
void maple_getcond_callback(struct maple_device *dev,
			    void (*callback) (struct mapleq *mq),
			    unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);

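/* returns non-zero when the DMA unit has finished (bit 0 of MAPLE_STATE clear) */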
static int maple_dma_done(void)
{
	return (__raw_readl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;

	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	kmem_cache_free(maple_queue_cache, mq->recvbuf);
	kfree(mq);
	kfree(mdev);
}

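/**
 * maple_add_packet - add a single instruction to the maple bus queue
 * @mdev: maple device
 * @function: function on the device being queried
 * @command: maple command to add
 * @length: length of the command string (in 32-bit words)
 * @data: remainder of the command string
 */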
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
		     size_t length, void *data)
{
	int ret = 0;
	void *sendbuf = NULL;

	if (length) {
		sendbuf = kcalloc(length, 4, GFP_KERNEL);
		if (!sendbuf) {
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	mutex_lock(&maple_wlist_lock);
	list_add_tail(&mdev->mq->list, &maple_waitq);
	mutex_unlock(&maple_wlist_lock);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);

static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		goto failed_nomem;

	INIT_LIST_HEAD(&mq->list);
	mq->dev = mdev;
	mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	if (!mq->recvbuf)
		goto failed_p2;
	mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);

	return mq;

failed_p2:
	kfree(mq);
failed_nomem:
	dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
		mdev->port, mdev->unit);
	return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

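	/*
	 * zero this out to avoid the kobj subsystem
	 * thinking it has already been registered
	 */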
	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;

	mdev->mq = maple_allocq(mdev);

	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	init_waitqueue_head(&mdev->maple_wait);
	return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
	kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
	kfree(mdev->mq);
	kfree(mdev);
}

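/*
 * Process the command queue into a maple command block;
 * the terminating command has bit 31 of the first long set to 0.
 */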
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = virt_to_phys(mq->recvbuf->buf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);
	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}

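/* build up the command queue */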
static void maple_send(void)
{
	int i, maple_packets = 0;
	struct mapleq *mq, *nmq;

	if (!maple_dma_done())
		return;

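	/* disable DMA */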
	__raw_writel(0, MAPLE_ENABLE);

	if (!list_empty(&maple_sentq))
		goto finish;

	mutex_lock(&maple_wlist_lock);
	if (list_empty(&maple_waitq)) {
		mutex_unlock(&maple_wlist_lock);
		goto finish;
	}

	maple_lastptr = maple_sendbuf;
	maple_sendptr = maple_sendbuf;

	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_del_init(&mq->list);
		list_add_tail(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	mutex_unlock(&maple_wlist_lock);
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			__flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
					     PAGE_SIZE);
	}

finish:
	maple_dma_reset();
}

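/* check if there is a driver registered likely to match this device */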
static int maple_check_matching_driver(struct device_driver *driver,
				       void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
	device_unregister(&mdev->dev);
}

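/* process initial MAPLE_COMMAND_DEVINFO for each device or port */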
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, error;

	recvbuf = mdev->mq->recvbuf->buf;

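	/*
	 * copy the data as individual elements in
	 * case of memory optimisation
	 */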
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	function = be32_to_cpu(mdev->devinfo.function);

	dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
		 mdev->product_name, function, mdev->port, mdev->unit);

	if (function > 0x200) {
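		/* do this silently, as this is not a real device */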
		function = 0;
		mdev->driver = &maple_unsupported_device;
		dev_set_name(&mdev->dev, "%d:0.port", mdev->port);
	} else {
		matched =
		    bus_for_each_drv(&maple_bus_type, NULL, mdev,
				     maple_check_matching_driver);

		if (matched == 0) {
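			/* driver does not exist yet */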
			dev_info(&mdev->dev, "no driver found\n");
			mdev->driver = &maple_unsupported_device;
		}
		dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port,
			     mdev->unit, function);
	}

	mdev->function = function;
	mdev->dev.release = &maple_release_device;

	atomic_set(&mdev->busy, 0);
	error = device_register(&mdev->dev);
	if (error) {
		dev_warn(&mdev->dev, "could not register device at"
			 " (%d, %d), with error 0x%X\n", mdev->port,
			 mdev->unit, error);
		maple_free_dev(mdev);
		mdev = NULL;
		return;
	}
}

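/*
 * if a device has already been registered for the given
 * port and unit then return 1 - this allows identification
 * of which devices need to be attached or detached
 */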
static int check_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
	int add;
	struct maple_device *mdev = to_maple_dev(device);

	if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
	    time_after(jiffies, mdev->when)) {
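		/* bounce if we cannot add */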
		add = maple_add_packet(mdev,
				       be32_to_cpu(mdev->devinfo.function),
				       MAPLE_COMMAND_GETCOND, 1, NULL);
		if (!add)
			mdev->when = jiffies + mdev->interval;
	} else {
		if (time_after(jiffies, maple_pnp_time))
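			/*
			 * Ensure we don't have block reads and devinfo
			 * calls interfering with one another, so flag
			 * the device as busy
			 */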
			if (atomic_read(&mdev->busy) == 0) {
				atomic_set(&mdev->busy, 1);
				maple_add_packet(mdev, 0,
						 MAPLE_COMMAND_DEVINFO, 0, NULL);
			}
	}
	return 0;
}

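/* VBLANK bottom half - implemented via workqueue */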
static void maple_vblank_handler(struct work_struct *work)
{
	int x, locking;
	struct maple_device *mdev;

	if (!maple_dma_done())
		return;

	__raw_writel(0, MAPLE_ENABLE);

	if (!list_empty(&maple_sentq))
		goto finish;

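	/*
	 * Set up essential commands: fetch data and
	 * check that devices are still present
	 */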
	bus_for_each_dev(&maple_bus_type, NULL, NULL,
			 setup_maple_commands);

	if (time_after(jiffies, maple_pnp_time)) {
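		/*
		 * Scan the empty ports - the bus is flaky and may have
		 * mis-reported emptiness
		 */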
		for (x = 0; x < MAPLE_PORTS; x++) {
			if (checked[x] && empty[x]) {
				mdev = baseunits[x];
				if (!mdev)
					break;
				atomic_set(&mdev->busy, 1);
				locking = maple_add_packet(mdev, 0,
							   MAPLE_COMMAND_DEVINFO, 0, NULL);
				if (!locking)
					break;
			}
		}

		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
	}

finish:
	maple_send();
}

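/* handle devices added via hotplug at any point in the process */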
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	ds.port = mdev->port;
	for (k = 0; k < 5; k++) {
		ds.unit = k + 1;
		retval =
		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
				     check_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			atomic_set(&mdev_add->busy, 1);
			maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
					 0, NULL);
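			/* mark that we are checking sub devices */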
			scanning = 1;
		}
		submask = submask >> 1;
	}
}

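/* mark a device as removed */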
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

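/* handle an empty port or hotplug removal */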
static void maple_response_none(struct maple_device *mdev)
{
	maple_clean_submap(mdev);

	if (likely(mdev->unit != 0)) {
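		/*
		 * Block devices play up and give the impression they
		 * have been removed even when still in place, or trip
		 * the mtd layer when they have really gone - this code
		 * traps that eventuality and ensures we aren't
		 * overloaded with useless error messages
		 */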
		if (mdev->can_unload) {
			if (!mdev->can_unload(mdev)) {
				atomic_set(&mdev->busy, 2);
				wake_up(&mdev->maple_wait);
				return;
			}
		}

		dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
			 mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	} else {
		if (!started || !fullscan) {
			if (checked[mdev->port] == false) {
				checked[mdev->port] = true;
				empty[mdev->port] = true;
				dev_info(&mdev->dev, "no devices"
					 " attached to port %d\n",
					 mdev->port);
			}
			return;
		}
	}

	atomic_set(&mdev->busy, 0);
}

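/* preprocess hotplugs or scans */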
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;

	if (!started || (scanning == 2) || !fullscan) {
		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
			checked[mdev->port] = true;
			maple_attach_driver(mdev);
		} else {
			if (mdev->unit != 0)
				maple_attach_driver(mdev);
			if (mdev->unit == 0) {
				empty[mdev->port] = false;
				maple_attach_driver(mdev);
			}
		}
	}
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}

static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
{
	if (mdev->fileerr_handler) {
		mdev->fileerr_handler(mdev, recvbuf);
		return;
	} else
		dev_warn(&mdev->dev, "device at (%d, %d) reports"
			 " file error 0x%X\n", mdev->port, mdev->unit,
			 ((int *)recvbuf)[1]);
}

static void maple_port_rescan(void)
{
	int i;
	struct maple_device *mdev;

	fullscan = 1;
	for (i = 0; i < MAPLE_PORTS; i++) {
		if (checked[i] == false) {
			fullscan = 0;
			mdev = baseunits[i];
			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
					 0, NULL);
		}
	}
}

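/* maple dma end bottom half - implemented via workqueue */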
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *mdev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;

	__raw_writel(0, MAPLE_ENABLE);

	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			mdev = mq->dev;
			recvbuf = mq->recvbuf->buf;
			__flush_invalidate_region(sh_cacheop_vaddr(recvbuf),
						  0x400);
			code = recvbuf[0];
			kfree(mq->sendbuf);
			list_del_init(&mq->list);
			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(mdev);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(mdev, recvbuf);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (mdev->callback)
					mdev->callback(mq);
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			case MAPLE_RESPONSE_FILEERR:
				maple_response_fileerr(mdev, recvbuf);
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				dev_warn(&mdev->dev, "non-fatal error"
					 " 0x%X at (%d, %d)\n", code,
					 mdev->port, mdev->unit);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				dev_notice(&mdev->dev, "extended device"
					   " information request for (%d, %d)"
					   " but call is not supported\n",
					   mdev->port, mdev->unit);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_OK:
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			default:
				break;
			}
		}

		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;

		if (!fullscan)
			maple_port_rescan();

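		/* mark that we have been through the first scan */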
		started = 1;
	}
	maple_send();
}

static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
{
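	/* load everything into the bottom half */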
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
			   IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
			   IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
}

static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}

static int maple_match_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv = to_maple_driver(drvptr);
	struct maple_device *maple_dev = to_maple_dev(devptr);

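	/* trap the empty port case */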
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_unsupported_device = {
	.drv = {
		.name = "maple_unsupported_device",
		.bus = &maple_bus_type,
	},
};

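/* maple_bus_type - core maple bus structure */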
struct bus_type maple_bus_type = {
	.name = "maple",
	.match = maple_match_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.init_name = "maple",
	.release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];

	__raw_writel(0, MAPLE_ENABLE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_unsupported_device.drv);
	if (retval)
		goto cleanup_bus;

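	/* allocate memory for maple bus dma */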
	retval = maple_get_dma_buffer();
	if (retval) {
		dev_err(&maple_bus, "failed to allocate DMA buffers\n");
		goto cleanup_basic;
	}

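	/* set up DMA interrupt handler */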
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		dev_err(&maple_bus, "bus failed to grab maple DMA IRQ\n");
		goto cleanup_dma;
	}

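	/* set up VBLANK interrupt handler */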
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);

	if (!maple_queue_cache) {
		retval = -ENOMEM;
		goto cleanup_bothirqs;
	}

	INIT_LIST_HEAD(&maple_waitq);
	INIT_LIST_HEAD(&maple_sentq);

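	/* setup maple ports */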
	for (i = 0; i < MAPLE_PORTS; i++) {
		checked[i] = false;
		empty[i] = false;
		mdev[i] = maple_alloc_dev(i, 0);
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			retval = -ENOMEM;
			goto cleanup_cache;
		}
		baseunits[i] = mdev[i];
		atomic_set(&mdev[i]->busy, 1);
		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
		subdevice_map[i] = 0;
	}

	maple_pnp_time = jiffies + HZ;
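	/* prepare initial queue */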
	maple_send();
	dev_info(&maple_bus, "bus core now registered\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	free_irq(HW_EVENT_VSYNC, 0);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, 0);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_unsupported_device.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_ERR "Maple bus registration failed\n");
	return retval;
}

fs_initcall(maple_bus_init);