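/*
 * pktcdvd - packet writing block device driver.
 *
 * Maps a pktcdvd block device on top of a CD-RW/DVD drive and turns
 * arbitrary write requests into fixed-size packet writes: incoming bios
 * are collected per zone, any missing frames are read in, and the
 * assembled packet is written back by a per-device worker thread.
 */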
0047 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0048
0049 #include <linux/pktcdvd.h>
0050 #include <linux/module.h>
0051 #include <linux/types.h>
0052 #include <linux/kernel.h>
0053 #include <linux/compat.h>
0054 #include <linux/kthread.h>
0055 #include <linux/errno.h>
0056 #include <linux/spinlock.h>
0057 #include <linux/file.h>
0058 #include <linux/proc_fs.h>
0059 #include <linux/seq_file.h>
0060 #include <linux/miscdevice.h>
0061 #include <linux/freezer.h>
0062 #include <linux/mutex.h>
0063 #include <linux/slab.h>
0064 #include <linux/backing-dev.h>
0065 #include <scsi/scsi_cmnd.h>
0066 #include <scsi/scsi_ioctl.h>
0067 #include <scsi/scsi.h>
0068 #include <linux/debugfs.h>
0069 #include <linux/device.h>
0070 #include <linux/nospec.h>
0071 #include <linux/uaccess.h>
0072
0073 #define DRIVER_NAME "pktcdvd"
0074
0075 #define pkt_err(pd, fmt, ...) \
0076 pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
0077 #define pkt_notice(pd, fmt, ...) \
0078 pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
0079 #define pkt_info(pd, fmt, ...) \
0080 pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
0081
0082 #define pkt_dbg(level, pd, fmt, ...) \
0083 do { \
0084 if (level == 2 && PACKET_DEBUG >= 2) \
0085 pr_notice("%s: %s():" fmt, \
0086 pd->name, __func__, ##__VA_ARGS__); \
0087 else if (level == 1 && PACKET_DEBUG >= 1) \
0088 pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
0089 } while (0)
0090
0091 #define MAX_SPEED 0xffff
0092
0093 static DEFINE_MUTEX(pktcdvd_mutex);
0094 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
0095 static struct proc_dir_entry *pkt_proc;
0096 static int pktdev_major;
0097 static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
0098 static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
0099 static struct mutex ctl_mutex;
0100 static mempool_t psd_pool;
0101 static struct bio_set pkt_bio_set;
0102
0103 static struct class *class_pktcdvd = NULL;
0104 static struct dentry *pkt_debugfs_root = NULL;
0105
0106
0107 static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
0108 static int pkt_remove_dev(dev_t pkt_dev);
0109 static int pkt_seq_show(struct seq_file *m, void *p);
0110
0111 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
0112 {
0113 return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
0114 }
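
/*
 * sysfs interface: per-device attributes below
 * /sys/class/pktcdvd/<name>/stat/ and /sys/class/pktcdvd/<name>/write_queue/.
 */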
0132 static ssize_t packets_started_show(struct device *dev,
0133 struct device_attribute *attr, char *buf)
0134 {
0135 struct pktcdvd_device *pd = dev_get_drvdata(dev);
0136
0137 return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
0138 }
0139 static DEVICE_ATTR_RO(packets_started);
0140
0141 static ssize_t packets_finished_show(struct device *dev,
0142 struct device_attribute *attr, char *buf)
0143 {
0144 struct pktcdvd_device *pd = dev_get_drvdata(dev);
0145
0146 return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
0147 }
0148 static DEVICE_ATTR_RO(packets_finished);
0149
0150 static ssize_t kb_written_show(struct device *dev,
0151 struct device_attribute *attr, char *buf)
0152 {
0153 struct pktcdvd_device *pd = dev_get_drvdata(dev);
0154
0155 return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
0156 }
0157 static DEVICE_ATTR_RO(kb_written);
0158
0159 static ssize_t kb_read_show(struct device *dev,
0160 struct device_attribute *attr, char *buf)
0161 {
0162 struct pktcdvd_device *pd = dev_get_drvdata(dev);
0163
0164 return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
0165 }
0166 static DEVICE_ATTR_RO(kb_read);
0167
0168 static ssize_t kb_read_gather_show(struct device *dev,
0169 struct device_attribute *attr, char *buf)
0170 {
0171 struct pktcdvd_device *pd = dev_get_drvdata(dev);
0172
0173 return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
0174 }
0175 static DEVICE_ATTR_RO(kb_read_gather);
0176
0177 static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
0178 const char *buf, size_t len)
0179 {
0180 struct pktcdvd_device *pd = dev_get_drvdata(dev);
0181
0182 if (len > 0) {
0183 pd->stats.pkt_started = 0;
0184 pd->stats.pkt_ended = 0;
0185 pd->stats.secs_w = 0;
0186 pd->stats.secs_rg = 0;
0187 pd->stats.secs_r = 0;
0188 }
0189 return len;
0190 }
0191 static DEVICE_ATTR_WO(reset);
0192
0193 static struct attribute *pkt_stat_attrs[] = {
0194 &dev_attr_packets_finished.attr,
0195 &dev_attr_packets_started.attr,
0196 &dev_attr_kb_read.attr,
0197 &dev_attr_kb_written.attr,
0198 &dev_attr_kb_read_gather.attr,
0199 &dev_attr_reset.attr,
0200 NULL,
0201 };
0202
0203 static const struct attribute_group pkt_stat_group = {
0204 .name = "stat",
0205 .attrs = pkt_stat_attrs,
0206 };
0207
0208 static ssize_t size_show(struct device *dev,
0209 struct device_attribute *attr, char *buf)
0210 {
0211 struct pktcdvd_device *pd = dev_get_drvdata(dev);
0212 int n;
0213
0214 spin_lock(&pd->lock);
0215 n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
0216 spin_unlock(&pd->lock);
0217 return n;
0218 }
0219 static DEVICE_ATTR_RO(size);
0220
0221 static void init_write_congestion_marks(int* lo, int* hi)
0222 {
0223 if (*hi > 0) {
0224 *hi = max(*hi, 500);
0225 *hi = min(*hi, 1000000);
0226 if (*lo <= 0)
0227 *lo = *hi - 100;
0228 else {
0229 *lo = min(*lo, *hi - 100);
0230 *lo = max(*lo, 100);
0231 }
0232 } else {
0233 *hi = -1;
0234 *lo = -1;
0235 }
0236 }
0237
0238 static ssize_t congestion_off_show(struct device *dev,
0239 struct device_attribute *attr, char *buf)
0240 {
0241 struct pktcdvd_device *pd = dev_get_drvdata(dev);
0242 int n;
0243
0244 spin_lock(&pd->lock);
0245 n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
0246 spin_unlock(&pd->lock);
0247 return n;
0248 }
0249
0250 static ssize_t congestion_off_store(struct device *dev,
0251 struct device_attribute *attr,
0252 const char *buf, size_t len)
0253 {
0254 struct pktcdvd_device *pd = dev_get_drvdata(dev);
0255 int val;
0256
0257 if (sscanf(buf, "%d", &val) == 1) {
0258 spin_lock(&pd->lock);
0259 pd->write_congestion_off = val;
0260 init_write_congestion_marks(&pd->write_congestion_off,
0261 &pd->write_congestion_on);
0262 spin_unlock(&pd->lock);
0263 }
0264 return len;
0265 }
0266 static DEVICE_ATTR_RW(congestion_off);
0267
0268 static ssize_t congestion_on_show(struct device *dev,
0269 struct device_attribute *attr, char *buf)
0270 {
0271 struct pktcdvd_device *pd = dev_get_drvdata(dev);
0272 int n;
0273
0274 spin_lock(&pd->lock);
0275 n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
0276 spin_unlock(&pd->lock);
0277 return n;
0278 }
0279
0280 static ssize_t congestion_on_store(struct device *dev,
0281 struct device_attribute *attr,
0282 const char *buf, size_t len)
0283 {
0284 struct pktcdvd_device *pd = dev_get_drvdata(dev);
0285 int val;
0286
0287 if (sscanf(buf, "%d", &val) == 1) {
0288 spin_lock(&pd->lock);
0289 pd->write_congestion_on = val;
0290 init_write_congestion_marks(&pd->write_congestion_off,
0291 &pd->write_congestion_on);
0292 spin_unlock(&pd->lock);
0293 }
0294 return len;
0295 }
0296 static DEVICE_ATTR_RW(congestion_on);
0297
0298 static struct attribute *pkt_wq_attrs[] = {
0299 &dev_attr_congestion_on.attr,
0300 &dev_attr_congestion_off.attr,
0301 &dev_attr_size.attr,
0302 NULL,
0303 };
0304
0305 static const struct attribute_group pkt_wq_group = {
0306 .name = "write_queue",
0307 .attrs = pkt_wq_attrs,
0308 };
0309
0310 static const struct attribute_group *pkt_groups[] = {
0311 &pkt_stat_group,
0312 &pkt_wq_group,
0313 NULL,
0314 };
0315
0316 static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
0317 {
0318 if (class_pktcdvd) {
0319 pd->dev = device_create_with_groups(class_pktcdvd, NULL,
0320 MKDEV(0, 0), pd, pkt_groups,
0321 "%s", pd->name);
0322 if (IS_ERR(pd->dev))
0323 pd->dev = NULL;
0324 }
0325 }
0326
0327 static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
0328 {
0329 if (class_pktcdvd)
0330 device_unregister(pd->dev);
0331 }
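
/*
 * Control files of the pktcdvd class itself:
 * /sys/class/pktcdvd/add, /sys/class/pktcdvd/remove and
 * /sys/class/pktcdvd/device_map.
 */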
0341 static void class_pktcdvd_release(struct class *cls)
0342 {
0343 kfree(cls);
0344 }
0345
0346 static ssize_t device_map_show(struct class *c, struct class_attribute *attr,
0347 char *data)
0348 {
0349 int n = 0;
0350 int idx;
0351 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
0352 for (idx = 0; idx < MAX_WRITERS; idx++) {
0353 struct pktcdvd_device *pd = pkt_devs[idx];
0354 if (!pd)
0355 continue;
0356 n += sprintf(data+n, "%s %u:%u %u:%u\n",
0357 pd->name,
0358 MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
0359 MAJOR(pd->bdev->bd_dev),
0360 MINOR(pd->bdev->bd_dev));
0361 }
0362 mutex_unlock(&ctl_mutex);
0363 return n;
0364 }
0365 static CLASS_ATTR_RO(device_map);
0366
0367 static ssize_t add_store(struct class *c, struct class_attribute *attr,
0368 const char *buf, size_t count)
0369 {
0370 unsigned int major, minor;
0371
0372 if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
0373
0374 if (!try_module_get(THIS_MODULE))
0375 return -ENODEV;
0376
0377 pkt_setup_dev(MKDEV(major, minor), NULL);
0378
0379 module_put(THIS_MODULE);
0380
0381 return count;
0382 }
0383
0384 return -EINVAL;
0385 }
0386 static CLASS_ATTR_WO(add);
0387
0388 static ssize_t remove_store(struct class *c, struct class_attribute *attr,
0389 const char *buf, size_t count)
0390 {
0391 unsigned int major, minor;
0392 if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
0393 pkt_remove_dev(MKDEV(major, minor));
0394 return count;
0395 }
0396 return -EINVAL;
0397 }
0398 static CLASS_ATTR_WO(remove);
0399
0400 static struct attribute *class_pktcdvd_attrs[] = {
0401 &class_attr_add.attr,
0402 &class_attr_remove.attr,
0403 &class_attr_device_map.attr,
0404 NULL,
0405 };
0406 ATTRIBUTE_GROUPS(class_pktcdvd);
0407
0408 static int pkt_sysfs_init(void)
0409 {
0410 int ret = 0;
0411
0412
0413
0414
0415
0416 class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
0417 if (!class_pktcdvd)
0418 return -ENOMEM;
0419 class_pktcdvd->name = DRIVER_NAME;
0420 class_pktcdvd->owner = THIS_MODULE;
0421 class_pktcdvd->class_release = class_pktcdvd_release;
0422 class_pktcdvd->class_groups = class_pktcdvd_groups;
0423 ret = class_register(class_pktcdvd);
0424 if (ret) {
0425 kfree(class_pktcdvd);
0426 class_pktcdvd = NULL;
0427 pr_err("failed to create class pktcdvd\n");
0428 return ret;
0429 }
0430 return 0;
0431 }
0432
0433 static void pkt_sysfs_cleanup(void)
0434 {
0435 if (class_pktcdvd)
0436 class_destroy(class_pktcdvd);
0437 class_pktcdvd = NULL;
0438 }
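
/*
 * debugfs interface: a per-device "info" file below
 * /sys/kernel/debug/pktcdvd/<name>/.
 */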
0448 static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
0449 {
0450 return pkt_seq_show(m, p);
0451 }
0452
0453 static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
0454 {
0455 return single_open(file, pkt_debugfs_seq_show, inode->i_private);
0456 }
0457
0458 static const struct file_operations debug_fops = {
0459 .open = pkt_debugfs_fops_open,
0460 .read = seq_read,
0461 .llseek = seq_lseek,
0462 .release = single_release,
0463 .owner = THIS_MODULE,
0464 };
0465
0466 static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
0467 {
0468 if (!pkt_debugfs_root)
0469 return;
0470 pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
0471 if (!pd->dfs_d_root)
0472 return;
0473
0474 pd->dfs_f_info = debugfs_create_file("info", 0444,
0475 pd->dfs_d_root, pd, &debug_fops);
0476 }
0477
0478 static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
0479 {
0480 if (!pkt_debugfs_root)
0481 return;
0482 debugfs_remove(pd->dfs_f_info);
0483 debugfs_remove(pd->dfs_d_root);
0484 pd->dfs_f_info = NULL;
0485 pd->dfs_d_root = NULL;
0486 }
0487
0488 static void pkt_debugfs_init(void)
0489 {
0490 pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
0491 }
0492
0493 static void pkt_debugfs_cleanup(void)
0494 {
0495 debugfs_remove(pkt_debugfs_root);
0496 pkt_debugfs_root = NULL;
0497 }
0498
0499
0500
0501
0502 static void pkt_bio_finished(struct pktcdvd_device *pd)
0503 {
0504 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
0505 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
0506 pkt_dbg(2, pd, "queue empty\n");
0507 atomic_set(&pd->iosched.attention, 1);
0508 wake_up(&pd->wqueue);
0509 }
0510 }
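
/*
 * Allocate a packet_data struct: the write bio, the per-frame read bios
 * and the pages that back one packet.
 */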
0515 static struct packet_data *pkt_alloc_packet_data(int frames)
0516 {
0517 int i;
0518 struct packet_data *pkt;
0519
0520 pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
0521 if (!pkt)
0522 goto no_pkt;
0523
0524 pkt->frames = frames;
0525 pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
0526 if (!pkt->w_bio)
0527 goto no_bio;
0528
0529 for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
0530 pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
0531 if (!pkt->pages[i])
0532 goto no_page;
0533 }
0534
0535 spin_lock_init(&pkt->lock);
0536 bio_list_init(&pkt->orig_bios);
0537
0538 for (i = 0; i < frames; i++) {
0539 pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
0540 if (!pkt->r_bios[i])
0541 goto no_rd_bio;
0542 }
0543
0544 return pkt;
0545
0546 no_rd_bio:
0547 for (i = 0; i < frames; i++)
0548 kfree(pkt->r_bios[i]);
0549 no_page:
0550 for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
0551 if (pkt->pages[i])
0552 __free_page(pkt->pages[i]);
0553 kfree(pkt->w_bio);
0554 no_bio:
0555 kfree(pkt);
0556 no_pkt:
0557 return NULL;
0558 }
0559
0560
0561
0562
0563 static void pkt_free_packet_data(struct packet_data *pkt)
0564 {
0565 int i;
0566
0567 for (i = 0; i < pkt->frames; i++)
0568 kfree(pkt->r_bios[i]);
0569 for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
0570 __free_page(pkt->pages[i]);
0571 kfree(pkt->w_bio);
0572 kfree(pkt);
0573 }
0574
0575 static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
0576 {
0577 struct packet_data *pkt, *next;
0578
0579 BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
0580
0581 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
0582 pkt_free_packet_data(pkt);
0583 }
0584 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
0585 }
0586
0587 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
0588 {
0589 struct packet_data *pkt;
0590
0591 BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
0592
0593 while (nr_packets > 0) {
0594 pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
0595 if (!pkt) {
0596 pkt_shrink_pktlist(pd);
0597 return 0;
0598 }
0599 pkt->id = nr_packets;
0600 pkt->pd = pd;
0601 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
0602 nr_packets--;
0603 }
0604 return 1;
0605 }
0606
0607 static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
0608 {
0609 struct rb_node *n = rb_next(&node->rb_node);
0610 if (!n)
0611 return NULL;
0612 return rb_entry(n, struct pkt_rb_node, rb_node);
0613 }
0614
0615 static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
0616 {
0617 rb_erase(&node->rb_node, &pd->bio_queue);
0618 mempool_free(node, &pd->rb_pool);
0619 pd->bio_queue_size--;
0620 BUG_ON(pd->bio_queue_size < 0);
0621 }
0622
0623
0624
0625
0626 static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
0627 {
0628 struct rb_node *n = pd->bio_queue.rb_node;
0629 struct rb_node *next;
0630 struct pkt_rb_node *tmp;
0631
0632 if (!n) {
0633 BUG_ON(pd->bio_queue_size > 0);
0634 return NULL;
0635 }
0636
0637 for (;;) {
0638 tmp = rb_entry(n, struct pkt_rb_node, rb_node);
0639 if (s <= tmp->bio->bi_iter.bi_sector)
0640 next = n->rb_left;
0641 else
0642 next = n->rb_right;
0643 if (!next)
0644 break;
0645 n = next;
0646 }
0647
0648 if (s > tmp->bio->bi_iter.bi_sector) {
0649 tmp = pkt_rbtree_next(tmp);
0650 if (!tmp)
0651 return NULL;
0652 }
0653 BUG_ON(s > tmp->bio->bi_iter.bi_sector);
0654 return tmp;
0655 }
0656
0657
0658
0659
0660 static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
0661 {
0662 struct rb_node **p = &pd->bio_queue.rb_node;
0663 struct rb_node *parent = NULL;
0664 sector_t s = node->bio->bi_iter.bi_sector;
0665 struct pkt_rb_node *tmp;
0666
0667 while (*p) {
0668 parent = *p;
0669 tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
0670 if (s < tmp->bio->bi_iter.bi_sector)
0671 p = &(*p)->rb_left;
0672 else
0673 p = &(*p)->rb_right;
0674 }
0675 rb_link_node(&node->rb_node, parent, p);
0676 rb_insert_color(&node->rb_node, &pd->bio_queue);
0677 pd->bio_queue_size++;
0678 }
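
/*
 * Send a packet command to the underlying SCSI CD/DVD device and wait
 * for it to complete.
 */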
0684 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
0685 {
0686 struct request_queue *q = bdev_get_queue(pd->bdev);
0687 struct scsi_cmnd *scmd;
0688 struct request *rq;
0689 int ret = 0;
0690
0691 rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
0692 REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
0693 if (IS_ERR(rq))
0694 return PTR_ERR(rq);
0695 scmd = blk_mq_rq_to_pdu(rq);
0696
0697 if (cgc->buflen) {
0698 ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
0699 GFP_NOIO);
0700 if (ret)
0701 goto out;
0702 }
0703
0704 scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
0705 memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);
0706
0707 rq->timeout = 60*HZ;
0708 if (cgc->quiet)
0709 rq->rq_flags |= RQF_QUIET;
0710
0711 blk_execute_rq(rq, false);
0712 if (scmd->result)
0713 ret = -EIO;
0714 out:
0715 blk_mq_free_request(rq);
0716 return ret;
0717 }
0718
0719 static const char *sense_key_string(__u8 index)
0720 {
0721 static const char * const info[] = {
0722 "No sense", "Recovered error", "Not ready",
0723 "Medium error", "Hardware error", "Illegal request",
0724 "Unit attention", "Data protect", "Blank check",
0725 };
0726
0727 return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
0728 }
0729
0730
0731
0732
0733
0734 static void pkt_dump_sense(struct pktcdvd_device *pd,
0735 struct packet_command *cgc)
0736 {
0737 struct scsi_sense_hdr *sshdr = cgc->sshdr;
0738
0739 if (sshdr)
0740 pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
0741 CDROM_PACKET_SIZE, cgc->cmd,
0742 sshdr->sense_key, sshdr->asc, sshdr->ascq,
0743 sense_key_string(sshdr->sense_key));
0744 else
0745 pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
0746 }
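
/*
 * Flush the drive's write cache to the media.
 */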
0751 static int pkt_flush_cache(struct pktcdvd_device *pd)
0752 {
0753 struct packet_command cgc;
0754
0755 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
0756 cgc.cmd[0] = GPCMD_FLUSH_CACHE;
0757 cgc.quiet = 1;
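
	/*
	 * The IMMED bit (cgc.cmd[1] bit 1) is left disabled: an immediate
	 * flush would return sooner, but waiting for completion is safer.
	 */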
0763 #if 0
0764 cgc.cmd[1] = 1 << 1;
0765 #endif
0766 return pkt_generic_packet(pd, &cgc);
0767 }
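
/*
 * Set the drive's read and write speeds; both values are in kB/s.
 */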
0772 static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
0773 unsigned write_speed, unsigned read_speed)
0774 {
0775 struct packet_command cgc;
0776 struct scsi_sense_hdr sshdr;
0777 int ret;
0778
0779 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
0780 cgc.sshdr = &sshdr;
0781 cgc.cmd[0] = GPCMD_SET_SPEED;
0782 cgc.cmd[2] = (read_speed >> 8) & 0xff;
0783 cgc.cmd[3] = read_speed & 0xff;
0784 cgc.cmd[4] = (write_speed >> 8) & 0xff;
0785 cgc.cmd[5] = write_speed & 0xff;
0786
0787 ret = pkt_generic_packet(pd, &cgc);
0788 if (ret)
0789 pkt_dump_sense(pd, &cgc);
0790
0791 return ret;
0792 }
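
/*
 * Queue a bio for processing by the packet I/O scheduler; the worker
 * thread picks it up via pkt_iosched_process_queue().
 */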
0798 static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
0799 {
0800 spin_lock(&pd->iosched.lock);
0801 if (bio_data_dir(bio) == READ)
0802 bio_list_add(&pd->iosched.read_queue, bio);
0803 else
0804 bio_list_add(&pd->iosched.write_queue, bio);
0805 spin_unlock(&pd->iosched.lock);
0806
0807 atomic_set(&pd->iosched.attention, 1);
0808 wake_up(&pd->wqueue);
0809 }
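
/*
 * Process the queued read/write requests. Extra care is taken for CD-RW
 * media: the drive cache is flushed before switching from writing to
 * reading, switches between reading and writing are kept to a minimum,
 * and the read speed is raised to the maximum only after a long enough
 * run of reads (HI_SPEED_SWITCH) so that mixed workloads keep matching
 * read and write speeds.
 */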
0827 static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
0828 {
0829
0830 if (atomic_read(&pd->iosched.attention) == 0)
0831 return;
0832 atomic_set(&pd->iosched.attention, 0);
0833
0834 for (;;) {
0835 struct bio *bio;
0836 int reads_queued, writes_queued;
0837
0838 spin_lock(&pd->iosched.lock);
0839 reads_queued = !bio_list_empty(&pd->iosched.read_queue);
0840 writes_queued = !bio_list_empty(&pd->iosched.write_queue);
0841 spin_unlock(&pd->iosched.lock);
0842
0843 if (!reads_queued && !writes_queued)
0844 break;
0845
0846 if (pd->iosched.writing) {
0847 int need_write_seek = 1;
0848 spin_lock(&pd->iosched.lock);
0849 bio = bio_list_peek(&pd->iosched.write_queue);
0850 spin_unlock(&pd->iosched.lock);
0851 if (bio && (bio->bi_iter.bi_sector ==
0852 pd->iosched.last_write))
0853 need_write_seek = 0;
0854 if (need_write_seek && reads_queued) {
0855 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
0856 pkt_dbg(2, pd, "write, waiting\n");
0857 break;
0858 }
0859 pkt_flush_cache(pd);
0860 pd->iosched.writing = 0;
0861 }
0862 } else {
0863 if (!reads_queued && writes_queued) {
0864 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
0865 pkt_dbg(2, pd, "read, waiting\n");
0866 break;
0867 }
0868 pd->iosched.writing = 1;
0869 }
0870 }
0871
0872 spin_lock(&pd->iosched.lock);
0873 if (pd->iosched.writing)
0874 bio = bio_list_pop(&pd->iosched.write_queue);
0875 else
0876 bio = bio_list_pop(&pd->iosched.read_queue);
0877 spin_unlock(&pd->iosched.lock);
0878
0879 if (!bio)
0880 continue;
0881
0882 if (bio_data_dir(bio) == READ)
0883 pd->iosched.successive_reads +=
0884 bio->bi_iter.bi_size >> 10;
0885 else {
0886 pd->iosched.successive_reads = 0;
0887 pd->iosched.last_write = bio_end_sector(bio);
0888 }
0889 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
0890 if (pd->read_speed == pd->write_speed) {
0891 pd->read_speed = MAX_SPEED;
0892 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
0893 }
0894 } else {
0895 if (pd->read_speed != pd->write_speed) {
0896 pd->read_speed = pd->write_speed;
0897 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
0898 }
0899 }
0900
0901 atomic_inc(&pd->cdrw.pending_bios);
0902 submit_bio_noacct(bio);
0903 }
0904 }
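
/*
 * Special care is needed if the underlying block device has a small
 * max_segments limit; fall back to merging segments at write time when
 * one segment per frame cannot be used.
 */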
0910 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
0911 {
0912 if ((pd->settings.size << 9) / CD_FRAMESIZE
0913 <= queue_max_segments(q)) {
0914
0915
0916
0917 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
0918 return 0;
0919 } else if ((pd->settings.size << 9) / PAGE_SIZE
0920 <= queue_max_segments(q)) {
0921
0922
0923
0924
0925 set_bit(PACKET_MERGE_SEGS, &pd->flags);
0926 return 0;
0927 } else {
0928 pkt_err(pd, "cdrom max_phys_segments too small\n");
0929 return -EIO;
0930 }
0931 }
0932
0933 static void pkt_end_io_read(struct bio *bio)
0934 {
0935 struct packet_data *pkt = bio->bi_private;
0936 struct pktcdvd_device *pd = pkt->pd;
0937 BUG_ON(!pd);
0938
0939 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
0940 bio, (unsigned long long)pkt->sector,
0941 (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);
0942
0943 if (bio->bi_status)
0944 atomic_inc(&pkt->io_errors);
0945 bio_uninit(bio);
0946 if (atomic_dec_and_test(&pkt->io_wait)) {
0947 atomic_inc(&pkt->run_sm);
0948 wake_up(&pd->wqueue);
0949 }
0950 pkt_bio_finished(pd);
0951 }
0952
0953 static void pkt_end_io_packet_write(struct bio *bio)
0954 {
0955 struct packet_data *pkt = bio->bi_private;
0956 struct pktcdvd_device *pd = pkt->pd;
0957 BUG_ON(!pd);
0958
0959 pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
0960
0961 pd->stats.pkt_ended++;
0962
0963 bio_uninit(bio);
0964 pkt_bio_finished(pd);
0965 atomic_dec(&pkt->io_wait);
0966 atomic_inc(&pkt->run_sm);
0967 wake_up(&pd->wqueue);
0968 }
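
/*
 * Schedule reads for the frames of a packet that are not fully covered
 * by the queued write bios.
 */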
0973 static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
0974 {
0975 int frames_read = 0;
0976 struct bio *bio;
0977 int f;
0978 char written[PACKET_MAX_SIZE];
0979
0980 BUG_ON(bio_list_empty(&pkt->orig_bios));
0981
0982 atomic_set(&pkt->io_wait, 0);
0983 atomic_set(&pkt->io_errors, 0);
0984
0985
0986
0987
0988 memset(written, 0, sizeof(written));
0989 spin_lock(&pkt->lock);
0990 bio_list_for_each(bio, &pkt->orig_bios) {
0991 int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
0992 (CD_FRAMESIZE >> 9);
0993 int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
0994 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
0995 BUG_ON(first_frame < 0);
0996 BUG_ON(first_frame + num_frames > pkt->frames);
0997 for (f = first_frame; f < first_frame + num_frames; f++)
0998 written[f] = 1;
0999 }
1000 spin_unlock(&pkt->lock);
1001
1002 if (pkt->cache_valid) {
1003 pkt_dbg(2, pd, "zone %llx cached\n",
1004 (unsigned long long)pkt->sector);
1005 goto out_account;
1006 }
1007
1008
1009
1010
1011 for (f = 0; f < pkt->frames; f++) {
1012 int p, offset;
1013
1014 if (written[f])
1015 continue;
1016
1017 bio = pkt->r_bios[f];
1018 bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
1019 bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
1020 bio->bi_end_io = pkt_end_io_read;
1021 bio->bi_private = pkt;
1022
1023 p = (f * CD_FRAMESIZE) / PAGE_SIZE;
1024 offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
1025 pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
1026 f, pkt->pages[p], offset);
1027 if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
1028 BUG();
1029
1030 atomic_inc(&pkt->io_wait);
1031 pkt_queue_bio(pd, bio);
1032 frames_read++;
1033 }
1034
1035 out_account:
1036 pkt_dbg(2, pd, "need %d frames for zone %llx\n",
1037 frames_read, (unsigned long long)pkt->sector);
1038 pd->stats.pkt_started++;
1039 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
1040 }
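
/*
 * Take a packet from the free list: one that already matches the zone if
 * possible, otherwise the least recently used one.
 */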
1046 static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
1047 {
1048 struct packet_data *pkt;
1049
1050 list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
1051 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
1052 list_del_init(&pkt->list);
1053 if (pkt->sector != zone)
1054 pkt->cache_valid = 0;
1055 return pkt;
1056 }
1057 }
1058 BUG();
1059 return NULL;
1060 }
1061
1062 static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1063 {
1064 if (pkt->cache_valid) {
1065 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
1066 } else {
1067 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
1068 }
1069 }
1070
1071 static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
1072 {
1073 #if PACKET_DEBUG > 1
1074 static const char *state_name[] = {
1075 "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
1076 };
1077 enum packet_data_state old_state = pkt->state;
1078 pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
1079 pkt->id, (unsigned long long)pkt->sector,
1080 state_name[old_state], state_name[state]);
1081 #endif
1082 pkt->state = state;
1083 }
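
/*
 * Scan the work queue and, if an idle zone with queued bios is found,
 * start a new packet for it. Returns non-zero if a packet was started.
 */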
1089 static int pkt_handle_queue(struct pktcdvd_device *pd)
1090 {
1091 struct packet_data *pkt, *p;
1092 struct bio *bio = NULL;
1093 sector_t zone = 0;
1094 struct pkt_rb_node *node, *first_node;
1095 struct rb_node *n;
1096
1097 atomic_set(&pd->scan_queue, 0);
1098
1099 if (list_empty(&pd->cdrw.pkt_free_list)) {
1100 pkt_dbg(2, pd, "no pkt\n");
1101 return 0;
1102 }
1103
1104
1105
1106
1107 spin_lock(&pd->lock);
1108 first_node = pkt_rbtree_find(pd, pd->current_sector);
1109 if (!first_node) {
1110 n = rb_first(&pd->bio_queue);
1111 if (n)
1112 first_node = rb_entry(n, struct pkt_rb_node, rb_node);
1113 }
1114 node = first_node;
1115 while (node) {
1116 bio = node->bio;
1117 zone = get_zone(bio->bi_iter.bi_sector, pd);
1118 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
1119 if (p->sector == zone) {
1120 bio = NULL;
1121 goto try_next_bio;
1122 }
1123 }
1124 break;
1125 try_next_bio:
1126 node = pkt_rbtree_next(node);
1127 if (!node) {
1128 n = rb_first(&pd->bio_queue);
1129 if (n)
1130 node = rb_entry(n, struct pkt_rb_node, rb_node);
1131 }
1132 if (node == first_node)
1133 node = NULL;
1134 }
1135 spin_unlock(&pd->lock);
1136 if (!bio) {
1137 pkt_dbg(2, pd, "no bio\n");
1138 return 0;
1139 }
1140
1141 pkt = pkt_get_packet_data(pd, zone);
1142
1143 pd->current_sector = zone + pd->settings.size;
1144 pkt->sector = zone;
1145 BUG_ON(pkt->frames != pd->settings.size >> 2);
1146 pkt->write_size = 0;
1147
1148
1149
1150
1151
1152 spin_lock(&pd->lock);
1153 pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
1154 while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
1155 bio = node->bio;
1156 pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
1157 get_zone(bio->bi_iter.bi_sector, pd));
1158 if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
1159 break;
1160 pkt_rbtree_erase(pd, node);
1161 spin_lock(&pkt->lock);
1162 bio_list_add(&pkt->orig_bios, bio);
1163 pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
1164 spin_unlock(&pkt->lock);
1165 }
1166
1167
1168
1169 if (pd->congested &&
1170 pd->bio_queue_size <= pd->write_congestion_off) {
1171 pd->congested = false;
1172 wake_up_var(&pd->congested);
1173 }
1174 spin_unlock(&pd->lock);
1175
1176 pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
1177 pkt_set_state(pkt, PACKET_WAITING_STATE);
1178 atomic_set(&pkt->run_sm, 1);
1179
1180 spin_lock(&pd->cdrw.active_list_lock);
1181 list_add(&pkt->list, &pd->cdrw.pkt_active_list);
1182 spin_unlock(&pd->cdrw.active_list_lock);
1183
1184 return 1;
1185 }
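
/*
 * bio_list_copy_data - copy the data of one chain of bios into another.
 * Copying stops when the end of either the source or the destination
 * chain is reached.
 */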
1197 static void bio_list_copy_data(struct bio *dst, struct bio *src)
1198 {
1199 struct bvec_iter src_iter = src->bi_iter;
1200 struct bvec_iter dst_iter = dst->bi_iter;
1201
1202 while (1) {
1203 if (!src_iter.bi_size) {
1204 src = src->bi_next;
1205 if (!src)
1206 break;
1207
1208 src_iter = src->bi_iter;
1209 }
1210
1211 if (!dst_iter.bi_size) {
1212 dst = dst->bi_next;
1213 if (!dst)
1214 break;
1215
1216 dst_iter = dst->bi_iter;
1217 }
1218
1219 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1220 }
1221 }
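
/*
 * Assemble a bio that writes one complete packet and queue it for the
 * underlying block device.
 */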
1227 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1228 {
1229 int f;
1230
1231 bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
1232 REQ_OP_WRITE);
1233 pkt->w_bio->bi_iter.bi_sector = pkt->sector;
1234 pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
1235 pkt->w_bio->bi_private = pkt;
1236
1237
1238 for (f = 0; f < pkt->frames; f++) {
1239 struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
1240 unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
1241
1242 if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
1243 BUG();
1244 }
1245 pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
1246
1247
1248
1249
1250 spin_lock(&pkt->lock);
1251 bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
1252
1253 pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
1254 spin_unlock(&pkt->lock);
1255
1256 pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
1257 pkt->write_size, (unsigned long long)pkt->sector);
1258
1259 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
1260 pkt->cache_valid = 1;
1261 else
1262 pkt->cache_valid = 0;
1263
1264
1265 atomic_set(&pkt->io_wait, 1);
1266 pkt_queue_bio(pd, pkt->w_bio);
1267 }
1268
1269 static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
1270 {
1271 struct bio *bio;
1272
1273 if (status)
1274 pkt->cache_valid = 0;
1275
1276
1277 while ((bio = bio_list_pop(&pkt->orig_bios))) {
1278 bio->bi_status = status;
1279 bio_endio(bio);
1280 }
1281 }
1282
1283 static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
1284 {
1285 pkt_dbg(2, pd, "pkt %d\n", pkt->id);
1286
1287 for (;;) {
1288 switch (pkt->state) {
1289 case PACKET_WAITING_STATE:
1290 if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
1291 return;
1292
1293 pkt->sleep_time = 0;
1294 pkt_gather_data(pd, pkt);
1295 pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
1296 break;
1297
1298 case PACKET_READ_WAIT_STATE:
1299 if (atomic_read(&pkt->io_wait) > 0)
1300 return;
1301
1302 if (atomic_read(&pkt->io_errors) > 0) {
1303 pkt_set_state(pkt, PACKET_RECOVERY_STATE);
1304 } else {
1305 pkt_start_write(pd, pkt);
1306 }
1307 break;
1308
1309 case PACKET_WRITE_WAIT_STATE:
1310 if (atomic_read(&pkt->io_wait) > 0)
1311 return;
1312
1313 if (!pkt->w_bio->bi_status) {
1314 pkt_set_state(pkt, PACKET_FINISHED_STATE);
1315 } else {
1316 pkt_set_state(pkt, PACKET_RECOVERY_STATE);
1317 }
1318 break;
1319
1320 case PACKET_RECOVERY_STATE:
1321 pkt_dbg(2, pd, "No recovery possible\n");
1322 pkt_set_state(pkt, PACKET_FINISHED_STATE);
1323 break;
1324
1325 case PACKET_FINISHED_STATE:
1326 pkt_finish_packet(pkt, pkt->w_bio->bi_status);
1327 return;
1328
1329 default:
1330 BUG();
1331 break;
1332 }
1333 }
1334 }
1335
1336 static void pkt_handle_packets(struct pktcdvd_device *pd)
1337 {
1338 struct packet_data *pkt, *next;
1339
1340
1341
1342
1343 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1344 if (atomic_read(&pkt->run_sm) > 0) {
1345 atomic_set(&pkt->run_sm, 0);
1346 pkt_run_state_machine(pd, pkt);
1347 }
1348 }
1349
1350
1351
1352
1353 spin_lock(&pd->cdrw.active_list_lock);
1354 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
1355 if (pkt->state == PACKET_FINISHED_STATE) {
1356 list_del(&pkt->list);
1357 pkt_put_packet_data(pd, pkt);
1358 pkt_set_state(pkt, PACKET_IDLE_STATE);
1359 atomic_set(&pd->scan_queue, 1);
1360 }
1361 }
1362 spin_unlock(&pd->cdrw.active_list_lock);
1363 }
1364
1365 static void pkt_count_states(struct pktcdvd_device *pd, int *states)
1366 {
1367 struct packet_data *pkt;
1368 int i;
1369
1370 for (i = 0; i < PACKET_NUM_STATES; i++)
1371 states[i] = 0;
1372
1373 spin_lock(&pd->cdrw.active_list_lock);
1374 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1375 states[pkt->state]++;
1376 }
1377 spin_unlock(&pd->cdrw.active_list_lock);
1378 }
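
/*
 * kcdrwd is the per-device worker thread. It is woken when bios are
 * queued; it starts new packets, runs the per-packet state machines and
 * drives the packet I/O scheduler.
 */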
1384 static int kcdrwd(void *foobar)
1385 {
1386 struct pktcdvd_device *pd = foobar;
1387 struct packet_data *pkt;
1388 long min_sleep_time, residue;
1389
1390 set_user_nice(current, MIN_NICE);
1391 set_freezable();
1392
1393 for (;;) {
1394 DECLARE_WAITQUEUE(wait, current);
1395
1396
1397
1398
1399 add_wait_queue(&pd->wqueue, &wait);
1400 for (;;) {
1401 set_current_state(TASK_INTERRUPTIBLE);
1402
1403
1404 if (atomic_read(&pd->scan_queue) > 0)
1405 goto work_to_do;
1406
1407
1408 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1409 if (atomic_read(&pkt->run_sm) > 0)
1410 goto work_to_do;
1411 }
1412
1413
1414 if (atomic_read(&pd->iosched.attention) != 0)
1415 goto work_to_do;
1416
1417
1418 if (PACKET_DEBUG > 1) {
1419 int states[PACKET_NUM_STATES];
1420 pkt_count_states(pd, states);
1421 pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
1422 states[0], states[1], states[2],
1423 states[3], states[4], states[5]);
1424 }
1425
1426 min_sleep_time = MAX_SCHEDULE_TIMEOUT;
1427 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1428 if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
1429 min_sleep_time = pkt->sleep_time;
1430 }
1431
1432 pkt_dbg(2, pd, "sleeping\n");
1433 residue = schedule_timeout(min_sleep_time);
1434 pkt_dbg(2, pd, "wake up\n");
1435
1436
1437 try_to_freeze();
1438
1439 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1440 if (!pkt->sleep_time)
1441 continue;
1442 pkt->sleep_time -= min_sleep_time - residue;
1443 if (pkt->sleep_time <= 0) {
1444 pkt->sleep_time = 0;
1445 atomic_inc(&pkt->run_sm);
1446 }
1447 }
1448
1449 if (kthread_should_stop())
1450 break;
1451 }
1452 work_to_do:
1453 set_current_state(TASK_RUNNING);
1454 remove_wait_queue(&pd->wqueue, &wait);
1455
1456 if (kthread_should_stop())
1457 break;
1458
1459
1460
1461
1462
1463 while (pkt_handle_queue(pd))
1464 ;
1465
1466
1467
1468
1469 pkt_handle_packets(pd);
1470
1471
1472
1473
1474 pkt_iosched_process_queue(pd);
1475 }
1476
1477 return 0;
1478 }
1479
1480 static void pkt_print_settings(struct pktcdvd_device *pd)
1481 {
1482 pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
1483 pd->settings.fp ? "Fixed" : "Variable",
1484 pd->settings.size >> 2,
1485 pd->settings.block_mode == 8 ? '1' : '2');
1486 }
1487
1488 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
1489 {
1490 memset(cgc->cmd, 0, sizeof(cgc->cmd));
1491
1492 cgc->cmd[0] = GPCMD_MODE_SENSE_10;
1493 cgc->cmd[2] = page_code | (page_control << 6);
1494 cgc->cmd[7] = cgc->buflen >> 8;
1495 cgc->cmd[8] = cgc->buflen & 0xff;
1496 cgc->data_direction = CGC_DATA_READ;
1497 return pkt_generic_packet(pd, cgc);
1498 }
1499
1500 static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
1501 {
1502 memset(cgc->cmd, 0, sizeof(cgc->cmd));
1503 memset(cgc->buffer, 0, 2);
1504 cgc->cmd[0] = GPCMD_MODE_SELECT_10;
1505 cgc->cmd[1] = 0x10;
1506 cgc->cmd[7] = cgc->buflen >> 8;
1507 cgc->cmd[8] = cgc->buflen & 0xff;
1508 cgc->data_direction = CGC_DATA_WRITE;
1509 return pkt_generic_packet(pd, cgc);
1510 }
1511
1512 static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
1513 {
1514 struct packet_command cgc;
1515 int ret;
1516
1517
1518 init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
1519 cgc.cmd[0] = GPCMD_READ_DISC_INFO;
1520 cgc.cmd[8] = cgc.buflen = 2;
1521 cgc.quiet = 1;
1522
1523 ret = pkt_generic_packet(pd, &cgc);
1524 if (ret)
1525 return ret;
1526
1527
1528
1529
1530 cgc.buflen = be16_to_cpu(di->disc_information_length) +
1531 sizeof(di->disc_information_length);
1532
1533 if (cgc.buflen > sizeof(disc_information))
1534 cgc.buflen = sizeof(disc_information);
1535
1536 cgc.cmd[8] = cgc.buflen;
1537 return pkt_generic_packet(pd, &cgc);
1538 }
1539
1540 static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
1541 {
1542 struct packet_command cgc;
1543 int ret;
1544
1545 init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
1546 cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
1547 cgc.cmd[1] = type & 3;
1548 cgc.cmd[4] = (track & 0xff00) >> 8;
1549 cgc.cmd[5] = track & 0xff;
1550 cgc.cmd[8] = 8;
1551 cgc.quiet = 1;
1552
1553 ret = pkt_generic_packet(pd, &cgc);
1554 if (ret)
1555 return ret;
1556
1557 cgc.buflen = be16_to_cpu(ti->track_information_length) +
1558 sizeof(ti->track_information_length);
1559
1560 if (cgc.buflen > sizeof(track_information))
1561 cgc.buflen = sizeof(track_information);
1562
1563 cgc.cmd[8] = cgc.buflen;
1564 return pkt_generic_packet(pd, &cgc);
1565 }
1566
1567 static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
1568 long *last_written)
1569 {
1570 disc_information di;
1571 track_information ti;
1572 __u32 last_track;
1573 int ret;
1574
1575 ret = pkt_get_disc_info(pd, &di);
1576 if (ret)
1577 return ret;
1578
1579 last_track = (di.last_track_msb << 8) | di.last_track_lsb;
1580 ret = pkt_get_track_info(pd, last_track, 1, &ti);
1581 if (ret)
1582 return ret;
1583
1584
1585 if (ti.blank) {
1586 last_track--;
1587 ret = pkt_get_track_info(pd, last_track, 1, &ti);
1588 if (ret)
1589 return ret;
1590 }
1591
1592
1593 if (ti.lra_v) {
1594 *last_written = be32_to_cpu(ti.last_rec_address);
1595 } else {
1596
1597 *last_written = be32_to_cpu(ti.track_start) +
1598 be32_to_cpu(ti.track_size);
1599 if (ti.free_blocks)
1600 *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
1601 }
1602 return 0;
1603 }
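
/*
 * Build and send the write parameters mode page based on pd->settings.
 */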
1608 static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
1609 {
1610 struct packet_command cgc;
1611 struct scsi_sense_hdr sshdr;
1612 write_param_page *wp;
1613 char buffer[128];
1614 int ret, size;
1615
1616
1617 if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
1618 return 0;
1619
1620 memset(buffer, 0, sizeof(buffer));
1621 init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
1622 cgc.sshdr = &sshdr;
1623 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
1624 if (ret) {
1625 pkt_dump_sense(pd, &cgc);
1626 return ret;
1627 }
1628
1629 size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
1630 pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
1631 if (size > sizeof(buffer))
1632 size = sizeof(buffer);
1633
1634
1635
1636
1637 init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
1638 cgc.sshdr = &sshdr;
1639 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
1640 if (ret) {
1641 pkt_dump_sense(pd, &cgc);
1642 return ret;
1643 }
1644
1645
1646
1647
1648 wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1649
1650 wp->fp = pd->settings.fp;
1651 wp->track_mode = pd->settings.track_mode;
1652 wp->write_type = pd->settings.write_type;
1653 wp->data_block_type = pd->settings.block_mode;
1654
1655 wp->multi_session = 0;
1656
1657 #ifdef PACKET_USE_LS
1658 wp->link_size = 7;
1659 wp->ls_v = 1;
1660 #endif
1661
1662 if (wp->data_block_type == PACKET_BLOCK_MODE1) {
1663 wp->session_format = 0;
1664 wp->subhdr2 = 0x20;
1665 } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
1666 wp->session_format = 0x20;
1667 wp->subhdr2 = 8;
1668 #if 0
1669 wp->mcn[0] = 0x80;
1670 memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
1671 #endif
1672 } else {
1673
1674
1675
1676 pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
1677 return 1;
1678 }
1679 wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1680
1681 cgc.buflen = cgc.cmd[8] = size;
1682 ret = pkt_mode_select(pd, &cgc);
1683 if (ret) {
1684 pkt_dump_sense(pd, &cgc);
1685 return ret;
1686 }
1687
1688 pkt_print_settings(pd);
1689 return 0;
1690 }
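
/*
 * Return 1 if the track can be used for packet writing, 0 otherwise.
 */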
1695 static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
1696 {
1697 switch (pd->mmc3_profile) {
1698 case 0x1a:
1699 case 0x12:
1700
1701 return 1;
1702 default:
1703 break;
1704 }
1705
1706 if (!ti->packet || !ti->fp)
1707 return 0;
1708
1709
1710
1711
1712 if (ti->rt == 0 && ti->blank == 0)
1713 return 1;
1714
1715 if (ti->rt == 0 && ti->blank == 1)
1716 return 1;
1717
1718 if (ti->rt == 1 && ti->blank == 0)
1719 return 1;
1720
1721 pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1722 return 0;
1723 }
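
/*
 * Return 1 if packet writing to this disc is possible, 0 otherwise.
 */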
1728 static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
1729 {
1730 switch (pd->mmc3_profile) {
1731 case 0x0a:
1732 case 0xffff:
1733 break;
1734 case 0x1a:
1735 case 0x13:
1736 case 0x12:
1737 return 1;
1738 default:
1739 pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
1740 pd->mmc3_profile);
1741 return 0;
1742 }
1743
1744
1745
1746
1747
1748 if (di->disc_type == 0xff) {
1749 pkt_notice(pd, "unknown disc - no track?\n");
1750 return 0;
1751 }
1752
1753 if (di->disc_type != 0x20 && di->disc_type != 0) {
1754 pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
1755 return 0;
1756 }
1757
1758 if (di->erasable == 0) {
1759 pkt_notice(pd, "disc not erasable\n");
1760 return 0;
1761 }
1762
1763 if (di->border_status == PACKET_SESSION_RESERVED) {
1764 pkt_err(pd, "can't write to last track (reserved)\n");
1765 return 0;
1766 }
1767
1768 return 1;
1769 }
1770
1771 static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
1772 {
1773 struct packet_command cgc;
1774 unsigned char buf[12];
1775 disc_information di;
1776 track_information ti;
1777 int ret, track;
1778
1779 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1780 cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
1781 cgc.cmd[8] = 8;
1782 ret = pkt_generic_packet(pd, &cgc);
1783 pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
1784
1785 memset(&di, 0, sizeof(disc_information));
1786 memset(&ti, 0, sizeof(track_information));
1787
1788 ret = pkt_get_disc_info(pd, &di);
1789 if (ret) {
1790 pkt_err(pd, "failed get_disc\n");
1791 return ret;
1792 }
1793
1794 if (!pkt_writable_disc(pd, &di))
1795 return -EROFS;
1796
1797 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1798
1799 track = 1;
1800 ret = pkt_get_track_info(pd, track, 1, &ti);
1801 if (ret) {
1802 pkt_err(pd, "failed get_track\n");
1803 return ret;
1804 }
1805
1806 if (!pkt_writable_track(pd, &ti)) {
1807 pkt_err(pd, "can't write to this track\n");
1808 return -EROFS;
1809 }
1810
1811
1812
1813
1814
1815 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1816 if (pd->settings.size == 0) {
1817 pkt_notice(pd, "detected zero packet size!\n");
1818 return -ENXIO;
1819 }
1820 if (pd->settings.size > PACKET_MAX_SECTORS) {
1821 pkt_err(pd, "packet size is too big\n");
1822 return -EROFS;
1823 }
1824 pd->settings.fp = ti.fp;
1825 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1826
1827 if (ti.nwa_v) {
1828 pd->nwa = be32_to_cpu(ti.next_writable);
1829 set_bit(PACKET_NWA_VALID, &pd->flags);
1830 }
1831
1832
1833
1834
1835
1836
1837 if (ti.lra_v) {
1838 pd->lra = be32_to_cpu(ti.last_rec_address);
1839 set_bit(PACKET_LRA_VALID, &pd->flags);
1840 } else {
1841 pd->lra = 0xffffffff;
1842 set_bit(PACKET_LRA_VALID, &pd->flags);
1843 }
1844
1845
1846
1847
1848 pd->settings.link_loss = 7;
1849 pd->settings.write_type = 0;
1850 pd->settings.track_mode = ti.track_mode;
1851
1852
1853
1854
1855 switch (ti.data_mode) {
1856 case PACKET_MODE1:
1857 pd->settings.block_mode = PACKET_BLOCK_MODE1;
1858 break;
1859 case PACKET_MODE2:
1860 pd->settings.block_mode = PACKET_BLOCK_MODE2;
1861 break;
1862 default:
1863 pkt_err(pd, "unknown data mode\n");
1864 return -EROFS;
1865 }
1866 return 0;
1867 }
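
/*
 * Enable or disable the drive's write cache via the caching mode page.
 */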
1872 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
1873 int set)
1874 {
1875 struct packet_command cgc;
1876 struct scsi_sense_hdr sshdr;
1877 unsigned char buf[64];
1878 int ret;
1879
1880 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1881 cgc.sshdr = &sshdr;
1882 cgc.buflen = pd->mode_offset + 12;
1883
1884
1885
1886
1887 cgc.quiet = 1;
1888
1889 ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
1890 if (ret)
1891 return ret;
1892
1893 buf[pd->mode_offset + 10] |= (!!set << 2);
1894
1895 cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1896 ret = pkt_mode_select(pd, &cgc);
1897 if (ret) {
1898 pkt_err(pd, "write caching control failed\n");
1899 pkt_dump_sense(pd, &cgc);
1900 } else if (!ret && set)
1901 pkt_notice(pd, "enabled write caching\n");
1902 return ret;
1903 }
1904
1905 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1906 {
1907 struct packet_command cgc;
1908
1909 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1910 cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
1911 cgc.cmd[4] = lockflag ? 1 : 0;
1912 return pkt_generic_packet(pd, &cgc);
1913 }
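
/*
 * Query the drive's maximum write speed (in kB/s) from the capabilities
 * mode page.
 */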
1918 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
1919 unsigned *write_speed)
1920 {
1921 struct packet_command cgc;
1922 struct scsi_sense_hdr sshdr;
1923 unsigned char buf[256+18];
1924 unsigned char *cap_buf;
1925 int ret, offset;
1926
1927 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
1928 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
1929 cgc.sshdr = &sshdr;
1930
1931 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1932 if (ret) {
1933 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
1934 sizeof(struct mode_page_header);
1935 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1936 if (ret) {
1937 pkt_dump_sense(pd, &cgc);
1938 return ret;
1939 }
1940 }
1941
1942 offset = 20;
1943 if (cap_buf[1] >= 28)
1944 offset = 28;
1945 if (cap_buf[1] >= 30) {
1946
1947
1948
1949
1950 int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
1951 if (num_spdb > 0)
1952 offset = 34;
1953 }
1954
1955 *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
1956 return 0;
1957 }
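
/*
 * CLV speed tables for standard, high-speed and ultra-speed CD-RW media.
 * The index is the speed field reported in the ATIP; the value is the
 * speed multiplier.
 */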
1961 static char clv_to_speed[16] = {
1962
1963 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1964 };
1965
1966 static char hs_clv_to_speed[16] = {
1967
1968 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1969 };
1970
1971 static char us_clv_to_speed[16] = {
1972
1973 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
1974 };
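
/*
 * Read the maximum media write speed from the ATIP (CD-RW media only).
 */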
1979 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
1980 unsigned *speed)
1981 {
1982 struct packet_command cgc;
1983 struct scsi_sense_hdr sshdr;
1984 unsigned char buf[64];
1985 unsigned int size, st, sp;
1986 int ret;
1987
1988 init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
1989 cgc.sshdr = &sshdr;
1990 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1991 cgc.cmd[1] = 2;
1992 cgc.cmd[2] = 4;
1993 cgc.cmd[8] = 2;
1994 ret = pkt_generic_packet(pd, &cgc);
1995 if (ret) {
1996 pkt_dump_sense(pd, &cgc);
1997 return ret;
1998 }
1999 size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
2000 if (size > sizeof(buf))
2001 size = sizeof(buf);
2002
2003 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
2004 cgc.sshdr = &sshdr;
2005 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
2006 cgc.cmd[1] = 2;
2007 cgc.cmd[2] = 4;
2008 cgc.cmd[8] = size;
2009 ret = pkt_generic_packet(pd, &cgc);
2010 if (ret) {
2011 pkt_dump_sense(pd, &cgc);
2012 return ret;
2013 }
2014
2015 if (!(buf[6] & 0x40)) {
2016 pkt_notice(pd, "disc type is not CD-RW\n");
2017 return 1;
2018 }
2019 if (!(buf[6] & 0x4)) {
2020 pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
2021 return 1;
2022 }
2023
2024 st = (buf[6] >> 3) & 0x7;
2025
2026 sp = buf[16] & 0xf;
2027
2028
2029 switch (st) {
2030 case 0:
2031 *speed = clv_to_speed[sp];
2032 break;
2033 case 1:
2034 *speed = hs_clv_to_speed[sp];
2035 break;
2036 case 2:
2037 *speed = us_clv_to_speed[sp];
2038 break;
2039 default:
2040 pkt_notice(pd, "unknown disc sub-type %d\n", st);
2041 return 1;
2042 }
2043 if (*speed) {
2044 pkt_info(pd, "maximum media speed: %d\n", *speed);
2045 return 0;
2046 } else {
2047 pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
2048 return 1;
2049 }
2050 }
2051
2052 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
2053 {
2054 struct packet_command cgc;
2055 struct scsi_sense_hdr sshdr;
2056 int ret;
2057
2058 pkt_dbg(2, pd, "Performing OPC\n");
2059
2060 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
2061 cgc.sshdr = &sshdr;
2062 cgc.timeout = 60*HZ;
2063 cgc.cmd[0] = GPCMD_SEND_OPC;
2064 cgc.cmd[1] = 1;
2065 ret = pkt_generic_packet(pd, &cgc);
2066 if (ret)
2067 pkt_dump_sense(pd, &cgc);
2068 return ret;
2069 }
2070
2071 static int pkt_open_write(struct pktcdvd_device *pd)
2072 {
2073 int ret;
2074 unsigned int write_speed, media_write_speed, read_speed;
2075
2076 ret = pkt_probe_settings(pd);
2077 if (ret) {
2078 pkt_dbg(2, pd, "failed probe\n");
2079 return ret;
2080 }
2081
2082 ret = pkt_set_write_settings(pd);
2083 if (ret) {
2084 pkt_dbg(1, pd, "failed saving write settings\n");
2085 return -EIO;
2086 }
2087
2088 pkt_write_caching(pd, USE_WCACHING);
2089
2090 ret = pkt_get_max_speed(pd, &write_speed);
2091 if (ret)
2092 write_speed = 16 * 177;
2093 switch (pd->mmc3_profile) {
2094 case 0x13:
2095 case 0x1a:
2096 case 0x12:
2097 pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
2098 break;
2099 default:
2100 ret = pkt_media_speed(pd, &media_write_speed);
2101 if (ret)
2102 media_write_speed = 16;
2103 write_speed = min(write_speed, media_write_speed * 177);
2104 pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
2105 break;
2106 }
2107 read_speed = write_speed;
2108
2109 ret = pkt_set_speed(pd, write_speed, read_speed);
2110 if (ret) {
2111 pkt_dbg(1, pd, "couldn't set write speed\n");
2112 return -EIO;
2113 }
2114 pd->write_speed = write_speed;
2115 pd->read_speed = read_speed;
2116
2117 ret = pkt_perform_opc(pd);
2118 if (ret) {
2119 pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
2120 }
2121
2122 return 0;
2123 }
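
/*
 * Called on the first open of the pktcdvd device: set up the underlying
 * drive for reading or for packet writing.
 */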
2128 static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
2129 {
2130 int ret;
2131 long lba;
2132 struct request_queue *q;
2133 struct block_device *bdev;
2134
2135
2136
2137
2138
2139
2140 bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
2141 if (IS_ERR(bdev)) {
2142 ret = PTR_ERR(bdev);
2143 goto out;
2144 }
2145
2146 ret = pkt_get_last_written(pd, &lba);
2147 if (ret) {
2148 pkt_err(pd, "pkt_get_last_written failed\n");
2149 goto out_putdev;
2150 }
2151
2152 set_capacity(pd->disk, lba << 2);
2153 set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);
2154
2155 q = bdev_get_queue(pd->bdev);
2156 if (write) {
2157 ret = pkt_open_write(pd);
2158 if (ret)
2159 goto out_putdev;
2160
2161
2162
2163
2164 blk_queue_max_hw_sectors(q, pd->settings.size);
2165 set_bit(PACKET_WRITABLE, &pd->flags);
2166 } else {
2167 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2168 clear_bit(PACKET_WRITABLE, &pd->flags);
2169 }
2170
2171 ret = pkt_set_segment_merging(pd, q);
2172 if (ret)
2173 goto out_putdev;
2174
2175 if (write) {
2176 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
2177 pkt_err(pd, "not enough memory for buffers\n");
2178 ret = -ENOMEM;
2179 goto out_putdev;
2180 }
2181 pkt_info(pd, "%lukB available on disc\n", lba << 1);
2182 }
2183
2184 return 0;
2185
2186 out_putdev:
2187 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
2188 out:
2189 return ret;
2190 }
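
/*
 * Called on the last release of the pktcdvd device: optionally flush the
 * drive cache, unlock the door and release the underlying device.
 */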
2196 static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2197 {
2198 if (flush && pkt_flush_cache(pd))
2199 pkt_dbg(1, pd, "not flushing cache\n");
2200
2201 pkt_lock_door(pd, 0);
2202
2203 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2204 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
2205
2206 pkt_shrink_pktlist(pd);
2207 }
2208
2209 static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
2210 {
2211 if (dev_minor >= MAX_WRITERS)
2212 return NULL;
2213
2214 dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
2215 return pkt_devs[dev_minor];
2216 }
2217
2218 static int pkt_open(struct block_device *bdev, fmode_t mode)
2219 {
2220 struct pktcdvd_device *pd = NULL;
2221 int ret;
2222
2223 mutex_lock(&pktcdvd_mutex);
2224 mutex_lock(&ctl_mutex);
2225 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
2226 if (!pd) {
2227 ret = -ENODEV;
2228 goto out;
2229 }
2230 BUG_ON(pd->refcnt < 0);
2231
2232 pd->refcnt++;
2233 if (pd->refcnt > 1) {
2234 if ((mode & FMODE_WRITE) &&
2235 !test_bit(PACKET_WRITABLE, &pd->flags)) {
2236 ret = -EBUSY;
2237 goto out_dec;
2238 }
2239 } else {
2240 ret = pkt_open_dev(pd, mode & FMODE_WRITE);
2241 if (ret)
2242 goto out_dec;
2243
2244
2245
2246
2247 set_blocksize(bdev, CD_FRAMESIZE);
2248 }
2249
2250 mutex_unlock(&ctl_mutex);
2251 mutex_unlock(&pktcdvd_mutex);
2252 return 0;
2253
2254 out_dec:
2255 pd->refcnt--;
2256 out:
2257 mutex_unlock(&ctl_mutex);
2258 mutex_unlock(&pktcdvd_mutex);
2259 return ret;
2260 }
2261
2262 static void pkt_close(struct gendisk *disk, fmode_t mode)
2263 {
2264 struct pktcdvd_device *pd = disk->private_data;
2265
2266 mutex_lock(&pktcdvd_mutex);
2267 mutex_lock(&ctl_mutex);
2268 pd->refcnt--;
2269 BUG_ON(pd->refcnt < 0);
2270 if (pd->refcnt == 0) {
2271 int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2272 pkt_release_dev(pd, flush);
2273 }
2274 mutex_unlock(&ctl_mutex);
2275 mutex_unlock(&pktcdvd_mutex);
2276 }
2277
2278
2279 static void pkt_end_io_read_cloned(struct bio *bio)
2280 {
2281 struct packet_stacked_data *psd = bio->bi_private;
2282 struct pktcdvd_device *pd = psd->pd;
2283
2284 psd->bio->bi_status = bio->bi_status;
2285 bio_put(bio);
2286 bio_endio(psd->bio);
2287 mempool_free(psd, &psd_pool);
2288 pkt_bio_finished(pd);
2289 }
2290
2291 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
2292 {
2293 struct bio *cloned_bio =
2294 bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
2295 struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
2296
2297 psd->pd = pd;
2298 psd->bio = bio;
2299 cloned_bio->bi_private = psd;
2300 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2301 pd->stats.secs_r += bio_sectors(bio);
2302 pkt_queue_bio(pd, cloned_bio);
2303 }
2304
2305 static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
2306 {
2307 struct pktcdvd_device *pd = q->queuedata;
2308 sector_t zone;
2309 struct packet_data *pkt;
2310 int was_empty, blocked_bio;
2311 struct pkt_rb_node *node;
2312
2313 zone = get_zone(bio->bi_iter.bi_sector, pd);
2314
2315
2316
2317
2318
2319 spin_lock(&pd->cdrw.active_list_lock);
2320 blocked_bio = 0;
2321 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2322 if (pkt->sector == zone) {
2323 spin_lock(&pkt->lock);
2324 if ((pkt->state == PACKET_WAITING_STATE) ||
2325 (pkt->state == PACKET_READ_WAIT_STATE)) {
2326 bio_list_add(&pkt->orig_bios, bio);
2327 pkt->write_size +=
2328 bio->bi_iter.bi_size / CD_FRAMESIZE;
2329 if ((pkt->write_size >= pkt->frames) &&
2330 (pkt->state == PACKET_WAITING_STATE)) {
2331 atomic_inc(&pkt->run_sm);
2332 wake_up(&pd->wqueue);
2333 }
2334 spin_unlock(&pkt->lock);
2335 spin_unlock(&pd->cdrw.active_list_lock);
2336 return;
2337 } else {
2338 blocked_bio = 1;
2339 }
2340 spin_unlock(&pkt->lock);
2341 }
2342 }
2343 spin_unlock(&pd->cdrw.active_list_lock);
2344
2345
2346
2347
2348
2349
2350 spin_lock(&pd->lock);
2351 if (pd->write_congestion_on > 0
2352 && pd->bio_queue_size >= pd->write_congestion_on) {
2353 struct wait_bit_queue_entry wqe;
2354
2355 init_wait_var_entry(&wqe, &pd->congested, 0);
2356 for (;;) {
2357 prepare_to_wait_event(__var_waitqueue(&pd->congested),
2358 &wqe.wq_entry,
2359 TASK_UNINTERRUPTIBLE);
2360 if (pd->bio_queue_size <= pd->write_congestion_off)
2361 break;
2362 pd->congested = true;
2363 spin_unlock(&pd->lock);
2364 schedule();
2365 spin_lock(&pd->lock);
2366 }
2367 }
2368 spin_unlock(&pd->lock);
2369
2370 /*
2371  * No matching packet found. Store the bio in the work queue.
2372  */
2373 node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
2374 node->bio = bio;
2375 spin_lock(&pd->lock);
2376 BUG_ON(pd->bio_queue_size < 0);
2377 was_empty = (pd->bio_queue_size == 0);
2378 pkt_rbtree_insert(pd, node);
2379 spin_unlock(&pd->lock);
2380
2381 /*
2382  * Wake up the worker thread.
2383  */
2384 atomic_set(&pd->scan_queue, 1);
2385 if (was_empty) {
2386 /* This wake_up is required for correct operation */
2387 wake_up(&pd->wqueue);
2388 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
2389 /*
2390  * This wake up is not required for correct operation,
2391  * but improves performance in some cases.
2392  */
2393 wake_up(&pd->wqueue);
2394 }
2395 }
2396
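/*
 * ->submit_bio entry point for the pktcdvd disk: reads are passed through
 * to the CD-ROM device, writes must be a multiple of CD_FRAMESIZE and are
 * split at packet (zone) boundaries before being queued for packet writing.
 */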
2397 static void pkt_submit_bio(struct bio *bio)
2398 {
2399 struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
2400 struct bio *split;
2401
2402 bio = bio_split_to_limits(bio);
2403
2404 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
2405 (unsigned long long)bio->bi_iter.bi_sector,
2406 (unsigned long long)bio_end_sector(bio));
2407
2408 /*
2409  * Clone READ bios so we can have our own bi_end_io callback.
2410  */
2411 if (bio_data_dir(bio) == READ) {
2412 pkt_make_request_read(pd, bio);
2413 return;
2414 }
2415
2416 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2417 pkt_notice(pd, "WRITE for ro device (%llu)\n",
2418 (unsigned long long)bio->bi_iter.bi_sector);
2419 goto end_io;
2420 }
2421
2422 if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
2423 pkt_err(pd, "wrong bio size\n");
2424 goto end_io;
2425 }
2426
2427 do {
2428 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
2429 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2430
2431 if (last_zone != zone) {
2432 BUG_ON(last_zone != zone + pd->settings.size);
2433
2434 split = bio_split(bio, last_zone -
2435 bio->bi_iter.bi_sector,
2436 GFP_NOIO, &pkt_bio_set);
2437 bio_chain(split, bio);
2438 } else {
2439 split = bio;
2440 }
2441
2442 pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
2443 } while (split != bio);
2444
2445 return;
2446 end_io:
2447 bio_io_error(bio);
2448 }
2449
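/*
 * Configure the pktcdvd request queue: CD_FRAMESIZE (2048-byte) logical
 * blocks and requests capped at PACKET_MAX_SECTORS.
 */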
2450 static void pkt_init_queue(struct pktcdvd_device *pd)
2451 {
2452 struct request_queue *q = pd->disk->queue;
2453
2454 blk_queue_logical_block_size(q, CD_FRAMESIZE);
2455 blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
2456 q->queuedata = pd;
2457 }
2458
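/*
 * Render /proc/driver/pktcdvd/<name>: current settings, transfer
 * statistics and queue state for one writer.
 */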
2459 static int pkt_seq_show(struct seq_file *m, void *p)
2460 {
2461 struct pktcdvd_device *pd = m->private;
2462 char *msg;
2463 int states[PACKET_NUM_STATES];
2464
2465 seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev);
2466
2467 seq_printf(m, "\nSettings:\n");
2468 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2469
2470 if (pd->settings.write_type == 0)
2471 msg = "Packet";
2472 else
2473 msg = "Unknown";
2474 seq_printf(m, "\twrite type:\t\t%s\n", msg);
2475
2476 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2477 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2478
2479 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2480
2481 if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2482 msg = "Mode 1";
2483 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2484 msg = "Mode 2";
2485 else
2486 msg = "Unknown";
2487 seq_printf(m, "\tblock mode:\t\t%s\n", msg);
2488
2489 seq_printf(m, "\nStatistics:\n");
2490 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
2491 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
2492 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
2493 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
2494 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
2495
2496 seq_printf(m, "\nMisc:\n");
2497 seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
2498 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
2499 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
2500 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
2501 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
2502 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
2503
2504 seq_printf(m, "\nQueue state:\n");
2505 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
2506 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
2507 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
2508
2509 pkt_count_states(pd, states);
2510 seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
2511 states[0], states[1], states[2], states[3], states[4], states[5]);
2512
2513 seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
2514 pd->write_congestion_off,
2515 pd->write_congestion_on);
2516 return 0;
2517 }
2518
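/*
 * Attach pd to the CD-ROM device identified by dev: reject recursive or
 * duplicate mappings, require the underlying queue to belong to a SCSI
 * device, take a reference on the block device, start the kcdrwd worker
 * thread and create the /proc entry.
 */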
2519 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2520 {
2521 int i;
2522 struct block_device *bdev;
2523 struct scsi_device *sdev;
2524
2525 if (pd->pkt_dev == dev) {
2526 pkt_err(pd, "recursive setup not allowed\n");
2527 return -EBUSY;
2528 }
2529 for (i = 0; i < MAX_WRITERS; i++) {
2530 struct pktcdvd_device *pd2 = pkt_devs[i];
2531 if (!pd2)
2532 continue;
2533 if (pd2->bdev->bd_dev == dev) {
2534 pkt_err(pd, "%pg already setup\n", pd2->bdev);
2535 return -EBUSY;
2536 }
2537 if (pd2->pkt_dev == dev) {
2538 pkt_err(pd, "can't chain pktcdvd devices\n");
2539 return -EBUSY;
2540 }
2541 }
2542
2543 bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
2544 if (IS_ERR(bdev))
2545 return PTR_ERR(bdev);
2546 sdev = scsi_device_from_queue(bdev->bd_disk->queue);
2547 if (!sdev) {
2548 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
2549 return -EINVAL;
2550 }
2551 put_device(&sdev->sdev_gendev);
2552
2553 /* This is safe, since we have a reference from open(). */
2554 __module_get(THIS_MODULE);
2555
2556 pd->bdev = bdev;
2557 set_blocksize(bdev, CD_FRAMESIZE);
2558
2559 pkt_init_queue(pd);
2560
2561 atomic_set(&pd->cdrw.pending_bios, 0);
2562 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
2563 if (IS_ERR(pd->cdrw.thread)) {
2564 pkt_err(pd, "can't start kernel thread\n");
2565 goto out_mem;
2566 }
2567
2568 proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
2569 pkt_dbg(1, pd, "writer mapped to %pg\n", bdev);
2570 return 0;
2571
2572 out_mem:
2573 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
2574 /* This is safe: open() is still holding a reference. */
2575 module_put(THIS_MODULE);
2576 return -ENOMEM;
2577 }
2578
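/*
 * Block-device ioctl handler: unlock the door before CDROMEJECT and forward
 * a small set of CD-ROM ioctls to the underlying drive; everything else is
 * rejected with -ENOTTY.
 */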
2579 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
2580 {
2581 struct pktcdvd_device *pd = bdev->bd_disk->private_data;
2582 int ret;
2583
2584 pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
2585 cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2586
2587 mutex_lock(&pktcdvd_mutex);
2588 switch (cmd) {
2589 case CDROMEJECT:
2590 /*
2591  * The door gets locked when the device is opened, so we
2592  * have to unlock it or else the eject command fails.
2593  */
2594 if (pd->refcnt == 1)
2595 pkt_lock_door(pd, 0);
2596 fallthrough;
2597 /*
2598  * Forward selected CDROM ioctls to the CD-ROM drive, for UDF.
2599  */
2600 case CDROMMULTISESSION:
2601 case CDROMREADTOCENTRY:
2602 case CDROM_LAST_WRITTEN:
2603 case CDROM_SEND_PACKET:
2604 case SCSI_IOCTL_SEND_COMMAND:
2605 if (!bdev->bd_disk->fops->ioctl)
2606 ret = -ENOTTY;
2607 else
2608 ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
2609 break;
2610 default:
2611 pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
2612 ret = -ENOTTY;
2613 }
2614 mutex_unlock(&pktcdvd_mutex);
2615
2616 return ret;
2617 }
2618
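/*
 * Media change polling is delegated to the attached CD-ROM drive, if it
 * implements check_events.
 */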
2619 static unsigned int pkt_check_events(struct gendisk *disk,
2620 unsigned int clearing)
2621 {
2622 struct pktcdvd_device *pd = disk->private_data;
2623 struct gendisk *attached_disk;
2624
2625 if (!pd)
2626 return 0;
2627 if (!pd->bdev)
2628 return 0;
2629 attached_disk = pd->bdev->bd_disk;
2630 if (!attached_disk || !attached_disk->fops->check_events)
2631 return 0;
2632 return attached_disk->fops->check_events(attached_disk, clearing);
2633 }
2634
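/* Place the device node under /dev/pktcdvd/. */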
2635 static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
2636 {
2637 return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
2638 }
2639
2640 static const struct block_device_operations pktcdvd_ops = {
2641 .owner = THIS_MODULE,
2642 .submit_bio = pkt_submit_bio,
2643 .open = pkt_open,
2644 .release = pkt_close,
2645 .ioctl = pkt_ioctl,
2646 .compat_ioctl = blkdev_compat_ptr_ioctl,
2647 .check_events = pkt_check_events,
2648 .devnode = pkt_devnode,
2649 };
2650
2651 /*
2652  * Set up mapping from pktcdvd device to CD-ROM device.
2653  */
2654 static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
2655 {
2656 int idx;
2657 int ret = -ENOMEM;
2658 struct pktcdvd_device *pd;
2659 struct gendisk *disk;
2660
2661 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2662
2663 for (idx = 0; idx < MAX_WRITERS; idx++)
2664 if (!pkt_devs[idx])
2665 break;
2666 if (idx == MAX_WRITERS) {
2667 pr_err("max %d writers supported\n", MAX_WRITERS);
2668 ret = -EBUSY;
2669 goto out_mutex;
2670 }
2671
2672 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2673 if (!pd)
2674 goto out_mutex;
2675
2676 ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
2677 sizeof(struct pkt_rb_node));
2678 if (ret)
2679 goto out_mem;
2680
2681 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
2682 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
2683 spin_lock_init(&pd->cdrw.active_list_lock);
2684
2685 spin_lock_init(&pd->lock);
2686 spin_lock_init(&pd->iosched.lock);
2687 bio_list_init(&pd->iosched.read_queue);
2688 bio_list_init(&pd->iosched.write_queue);
2689 sprintf(pd->name, DRIVER_NAME"%d", idx);
2690 init_waitqueue_head(&pd->wqueue);
2691 pd->bio_queue = RB_ROOT;
2692
2693 pd->write_congestion_on = write_congestion_on;
2694 pd->write_congestion_off = write_congestion_off;
2695
2696 ret = -ENOMEM;
2697 disk = blk_alloc_disk(NUMA_NO_NODE);
2698 if (!disk)
2699 goto out_mem;
2700 pd->disk = disk;
2701 disk->major = pktdev_major;
2702 disk->first_minor = idx;
2703 disk->minors = 1;
2704 disk->fops = &pktcdvd_ops;
2705 disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
2706 strcpy(disk->disk_name, pd->name);
2707 disk->private_data = pd;
2708
2709 pd->pkt_dev = MKDEV(pktdev_major, idx);
2710 ret = pkt_new_dev(pd, dev);
2711 if (ret)
2712 goto out_mem2;
2713
2714 /* inherit events of the host cdrom device */
2715 disk->events = pd->bdev->bd_disk->events;
2716
2717 ret = add_disk(disk);
2718 if (ret)
2719 goto out_mem2;
2720
2721 pkt_sysfs_dev_new(pd);
2722 pkt_debugfs_dev_new(pd);
2723
2724 pkt_devs[idx] = pd;
2725 if (pkt_dev)
2726 *pkt_dev = pd->pkt_dev;
2727
2728 mutex_unlock(&ctl_mutex);
2729 return 0;
2730
2731 out_mem2:
2732 put_disk(disk);
2733 out_mem:
2734 mempool_exit(&pd->rb_pool);
2735 kfree(pd);
2736 out_mutex:
2737 mutex_unlock(&ctl_mutex);
2738 pr_err("setup of pktcdvd device failed\n");
2739 return ret;
2740 }
2741
2742 /*
2743  * Tear down mapping from pktcdvd device to CD-ROM device.
2744  */
2745 static int pkt_remove_dev(dev_t pkt_dev)
2746 {
2747 struct pktcdvd_device *pd;
2748 int idx;
2749 int ret = 0;
2750
2751 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2752
2753 for (idx = 0; idx < MAX_WRITERS; idx++) {
2754 pd = pkt_devs[idx];
2755 if (pd && (pd->pkt_dev == pkt_dev))
2756 break;
2757 }
2758 if (idx == MAX_WRITERS) {
2759 pr_debug("dev not setup\n");
2760 ret = -ENXIO;
2761 goto out;
2762 }
2763
2764 if (pd->refcnt > 0) {
2765 ret = -EBUSY;
2766 goto out;
2767 }
2768 if (!IS_ERR(pd->cdrw.thread))
2769 kthread_stop(pd->cdrw.thread);
2770
2771 pkt_devs[idx] = NULL;
2772
2773 pkt_debugfs_dev_remove(pd);
2774 pkt_sysfs_dev_remove(pd);
2775
2776 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
2777
2778 remove_proc_entry(pd->name, pkt_proc);
2779 pkt_dbg(1, pd, "writer unmapped\n");
2780
2781 del_gendisk(pd->disk);
2782 put_disk(pd->disk);
2783
2784 mempool_exit(&pd->rb_pool);
2785 kfree(pd);
2786
2787 /* This is safe: open() is still holding a reference. */
2788 module_put(THIS_MODULE);
2789
2790 out:
2791 mutex_unlock(&ctl_mutex);
2792 return ret;
2793 }
2794
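/*
 * Fill in a PKT_CTRL_CMD_STATUS reply: the CD-ROM and pktcdvd device
 * numbers for the writer at dev_index, plus the maximum number of writers.
 */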
2795 static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
2796 {
2797 struct pktcdvd_device *pd;
2798
2799 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2800
2801 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
2802 if (pd) {
2803 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
2804 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
2805 } else {
2806 ctrl_cmd->dev = 0;
2807 ctrl_cmd->pkt_dev = 0;
2808 }
2809 ctrl_cmd->num_devices = MAX_WRITERS;
2810
2811 mutex_unlock(&ctl_mutex);
2812 }
2813
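/*
 * ioctl handler for the /dev/pktcdvd/control misc device (PACKET_CTRL_CMD):
 * set up a new writer mapping, tear one down, or query the status of an
 * existing one.  SETUP and TEARDOWN require CAP_SYS_ADMIN.
 *
 * A rough userspace sketch (not part of this file; error handling omitted,
 * and "encoded_cdrom_dev" stands for the CD-ROM device number in the
 * new_encode_dev() layout):
 *
 *	struct pkt_ctrl_command c = {
 *		.command = PKT_CTRL_CMD_SETUP,
 *		.dev = encoded_cdrom_dev,
 *	};
 *	int fd = open("/dev/pktcdvd/control", O_RDONLY);
 *	ioctl(fd, PACKET_CTRL_CMD, &c);
 *
 * On success, c.pkt_dev identifies the newly created pktcdvd device.
 */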
2814 static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2815 {
2816 void __user *argp = (void __user *)arg;
2817 struct pkt_ctrl_command ctrl_cmd;
2818 int ret = 0;
2819 dev_t pkt_dev = 0;
2820
2821 if (cmd != PACKET_CTRL_CMD)
2822 return -ENOTTY;
2823
2824 if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
2825 return -EFAULT;
2826
2827 switch (ctrl_cmd.command) {
2828 case PKT_CTRL_CMD_SETUP:
2829 if (!capable(CAP_SYS_ADMIN))
2830 return -EPERM;
2831 ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
2832 ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
2833 break;
2834 case PKT_CTRL_CMD_TEARDOWN:
2835 if (!capable(CAP_SYS_ADMIN))
2836 return -EPERM;
2837 ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
2838 break;
2839 case PKT_CTRL_CMD_STATUS:
2840 pkt_get_status(&ctrl_cmd);
2841 break;
2842 default:
2843 return -ENOTTY;
2844 }
2845
2846 if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
2847 return -EFAULT;
2848 return ret;
2849 }
2850
2851 #ifdef CONFIG_COMPAT
2852 static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2853 {
2854 return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2855 }
2856 #endif
2857
2858 static const struct file_operations pkt_ctl_fops = {
2859 .open = nonseekable_open,
2860 .unlocked_ioctl = pkt_ctl_ioctl,
2861 #ifdef CONFIG_COMPAT
2862 .compat_ioctl = pkt_ctl_compat_ioctl,
2863 #endif
2864 .owner = THIS_MODULE,
2865 .llseek = no_llseek,
2866 };
2867
2868 static struct miscdevice pkt_misc = {
2869 .minor = MISC_DYNAMIC_MINOR,
2870 .name = DRIVER_NAME,
2871 .nodename = "pktcdvd/control",
2872 .fops = &pkt_ctl_fops
2873 };
2874
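/*
 * Module initialisation: create the mempools and bio set, register the
 * block major, sysfs class, debugfs root, the /dev/pktcdvd/control misc
 * device and /proc/driver/pktcdvd.
 */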
2875 static int __init pkt_init(void)
2876 {
2877 int ret;
2878
2879 mutex_init(&ctl_mutex);
2880
2881 ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
2882 sizeof(struct packet_stacked_data));
2883 if (ret)
2884 return ret;
2885 ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
2886 if (ret) {
2887 mempool_exit(&psd_pool);
2888 return ret;
2889 }
2890
2891 ret = register_blkdev(pktdev_major, DRIVER_NAME);
2892 if (ret < 0) {
2893 pr_err("unable to register block device\n");
2894 goto out2;
2895 }
2896 if (!pktdev_major)
2897 pktdev_major = ret;
2898
2899 ret = pkt_sysfs_init();
2900 if (ret)
2901 goto out;
2902
2903 pkt_debugfs_init();
2904
2905 ret = misc_register(&pkt_misc);
2906 if (ret) {
2907 pr_err("unable to register misc device\n");
2908 goto out_misc;
2909 }
2910
2911 pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
2912
2913 return 0;
2914
2915 out_misc:
2916 pkt_debugfs_cleanup();
2917 pkt_sysfs_cleanup();
2918 out:
2919 unregister_blkdev(pktdev_major, DRIVER_NAME);
2920 out2:
2921 mempool_exit(&psd_pool);
2922 bioset_exit(&pkt_bio_set);
2923 return ret;
2924 }
2925
2926 static void __exit pkt_exit(void)
2927 {
2928 remove_proc_entry("driver/"DRIVER_NAME, NULL);
2929 misc_deregister(&pkt_misc);
2930
2931 pkt_debugfs_cleanup();
2932 pkt_sysfs_cleanup();
2933
2934 unregister_blkdev(pktdev_major, DRIVER_NAME);
2935 mempool_exit(&psd_pool);
2936 bioset_exit(&pkt_bio_set);
2937 }
2938
2939 MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
2940 MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
2941 MODULE_LICENSE("GPL");
2942
2943 module_init(pkt_init);
2944 module_exit(pkt_exit);