0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #define KMSG_COMPONENT "hvc_iucv"
0013 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
0014
0015 #include <linux/types.h>
0016 #include <linux/slab.h>
0017 #include <asm/ebcdic.h>
0018 #include <linux/ctype.h>
0019 #include <linux/delay.h>
0020 #include <linux/device.h>
0021 #include <linux/init.h>
0022 #include <linux/mempool.h>
0023 #include <linux/moduleparam.h>
0024 #include <linux/tty.h>
0025 #include <linux/wait.h>
0026 #include <net/iucv/iucv.h>
0027
0028 #include "hvc_console.h"
0029
0030
0031
/* 0xc9e4c3e5 is the EBCDIC encoding of "IUCV"; used as base for vterm numbers */
#define HVC_IUCV_MAGIC 0xc9e4c3e5
/* maximum number of hvc iucv lines is bounded by the HVC layer */
#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
/* minimum number of pre-allocated struct iucv_tty_buffer elements */
#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)


/* wire protocol: version and record types of struct iucv_tty_msg */
#define MSG_VERSION 0x02
#define MSG_TYPE_ERROR 0x01	/* error message (ignored by hvc_iucv_write()) */
#define MSG_TYPE_TERMENV 0x02	/* terminal environment (ignored) */
#define MSG_TYPE_TERMIOS 0x04	/* terminal IO settings (ignored) */
#define MSG_TYPE_WINSIZE 0x08	/* terminal window size update */
#define MSG_TYPE_DATA 0x10	/* terminal data */
0043
/*
 * struct iucv_tty_msg - message format exchanged over the IUCV path
 * @version: message version, must be MSG_VERSION (checked on receive)
 * @type:    record type, one of the MSG_TYPE_* constants
 * @datalen: number of payload bytes that follow in @data
 * @data:    variable-length payload (flexible array member)
 */
struct iucv_tty_msg {
	u8 version;
	u8 type;
#define MSG_MAX_DATALEN ((u16)(~0))
	u16 datalen;
	u8 data[];
} __attribute__((packed));
/* total on-the-wire size for a payload of s bytes (header + payload) */
#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
0052
/* state of the IUCV communication path */
enum iucv_state_t {
	IUCV_DISCONN = 0,	/* no peer connected */
	IUCV_CONNECTED = 1,	/* IUCV path established */
	IUCV_SEVERED = 2,	/* IUCV path severed (peer gone) */
};

/* state of the corresponding HVC terminal */
enum tty_state_t {
	TTY_CLOSED = 0,
	TTY_OPENED = 1,
};
0063
/* per-terminal instance data */
struct hvc_iucv_private {
	struct hvc_struct *hvc;		/* HVC struct reference */
	u8 srv_name[8];			/* IUCV service name (in EBCDIC) */
	unsigned char is_console;	/* set for the Linux console device */
	enum iucv_state_t iucv_state;	/* IUCV connection status */
	enum tty_state_t tty_state;	/* TTY status */
	struct iucv_path *path;		/* IUCV path pointer (NULL if none) */
	spinlock_t lock;		/* protects this structure */
#define SNDBUF_SIZE (PAGE_SIZE)		/* send buffer size (one page) */
	void *sndbuf;			/* send buffer */
	size_t sndbuf_len;		/* length of data in the send buffer */
#define QUEUE_SNDBUF_DELAY (HZ / 25)	/* delay before flushing the buffer */
	struct delayed_work sndbuf_work; /* deferred send-buffer flush */
	wait_queue_head_t sndbuf_waitq;	/* waits for send completion */
	struct list_head tty_outqueue;	/* outgoing IUCV messages in flight */
	struct list_head tty_inqueue;	/* incoming IUCV messages */
	struct device *dev;		/* device structure (sysfs) */
	u8 info_path[16];		/* peer vmid + target (device attr) */
};

/* tracks one IUCV message (incoming or outgoing) */
struct iucv_tty_buffer {
	struct list_head list;		/* list pointer */
	struct iucv_message msg;	/* the IUCV message descriptor */
	size_t offset;			/* read offset into mbuf->data */
	struct iucv_tty_msg *mbuf;	/* message data buffer (may be NULL
					 * until the message is received) */
};
0090
0091
/* IUCV callback handler prototypes */
static int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);


/* number of terminal devices (set via the "hvc_iucv=" kernel parameter) */
static unsigned long hvc_iucv_devices = 1;

/* array of allocated terminal instances; index 0 is the console */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX (0)

/* z/VM user ID filter ("hvc_iucv_allow=" parameter) */
#define MAX_VMID_FILTER (500)		/* max number of filter entries */
#define FILTER_WILDCARD_CHAR '*'
static size_t hvc_iucv_filter_size;	/* number of 8-byte filter entries */
static void *hvc_iucv_filter;		/* filter entries, 8 bytes each */
static const char *hvc_iucv_filter_string; /* raw string saved until slab up */
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* kmem cache and mempool for struct iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV callback handler */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending = hvc_iucv_path_pending,
	.path_severed = hvc_iucv_path_severed,
	.message_complete = hvc_iucv_msg_complete,
	.message_pending = hvc_iucv_msg_pending,
};
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132 static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
0133 {
0134 if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
0135 return NULL;
0136 return hvc_iucv_table[num - HVC_IUCV_MAGIC];
0137 }
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152 static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
0153 {
0154 struct iucv_tty_buffer *bufp;
0155
0156 bufp = mempool_alloc(hvc_iucv_mempool, flags);
0157 if (!bufp)
0158 return NULL;
0159 memset(bufp, 0, sizeof(*bufp));
0160
0161 if (size > 0) {
0162 bufp->msg.length = MSG_SIZE(size);
0163 bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
0164 if (!bufp->mbuf) {
0165 mempool_free(bufp, hvc_iucv_mempool);
0166 return NULL;
0167 }
0168 bufp->mbuf->version = MSG_VERSION;
0169 bufp->mbuf->type = MSG_TYPE_DATA;
0170 bufp->mbuf->datalen = (u16) size;
0171 }
0172 return bufp;
0173 }
0174
0175
0176
0177
0178
0179 static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
0180 {
0181 kfree(bufp->mbuf);
0182 mempool_free(bufp, hvc_iucv_mempool);
0183 }
0184
0185
0186
0187
0188
0189 static void destroy_tty_buffer_list(struct list_head *list)
0190 {
0191 struct iucv_tty_buffer *ent, *next;
0192
0193 list_for_each_entry_safe(ent, next, list, list) {
0194 list_del(&ent->list);
0195 destroy_tty_buffer(ent);
0196 }
0197 }
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data
 * @count:		HVC buffer size
 * @has_more_data:	Set to 1 if more input remains queued after this call
 *
 * Picks up the first pending message from the input queue, receives its data
 * (if not yet received) and copies terminal data to @buf.  WINSIZE messages
 * trigger a terminal resize instead; ERROR/TERMENV/TERMIOS are discarded.
 *
 * Locking: priv->lock must be held by the caller (see hvc_iucv_get_chars()).
 *
 * Returns the number of bytes written to @buf, 0 if there is no data,
 * -EPIPE if the path is severed, -ENOMEM / -EIO on receive failures.
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return 0 if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an IUCV message and flush data to the tty buffer */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message data not yet received */
		/* allocate mem to store the message data; if this fails the
		 * message stays queued and is retried on the next call */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0: /* Successful */
			break;
		case 2:	/* No message found */
		case 9: /* Message purged */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or if the received
		 * data does not match the expected header/length */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
		    (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		/* copy as much as fits; keep the buffer queued with an
		 * updated offset if not all data could be delivered */
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* resize the terminal; __hvc_resize() expects the caller to
		 * hold the appropriate lock (we hold priv->lock here) */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored */
	case MSG_TYPE_TERMENV:	/* ignored */
	case MSG_TYPE_TERMIOS:	/* ignored */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}
0301
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316 static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
0317 {
0318 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
0319 int written;
0320 int has_more_data;
0321
0322 if (count <= 0)
0323 return 0;
0324
0325 if (!priv)
0326 return -ENODEV;
0327
0328 spin_lock(&priv->lock);
0329 has_more_data = 0;
0330 written = hvc_iucv_write(priv, buf, count, &has_more_data);
0331 spin_unlock(&priv->lock);
0332
0333
0334 if (has_more_data)
0335 hvc_kick();
0336
0337 return written;
0338 }
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356 static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
0357 int count)
0358 {
0359 size_t len;
0360
0361 if (priv->iucv_state == IUCV_DISCONN)
0362 return count;
0363
0364 if (priv->iucv_state == IUCV_SEVERED)
0365 return -EPIPE;
0366
0367 len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
0368 if (!len)
0369 return 0;
0370
0371 memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
0372 priv->sndbuf_len += len;
0373
0374 if (priv->iucv_state == IUCV_CONNECTED)
0375 schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
0376
0377 return len;
0378 }
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
/**
 * hvc_iucv_send() - Send the buffered terminal data as one IUCV message.
 * @priv:	Pointer to struct hvc_iucv_private
 *
 * Copies the current send buffer into a freshly allocated message buffer
 * and sends it asynchronously over the IUCV path.  The send buffer is
 * emptied regardless of whether the send succeeded.
 *
 * Locking: priv->lock must be held by the caller.
 *
 * Returns the number of bytes that were flushed from the send buffer,
 * -EPIPE if severed, -EIO if disconnected, -ENOMEM on allocation failure.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store the message data and also
	 * compute the total message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	/* queue the message before sending it, so that the completion
	 * callback (hvc_iucv_msg_complete) can find it on the outqueue */
	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* sending failed: drop the message again; the buffered data
		 * is discarded nonetheless (sndbuf_len is reset below) */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}
0428
0429
0430
0431
0432
0433
0434
0435
0436 static void hvc_iucv_sndbuf_work(struct work_struct *work)
0437 {
0438 struct hvc_iucv_private *priv;
0439
0440 priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
0441
0442 spin_lock_bh(&priv->lock);
0443 hvc_iucv_send(priv);
0444 spin_unlock_bh(&priv->lock);
0445 }
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459 static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
0460 {
0461 struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
0462 int queued;
0463
0464 if (count <= 0)
0465 return 0;
0466
0467 if (!priv)
0468 return -ENODEV;
0469
0470 spin_lock(&priv->lock);
0471 queued = hvc_iucv_queue(priv, buf, count);
0472 spin_unlock(&priv->lock);
0473
0474 return queued;
0475 }
0476
0477
0478
0479
0480
0481
0482
0483
0484
0485
0486
0487
0488 static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
0489 {
0490 struct hvc_iucv_private *priv;
0491
0492 priv = hvc_iucv_get_private(id);
0493 if (!priv)
0494 return 0;
0495
0496 spin_lock_bh(&priv->lock);
0497 priv->tty_state = TTY_OPENED;
0498 spin_unlock_bh(&priv->lock);
0499
0500 return 0;
0501 }
0502
0503
0504
0505
0506
/**
 * hvc_iucv_cleanup() - Reset an instance to its initial (disconnected) state.
 * @priv:	Pointer to struct hvc_iucv_private
 *
 * Frees all queued message buffers and resets tty/iucv state and the send
 * buffer.  Locking: priv->lock must be held by the caller.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}
0517
0518
0519
0520
0521
0522 static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
0523 {
0524 int rc;
0525
0526 spin_lock_bh(&priv->lock);
0527 rc = list_empty(&priv->tty_outqueue);
0528 spin_unlock_bh(&priv->lock);
0529
0530 return rc;
0531 }
0532
0533
0534
0535
0536
0537
0538
0539
/**
 * flush_sndbuf_sync() - Flush the send buffer and wait for completion.
 * @priv:	Pointer to struct hvc_iucv_private
 *
 * Cancels the deferred flush worker, sends any buffered data directly and
 * then waits (bounded by HZ/10) until the outqueue has drained, i.e. until
 * hvc_iucv_msg_complete() has been called for the in-flight messages.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	/* stop the deferred worker; we flush synchronously below */
	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);				/* send buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue);	/* anything in flight? */
	spin_unlock_bh(&priv->lock);

	/* hvc_iucv_msg_complete() wakes sndbuf_waitq on completion */
	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
0585
0586
0587
/**
 * hvc_iucv_hangup() - Sever the IUCV path and handle the TTY state.
 * @priv:	Pointer to struct hvc_iucv_private
 *
 * Called when the peer has gone (from hvc_iucv_path_severed()).  Behavior
 * depends on the current tty state:
 *  - TTY_CLOSED: clean up immediately.
 *  - console device: clean up but restore TTY_OPENED, so the console can
 *    accept a new connection later.
 *  - regular open tty: only mark the path IUCV_SEVERED and kick the HVC
 *    thread; hvc_iucv_get_chars() then returns -EPIPE which makes the HVC
 *    layer hang up the tty, and hvc_iucv_notifier_hangup() cleans up.
 *
 * The path itself is severed and freed outside of priv->lock.
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console devices are cleaned up but stay "opened" */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever the path (outside of priv->lock) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:	Pointer to the HVC device
 * @id:	Additional data (the vterm number)
 *
 * Flushes pending output and marks the tty closed.  NOTE: if the hangup
 * was triggered by ourselves (iucv_state == IUCV_SEVERED, set by
 * hvc_iucv_hangup()), the instance is fully cleaned up here.  If the tty
 * was hung up otherwise (e.g. vhangup()), an established IUCV path is kept
 * open so the peer does not have to reconnect.
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);

	priv->tty_state = TTY_CLOSED;

	/* only clean up if we severed the path ourselves */
	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668 static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
0669 {
0670 struct hvc_iucv_private *priv;
0671 struct iucv_path *path;
0672
0673
0674
0675
0676 if (raise)
0677 return;
0678
0679 priv = hvc_iucv_get_private(hp->vtermno);
0680 if (!priv)
0681 return;
0682
0683
0684
0685
0686 flush_sndbuf_sync(priv);
0687
0688 spin_lock_bh(&priv->lock);
0689 path = priv->path;
0690 priv->path = NULL;
0691 priv->iucv_state = IUCV_DISCONN;
0692 spin_unlock_bh(&priv->lock);
0693
0694
0695
0696 if (path) {
0697 iucv_path_sever(path, NULL);
0698 iucv_path_free(path);
0699 }
0700 }
0701
0702
0703
0704
0705
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715 static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
0716 {
0717 struct hvc_iucv_private *priv;
0718
0719 priv = hvc_iucv_get_private(id);
0720 if (!priv)
0721 return;
0722
0723 flush_sndbuf_sync(priv);
0724
0725 spin_lock_bh(&priv->lock);
0726 destroy_tty_buffer_list(&priv->tty_outqueue);
0727 destroy_tty_buffer_list(&priv->tty_inqueue);
0728 priv->tty_state = TTY_CLOSED;
0729 priv->sndbuf_len = 0;
0730 spin_unlock_bh(&priv->lock);
0731 }
0732
0733
0734
0735
0736
0737
0738
0739
0740 static int hvc_iucv_filter_connreq(u8 ipvmid[8])
0741 {
0742 const char *wildcard, *filter_entry;
0743 size_t i, len;
0744
0745
0746 if (!hvc_iucv_filter_size)
0747 return 0;
0748
0749 for (i = 0; i < hvc_iucv_filter_size; i++) {
0750 filter_entry = hvc_iucv_filter + (8 * i);
0751
0752
0753
0754
0755
0756
0757 wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
0758 len = (wildcard) ? wildcard - filter_entry : 8;
0759 if (0 == memcmp(ipvmid, filter_entry, len))
0760 return 0;
0761 }
0762 return 1;
0763 }
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782
0783
/**
 * hvc_iucv_path_pending() - IUCV handler to process a pending IUCV connection.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of the originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * Finds a matching terminal instance: either by exact service name, or —
 * if the peer asked for the "lnxhvc" wildcard service — the first
 * disconnected one.  The connecting z/VM user ID is checked against the
 * hvc_iucv_allow= filter.  On success the path is accepted and a deferred
 * send-buffer flush is scheduled; otherwise the path is severed and freed.
 *
 * Returns 0 if the path was handled (accepted or refused), or -ENODEV if
 * no terminal instance matched.
 */
static int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
				 u8 *ipuser)
{
	struct hvc_iucv_private *priv, *tmp;
	u8 wildcard[9] = "lnxhvc  ";	/* "lnxhvc" blank-padded to 8 chars */
	int i, rc, find_unused;
	u8 nuser_data[16];
	u8 vm_user_id[9];

	ASCEBC(wildcard, sizeof(wildcard));
	/* the peer requests "any" terminal via the wildcard service name */
	find_unused = !memcmp(wildcard, ipuser, 8);

	/* find a matching terminal instance: first free one (wildcard) or
	 * the one with the requested service name */
	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++) {
		tmp = hvc_iucv_table[i];
		if (!tmp)
			continue;

		if (find_unused) {
			spin_lock(&tmp->lock);
			if (tmp->iucv_state == IUCV_DISCONN)
				priv = tmp;
			spin_unlock(&tmp->lock);

		} else if (!memcmp(tmp->srv_name, ipuser, 8))
			priv = tmp;
		if (priv)
			break;
	}
	if (!priv)
		return -ENODEV;

	/* enforce the z/VM user ID filter */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;	/* NUL-terminate for the log message */
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* the instance may have been connected in the meantime (the state
	 * check above for the wildcard case was under a different lock
	 * section); refuse a second connection */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept the path: answer with the two 8-byte halves of ipuser
	 * swapped, and disable IPRMDATA (message data always in buffers) */
	memcpy(nuser_data, ipuser + 8, 8);	/* remote service name */
	memcpy(nuser_data + 8, ipuser, 8);	/* local service name */
	path->msglim = 0xffff;			/* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* store path information for the "peer" device attribute */
	memcpy(priv->info_path, ipvmid, 8);
	memcpy(priv->info_path + 8, ipuser + 8, 8);

	/* flush buffered output data that accumulated while disconnected */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}
0871
0872
0873
0874
0875
0876
0877
0878
0879
0880
0881
0882
0883 static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
0884 {
0885 struct hvc_iucv_private *priv = path->private;
0886
0887 hvc_iucv_hangup(priv);
0888 }
0889
0890
0891
0892
0893
0894
0895
0896
0897
0898
0899
0900
/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * Queues a descriptor for the incoming message on tty_inqueue; the message
 * data itself is received later by hvc_iucv_write().  Messages that exceed
 * the protocol maximum, or that arrive while the tty is closed, or for
 * which no buffer element can be allocated, are rejected.
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed the maximum wire message size */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if the tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate a descriptor only (size 0); data is received on demand */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up the HVC thread to fetch the input */

unlock_return:
	spin_unlock(&priv->lock);
}
0936
0937
0938
0939
0940
0941
0942
0943
0944
0945
0946
0947
0948
0949 static void hvc_iucv_msg_complete(struct iucv_path *path,
0950 struct iucv_message *msg)
0951 {
0952 struct hvc_iucv_private *priv = path->private;
0953 struct iucv_tty_buffer *ent, *next;
0954 LIST_HEAD(list_remove);
0955
0956 spin_lock(&priv->lock);
0957 list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
0958 if (ent->msg.id == msg->id) {
0959 list_move(&ent->list, &list_remove);
0960 break;
0961 }
0962 wake_up(&priv->sndbuf_waitq);
0963 spin_unlock(&priv->lock);
0964 destroy_tty_buffer_list(&list_remove);
0965 }
0966
0967 static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
0968 struct device_attribute *attr,
0969 char *buf)
0970 {
0971 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
0972 size_t len;
0973
0974 len = sizeof(priv->srv_name);
0975 memcpy(buf, priv->srv_name, len);
0976 EBCASC(buf, len);
0977 buf[len++] = '\n';
0978 return len;
0979 }
0980
0981 static ssize_t hvc_iucv_dev_state_show(struct device *dev,
0982 struct device_attribute *attr,
0983 char *buf)
0984 {
0985 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
0986 return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
0987 }
0988
0989 static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
0990 struct device_attribute *attr,
0991 char *buf)
0992 {
0993 struct hvc_iucv_private *priv = dev_get_drvdata(dev);
0994 char vmid[9], ipuser[9];
0995
0996 memset(vmid, 0, sizeof(vmid));
0997 memset(ipuser, 0, sizeof(ipuser));
0998
0999 spin_lock_bh(&priv->lock);
1000 if (priv->iucv_state == IUCV_CONNECTED) {
1001 memcpy(vmid, priv->info_path, 8);
1002 memcpy(ipuser, priv->info_path + 8, 8);
1003 }
1004 spin_unlock_bh(&priv->lock);
1005 EBCASC(ipuser, 8);
1006
1007 return sprintf(buf, "%s:%s\n", vmid, ipuser);
1008 }
1009
1010
1011
/* HVC operations for IUCV terminals */
static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
	.dtr_rts = hvc_iucv_dtr_rts,
};

/* read-only sysfs device attributes (mode 0640) */
static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
static struct attribute *hvc_iucv_dev_attrs[] = {
	&dev_attr_termid.attr,
	&dev_attr_state.attr,
	&dev_attr_peer.attr,
	NULL,
};
static struct attribute_group hvc_iucv_dev_attr_group = {
	.attrs = hvc_iucv_dev_attrs,
};
static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
	&hvc_iucv_dev_attr_group,
	NULL,
};
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
/**
 * hvc_iucv_alloc() - Allocate and register a HVC terminal instance.
 * @id:			Instance / terminal index (0 .. hvc_iucv_devices-1)
 * @is_console:		Flag if the instance is used as the Linux console
 *
 * Allocates a struct hvc_iucv_private, its send buffer and a struct device
 * on the IUCV bus, registers the terminal with the HVC layer and stores
 * the instance in hvc_iucv_table[].
 *
 * Returns 0 on success, or a negative errno on failure.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* allocate a HVC terminal device (vtermno = HVC_IUCV_MAGIC + id) */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		goto out_error_hvc;
	}

	/* notify the HVC thread instead of using polling */
	priv->hvc->irq_requested = 1;

	/* setup the service name, e.g. "lnxhvc0 ", and convert to EBCDIC */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	/* create and setup the device on the IUCV bus */
	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
	if (!priv->dev) {
		rc = -ENOMEM;
		goto out_error_dev;
	}
	dev_set_name(priv->dev, "hvc_iucv%d", id);
	dev_set_drvdata(priv->dev, priv);
	priv->dev->bus = &iucv_bus;
	priv->dev->parent = iucv_root;
	priv->dev->groups = hvc_iucv_dev_attr_groups;
	/* priv->dev is released by kfree() when its refcount drops to 0 */
	priv->dev->release = (void (*)(struct device *)) kfree;
	rc = device_register(priv->dev);
	if (rc) {
		/* put_device() triggers the release callback (kfree) */
		put_device(priv->dev);
		goto out_error_dev;
	}

	hvc_iucv_table[id] = priv;
	return 0;

out_error_dev:
	hvc_remove(priv->hvc);
out_error_hvc:
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);

	return rc;
}
1119
1120
1121
1122
/**
 * hvc_iucv_destroy() - Tear down a terminal instance (init error path only).
 * @priv:	Pointer to the struct hvc_iucv_private instance
 *
 * Note: device_unregister() drops the last reference to priv->dev, whose
 * release callback is kfree (set in hvc_iucv_alloc()).
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}
1130
1131
1132
1133
1134
1135
1136 static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1137 {
1138 const char *nextdelim, *residual;
1139 size_t len;
1140
1141 nextdelim = strchr(filter, ',');
1142 if (nextdelim) {
1143 len = nextdelim - filter;
1144 residual = nextdelim + 1;
1145 } else {
1146 len = strlen(filter);
1147 residual = filter + len;
1148 }
1149
1150 if (len == 0)
1151 return ERR_PTR(-EINVAL);
1152
1153
1154 if (filter[len - 1] == '\n')
1155 len--;
1156
1157
1158 if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
1159 return ERR_PTR(-EINVAL);
1160
1161 if (len > 8)
1162 return ERR_PTR(-EINVAL);
1163
1164
1165 memset(dest, ' ', 8);
1166 while (len--)
1167 dest[len] = toupper(filter[len]);
1168 return residual;
1169 }
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
/**
 * hvc_iucv_setup_filter() - Set up the z/VM user ID filter.
 * @val:	String consisting of comma-separated z/VM user IDs
 *
 * Parses @val into an array of 8-byte filter entries and atomically
 * replaces the current filter under hvc_iucv_filter_lock.  An empty string
 * (or a single newline) clears the filter.
 *
 * Returns 0 on success; -EINVAL for malformed entries, -ENOSPC if more
 * than MAX_VMID_FILTER entries are specified, -ENOMEM on allocation
 * failure.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		/* clear the filter */
		size = 0;
		array = NULL;
		goto out_replace_filter;
	}

	/* count comma-separated entries: number of commas + 1 */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified filter size exceeds the maximum */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kcalloc(size, 8, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* parse each entry into its 8-byte slot of the array */
	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	/* swap in the new filter atomically; free the old one afterwards */
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247 static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
1248 {
1249 int rc;
1250
1251 if (!MACHINE_IS_VM || !hvc_iucv_devices)
1252 return -ENODEV;
1253
1254 if (!val)
1255 return -EINVAL;
1256
1257 rc = 0;
1258 if (slab_is_available())
1259 rc = hvc_iucv_setup_filter(val);
1260 else
1261 hvc_iucv_filter_string = val;
1262 return rc;
1263 }
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
/**
 * param_get_vmidfilter() - Get the hvc_iucv_allow= kernel parameter.
 * @buffer:	Buffer to write the filter string to
 * @kp:		Kernel parameter descriptor (unused)
 *
 * Writes the current filter as a comma-separated list of z/VM user IDs
 * (trailing blanks stripped) into @buffer and returns its length.
 */
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		/* entries are blank-padded; copy up to the first blank */
		end = memchr(start, ' ', 8);
		len = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	/* replace the trailing comma with a string terminator */
	if (rc)
		buffer[--rc] = '\0';
	return rc;
}
1298
/* no type checking for the vmidfilter parameter (it is a void * filter) */
#define param_check_vmidfilter(name, p) __param_check(name, p, void)

/* set/get operations for the hvc_iucv_allow= kernel parameter */
static const struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};
1305
1306
1307
1308
/**
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization.
 *
 * Validates the configuration, sets up the deferred z/VM user ID filter,
 * creates the buffer cache/mempool, registers the HVC console, allocates
 * all terminal instances and registers the IUCV callback handler.  On any
 * failure, everything allocated so far is torn down and hvc_iucv_devices
 * is reset to 0 so the other entry points bail out with -ENODEV.
 */
static int __init hvc_iucv_init(void)
{
	int rc;
	unsigned int i;

	if (!hvc_iucv_devices)
		return -ENODEV;

	if (!MACHINE_IS_VM) {
		pr_notice("The z/VM IUCV HVC device driver cannot "
			  "be used without z/VM\n");
		rc = -ENODEV;
		goto out_error;
	}

	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
		pr_err("%lu is not a valid value for the hvc_iucv= "
		       "kernel parameter\n", hvc_iucv_devices);
		rc = -EINVAL;
		goto out_error;
	}

	/* parse the hvc_iucv_allow= string saved by param_set_vmidfilter()
	 * before the slab allocator was available */
	if (hvc_iucv_filter_string) {
		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
		switch (rc) {
		case 0:
			break;
		case -ENOMEM:
			pr_err("Allocating memory failed with "
			       "reason code=%d\n", 3);
			goto out_error;
		case -EINVAL:
			pr_err("hvc_iucv_allow= does not specify a valid "
			       "z/VM user ID list\n");
			goto out_error;
		case -ENOSPC:
			pr_err("hvc_iucv_allow= specifies too many "
			       "z/VM user IDs\n");
			goto out_error;
		default:
			goto out_error;
		}
	}

	/* kmem cache and mempool for iucv_tty_buffer elements */
	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
					   sizeof(struct iucv_tty_buffer),
					   0, 0, NULL);
	if (!hvc_iucv_buffer_cache) {
		pr_err("Allocating memory failed with reason code=%d\n", 1);
		rc = -ENOMEM;
		goto out_error;
	}

	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
						    hvc_iucv_buffer_cache);
	if (!hvc_iucv_mempool) {
		pr_err("Allocating memory failed with reason code=%d\n", 2);
		kmem_cache_destroy(hvc_iucv_buffer_cache);
		rc = -ENOMEM;
		goto out_error;
	}

	/* register the first terminal device as the Linux console
	 * (must be done before allocating hvc terminal devices) */
	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
	if (rc) {
		pr_err("Registering HVC terminal device as "
		       "Linux console failed\n");
		goto out_error_memory;
	}

	/* allocate the configured number of hvc terminal devices */
	for (i = 0; i < hvc_iucv_devices; i++) {
		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
		if (rc) {
			pr_err("Creating a new HVC terminal device "
			       "failed with error code=%d\n", rc);
			goto out_error_hvc;
		}
	}

	/* register the IUCV callback handler */
	rc = iucv_register(&hvc_iucv_handler, 0);
	if (rc) {
		pr_err("Registering IUCV handlers failed with error code=%d\n",
		       rc);
		goto out_error_hvc;
	}

	return 0;

out_error_hvc:
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i])
			hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
	mempool_destroy(hvc_iucv_mempool);
	kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
	kfree(hvc_iucv_filter);	/* release a possibly allocated filter */
	hvc_iucv_devices = 0;	/* ensure that the other entry points fail */
	return rc;
}
1413
1414
1415
1416
1417
1418 static int __init hvc_iucv_config(char *val)
1419 {
1420 if (kstrtoul(val, 10, &hvc_iucv_devices))
1421 pr_warn("hvc_iucv= invalid parameter value '%s'\n", val);
1422 return 1;
1423 }
1424
1425
/* driver initialization and kernel parameters:
 *   hvc_iucv=<n>        number of terminal devices (parsed by __setup)
 *   hvc_iucv_allow=...  z/VM user ID filter (core_param, mode 0640) */
device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);