/*
 * Core functions for the s390 SCLP (Service Call Logical Processor)
 * interface: request queueing, service-call handling and event dispatching.
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/panic_notifier.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>
#include <asm/debug.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

struct sclp_trace_entry {
        char id[4] __nonstring;
        u32 a;
        u64 b;
};

#define SCLP_TRACE_ENTRY_SIZE		sizeof(struct sclp_trace_entry)
#define SCLP_TRACE_MAX_SIZE		128
#define SCLP_TRACE_EVENT_MAX_SIZE	64

/* Debug trace area for abbreviated entries covering all SCLP activity. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
                         &debug_hex_ascii_view);

/* Error trace area containing full entries for failed requests and events. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
                         SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static LIST_HEAD(sclp_reg_list);

/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);

/* Statically allocated read and init requests and their SCCBs. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;
static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
        struct sclp_trace_entry e;

        memset(&e, 0, sizeof(e));
        strncpy(e.id, id, sizeof(e.id));
        e.a = a;
        e.b = b;
        debug_event(&sclp_debug, prio, &e, sizeof(e));
        if (err)
                debug_event(&sclp_debug_err, 0, &e, sizeof(e));
}

static inline int no_zeroes_len(void *data, int len)
{
        char *d = data;

        /* Minimize trace area usage by not tracing trailing zeroes. */
        while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
                len--;

        return len;
}

static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
{
        debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
        if (errlen)
                debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
}

static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
{
        struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
        int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;

        /* Full SCCB tracing if debug level is set to max. */
        if (sclp_debug.level == DEBUG_MAX_LEVEL)
                return len;

        /* Minimal tracing for console writes. */
        if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
            (evbuf->type == EVTYP_MSG || evbuf->type == EVTYP_VT220MSG))
                limit = SCLP_TRACE_ENTRY_SIZE;

        return min(len, limit);
}

static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
                                   sclp_cmdw_t cmd, struct sccb_header *sccb,
                                   bool err)
{
        sclp_trace(prio, id, a, b, err);
        if (sccb) {
                sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
                               err ? sccb->length : 0);
        }
}

static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
                                    struct evbuf_header *evbuf, bool err)
{
        sclp_trace(prio, id, a, b, err);
        sclp_trace_bin(prio + 1, evbuf,
                       min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
                       err ? evbuf->length : 0);
}

static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
                                  bool err)
{
        struct sccb_header *sccb = req->sccb;
        union {
                struct {
                        u16 status;
                        u16 response;
                        u16 timeout;
                        u16 start_count;
                };
                u64 b;
        } summary;

        summary.status = req->status;
        summary.response = sccb ? sccb->response_code : 0;
        summary.timeout = (u16)req->queue_timeout;
        summary.start_count = (u16)req->start_count;

        sclp_trace(prio, id, __pa(sccb), summary.b, err);
}

static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
                                       struct sclp_register *reg)
{
        struct {
                u64 receive;
                u64 send;
        } d;

        d.receive = reg->receive_mask;
        d.send = reg->send_mask;

        sclp_trace(prio, id, a, b, false);
        sclp_trace_bin(prio, &d, sizeof(d), 0);
}

static int __init sclp_setup_console_pages(char *str)
{
        int pages, rc;

        rc = kstrtoint(str, 0, &pages);
        if (!rc && pages >= SCLP_CONSOLE_PAGES)
                sclp_console_pages = pages;
        return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
        int drop, rc;

        rc = kstrtoint(str, 0, &drop);
        if (!rc)
                sclp_console_drop = drop;
        return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
        sclp_running_state_idle,
        sclp_running_state_running,
        sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
        sclp_reading_state_idle,
        sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
        sclp_activation_state_active,
        sclp_activation_state_deactivating,
        sclp_activation_state_inactive,
        sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is a write event mask request pending? */
static volatile enum sclp_mask_state_t {
        sclp_mask_state_idle,
        sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

static void
__sclp_queue_read_req(void)
{
        if (sclp_reading_state == sclp_reading_state_idle) {
                sclp_reading_state = sclp_reading_state_reading;
                __sclp_make_read_req();
                /* Add request to head of queue */
                list_add(&sclp_read_req.list, &sclp_req_queue);
        }
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
        del_timer(&sclp_request_timer);
        sclp_request_timer.function = cb;
        sclp_request_timer.expires = jiffies + time;
        add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
        sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
        sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of the running request. */
static void sclp_request_timeout(bool force_restart)
{
        unsigned long flags;

        /* TMO: A timeout occurred (a=force restart) */
        sclp_trace(2, "TMO", force_restart, 0, true);

        spin_lock_irqsave(&sclp_lock, flags);
        if (force_restart) {
                if (sclp_running_state == sclp_running_state_running) {
                        /* Break running state and queue NOP read event request
                         * to get a defined interface state. */
                        __sclp_queue_read_req();
                        sclp_running_state = sclp_running_state_idle;
                }
        } else {
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout_normal);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
        unsigned long expires_next = 0;
        struct sclp_req *req;

        list_for_each_entry(req, &sclp_req_queue, list) {
                if (!req->queue_expires)
                        continue;
                if (!expires_next ||
                    (time_before(req->queue_expires, expires_next)))
                        expires_next = req->queue_expires;
        }
        return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
        unsigned long flags, now;
        struct sclp_req *req;

        spin_lock_irqsave(&sclp_lock, flags);
        now = jiffies;
        /* Don't need list_for_each_safe because we break out after list_del */
        list_for_each_entry(req, &sclp_req_queue, list) {
                if (!req->queue_expires)
                        continue;
                if (time_before_eq(req->queue_expires, now)) {
                        if (req->status == SCLP_REQ_QUEUED) {
                                req->status = SCLP_REQ_QUEUED_TIMEOUT;
                                list_del(&req->list);
                                goto out;
                        }
                }
        }
        req = NULL;
out:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
        unsigned long flags, expires_next;
        struct sclp_req *req;

        do {
                req = __sclp_req_queue_remove_expired_req();

                if (req) {
                        /* RQTM: Request timed out (a=sccb, b=summary) */
                        sclp_trace_req(2, "RQTM", req, true);
                }

                if (req && req->callback)
                        req->callback(req, req->callback_data);
        } while (req);

        spin_lock_irqsave(&sclp_lock, flags);
        expires_next = __sclp_req_queue_find_next_timeout();
        if (expires_next)
                mod_timer(&sclp_queue_timer, expires_next);
        spin_unlock_irqrestore(&sclp_lock, flags);
}

static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
{
        static u64 srvc_count;
        int rc;

        /* SRV1: Service call about to be issued (a=command, b=sccb address) */
        sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);

        rc = sclp_service_call(command, sccb);

        /* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
        sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);

        if (rc == 0)
                active_cmd = command;

        return rc;
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
        int rc;

        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
        rc = sclp_service_call_trace(req->command, req->sccb);
        req->start_count++;

        if (rc == 0) {
                /* Successfully started request */
                req->status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_request_timeout_restart);
                return 0;
        } else if (rc == -EBUSY) {
                /* Try again later */
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout_normal);
                return 0;
        }
        /* Request failed */
        req->status = SCLP_REQ_FAILED;
        return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
        struct sclp_req *req;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state != sclp_running_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
        }
        del_timer(&sclp_request_timer);
        while (!list_empty(&sclp_req_queue)) {
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
                rc = __sclp_start_request(req);
                if (rc == 0)
                        break;
                /* Request failed */
                if (req->start_count > 1) {
                        /* Cannot abort an already submitted request - it could
                         * still be active at the SCLP. */
                        __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                                 sclp_request_timeout_normal);
                        break;
                }
                /* Post-processing for aborted request */
                list_del(&req->list);

                /* RQAB: Request aborted (a=sccb, b=summary) */
                sclp_trace_req(2, "RQAB", req, true);

                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
                        spin_lock_irqsave(&sclp_lock, flags);
                }
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
        if (req == &sclp_init_req)
                return 1;
        if (sclp_init_state != sclp_init_state_initialized)
                return 0;
        if (sclp_activation_state != sclp_activation_state_active)
                return 0;
        return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        if (!__sclp_can_add_request(req)) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }

        /* RQAD: Request was added (a=sccb, b=caller) */
        sclp_trace(2, "RQAD", __pa(req->sccb), _RET_IP_, false);

        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
        rc = 0;
        if (req->queue_timeout) {
                req->queue_expires = jiffies + req->queue_timeout * HZ;
                if (!timer_pending(&sclp_queue_timer) ||
                    time_after(sclp_queue_timer.expires, req->queue_expires))
                        mod_timer(&sclp_queue_timer, req->queue_expires);
        } else
                req->queue_expires = 0;
        /* Start if request is first in list */
        if (sclp_running_state == sclp_running_state_idle &&
            req->list.prev == &sclp_req_queue) {
                rc = __sclp_start_request(req);
                if (rc)
                        list_del(&req->list);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_add_request);
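
/*
 * Illustrative sketch (not part of this driver): a typical caller fills an
 * sclp_req with a command word, an SCCB and a completion callback before
 * queueing it.  The names my_req, my_sccb, my_done and done are hypothetical.
 *
 *	static void my_done(struct sclp_req *req, void *data)
 *	{
 *		complete((struct completion *)data);
 *	}
 *
 *	my_req.command = SCLP_CMDW_READ_EVENT_DATA;
 *	my_req.sccb = my_sccb;
 *	my_req.status = SCLP_REQ_FILLED;
 *	my_req.callback = my_done;
 *	my_req.callback_data = &done;
 *	rc = sclp_add_request(&my_req);
 */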

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
        unsigned long flags;
        struct evbuf_header *evbuf;
        struct list_head *l;
        struct sclp_register *reg;
        int offset;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        rc = 0;
        for (offset = sizeof(struct sccb_header); offset < sccb->length;
             offset += evbuf->length) {
                evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
                /* Check for malformed hardware response */
                if (evbuf->length == 0)
                        break;
                /* Search for event handler */
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
                                break;
                        else
                                reg = NULL;
                }

                /* EVNT: Event callback (b=receiver function) */
                sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
                                 evbuf, !reg);

                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
                        spin_lock_irqsave(&sclp_lock, flags);
                } else if (reg == NULL)
                        rc = -EOPNOTSUPP;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
        unsigned long flags;
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) req->sccb;
        if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
            sccb->response_code == 0x220))
                sclp_dispatch_evbufs(sccb);
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_reading_state = sclp_reading_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) sclp_read_sccb;
        clear_page(sccb);
        memset(&sclp_read_req, 0, sizeof(struct sclp_req));
        sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
        sclp_read_req.status = SCLP_REQ_QUEUED;
        sclp_read_req.start_count = 0;
        sclp_read_req.callback = sclp_read_cb;
        sclp_read_req.sccb = sccb;
        sccb->length = PAGE_SIZE;
        sccb->function_code = 0;
        sccb->control_mask[2] = 0x80;
}

/* Return the queued request whose SCCB matches the given physical address,
 * or NULL if there is no match. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
        struct list_head *l;
        struct sclp_req *req;

        list_for_each(l, &sclp_req_queue) {
                req = list_entry(l, struct sclp_req, list);
                if (sccb == __pa(req->sccb))
                        return req;
        }
        return NULL;
}

static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
        struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
        struct evbuf_header *evbuf;
        u16 response;

        if (!sccb)
                return true;

        /* Check SCCB response. */
        response = sccb->response_code & 0xff;
        if (response != 0x10 && response != 0x20)
                return false;

        /* Check event-processed flag on outgoing events. */
        if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
                evbuf = (struct evbuf_header *)(sccb + 1);
                if (!(evbuf->flags & 0x80))
                        return false;
        }

        return true;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
                                   unsigned int param32, unsigned long param64)
{
        struct sclp_req *req;
        u32 finished_sccb;
        u32 evbuf_pending;

        inc_irq_stat(IRQEXT_SCP);
        spin_lock(&sclp_lock);
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;

        /* INT: Interrupt received (a=intparm, b=cmd) */
        sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
                        (struct sccb_header *)__va(finished_sccb),
                        !ok_response(finished_sccb, active_cmd));

        if (finished_sccb) {
                del_timer(&sclp_request_timer);
                sclp_running_state = sclp_running_state_reset_pending;
                req = __sclp_find_req(finished_sccb);
                if (req) {
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;

                        /* RQOK: Request success (a=sccb, b=summary) */
                        sclp_trace_req(2, "RQOK", req, false);

                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
                } else {
                        /* UNEX: Unexpected SCCB completion (a=sccb address) */
                        sclp_trace(0, "UNEX", finished_sccb, 0, true);
                }
                sclp_running_state = sclp_running_state_idle;
                active_cmd = 0;
        }
        if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
                __sclp_queue_read_req();
        spin_unlock(&sclp_lock);
        sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
        return (u64) (jiffies / HZ) << 32;
}

/* Busy-wait until the currently running request has completed. Temporarily
 * enables service-signal interrupts so the completion interrupt can be
 * delivered even when called with interrupts otherwise disabled. */
void
sclp_sync_wait(void)
{
        unsigned long long old_tick;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
        static u64 sync_count;
        u64 timeout;
        int irq_context;

        /* SYN1: Synchronous wait start (a=runstate, b=sync count) */
        sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);

        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
                timeout = get_tod_clock_fast() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
        local_irq_save(flags);
        /* Prevent bottom half from executing once we force interrupts open */
        irq_context = in_interrupt();
        if (!irq_context)
                local_bh_disable();
        /* Enable service-signal interruption, disable timer interrupts */
        old_tick = local_tick_disable();
        trace_hardirqs_on();
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
        cr0_sync |= 1UL << (63 - 54);
        __ctl_load(cr0_sync, 0, 0);
        __arch_local_irq_stosm(0x01);
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer))
                        sclp_request_timer.function(&sclp_request_timer);
                cpu_relax();
        }
        local_irq_disable();
        __ctl_load(cr0, 0, 0);
        if (!irq_context)
                _local_bh_enable();
        local_tick_enable(old_tick);
        local_irq_restore(flags);

        /* SYN2: Synchronous wait end (a=runstate, b=sync count) */
        sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
}
EXPORT_SYMBOL(sclp_sync_wait);
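
/*
 * Illustrative sketch (assumption, mirroring the polling loop used by
 * sclp_init_mask() below): callers that cannot sleep busy-wait for a queued
 * request to complete like this:
 *
 *	while (req.status != SCLP_REQ_DONE && req.status != SCLP_REQ_FAILED)
 *		sclp_sync_wait();
 */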

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
        struct list_head *l;
        struct sclp_register *reg;
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;

        do {
                spin_lock_irqsave(&sclp_lock, flags);
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        receive_mask = reg->send_mask & sclp_receive_mask;
                        send_mask = reg->receive_mask & sclp_send_mask;
                        if (reg->sclp_receive_mask != receive_mask ||
                            reg->sclp_send_mask != send_mask) {
                                reg->sclp_receive_mask = receive_mask;
                                reg->sclp_send_mask = send_mask;
                                break;
                        } else
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg && reg->state_change_fn) {
                        /* STCG: State-change callback (b=callback function) */
                        sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
                                   false);

                        reg->state_change_fn(reg);
                }
        } while (reg);
}

struct sclp_statechangebuf {
        struct evbuf_header	header;
        u8		validity_sclp_active_facility_mask : 1;
        u8		validity_sclp_receive_mask : 1;
        u8		validity_sclp_send_mask : 1;
        u8		validity_read_data_function_mask : 1;
        u16		_zeros : 12;
        u16		mask_length;
        u64		sclp_active_facility_mask;
        u8		masks[2 * 1021 + 4];
        /*
         * Variable length, layout depends on mask_length:
         * u8		sclp_receive_mask[mask_length];
         * u8		sclp_send_mask[mask_length];
         * u32		read_data_function_mask;
         */
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
        unsigned long flags;
        struct sclp_statechangebuf *scbuf;

        BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

        scbuf = (struct sclp_statechangebuf *) evbuf;
        spin_lock_irqsave(&sclp_lock, flags);
        if (scbuf->validity_sclp_receive_mask)
                sclp_receive_mask = sccb_get_recv_mask(scbuf);
        if (scbuf->validity_sclp_send_mask)
                sclp_send_mask = sccb_get_send_mask(scbuf);
        spin_unlock_irqrestore(&sclp_lock, flags);
        if (scbuf->validity_sclp_active_facility_mask)
                sclp.facilities = scbuf->sclp_active_facility_mask;
        sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
        .receive_mask = EVTYP_STATECHANGE_MASK,
        .receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
        struct list_head *l;
        struct sclp_register *t;

        *receive_mask = 0;
        *send_mask = 0;
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                *receive_mask |= t->receive_mask;
                *send_mask |= t->send_mask;
        }
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int rc;

        /* REG: Event listener registered (b=caller) */
        sclp_trace_register(2, "REG", 0, _RET_IP_, reg);

        rc = sclp_init();
        if (rc)
                return rc;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check event mask for collisions */
        __sclp_get_mask(&receive_mask, &send_mask);
        if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        /* Trigger initial state change callback */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        if (rc) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_del(&reg->list);
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        return rc;
}

EXPORT_SYMBOL(sclp_register);
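
/*
 * Illustrative sketch (not part of this driver): an event consumer registers
 * a receive mask and a receiver callback, much like sclp_state_change_event
 * above.  The names my_listener and my_receiver_fn are hypothetical.
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = SCLP_EVTYP_MASK(EVTYP_MSG),
 *		.receiver_fn = my_receiver_fn,
 *	};
 *
 *	rc = sclp_register(&my_listener);
 */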

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
        unsigned long flags;

        /* UREG: Event listener unregistered (b=caller) */
        sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);

        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining unprocessed event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
        struct evbuf_header *evbuf;
        int unprocessed;
        u16 remaining;

        evbuf = (struct evbuf_header *) (sccb + 1);
        unprocessed = 0;
        remaining = sccb->length - sizeof(struct sccb_header);
        while (remaining > 0) {
                remaining -= evbuf->length;
                if (evbuf->flags & 0x80) {
                        sccb->length -= evbuf->length;
                        memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
                               remaining);
                } else {
                        unprocessed++;
                        evbuf = (struct evbuf_header *)
                                        ((addr_t) evbuf + evbuf->length);
                }
        }
        return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
        struct init_sccb *sccb = sclp_init_sccb;

        clear_page(sccb);
        memset(&sclp_init_req, 0, sizeof(struct sclp_req));
        sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
        sclp_init_req.status = SCLP_REQ_FILLED;
        sclp_init_req.start_count = 0;
        sclp_init_req.callback = NULL;
        sclp_init_req.callback_data = NULL;
        sclp_init_req.sccb = sccb;
        sccb->header.length = sizeof(*sccb);
        if (sclp_mask_compat_mode)
                sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
        else
                sccb->mask_length = sizeof(sccb_mask_t);
        sccb_set_recv_mask(sccb, receive_mask);
        sccb_set_send_mask(sccb, send_mask);
        sccb_set_sclp_recv_mask(sccb, 0);
        sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * union of registered listener masks. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
        unsigned long flags;
        struct init_sccb *sccb = sclp_init_sccb;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int retry;
        int rc;
        unsigned long wait;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check if interface is in appropriate state */
        if (sclp_mask_state != sclp_mask_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        if (sclp_activation_state == sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_mask_state = sclp_mask_state_initializing;
        /* Determine mask */
        if (calculate)
                __sclp_get_mask(&receive_mask, &send_mask);
        else {
                receive_mask = 0;
                send_mask = 0;
        }
        rc = -EIO;
        for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
                /* Prepare request */
                __sclp_make_init_req(receive_mask, send_mask);
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (sclp_add_request(&sclp_init_req)) {
                        /* Try again later */
                        wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
                        while (time_before(jiffies, wait))
                                sclp_sync_wait();
                        spin_lock_irqsave(&sclp_lock, flags);
                        continue;
                }
                while (sclp_init_req.status != SCLP_REQ_DONE &&
                       sclp_init_req.status != SCLP_REQ_FAILED)
                        sclp_sync_wait();
                spin_lock_irqsave(&sclp_lock, flags);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        /* Successful request */
                        if (calculate) {
                                sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
                                sclp_send_mask = sccb_get_sclp_send_mask(sccb);
                        } else {
                                sclp_receive_mask = 0;
                                sclp_send_mask = 0;
                        }
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        sclp_dispatch_state_change();
                        spin_lock_irqsave(&sclp_lock, flags);
                        rc = 0;
                        break;
                }
        }
        sclp_mask_state = sclp_mask_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Deactivate can only be called when active */
        if (sclp_activation_state != sclp_activation_state_active) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_deactivating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(0);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_inactive;
        else
                sclp_activation_state = sclp_activation_state_active;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Reactivate can only be called when inactive */
        if (sclp_activation_state != sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_activating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_active;
        else
                sclp_activation_state = sclp_activation_state_inactive;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
                               unsigned int param32, unsigned long param64)
{
        u32 finished_sccb;

        inc_irq_stat(IRQEXT_SCP);
        finished_sccb = param32 & 0xfffffff8;
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
                return;
        if (finished_sccb != __pa(sclp_init_sccb))
                panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
                      finished_sccb);
        spin_lock(&sclp_lock);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_DONE;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_FAILED;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
        struct init_sccb *sccb;
        unsigned long flags;
        int retry;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Prepare init mask command */
        rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
        if (rc) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
                rc = sclp_service_call_trace(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_check_timeout);
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* Enable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
                /* Wait for signal from interrupt or timeout */
                sclp_sync_wait();
                /* Disable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
                spin_lock_irqsave(&sclp_lock, flags);
                del_timer(&sclp_request_timer);
                rc = -EBUSY;
                if (sclp_init_req.status == SCLP_REQ_DONE) {
                        if (sccb->header.response_code == 0x20) {
                                rc = 0;
                                break;
                        } else if (sccb->header.response_code == 0x74f0) {
                                if (!sclp_mask_compat_mode) {
                                        sclp_mask_compat_mode = true;
                                        retry = 0;
                                }
                        }
                }
        }
        unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with the rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        sclp_deactivate();
        return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
        .notifier_call = sclp_reboot_event
};

static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
        &driver_attr_con_pages.attr,
        &driver_attr_con_drop.attr,
        &driver_attr_con_full.attr,
        NULL,
};
static struct attribute_group sclp_drv_attr_group = {
        .attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
        &sclp_drv_attr_group,
        NULL,
};

static struct platform_driver sclp_pdrv = {
        .driver = {
                .name = "sclp",
                .groups = sclp_drv_attr_groups,
        },
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check for previous or running initialization */
        if (sclp_init_state != sclp_init_state_uninitialized)
                goto fail_unlock;
        sclp_init_state = sclp_init_state_initializing;
        sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
        sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
        BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
        /* Set up variables */
        list_add(&sclp_state_change_event.list, &sclp_reg_list);
        timer_setup(&sclp_request_timer, NULL, 0);
        timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
        /* Check interface */
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_check_interface();
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register reboot handler */
        rc = register_reboot_notifier(&sclp_reboot_notifier);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register interrupt handler */
        rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
        if (rc)
                goto fail_unregister_reboot_notifier;
        sclp_init_state = sclp_init_state_initialized;
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Enable service-signal external interruption - needs to happen with
         * IRQs enabled. */
        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
        sclp_init_mask(1);
        return 0;

fail_unregister_reboot_notifier:
        unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
        sclp_init_state = sclp_init_state_uninitialized;
        free_page((unsigned long) sclp_read_sccb);
        free_page((unsigned long) sclp_init_sccb);
fail_unlock:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

static __init int sclp_initcall(void)
{
        int rc;

        rc = platform_driver_register(&sclp_pdrv);
        if (rc)
                return rc;

        return sclp_init();
}

arch_initcall(sclp_initcall);